aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorH. Peter Anvin <hpa@linux.intel.com>2013-01-25 19:31:21 -0500
committerH. Peter Anvin <hpa@linux.intel.com>2013-01-25 19:31:21 -0500
commit7b5c4a65cc27f017c170b025f8d6d75dabb11c6f (patch)
tree05deacbc66a9f5c27147a6ea975211ae82281044 /net
parent3596f5bb0a6afd01a784bfe120f420edbbf82861 (diff)
parent949db153b6466c6f7cad5a427ecea94985927311 (diff)
Merge tag 'v3.8-rc5' into x86/mm
The __pa() fixup series that follows touches KVM code that is not present in the existing branch based on v3.7-rc5, so merge in the current upstream from Linus. Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'net')
-rw-r--r--net/8021q/vlan.c15
-rw-r--r--net/8021q/vlan_dev.c12
-rw-r--r--net/9p/trans_virtio.c3
-rw-r--r--net/atm/atm_sysfs.c40
-rw-r--r--net/atm/br2684.c91
-rw-r--r--net/atm/common.c12
-rw-r--r--net/atm/pppoatm.c68
-rw-r--r--net/batman-adv/Kconfig11
-rw-r--r--net/batman-adv/Makefile1
-rw-r--r--net/batman-adv/bat_iv_ogm.c53
-rw-r--r--net/batman-adv/bitarray.c23
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c131
-rw-r--r--net/batman-adv/bridge_loop_avoidance.h6
-rw-r--r--net/batman-adv/debugfs.c60
-rw-r--r--net/batman-adv/distributed-arp-table.c1066
-rw-r--r--net/batman-adv/distributed-arp-table.h167
-rw-r--r--net/batman-adv/gateway_client.c19
-rw-r--r--net/batman-adv/hard-interface.c51
-rw-r--r--net/batman-adv/hash.h22
-rw-r--r--net/batman-adv/icmp_socket.c16
-rw-r--r--net/batman-adv/main.c89
-rw-r--r--net/batman-adv/main.h36
-rw-r--r--net/batman-adv/originator.c22
-rw-r--r--net/batman-adv/packet.h86
-rw-r--r--net/batman-adv/routing.c315
-rw-r--r--net/batman-adv/send.c43
-rw-r--r--net/batman-adv/send.h3
-rw-r--r--net/batman-adv/soft-interface.c106
-rw-r--r--net/batman-adv/sysfs.c58
-rw-r--r--net/batman-adv/translation-table.c541
-rw-r--r--net/batman-adv/translation-table.h8
-rw-r--r--net/batman-adv/types.h102
-rw-r--r--net/batman-adv/unicast.c143
-rw-r--r--net/batman-adv/unicast.h36
-rw-r--r--net/batman-adv/vis.c44
-rw-r--r--net/bluetooth/Kconfig2
-rw-r--r--net/bluetooth/Makefile2
-rw-r--r--net/bluetooth/a2mp.c459
-rw-r--r--net/bluetooth/af_bluetooth.c10
-rw-r--r--net/bluetooth/amp.c471
-rw-r--r--net/bluetooth/bnep/core.c3
-rw-r--r--net/bluetooth/bnep/netdev.c1
-rw-r--r--net/bluetooth/cmtp/capi.c2
-rw-r--r--net/bluetooth/cmtp/core.c2
-rw-r--r--net/bluetooth/cmtp/sock.c2
-rw-r--r--net/bluetooth/hci_conn.c76
-rw-r--r--net/bluetooth/hci_core.c237
-rw-r--r--net/bluetooth/hci_event.c562
-rw-r--r--net/bluetooth/hci_sysfs.c10
-rw-r--r--net/bluetooth/hidp/core.c17
-rw-r--r--net/bluetooth/l2cap_core.c1577
-rw-r--r--net/bluetooth/l2cap_sock.c94
-rw-r--r--net/bluetooth/lib.c14
-rw-r--r--net/bluetooth/mgmt.c117
-rw-r--r--net/bluetooth/rfcomm/core.c19
-rw-r--r--net/bluetooth/rfcomm/sock.c13
-rw-r--r--net/bluetooth/rfcomm/tty.c6
-rw-r--r--net/bluetooth/sco.c98
-rw-r--r--net/bluetooth/smp.c4
-rw-r--r--net/bridge/Makefile2
-rw-r--r--net/bridge/br_device.c4
-rw-r--r--net/bridge/br_if.c8
-rw-r--r--net/bridge/br_input.c17
-rw-r--r--net/bridge/br_ioctl.c25
-rw-r--r--net/bridge/br_mdb.c493
-rw-r--r--net/bridge/br_multicast.c88
-rw-r--r--net/bridge/br_netlink.c247
-rw-r--r--net/bridge/br_private.h55
-rw-r--r--net/bridge/br_stp.c22
-rw-r--r--net/bridge/br_stp_bpdu.c7
-rw-r--r--net/bridge/br_sysfs_br.c22
-rw-r--r--net/bridge/br_sysfs_if.c47
-rw-r--r--net/caif/caif_usb.c18
-rw-r--r--net/caif/cfctrl.c3
-rw-r--r--net/can/bcm.c3
-rw-r--r--net/can/gw.c6
-rw-r--r--net/can/proc.c2
-rw-r--r--net/ceph/ceph_common.c3
-rw-r--r--net/ceph/messenger.c130
-rw-r--r--net/ceph/osd_client.c93
-rw-r--r--net/ceph/osdmap.c47
-rw-r--r--net/core/dev.c250
-rw-r--r--net/core/dev_addr_lists.c3
-rw-r--r--net/core/ethtool.c2
-rw-r--r--net/core/filter.c139
-rw-r--r--net/core/flow.c4
-rw-r--r--net/core/neighbour.c20
-rw-r--r--net/core/net-sysfs.c41
-rw-r--r--net/core/net_namespace.c55
-rw-r--r--net/core/netpoll.c6
-rw-r--r--net/core/netprio_cgroup.c262
-rw-r--r--net/core/pktgen.c47
-rw-r--r--net/core/rtnetlink.c230
-rw-r--r--net/core/scm.c6
-rw-r--r--net/core/skbuff.c40
-rw-r--r--net/core/sock.c84
-rw-r--r--net/core/sysctl_net_core.c5
-rw-r--r--net/dcb/dcbnl.c8
-rw-r--r--net/dccp/ipv4.c4
-rw-r--r--net/dccp/ipv6.c3
-rw-r--r--net/dccp/minisocks.c3
-rw-r--r--net/decnet/dn_dev.c6
-rw-r--r--net/decnet/dn_fib.c6
-rw-r--r--net/dns_resolver/dns_key.c15
-rw-r--r--net/dsa/Kconfig18
-rw-r--r--net/ieee802154/6lowpan.c3
-rw-r--r--net/ipv4/af_inet.c93
-rw-r--r--net/ipv4/arp.c12
-rw-r--r--net/ipv4/devinet.c200
-rw-r--r--net/ipv4/fib_frontend.c2
-rw-r--r--net/ipv4/fib_semantics.c2
-rw-r--r--net/ipv4/icmp.c3
-rw-r--r--net/ipv4/inet_connection_sock.c41
-rw-r--r--net/ipv4/inet_diag.c159
-rw-r--r--net/ipv4/inet_hashtables.c36
-rw-r--r--net/ipv4/ip_fragment.c23
-rw-r--r--net/ipv4/ip_gre.c45
-rw-r--r--net/ipv4/ip_options.c6
-rw-r--r--net/ipv4/ip_output.c4
-rw-r--r--net/ipv4/ip_sockglue.c42
-rw-r--r--net/ipv4/ip_vti.c31
-rw-r--r--net/ipv4/ipconfig.c14
-rw-r--r--net/ipv4/ipip.c271
-rw-r--r--net/ipv4/ipmr.c141
-rw-r--r--net/ipv4/netfilter/arp_tables.c8
-rw-r--r--net/ipv4/netfilter/ip_tables.c8
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c9
-rw-r--r--net/ipv4/netfilter/ipt_REJECT.c1
-rw-r--r--net/ipv4/netfilter/iptable_nat.c15
-rw-r--r--net/ipv4/protocol.c21
-rw-r--r--net/ipv4/route.c36
-rw-r--r--net/ipv4/syncookies.c2
-rw-r--r--net/ipv4/sysctl_net_ipv4.c3
-rw-r--r--net/ipv4/tcp.c59
-rw-r--r--net/ipv4/tcp_cong.c5
-rw-r--r--net/ipv4/tcp_input.c80
-rw-r--r--net/ipv4/tcp_ipv4.c44
-rw-r--r--net/ipv4/tcp_metrics.c12
-rw-r--r--net/ipv4/tcp_minisocks.c8
-rw-r--r--net/ipv4/tcp_output.c24
-rw-r--r--net/ipv4/tcp_timer.c8
-rw-r--r--net/ipv4/xfrm4_policy.c13
-rw-r--r--net/ipv6/Makefile5
-rw-r--r--net/ipv6/addrconf.c229
-rw-r--r--net/ipv6/af_inet6.c245
-rw-r--r--net/ipv6/ah6.c10
-rw-r--r--net/ipv6/anycast.c7
-rw-r--r--net/ipv6/datagram.c8
-rw-r--r--net/ipv6/exthdrs.c70
-rw-r--r--net/ipv6/exthdrs_core.c168
-rw-r--r--net/ipv6/exthdrs_offload.c41
-rw-r--r--net/ipv6/fib6_rules.c2
-rw-r--r--net/ipv6/icmp.c2
-rw-r--r--net/ipv6/inet6_connection_sock.c3
-rw-r--r--net/ipv6/inet6_hashtables.c27
-rw-r--r--net/ipv6/ip6_fib.c57
-rw-r--r--net/ipv6/ip6_flowlabel.c3
-rw-r--r--net/ipv6/ip6_gre.c32
-rw-r--r--net/ipv6/ip6_offload.c282
-rw-r--r--net/ipv6/ip6_offload.h18
-rw-r--r--net/ipv6/ip6_output.c72
-rw-r--r--net/ipv6/ip6_tunnel.c288
-rw-r--r--net/ipv6/ip6mr.c157
-rw-r--r--net/ipv6/ipv6_sockglue.c10
-rw-r--r--net/ipv6/mcast.c7
-rw-r--r--net/ipv6/ndisc.c81
-rw-r--r--net/ipv6/netfilter/ip6_tables.c117
-rw-r--r--net/ipv6/netfilter/ip6t_NPT.c33
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c1
-rw-r--r--net/ipv6/netfilter/ip6t_rpfilter.c2
-rw-r--r--net/ipv6/netfilter/ip6table_nat.c15
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c74
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c4
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c5
-rw-r--r--net/ipv6/netfilter/nf_defrag_ipv6_hooks.c6
-rw-r--r--net/ipv6/netfilter/nf_nat_proto_icmpv6.c2
-rw-r--r--net/ipv6/output_core.c76
-rw-r--r--net/ipv6/protocol.c25
-rw-r--r--net/ipv6/raw.c6
-rw-r--r--net/ipv6/reassembly.c4
-rw-r--r--net/ipv6/route.c210
-rw-r--r--net/ipv6/sit.c459
-rw-r--r--net/ipv6/syncookies.c2
-rw-r--r--net/ipv6/tcp_ipv6.c127
-rw-r--r--net/ipv6/tcpv6_offload.c95
-rw-r--r--net/ipv6/udp.c94
-rw-r--r--net/ipv6/udp_offload.c120
-rw-r--r--net/ipv6/xfrm6_policy.c21
-rw-r--r--net/ipv6/xfrm6_state.c4
-rw-r--r--net/irda/ircomm/ircomm_tty.c1
-rw-r--r--net/irda/irttp.c1
-rw-r--r--net/iucv/iucv.c2
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/l2tp/l2tp_netlink.c2
-rw-r--r--net/llc/af_llc.c2
-rw-r--r--net/mac80211/Kconfig2
-rw-r--r--net/mac80211/Makefile1
-rw-r--r--net/mac80211/aes_cmac.c18
-rw-r--r--net/mac80211/agg-rx.c2
-rw-r--r--net/mac80211/agg-tx.c14
-rw-r--r--net/mac80211/cfg.c504
-rw-r--r--net/mac80211/chan.c439
-rw-r--r--net/mac80211/debugfs.h6
-rw-r--r--net/mac80211/debugfs_key.c23
-rw-r--r--net/mac80211/debugfs_netdev.c78
-rw-r--r--net/mac80211/debugfs_sta.c59
-rw-r--r--net/mac80211/driver-ops.h142
-rw-r--r--net/mac80211/ht.c4
-rw-r--r--net/mac80211/ibss.c188
-rw-r--r--net/mac80211/ieee80211_i.h232
-rw-r--r--net/mac80211/iface.c191
-rw-r--r--net/mac80211/key.c15
-rw-r--r--net/mac80211/key.h11
-rw-r--r--net/mac80211/main.c156
-rw-r--r--net/mac80211/mesh.c90
-rw-r--r--net/mac80211/mesh.h20
-rw-r--r--net/mac80211/mesh_plink.c64
-rw-r--r--net/mac80211/mesh_sync.c105
-rw-r--r--net/mac80211/mlme.c810
-rw-r--r--net/mac80211/offchannel.c24
-rw-r--r--net/mac80211/pm.c46
-rw-r--r--net/mac80211/rate.c5
-rw-r--r--net/mac80211/rate.h12
-rw-r--r--net/mac80211/rc80211_minstrel.c9
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c8
-rw-r--r--net/mac80211/rx.c250
-rw-r--r--net/mac80211/scan.c107
-rw-r--r--net/mac80211/sta_info.c126
-rw-r--r--net/mac80211/sta_info.h34
-rw-r--r--net/mac80211/status.c178
-rw-r--r--net/mac80211/trace.h246
-rw-r--r--net/mac80211/tx.c300
-rw-r--r--net/mac80211/util.c365
-rw-r--r--net/mac80211/vht.c35
-rw-r--r--net/mac80211/wme.c40
-rw-r--r--net/mac80211/wpa.c5
-rw-r--r--net/mac802154/ieee802154_dev.c4
-rw-r--r--net/mac802154/tx.c7
-rw-r--r--net/mac802154/wpan.c6
-rw-r--r--net/netfilter/Kconfig7
-rw-r--r--net/netfilter/core.c2
-rw-r--r--net/netfilter/ipset/ip_set_core.c245
-rw-r--r--net/netfilter/ipset/ip_set_hash_ip.c4
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipport.c7
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportip.c7
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c7
-rw-r--r--net/netfilter/ipset/ip_set_hash_netiface.c2
-rw-r--r--net/netfilter/ipvs/Kconfig7
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c15
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c404
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c8
-rw-r--r--net/netfilter/ipvs/ip_vs_dh.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_lblc.c7
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_nfct.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_pe_sip.c18
-rw-r--r--net/netfilter/ipvs/ip_vs_proto.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_ah_esp.c9
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_sctp.c42
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_tcp.c40
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_udp.c41
-rw-r--r--net/netfilter/ipvs/ip_vs_sched.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_sh.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c81
-rw-r--r--net/netfilter/nf_conntrack_acct.c4
-rw-r--r--net/netfilter/nf_conntrack_core.c33
-rw-r--r--net/netfilter/nf_conntrack_ecache.c4
-rw-r--r--net/netfilter/nf_conntrack_helper.c4
-rw-r--r--net/netfilter/nf_conntrack_netlink.c120
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c8
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c2
-rw-r--r--net/netfilter/nf_conntrack_standalone.c4
-rw-r--r--net/netfilter/nf_conntrack_timestamp.c4
-rw-r--r--net/netfilter/nf_log.c2
-rw-r--r--net/netfilter/nf_queue.c152
-rw-r--r--net/netfilter/nfnetlink.c2
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c3
-rw-r--r--net/netfilter/nfnetlink_log.c16
-rw-r--r--net/netfilter/nfnetlink_queue_core.c14
-rw-r--r--net/netfilter/xt_CT.c58
-rw-r--r--net/netfilter/xt_HMARK.c8
-rw-r--r--net/netfilter/xt_hashlimit.c54
-rw-r--r--net/netfilter/xt_ipvs.c4
-rw-r--r--net/netfilter/xt_recent.c43
-rw-r--r--net/netlink/af_netlink.c7
-rw-r--r--net/nfc/Kconfig4
-rw-r--r--net/nfc/core.c33
-rw-r--r--net/nfc/hci/command.c28
-rw-r--r--net/nfc/hci/core.c90
-rw-r--r--net/nfc/hci/llc.c2
-rw-r--r--net/nfc/hci/llc_shdlc.c7
-rw-r--r--net/nfc/llcp/Kconfig4
-rw-r--r--net/nfc/llcp/commands.c148
-rw-r--r--net/nfc/llcp/llcp.c250
-rw-r--r--net/nfc/llcp/llcp.h13
-rw-r--r--net/nfc/llcp/sock.c38
-rw-r--r--net/nfc/nci/Kconfig4
-rw-r--r--net/nfc/nci/core.c29
-rw-r--r--net/nfc/netlink.c157
-rw-r--r--net/nfc/nfc.h6
-rw-r--r--net/nfc/rawsock.c1
-rw-r--r--net/openvswitch/actions.c97
-rw-r--r--net/openvswitch/datapath.c27
-rw-r--r--net/openvswitch/flow.c42
-rw-r--r--net/openvswitch/flow.h8
-rw-r--r--net/openvswitch/vport-netdev.c16
-rw-r--r--net/openvswitch/vport-netdev.h3
-rw-r--r--net/openvswitch/vport.c5
-rw-r--r--net/packet/af_packet.c50
-rw-r--r--net/packet/internal.h1
-rw-r--r--net/phonet/pn_netlink.c6
-rw-r--r--net/rds/ib.h2
-rw-r--r--net/rds/ib_cm.c11
-rw-r--r--net/rds/ib_recv.c33
-rw-r--r--net/rfkill/core.c4
-rw-r--r--net/rfkill/rfkill-gpio.c2
-rw-r--r--net/rfkill/rfkill-regulator.c6
-rw-r--r--net/sched/Kconfig2
-rw-r--r--net/sched/act_api.c3
-rw-r--r--net/sched/cls_api.c2
-rw-r--r--net/sched/cls_cgroup.c52
-rw-r--r--net/sched/sch_api.c20
-rw-r--r--net/sched/sch_cbq.c3
-rw-r--r--net/sched/sch_generic.c11
-rw-r--r--net/sched/sch_htb.c139
-rw-r--r--net/sched/sch_mq.c4
-rw-r--r--net/sched/sch_mqprio.c4
-rw-r--r--net/sched/sch_qfq.c830
-rw-r--r--net/sctp/Kconfig60
-rw-r--r--net/sctp/associola.c16
-rw-r--r--net/sctp/chunk.c20
-rw-r--r--net/sctp/endpointola.c7
-rw-r--r--net/sctp/inqueue.c2
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sctp/output.c14
-rw-r--r--net/sctp/outqueue.c12
-rw-r--r--net/sctp/probe.c3
-rw-r--r--net/sctp/proc.c29
-rw-r--r--net/sctp/protocol.c15
-rw-r--r--net/sctp/sm_make_chunk.c24
-rw-r--r--net/sctp/sm_sideeffect.c55
-rw-r--r--net/sctp/sm_statefuns.c22
-rw-r--r--net/sctp/socket.c92
-rw-r--r--net/sctp/sysctl.c59
-rw-r--r--net/sctp/transport.c22
-rw-r--r--net/sctp/tsnmap.c8
-rw-r--r--net/sctp/ulpqueue.c3
-rw-r--r--net/socket.c8
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c17
-rw-r--r--net/sunrpc/backchannel_rqst.c9
-rw-r--r--net/sunrpc/bc_svc.c2
-rw-r--r--net/sunrpc/cache.c4
-rw-r--r--net/sunrpc/clnt.c45
-rw-r--r--net/sunrpc/rpc_pipe.c9
-rw-r--r--net/sunrpc/rpcb_clnt.c6
-rw-r--r--net/sunrpc/sched.c101
-rw-r--r--net/sunrpc/svc.c20
-rw-r--r--net/sunrpc/svc_xprt.c11
-rw-r--r--net/sunrpc/svcsock.c104
-rw-r--r--net/sunrpc/xdr.c5
-rw-r--r--net/sunrpc/xprt.c12
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c10
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c4
-rw-r--r--net/sunrpc/xprtsock.c19
-rw-r--r--net/sysctl_net.c15
-rw-r--r--net/tipc/Kconfig13
-rw-r--r--net/tipc/bcast.c27
-rw-r--r--net/tipc/bearer.c110
-rw-r--r--net/tipc/bearer.h24
-rw-r--r--net/tipc/core.c5
-rw-r--r--net/tipc/discover.c2
-rw-r--r--net/tipc/link.c232
-rw-r--r--net/tipc/link.h4
-rw-r--r--net/tipc/name_distr.c2
-rw-r--r--net/tipc/node.c15
-rw-r--r--net/tipc/node.h6
-rw-r--r--net/tipc/port.c32
-rw-r--r--net/tipc/port.h6
-rw-r--r--net/tipc/socket.c411
-rw-r--r--net/tipc/subscr.c2
-rw-r--r--net/unix/diag.c3
-rw-r--r--net/unix/sysctl_net_unix.c4
-rw-r--r--net/wireless/Kconfig5
-rw-r--r--net/wireless/Makefile4
-rw-r--r--net/wireless/ap.c4
-rw-r--r--net/wireless/chan.c313
-rw-r--r--net/wireless/core.c26
-rw-r--r--net/wireless/core.h32
-rw-r--r--net/wireless/ethtool.c15
-rw-r--r--net/wireless/ibss.c36
-rw-r--r--net/wireless/mesh.c59
-rw-r--r--net/wireless/mlme.c116
-rw-r--r--net/wireless/nl80211.c868
-rw-r--r--net/wireless/nl80211.h8
-rw-r--r--net/wireless/rdev-ops.h878
-rw-r--r--net/wireless/reg.c14
-rw-r--r--net/wireless/reg.h2
-rw-r--r--net/wireless/scan.c615
-rw-r--r--net/wireless/sme.c25
-rw-r--r--net/wireless/sysfs.c9
-rw-r--r--net/wireless/trace.c7
-rw-r--r--net/wireless/trace.h2324
-rw-r--r--net/wireless/util.c197
-rw-r--r--net/wireless/wext-compat.c76
-rw-r--r--net/wireless/wext-sme.c19
-rw-r--r--net/xfrm/xfrm_ipcomp.c8
-rw-r--r--net/xfrm/xfrm_replay.c13
-rw-r--r--net/xfrm/xfrm_sysctl.c4
-rw-r--r--net/xfrm/xfrm_user.c2
409 files changed, 23129 insertions, 7727 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index ee070722a3a3..a292e8050ef2 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -242,6 +242,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
242 * hope the underlying device can handle it. 242 * hope the underlying device can handle it.
243 */ 243 */
244 new_dev->mtu = real_dev->mtu; 244 new_dev->mtu = real_dev->mtu;
245 new_dev->priv_flags |= (real_dev->priv_flags & IFF_UNICAST_FLT);
245 246
246 vlan_dev_priv(new_dev)->vlan_id = vlan_id; 247 vlan_dev_priv(new_dev)->vlan_id = vlan_id;
247 vlan_dev_priv(new_dev)->real_dev = real_dev; 248 vlan_dev_priv(new_dev)->real_dev = real_dev;
@@ -294,7 +295,7 @@ static void vlan_transfer_features(struct net_device *dev,
294 else 295 else
295 vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN; 296 vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
296 297
297#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 298#if IS_ENABLED(CONFIG_FCOE)
298 vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid; 299 vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
299#endif 300#endif
300 301
@@ -529,7 +530,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
529 switch (args.cmd) { 530 switch (args.cmd) {
530 case SET_VLAN_INGRESS_PRIORITY_CMD: 531 case SET_VLAN_INGRESS_PRIORITY_CMD:
531 err = -EPERM; 532 err = -EPERM;
532 if (!capable(CAP_NET_ADMIN)) 533 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
533 break; 534 break;
534 vlan_dev_set_ingress_priority(dev, 535 vlan_dev_set_ingress_priority(dev,
535 args.u.skb_priority, 536 args.u.skb_priority,
@@ -539,7 +540,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
539 540
540 case SET_VLAN_EGRESS_PRIORITY_CMD: 541 case SET_VLAN_EGRESS_PRIORITY_CMD:
541 err = -EPERM; 542 err = -EPERM;
542 if (!capable(CAP_NET_ADMIN)) 543 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
543 break; 544 break;
544 err = vlan_dev_set_egress_priority(dev, 545 err = vlan_dev_set_egress_priority(dev,
545 args.u.skb_priority, 546 args.u.skb_priority,
@@ -548,7 +549,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
548 549
549 case SET_VLAN_FLAG_CMD: 550 case SET_VLAN_FLAG_CMD:
550 err = -EPERM; 551 err = -EPERM;
551 if (!capable(CAP_NET_ADMIN)) 552 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
552 break; 553 break;
553 err = vlan_dev_change_flags(dev, 554 err = vlan_dev_change_flags(dev,
554 args.vlan_qos ? args.u.flag : 0, 555 args.vlan_qos ? args.u.flag : 0,
@@ -557,7 +558,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
557 558
558 case SET_VLAN_NAME_TYPE_CMD: 559 case SET_VLAN_NAME_TYPE_CMD:
559 err = -EPERM; 560 err = -EPERM;
560 if (!capable(CAP_NET_ADMIN)) 561 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
561 break; 562 break;
562 if ((args.u.name_type >= 0) && 563 if ((args.u.name_type >= 0) &&
563 (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) { 564 (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
@@ -573,14 +574,14 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
573 574
574 case ADD_VLAN_CMD: 575 case ADD_VLAN_CMD:
575 err = -EPERM; 576 err = -EPERM;
576 if (!capable(CAP_NET_ADMIN)) 577 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
577 break; 578 break;
578 err = register_vlan_device(dev, args.u.VID); 579 err = register_vlan_device(dev, args.u.VID);
579 break; 580 break;
580 581
581 case DEL_VLAN_CMD: 582 case DEL_VLAN_CMD:
582 err = -EPERM; 583 err = -EPERM;
583 if (!capable(CAP_NET_ADMIN)) 584 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
584 break; 585 break;
585 unregister_vlan_dev(dev, NULL); 586 unregister_vlan_dev(dev, NULL);
586 err = 0; 587 err = 0;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 402442402af7..4a6d31a082b9 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -409,7 +409,7 @@ static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
409 return err; 409 return err;
410} 410}
411 411
412#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 412#if IS_ENABLED(CONFIG_FCOE)
413static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid, 413static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
414 struct scatterlist *sgl, unsigned int sgc) 414 struct scatterlist *sgl, unsigned int sgc)
415{ 415{
@@ -531,6 +531,10 @@ static const struct header_ops vlan_header_ops = {
531 .parse = eth_header_parse, 531 .parse = eth_header_parse,
532}; 532};
533 533
534static struct device_type vlan_type = {
535 .name = "vlan",
536};
537
534static const struct net_device_ops vlan_netdev_ops; 538static const struct net_device_ops vlan_netdev_ops;
535 539
536static int vlan_dev_init(struct net_device *dev) 540static int vlan_dev_init(struct net_device *dev)
@@ -564,7 +568,7 @@ static int vlan_dev_init(struct net_device *dev)
564 if (is_zero_ether_addr(dev->broadcast)) 568 if (is_zero_ether_addr(dev->broadcast))
565 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); 569 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
566 570
567#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 571#if IS_ENABLED(CONFIG_FCOE)
568 dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid; 572 dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;
569#endif 573#endif
570 574
@@ -579,6 +583,8 @@ static int vlan_dev_init(struct net_device *dev)
579 583
580 dev->netdev_ops = &vlan_netdev_ops; 584 dev->netdev_ops = &vlan_netdev_ops;
581 585
586 SET_NETDEV_DEVTYPE(dev, &vlan_type);
587
582 if (is_vlan_dev(real_dev)) 588 if (is_vlan_dev(real_dev))
583 subclass = 1; 589 subclass = 1;
584 590
@@ -741,7 +747,7 @@ static const struct net_device_ops vlan_netdev_ops = {
741 .ndo_do_ioctl = vlan_dev_ioctl, 747 .ndo_do_ioctl = vlan_dev_ioctl,
742 .ndo_neigh_setup = vlan_dev_neigh_setup, 748 .ndo_neigh_setup = vlan_dev_neigh_setup,
743 .ndo_get_stats64 = vlan_dev_get_stats64, 749 .ndo_get_stats64 = vlan_dev_get_stats64,
744#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 750#if IS_ENABLED(CONFIG_FCOE)
745 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, 751 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
746 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, 752 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
747 .ndo_fcoe_enable = vlan_dev_fcoe_enable, 753 .ndo_fcoe_enable = vlan_dev_fcoe_enable,
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 35b8911b1c8e..fd05c81cb348 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -39,6 +39,7 @@
39#include <linux/inet.h> 39#include <linux/inet.h>
40#include <linux/idr.h> 40#include <linux/idr.h>
41#include <linux/file.h> 41#include <linux/file.h>
42#include <linux/highmem.h>
42#include <linux/slab.h> 43#include <linux/slab.h>
43#include <net/9p/9p.h> 44#include <net/9p/9p.h>
44#include <linux/parser.h> 45#include <linux/parser.h>
@@ -325,7 +326,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
325 int count = nr_pages; 326 int count = nr_pages;
326 while (nr_pages) { 327 while (nr_pages) {
327 s = rest_of_page(data); 328 s = rest_of_page(data);
328 pages[index++] = virt_to_page(data); 329 pages[index++] = kmap_to_page(data);
329 data += s; 330 data += s;
330 nr_pages--; 331 nr_pages--;
331 } 332 }
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index f49da5814bc3..350bf62b2ae3 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -14,49 +14,45 @@ static ssize_t show_type(struct device *cdev,
14 struct device_attribute *attr, char *buf) 14 struct device_attribute *attr, char *buf)
15{ 15{
16 struct atm_dev *adev = to_atm_dev(cdev); 16 struct atm_dev *adev = to_atm_dev(cdev);
17 return sprintf(buf, "%s\n", adev->type); 17
18 return scnprintf(buf, PAGE_SIZE, "%s\n", adev->type);
18} 19}
19 20
20static ssize_t show_address(struct device *cdev, 21static ssize_t show_address(struct device *cdev,
21 struct device_attribute *attr, char *buf) 22 struct device_attribute *attr, char *buf)
22{ 23{
23 char *pos = buf;
24 struct atm_dev *adev = to_atm_dev(cdev); 24 struct atm_dev *adev = to_atm_dev(cdev);
25 int i;
26
27 for (i = 0; i < (ESI_LEN - 1); i++)
28 pos += sprintf(pos, "%02x:", adev->esi[i]);
29 pos += sprintf(pos, "%02x\n", adev->esi[i]);
30 25
31 return pos - buf; 26 return scnprintf(buf, PAGE_SIZE, "%pM\n", adev->esi);
32} 27}
33 28
34static ssize_t show_atmaddress(struct device *cdev, 29static ssize_t show_atmaddress(struct device *cdev,
35 struct device_attribute *attr, char *buf) 30 struct device_attribute *attr, char *buf)
36{ 31{
37 unsigned long flags; 32 unsigned long flags;
38 char *pos = buf;
39 struct atm_dev *adev = to_atm_dev(cdev); 33 struct atm_dev *adev = to_atm_dev(cdev);
40 struct atm_dev_addr *aaddr; 34 struct atm_dev_addr *aaddr;
41 int bin[] = { 1, 2, 10, 6, 1 }, *fmt = bin; 35 int bin[] = { 1, 2, 10, 6, 1 }, *fmt = bin;
42 int i, j; 36 int i, j, count = 0;
43 37
44 spin_lock_irqsave(&adev->lock, flags); 38 spin_lock_irqsave(&adev->lock, flags);
45 list_for_each_entry(aaddr, &adev->local, entry) { 39 list_for_each_entry(aaddr, &adev->local, entry) {
46 for (i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) { 40 for (i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) {
47 if (j == *fmt) { 41 if (j == *fmt) {
48 pos += sprintf(pos, "."); 42 count += scnprintf(buf + count,
43 PAGE_SIZE - count, ".");
49 ++fmt; 44 ++fmt;
50 j = 0; 45 j = 0;
51 } 46 }
52 pos += sprintf(pos, "%02x", 47 count += scnprintf(buf + count,
53 aaddr->addr.sas_addr.prv[i]); 48 PAGE_SIZE - count, "%02x",
49 aaddr->addr.sas_addr.prv[i]);
54 } 50 }
55 pos += sprintf(pos, "\n"); 51 count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
56 } 52 }
57 spin_unlock_irqrestore(&adev->lock, flags); 53 spin_unlock_irqrestore(&adev->lock, flags);
58 54
59 return pos - buf; 55 return count;
60} 56}
61 57
62static ssize_t show_atmindex(struct device *cdev, 58static ssize_t show_atmindex(struct device *cdev,
@@ -64,25 +60,21 @@ static ssize_t show_atmindex(struct device *cdev,
64{ 60{
65 struct atm_dev *adev = to_atm_dev(cdev); 61 struct atm_dev *adev = to_atm_dev(cdev);
66 62
67 return sprintf(buf, "%d\n", adev->number); 63 return scnprintf(buf, PAGE_SIZE, "%d\n", adev->number);
68} 64}
69 65
70static ssize_t show_carrier(struct device *cdev, 66static ssize_t show_carrier(struct device *cdev,
71 struct device_attribute *attr, char *buf) 67 struct device_attribute *attr, char *buf)
72{ 68{
73 char *pos = buf;
74 struct atm_dev *adev = to_atm_dev(cdev); 69 struct atm_dev *adev = to_atm_dev(cdev);
75 70
76 pos += sprintf(pos, "%d\n", 71 return scnprintf(buf, PAGE_SIZE, "%d\n",
77 adev->signal == ATM_PHY_SIG_LOST ? 0 : 1); 72 adev->signal == ATM_PHY_SIG_LOST ? 0 : 1);
78
79 return pos - buf;
80} 73}
81 74
82static ssize_t show_link_rate(struct device *cdev, 75static ssize_t show_link_rate(struct device *cdev,
83 struct device_attribute *attr, char *buf) 76 struct device_attribute *attr, char *buf)
84{ 77{
85 char *pos = buf;
86 struct atm_dev *adev = to_atm_dev(cdev); 78 struct atm_dev *adev = to_atm_dev(cdev);
87 int link_rate; 79 int link_rate;
88 80
@@ -100,9 +92,7 @@ static ssize_t show_link_rate(struct device *cdev,
100 default: 92 default:
101 link_rate = adev->link_rate * 8 * 53; 93 link_rate = adev->link_rate * 8 * 53;
102 } 94 }
103 pos += sprintf(pos, "%d\n", link_rate); 95 return scnprintf(buf, PAGE_SIZE, "%d\n", link_rate);
104
105 return pos - buf;
106} 96}
107 97
108static DEVICE_ATTR(address, S_IRUGO, show_address, NULL); 98static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 4819d31533e0..403e71fa88fe 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -68,12 +68,15 @@ struct br2684_vcc {
68 /* keep old push, pop functions for chaining */ 68 /* keep old push, pop functions for chaining */
69 void (*old_push)(struct atm_vcc *vcc, struct sk_buff *skb); 69 void (*old_push)(struct atm_vcc *vcc, struct sk_buff *skb);
70 void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb); 70 void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb);
71 void (*old_release_cb)(struct atm_vcc *vcc);
72 struct module *old_owner;
71 enum br2684_encaps encaps; 73 enum br2684_encaps encaps;
72 struct list_head brvccs; 74 struct list_head brvccs;
73#ifdef CONFIG_ATM_BR2684_IPFILTER 75#ifdef CONFIG_ATM_BR2684_IPFILTER
74 struct br2684_filter filter; 76 struct br2684_filter filter;
75#endif /* CONFIG_ATM_BR2684_IPFILTER */ 77#endif /* CONFIG_ATM_BR2684_IPFILTER */
76 unsigned int copies_needed, copies_failed; 78 unsigned int copies_needed, copies_failed;
79 atomic_t qspace;
77}; 80};
78 81
79struct br2684_dev { 82struct br2684_dev {
@@ -181,18 +184,15 @@ static struct notifier_block atm_dev_notifier = {
181static void br2684_pop(struct atm_vcc *vcc, struct sk_buff *skb) 184static void br2684_pop(struct atm_vcc *vcc, struct sk_buff *skb)
182{ 185{
183 struct br2684_vcc *brvcc = BR2684_VCC(vcc); 186 struct br2684_vcc *brvcc = BR2684_VCC(vcc);
184 struct net_device *net_dev = skb->dev;
185 187
186 pr_debug("(vcc %p ; net_dev %p )\n", vcc, net_dev); 188 pr_debug("(vcc %p ; net_dev %p )\n", vcc, brvcc->device);
187 brvcc->old_pop(vcc, skb); 189 brvcc->old_pop(vcc, skb);
188 190
189 if (!net_dev) 191 /* If the queue space just went up from zero, wake */
190 return; 192 if (atomic_inc_return(&brvcc->qspace) == 1)
191 193 netif_wake_queue(brvcc->device);
192 if (atm_may_send(vcc, 0))
193 netif_wake_queue(net_dev);
194
195} 194}
195
196/* 196/*
197 * Send a packet out a particular vcc. Not to useful right now, but paves 197 * Send a packet out a particular vcc. Not to useful right now, but paves
198 * the way for multiple vcc's per itf. Returns true if we can send, 198 * the way for multiple vcc's per itf. Returns true if we can send,
@@ -256,16 +256,30 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
256 ATM_SKB(skb)->atm_options = atmvcc->atm_options; 256 ATM_SKB(skb)->atm_options = atmvcc->atm_options;
257 dev->stats.tx_packets++; 257 dev->stats.tx_packets++;
258 dev->stats.tx_bytes += skb->len; 258 dev->stats.tx_bytes += skb->len;
259 atmvcc->send(atmvcc, skb);
260 259
261 if (!atm_may_send(atmvcc, 0)) { 260 if (atomic_dec_return(&brvcc->qspace) < 1) {
261 /* No more please! */
262 netif_stop_queue(brvcc->device); 262 netif_stop_queue(brvcc->device);
263 /*check for race with br2684_pop*/ 263 /* We might have raced with br2684_pop() */
264 if (atm_may_send(atmvcc, 0)) 264 if (unlikely(atomic_read(&brvcc->qspace) > 0))
265 netif_start_queue(brvcc->device); 265 netif_wake_queue(brvcc->device);
266 } 266 }
267 267
268 return 1; 268 /* If this fails immediately, the skb will be freed and br2684_pop()
269 will wake the queue if appropriate. Just return an error so that
270 the stats are updated correctly */
271 return !atmvcc->send(atmvcc, skb);
272}
273
274static void br2684_release_cb(struct atm_vcc *atmvcc)
275{
276 struct br2684_vcc *brvcc = BR2684_VCC(atmvcc);
277
278 if (atomic_read(&brvcc->qspace) > 0)
279 netif_wake_queue(brvcc->device);
280
281 if (brvcc->old_release_cb)
282 brvcc->old_release_cb(atmvcc);
269} 283}
270 284
271static inline struct br2684_vcc *pick_outgoing_vcc(const struct sk_buff *skb, 285static inline struct br2684_vcc *pick_outgoing_vcc(const struct sk_buff *skb,
@@ -279,6 +293,8 @@ static netdev_tx_t br2684_start_xmit(struct sk_buff *skb,
279{ 293{
280 struct br2684_dev *brdev = BRPRIV(dev); 294 struct br2684_dev *brdev = BRPRIV(dev);
281 struct br2684_vcc *brvcc; 295 struct br2684_vcc *brvcc;
296 struct atm_vcc *atmvcc;
297 netdev_tx_t ret = NETDEV_TX_OK;
282 298
283 pr_debug("skb_dst(skb)=%p\n", skb_dst(skb)); 299 pr_debug("skb_dst(skb)=%p\n", skb_dst(skb));
284 read_lock(&devs_lock); 300 read_lock(&devs_lock);
@@ -289,9 +305,26 @@ static netdev_tx_t br2684_start_xmit(struct sk_buff *skb,
289 dev->stats.tx_carrier_errors++; 305 dev->stats.tx_carrier_errors++;
290 /* netif_stop_queue(dev); */ 306 /* netif_stop_queue(dev); */
291 dev_kfree_skb(skb); 307 dev_kfree_skb(skb);
292 read_unlock(&devs_lock); 308 goto out_devs;
293 return NETDEV_TX_OK;
294 } 309 }
310 atmvcc = brvcc->atmvcc;
311
312 bh_lock_sock(sk_atm(atmvcc));
313
314 if (test_bit(ATM_VF_RELEASED, &atmvcc->flags) ||
315 test_bit(ATM_VF_CLOSE, &atmvcc->flags) ||
316 !test_bit(ATM_VF_READY, &atmvcc->flags)) {
317 dev->stats.tx_dropped++;
318 dev_kfree_skb(skb);
319 goto out;
320 }
321
322 if (sock_owned_by_user(sk_atm(atmvcc))) {
323 netif_stop_queue(brvcc->device);
324 ret = NETDEV_TX_BUSY;
325 goto out;
326 }
327
295 if (!br2684_xmit_vcc(skb, dev, brvcc)) { 328 if (!br2684_xmit_vcc(skb, dev, brvcc)) {
296 /* 329 /*
297 * We should probably use netif_*_queue() here, but that 330 * We should probably use netif_*_queue() here, but that
@@ -303,8 +336,11 @@ static netdev_tx_t br2684_start_xmit(struct sk_buff *skb,
303 dev->stats.tx_errors++; 336 dev->stats.tx_errors++;
304 dev->stats.tx_fifo_errors++; 337 dev->stats.tx_fifo_errors++;
305 } 338 }
339 out:
340 bh_unlock_sock(sk_atm(atmvcc));
341 out_devs:
306 read_unlock(&devs_lock); 342 read_unlock(&devs_lock);
307 return NETDEV_TX_OK; 343 return ret;
308} 344}
309 345
310/* 346/*
@@ -377,9 +413,10 @@ static void br2684_close_vcc(struct br2684_vcc *brvcc)
377 list_del(&brvcc->brvccs); 413 list_del(&brvcc->brvccs);
378 write_unlock_irq(&devs_lock); 414 write_unlock_irq(&devs_lock);
379 brvcc->atmvcc->user_back = NULL; /* what about vcc->recvq ??? */ 415 brvcc->atmvcc->user_back = NULL; /* what about vcc->recvq ??? */
416 brvcc->atmvcc->release_cb = brvcc->old_release_cb;
380 brvcc->old_push(brvcc->atmvcc, NULL); /* pass on the bad news */ 417 brvcc->old_push(brvcc->atmvcc, NULL); /* pass on the bad news */
418 module_put(brvcc->old_owner);
381 kfree(brvcc); 419 kfree(brvcc);
382 module_put(THIS_MODULE);
383} 420}
384 421
385/* when AAL5 PDU comes in: */ 422/* when AAL5 PDU comes in: */
@@ -504,6 +541,13 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
504 brvcc = kzalloc(sizeof(struct br2684_vcc), GFP_KERNEL); 541 brvcc = kzalloc(sizeof(struct br2684_vcc), GFP_KERNEL);
505 if (!brvcc) 542 if (!brvcc)
506 return -ENOMEM; 543 return -ENOMEM;
544 /*
545 * Allow two packets in the ATM queue. One actually being sent, and one
546 * for the ATM 'TX done' handler to send. It shouldn't take long to get
547 * the next one from the netdev queue, when we need it. More than that
548 * would be bufferbloat.
549 */
550 atomic_set(&brvcc->qspace, 2);
507 write_lock_irq(&devs_lock); 551 write_lock_irq(&devs_lock);
508 net_dev = br2684_find_dev(&be.ifspec); 552 net_dev = br2684_find_dev(&be.ifspec);
509 if (net_dev == NULL) { 553 if (net_dev == NULL) {
@@ -546,9 +590,13 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
546 brvcc->encaps = (enum br2684_encaps)be.encaps; 590 brvcc->encaps = (enum br2684_encaps)be.encaps;
547 brvcc->old_push = atmvcc->push; 591 brvcc->old_push = atmvcc->push;
548 brvcc->old_pop = atmvcc->pop; 592 brvcc->old_pop = atmvcc->pop;
593 brvcc->old_release_cb = atmvcc->release_cb;
594 brvcc->old_owner = atmvcc->owner;
549 barrier(); 595 barrier();
550 atmvcc->push = br2684_push; 596 atmvcc->push = br2684_push;
551 atmvcc->pop = br2684_pop; 597 atmvcc->pop = br2684_pop;
598 atmvcc->release_cb = br2684_release_cb;
599 atmvcc->owner = THIS_MODULE;
552 600
553 /* initialize netdev carrier state */ 601 /* initialize netdev carrier state */
554 if (atmvcc->dev->signal == ATM_PHY_SIG_LOST) 602 if (atmvcc->dev->signal == ATM_PHY_SIG_LOST)
@@ -687,10 +735,13 @@ static int br2684_ioctl(struct socket *sock, unsigned int cmd,
687 return -ENOIOCTLCMD; 735 return -ENOIOCTLCMD;
688 if (!capable(CAP_NET_ADMIN)) 736 if (!capable(CAP_NET_ADMIN))
689 return -EPERM; 737 return -EPERM;
690 if (cmd == ATM_SETBACKEND) 738 if (cmd == ATM_SETBACKEND) {
739 if (sock->state != SS_CONNECTED)
740 return -EINVAL;
691 return br2684_regvcc(atmvcc, argp); 741 return br2684_regvcc(atmvcc, argp);
692 else 742 } else {
693 return br2684_create(argp); 743 return br2684_create(argp);
744 }
694#ifdef CONFIG_ATM_BR2684_IPFILTER 745#ifdef CONFIG_ATM_BR2684_IPFILTER
695 case BR2684_SETFILT: 746 case BR2684_SETFILT:
696 if (atmvcc->push != br2684_push) 747 if (atmvcc->push != br2684_push)
diff --git a/net/atm/common.c b/net/atm/common.c
index 0c0ad930a632..806fc0a40051 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -126,10 +126,19 @@ static void vcc_write_space(struct sock *sk)
126 rcu_read_unlock(); 126 rcu_read_unlock();
127} 127}
128 128
129static void vcc_release_cb(struct sock *sk)
130{
131 struct atm_vcc *vcc = atm_sk(sk);
132
133 if (vcc->release_cb)
134 vcc->release_cb(vcc);
135}
136
129static struct proto vcc_proto = { 137static struct proto vcc_proto = {
130 .name = "VCC", 138 .name = "VCC",
131 .owner = THIS_MODULE, 139 .owner = THIS_MODULE,
132 .obj_size = sizeof(struct atm_vcc), 140 .obj_size = sizeof(struct atm_vcc),
141 .release_cb = vcc_release_cb,
133}; 142};
134 143
135int vcc_create(struct net *net, struct socket *sock, int protocol, int family) 144int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
@@ -156,7 +165,9 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
156 atomic_set(&sk->sk_rmem_alloc, 0); 165 atomic_set(&sk->sk_rmem_alloc, 0);
157 vcc->push = NULL; 166 vcc->push = NULL;
158 vcc->pop = NULL; 167 vcc->pop = NULL;
168 vcc->owner = NULL;
159 vcc->push_oam = NULL; 169 vcc->push_oam = NULL;
170 vcc->release_cb = NULL;
160 vcc->vpi = vcc->vci = 0; /* no VCI/VPI yet */ 171 vcc->vpi = vcc->vci = 0; /* no VCI/VPI yet */
161 vcc->atm_options = vcc->aal_options = 0; 172 vcc->atm_options = vcc->aal_options = 0;
162 sk->sk_destruct = vcc_sock_destruct; 173 sk->sk_destruct = vcc_sock_destruct;
@@ -175,6 +186,7 @@ static void vcc_destroy_socket(struct sock *sk)
175 vcc->dev->ops->close(vcc); 186 vcc->dev->ops->close(vcc);
176 if (vcc->push) 187 if (vcc->push)
177 vcc->push(vcc, NULL); /* atmarpd has no push */ 188 vcc->push(vcc, NULL); /* atmarpd has no push */
189 module_put(vcc->owner);
178 190
179 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { 191 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
180 atm_return(vcc, skb->truesize); 192 atm_return(vcc, skb->truesize);
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index 226dca989448..8c93267ce969 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -60,6 +60,8 @@ struct pppoatm_vcc {
60 struct atm_vcc *atmvcc; /* VCC descriptor */ 60 struct atm_vcc *atmvcc; /* VCC descriptor */
61 void (*old_push)(struct atm_vcc *, struct sk_buff *); 61 void (*old_push)(struct atm_vcc *, struct sk_buff *);
62 void (*old_pop)(struct atm_vcc *, struct sk_buff *); 62 void (*old_pop)(struct atm_vcc *, struct sk_buff *);
63 void (*old_release_cb)(struct atm_vcc *);
64 struct module *old_owner;
63 /* keep old push/pop for detaching */ 65 /* keep old push/pop for detaching */
64 enum pppoatm_encaps encaps; 66 enum pppoatm_encaps encaps;
65 atomic_t inflight; 67 atomic_t inflight;
@@ -107,6 +109,24 @@ static void pppoatm_wakeup_sender(unsigned long arg)
107 ppp_output_wakeup((struct ppp_channel *) arg); 109 ppp_output_wakeup((struct ppp_channel *) arg);
108} 110}
109 111
112static void pppoatm_release_cb(struct atm_vcc *atmvcc)
113{
114 struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
115
116 /*
117 * As in pppoatm_pop(), it's safe to clear the BLOCKED bit here because
118 * the wakeup *can't* race with pppoatm_send(). They both hold the PPP
119 * channel's ->downl lock. And the potential race with *setting* it,
120 * which leads to the double-check dance in pppoatm_may_send(), doesn't
121 * exist here. In the sock_owned_by_user() case in pppoatm_send(), we
122 * set the BLOCKED bit while the socket is still locked. We know that
123 * ->release_cb() can't be called until that's done.
124 */
125 if (test_and_clear_bit(BLOCKED, &pvcc->blocked))
126 tasklet_schedule(&pvcc->wakeup_tasklet);
127 if (pvcc->old_release_cb)
128 pvcc->old_release_cb(atmvcc);
129}
110/* 130/*
111 * This gets called every time the ATM card has finished sending our 131 * This gets called every time the ATM card has finished sending our
112 * skb. The ->old_pop will take care up normal atm flow control, 132 * skb. The ->old_pop will take care up normal atm flow control,
@@ -151,12 +171,11 @@ static void pppoatm_unassign_vcc(struct atm_vcc *atmvcc)
151 pvcc = atmvcc_to_pvcc(atmvcc); 171 pvcc = atmvcc_to_pvcc(atmvcc);
152 atmvcc->push = pvcc->old_push; 172 atmvcc->push = pvcc->old_push;
153 atmvcc->pop = pvcc->old_pop; 173 atmvcc->pop = pvcc->old_pop;
174 atmvcc->release_cb = pvcc->old_release_cb;
154 tasklet_kill(&pvcc->wakeup_tasklet); 175 tasklet_kill(&pvcc->wakeup_tasklet);
155 ppp_unregister_channel(&pvcc->chan); 176 ppp_unregister_channel(&pvcc->chan);
156 atmvcc->user_back = NULL; 177 atmvcc->user_back = NULL;
157 kfree(pvcc); 178 kfree(pvcc);
158 /* Gee, I hope we have the big kernel lock here... */
159 module_put(THIS_MODULE);
160} 179}
161 180
162/* Called when an AAL5 PDU comes in */ 181/* Called when an AAL5 PDU comes in */
@@ -165,9 +184,13 @@ static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
165 struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc); 184 struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
166 pr_debug("\n"); 185 pr_debug("\n");
167 if (skb == NULL) { /* VCC was closed */ 186 if (skb == NULL) { /* VCC was closed */
187 struct module *module;
188
168 pr_debug("removing ATMPPP VCC %p\n", pvcc); 189 pr_debug("removing ATMPPP VCC %p\n", pvcc);
190 module = pvcc->old_owner;
169 pppoatm_unassign_vcc(atmvcc); 191 pppoatm_unassign_vcc(atmvcc);
170 atmvcc->push(atmvcc, NULL); /* Pass along bad news */ 192 atmvcc->push(atmvcc, NULL); /* Pass along bad news */
193 module_put(module);
171 return; 194 return;
172 } 195 }
173 atm_return(atmvcc, skb->truesize); 196 atm_return(atmvcc, skb->truesize);
@@ -211,7 +234,7 @@ error:
211 ppp_input_error(&pvcc->chan, 0); 234 ppp_input_error(&pvcc->chan, 0);
212} 235}
213 236
214static inline int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size) 237static int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
215{ 238{
216 /* 239 /*
217 * It's not clear that we need to bother with using atm_may_send() 240 * It's not clear that we need to bother with using atm_may_send()
@@ -269,10 +292,33 @@ static inline int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
269static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb) 292static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
270{ 293{
271 struct pppoatm_vcc *pvcc = chan_to_pvcc(chan); 294 struct pppoatm_vcc *pvcc = chan_to_pvcc(chan);
295 struct atm_vcc *vcc;
296 int ret;
297
272 ATM_SKB(skb)->vcc = pvcc->atmvcc; 298 ATM_SKB(skb)->vcc = pvcc->atmvcc;
273 pr_debug("(skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc); 299 pr_debug("(skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc);
274 if (skb->data[0] == '\0' && (pvcc->flags & SC_COMP_PROT)) 300 if (skb->data[0] == '\0' && (pvcc->flags & SC_COMP_PROT))
275 (void) skb_pull(skb, 1); 301 (void) skb_pull(skb, 1);
302
303 vcc = ATM_SKB(skb)->vcc;
304 bh_lock_sock(sk_atm(vcc));
305 if (sock_owned_by_user(sk_atm(vcc))) {
306 /*
307 * Needs to happen (and be flushed, hence test_and_) before we unlock
308 * the socket. It needs to be seen by the time our ->release_cb gets
309 * called.
310 */
311 test_and_set_bit(BLOCKED, &pvcc->blocked);
312 goto nospace;
313 }
314 if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
315 test_bit(ATM_VF_CLOSE, &vcc->flags) ||
316 !test_bit(ATM_VF_READY, &vcc->flags)) {
317 bh_unlock_sock(sk_atm(vcc));
318 kfree_skb(skb);
319 return DROP_PACKET;
320 }
321
276 switch (pvcc->encaps) { /* LLC encapsulation needed */ 322 switch (pvcc->encaps) { /* LLC encapsulation needed */
277 case e_llc: 323 case e_llc:
278 if (skb_headroom(skb) < LLC_LEN) { 324 if (skb_headroom(skb) < LLC_LEN) {
@@ -285,8 +331,10 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
285 } 331 }
286 consume_skb(skb); 332 consume_skb(skb);
287 skb = n; 333 skb = n;
288 if (skb == NULL) 334 if (skb == NULL) {
335 bh_unlock_sock(sk_atm(vcc));
289 return DROP_PACKET; 336 return DROP_PACKET;
337 }
290 } else if (!pppoatm_may_send(pvcc, skb->truesize)) 338 } else if (!pppoatm_may_send(pvcc, skb->truesize))
291 goto nospace; 339 goto nospace;
292 memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN); 340 memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN);
@@ -296,6 +344,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
296 goto nospace; 344 goto nospace;
297 break; 345 break;
298 case e_autodetect: 346 case e_autodetect:
347 bh_unlock_sock(sk_atm(vcc));
299 pr_debug("Trying to send without setting encaps!\n"); 348 pr_debug("Trying to send without setting encaps!\n");
300 kfree_skb(skb); 349 kfree_skb(skb);
301 return 1; 350 return 1;
@@ -305,9 +354,12 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
305 ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options; 354 ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
306 pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", 355 pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
307 skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev); 356 skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
308 return ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb) 357 ret = ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
309 ? DROP_PACKET : 1; 358 ? DROP_PACKET : 1;
359 bh_unlock_sock(sk_atm(vcc));
360 return ret;
310nospace: 361nospace:
362 bh_unlock_sock(sk_atm(vcc));
311 /* 363 /*
312 * We don't have space to send this SKB now, but we might have 364 * We don't have space to send this SKB now, but we might have
313 * already applied SC_COMP_PROT compression, so may need to undo 365 * already applied SC_COMP_PROT compression, so may need to undo
@@ -362,6 +414,8 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
362 atomic_set(&pvcc->inflight, NONE_INFLIGHT); 414 atomic_set(&pvcc->inflight, NONE_INFLIGHT);
363 pvcc->old_push = atmvcc->push; 415 pvcc->old_push = atmvcc->push;
364 pvcc->old_pop = atmvcc->pop; 416 pvcc->old_pop = atmvcc->pop;
417 pvcc->old_owner = atmvcc->owner;
418 pvcc->old_release_cb = atmvcc->release_cb;
365 pvcc->encaps = (enum pppoatm_encaps) be.encaps; 419 pvcc->encaps = (enum pppoatm_encaps) be.encaps;
366 pvcc->chan.private = pvcc; 420 pvcc->chan.private = pvcc;
367 pvcc->chan.ops = &pppoatm_ops; 421 pvcc->chan.ops = &pppoatm_ops;
@@ -377,7 +431,9 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
377 atmvcc->user_back = pvcc; 431 atmvcc->user_back = pvcc;
378 atmvcc->push = pppoatm_push; 432 atmvcc->push = pppoatm_push;
379 atmvcc->pop = pppoatm_pop; 433 atmvcc->pop = pppoatm_pop;
434 atmvcc->release_cb = pppoatm_release_cb;
380 __module_get(THIS_MODULE); 435 __module_get(THIS_MODULE);
436 atmvcc->owner = THIS_MODULE;
381 437
382 /* re-process everything received between connection setup and 438 /* re-process everything received between connection setup and
383 backend setup */ 439 backend setup */
@@ -406,6 +462,8 @@ static int pppoatm_ioctl(struct socket *sock, unsigned int cmd,
406 return -ENOIOCTLCMD; 462 return -ENOIOCTLCMD;
407 if (!capable(CAP_NET_ADMIN)) 463 if (!capable(CAP_NET_ADMIN))
408 return -EPERM; 464 return -EPERM;
465 if (sock->state != SS_CONNECTED)
466 return -EINVAL;
409 return pppoatm_assign_vcc(atmvcc, argp); 467 return pppoatm_assign_vcc(atmvcc, argp);
410 } 468 }
411 case PPPIOCGCHAN: 469 case PPPIOCGCHAN:
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index 53f5244e28f8..8d8afb134b3a 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -6,6 +6,7 @@ config BATMAN_ADV
6 tristate "B.A.T.M.A.N. Advanced Meshing Protocol" 6 tristate "B.A.T.M.A.N. Advanced Meshing Protocol"
7 depends on NET 7 depends on NET
8 select CRC16 8 select CRC16
9 select LIBCRC32C
9 default n 10 default n
10 help 11 help
11 B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is 12 B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
@@ -25,6 +26,16 @@ config BATMAN_ADV_BLA
25 more than one mesh node in the same LAN, you can safely remove 26 more than one mesh node in the same LAN, you can safely remove
26 this feature and save some space. 27 this feature and save some space.
27 28
29config BATMAN_ADV_DAT
30 bool "Distributed ARP Table"
31 depends on BATMAN_ADV && INET
32 default n
33 help
34 This option enables DAT (Distributed ARP Table), a DHT based
35 mechanism that increases ARP reliability on sparse wireless
36 mesh networks. If you think that your network does not need
37 this option you can safely remove it and save some space.
38
28config BATMAN_ADV_DEBUG 39config BATMAN_ADV_DEBUG
29 bool "B.A.T.M.A.N. debugging" 40 bool "B.A.T.M.A.N. debugging"
30 depends on BATMAN_ADV 41 depends on BATMAN_ADV
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 8676d2b1d574..e45e3b4e32e3 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -23,6 +23,7 @@ batman-adv-y += bat_iv_ogm.o
23batman-adv-y += bitarray.o 23batman-adv-y += bitarray.o
24batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o 24batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
25batman-adv-y += debugfs.o 25batman-adv-y += debugfs.o
26batman-adv-$(CONFIG_BATMAN_ADV_DAT) += distributed-arp-table.o
26batman-adv-y += gateway_client.o 27batman-adv-y += gateway_client.o
27batman-adv-y += gateway_common.o 28batman-adv-y += gateway_common.o
28batman-adv-y += hard-interface.o 29batman-adv-y += hard-interface.o
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index b02b75dae3a8..7d02ebd11a7f 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -57,20 +57,22 @@ out:
57static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface) 57static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
58{ 58{
59 struct batadv_ogm_packet *batadv_ogm_packet; 59 struct batadv_ogm_packet *batadv_ogm_packet;
60 unsigned char *ogm_buff;
60 uint32_t random_seqno; 61 uint32_t random_seqno;
61 int res = -ENOMEM; 62 int res = -ENOMEM;
62 63
63 /* randomize initial seqno to avoid collision */ 64 /* randomize initial seqno to avoid collision */
64 get_random_bytes(&random_seqno, sizeof(random_seqno)); 65 get_random_bytes(&random_seqno, sizeof(random_seqno));
65 atomic_set(&hard_iface->seqno, random_seqno); 66 atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
66 67
67 hard_iface->packet_len = BATADV_OGM_HLEN; 68 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
68 hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC); 69 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
69 70 if (!ogm_buff)
70 if (!hard_iface->packet_buff)
71 goto out; 71 goto out;
72 72
73 batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff; 73 hard_iface->bat_iv.ogm_buff = ogm_buff;
74
75 batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
74 batadv_ogm_packet->header.packet_type = BATADV_IV_OGM; 76 batadv_ogm_packet->header.packet_type = BATADV_IV_OGM;
75 batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION; 77 batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION;
76 batadv_ogm_packet->header.ttl = 2; 78 batadv_ogm_packet->header.ttl = 2;
@@ -87,15 +89,16 @@ out:
87 89
88static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface) 90static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
89{ 91{
90 kfree(hard_iface->packet_buff); 92 kfree(hard_iface->bat_iv.ogm_buff);
91 hard_iface->packet_buff = NULL; 93 hard_iface->bat_iv.ogm_buff = NULL;
92} 94}
93 95
94static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface) 96static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface)
95{ 97{
96 struct batadv_ogm_packet *batadv_ogm_packet; 98 struct batadv_ogm_packet *batadv_ogm_packet;
99 unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
97 100
98 batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff; 101 batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
99 memcpy(batadv_ogm_packet->orig, 102 memcpy(batadv_ogm_packet->orig,
100 hard_iface->net_dev->dev_addr, ETH_ALEN); 103 hard_iface->net_dev->dev_addr, ETH_ALEN);
101 memcpy(batadv_ogm_packet->prev_sender, 104 memcpy(batadv_ogm_packet->prev_sender,
@@ -106,8 +109,9 @@ static void
106batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface) 109batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
107{ 110{
108 struct batadv_ogm_packet *batadv_ogm_packet; 111 struct batadv_ogm_packet *batadv_ogm_packet;
112 unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
109 113
110 batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff; 114 batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
111 batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP; 115 batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP;
112 batadv_ogm_packet->header.ttl = BATADV_TTL; 116 batadv_ogm_packet->header.ttl = BATADV_TTL;
113} 117}
@@ -119,7 +123,7 @@ batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv)
119 unsigned int msecs; 123 unsigned int msecs;
120 124
121 msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER; 125 msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
122 msecs += (random32() % 2 * BATADV_JITTER); 126 msecs += random32() % (2 * BATADV_JITTER);
123 127
124 return jiffies + msecs_to_jiffies(msecs); 128 return jiffies + msecs_to_jiffies(msecs);
125} 129}
@@ -407,9 +411,11 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
407 411
408 if ((atomic_read(&bat_priv->aggregated_ogms)) && 412 if ((atomic_read(&bat_priv->aggregated_ogms)) &&
409 (packet_len < BATADV_MAX_AGGREGATION_BYTES)) 413 (packet_len < BATADV_MAX_AGGREGATION_BYTES))
410 skb_size = BATADV_MAX_AGGREGATION_BYTES + ETH_HLEN; 414 skb_size = BATADV_MAX_AGGREGATION_BYTES;
411 else 415 else
412 skb_size = packet_len + ETH_HLEN; 416 skb_size = packet_len;
417
418 skb_size += ETH_HLEN + NET_IP_ALIGN;
413 419
414 forw_packet_aggr->skb = dev_alloc_skb(skb_size); 420 forw_packet_aggr->skb = dev_alloc_skb(skb_size);
415 if (!forw_packet_aggr->skb) { 421 if (!forw_packet_aggr->skb) {
@@ -418,7 +424,7 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
418 kfree(forw_packet_aggr); 424 kfree(forw_packet_aggr);
419 goto out; 425 goto out;
420 } 426 }
421 skb_reserve(forw_packet_aggr->skb, ETH_HLEN); 427 skb_reserve(forw_packet_aggr->skb, ETH_HLEN + NET_IP_ALIGN);
422 428
423 INIT_HLIST_NODE(&forw_packet_aggr->list); 429 INIT_HLIST_NODE(&forw_packet_aggr->list);
424 430
@@ -590,8 +596,10 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
590static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface) 596static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
591{ 597{
592 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 598 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
599 unsigned char **ogm_buff = &hard_iface->bat_iv.ogm_buff;
593 struct batadv_ogm_packet *batadv_ogm_packet; 600 struct batadv_ogm_packet *batadv_ogm_packet;
594 struct batadv_hard_iface *primary_if; 601 struct batadv_hard_iface *primary_if;
602 int *ogm_buff_len = &hard_iface->bat_iv.ogm_buff_len;
595 int vis_server, tt_num_changes = 0; 603 int vis_server, tt_num_changes = 0;
596 uint32_t seqno; 604 uint32_t seqno;
597 uint8_t bandwidth; 605 uint8_t bandwidth;
@@ -600,17 +608,16 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
600 primary_if = batadv_primary_if_get_selected(bat_priv); 608 primary_if = batadv_primary_if_get_selected(bat_priv);
601 609
602 if (hard_iface == primary_if) 610 if (hard_iface == primary_if)
603 tt_num_changes = batadv_tt_append_diff(bat_priv, 611 tt_num_changes = batadv_tt_append_diff(bat_priv, ogm_buff,
604 &hard_iface->packet_buff, 612 ogm_buff_len,
605 &hard_iface->packet_len,
606 BATADV_OGM_HLEN); 613 BATADV_OGM_HLEN);
607 614
608 batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff; 615 batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
609 616
610 /* change sequence number to network order */ 617 /* change sequence number to network order */
611 seqno = (uint32_t)atomic_read(&hard_iface->seqno); 618 seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
612 batadv_ogm_packet->seqno = htonl(seqno); 619 batadv_ogm_packet->seqno = htonl(seqno);
613 atomic_inc(&hard_iface->seqno); 620 atomic_inc(&hard_iface->bat_iv.ogm_seqno);
614 621
615 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn); 622 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
616 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc); 623 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
@@ -631,8 +638,8 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
631 } 638 }
632 639
633 batadv_slide_own_bcast_window(hard_iface); 640 batadv_slide_own_bcast_window(hard_iface);
634 batadv_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff, 641 batadv_iv_ogm_queue_add(bat_priv, hard_iface->bat_iv.ogm_buff,
635 hard_iface->packet_len, hard_iface, 1, 642 hard_iface->bat_iv.ogm_buff_len, hard_iface, 1,
636 batadv_iv_ogm_emit_send_time(bat_priv)); 643 batadv_iv_ogm_emit_send_time(bat_priv));
637 644
638 if (primary_if) 645 if (primary_if)
@@ -1015,7 +1022,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
1015 return; 1022 return;
1016 1023
1017 /* could be changed by schedule_own_packet() */ 1024 /* could be changed by schedule_own_packet() */
1018 if_incoming_seqno = atomic_read(&if_incoming->seqno); 1025 if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
1019 1026
1020 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK) 1027 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
1021 has_directlink_flag = 1; 1028 has_directlink_flag = 1;
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index aea174cdbfbd..5453b17d8df2 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -79,20 +79,17 @@ int batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
79 * or the old packet got delayed somewhere in the network. The 79 * or the old packet got delayed somewhere in the network. The
80 * packet should be dropped without calling this function if the 80 * packet should be dropped without calling this function if the
81 * seqno window is protected. 81 * seqno window is protected.
82 *
83 * seq_num_diff <= -BATADV_TQ_LOCAL_WINDOW_SIZE
84 * or
85 * seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE
82 */ 86 */
83 if (seq_num_diff <= -BATADV_TQ_LOCAL_WINDOW_SIZE || 87 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
84 seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE) { 88 "Other host probably restarted!\n");
85 89
86 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 90 bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
87 "Other host probably restarted!\n"); 91 if (set_mark)
88 92 batadv_set_bit(seq_bits, 0);
89 bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
90 if (set_mark)
91 batadv_set_bit(seq_bits, 0);
92
93 return 1;
94 }
95 93
96 /* never reached */ 94 return 1;
97 return 0;
98} 95}
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index fd8d5afec0dd..5aebe9327d68 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -40,15 +40,11 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
40/* return the index of the claim */ 40/* return the index of the claim */
41static inline uint32_t batadv_choose_claim(const void *data, uint32_t size) 41static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
42{ 42{
43 const unsigned char *key = data; 43 struct batadv_claim *claim = (struct batadv_claim *)data;
44 uint32_t hash = 0; 44 uint32_t hash = 0;
45 size_t i;
46 45
47 for (i = 0; i < ETH_ALEN + sizeof(short); i++) { 46 hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
48 hash += key[i]; 47 hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));
49 hash += (hash << 10);
50 hash ^= (hash >> 6);
51 }
52 48
53 hash += (hash << 3); 49 hash += (hash << 3);
54 hash ^= (hash >> 11); 50 hash ^= (hash >> 11);
@@ -61,15 +57,11 @@ static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
61static inline uint32_t batadv_choose_backbone_gw(const void *data, 57static inline uint32_t batadv_choose_backbone_gw(const void *data,
62 uint32_t size) 58 uint32_t size)
63{ 59{
64 const unsigned char *key = data; 60 struct batadv_claim *claim = (struct batadv_claim *)data;
65 uint32_t hash = 0; 61 uint32_t hash = 0;
66 size_t i;
67 62
68 for (i = 0; i < ETH_ALEN + sizeof(short); i++) { 63 hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
69 hash += key[i]; 64 hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));
70 hash += (hash << 10);
71 hash ^= (hash >> 6);
72 }
73 65
74 hash += (hash << 3); 66 hash += (hash << 3);
75 hash ^= (hash >> 11); 67 hash ^= (hash >> 11);
@@ -85,8 +77,15 @@ static int batadv_compare_backbone_gw(const struct hlist_node *node,
85{ 77{
86 const void *data1 = container_of(node, struct batadv_backbone_gw, 78 const void *data1 = container_of(node, struct batadv_backbone_gw,
87 hash_entry); 79 hash_entry);
80 const struct batadv_backbone_gw *gw1 = data1, *gw2 = data2;
81
82 if (!batadv_compare_eth(gw1->orig, gw2->orig))
83 return 0;
84
85 if (gw1->vid != gw2->vid)
86 return 0;
88 87
89 return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0); 88 return 1;
90} 89}
91 90
92/* compares address and vid of two claims */ 91/* compares address and vid of two claims */
@@ -95,8 +94,15 @@ static int batadv_compare_claim(const struct hlist_node *node,
95{ 94{
96 const void *data1 = container_of(node, struct batadv_claim, 95 const void *data1 = container_of(node, struct batadv_claim,
97 hash_entry); 96 hash_entry);
97 const struct batadv_claim *cl1 = data1, *cl2 = data2;
98
99 if (!batadv_compare_eth(cl1->addr, cl2->addr))
100 return 0;
101
102 if (cl1->vid != cl2->vid)
103 return 0;
98 104
99 return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0); 105 return 1;
100} 106}
101 107
102/* free a backbone gw */ 108/* free a backbone gw */
@@ -362,7 +368,7 @@ out:
362 */ 368 */
363static struct batadv_backbone_gw * 369static struct batadv_backbone_gw *
364batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig, 370batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
365 short vid) 371 short vid, bool own_backbone)
366{ 372{
367 struct batadv_backbone_gw *entry; 373 struct batadv_backbone_gw *entry;
368 struct batadv_orig_node *orig_node; 374 struct batadv_orig_node *orig_node;
@@ -386,6 +392,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
386 entry->crc = BATADV_BLA_CRC_INIT; 392 entry->crc = BATADV_BLA_CRC_INIT;
387 entry->bat_priv = bat_priv; 393 entry->bat_priv = bat_priv;
388 atomic_set(&entry->request_sent, 0); 394 atomic_set(&entry->request_sent, 0);
395 atomic_set(&entry->wait_periods, 0);
389 memcpy(entry->orig, orig, ETH_ALEN); 396 memcpy(entry->orig, orig, ETH_ALEN);
390 397
391 /* one for the hash, one for returning */ 398 /* one for the hash, one for returning */
@@ -409,6 +416,16 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
409 "became a backbone gateway"); 416 "became a backbone gateway");
410 batadv_orig_node_free_ref(orig_node); 417 batadv_orig_node_free_ref(orig_node);
411 } 418 }
419
420 if (own_backbone) {
421 batadv_bla_send_announce(bat_priv, entry);
422
423 /* this will be decreased in the worker thread */
424 atomic_inc(&entry->request_sent);
425 atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
426 atomic_inc(&bat_priv->bla.num_requests);
427 }
428
412 return entry; 429 return entry;
413} 430}
414 431
@@ -424,7 +441,7 @@ batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
424 441
425 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, 442 backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
426 primary_if->net_dev->dev_addr, 443 primary_if->net_dev->dev_addr,
427 vid); 444 vid, true);
428 if (unlikely(!backbone_gw)) 445 if (unlikely(!backbone_gw))
429 return; 446 return;
430 447
@@ -632,7 +649,8 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv,
632 if (memcmp(an_addr, batadv_announce_mac, 4) != 0) 649 if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
633 return 0; 650 return 0;
634 651
635 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid); 652 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
653 false);
636 654
637 if (unlikely(!backbone_gw)) 655 if (unlikely(!backbone_gw))
638 return 1; 656 return 1;
@@ -730,7 +748,8 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv,
730 748
731 /* register the gateway if not yet available, and add the claim. */ 749 /* register the gateway if not yet available, and add the claim. */
732 750
733 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid); 751 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
752 false);
734 753
735 if (unlikely(!backbone_gw)) 754 if (unlikely(!backbone_gw))
736 return 1; 755 return 1;
@@ -1140,6 +1159,24 @@ static void batadv_bla_periodic_work(struct work_struct *work)
1140 backbone_gw->lasttime = jiffies; 1159 backbone_gw->lasttime = jiffies;
1141 1160
1142 batadv_bla_send_announce(bat_priv, backbone_gw); 1161 batadv_bla_send_announce(bat_priv, backbone_gw);
1162
1163 /* request_sent is only set after creation to avoid
1164 * problems when we are not yet known as backbone gw
1165 * in the backbone.
1166 *
1167 * We can reset this now after we waited some periods
1168 * to give bridge forward delays and bla group forming
1169 * some grace time.
1170 */
1171
1172 if (atomic_read(&backbone_gw->request_sent) == 0)
1173 continue;
1174
1175 if (!atomic_dec_and_test(&backbone_gw->wait_periods))
1176 continue;
1177
1178 atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
1179 atomic_set(&backbone_gw->request_sent, 0);
1143 } 1180 }
1144 rcu_read_unlock(); 1181 rcu_read_unlock();
1145 } 1182 }
@@ -1212,8 +1249,7 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
1212/** 1249/**
1213 * batadv_bla_check_bcast_duplist 1250 * batadv_bla_check_bcast_duplist
1214 * @bat_priv: the bat priv with all the soft interface information 1251 * @bat_priv: the bat priv with all the soft interface information
1215 * @bcast_packet: encapsulated broadcast frame plus batman header 1252 * @skb: contains the bcast_packet to be checked
1216 * @bcast_packet_len: length of encapsulated broadcast frame plus batman header
1217 * 1253 *
1218 * check if it is on our broadcast list. Another gateway might 1254 * check if it is on our broadcast list. Another gateway might
1219 * have sent the same packet because it is connected to the same backbone, 1255 * have sent the same packet because it is connected to the same backbone,
@@ -1225,20 +1261,17 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
1225 * the same host however as this might be intended. 1261 * the same host however as this might be intended.
1226 */ 1262 */
1227int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, 1263int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
1228 struct batadv_bcast_packet *bcast_packet, 1264 struct sk_buff *skb)
1229 int bcast_packet_len)
1230{ 1265{
1231 int i, length, curr, ret = 0; 1266 int i, curr, ret = 0;
1232 uint8_t *content; 1267 __be32 crc;
1233 uint16_t crc; 1268 struct batadv_bcast_packet *bcast_packet;
1234 struct batadv_bcast_duplist_entry *entry; 1269 struct batadv_bcast_duplist_entry *entry;
1235 1270
1236 length = bcast_packet_len - sizeof(*bcast_packet); 1271 bcast_packet = (struct batadv_bcast_packet *)skb->data;
1237 content = (uint8_t *)bcast_packet;
1238 content += sizeof(*bcast_packet);
1239 1272
1240 /* calculate the crc ... */ 1273 /* calculate the crc ... */
1241 crc = crc16(0, content, length); 1274 crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));
1242 1275
1243 spin_lock_bh(&bat_priv->bla.bcast_duplist_lock); 1276 spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
1244 1277
@@ -1585,23 +1618,11 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1585 struct hlist_head *head; 1618 struct hlist_head *head;
1586 uint32_t i; 1619 uint32_t i;
1587 bool is_own; 1620 bool is_own;
1588 int ret = 0;
1589 uint8_t *primary_addr; 1621 uint8_t *primary_addr;
1590 1622
1591 primary_if = batadv_primary_if_get_selected(bat_priv); 1623 primary_if = batadv_seq_print_text_primary_if_get(seq);
1592 if (!primary_if) { 1624 if (!primary_if)
1593 ret = seq_printf(seq,
1594 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
1595 net_dev->name);
1596 goto out;
1597 }
1598
1599 if (primary_if->if_status != BATADV_IF_ACTIVE) {
1600 ret = seq_printf(seq,
1601 "BATMAN mesh %s disabled - primary interface not active\n",
1602 net_dev->name);
1603 goto out; 1625 goto out;
1604 }
1605 1626
1606 primary_addr = primary_if->net_dev->dev_addr; 1627 primary_addr = primary_if->net_dev->dev_addr;
1607 seq_printf(seq, 1628 seq_printf(seq,
@@ -1628,7 +1649,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1628out: 1649out:
1629 if (primary_if) 1650 if (primary_if)
1630 batadv_hardif_free_ref(primary_if); 1651 batadv_hardif_free_ref(primary_if);
1631 return ret; 1652 return 0;
1632} 1653}
1633 1654
1634int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset) 1655int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
@@ -1643,23 +1664,11 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
1643 int secs, msecs; 1664 int secs, msecs;
1644 uint32_t i; 1665 uint32_t i;
1645 bool is_own; 1666 bool is_own;
1646 int ret = 0;
1647 uint8_t *primary_addr; 1667 uint8_t *primary_addr;
1648 1668
1649 primary_if = batadv_primary_if_get_selected(bat_priv); 1669 primary_if = batadv_seq_print_text_primary_if_get(seq);
1650 if (!primary_if) { 1670 if (!primary_if)
1651 ret = seq_printf(seq,
1652 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
1653 net_dev->name);
1654 goto out;
1655 }
1656
1657 if (primary_if->if_status != BATADV_IF_ACTIVE) {
1658 ret = seq_printf(seq,
1659 "BATMAN mesh %s disabled - primary interface not active\n",
1660 net_dev->name);
1661 goto out; 1671 goto out;
1662 }
1663 1672
1664 primary_addr = primary_if->net_dev->dev_addr; 1673 primary_addr = primary_if->net_dev->dev_addr;
1665 seq_printf(seq, 1674 seq_printf(seq,
@@ -1693,5 +1702,5 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
1693out: 1702out:
1694 if (primary_if) 1703 if (primary_if)
1695 batadv_hardif_free_ref(primary_if); 1704 batadv_hardif_free_ref(primary_if);
1696 return ret; 1705 return 0;
1697} 1706}
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 789cb73bde67..196d9a0254bc 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -31,8 +31,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
31 void *offset); 31 void *offset);
32int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig); 32int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig);
33int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, 33int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
34 struct batadv_bcast_packet *bcast_packet, 34 struct sk_buff *skb);
35 int hdr_size);
36void batadv_bla_update_orig_address(struct batadv_priv *bat_priv, 35void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
37 struct batadv_hard_iface *primary_if, 36 struct batadv_hard_iface *primary_if,
38 struct batadv_hard_iface *oldif); 37 struct batadv_hard_iface *oldif);
@@ -81,8 +80,7 @@ static inline int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
81 80
82static inline int 81static inline int
83batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, 82batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
84 struct batadv_bcast_packet *bcast_packet, 83 struct sk_buff *skb)
85 int hdr_size)
86{ 84{
87 return 0; 85 return 0;
88} 86}
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index 391d4fb2026f..6f58ddd53bff 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -31,6 +31,7 @@
31#include "vis.h" 31#include "vis.h"
32#include "icmp_socket.h" 32#include "icmp_socket.h"
33#include "bridge_loop_avoidance.h" 33#include "bridge_loop_avoidance.h"
34#include "distributed-arp-table.h"
34 35
35static struct dentry *batadv_debugfs; 36static struct dentry *batadv_debugfs;
36 37
@@ -99,15 +100,17 @@ int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
99 100
100static int batadv_log_open(struct inode *inode, struct file *file) 101static int batadv_log_open(struct inode *inode, struct file *file)
101{ 102{
103 if (!try_module_get(THIS_MODULE))
104 return -EBUSY;
105
102 nonseekable_open(inode, file); 106 nonseekable_open(inode, file);
103 file->private_data = inode->i_private; 107 file->private_data = inode->i_private;
104 batadv_inc_module_count();
105 return 0; 108 return 0;
106} 109}
107 110
108static int batadv_log_release(struct inode *inode, struct file *file) 111static int batadv_log_release(struct inode *inode, struct file *file)
109{ 112{
110 batadv_dec_module_count(); 113 module_put(THIS_MODULE);
111 return 0; 114 return 0;
112} 115}
113 116
@@ -278,6 +281,19 @@ static int batadv_bla_backbone_table_open(struct inode *inode,
278 281
279#endif 282#endif
280 283
284#ifdef CONFIG_BATMAN_ADV_DAT
285/**
286 * batadv_dat_cache_open - Prepare file handler for reads from dat_cache
287 * @inode: inode which was opened
288 * @file: file handle to be initialized
289 */
290static int batadv_dat_cache_open(struct inode *inode, struct file *file)
291{
292 struct net_device *net_dev = (struct net_device *)inode->i_private;
293 return single_open(file, batadv_dat_cache_seq_print_text, net_dev);
294}
295#endif
296
281static int batadv_transtable_local_open(struct inode *inode, struct file *file) 297static int batadv_transtable_local_open(struct inode *inode, struct file *file)
282{ 298{
283 struct net_device *net_dev = (struct net_device *)inode->i_private; 299 struct net_device *net_dev = (struct net_device *)inode->i_private;
@@ -307,7 +323,17 @@ struct batadv_debuginfo batadv_debuginfo_##_name = { \
307 } \ 323 } \
308}; 324};
309 325
326/* the following attributes are general and therefore they will be directly
327 * placed in the BATADV_DEBUGFS_SUBDIR subdirectory of debugfs
328 */
310static BATADV_DEBUGINFO(routing_algos, S_IRUGO, batadv_algorithms_open); 329static BATADV_DEBUGINFO(routing_algos, S_IRUGO, batadv_algorithms_open);
330
331static struct batadv_debuginfo *batadv_general_debuginfos[] = {
332 &batadv_debuginfo_routing_algos,
333 NULL,
334};
335
336/* The following attributes are per soft interface */
311static BATADV_DEBUGINFO(originators, S_IRUGO, batadv_originators_open); 337static BATADV_DEBUGINFO(originators, S_IRUGO, batadv_originators_open);
312static BATADV_DEBUGINFO(gateways, S_IRUGO, batadv_gateways_open); 338static BATADV_DEBUGINFO(gateways, S_IRUGO, batadv_gateways_open);
313static BATADV_DEBUGINFO(transtable_global, S_IRUGO, 339static BATADV_DEBUGINFO(transtable_global, S_IRUGO,
@@ -317,6 +343,9 @@ static BATADV_DEBUGINFO(bla_claim_table, S_IRUGO, batadv_bla_claim_table_open);
317static BATADV_DEBUGINFO(bla_backbone_table, S_IRUGO, 343static BATADV_DEBUGINFO(bla_backbone_table, S_IRUGO,
318 batadv_bla_backbone_table_open); 344 batadv_bla_backbone_table_open);
319#endif 345#endif
346#ifdef CONFIG_BATMAN_ADV_DAT
347static BATADV_DEBUGINFO(dat_cache, S_IRUGO, batadv_dat_cache_open);
348#endif
320static BATADV_DEBUGINFO(transtable_local, S_IRUGO, 349static BATADV_DEBUGINFO(transtable_local, S_IRUGO,
321 batadv_transtable_local_open); 350 batadv_transtable_local_open);
322static BATADV_DEBUGINFO(vis_data, S_IRUGO, batadv_vis_data_open); 351static BATADV_DEBUGINFO(vis_data, S_IRUGO, batadv_vis_data_open);
@@ -329,6 +358,9 @@ static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
329 &batadv_debuginfo_bla_claim_table, 358 &batadv_debuginfo_bla_claim_table,
330 &batadv_debuginfo_bla_backbone_table, 359 &batadv_debuginfo_bla_backbone_table,
331#endif 360#endif
361#ifdef CONFIG_BATMAN_ADV_DAT
362 &batadv_debuginfo_dat_cache,
363#endif
332 &batadv_debuginfo_transtable_local, 364 &batadv_debuginfo_transtable_local,
333 &batadv_debuginfo_vis_data, 365 &batadv_debuginfo_vis_data,
334 NULL, 366 NULL,
@@ -336,7 +368,7 @@ static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
336 368
337void batadv_debugfs_init(void) 369void batadv_debugfs_init(void)
338{ 370{
339 struct batadv_debuginfo *bat_debug; 371 struct batadv_debuginfo **bat_debug;
340 struct dentry *file; 372 struct dentry *file;
341 373
342 batadv_debugfs = debugfs_create_dir(BATADV_DEBUGFS_SUBDIR, NULL); 374 batadv_debugfs = debugfs_create_dir(BATADV_DEBUGFS_SUBDIR, NULL);
@@ -344,17 +376,23 @@ void batadv_debugfs_init(void)
344 batadv_debugfs = NULL; 376 batadv_debugfs = NULL;
345 377
346 if (!batadv_debugfs) 378 if (!batadv_debugfs)
347 goto out; 379 goto err;
348 380
349 bat_debug = &batadv_debuginfo_routing_algos; 381 for (bat_debug = batadv_general_debuginfos; *bat_debug; ++bat_debug) {
350 file = debugfs_create_file(bat_debug->attr.name, 382 file = debugfs_create_file(((*bat_debug)->attr).name,
351 S_IFREG | bat_debug->attr.mode, 383 S_IFREG | ((*bat_debug)->attr).mode,
352 batadv_debugfs, NULL, &bat_debug->fops); 384 batadv_debugfs, NULL,
353 if (!file) 385 &(*bat_debug)->fops);
354 pr_err("Can't add debugfs file: %s\n", bat_debug->attr.name); 386 if (!file) {
387 pr_err("Can't add general debugfs file: %s\n",
388 ((*bat_debug)->attr).name);
389 goto err;
390 }
391 }
355 392
356out:
357 return; 393 return;
394err:
395 debugfs_remove_recursive(batadv_debugfs);
358} 396}
359 397
360void batadv_debugfs_destroy(void) 398void batadv_debugfs_destroy(void)
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
new file mode 100644
index 000000000000..8e1d89d2b1c1
--- /dev/null
+++ b/net/batman-adv/distributed-arp-table.c
@@ -0,0 +1,1066 @@
1/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
2 *
3 * Antonio Quartulli
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA
18 */
19
20#include <linux/if_ether.h>
21#include <linux/if_arp.h>
22#include <net/arp.h>
23
24#include "main.h"
25#include "hash.h"
26#include "distributed-arp-table.h"
27#include "hard-interface.h"
28#include "originator.h"
29#include "send.h"
30#include "types.h"
31#include "translation-table.h"
32#include "unicast.h"
33
34static void batadv_dat_purge(struct work_struct *work);
35
36/**
37 * batadv_dat_start_timer - initialise the DAT periodic worker
38 * @bat_priv: the bat priv with all the soft interface information
39 */
40static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
41{
42 INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge);
43 queue_delayed_work(batadv_event_workqueue, &bat_priv->dat.work,
44 msecs_to_jiffies(10000));
45}
46
/**
 * batadv_dat_entry_free_ref - decrement the dat_entry refcounter and possibly
 * free it
 * @dat_entry: the entry to free
 */
static void batadv_dat_entry_free_ref(struct batadv_dat_entry *dat_entry)
{
	/* free via RCU so concurrent readers traversing the hash under
	 * rcu_read_lock() stay safe
	 */
	if (atomic_dec_and_test(&dat_entry->refcount))
		kfree_rcu(dat_entry, rcu);
}
57
58/**
59 * batadv_dat_to_purge - checks whether a dat_entry has to be purged or not
60 * @dat_entry: the entry to check
61 *
62 * Returns true if the entry has to be purged now, false otherwise
63 */
64static bool batadv_dat_to_purge(struct batadv_dat_entry *dat_entry)
65{
66 return batadv_has_timed_out(dat_entry->last_update,
67 BATADV_DAT_ENTRY_TIMEOUT);
68}
69
/**
 * __batadv_dat_purge - delete entries from the DAT local storage
 * @bat_priv: the bat priv with all the soft interface information
 * @to_purge: function in charge of deciding whether an entry has to be purged
 *	      or not. This function takes the dat_entry as argument and has to
 *	      return a boolean value: true if the entry has to be deleted,
 *	      false otherwise
 *
 * Loops over each entry in the DAT local storage and deletes it if and only if
 * the to_purge function passed as argument returns true. A NULL @to_purge
 * deletes every entry unconditionally (used on teardown).
 */
static void __batadv_dat_purge(struct batadv_priv *bat_priv,
			       bool (*to_purge)(struct batadv_dat_entry *))
{
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct batadv_dat_entry *dat_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	uint32_t i;

	/* nothing to do if DAT was never initialised */
	if (!bat_priv->dat.hash)
		return;

	for (i = 0; i < bat_priv->dat.hash->size; i++) {
		head = &bat_priv->dat.hash->table[i];
		list_lock = &bat_priv->dat.hash->list_locks[i];

		/* writers must hold the per-bucket spinlock; the _safe
		 * iterator allows deletion while walking
		 */
		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(dat_entry, node, node_tmp, head,
					  hash_entry) {
			/* if a helper function has been passed as parameter,
			 * ask it if the entry has to be purged or not
			 */
			if (to_purge && !to_purge(dat_entry))
				continue;

			/* drop the reference held by the hash table */
			hlist_del_rcu(node);
			batadv_dat_entry_free_ref(dat_entry);
		}
		spin_unlock_bh(list_lock);
	}
}
112
113/**
114 * batadv_dat_purge - periodic task that deletes old entries from the local DAT
115 * hash table
116 * @work: kernel work struct
117 */
118static void batadv_dat_purge(struct work_struct *work)
119{
120 struct delayed_work *delayed_work;
121 struct batadv_priv_dat *priv_dat;
122 struct batadv_priv *bat_priv;
123
124 delayed_work = container_of(work, struct delayed_work, work);
125 priv_dat = container_of(delayed_work, struct batadv_priv_dat, work);
126 bat_priv = container_of(priv_dat, struct batadv_priv, dat);
127
128 __batadv_dat_purge(bat_priv, batadv_dat_to_purge);
129 batadv_dat_start_timer(bat_priv);
130}
131
132/**
133 * batadv_compare_dat - comparing function used in the local DAT hash table
134 * @node: node in the local table
135 * @data2: second object to compare the node to
136 *
137 * Returns 1 if the two entry are the same, 0 otherwise
138 */
139static int batadv_compare_dat(const struct hlist_node *node, const void *data2)
140{
141 const void *data1 = container_of(node, struct batadv_dat_entry,
142 hash_entry);
143
144 return (memcmp(data1, data2, sizeof(__be32)) == 0 ? 1 : 0);
145}
146
147/**
148 * batadv_arp_hw_src - extract the hw_src field from an ARP packet
149 * @skb: ARP packet
150 * @hdr_size: size of the possible header before the ARP packet
151 *
152 * Returns the value of the hw_src field in the ARP packet
153 */
154static uint8_t *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
155{
156 uint8_t *addr;
157
158 addr = (uint8_t *)(skb->data + hdr_size);
159 addr += ETH_HLEN + sizeof(struct arphdr);
160
161 return addr;
162}
163
164/**
165 * batadv_arp_ip_src - extract the ip_src field from an ARP packet
166 * @skb: ARP packet
167 * @hdr_size: size of the possible header before the ARP packet
168 *
169 * Returns the value of the ip_src field in the ARP packet
170 */
171static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size)
172{
173 return *(__be32 *)(batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN);
174}
175
176/**
177 * batadv_arp_hw_dst - extract the hw_dst field from an ARP packet
178 * @skb: ARP packet
179 * @hdr_size: size of the possible header before the ARP packet
180 *
181 * Returns the value of the hw_dst field in the ARP packet
182 */
183static uint8_t *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
184{
185 return batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN + 4;
186}
187
188/**
189 * batadv_arp_ip_dst - extract the ip_dst field from an ARP packet
190 * @skb: ARP packet
191 * @hdr_size: size of the possible header before the ARP packet
192 *
193 * Returns the value of the ip_dst field in the ARP packet
194 */
195static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
196{
197 return *(__be32 *)(batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN * 2 + 4);
198}
199
/**
 * batadv_hash_dat - compute the hash value for an IP address
 * @data: data to hash (only the first 4 bytes are used as key)
 * @size: size of the hash table
 *
 * Jenkins one-at-a-time style hash over the 4 key bytes, reduced modulo
 * @size.
 *
 * Returns the selected index in the hash table for the given data
 */
static uint32_t batadv_hash_dat(const void *data, uint32_t size)
{
	const unsigned char *bytes = data;
	uint32_t hash = 0;
	size_t idx = 0;

	/* mix each of the 4 key bytes into the accumulator */
	while (idx < 4) {
		hash += bytes[idx];
		hash += hash << 10;
		hash ^= hash >> 6;
		idx++;
	}

	/* final avalanche */
	hash += hash << 3;
	hash ^= hash >> 11;
	hash += hash << 15;

	return hash % size;
}
225
/**
 * batadv_dat_entry_hash_find - look for a given dat_entry in the local hash
 * table
 * @bat_priv: the bat priv with all the soft interface information
 * @ip: search key
 *
 * Returns the dat_entry if found, NULL otherwise. A returned entry carries
 * an extra reference which the caller has to release with
 * batadv_dat_entry_free_ref().
 */
static struct batadv_dat_entry *
batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct batadv_dat_entry *dat_entry, *dat_entry_tmp = NULL;
	struct batadv_hashtable *hash = bat_priv->dat.hash;
	uint32_t index;

	if (!hash)
		return NULL;

	/* hash the key to locate the bucket holding the entry (if any) */
	index = batadv_hash_dat(&ip, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) {
		if (dat_entry->ip != ip)
			continue;

		/* skip entries that are already being torn down (their
		 * refcount has dropped to zero)
		 */
		if (!atomic_inc_not_zero(&dat_entry->refcount))
			continue;

		dat_entry_tmp = dat_entry;
		break;
	}
	rcu_read_unlock();

	return dat_entry_tmp;
}
264
/**
 * batadv_dat_entry_add - add a new dat entry or update it if already existing
 * @bat_priv: the bat priv with all the soft interface information
 * @ip: ipv4 to add/edit
 * @mac_addr: mac address to assign to the given ipv4
 */
static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
				 uint8_t *mac_addr)
{
	struct batadv_dat_entry *dat_entry;
	int hash_added;

	/* hash_find returns the entry with an extra reference taken */
	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip);
	/* if this entry is already known, just update it */
	if (dat_entry) {
		if (!batadv_compare_eth(dat_entry->mac_addr, mac_addr))
			memcpy(dat_entry->mac_addr, mac_addr, ETH_ALEN);
		dat_entry->last_update = jiffies;
		batadv_dbg(BATADV_DBG_DAT, bat_priv,
			   "Entry updated: %pI4 %pM\n", &dat_entry->ip,
			   dat_entry->mac_addr);
		goto out;
	}

	dat_entry = kmalloc(sizeof(*dat_entry), GFP_ATOMIC);
	if (!dat_entry)
		goto out;

	dat_entry->ip = ip;
	memcpy(dat_entry->mac_addr, mac_addr, ETH_ALEN);
	dat_entry->last_update = jiffies;
	/* two references: one for the hash table, one held locally and
	 * released at "out" below
	 */
	atomic_set(&dat_entry->refcount, 2);

	hash_added = batadv_hash_add(bat_priv->dat.hash, batadv_compare_dat,
				     batadv_hash_dat, &dat_entry->ip,
				     &dat_entry->hash_entry);

	if (unlikely(hash_added != 0)) {
		/* remove the reference for the hash */
		batadv_dat_entry_free_ref(dat_entry);
		goto out;
	}

	batadv_dbg(BATADV_DBG_DAT, bat_priv, "New entry added: %pI4 %pM\n",
		   &dat_entry->ip, dat_entry->mac_addr);

out:
	/* drop the local reference (from hash_find or from the refcount=2
	 * initialisation above)
	 */
	if (dat_entry)
		batadv_dat_entry_free_ref(dat_entry);
}
315
316#ifdef CONFIG_BATMAN_ADV_DEBUG
317
318/**
319 * batadv_dbg_arp - print a debug message containing all the ARP packet details
320 * @bat_priv: the bat priv with all the soft interface information
321 * @skb: ARP packet
322 * @type: ARP type
323 * @hdr_size: size of the possible header before the ARP packet
324 * @msg: message to print together with the debugging information
325 */
326static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
327 uint16_t type, int hdr_size, char *msg)
328{
329 struct batadv_unicast_4addr_packet *unicast_4addr_packet;
330 struct batadv_bcast_packet *bcast_pkt;
331 uint8_t *orig_addr;
332 __be32 ip_src, ip_dst;
333
334 if (msg)
335 batadv_dbg(BATADV_DBG_DAT, bat_priv, "%s\n", msg);
336
337 ip_src = batadv_arp_ip_src(skb, hdr_size);
338 ip_dst = batadv_arp_ip_dst(skb, hdr_size);
339 batadv_dbg(BATADV_DBG_DAT, bat_priv,
340 "ARP MSG = [src: %pM-%pI4 dst: %pM-%pI4]\n",
341 batadv_arp_hw_src(skb, hdr_size), &ip_src,
342 batadv_arp_hw_dst(skb, hdr_size), &ip_dst);
343
344 if (hdr_size == 0)
345 return;
346
347 /* if the ARP packet is encapsulated in a batman packet, let's print
348 * some debug messages
349 */
350 unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
351
352 switch (unicast_4addr_packet->u.header.packet_type) {
353 case BATADV_UNICAST:
354 batadv_dbg(BATADV_DBG_DAT, bat_priv,
355 "* encapsulated within a UNICAST packet\n");
356 break;
357 case BATADV_UNICAST_4ADDR:
358 batadv_dbg(BATADV_DBG_DAT, bat_priv,
359 "* encapsulated within a UNICAST_4ADDR packet (src: %pM)\n",
360 unicast_4addr_packet->src);
361 switch (unicast_4addr_packet->subtype) {
362 case BATADV_P_DAT_DHT_PUT:
363 batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DAT_DHT_PUT\n");
364 break;
365 case BATADV_P_DAT_DHT_GET:
366 batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DAT_DHT_GET\n");
367 break;
368 case BATADV_P_DAT_CACHE_REPLY:
369 batadv_dbg(BATADV_DBG_DAT, bat_priv,
370 "* type: DAT_CACHE_REPLY\n");
371 break;
372 case BATADV_P_DATA:
373 batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DATA\n");
374 break;
375 default:
376 batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n",
377 unicast_4addr_packet->u.header.packet_type);
378 }
379 break;
380 case BATADV_BCAST:
381 bcast_pkt = (struct batadv_bcast_packet *)unicast_4addr_packet;
382 orig_addr = bcast_pkt->orig;
383 batadv_dbg(BATADV_DBG_DAT, bat_priv,
384 "* encapsulated within a BCAST packet (src: %pM)\n",
385 orig_addr);
386 break;
387 default:
388 batadv_dbg(BATADV_DBG_DAT, bat_priv,
389 "* encapsulated within an unknown packet type (0x%x)\n",
390 unicast_4addr_packet->u.header.packet_type);
391 }
392}
393
394#else
395
396static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
397 uint16_t type, int hdr_size, char *msg)
398{
399}
400
401#endif /* CONFIG_BATMAN_ADV_DEBUG */
402
403/**
404 * batadv_is_orig_node_eligible - check whether a node can be a DHT candidate
405 * @res: the array with the already selected candidates
406 * @select: number of already selected candidates
407 * @tmp_max: address of the currently evaluated node
408 * @max: current round max address
409 * @last_max: address of the last selected candidate
410 * @candidate: orig_node under evaluation
411 * @max_orig_node: last selected candidate
412 *
413 * Returns true if the node has been elected as next candidate or false othrwise
414 */
415static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
416 int select, batadv_dat_addr_t tmp_max,
417 batadv_dat_addr_t max,
418 batadv_dat_addr_t last_max,
419 struct batadv_orig_node *candidate,
420 struct batadv_orig_node *max_orig_node)
421{
422 bool ret = false;
423 int j;
424
425 /* Check if this node has already been selected... */
426 for (j = 0; j < select; j++)
427 if (res[j].orig_node == candidate)
428 break;
429 /* ..and possibly skip it */
430 if (j < select)
431 goto out;
432 /* sanity check: has it already been selected? This should not happen */
433 if (tmp_max > last_max)
434 goto out;
435 /* check if during this iteration an originator with a closer dht
436 * address has already been found
437 */
438 if (tmp_max < max)
439 goto out;
440 /* this is an hash collision with the temporary selected node. Choose
441 * the one with the lowest address
442 */
443 if ((tmp_max == max) &&
444 (batadv_compare_eth(candidate->orig, max_orig_node->orig) > 0))
445 goto out;
446
447 ret = true;
448out:
449 return ret;
450}
451
/**
 * batadv_choose_next_candidate - select the next DHT candidate
 * @bat_priv: the bat priv with all the soft interface information
 * @cands: candidates array
 * @select: number of candidates already present in the array
 * @ip_key: key to look up in the DHT
 * @last_max: pointer where the address of the selected candidate will be saved
 */
static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
					 struct batadv_dat_candidate *cands,
					 int select, batadv_dat_addr_t ip_key,
					 batadv_dat_addr_t *last_max)
{
	batadv_dat_addr_t max = 0, tmp_max = 0;
	struct batadv_orig_node *orig_node, *max_orig_node = NULL;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	int i;

	/* if no node is eligible as candidate, leave the candidate type as
	 * NOT_FOUND
	 */
	cands[select].type = BATADV_DAT_CANDIDATE_NOT_FOUND;

	/* iterate over the originator list and find the node with closest
	 * dat_address which has not been selected yet
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			/* the dht space is a ring and addresses are unsigned */
			tmp_max = BATADV_DAT_ADDR_MAX - orig_node->dat_addr +
				  ip_key;

			if (!batadv_is_orig_node_eligible(cands, select,
							  tmp_max, max,
							  *last_max, orig_node,
							  max_orig_node))
				continue;

			/* take a reference on the new best candidate; skip
			 * originators that are already being freed
			 */
			if (!atomic_inc_not_zero(&orig_node->refcount))
				continue;

			max = tmp_max;
			/* release the reference held on the previous best */
			if (max_orig_node)
				batadv_orig_node_free_ref(max_orig_node);
			max_orig_node = orig_node;
		}
		rcu_read_unlock();
	}
	if (max_orig_node) {
		cands[select].type = BATADV_DAT_CANDIDATE_ORIG;
		/* the reference taken above is handed over to the caller
		 * through the cands array
		 */
		cands[select].orig_node = max_orig_node;
		batadv_dbg(BATADV_DBG_DAT, bat_priv,
			   "dat_select_candidates() %d: selected %pM addr=%u dist=%u\n",
			   select, max_orig_node->orig, max_orig_node->dat_addr,
			   max);
	}
	*last_max = max;
}
515
516/**
517 * batadv_dat_select_candidates - selects the nodes which the DHT message has to
518 * be sent to
519 * @bat_priv: the bat priv with all the soft interface information
520 * @ip_dst: ipv4 to look up in the DHT
521 *
522 * An originator O is selected if and only if its DHT_ID value is one of three
523 * closest values (from the LEFT, with wrap around if needed) then the hash
524 * value of the key. ip_dst is the key.
525 *
526 * Returns the candidate array of size BATADV_DAT_CANDIDATE_NUM
527 */
528static struct batadv_dat_candidate *
529batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
530{
531 int select;
532 batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
533 struct batadv_dat_candidate *res;
534
535 if (!bat_priv->orig_hash)
536 return NULL;
537
538 res = kmalloc(BATADV_DAT_CANDIDATES_NUM * sizeof(*res), GFP_ATOMIC);
539 if (!res)
540 return NULL;
541
542 ip_key = (batadv_dat_addr_t)batadv_hash_dat(&ip_dst,
543 BATADV_DAT_ADDR_MAX);
544
545 batadv_dbg(BATADV_DBG_DAT, bat_priv,
546 "dat_select_candidates(): IP=%pI4 hash(IP)=%u\n", &ip_dst,
547 ip_key);
548
549 for (select = 0; select < BATADV_DAT_CANDIDATES_NUM; select++)
550 batadv_choose_next_candidate(bat_priv, res, select, ip_key,
551 &last_max);
552
553 return res;
554}
555
556/**
557 * batadv_dat_send_data - send a payload to the selected candidates
558 * @bat_priv: the bat priv with all the soft interface information
559 * @skb: payload to send
560 * @ip: the DHT key
561 * @packet_subtype: unicast4addr packet subtype to use
562 *
563 * In this function the skb is copied by means of pskb_copy() and is sent as
564 * unicast packet to each of the selected candidates
565 *
566 * Returns true if the packet is sent to at least one candidate, false otherwise
567 */
568static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
569 struct sk_buff *skb, __be32 ip,
570 int packet_subtype)
571{
572 int i;
573 bool ret = false;
574 int send_status;
575 struct batadv_neigh_node *neigh_node = NULL;
576 struct sk_buff *tmp_skb;
577 struct batadv_dat_candidate *cand;
578
579 cand = batadv_dat_select_candidates(bat_priv, ip);
580 if (!cand)
581 goto out;
582
583 batadv_dbg(BATADV_DBG_DAT, bat_priv, "DHT_SEND for %pI4\n", &ip);
584
585 for (i = 0; i < BATADV_DAT_CANDIDATES_NUM; i++) {
586 if (cand[i].type == BATADV_DAT_CANDIDATE_NOT_FOUND)
587 continue;
588
589 neigh_node = batadv_orig_node_get_router(cand[i].orig_node);
590 if (!neigh_node)
591 goto free_orig;
592
593 tmp_skb = pskb_copy(skb, GFP_ATOMIC);
594 if (!batadv_unicast_4addr_prepare_skb(bat_priv, tmp_skb,
595 cand[i].orig_node,
596 packet_subtype)) {
597 kfree_skb(tmp_skb);
598 goto free_neigh;
599 }
600
601 send_status = batadv_send_skb_packet(tmp_skb,
602 neigh_node->if_incoming,
603 neigh_node->addr);
604 if (send_status == NET_XMIT_SUCCESS) {
605 /* count the sent packet */
606 switch (packet_subtype) {
607 case BATADV_P_DAT_DHT_GET:
608 batadv_inc_counter(bat_priv,
609 BATADV_CNT_DAT_GET_TX);
610 break;
611 case BATADV_P_DAT_DHT_PUT:
612 batadv_inc_counter(bat_priv,
613 BATADV_CNT_DAT_PUT_TX);
614 break;
615 }
616
617 /* packet sent to a candidate: return true */
618 ret = true;
619 }
620free_neigh:
621 batadv_neigh_node_free_ref(neigh_node);
622free_orig:
623 batadv_orig_node_free_ref(cand[i].orig_node);
624 }
625
626out:
627 kfree(cand);
628 return ret;
629}
630
631/**
632 * batadv_dat_hash_free - free the local DAT hash table
633 * @bat_priv: the bat priv with all the soft interface information
634 */
635static void batadv_dat_hash_free(struct batadv_priv *bat_priv)
636{
637 if (!bat_priv->dat.hash)
638 return;
639
640 __batadv_dat_purge(bat_priv, NULL);
641
642 batadv_hash_destroy(bat_priv->dat.hash);
643
644 bat_priv->dat.hash = NULL;
645}
646
647/**
648 * batadv_dat_init - initialise the DAT internals
649 * @bat_priv: the bat priv with all the soft interface information
650 */
651int batadv_dat_init(struct batadv_priv *bat_priv)
652{
653 if (bat_priv->dat.hash)
654 return 0;
655
656 bat_priv->dat.hash = batadv_hash_new(1024);
657
658 if (!bat_priv->dat.hash)
659 return -ENOMEM;
660
661 batadv_dat_start_timer(bat_priv);
662
663 return 0;
664}
665
666/**
667 * batadv_dat_free - free the DAT internals
668 * @bat_priv: the bat priv with all the soft interface information
669 */
670void batadv_dat_free(struct batadv_priv *bat_priv)
671{
672 cancel_delayed_work_sync(&bat_priv->dat.work);
673
674 batadv_dat_hash_free(bat_priv);
675}
676
677/**
678 * batadv_dat_cache_seq_print_text - print the local DAT hash table
679 * @seq: seq file to print on
680 * @offset: not used
681 */
682int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
683{
684 struct net_device *net_dev = (struct net_device *)seq->private;
685 struct batadv_priv *bat_priv = netdev_priv(net_dev);
686 struct batadv_hashtable *hash = bat_priv->dat.hash;
687 struct batadv_dat_entry *dat_entry;
688 struct batadv_hard_iface *primary_if;
689 struct hlist_node *node;
690 struct hlist_head *head;
691 unsigned long last_seen_jiffies;
692 int last_seen_msecs, last_seen_secs, last_seen_mins;
693 uint32_t i;
694
695 primary_if = batadv_seq_print_text_primary_if_get(seq);
696 if (!primary_if)
697 goto out;
698
699 seq_printf(seq, "Distributed ARP Table (%s):\n", net_dev->name);
700 seq_printf(seq, " %-7s %-13s %5s\n", "IPv4", "MAC",
701 "last-seen");
702
703 for (i = 0; i < hash->size; i++) {
704 head = &hash->table[i];
705
706 rcu_read_lock();
707 hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) {
708 last_seen_jiffies = jiffies - dat_entry->last_update;
709 last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
710 last_seen_mins = last_seen_msecs / 60000;
711 last_seen_msecs = last_seen_msecs % 60000;
712 last_seen_secs = last_seen_msecs / 1000;
713
714 seq_printf(seq, " * %15pI4 %14pM %6i:%02i\n",
715 &dat_entry->ip, dat_entry->mac_addr,
716 last_seen_mins, last_seen_secs);
717 }
718 rcu_read_unlock();
719 }
720
721out:
722 if (primary_if)
723 batadv_hardif_free_ref(primary_if);
724 return 0;
725}
726
727/**
728 * batadv_arp_get_type - parse an ARP packet and gets the type
729 * @bat_priv: the bat priv with all the soft interface information
730 * @skb: packet to analyse
731 * @hdr_size: size of the possible header before the ARP packet in the skb
732 *
733 * Returns the ARP type if the skb contains a valid ARP packet, 0 otherwise
734 */
735static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv,
736 struct sk_buff *skb, int hdr_size)
737{
738 struct arphdr *arphdr;
739 struct ethhdr *ethhdr;
740 __be32 ip_src, ip_dst;
741 uint16_t type = 0;
742
743 /* pull the ethernet header */
744 if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN)))
745 goto out;
746
747 ethhdr = (struct ethhdr *)(skb->data + hdr_size);
748
749 if (ethhdr->h_proto != htons(ETH_P_ARP))
750 goto out;
751
752 /* pull the ARP payload */
753 if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN +
754 arp_hdr_len(skb->dev))))
755 goto out;
756
757 arphdr = (struct arphdr *)(skb->data + hdr_size + ETH_HLEN);
758
759 /* Check whether the ARP packet carries a valid
760 * IP information
761 */
762 if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
763 goto out;
764
765 if (arphdr->ar_pro != htons(ETH_P_IP))
766 goto out;
767
768 if (arphdr->ar_hln != ETH_ALEN)
769 goto out;
770
771 if (arphdr->ar_pln != 4)
772 goto out;
773
774 /* Check for bad reply/request. If the ARP message is not sane, DAT
775 * will simply ignore it
776 */
777 ip_src = batadv_arp_ip_src(skb, hdr_size);
778 ip_dst = batadv_arp_ip_dst(skb, hdr_size);
779 if (ipv4_is_loopback(ip_src) || ipv4_is_multicast(ip_src) ||
780 ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst))
781 goto out;
782
783 type = ntohs(arphdr->ar_op);
784out:
785 return type;
786}
787
788/**
789 * batadv_dat_snoop_outgoing_arp_request - snoop the ARP request and try to
790 * answer using DAT
791 * @bat_priv: the bat priv with all the soft interface information
792 * @skb: packet to check
793 *
794 * Returns true if the message has been sent to the dht candidates, false
795 * otherwise. In case of true the message has to be enqueued to permit the
796 * fallback
797 */
798bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
799 struct sk_buff *skb)
800{
801 uint16_t type = 0;
802 __be32 ip_dst, ip_src;
803 uint8_t *hw_src;
804 bool ret = false;
805 struct batadv_dat_entry *dat_entry = NULL;
806 struct sk_buff *skb_new;
807 struct batadv_hard_iface *primary_if = NULL;
808
809 if (!atomic_read(&bat_priv->distributed_arp_table))
810 goto out;
811
812 type = batadv_arp_get_type(bat_priv, skb, 0);
813 /* If the node gets an ARP_REQUEST it has to send a DHT_GET unicast
814 * message to the selected DHT candidates
815 */
816 if (type != ARPOP_REQUEST)
817 goto out;
818
819 batadv_dbg_arp(bat_priv, skb, type, 0, "Parsing outgoing ARP REQUEST");
820
821 ip_src = batadv_arp_ip_src(skb, 0);
822 hw_src = batadv_arp_hw_src(skb, 0);
823 ip_dst = batadv_arp_ip_dst(skb, 0);
824
825 batadv_dat_entry_add(bat_priv, ip_src, hw_src);
826
827 dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
828 if (dat_entry) {
829 primary_if = batadv_primary_if_get_selected(bat_priv);
830 if (!primary_if)
831 goto out;
832
833 skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src,
834 primary_if->soft_iface, ip_dst, hw_src,
835 dat_entry->mac_addr, hw_src);
836 if (!skb_new)
837 goto out;
838
839 skb_reset_mac_header(skb_new);
840 skb_new->protocol = eth_type_trans(skb_new,
841 primary_if->soft_iface);
842 bat_priv->stats.rx_packets++;
843 bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
844 primary_if->soft_iface->last_rx = jiffies;
845
846 netif_rx(skb_new);
847 batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n");
848 ret = true;
849 } else {
850 /* Send the request on the DHT */
851 ret = batadv_dat_send_data(bat_priv, skb, ip_dst,
852 BATADV_P_DAT_DHT_GET);
853 }
854out:
855 if (dat_entry)
856 batadv_dat_entry_free_ref(dat_entry);
857 if (primary_if)
858 batadv_hardif_free_ref(primary_if);
859 return ret;
860}
861
862/**
863 * batadv_dat_snoop_incoming_arp_request - snoop the ARP request and try to
864 * answer using the local DAT storage
865 * @bat_priv: the bat priv with all the soft interface information
866 * @skb: packet to check
867 * @hdr_size: size of the encapsulation header
868 *
869 * Returns true if the request has been answered, false otherwise
870 */
871bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
872 struct sk_buff *skb, int hdr_size)
873{
874 uint16_t type;
875 __be32 ip_src, ip_dst;
876 uint8_t *hw_src;
877 struct sk_buff *skb_new;
878 struct batadv_hard_iface *primary_if = NULL;
879 struct batadv_dat_entry *dat_entry = NULL;
880 bool ret = false;
881 int err;
882
883 if (!atomic_read(&bat_priv->distributed_arp_table))
884 goto out;
885
886 type = batadv_arp_get_type(bat_priv, skb, hdr_size);
887 if (type != ARPOP_REQUEST)
888 goto out;
889
890 hw_src = batadv_arp_hw_src(skb, hdr_size);
891 ip_src = batadv_arp_ip_src(skb, hdr_size);
892 ip_dst = batadv_arp_ip_dst(skb, hdr_size);
893
894 batadv_dbg_arp(bat_priv, skb, type, hdr_size,
895 "Parsing incoming ARP REQUEST");
896
897 batadv_dat_entry_add(bat_priv, ip_src, hw_src);
898
899 dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
900 if (!dat_entry)
901 goto out;
902
903 primary_if = batadv_primary_if_get_selected(bat_priv);
904 if (!primary_if)
905 goto out;
906
907 skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src,
908 primary_if->soft_iface, ip_dst, hw_src,
909 dat_entry->mac_addr, hw_src);
910
911 if (!skb_new)
912 goto out;
913
914 /* to preserve backwards compatibility, here the node has to answer
915 * using the same packet type it received for the request. This is due
916 * to that if a node is not using the 4addr packet format it may not
917 * support it.
918 */
919 if (hdr_size == sizeof(struct batadv_unicast_4addr_packet))
920 err = batadv_unicast_4addr_send_skb(bat_priv, skb_new,
921 BATADV_P_DAT_CACHE_REPLY);
922 else
923 err = batadv_unicast_send_skb(bat_priv, skb_new);
924
925 if (!err) {
926 batadv_inc_counter(bat_priv, BATADV_CNT_DAT_CACHED_REPLY_TX);
927 ret = true;
928 }
929out:
930 if (dat_entry)
931 batadv_dat_entry_free_ref(dat_entry);
932 if (primary_if)
933 batadv_hardif_free_ref(primary_if);
934 if (ret)
935 kfree_skb(skb);
936 return ret;
937}
938
939/**
940 * batadv_dat_snoop_outgoing_arp_reply - snoop the ARP reply and fill the DHT
941 * @bat_priv: the bat priv with all the soft interface information
942 * @skb: packet to check
943 */
944void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
945 struct sk_buff *skb)
946{
947 uint16_t type;
948 __be32 ip_src, ip_dst;
949 uint8_t *hw_src, *hw_dst;
950
951 if (!atomic_read(&bat_priv->distributed_arp_table))
952 return;
953
954 type = batadv_arp_get_type(bat_priv, skb, 0);
955 if (type != ARPOP_REPLY)
956 return;
957
958 batadv_dbg_arp(bat_priv, skb, type, 0, "Parsing outgoing ARP REPLY");
959
960 hw_src = batadv_arp_hw_src(skb, 0);
961 ip_src = batadv_arp_ip_src(skb, 0);
962 hw_dst = batadv_arp_hw_dst(skb, 0);
963 ip_dst = batadv_arp_ip_dst(skb, 0);
964
965 batadv_dat_entry_add(bat_priv, ip_src, hw_src);
966 batadv_dat_entry_add(bat_priv, ip_dst, hw_dst);
967
968 /* Send the ARP reply to the candidates for both the IP addresses that
969 * the node got within the ARP reply
970 */
971 batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
972 batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
973}
974/**
975 * batadv_dat_snoop_incoming_arp_reply - snoop the ARP reply and fill the local
976 * DAT storage only
977 * @bat_priv: the bat priv with all the soft interface information
978 * @skb: packet to check
979 * @hdr_size: siaze of the encapsulation header
980 */
981bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
982 struct sk_buff *skb, int hdr_size)
983{
984 uint16_t type;
985 __be32 ip_src, ip_dst;
986 uint8_t *hw_src, *hw_dst;
987 bool ret = false;
988
989 if (!atomic_read(&bat_priv->distributed_arp_table))
990 goto out;
991
992 type = batadv_arp_get_type(bat_priv, skb, hdr_size);
993 if (type != ARPOP_REPLY)
994 goto out;
995
996 batadv_dbg_arp(bat_priv, skb, type, hdr_size,
997 "Parsing incoming ARP REPLY");
998
999 hw_src = batadv_arp_hw_src(skb, hdr_size);
1000 ip_src = batadv_arp_ip_src(skb, hdr_size);
1001 hw_dst = batadv_arp_hw_dst(skb, hdr_size);
1002 ip_dst = batadv_arp_ip_dst(skb, hdr_size);
1003
1004 /* Update our internal cache with both the IP addresses the node got
1005 * within the ARP reply
1006 */
1007 batadv_dat_entry_add(bat_priv, ip_src, hw_src);
1008 batadv_dat_entry_add(bat_priv, ip_dst, hw_dst);
1009
1010 /* if this REPLY is directed to a client of mine, let's deliver the
1011 * packet to the interface
1012 */
1013 ret = !batadv_is_my_client(bat_priv, hw_dst);
1014out:
1015 /* if ret == false -> packet has to be delivered to the interface */
1016 return ret;
1017}
1018
1019/**
1020 * batadv_dat_drop_broadcast_packet - check if an ARP request has to be dropped
1021 * (because the node has already got the reply via DAT) or not
1022 * @bat_priv: the bat priv with all the soft interface information
1023 * @forw_packet: the broadcast packet
1024 *
1025 * Returns true if the node can drop the packet, false otherwise
1026 */
1027bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
1028 struct batadv_forw_packet *forw_packet)
1029{
1030 uint16_t type;
1031 __be32 ip_dst;
1032 struct batadv_dat_entry *dat_entry = NULL;
1033 bool ret = false;
1034 const size_t bcast_len = sizeof(struct batadv_bcast_packet);
1035
1036 if (!atomic_read(&bat_priv->distributed_arp_table))
1037 goto out;
1038
1039 /* If this packet is an ARP_REQUEST and the node already has the
1040 * information that it is going to ask, then the packet can be dropped
1041 */
1042 if (forw_packet->num_packets)
1043 goto out;
1044
1045 type = batadv_arp_get_type(bat_priv, forw_packet->skb, bcast_len);
1046 if (type != ARPOP_REQUEST)
1047 goto out;
1048
1049 ip_dst = batadv_arp_ip_dst(forw_packet->skb, bcast_len);
1050 dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
1051 /* check if the node already got this entry */
1052 if (!dat_entry) {
1053 batadv_dbg(BATADV_DBG_DAT, bat_priv,
1054 "ARP Request for %pI4: fallback\n", &ip_dst);
1055 goto out;
1056 }
1057
1058 batadv_dbg(BATADV_DBG_DAT, bat_priv,
1059 "ARP Request for %pI4: fallback prevented\n", &ip_dst);
1060 ret = true;
1061
1062out:
1063 if (dat_entry)
1064 batadv_dat_entry_free_ref(dat_entry);
1065 return ret;
1066}
diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h
new file mode 100644
index 000000000000..d060c033e7de
--- /dev/null
+++ b/net/batman-adv/distributed-arp-table.h
@@ -0,0 +1,167 @@
1/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
2 *
3 * Antonio Quartulli
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA
18 */
19
20#ifndef _NET_BATMAN_ADV_ARP_H_
21#define _NET_BATMAN_ADV_ARP_H_
22
23#ifdef CONFIG_BATMAN_ADV_DAT
24
25#include "types.h"
26#include "originator.h"
27
28#include <linux/if_arp.h>
29
30#define BATADV_DAT_ADDR_MAX ((batadv_dat_addr_t)~(batadv_dat_addr_t)0)
31
32bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
33 struct sk_buff *skb);
34bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
35 struct sk_buff *skb, int hdr_size);
36void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
37 struct sk_buff *skb);
38bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
39 struct sk_buff *skb, int hdr_size);
40bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
41 struct batadv_forw_packet *forw_packet);
42
43/**
44 * batadv_dat_init_orig_node_addr - assign a DAT address to the orig_node
45 * @orig_node: the node to assign the DAT address to
46 */
47static inline void
48batadv_dat_init_orig_node_addr(struct batadv_orig_node *orig_node)
49{
50 uint32_t addr;
51
52 addr = batadv_choose_orig(orig_node->orig, BATADV_DAT_ADDR_MAX);
53 orig_node->dat_addr = (batadv_dat_addr_t)addr;
54}
55
56/**
57 * batadv_dat_init_own_addr - assign a DAT address to the node itself
58 * @bat_priv: the bat priv with all the soft interface information
59 * @primary_if: a pointer to the primary interface
60 */
61static inline void
62batadv_dat_init_own_addr(struct batadv_priv *bat_priv,
63 struct batadv_hard_iface *primary_if)
64{
65 uint32_t addr;
66
67 addr = batadv_choose_orig(primary_if->net_dev->dev_addr,
68 BATADV_DAT_ADDR_MAX);
69
70 bat_priv->dat.addr = (batadv_dat_addr_t)addr;
71}
72
73int batadv_dat_init(struct batadv_priv *bat_priv);
74void batadv_dat_free(struct batadv_priv *bat_priv);
75int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset);
76
77/**
78 * batadv_dat_inc_counter - increment the correct DAT packet counter
79 * @bat_priv: the bat priv with all the soft interface information
80 * @subtype: the 4addr subtype of the packet to be counted
81 *
82 * Updates the ethtool statistics for the received packet if it is a DAT subtype
83 */
84static inline void batadv_dat_inc_counter(struct batadv_priv *bat_priv,
85 uint8_t subtype)
86{
87 switch (subtype) {
88 case BATADV_P_DAT_DHT_GET:
89 batadv_inc_counter(bat_priv,
90 BATADV_CNT_DAT_GET_RX);
91 break;
92 case BATADV_P_DAT_DHT_PUT:
93 batadv_inc_counter(bat_priv,
94 BATADV_CNT_DAT_PUT_RX);
95 break;
96 }
97}
98
99#else
100
101static inline bool
102batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
103 struct sk_buff *skb)
104{
105 return false;
106}
107
108static inline bool
109batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
110 struct sk_buff *skb, int hdr_size)
111{
112 return false;
113}
114
115static inline bool
116batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
117 struct sk_buff *skb)
118{
119 return false;
120}
121
122static inline bool
123batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
124 struct sk_buff *skb, int hdr_size)
125{
126 return false;
127}
128
129static inline bool
130batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
131 struct batadv_forw_packet *forw_packet)
132{
133 return false;
134}
135
136static inline void
137batadv_dat_init_orig_node_addr(struct batadv_orig_node *orig_node)
138{
139}
140
141static inline void batadv_dat_init_own_addr(struct batadv_priv *bat_priv,
142 struct batadv_hard_iface *iface)
143{
144}
145
146static inline void batadv_arp_change_timeout(struct net_device *soft_iface,
147 const char *name)
148{
149}
150
151static inline int batadv_dat_init(struct batadv_priv *bat_priv)
152{
153 return 0;
154}
155
156static inline void batadv_dat_free(struct batadv_priv *bat_priv)
157{
158}
159
160static inline void batadv_dat_inc_counter(struct batadv_priv *bat_priv,
161 uint8_t subtype)
162{
163}
164
165#endif /* CONFIG_BATMAN_ADV_DAT */
166
167#endif /* _NET_BATMAN_ADV_ARP_H_ */
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 15d67abc10a4..dd07c7e3654f 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -477,22 +477,11 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
477 struct batadv_hard_iface *primary_if; 477 struct batadv_hard_iface *primary_if;
478 struct batadv_gw_node *gw_node; 478 struct batadv_gw_node *gw_node;
479 struct hlist_node *node; 479 struct hlist_node *node;
480 int gw_count = 0, ret = 0; 480 int gw_count = 0;
481 481
482 primary_if = batadv_primary_if_get_selected(bat_priv); 482 primary_if = batadv_seq_print_text_primary_if_get(seq);
483 if (!primary_if) { 483 if (!primary_if)
484 ret = seq_printf(seq,
485 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
486 net_dev->name);
487 goto out; 484 goto out;
488 }
489
490 if (primary_if->if_status != BATADV_IF_ACTIVE) {
491 ret = seq_printf(seq,
492 "BATMAN mesh %s disabled - primary interface not active\n",
493 net_dev->name);
494 goto out;
495 }
496 485
497 seq_printf(seq, 486 seq_printf(seq,
498 " %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n", 487 " %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
@@ -519,7 +508,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
519out: 508out:
520 if (primary_if) 509 if (primary_if)
521 batadv_hardif_free_ref(primary_if); 510 batadv_hardif_free_ref(primary_if);
522 return ret; 511 return 0;
523} 512}
524 513
525static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len) 514static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index d112fd6750b0..f1d37cd81815 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include "main.h" 20#include "main.h"
21#include "distributed-arp-table.h"
21#include "hard-interface.h" 22#include "hard-interface.h"
22#include "soft-interface.h" 23#include "soft-interface.h"
23#include "send.h" 24#include "send.h"
@@ -29,6 +30,7 @@
29#include "bridge_loop_avoidance.h" 30#include "bridge_loop_avoidance.h"
30 31
31#include <linux/if_arp.h> 32#include <linux/if_arp.h>
33#include <linux/if_ether.h>
32 34
33void batadv_hardif_free_rcu(struct rcu_head *rcu) 35void batadv_hardif_free_rcu(struct rcu_head *rcu)
34{ 36{
@@ -58,6 +60,45 @@ out:
58 return hard_iface; 60 return hard_iface;
59} 61}
60 62
63/**
64 * batadv_is_on_batman_iface - check if a device is a batman iface descendant
65 * @net_dev: the device to check
66 *
67 * If the user creates any virtual device on top of a batman-adv interface, it
68 * is important to prevent this new interface to be used to create a new mesh
69 * network (this behaviour would lead to a batman-over-batman configuration).
70 * This function recursively checks all the fathers of the device passed as
71 * argument looking for a batman-adv soft interface.
72 *
73 * Returns true if the device is descendant of a batman-adv mesh interface (or
74 * if it is a batman-adv interface itself), false otherwise
75 */
76static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
77{
78 struct net_device *parent_dev;
79 bool ret;
80
81 /* check if this is a batman-adv mesh interface */
82 if (batadv_softif_is_valid(net_dev))
83 return true;
84
85 /* no more parents..stop recursion */
86 if (net_dev->iflink == net_dev->ifindex)
87 return false;
88
89 /* recurse over the parent device */
90 parent_dev = dev_get_by_index(&init_net, net_dev->iflink);
91 /* if we got a NULL parent_dev there is something broken.. */
92 if (WARN(!parent_dev, "Cannot find parent device"))
93 return false;
94
95 ret = batadv_is_on_batman_iface(parent_dev);
96
97 if (parent_dev)
98 dev_put(parent_dev);
99 return ret;
100}
101
61static int batadv_is_valid_iface(const struct net_device *net_dev) 102static int batadv_is_valid_iface(const struct net_device *net_dev)
62{ 103{
63 if (net_dev->flags & IFF_LOOPBACK) 104 if (net_dev->flags & IFF_LOOPBACK)
@@ -70,7 +111,7 @@ static int batadv_is_valid_iface(const struct net_device *net_dev)
70 return 0; 111 return 0;
71 112
72 /* no batman over batman */ 113 /* no batman over batman */
73 if (batadv_softif_is_valid(net_dev)) 114 if (batadv_is_on_batman_iface(net_dev))
74 return 0; 115 return 0;
75 116
76 return 1; 117 return 1;
@@ -109,6 +150,8 @@ static void batadv_primary_if_update_addr(struct batadv_priv *bat_priv,
109 if (!primary_if) 150 if (!primary_if)
110 goto out; 151 goto out;
111 152
153 batadv_dat_init_own_addr(bat_priv, primary_if);
154
112 skb = bat_priv->vis.my_info->skb_packet; 155 skb = bat_priv->vis.my_info->skb_packet;
113 vis_packet = (struct batadv_vis_packet *)skb->data; 156 vis_packet = (struct batadv_vis_packet *)skb->data;
114 memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN); 157 memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
@@ -269,7 +312,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
269{ 312{
270 struct batadv_priv *bat_priv; 313 struct batadv_priv *bat_priv;
271 struct net_device *soft_iface; 314 struct net_device *soft_iface;
272 __be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN); 315 __be16 ethertype = __constant_htons(ETH_P_BATMAN);
273 int ret; 316 int ret;
274 317
275 if (hard_iface->if_status != BATADV_IF_NOT_IN_USE) 318 if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
@@ -450,8 +493,8 @@ batadv_hardif_add_interface(struct net_device *net_dev)
450 /* This can't be called via a bat_priv callback because 493 /* This can't be called via a bat_priv callback because
451 * we have no bat_priv yet. 494 * we have no bat_priv yet.
452 */ 495 */
453 atomic_set(&hard_iface->seqno, 1); 496 atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
454 hard_iface->packet_buff = NULL; 497 hard_iface->bat_iv.ogm_buff = NULL;
455 498
456 return hard_iface; 499 return hard_iface;
457 500
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 977de9c75fc2..e05333905afd 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -82,6 +82,28 @@ static inline void batadv_hash_delete(struct batadv_hashtable *hash,
82} 82}
83 83
84/** 84/**
85 * batadv_hash_bytes - hash some bytes and add them to the previous hash
86 * @hash: previous hash value
87 * @data: data to be hashed
88 * @size: number of bytes to be hashed
89 *
90 * Returns the new hash value.
91 */
92static inline uint32_t batadv_hash_bytes(uint32_t hash, void *data,
93 uint32_t size)
94{
95 const unsigned char *key = data;
96 int i;
97
98 for (i = 0; i < size; i++) {
99 hash += key[i];
100 hash += (hash << 10);
101 hash ^= (hash >> 6);
102 }
103 return hash;
104}
105
106/**
85 * batadv_hash_add - adds data to the hashtable 107 * batadv_hash_add - adds data to the hashtable
86 * @hash: storage hash table 108 * @hash: storage hash table
87 * @compare: callback to determine if 2 hash elements are identical 109 * @compare: callback to determine if 2 hash elements are identical
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index bde3cf747507..87ca8095b011 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -42,12 +42,16 @@ static int batadv_socket_open(struct inode *inode, struct file *file)
42 unsigned int i; 42 unsigned int i;
43 struct batadv_socket_client *socket_client; 43 struct batadv_socket_client *socket_client;
44 44
45 if (!try_module_get(THIS_MODULE))
46 return -EBUSY;
47
45 nonseekable_open(inode, file); 48 nonseekable_open(inode, file);
46 49
47 socket_client = kmalloc(sizeof(*socket_client), GFP_KERNEL); 50 socket_client = kmalloc(sizeof(*socket_client), GFP_KERNEL);
48 51 if (!socket_client) {
49 if (!socket_client) 52 module_put(THIS_MODULE);
50 return -ENOMEM; 53 return -ENOMEM;
54 }
51 55
52 for (i = 0; i < ARRAY_SIZE(batadv_socket_client_hash); i++) { 56 for (i = 0; i < ARRAY_SIZE(batadv_socket_client_hash); i++) {
53 if (!batadv_socket_client_hash[i]) { 57 if (!batadv_socket_client_hash[i]) {
@@ -59,6 +63,7 @@ static int batadv_socket_open(struct inode *inode, struct file *file)
59 if (i == ARRAY_SIZE(batadv_socket_client_hash)) { 63 if (i == ARRAY_SIZE(batadv_socket_client_hash)) {
60 pr_err("Error - can't add another packet client: maximum number of clients reached\n"); 64 pr_err("Error - can't add another packet client: maximum number of clients reached\n");
61 kfree(socket_client); 65 kfree(socket_client);
66 module_put(THIS_MODULE);
62 return -EXFULL; 67 return -EXFULL;
63 } 68 }
64 69
@@ -71,7 +76,6 @@ static int batadv_socket_open(struct inode *inode, struct file *file)
71 76
72 file->private_data = socket_client; 77 file->private_data = socket_client;
73 78
74 batadv_inc_module_count();
75 return 0; 79 return 0;
76} 80}
77 81
@@ -96,7 +100,7 @@ static int batadv_socket_release(struct inode *inode, struct file *file)
96 spin_unlock_bh(&socket_client->lock); 100 spin_unlock_bh(&socket_client->lock);
97 101
98 kfree(socket_client); 102 kfree(socket_client);
99 batadv_dec_module_count(); 103 module_put(THIS_MODULE);
100 104
101 return 0; 105 return 0;
102} 106}
@@ -173,13 +177,13 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
173 if (len >= sizeof(struct batadv_icmp_packet_rr)) 177 if (len >= sizeof(struct batadv_icmp_packet_rr))
174 packet_len = sizeof(struct batadv_icmp_packet_rr); 178 packet_len = sizeof(struct batadv_icmp_packet_rr);
175 179
176 skb = dev_alloc_skb(packet_len + ETH_HLEN); 180 skb = dev_alloc_skb(packet_len + ETH_HLEN + NET_IP_ALIGN);
177 if (!skb) { 181 if (!skb) {
178 len = -ENOMEM; 182 len = -ENOMEM;
179 goto out; 183 goto out;
180 } 184 }
181 185
182 skb_reserve(skb, ETH_HLEN); 186 skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
183 icmp_packet = (struct batadv_icmp_packet_rr *)skb_put(skb, packet_len); 187 icmp_packet = (struct batadv_icmp_packet_rr *)skb_put(skb, packet_len);
184 188
185 if (copy_from_user(icmp_packet, buff, packet_len)) { 189 if (copy_from_user(icmp_packet, buff, packet_len)) {
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index b4aa470bc4a6..f65a222b7b83 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -17,6 +17,8 @@
17 * 02110-1301, USA 17 * 02110-1301, USA
18 */ 18 */
19 19
20#include <linux/crc32c.h>
21#include <linux/highmem.h>
20#include "main.h" 22#include "main.h"
21#include "sysfs.h" 23#include "sysfs.h"
22#include "debugfs.h" 24#include "debugfs.h"
@@ -29,6 +31,7 @@
29#include "hard-interface.h" 31#include "hard-interface.h"
30#include "gateway_client.h" 32#include "gateway_client.h"
31#include "bridge_loop_avoidance.h" 33#include "bridge_loop_avoidance.h"
34#include "distributed-arp-table.h"
32#include "vis.h" 35#include "vis.h"
33#include "hash.h" 36#include "hash.h"
34#include "bat_algo.h" 37#include "bat_algo.h"
@@ -128,6 +131,10 @@ int batadv_mesh_init(struct net_device *soft_iface)
128 if (ret < 0) 131 if (ret < 0)
129 goto err; 132 goto err;
130 133
134 ret = batadv_dat_init(bat_priv);
135 if (ret < 0)
136 goto err;
137
131 atomic_set(&bat_priv->gw.reselect, 0); 138 atomic_set(&bat_priv->gw.reselect, 0);
132 atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE); 139 atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
133 140
@@ -155,21 +162,13 @@ void batadv_mesh_free(struct net_device *soft_iface)
155 162
156 batadv_bla_free(bat_priv); 163 batadv_bla_free(bat_priv);
157 164
165 batadv_dat_free(bat_priv);
166
158 free_percpu(bat_priv->bat_counters); 167 free_percpu(bat_priv->bat_counters);
159 168
160 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); 169 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
161} 170}
162 171
163void batadv_inc_module_count(void)
164{
165 try_module_get(THIS_MODULE);
166}
167
168void batadv_dec_module_count(void)
169{
170 module_put(THIS_MODULE);
171}
172
173int batadv_is_my_mac(const uint8_t *addr) 172int batadv_is_my_mac(const uint8_t *addr)
174{ 173{
175 const struct batadv_hard_iface *hard_iface; 174 const struct batadv_hard_iface *hard_iface;
@@ -188,6 +187,42 @@ int batadv_is_my_mac(const uint8_t *addr)
188 return 0; 187 return 0;
189} 188}
190 189
190/**
191 * batadv_seq_print_text_primary_if_get - called from debugfs table printing
192 * function that requires the primary interface
193 * @seq: debugfs table seq_file struct
194 *
195 * Returns primary interface if found or NULL otherwise.
196 */
197struct batadv_hard_iface *
198batadv_seq_print_text_primary_if_get(struct seq_file *seq)
199{
200 struct net_device *net_dev = (struct net_device *)seq->private;
201 struct batadv_priv *bat_priv = netdev_priv(net_dev);
202 struct batadv_hard_iface *primary_if;
203
204 primary_if = batadv_primary_if_get_selected(bat_priv);
205
206 if (!primary_if) {
207 seq_printf(seq,
208 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
209 net_dev->name);
210 goto out;
211 }
212
213 if (primary_if->if_status == BATADV_IF_ACTIVE)
214 goto out;
215
216 seq_printf(seq,
217 "BATMAN mesh %s disabled - primary interface not active\n",
218 net_dev->name);
219 batadv_hardif_free_ref(primary_if);
220 primary_if = NULL;
221
222out:
223 return primary_if;
224}
225
191static int batadv_recv_unhandled_packet(struct sk_buff *skb, 226static int batadv_recv_unhandled_packet(struct sk_buff *skb,
192 struct batadv_hard_iface *recv_if) 227 struct batadv_hard_iface *recv_if)
193{ 228{
@@ -274,6 +309,8 @@ static void batadv_recv_handler_init(void)
274 309
275 /* batman icmp packet */ 310 /* batman icmp packet */
276 batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet; 311 batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
312 /* unicast with 4 addresses packet */
313 batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
277 /* unicast packet */ 314 /* unicast packet */
278 batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet; 315 batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
279 /* fragmented unicast packet */ 316 /* fragmented unicast packet */
@@ -385,6 +422,38 @@ int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
385 return 0; 422 return 0;
386} 423}
387 424
425/**
426 * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
427 * the header
428 * @skb: skb pointing to fragmented socket buffers
429 * @payload_ptr: Pointer to position inside the head buffer of the skb
430 * marking the start of the data to be CRC'ed
431 *
432 * payload_ptr must always point to an address in the skb head buffer and not to
433 * a fragment.
434 */
435__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
436{
437 u32 crc = 0;
438 unsigned int from;
439 unsigned int to = skb->len;
440 struct skb_seq_state st;
441 const u8 *data;
442 unsigned int len;
443 unsigned int consumed = 0;
444
445 from = (unsigned int)(payload_ptr - skb->data);
446
447 skb_prepare_seq_read(skb, from, to, &st);
448 while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
449 crc = crc32c(crc, data, len);
450 consumed += len;
451 }
452 skb_abort_seq_read(&st);
453
454 return htonl(crc);
455}
456
388static int batadv_param_set_ra(const char *val, const struct kernel_param *kp) 457static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
389{ 458{
390 struct batadv_algo_ops *bat_algo_ops; 459 struct batadv_algo_ops *bat_algo_ops;
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index d57b746219de..2f85577086a7 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -26,7 +26,7 @@
26#define BATADV_DRIVER_DEVICE "batman-adv" 26#define BATADV_DRIVER_DEVICE "batman-adv"
27 27
28#ifndef BATADV_SOURCE_VERSION 28#ifndef BATADV_SOURCE_VERSION
29#define BATADV_SOURCE_VERSION "2012.4.0" 29#define BATADV_SOURCE_VERSION "2012.5.0"
30#endif 30#endif
31 31
32/* B.A.T.M.A.N. parameters */ 32/* B.A.T.M.A.N. parameters */
@@ -44,6 +44,7 @@
44#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */ 44#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */
45#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */ 45#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */
46#define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */ 46#define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */
47#define BATADV_DAT_ENTRY_TIMEOUT (5*60000) /* 5 mins in milliseconds */
47/* sliding packet range of received originator messages in sequence numbers 48/* sliding packet range of received originator messages in sequence numbers
48 * (should be a multiple of our word size) 49 * (should be a multiple of our word size)
49 */ 50 */
@@ -73,6 +74,11 @@
73 74
74#define BATADV_LOG_BUF_LEN 8192 /* has to be a power of 2 */ 75#define BATADV_LOG_BUF_LEN 8192 /* has to be a power of 2 */
75 76
77/* msecs after which an ARP_REQUEST is sent in broadcast as fallback */
78#define ARP_REQ_DELAY 250
79/* numbers of originator to contact for any PUT/GET DHT operation */
80#define BATADV_DAT_CANDIDATES_NUM 3
81
76#define BATADV_VIS_INTERVAL 5000 /* 5 seconds */ 82#define BATADV_VIS_INTERVAL 5000 /* 5 seconds */
77 83
78/* how much worse secondary interfaces may be to be considered as bonding 84/* how much worse secondary interfaces may be to be considered as bonding
@@ -89,6 +95,7 @@
89#define BATADV_BLA_PERIOD_LENGTH 10000 /* 10 seconds */ 95#define BATADV_BLA_PERIOD_LENGTH 10000 /* 10 seconds */
90#define BATADV_BLA_BACKBONE_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 3) 96#define BATADV_BLA_BACKBONE_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 3)
91#define BATADV_BLA_CLAIM_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 10) 97#define BATADV_BLA_CLAIM_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 10)
98#define BATADV_BLA_WAIT_PERIODS 3
92 99
93#define BATADV_DUPLIST_SIZE 16 100#define BATADV_DUPLIST_SIZE 16
94#define BATADV_DUPLIST_TIMEOUT 500 /* 500 ms */ 101#define BATADV_DUPLIST_TIMEOUT 500 /* 500 ms */
@@ -117,6 +124,9 @@ enum batadv_uev_type {
117 124
118#define BATADV_GW_THRESHOLD 50 125#define BATADV_GW_THRESHOLD 50
119 126
127#define BATADV_DAT_CANDIDATE_NOT_FOUND 0
128#define BATADV_DAT_CANDIDATE_ORIG 1
129
120/* Debug Messages */ 130/* Debug Messages */
121#ifdef pr_fmt 131#ifdef pr_fmt
122#undef pr_fmt 132#undef pr_fmt
@@ -150,9 +160,9 @@ extern struct workqueue_struct *batadv_event_workqueue;
150 160
151int batadv_mesh_init(struct net_device *soft_iface); 161int batadv_mesh_init(struct net_device *soft_iface);
152void batadv_mesh_free(struct net_device *soft_iface); 162void batadv_mesh_free(struct net_device *soft_iface);
153void batadv_inc_module_count(void);
154void batadv_dec_module_count(void);
155int batadv_is_my_mac(const uint8_t *addr); 163int batadv_is_my_mac(const uint8_t *addr);
164struct batadv_hard_iface *
165batadv_seq_print_text_primary_if_get(struct seq_file *seq);
156int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, 166int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
157 struct packet_type *ptype, 167 struct packet_type *ptype,
158 struct net_device *orig_dev); 168 struct net_device *orig_dev);
@@ -164,14 +174,24 @@ void batadv_recv_handler_unregister(uint8_t packet_type);
164int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops); 174int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops);
165int batadv_algo_select(struct batadv_priv *bat_priv, char *name); 175int batadv_algo_select(struct batadv_priv *bat_priv, char *name);
166int batadv_algo_seq_print_text(struct seq_file *seq, void *offset); 176int batadv_algo_seq_print_text(struct seq_file *seq, void *offset);
177__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr);
167 178
168/* all messages related to routing / flooding / broadcasting / etc */ 179/**
180 * enum batadv_dbg_level - available log levels
181 * @BATADV_DBG_BATMAN: OGM and TQ computations related messages
182 * @BATADV_DBG_ROUTES: route added / changed / deleted
183 * @BATADV_DBG_TT: translation table messages
184 * @BATADV_DBG_BLA: bridge loop avoidance messages
185 * @BATADV_DBG_DAT: ARP snooping and DAT related messages
186 * @BATADV_DBG_ALL: the union of all the above log levels
187 */
169enum batadv_dbg_level { 188enum batadv_dbg_level {
170 BATADV_DBG_BATMAN = BIT(0), 189 BATADV_DBG_BATMAN = BIT(0),
171 BATADV_DBG_ROUTES = BIT(1), /* route added / changed / deleted */ 190 BATADV_DBG_ROUTES = BIT(1),
172 BATADV_DBG_TT = BIT(2), /* translation table operations */ 191 BATADV_DBG_TT = BIT(2),
173 BATADV_DBG_BLA = BIT(3), /* bridge loop avoidance */ 192 BATADV_DBG_BLA = BIT(3),
174 BATADV_DBG_ALL = 15, 193 BATADV_DBG_DAT = BIT(4),
194 BATADV_DBG_ALL = 31,
175}; 195};
176 196
177#ifdef CONFIG_BATMAN_ADV_DEBUG 197#ifdef CONFIG_BATMAN_ADV_DEBUG
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index ac9bdf8f80a6..8c32cf1c2dec 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include "main.h" 20#include "main.h"
21#include "distributed-arp-table.h"
21#include "originator.h" 22#include "originator.h"
22#include "hash.h" 23#include "hash.h"
23#include "translation-table.h" 24#include "translation-table.h"
@@ -220,9 +221,9 @@ struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
220 atomic_set(&orig_node->refcount, 2); 221 atomic_set(&orig_node->refcount, 2);
221 222
222 orig_node->tt_initialised = false; 223 orig_node->tt_initialised = false;
223 orig_node->tt_poss_change = false;
224 orig_node->bat_priv = bat_priv; 224 orig_node->bat_priv = bat_priv;
225 memcpy(orig_node->orig, addr, ETH_ALEN); 225 memcpy(orig_node->orig, addr, ETH_ALEN);
226 batadv_dat_init_orig_node_addr(orig_node);
226 orig_node->router = NULL; 227 orig_node->router = NULL;
227 orig_node->tt_crc = 0; 228 orig_node->tt_crc = 0;
228 atomic_set(&orig_node->last_ttvn, 0); 229 atomic_set(&orig_node->last_ttvn, 0);
@@ -415,23 +416,10 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
415 int last_seen_msecs; 416 int last_seen_msecs;
416 unsigned long last_seen_jiffies; 417 unsigned long last_seen_jiffies;
417 uint32_t i; 418 uint32_t i;
418 int ret = 0;
419 419
420 primary_if = batadv_primary_if_get_selected(bat_priv); 420 primary_if = batadv_seq_print_text_primary_if_get(seq);
421 421 if (!primary_if)
422 if (!primary_if) {
423 ret = seq_printf(seq,
424 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
425 net_dev->name);
426 goto out;
427 }
428
429 if (primary_if->if_status != BATADV_IF_ACTIVE) {
430 ret = seq_printf(seq,
431 "BATMAN mesh %s disabled - primary interface not active\n",
432 net_dev->name);
433 goto out; 422 goto out;
434 }
435 423
436 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n", 424 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
437 BATADV_SOURCE_VERSION, primary_if->net_dev->name, 425 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
@@ -485,7 +473,7 @@ next:
485out: 473out:
486 if (primary_if) 474 if (primary_if)
487 batadv_hardif_free_ref(primary_if); 475 batadv_hardif_free_ref(primary_if);
488 return ret; 476 return 0;
489} 477}
490 478
491static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node, 479static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node,
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 2d23a14c220e..cb6405bf755c 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -20,17 +20,30 @@
20#ifndef _NET_BATMAN_ADV_PACKET_H_ 20#ifndef _NET_BATMAN_ADV_PACKET_H_
21#define _NET_BATMAN_ADV_PACKET_H_ 21#define _NET_BATMAN_ADV_PACKET_H_
22 22
23#define BATADV_ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
24
25enum batadv_packettype { 23enum batadv_packettype {
26 BATADV_IV_OGM = 0x01, 24 BATADV_IV_OGM = 0x01,
27 BATADV_ICMP = 0x02, 25 BATADV_ICMP = 0x02,
28 BATADV_UNICAST = 0x03, 26 BATADV_UNICAST = 0x03,
29 BATADV_BCAST = 0x04, 27 BATADV_BCAST = 0x04,
30 BATADV_VIS = 0x05, 28 BATADV_VIS = 0x05,
31 BATADV_UNICAST_FRAG = 0x06, 29 BATADV_UNICAST_FRAG = 0x06,
32 BATADV_TT_QUERY = 0x07, 30 BATADV_TT_QUERY = 0x07,
33 BATADV_ROAM_ADV = 0x08, 31 BATADV_ROAM_ADV = 0x08,
32 BATADV_UNICAST_4ADDR = 0x09,
33};
34
35/**
36 * enum batadv_subtype - packet subtype for unicast4addr
37 * @BATADV_P_DATA: user payload
38 * @BATADV_P_DAT_DHT_GET: DHT request message
39 * @BATADV_P_DAT_DHT_PUT: DHT store message
40 * @BATADV_P_DAT_CACHE_REPLY: ARP reply generated by DAT
41 */
42enum batadv_subtype {
43 BATADV_P_DATA = 0x01,
44 BATADV_P_DAT_DHT_GET = 0x02,
45 BATADV_P_DAT_DHT_PUT = 0x03,
46 BATADV_P_DAT_CACHE_REPLY = 0x04,
34}; 47};
35 48
36/* this file is included by batctl which needs these defines */ 49/* this file is included by batctl which needs these defines */
@@ -106,13 +119,16 @@ struct batadv_bla_claim_dst {
106 uint8_t magic[3]; /* FF:43:05 */ 119 uint8_t magic[3]; /* FF:43:05 */
107 uint8_t type; /* bla_claimframe */ 120 uint8_t type; /* bla_claimframe */
108 __be16 group; /* group id */ 121 __be16 group; /* group id */
109} __packed; 122};
110 123
111struct batadv_header { 124struct batadv_header {
112 uint8_t packet_type; 125 uint8_t packet_type;
113 uint8_t version; /* batman version field */ 126 uint8_t version; /* batman version field */
114 uint8_t ttl; 127 uint8_t ttl;
115} __packed; 128 /* the parent struct has to add a byte after the header to make
129 * everything 4 bytes aligned again
130 */
131};
116 132
117struct batadv_ogm_packet { 133struct batadv_ogm_packet {
118 struct batadv_header header; 134 struct batadv_header header;
@@ -137,7 +153,7 @@ struct batadv_icmp_packet {
137 __be16 seqno; 153 __be16 seqno;
138 uint8_t uid; 154 uint8_t uid;
139 uint8_t reserved; 155 uint8_t reserved;
140} __packed; 156};
141 157
142#define BATADV_RR_LEN 16 158#define BATADV_RR_LEN 16
143 159
@@ -153,13 +169,44 @@ struct batadv_icmp_packet_rr {
153 uint8_t uid; 169 uint8_t uid;
154 uint8_t rr_cur; 170 uint8_t rr_cur;
155 uint8_t rr[BATADV_RR_LEN][ETH_ALEN]; 171 uint8_t rr[BATADV_RR_LEN][ETH_ALEN];
156} __packed; 172};
173
174/* All packet headers in front of an ethernet header have to be completely
175 * divisible by 2 but not by 4 to make the payload after the ethernet
176 * header again 4 bytes boundary aligned.
177 *
178 * A packing of 2 is necessary to avoid extra padding at the end of the struct
179 * caused by a structure member which is larger than two bytes. Otherwise
180 * the structure would not fulfill the previously mentioned rule to avoid the
181 * misalignment of the payload after the ethernet header. It may also lead to
182 * leakage of information when the padding it not initialized before sending.
183 */
184#pragma pack(2)
157 185
158struct batadv_unicast_packet { 186struct batadv_unicast_packet {
159 struct batadv_header header; 187 struct batadv_header header;
160 uint8_t ttvn; /* destination translation table version number */ 188 uint8_t ttvn; /* destination translation table version number */
161 uint8_t dest[ETH_ALEN]; 189 uint8_t dest[ETH_ALEN];
162} __packed; 190 /* "4 bytes boundary + 2 bytes" long to make the payload after the
191 * following ethernet header again 4 bytes boundary aligned
192 */
193};
194
195/**
196 * struct batadv_unicast_4addr_packet - extended unicast packet
197 * @u: common unicast packet header
198 * @src: address of the source
199 * @subtype: packet subtype
200 */
201struct batadv_unicast_4addr_packet {
202 struct batadv_unicast_packet u;
203 uint8_t src[ETH_ALEN];
204 uint8_t subtype;
205 uint8_t reserved;
206 /* "4 bytes boundary + 2 bytes" long to make the payload after the
207 * following ethernet header again 4 bytes boundary aligned
208 */
209};
163 210
164struct batadv_unicast_frag_packet { 211struct batadv_unicast_frag_packet {
165 struct batadv_header header; 212 struct batadv_header header;
@@ -176,7 +223,12 @@ struct batadv_bcast_packet {
176 uint8_t reserved; 223 uint8_t reserved;
177 __be32 seqno; 224 __be32 seqno;
178 uint8_t orig[ETH_ALEN]; 225 uint8_t orig[ETH_ALEN];
179} __packed; 226 /* "4 bytes boundary + 2 bytes" long to make the payload after the
227 * following ethernet header again 4 bytes boundary aligned
228 */
229};
230
231#pragma pack()
180 232
181struct batadv_vis_packet { 233struct batadv_vis_packet {
182 struct batadv_header header; 234 struct batadv_header header;
@@ -187,7 +239,7 @@ struct batadv_vis_packet {
187 uint8_t vis_orig[ETH_ALEN]; /* originator reporting its neighbors */ 239 uint8_t vis_orig[ETH_ALEN]; /* originator reporting its neighbors */
188 uint8_t target_orig[ETH_ALEN]; /* who should receive this packet */ 240 uint8_t target_orig[ETH_ALEN]; /* who should receive this packet */
189 uint8_t sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */ 241 uint8_t sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */
190} __packed; 242};
191 243
192struct batadv_tt_query_packet { 244struct batadv_tt_query_packet {
193 struct batadv_header header; 245 struct batadv_header header;
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 376b4cc6ca82..1aa1722d0187 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -28,6 +28,7 @@
28#include "vis.h" 28#include "vis.h"
29#include "unicast.h" 29#include "unicast.h"
30#include "bridge_loop_avoidance.h" 30#include "bridge_loop_avoidance.h"
31#include "distributed-arp-table.h"
31 32
32static int batadv_route_unicast_packet(struct sk_buff *skb, 33static int batadv_route_unicast_packet(struct sk_buff *skb,
33 struct batadv_hard_iface *recv_if); 34 struct batadv_hard_iface *recv_if);
@@ -284,7 +285,6 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
284{ 285{
285 struct batadv_hard_iface *primary_if = NULL; 286 struct batadv_hard_iface *primary_if = NULL;
286 struct batadv_orig_node *orig_node = NULL; 287 struct batadv_orig_node *orig_node = NULL;
287 struct batadv_neigh_node *router = NULL;
288 struct batadv_icmp_packet_rr *icmp_packet; 288 struct batadv_icmp_packet_rr *icmp_packet;
289 int ret = NET_RX_DROP; 289 int ret = NET_RX_DROP;
290 290
@@ -306,10 +306,6 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
306 if (!orig_node) 306 if (!orig_node)
307 goto out; 307 goto out;
308 308
309 router = batadv_orig_node_get_router(orig_node);
310 if (!router)
311 goto out;
312
313 /* create a copy of the skb, if needed, to modify it. */ 309 /* create a copy of the skb, if needed, to modify it. */
314 if (skb_cow(skb, ETH_HLEN) < 0) 310 if (skb_cow(skb, ETH_HLEN) < 0)
315 goto out; 311 goto out;
@@ -321,14 +317,12 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
321 icmp_packet->msg_type = BATADV_ECHO_REPLY; 317 icmp_packet->msg_type = BATADV_ECHO_REPLY;
322 icmp_packet->header.ttl = BATADV_TTL; 318 icmp_packet->header.ttl = BATADV_TTL;
323 319
324 batadv_send_skb_packet(skb, router->if_incoming, router->addr); 320 if (batadv_send_skb_to_orig(skb, orig_node, NULL))
325 ret = NET_RX_SUCCESS; 321 ret = NET_RX_SUCCESS;
326 322
327out: 323out:
328 if (primary_if) 324 if (primary_if)
329 batadv_hardif_free_ref(primary_if); 325 batadv_hardif_free_ref(primary_if);
330 if (router)
331 batadv_neigh_node_free_ref(router);
332 if (orig_node) 326 if (orig_node)
333 batadv_orig_node_free_ref(orig_node); 327 batadv_orig_node_free_ref(orig_node);
334 return ret; 328 return ret;
@@ -339,7 +333,6 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
339{ 333{
340 struct batadv_hard_iface *primary_if = NULL; 334 struct batadv_hard_iface *primary_if = NULL;
341 struct batadv_orig_node *orig_node = NULL; 335 struct batadv_orig_node *orig_node = NULL;
342 struct batadv_neigh_node *router = NULL;
343 struct batadv_icmp_packet *icmp_packet; 336 struct batadv_icmp_packet *icmp_packet;
344 int ret = NET_RX_DROP; 337 int ret = NET_RX_DROP;
345 338
@@ -361,10 +354,6 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
361 if (!orig_node) 354 if (!orig_node)
362 goto out; 355 goto out;
363 356
364 router = batadv_orig_node_get_router(orig_node);
365 if (!router)
366 goto out;
367
368 /* create a copy of the skb, if needed, to modify it. */ 357 /* create a copy of the skb, if needed, to modify it. */
369 if (skb_cow(skb, ETH_HLEN) < 0) 358 if (skb_cow(skb, ETH_HLEN) < 0)
370 goto out; 359 goto out;
@@ -376,14 +365,12 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
376 icmp_packet->msg_type = BATADV_TTL_EXCEEDED; 365 icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
377 icmp_packet->header.ttl = BATADV_TTL; 366 icmp_packet->header.ttl = BATADV_TTL;
378 367
379 batadv_send_skb_packet(skb, router->if_incoming, router->addr); 368 if (batadv_send_skb_to_orig(skb, orig_node, NULL))
380 ret = NET_RX_SUCCESS; 369 ret = NET_RX_SUCCESS;
381 370
382out: 371out:
383 if (primary_if) 372 if (primary_if)
384 batadv_hardif_free_ref(primary_if); 373 batadv_hardif_free_ref(primary_if);
385 if (router)
386 batadv_neigh_node_free_ref(router);
387 if (orig_node) 374 if (orig_node)
388 batadv_orig_node_free_ref(orig_node); 375 batadv_orig_node_free_ref(orig_node);
389 return ret; 376 return ret;
@@ -397,7 +384,6 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
397 struct batadv_icmp_packet_rr *icmp_packet; 384 struct batadv_icmp_packet_rr *icmp_packet;
398 struct ethhdr *ethhdr; 385 struct ethhdr *ethhdr;
399 struct batadv_orig_node *orig_node = NULL; 386 struct batadv_orig_node *orig_node = NULL;
400 struct batadv_neigh_node *router = NULL;
401 int hdr_size = sizeof(struct batadv_icmp_packet); 387 int hdr_size = sizeof(struct batadv_icmp_packet);
402 int ret = NET_RX_DROP; 388 int ret = NET_RX_DROP;
403 389
@@ -446,10 +432,6 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
446 if (!orig_node) 432 if (!orig_node)
447 goto out; 433 goto out;
448 434
449 router = batadv_orig_node_get_router(orig_node);
450 if (!router)
451 goto out;
452
453 /* create a copy of the skb, if needed, to modify it. */ 435 /* create a copy of the skb, if needed, to modify it. */
454 if (skb_cow(skb, ETH_HLEN) < 0) 436 if (skb_cow(skb, ETH_HLEN) < 0)
455 goto out; 437 goto out;
@@ -460,12 +442,10 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
460 icmp_packet->header.ttl--; 442 icmp_packet->header.ttl--;
461 443
462 /* route it */ 444 /* route it */
463 batadv_send_skb_packet(skb, router->if_incoming, router->addr); 445 if (batadv_send_skb_to_orig(skb, orig_node, recv_if))
464 ret = NET_RX_SUCCESS; 446 ret = NET_RX_SUCCESS;
465 447
466out: 448out:
467 if (router)
468 batadv_neigh_node_free_ref(router);
469 if (orig_node) 449 if (orig_node)
470 batadv_orig_node_free_ref(orig_node); 450 batadv_orig_node_free_ref(orig_node);
471 return ret; 451 return ret;
@@ -549,25 +529,18 @@ batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
549 if (tmp_neigh_node->if_incoming == recv_if) 529 if (tmp_neigh_node->if_incoming == recv_if)
550 continue; 530 continue;
551 531
552 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) 532 if (router && tmp_neigh_node->tq_avg <= router->tq_avg)
553 continue; 533 continue;
554 534
555 /* if we don't have a router yet 535 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
556 * or this one is better, choose it. 536 continue;
557 */
558 if ((!router) ||
559 (tmp_neigh_node->tq_avg > router->tq_avg)) {
560 /* decrement refcount of
561 * previously selected router
562 */
563 if (router)
564 batadv_neigh_node_free_ref(router);
565 537
566 router = tmp_neigh_node; 538 /* decrement refcount of previously selected router */
567 atomic_inc_not_zero(&router->refcount); 539 if (router)
568 } 540 batadv_neigh_node_free_ref(router);
569 541
570 batadv_neigh_node_free_ref(tmp_neigh_node); 542 /* we found a better router (or at least one valid router) */
543 router = tmp_neigh_node;
571 } 544 }
572 545
573 /* use the first candidate if nothing was found. */ 546 /* use the first candidate if nothing was found. */
@@ -687,21 +660,8 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
687 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface); 660 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
688 struct batadv_roam_adv_packet *roam_adv_packet; 661 struct batadv_roam_adv_packet *roam_adv_packet;
689 struct batadv_orig_node *orig_node; 662 struct batadv_orig_node *orig_node;
690 struct ethhdr *ethhdr;
691
692 /* drop packet if it has not necessary minimum size */
693 if (unlikely(!pskb_may_pull(skb,
694 sizeof(struct batadv_roam_adv_packet))))
695 goto out;
696
697 ethhdr = (struct ethhdr *)skb_mac_header(skb);
698
699 /* packet with unicast indication but broadcast recipient */
700 if (is_broadcast_ether_addr(ethhdr->h_dest))
701 goto out;
702 663
703 /* packet with broadcast sender address */ 664 if (batadv_check_unicast_packet(skb, sizeof(*roam_adv_packet)) < 0)
704 if (is_broadcast_ether_addr(ethhdr->h_source))
705 goto out; 665 goto out;
706 666
707 batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX); 667 batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);
@@ -730,12 +690,6 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
730 BATADV_TT_CLIENT_ROAM, 690 BATADV_TT_CLIENT_ROAM,
731 atomic_read(&orig_node->last_ttvn) + 1); 691 atomic_read(&orig_node->last_ttvn) + 1);
732 692
733 /* Roaming phase starts: I have new information but the ttvn has not
734 * been incremented yet. This flag will make me check all the incoming
735 * packets for the correct destination.
736 */
737 bat_priv->tt.poss_change = true;
738
739 batadv_orig_node_free_ref(orig_node); 693 batadv_orig_node_free_ref(orig_node);
740out: 694out:
741 /* returning NET_RX_DROP will make the caller function kfree the skb */ 695 /* returning NET_RX_DROP will make the caller function kfree the skb */
@@ -907,8 +861,8 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
907 skb->len + ETH_HLEN); 861 skb->len + ETH_HLEN);
908 862
909 /* route it */ 863 /* route it */
910 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 864 if (batadv_send_skb_to_orig(skb, orig_node, recv_if))
911 ret = NET_RX_SUCCESS; 865 ret = NET_RX_SUCCESS;
912 866
913out: 867out:
914 if (neigh_node) 868 if (neigh_node)
@@ -918,80 +872,161 @@ out:
918 return ret; 872 return ret;
919} 873}
920 874
875/**
876 * batadv_reroute_unicast_packet - update the unicast header for re-routing
877 * @bat_priv: the bat priv with all the soft interface information
878 * @unicast_packet: the unicast header to be updated
879 * @dst_addr: the payload destination
880 *
881 * Search the translation table for dst_addr and update the unicast header with
882 * the new corresponding information (originator address where the destination
883 * client currently is and its known TTVN)
884 *
885 * Returns true if the packet header has been updated, false otherwise
886 */
887static bool
888batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
889 struct batadv_unicast_packet *unicast_packet,
890 uint8_t *dst_addr)
891{
892 struct batadv_orig_node *orig_node = NULL;
893 struct batadv_hard_iface *primary_if = NULL;
894 bool ret = false;
895 uint8_t *orig_addr, orig_ttvn;
896
897 if (batadv_is_my_client(bat_priv, dst_addr)) {
898 primary_if = batadv_primary_if_get_selected(bat_priv);
899 if (!primary_if)
900 goto out;
901 orig_addr = primary_if->net_dev->dev_addr;
902 orig_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
903 } else {
904 orig_node = batadv_transtable_search(bat_priv, NULL, dst_addr);
905 if (!orig_node)
906 goto out;
907
908 if (batadv_compare_eth(orig_node->orig, unicast_packet->dest))
909 goto out;
910
911 orig_addr = orig_node->orig;
912 orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
913 }
914
915 /* update the packet header */
916 memcpy(unicast_packet->dest, orig_addr, ETH_ALEN);
917 unicast_packet->ttvn = orig_ttvn;
918
919 ret = true;
920out:
921 if (primary_if)
922 batadv_hardif_free_ref(primary_if);
923 if (orig_node)
924 batadv_orig_node_free_ref(orig_node);
925
926 return ret;
927}
928
921static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, 929static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
922 struct sk_buff *skb) { 930 struct sk_buff *skb) {
923 uint8_t curr_ttvn; 931 uint8_t curr_ttvn, old_ttvn;
924 struct batadv_orig_node *orig_node; 932 struct batadv_orig_node *orig_node;
925 struct ethhdr *ethhdr; 933 struct ethhdr *ethhdr;
926 struct batadv_hard_iface *primary_if; 934 struct batadv_hard_iface *primary_if;
927 struct batadv_unicast_packet *unicast_packet; 935 struct batadv_unicast_packet *unicast_packet;
928 bool tt_poss_change;
929 int is_old_ttvn; 936 int is_old_ttvn;
930 937
931 /* I could need to modify it */ 938 /* check if there is enough data before accessing it */
932 if (skb_cow(skb, sizeof(struct batadv_unicast_packet)) < 0) 939 if (pskb_may_pull(skb, sizeof(*unicast_packet) + ETH_HLEN) < 0)
940 return 0;
941
942 /* create a copy of the skb (in case of for re-routing) to modify it. */
943 if (skb_cow(skb, sizeof(*unicast_packet)) < 0)
933 return 0; 944 return 0;
934 945
935 unicast_packet = (struct batadv_unicast_packet *)skb->data; 946 unicast_packet = (struct batadv_unicast_packet *)skb->data;
947 ethhdr = (struct ethhdr *)(skb->data + sizeof(*unicast_packet));
936 948
937 if (batadv_is_my_mac(unicast_packet->dest)) { 949 /* check if the destination client was served by this node and it is now
938 tt_poss_change = bat_priv->tt.poss_change; 950 * roaming. In this case, it means that the node has got a ROAM_ADV
939 curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn); 951 * message and that it knows the new destination in the mesh to re-route
940 } else { 952 * the packet to
953 */
954 if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest)) {
955 if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
956 ethhdr->h_dest))
957 net_ratelimited_function(batadv_dbg, BATADV_DBG_TT,
958 bat_priv,
959 "Rerouting unicast packet to %pM (dst=%pM): Local Roaming\n",
960 unicast_packet->dest,
961 ethhdr->h_dest);
962 /* at this point the mesh destination should have been
963 * substituted with the originator address found in the global
964 * table. If not, let the packet go untouched anyway because
965 * there is nothing the node can do
966 */
967 return 1;
968 }
969
970 /* retrieve the TTVN known by this node for the packet destination. This
971 * value is used later to check if the node which sent (or re-routed
972 * last time) the packet had an updated information or not
973 */
974 curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
975 if (!batadv_is_my_mac(unicast_packet->dest)) {
941 orig_node = batadv_orig_hash_find(bat_priv, 976 orig_node = batadv_orig_hash_find(bat_priv,
942 unicast_packet->dest); 977 unicast_packet->dest);
943 978 /* if it is not possible to find the orig_node representing the
979 * destination, the packet can immediately be dropped as it will
980 * not be possible to deliver it
981 */
944 if (!orig_node) 982 if (!orig_node)
945 return 0; 983 return 0;
946 984
947 curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); 985 curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
948 tt_poss_change = orig_node->tt_poss_change;
949 batadv_orig_node_free_ref(orig_node); 986 batadv_orig_node_free_ref(orig_node);
950 } 987 }
951 988
952 /* Check whether I have to reroute the packet */ 989 /* check if the TTVN contained in the packet is fresher than what the
990 * node knows
991 */
953 is_old_ttvn = batadv_seq_before(unicast_packet->ttvn, curr_ttvn); 992 is_old_ttvn = batadv_seq_before(unicast_packet->ttvn, curr_ttvn);
954 if (is_old_ttvn || tt_poss_change) { 993 if (!is_old_ttvn)
955 /* check if there is enough data before accessing it */ 994 return 1;
956 if (pskb_may_pull(skb, sizeof(struct batadv_unicast_packet) +
957 ETH_HLEN) < 0)
958 return 0;
959 995
960 ethhdr = (struct ethhdr *)(skb->data + sizeof(*unicast_packet)); 996 old_ttvn = unicast_packet->ttvn;
997 /* the packet was forged based on outdated network information. Its
998 * destination can possibly be updated and forwarded towards the new
999 * target host
1000 */
1001 if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
1002 ethhdr->h_dest)) {
1003 net_ratelimited_function(batadv_dbg, BATADV_DBG_TT, bat_priv,
1004 "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
1005 unicast_packet->dest, ethhdr->h_dest,
1006 old_ttvn, curr_ttvn);
1007 return 1;
1008 }
961 1009
962 /* we don't have an updated route for this client, so we should 1010 /* the packet has not been re-routed: either the destination is
963 * not try to reroute the packet!! 1011 * currently served by this node or there is no destination at all and
964 */ 1012 * it is possible to drop the packet
965 if (batadv_tt_global_client_is_roaming(bat_priv, 1013 */
966 ethhdr->h_dest)) 1014 if (!batadv_is_my_client(bat_priv, ethhdr->h_dest))
967 return 1; 1015 return 0;
968 1016
969 orig_node = batadv_transtable_search(bat_priv, NULL, 1017 /* update the header in order to let the packet be delivered to this
970 ethhdr->h_dest); 1018 * node's soft interface
971 1019 */
972 if (!orig_node) { 1020 primary_if = batadv_primary_if_get_selected(bat_priv);
973 if (!batadv_is_my_client(bat_priv, ethhdr->h_dest)) 1021 if (!primary_if)
974 return 0; 1022 return 0;
975 primary_if = batadv_primary_if_get_selected(bat_priv);
976 if (!primary_if)
977 return 0;
978 memcpy(unicast_packet->dest,
979 primary_if->net_dev->dev_addr, ETH_ALEN);
980 batadv_hardif_free_ref(primary_if);
981 } else {
982 memcpy(unicast_packet->dest, orig_node->orig,
983 ETH_ALEN);
984 curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
985 batadv_orig_node_free_ref(orig_node);
986 }
987 1023
988 batadv_dbg(BATADV_DBG_ROUTES, bat_priv, 1024 memcpy(unicast_packet->dest, primary_if->net_dev->dev_addr, ETH_ALEN);
989 "TTVN mismatch (old_ttvn %u new_ttvn %u)! Rerouting unicast packet (for %pM) to %pM\n", 1025
990 unicast_packet->ttvn, curr_ttvn, ethhdr->h_dest, 1026 batadv_hardif_free_ref(primary_if);
991 unicast_packet->dest); 1027
1028 unicast_packet->ttvn = curr_ttvn;
992 1029
993 unicast_packet->ttvn = curr_ttvn;
994 }
995 return 1; 1030 return 1;
996} 1031}
997 1032
@@ -1000,7 +1035,19 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
1000{ 1035{
1001 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface); 1036 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1002 struct batadv_unicast_packet *unicast_packet; 1037 struct batadv_unicast_packet *unicast_packet;
1038 struct batadv_unicast_4addr_packet *unicast_4addr_packet;
1039 uint8_t *orig_addr;
1040 struct batadv_orig_node *orig_node = NULL;
1003 int hdr_size = sizeof(*unicast_packet); 1041 int hdr_size = sizeof(*unicast_packet);
1042 bool is4addr;
1043
1044 unicast_packet = (struct batadv_unicast_packet *)skb->data;
1045 unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
1046
1047 is4addr = unicast_packet->header.packet_type == BATADV_UNICAST_4ADDR;
1048 /* the caller function should have already pulled 2 bytes */
1049 if (is4addr)
1050 hdr_size = sizeof(*unicast_4addr_packet);
1004 1051
1005 if (batadv_check_unicast_packet(skb, hdr_size) < 0) 1052 if (batadv_check_unicast_packet(skb, hdr_size) < 0)
1006 return NET_RX_DROP; 1053 return NET_RX_DROP;
@@ -1008,12 +1055,28 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
1008 if (!batadv_check_unicast_ttvn(bat_priv, skb)) 1055 if (!batadv_check_unicast_ttvn(bat_priv, skb))
1009 return NET_RX_DROP; 1056 return NET_RX_DROP;
1010 1057
1011 unicast_packet = (struct batadv_unicast_packet *)skb->data;
1012
1013 /* packet for me */ 1058 /* packet for me */
1014 if (batadv_is_my_mac(unicast_packet->dest)) { 1059 if (batadv_is_my_mac(unicast_packet->dest)) {
1060 if (is4addr) {
1061 batadv_dat_inc_counter(bat_priv,
1062 unicast_4addr_packet->subtype);
1063 orig_addr = unicast_4addr_packet->src;
1064 orig_node = batadv_orig_hash_find(bat_priv, orig_addr);
1065 }
1066
1067 if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb,
1068 hdr_size))
1069 goto rx_success;
1070 if (batadv_dat_snoop_incoming_arp_reply(bat_priv, skb,
1071 hdr_size))
1072 goto rx_success;
1073
1015 batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size, 1074 batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
1016 NULL); 1075 orig_node);
1076
1077rx_success:
1078 if (orig_node)
1079 batadv_orig_node_free_ref(orig_node);
1017 1080
1018 return NET_RX_SUCCESS; 1081 return NET_RX_SUCCESS;
1019 } 1082 }
@@ -1050,8 +1113,17 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
1050 if (!new_skb) 1113 if (!new_skb)
1051 return NET_RX_SUCCESS; 1114 return NET_RX_SUCCESS;
1052 1115
1116 if (batadv_dat_snoop_incoming_arp_request(bat_priv, new_skb,
1117 hdr_size))
1118 goto rx_success;
1119 if (batadv_dat_snoop_incoming_arp_reply(bat_priv, new_skb,
1120 hdr_size))
1121 goto rx_success;
1122
1053 batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if, 1123 batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if,
1054 sizeof(struct batadv_unicast_packet), NULL); 1124 sizeof(struct batadv_unicast_packet), NULL);
1125
1126rx_success:
1055 return NET_RX_SUCCESS; 1127 return NET_RX_SUCCESS;
1056 } 1128 }
1057 1129
@@ -1124,14 +1196,8 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
1124 1196
1125 spin_unlock_bh(&orig_node->bcast_seqno_lock); 1197 spin_unlock_bh(&orig_node->bcast_seqno_lock);
1126 1198
1127 /* keep skb linear for crc calculation */
1128 if (skb_linearize(skb) < 0)
1129 goto out;
1130
1131 bcast_packet = (struct batadv_bcast_packet *)skb->data;
1132
1133 /* check whether this has been sent by another originator before */ 1199 /* check whether this has been sent by another originator before */
1134 if (batadv_bla_check_bcast_duplist(bat_priv, bcast_packet, skb->len)) 1200 if (batadv_bla_check_bcast_duplist(bat_priv, skb))
1135 goto out; 1201 goto out;
1136 1202
1137 /* rebroadcast packet */ 1203 /* rebroadcast packet */
@@ -1143,9 +1209,16 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
1143 if (batadv_bla_is_backbone_gw(skb, orig_node, hdr_size)) 1209 if (batadv_bla_is_backbone_gw(skb, orig_node, hdr_size))
1144 goto out; 1210 goto out;
1145 1211
1212 if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb, hdr_size))
1213 goto rx_success;
1214 if (batadv_dat_snoop_incoming_arp_reply(bat_priv, skb, hdr_size))
1215 goto rx_success;
1216
1146 /* broadcast for me */ 1217 /* broadcast for me */
1147 batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size, 1218 batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
1148 orig_node); 1219 orig_node);
1220
1221rx_success:
1149 ret = NET_RX_SUCCESS; 1222 ret = NET_RX_SUCCESS;
1150 goto out; 1223 goto out;
1151 1224
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 570a8bce0364..4425af9dad40 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include "main.h" 20#include "main.h"
21#include "distributed-arp-table.h"
21#include "send.h" 22#include "send.h"
22#include "routing.h" 23#include "routing.h"
23#include "translation-table.h" 24#include "translation-table.h"
@@ -27,6 +28,8 @@
27#include "gateway_common.h" 28#include "gateway_common.h"
28#include "originator.h" 29#include "originator.h"
29 30
31#include <linux/if_ether.h>
32
30static void batadv_send_outstanding_bcast_packet(struct work_struct *work); 33static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
31 34
32/* send out an already prepared packet to the given address via the 35/* send out an already prepared packet to the given address via the
@@ -59,11 +62,11 @@ int batadv_send_skb_packet(struct sk_buff *skb,
59 ethhdr = (struct ethhdr *)skb_mac_header(skb); 62 ethhdr = (struct ethhdr *)skb_mac_header(skb);
60 memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN); 63 memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
61 memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN); 64 memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
62 ethhdr->h_proto = __constant_htons(BATADV_ETH_P_BATMAN); 65 ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
63 66
64 skb_set_network_header(skb, ETH_HLEN); 67 skb_set_network_header(skb, ETH_HLEN);
65 skb->priority = TC_PRIO_CONTROL; 68 skb->priority = TC_PRIO_CONTROL;
66 skb->protocol = __constant_htons(BATADV_ETH_P_BATMAN); 69 skb->protocol = __constant_htons(ETH_P_BATMAN);
67 70
68 skb->dev = hard_iface->net_dev; 71 skb->dev = hard_iface->net_dev;
69 72
@@ -77,6 +80,39 @@ send_skb_err:
77 return NET_XMIT_DROP; 80 return NET_XMIT_DROP;
78} 81}
79 82
83/**
84 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
85 * @skb: Packet to be transmitted.
86 * @orig_node: Final destination of the packet.
87 * @recv_if: Interface used when receiving the packet (can be NULL).
88 *
89 * Looks up the best next-hop towards the passed originator and passes the
90 * skb on for preparation of MAC header. If the packet originated from this
91 * host, NULL can be passed as recv_if and no interface alternating is
92 * attempted.
93 *
94 * Returns TRUE on success; FALSE otherwise.
95 */
96bool batadv_send_skb_to_orig(struct sk_buff *skb,
97 struct batadv_orig_node *orig_node,
98 struct batadv_hard_iface *recv_if)
99{
100 struct batadv_priv *bat_priv = orig_node->bat_priv;
101 struct batadv_neigh_node *neigh_node;
102
103 /* batadv_find_router() increases neigh_nodes refcount if found. */
104 neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
105 if (!neigh_node)
106 return false;
107
108 /* route it */
109 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
110
111 batadv_neigh_node_free_ref(neigh_node);
112
113 return true;
114}
115
80void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface) 116void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
81{ 117{
82 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 118 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
@@ -209,6 +245,9 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
209 if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) 245 if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
210 goto out; 246 goto out;
211 247
248 if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
249 goto out;
250
212 /* rebroadcast packet */ 251 /* rebroadcast packet */
213 rcu_read_lock(); 252 rcu_read_lock();
214 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { 253 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 643329b787ed..0078dece1abc 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -23,6 +23,9 @@
23int batadv_send_skb_packet(struct sk_buff *skb, 23int batadv_send_skb_packet(struct sk_buff *skb,
24 struct batadv_hard_iface *hard_iface, 24 struct batadv_hard_iface *hard_iface,
25 const uint8_t *dst_addr); 25 const uint8_t *dst_addr);
26bool batadv_send_skb_to_orig(struct sk_buff *skb,
27 struct batadv_orig_node *orig_node,
28 struct batadv_hard_iface *recv_if);
26void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface); 29void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface);
27int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv, 30int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
28 const struct sk_buff *skb, 31 const struct sk_buff *skb,
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index b9a28d2dd3e8..6b548fde8e04 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -20,6 +20,7 @@
20#include "main.h" 20#include "main.h"
21#include "soft-interface.h" 21#include "soft-interface.h"
22#include "hard-interface.h" 22#include "hard-interface.h"
23#include "distributed-arp-table.h"
23#include "routing.h" 24#include "routing.h"
24#include "send.h" 25#include "send.h"
25#include "debugfs.h" 26#include "debugfs.h"
@@ -33,6 +34,7 @@
33#include <linux/ethtool.h> 34#include <linux/ethtool.h>
34#include <linux/etherdevice.h> 35#include <linux/etherdevice.h>
35#include <linux/if_vlan.h> 36#include <linux/if_vlan.h>
37#include <linux/if_ether.h>
36#include "unicast.h" 38#include "unicast.h"
37#include "bridge_loop_avoidance.h" 39#include "bridge_loop_avoidance.h"
38 40
@@ -145,14 +147,17 @@ static int batadv_interface_tx(struct sk_buff *skb,
145 struct batadv_hard_iface *primary_if = NULL; 147 struct batadv_hard_iface *primary_if = NULL;
146 struct batadv_bcast_packet *bcast_packet; 148 struct batadv_bcast_packet *bcast_packet;
147 struct vlan_ethhdr *vhdr; 149 struct vlan_ethhdr *vhdr;
148 __be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN); 150 __be16 ethertype = __constant_htons(ETH_P_BATMAN);
149 static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 151 static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
150 0x00}; 152 0x00, 0x00};
153 static const uint8_t ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
154 0x00, 0x00};
151 unsigned int header_len = 0; 155 unsigned int header_len = 0;
152 int data_len = skb->len, ret; 156 int data_len = skb->len, ret;
153 short vid __maybe_unused = -1; 157 short vid __maybe_unused = -1;
154 bool do_bcast = false; 158 bool do_bcast = false;
155 uint32_t seqno; 159 uint32_t seqno;
160 unsigned long brd_delay = 1;
156 161
157 if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE) 162 if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
158 goto dropped; 163 goto dropped;
@@ -168,7 +173,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
168 break; 173 break;
169 174
170 /* fall through */ 175 /* fall through */
171 case BATADV_ETH_P_BATMAN: 176 case ETH_P_BATMAN:
172 goto dropped; 177 goto dropped;
173 } 178 }
174 179
@@ -180,10 +185,16 @@ static int batadv_interface_tx(struct sk_buff *skb,
180 185
181 /* don't accept stp packets. STP does not help in meshes. 186 /* don't accept stp packets. STP does not help in meshes.
182 * better use the bridge loop avoidance ... 187 * better use the bridge loop avoidance ...
188 *
189 * The same goes for ECTP sent at least by some Cisco Switches,
190 * it might confuse the mesh when used with bridge loop avoidance.
183 */ 191 */
184 if (batadv_compare_eth(ethhdr->h_dest, stp_addr)) 192 if (batadv_compare_eth(ethhdr->h_dest, stp_addr))
185 goto dropped; 193 goto dropped;
186 194
195 if (batadv_compare_eth(ethhdr->h_dest, ectp_addr))
196 goto dropped;
197
187 if (is_multicast_ether_addr(ethhdr->h_dest)) { 198 if (is_multicast_ether_addr(ethhdr->h_dest)) {
188 do_bcast = true; 199 do_bcast = true;
189 200
@@ -216,6 +227,13 @@ static int batadv_interface_tx(struct sk_buff *skb,
216 if (!primary_if) 227 if (!primary_if)
217 goto dropped; 228 goto dropped;
218 229
230 /* in case of ARP request, we do not immediately broadcasti the
231 * packet, instead we first wait for DAT to try to retrieve the
232 * correct ARP entry
233 */
234 if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb))
235 brd_delay = msecs_to_jiffies(ARP_REQ_DELAY);
236
219 if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0) 237 if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
220 goto dropped; 238 goto dropped;
221 239
@@ -237,7 +255,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
237 seqno = atomic_inc_return(&bat_priv->bcast_seqno); 255 seqno = atomic_inc_return(&bat_priv->bcast_seqno);
238 bcast_packet->seqno = htonl(seqno); 256 bcast_packet->seqno = htonl(seqno);
239 257
240 batadv_add_bcast_packet_to_list(bat_priv, skb, 1); 258 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
241 259
242 /* a copy is stored in the bcast list, therefore removing 260 /* a copy is stored in the bcast list, therefore removing
243 * the original skb. 261 * the original skb.
@@ -252,7 +270,12 @@ static int batadv_interface_tx(struct sk_buff *skb,
252 goto dropped; 270 goto dropped;
253 } 271 }
254 272
255 ret = batadv_unicast_send_skb(skb, bat_priv); 273 if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb))
274 goto dropped;
275
276 batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb);
277
278 ret = batadv_unicast_send_skb(bat_priv, skb);
256 if (ret != 0) 279 if (ret != 0)
257 goto dropped_freed; 280 goto dropped_freed;
258 } 281 }
@@ -280,7 +303,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
280 struct vlan_ethhdr *vhdr; 303 struct vlan_ethhdr *vhdr;
281 struct batadv_header *batadv_header = (struct batadv_header *)skb->data; 304 struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
282 short vid __maybe_unused = -1; 305 short vid __maybe_unused = -1;
283 __be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN); 306 __be16 ethertype = __constant_htons(ETH_P_BATMAN);
284 bool is_bcast; 307 bool is_bcast;
285 308
286 is_bcast = (batadv_header->packet_type == BATADV_BCAST); 309 is_bcast = (batadv_header->packet_type == BATADV_BCAST);
@@ -303,7 +326,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
303 break; 326 break;
304 327
305 /* fall through */ 328 /* fall through */
306 case BATADV_ETH_P_BATMAN: 329 case ETH_P_BATMAN:
307 goto dropped; 330 goto dropped;
308 } 331 }
309 332
@@ -325,6 +348,12 @@ void batadv_interface_rx(struct net_device *soft_iface,
325 348
326 soft_iface->last_rx = jiffies; 349 soft_iface->last_rx = jiffies;
327 350
351 /* Let the bridge loop avoidance check the packet. If will
352 * not handle it, we can safely push it up.
353 */
354 if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
355 goto out;
356
328 if (orig_node) 357 if (orig_node)
329 batadv_tt_add_temporary_global_entry(bat_priv, orig_node, 358 batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
330 ethhdr->h_source); 359 ethhdr->h_source);
@@ -332,12 +361,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
332 if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest)) 361 if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
333 goto dropped; 362 goto dropped;
334 363
335 /* Let the bridge loop avoidance check the packet. If will
336 * not handle it, we can safely push it up.
337 */
338 if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
339 goto out;
340
341 netif_rx(skb); 364 netif_rx(skb);
342 goto out; 365 goto out;
343 366
@@ -347,7 +370,51 @@ out:
347 return; 370 return;
348} 371}
349 372
373/* batman-adv network devices have devices nesting below it and are a special
374 * "super class" of normal network devices; split their locks off into a
375 * separate class since they always nest.
376 */
377static struct lock_class_key batadv_netdev_xmit_lock_key;
378static struct lock_class_key batadv_netdev_addr_lock_key;
379
380/**
381 * batadv_set_lockdep_class_one - Set lockdep class for a single tx queue
382 * @dev: device which owns the tx queue
383 * @txq: tx queue to modify
384 * @_unused: always NULL
385 */
386static void batadv_set_lockdep_class_one(struct net_device *dev,
387 struct netdev_queue *txq,
388 void *_unused)
389{
390 lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key);
391}
392
393/**
394 * batadv_set_lockdep_class - Set txq and addr_list lockdep class
395 * @dev: network device to modify
396 */
397static void batadv_set_lockdep_class(struct net_device *dev)
398{
399 lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key);
400 netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
401}
402
403/**
404 * batadv_softif_init - Late stage initialization of soft interface
405 * @dev: registered network device to modify
406 *
407 * Returns error code on failures
408 */
409static int batadv_softif_init(struct net_device *dev)
410{
411 batadv_set_lockdep_class(dev);
412
413 return 0;
414}
415
350static const struct net_device_ops batadv_netdev_ops = { 416static const struct net_device_ops batadv_netdev_ops = {
417 .ndo_init = batadv_softif_init,
351 .ndo_open = batadv_interface_open, 418 .ndo_open = batadv_interface_open,
352 .ndo_stop = batadv_interface_release, 419 .ndo_stop = batadv_interface_release,
353 .ndo_get_stats = batadv_interface_stats, 420 .ndo_get_stats = batadv_interface_stats,
@@ -414,6 +481,9 @@ struct net_device *batadv_softif_create(const char *name)
414 atomic_set(&bat_priv->aggregated_ogms, 1); 481 atomic_set(&bat_priv->aggregated_ogms, 1);
415 atomic_set(&bat_priv->bonding, 0); 482 atomic_set(&bat_priv->bonding, 0);
416 atomic_set(&bat_priv->bridge_loop_avoidance, 0); 483 atomic_set(&bat_priv->bridge_loop_avoidance, 0);
484#ifdef CONFIG_BATMAN_ADV_DAT
485 atomic_set(&bat_priv->distributed_arp_table, 1);
486#endif
417 atomic_set(&bat_priv->ap_isolation, 0); 487 atomic_set(&bat_priv->ap_isolation, 0);
418 atomic_set(&bat_priv->vis_mode, BATADV_VIS_TYPE_CLIENT_UPDATE); 488 atomic_set(&bat_priv->vis_mode, BATADV_VIS_TYPE_CLIENT_UPDATE);
419 atomic_set(&bat_priv->gw_mode, BATADV_GW_MODE_OFF); 489 atomic_set(&bat_priv->gw_mode, BATADV_GW_MODE_OFF);
@@ -436,7 +506,6 @@ struct net_device *batadv_softif_create(const char *name)
436#endif 506#endif
437 bat_priv->tt.last_changeset = NULL; 507 bat_priv->tt.last_changeset = NULL;
438 bat_priv->tt.last_changeset_len = 0; 508 bat_priv->tt.last_changeset_len = 0;
439 bat_priv->tt.poss_change = false;
440 509
441 bat_priv->primary_if = NULL; 510 bat_priv->primary_if = NULL;
442 bat_priv->num_ifaces = 0; 511 bat_priv->num_ifaces = 0;
@@ -556,6 +625,13 @@ static const struct {
556 { "tt_response_rx" }, 625 { "tt_response_rx" },
557 { "tt_roam_adv_tx" }, 626 { "tt_roam_adv_tx" },
558 { "tt_roam_adv_rx" }, 627 { "tt_roam_adv_rx" },
628#ifdef CONFIG_BATMAN_ADV_DAT
629 { "dat_get_tx" },
630 { "dat_get_rx" },
631 { "dat_put_tx" },
632 { "dat_put_rx" },
633 { "dat_cached_reply_tx" },
634#endif
559}; 635};
560 636
561static void batadv_get_strings(struct net_device *dev, uint32_t stringset, 637static void batadv_get_strings(struct net_device *dev, uint32_t stringset,
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index 66518c75c217..84a55cb19b0b 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -20,6 +20,7 @@
20#include "main.h" 20#include "main.h"
21#include "sysfs.h" 21#include "sysfs.h"
22#include "translation-table.h" 22#include "translation-table.h"
23#include "distributed-arp-table.h"
23#include "originator.h" 24#include "originator.h"
24#include "hard-interface.h" 25#include "hard-interface.h"
25#include "gateway_common.h" 26#include "gateway_common.h"
@@ -122,55 +123,6 @@ ssize_t batadv_show_##_name(struct kobject *kobj, \
122 batadv_store_##_name) 123 batadv_store_##_name)
123 124
124 125
125#define BATADV_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \
126ssize_t batadv_store_##_name(struct kobject *kobj, \
127 struct attribute *attr, char *buff, \
128 size_t count) \
129{ \
130 struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
131 struct batadv_hard_iface *hard_iface; \
132 ssize_t length; \
133 \
134 hard_iface = batadv_hardif_get_by_netdev(net_dev); \
135 if (!hard_iface) \
136 return 0; \
137 \
138 length = __batadv_store_uint_attr(buff, count, _min, _max, \
139 _post_func, attr, \
140 &hard_iface->_name, net_dev); \
141 \
142 batadv_hardif_free_ref(hard_iface); \
143 return length; \
144}
145
146#define BATADV_ATTR_HIF_SHOW_UINT(_name) \
147ssize_t batadv_show_##_name(struct kobject *kobj, \
148 struct attribute *attr, char *buff) \
149{ \
150 struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
151 struct batadv_hard_iface *hard_iface; \
152 ssize_t length; \
153 \
154 hard_iface = batadv_hardif_get_by_netdev(net_dev); \
155 if (!hard_iface) \
156 return 0; \
157 \
158 length = sprintf(buff, "%i\n", atomic_read(&hard_iface->_name));\
159 \
160 batadv_hardif_free_ref(hard_iface); \
161 return length; \
162}
163
164/* Use this, if you are going to set [name] in hard_iface to an
165 * unsigned integer value
166 */
167#define BATADV_ATTR_HIF_UINT(_name, _mode, _min, _max, _post_func) \
168 static BATADV_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func)\
169 static BATADV_ATTR_HIF_SHOW_UINT(_name) \
170 static BATADV_ATTR(_name, _mode, batadv_show_##_name, \
171 batadv_store_##_name)
172
173
174static int batadv_store_bool_attr(char *buff, size_t count, 126static int batadv_store_bool_attr(char *buff, size_t count,
175 struct net_device *net_dev, 127 struct net_device *net_dev,
176 const char *attr_name, atomic_t *attr) 128 const char *attr_name, atomic_t *attr)
@@ -469,6 +421,9 @@ BATADV_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
469#ifdef CONFIG_BATMAN_ADV_BLA 421#ifdef CONFIG_BATMAN_ADV_BLA
470BATADV_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL); 422BATADV_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
471#endif 423#endif
424#ifdef CONFIG_BATMAN_ADV_DAT
425BATADV_ATTR_SIF_BOOL(distributed_arp_table, S_IRUGO | S_IWUSR, NULL);
426#endif
472BATADV_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, batadv_update_min_mtu); 427BATADV_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, batadv_update_min_mtu);
473BATADV_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL); 428BATADV_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
474static BATADV_ATTR(vis_mode, S_IRUGO | S_IWUSR, batadv_show_vis_mode, 429static BATADV_ATTR(vis_mode, S_IRUGO | S_IWUSR, batadv_show_vis_mode,
@@ -494,6 +449,9 @@ static struct batadv_attribute *batadv_mesh_attrs[] = {
494#ifdef CONFIG_BATMAN_ADV_BLA 449#ifdef CONFIG_BATMAN_ADV_BLA
495 &batadv_attr_bridge_loop_avoidance, 450 &batadv_attr_bridge_loop_avoidance,
496#endif 451#endif
452#ifdef CONFIG_BATMAN_ADV_DAT
453 &batadv_attr_distributed_arp_table,
454#endif
497 &batadv_attr_fragmentation, 455 &batadv_attr_fragmentation,
498 &batadv_attr_ap_isolation, 456 &batadv_attr_ap_isolation,
499 &batadv_attr_vis_mode, 457 &batadv_attr_vis_mode,
@@ -730,7 +688,7 @@ int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
730 enum batadv_uev_action action, const char *data) 688 enum batadv_uev_action action, const char *data)
731{ 689{
732 int ret = -ENOMEM; 690 int ret = -ENOMEM;
733 struct batadv_hard_iface *primary_if = NULL; 691 struct batadv_hard_iface *primary_if;
734 struct kobject *bat_kobj; 692 struct kobject *bat_kobj;
735 char *uevent_env[4] = { NULL, NULL, NULL, NULL }; 693 char *uevent_env[4] = { NULL, NULL, NULL, NULL };
736 694
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 112edd371b2f..22457a7952ba 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -238,92 +238,134 @@ static int batadv_tt_local_init(struct batadv_priv *bat_priv)
238 return 0; 238 return 0;
239} 239}
240 240
241static void batadv_tt_global_free(struct batadv_priv *bat_priv,
242 struct batadv_tt_global_entry *tt_global,
243 const char *message)
244{
245 batadv_dbg(BATADV_DBG_TT, bat_priv,
246 "Deleting global tt entry %pM: %s\n",
247 tt_global->common.addr, message);
248
249 batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
250 batadv_choose_orig, tt_global->common.addr);
251 batadv_tt_global_entry_free_ref(tt_global);
252
253}
254
241void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, 255void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
242 int ifindex) 256 int ifindex)
243{ 257{
244 struct batadv_priv *bat_priv = netdev_priv(soft_iface); 258 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
245 struct batadv_tt_local_entry *tt_local_entry = NULL; 259 struct batadv_tt_local_entry *tt_local;
246 struct batadv_tt_global_entry *tt_global_entry = NULL; 260 struct batadv_tt_global_entry *tt_global;
247 struct hlist_head *head; 261 struct hlist_head *head;
248 struct hlist_node *node; 262 struct hlist_node *node;
249 struct batadv_tt_orig_list_entry *orig_entry; 263 struct batadv_tt_orig_list_entry *orig_entry;
250 int hash_added; 264 int hash_added;
265 bool roamed_back = false;
251 266
252 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr); 267 tt_local = batadv_tt_local_hash_find(bat_priv, addr);
268 tt_global = batadv_tt_global_hash_find(bat_priv, addr);
253 269
254 if (tt_local_entry) { 270 if (tt_local) {
255 tt_local_entry->last_seen = jiffies; 271 tt_local->last_seen = jiffies;
256 /* possibly unset the BATADV_TT_CLIENT_PENDING flag */ 272 if (tt_local->common.flags & BATADV_TT_CLIENT_PENDING) {
257 tt_local_entry->common.flags &= ~BATADV_TT_CLIENT_PENDING; 273 batadv_dbg(BATADV_DBG_TT, bat_priv,
258 goto out; 274 "Re-adding pending client %pM\n", addr);
275 /* whatever the reason why the PENDING flag was set,
276 * this is a client which was enqueued to be removed in
277 * this orig_interval. Since it popped up again, the
278 * flag can be reset like it was never enqueued
279 */
280 tt_local->common.flags &= ~BATADV_TT_CLIENT_PENDING;
281 goto add_event;
282 }
283
284 if (tt_local->common.flags & BATADV_TT_CLIENT_ROAM) {
285 batadv_dbg(BATADV_DBG_TT, bat_priv,
286 "Roaming client %pM came back to its original location\n",
287 addr);
288 /* the ROAM flag is set because this client roamed away
289 * and the node got a roaming_advertisement message. Now
290 * that the client popped up again at its original
291 * location such flag can be unset
292 */
293 tt_local->common.flags &= ~BATADV_TT_CLIENT_ROAM;
294 roamed_back = true;
295 }
296 goto check_roaming;
259 } 297 }
260 298
261 tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC); 299 tt_local = kmalloc(sizeof(*tt_local), GFP_ATOMIC);
262 if (!tt_local_entry) 300 if (!tt_local)
263 goto out; 301 goto out;
264 302
265 batadv_dbg(BATADV_DBG_TT, bat_priv, 303 batadv_dbg(BATADV_DBG_TT, bat_priv,
266 "Creating new local tt entry: %pM (ttvn: %d)\n", addr, 304 "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
267 (uint8_t)atomic_read(&bat_priv->tt.vn)); 305 (uint8_t)atomic_read(&bat_priv->tt.vn));
268 306
269 memcpy(tt_local_entry->common.addr, addr, ETH_ALEN); 307 memcpy(tt_local->common.addr, addr, ETH_ALEN);
270 tt_local_entry->common.flags = BATADV_NO_FLAGS; 308 tt_local->common.flags = BATADV_NO_FLAGS;
271 if (batadv_is_wifi_iface(ifindex)) 309 if (batadv_is_wifi_iface(ifindex))
272 tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI; 310 tt_local->common.flags |= BATADV_TT_CLIENT_WIFI;
273 atomic_set(&tt_local_entry->common.refcount, 2); 311 atomic_set(&tt_local->common.refcount, 2);
274 tt_local_entry->last_seen = jiffies; 312 tt_local->last_seen = jiffies;
275 tt_local_entry->common.added_at = tt_local_entry->last_seen; 313 tt_local->common.added_at = tt_local->last_seen;
276 314
277 /* the batman interface mac address should never be purged */ 315 /* the batman interface mac address should never be purged */
278 if (batadv_compare_eth(addr, soft_iface->dev_addr)) 316 if (batadv_compare_eth(addr, soft_iface->dev_addr))
279 tt_local_entry->common.flags |= BATADV_TT_CLIENT_NOPURGE; 317 tt_local->common.flags |= BATADV_TT_CLIENT_NOPURGE;
280 318
281 /* The local entry has to be marked as NEW to avoid to send it in 319 /* The local entry has to be marked as NEW to avoid to send it in
282 * a full table response going out before the next ttvn increment 320 * a full table response going out before the next ttvn increment
283 * (consistency check) 321 * (consistency check)
284 */ 322 */
285 tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW; 323 tt_local->common.flags |= BATADV_TT_CLIENT_NEW;
286 324
287 hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt, 325 hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
288 batadv_choose_orig, 326 batadv_choose_orig, &tt_local->common,
289 &tt_local_entry->common, 327 &tt_local->common.hash_entry);
290 &tt_local_entry->common.hash_entry);
291 328
292 if (unlikely(hash_added != 0)) { 329 if (unlikely(hash_added != 0)) {
293 /* remove the reference for the hash */ 330 /* remove the reference for the hash */
294 batadv_tt_local_entry_free_ref(tt_local_entry); 331 batadv_tt_local_entry_free_ref(tt_local);
295 goto out; 332 goto out;
296 } 333 }
297 334
298 batadv_tt_local_event(bat_priv, addr, tt_local_entry->common.flags); 335add_event:
299 336 batadv_tt_local_event(bat_priv, addr, tt_local->common.flags);
300 /* remove address from global hash if present */
301 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
302 337
303 /* Check whether it is a roaming! */ 338check_roaming:
304 if (tt_global_entry) { 339 /* Check whether it is a roaming, but don't do anything if the roaming
340 * process has already been handled
341 */
342 if (tt_global && !(tt_global->common.flags & BATADV_TT_CLIENT_ROAM)) {
305 /* These node are probably going to update their tt table */ 343 /* These node are probably going to update their tt table */
306 head = &tt_global_entry->orig_list; 344 head = &tt_global->orig_list;
307 rcu_read_lock(); 345 rcu_read_lock();
308 hlist_for_each_entry_rcu(orig_entry, node, head, list) { 346 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
309 orig_entry->orig_node->tt_poss_change = true; 347 batadv_send_roam_adv(bat_priv, tt_global->common.addr,
310
311 batadv_send_roam_adv(bat_priv,
312 tt_global_entry->common.addr,
313 orig_entry->orig_node); 348 orig_entry->orig_node);
314 } 349 }
315 rcu_read_unlock(); 350 rcu_read_unlock();
316 /* The global entry has to be marked as ROAMING and 351 if (roamed_back) {
317 * has to be kept for consistency purpose 352 batadv_tt_global_free(bat_priv, tt_global,
318 */ 353 "Roaming canceled");
319 tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM; 354 tt_global = NULL;
320 tt_global_entry->roam_at = jiffies; 355 } else {
356 /* The global entry has to be marked as ROAMING and
357 * has to be kept for consistency purpose
358 */
359 tt_global->common.flags |= BATADV_TT_CLIENT_ROAM;
360 tt_global->roam_at = jiffies;
361 }
321 } 362 }
363
322out: 364out:
323 if (tt_local_entry) 365 if (tt_local)
324 batadv_tt_local_entry_free_ref(tt_local_entry); 366 batadv_tt_local_entry_free_ref(tt_local);
325 if (tt_global_entry) 367 if (tt_global)
326 batadv_tt_global_entry_free_ref(tt_global_entry); 368 batadv_tt_global_entry_free_ref(tt_global);
327} 369}
328 370
329static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff, 371static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff,
@@ -434,22 +476,10 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
434 struct hlist_node *node; 476 struct hlist_node *node;
435 struct hlist_head *head; 477 struct hlist_head *head;
436 uint32_t i; 478 uint32_t i;
437 int ret = 0;
438
439 primary_if = batadv_primary_if_get_selected(bat_priv);
440 if (!primary_if) {
441 ret = seq_printf(seq,
442 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
443 net_dev->name);
444 goto out;
445 }
446 479
447 if (primary_if->if_status != BATADV_IF_ACTIVE) { 480 primary_if = batadv_seq_print_text_primary_if_get(seq);
448 ret = seq_printf(seq, 481 if (!primary_if)
449 "BATMAN mesh %s disabled - primary interface not active\n",
450 net_dev->name);
451 goto out; 482 goto out;
452 }
453 483
454 seq_printf(seq, 484 seq_printf(seq,
455 "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n", 485 "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
@@ -479,7 +509,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
479out: 509out:
480 if (primary_if) 510 if (primary_if)
481 batadv_hardif_free_ref(primary_if); 511 batadv_hardif_free_ref(primary_if);
482 return ret; 512 return 0;
483} 513}
484 514
485static void 515static void
@@ -501,24 +531,57 @@ batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
501 tt_local_entry->common.addr, message); 531 tt_local_entry->common.addr, message);
502} 532}
503 533
504void batadv_tt_local_remove(struct batadv_priv *bat_priv, const uint8_t *addr, 534/**
505 const char *message, bool roaming) 535 * batadv_tt_local_remove - logically remove an entry from the local table
536 * @bat_priv: the bat priv with all the soft interface information
537 * @addr: the MAC address of the client to remove
538 * @message: message to append to the log on deletion
539 * @roaming: true if the deletion is due to a roaming event
540 *
541 * Returns the flags assigned to the local entry before being deleted
542 */
543uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
544 const uint8_t *addr, const char *message,
545 bool roaming)
506{ 546{
507 struct batadv_tt_local_entry *tt_local_entry = NULL; 547 struct batadv_tt_local_entry *tt_local_entry;
508 uint16_t flags; 548 uint16_t flags, curr_flags = BATADV_NO_FLAGS;
509 549
510 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr); 550 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
511 if (!tt_local_entry) 551 if (!tt_local_entry)
512 goto out; 552 goto out;
513 553
554 curr_flags = tt_local_entry->common.flags;
555
514 flags = BATADV_TT_CLIENT_DEL; 556 flags = BATADV_TT_CLIENT_DEL;
515 if (roaming) 557 /* if this global entry addition is due to a roaming, the node has to
558 * mark the local entry as "roamed" in order to correctly reroute
559 * packets later
560 */
561 if (roaming) {
516 flags |= BATADV_TT_CLIENT_ROAM; 562 flags |= BATADV_TT_CLIENT_ROAM;
563 /* mark the local client as ROAMed */
564 tt_local_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
565 }
566
567 if (!(tt_local_entry->common.flags & BATADV_TT_CLIENT_NEW)) {
568 batadv_tt_local_set_pending(bat_priv, tt_local_entry, flags,
569 message);
570 goto out;
571 }
572 /* if this client has been added right now, it is possible to
573 * immediately purge it
574 */
575 batadv_tt_local_event(bat_priv, tt_local_entry->common.addr,
576 curr_flags | BATADV_TT_CLIENT_DEL);
577 hlist_del_rcu(&tt_local_entry->common.hash_entry);
578 batadv_tt_local_entry_free_ref(tt_local_entry);
517 579
518 batadv_tt_local_set_pending(bat_priv, tt_local_entry, flags, message);
519out: 580out:
520 if (tt_local_entry) 581 if (tt_local_entry)
521 batadv_tt_local_entry_free_ref(tt_local_entry); 582 batadv_tt_local_entry_free_ref(tt_local_entry);
583
584 return curr_flags;
522} 585}
523 586
524static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv, 587static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
@@ -721,12 +784,23 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
721 const unsigned char *tt_addr, uint8_t flags, 784 const unsigned char *tt_addr, uint8_t flags,
722 uint8_t ttvn) 785 uint8_t ttvn)
723{ 786{
724 struct batadv_tt_global_entry *tt_global_entry = NULL; 787 struct batadv_tt_global_entry *tt_global_entry;
788 struct batadv_tt_local_entry *tt_local_entry;
725 int ret = 0; 789 int ret = 0;
726 int hash_added; 790 int hash_added;
727 struct batadv_tt_common_entry *common; 791 struct batadv_tt_common_entry *common;
792 uint16_t local_flags;
728 793
729 tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr); 794 tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr);
795 tt_local_entry = batadv_tt_local_hash_find(bat_priv, tt_addr);
796
797 /* if the node already has a local client for this entry, it has to wait
798 * for a roaming advertisement instead of manually messing up the global
799 * table
800 */
801 if ((flags & BATADV_TT_CLIENT_TEMP) && tt_local_entry &&
802 !(tt_local_entry->common.flags & BATADV_TT_CLIENT_NEW))
803 goto out;
730 804
731 if (!tt_global_entry) { 805 if (!tt_global_entry) {
732 tt_global_entry = kzalloc(sizeof(*tt_global_entry), GFP_ATOMIC); 806 tt_global_entry = kzalloc(sizeof(*tt_global_entry), GFP_ATOMIC);
@@ -738,6 +812,12 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
738 812
739 common->flags = flags; 813 common->flags = flags;
740 tt_global_entry->roam_at = 0; 814 tt_global_entry->roam_at = 0;
815 /* node must store current time in case of roaming. This is
816 * needed to purge this entry out on timeout (if nobody claims
817 * it)
818 */
819 if (flags & BATADV_TT_CLIENT_ROAM)
820 tt_global_entry->roam_at = jiffies;
741 atomic_set(&common->refcount, 2); 821 atomic_set(&common->refcount, 2);
742 common->added_at = jiffies; 822 common->added_at = jiffies;
743 823
@@ -755,19 +835,37 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
755 goto out_remove; 835 goto out_remove;
756 } 836 }
757 } else { 837 } else {
838 common = &tt_global_entry->common;
758 /* If there is already a global entry, we can use this one for 839 /* If there is already a global entry, we can use this one for
759 * our processing. 840 * our processing.
760 * But if we are trying to add a temporary client we can exit 841 * But if we are trying to add a temporary client then here are
761 * directly because the temporary information should never 842 * two options at this point:
762 * override any already known client state (whatever it is) 843 * 1) the global client is not a temporary client: the global
844 * client has to be left as it is, temporary information
845 * should never override any already known client state
846 * 2) the global client is a temporary client: purge the
847 * originator list and add the new one orig_entry
763 */ 848 */
764 if (flags & BATADV_TT_CLIENT_TEMP) 849 if (flags & BATADV_TT_CLIENT_TEMP) {
765 goto out; 850 if (!(common->flags & BATADV_TT_CLIENT_TEMP))
851 goto out;
852 if (batadv_tt_global_entry_has_orig(tt_global_entry,
853 orig_node))
854 goto out_remove;
855 batadv_tt_global_del_orig_list(tt_global_entry);
856 goto add_orig_entry;
857 }
766 858
767 /* if the client was temporary added before receiving the first 859 /* if the client was temporary added before receiving the first
768 * OGM announcing it, we have to clear the TEMP flag 860 * OGM announcing it, we have to clear the TEMP flag
769 */ 861 */
770 tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP; 862 common->flags &= ~BATADV_TT_CLIENT_TEMP;
863
864 /* the change can carry possible "attribute" flags like the
865 * TT_CLIENT_WIFI, therefore they have to be copied in the
866 * client entry
867 */
868 tt_global_entry->common.flags |= flags;
771 869
772 /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only 870 /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
773 * one originator left in the list and we previously received a 871 * one originator left in the list and we previously received a
@@ -776,33 +874,81 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
776 * We should first delete the old originator before adding the 874 * We should first delete the old originator before adding the
777 * new one. 875 * new one.
778 */ 876 */
779 if (tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM) { 877 if (common->flags & BATADV_TT_CLIENT_ROAM) {
780 batadv_tt_global_del_orig_list(tt_global_entry); 878 batadv_tt_global_del_orig_list(tt_global_entry);
781 tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM; 879 common->flags &= ~BATADV_TT_CLIENT_ROAM;
782 tt_global_entry->roam_at = 0; 880 tt_global_entry->roam_at = 0;
783 } 881 }
784 } 882 }
883add_orig_entry:
785 /* add the new orig_entry (if needed) or update it */ 884 /* add the new orig_entry (if needed) or update it */
786 batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn); 885 batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
787 886
788 batadv_dbg(BATADV_DBG_TT, bat_priv, 887 batadv_dbg(BATADV_DBG_TT, bat_priv,
789 "Creating new global tt entry: %pM (via %pM)\n", 888 "Creating new global tt entry: %pM (via %pM)\n",
790 tt_global_entry->common.addr, orig_node->orig); 889 common->addr, orig_node->orig);
890 ret = 1;
791 891
792out_remove: 892out_remove:
893
793 /* remove address from local hash if present */ 894 /* remove address from local hash if present */
794 batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr, 895 local_flags = batadv_tt_local_remove(bat_priv, tt_addr,
795 "global tt received", 896 "global tt received",
796 flags & BATADV_TT_CLIENT_ROAM); 897 !!(flags & BATADV_TT_CLIENT_ROAM));
797 ret = 1; 898 tt_global_entry->common.flags |= local_flags & BATADV_TT_CLIENT_WIFI;
899
900 if (!(flags & BATADV_TT_CLIENT_ROAM))
901 /* this is a normal global add. Therefore the client is not in a
902 * roaming state anymore.
903 */
904 tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
905
798out: 906out:
799 if (tt_global_entry) 907 if (tt_global_entry)
800 batadv_tt_global_entry_free_ref(tt_global_entry); 908 batadv_tt_global_entry_free_ref(tt_global_entry);
909 if (tt_local_entry)
910 batadv_tt_local_entry_free_ref(tt_local_entry);
801 return ret; 911 return ret;
802} 912}
803 913
804/* print all orig nodes who announce the address for this global entry. 914/* batadv_transtable_best_orig - Get best originator list entry from tt entry
805 * it is assumed that the caller holds rcu_read_lock(); 915 * @tt_global_entry: global translation table entry to be analyzed
916 *
917 * This functon assumes the caller holds rcu_read_lock().
918 * Returns best originator list entry or NULL on errors.
919 */
920static struct batadv_tt_orig_list_entry *
921batadv_transtable_best_orig(struct batadv_tt_global_entry *tt_global_entry)
922{
923 struct batadv_neigh_node *router = NULL;
924 struct hlist_head *head;
925 struct hlist_node *node;
926 struct batadv_tt_orig_list_entry *orig_entry, *best_entry = NULL;
927 int best_tq = 0;
928
929 head = &tt_global_entry->orig_list;
930 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
931 router = batadv_orig_node_get_router(orig_entry->orig_node);
932 if (!router)
933 continue;
934
935 if (router->tq_avg > best_tq) {
936 best_entry = orig_entry;
937 best_tq = router->tq_avg;
938 }
939
940 batadv_neigh_node_free_ref(router);
941 }
942
943 return best_entry;
944}
945
946/* batadv_tt_global_print_entry - print all orig nodes who announce the address
947 * for this global entry
948 * @tt_global_entry: global translation table entry to be printed
949 * @seq: debugfs table seq_file struct
950 *
951 * This functon assumes the caller holds rcu_read_lock().
806 */ 952 */
807static void 953static void
808batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry, 954batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
@@ -810,21 +956,37 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
810{ 956{
811 struct hlist_head *head; 957 struct hlist_head *head;
812 struct hlist_node *node; 958 struct hlist_node *node;
813 struct batadv_tt_orig_list_entry *orig_entry; 959 struct batadv_tt_orig_list_entry *orig_entry, *best_entry;
814 struct batadv_tt_common_entry *tt_common_entry; 960 struct batadv_tt_common_entry *tt_common_entry;
815 uint16_t flags; 961 uint16_t flags;
816 uint8_t last_ttvn; 962 uint8_t last_ttvn;
817 963
818 tt_common_entry = &tt_global_entry->common; 964 tt_common_entry = &tt_global_entry->common;
965 flags = tt_common_entry->flags;
966
967 best_entry = batadv_transtable_best_orig(tt_global_entry);
968 if (best_entry) {
969 last_ttvn = atomic_read(&best_entry->orig_node->last_ttvn);
970 seq_printf(seq, " %c %pM (%3u) via %pM (%3u) [%c%c%c]\n",
971 '*', tt_global_entry->common.addr,
972 best_entry->ttvn, best_entry->orig_node->orig,
973 last_ttvn,
974 (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
975 (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
976 (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
977 }
819 978
820 head = &tt_global_entry->orig_list; 979 head = &tt_global_entry->orig_list;
821 980
822 hlist_for_each_entry_rcu(orig_entry, node, head, list) { 981 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
823 flags = tt_common_entry->flags; 982 if (best_entry == orig_entry)
983 continue;
984
824 last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn); 985 last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
825 seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c%c]\n", 986 seq_printf(seq, " %c %pM (%3u) via %pM (%3u) [%c%c%c]\n",
826 tt_global_entry->common.addr, orig_entry->ttvn, 987 '+', tt_global_entry->common.addr,
827 orig_entry->orig_node->orig, last_ttvn, 988 orig_entry->ttvn, orig_entry->orig_node->orig,
989 last_ttvn,
828 (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'), 990 (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
829 (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'), 991 (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
830 (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.')); 992 (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
@@ -842,22 +1004,10 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
842 struct hlist_node *node; 1004 struct hlist_node *node;
843 struct hlist_head *head; 1005 struct hlist_head *head;
844 uint32_t i; 1006 uint32_t i;
845 int ret = 0;
846 1007
847 primary_if = batadv_primary_if_get_selected(bat_priv); 1008 primary_if = batadv_seq_print_text_primary_if_get(seq);
848 if (!primary_if) { 1009 if (!primary_if)
849 ret = seq_printf(seq,
850 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
851 net_dev->name);
852 goto out;
853 }
854
855 if (primary_if->if_status != BATADV_IF_ACTIVE) {
856 ret = seq_printf(seq,
857 "BATMAN mesh %s disabled - primary interface not active\n",
858 net_dev->name);
859 goto out; 1010 goto out;
860 }
861 1011
862 seq_printf(seq, 1012 seq_printf(seq,
863 "Globally announced TT entries received via the mesh %s\n", 1013 "Globally announced TT entries received via the mesh %s\n",
@@ -881,7 +1031,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
881out: 1031out:
882 if (primary_if) 1032 if (primary_if)
883 batadv_hardif_free_ref(primary_if); 1033 batadv_hardif_free_ref(primary_if);
884 return ret; 1034 return 0;
885} 1035}
886 1036
887/* deletes the orig list of a tt_global_entry */ 1037/* deletes the orig list of a tt_global_entry */
@@ -927,21 +1077,6 @@ batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv,
927 spin_unlock_bh(&tt_global_entry->list_lock); 1077 spin_unlock_bh(&tt_global_entry->list_lock);
928} 1078}
929 1079
930static void
931batadv_tt_global_del_struct(struct batadv_priv *bat_priv,
932 struct batadv_tt_global_entry *tt_global_entry,
933 const char *message)
934{
935 batadv_dbg(BATADV_DBG_TT, bat_priv,
936 "Deleting global tt entry %pM: %s\n",
937 tt_global_entry->common.addr, message);
938
939 batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
940 batadv_choose_orig, tt_global_entry->common.addr);
941 batadv_tt_global_entry_free_ref(tt_global_entry);
942
943}
944
945/* If the client is to be deleted, we check if it is the last origantor entry 1080/* If the client is to be deleted, we check if it is the last origantor entry
946 * within tt_global entry. If yes, we set the BATADV_TT_CLIENT_ROAM flag and the 1081 * within tt_global entry. If yes, we set the BATADV_TT_CLIENT_ROAM flag and the
947 * timer, otherwise we simply remove the originator scheduled for deletion. 1082 * timer, otherwise we simply remove the originator scheduled for deletion.
@@ -990,7 +1125,7 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
990 const unsigned char *addr, 1125 const unsigned char *addr,
991 const char *message, bool roaming) 1126 const char *message, bool roaming)
992{ 1127{
993 struct batadv_tt_global_entry *tt_global_entry = NULL; 1128 struct batadv_tt_global_entry *tt_global_entry;
994 struct batadv_tt_local_entry *local_entry = NULL; 1129 struct batadv_tt_local_entry *local_entry = NULL;
995 1130
996 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr); 1131 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
@@ -1002,8 +1137,8 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
1002 orig_node, message); 1137 orig_node, message);
1003 1138
1004 if (hlist_empty(&tt_global_entry->orig_list)) 1139 if (hlist_empty(&tt_global_entry->orig_list))
1005 batadv_tt_global_del_struct(bat_priv, tt_global_entry, 1140 batadv_tt_global_free(bat_priv, tt_global_entry,
1006 message); 1141 message);
1007 1142
1008 goto out; 1143 goto out;
1009 } 1144 }
@@ -1026,7 +1161,7 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
1026 if (local_entry) { 1161 if (local_entry) {
1027 /* local entry exists, case 2: client roamed to us. */ 1162 /* local entry exists, case 2: client roamed to us. */
1028 batadv_tt_global_del_orig_list(tt_global_entry); 1163 batadv_tt_global_del_orig_list(tt_global_entry);
1029 batadv_tt_global_del_struct(bat_priv, tt_global_entry, message); 1164 batadv_tt_global_free(bat_priv, tt_global_entry, message);
1030 } else 1165 } else
1031 /* no local entry exists, case 1: check for roaming */ 1166 /* no local entry exists, case 1: check for roaming */
1032 batadv_tt_global_del_roaming(bat_priv, tt_global_entry, 1167 batadv_tt_global_del_roaming(bat_priv, tt_global_entry,
@@ -1197,15 +1332,12 @@ struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
1197 struct batadv_tt_local_entry *tt_local_entry = NULL; 1332 struct batadv_tt_local_entry *tt_local_entry = NULL;
1198 struct batadv_tt_global_entry *tt_global_entry = NULL; 1333 struct batadv_tt_global_entry *tt_global_entry = NULL;
1199 struct batadv_orig_node *orig_node = NULL; 1334 struct batadv_orig_node *orig_node = NULL;
1200 struct batadv_neigh_node *router = NULL; 1335 struct batadv_tt_orig_list_entry *best_entry;
1201 struct hlist_head *head;
1202 struct hlist_node *node;
1203 struct batadv_tt_orig_list_entry *orig_entry;
1204 int best_tq;
1205 1336
1206 if (src && atomic_read(&bat_priv->ap_isolation)) { 1337 if (src && atomic_read(&bat_priv->ap_isolation)) {
1207 tt_local_entry = batadv_tt_local_hash_find(bat_priv, src); 1338 tt_local_entry = batadv_tt_local_hash_find(bat_priv, src);
1208 if (!tt_local_entry) 1339 if (!tt_local_entry ||
1340 (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING))
1209 goto out; 1341 goto out;
1210 } 1342 }
1211 1343
@@ -1220,25 +1352,15 @@ struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
1220 _batadv_is_ap_isolated(tt_local_entry, tt_global_entry)) 1352 _batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
1221 goto out; 1353 goto out;
1222 1354
1223 best_tq = 0;
1224
1225 rcu_read_lock(); 1355 rcu_read_lock();
1226 head = &tt_global_entry->orig_list; 1356 best_entry = batadv_transtable_best_orig(tt_global_entry);
1227 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
1228 router = batadv_orig_node_get_router(orig_entry->orig_node);
1229 if (!router)
1230 continue;
1231
1232 if (router->tq_avg > best_tq) {
1233 orig_node = orig_entry->orig_node;
1234 best_tq = router->tq_avg;
1235 }
1236 batadv_neigh_node_free_ref(router);
1237 }
1238 /* found anything? */ 1357 /* found anything? */
1358 if (best_entry)
1359 orig_node = best_entry->orig_node;
1239 if (orig_node && !atomic_inc_not_zero(&orig_node->refcount)) 1360 if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
1240 orig_node = NULL; 1361 orig_node = NULL;
1241 rcu_read_unlock(); 1362 rcu_read_unlock();
1363
1242out: 1364out:
1243 if (tt_global_entry) 1365 if (tt_global_entry)
1244 batadv_tt_global_entry_free_ref(tt_global_entry); 1366 batadv_tt_global_entry_free_ref(tt_global_entry);
@@ -1471,11 +1593,11 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
1471 tt_tot = tt_len / sizeof(struct batadv_tt_change); 1593 tt_tot = tt_len / sizeof(struct batadv_tt_change);
1472 1594
1473 len = tt_query_size + tt_len; 1595 len = tt_query_size + tt_len;
1474 skb = dev_alloc_skb(len + ETH_HLEN); 1596 skb = dev_alloc_skb(len + ETH_HLEN + NET_IP_ALIGN);
1475 if (!skb) 1597 if (!skb)
1476 goto out; 1598 goto out;
1477 1599
1478 skb_reserve(skb, ETH_HLEN); 1600 skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
1479 tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len); 1601 tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len);
1480 tt_response->ttvn = ttvn; 1602 tt_response->ttvn = ttvn;
1481 1603
@@ -1496,7 +1618,7 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
1496 1618
1497 memcpy(tt_change->addr, tt_common_entry->addr, 1619 memcpy(tt_change->addr, tt_common_entry->addr,
1498 ETH_ALEN); 1620 ETH_ALEN);
1499 tt_change->flags = BATADV_NO_FLAGS; 1621 tt_change->flags = tt_common_entry->flags;
1500 1622
1501 tt_count++; 1623 tt_count++;
1502 tt_change++; 1624 tt_change++;
@@ -1520,7 +1642,6 @@ static int batadv_send_tt_request(struct batadv_priv *bat_priv,
1520{ 1642{
1521 struct sk_buff *skb = NULL; 1643 struct sk_buff *skb = NULL;
1522 struct batadv_tt_query_packet *tt_request; 1644 struct batadv_tt_query_packet *tt_request;
1523 struct batadv_neigh_node *neigh_node = NULL;
1524 struct batadv_hard_iface *primary_if; 1645 struct batadv_hard_iface *primary_if;
1525 struct batadv_tt_req_node *tt_req_node = NULL; 1646 struct batadv_tt_req_node *tt_req_node = NULL;
1526 int ret = 1; 1647 int ret = 1;
@@ -1537,11 +1658,11 @@ static int batadv_send_tt_request(struct batadv_priv *bat_priv,
1537 if (!tt_req_node) 1658 if (!tt_req_node)
1538 goto out; 1659 goto out;
1539 1660
1540 skb = dev_alloc_skb(sizeof(*tt_request) + ETH_HLEN); 1661 skb = dev_alloc_skb(sizeof(*tt_request) + ETH_HLEN + NET_IP_ALIGN);
1541 if (!skb) 1662 if (!skb)
1542 goto out; 1663 goto out;
1543 1664
1544 skb_reserve(skb, ETH_HLEN); 1665 skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
1545 1666
1546 tt_req_len = sizeof(*tt_request); 1667 tt_req_len = sizeof(*tt_request);
1547 tt_request = (struct batadv_tt_query_packet *)skb_put(skb, tt_req_len); 1668 tt_request = (struct batadv_tt_query_packet *)skb_put(skb, tt_req_len);
@@ -1558,23 +1679,15 @@ static int batadv_send_tt_request(struct batadv_priv *bat_priv,
1558 if (full_table) 1679 if (full_table)
1559 tt_request->flags |= BATADV_TT_FULL_TABLE; 1680 tt_request->flags |= BATADV_TT_FULL_TABLE;
1560 1681
1561 neigh_node = batadv_orig_node_get_router(dst_orig_node); 1682 batadv_dbg(BATADV_DBG_TT, bat_priv, "Sending TT_REQUEST to %pM [%c]\n",
1562 if (!neigh_node) 1683 dst_orig_node->orig, (full_table ? 'F' : '.'));
1563 goto out;
1564
1565 batadv_dbg(BATADV_DBG_TT, bat_priv,
1566 "Sending TT_REQUEST to %pM via %pM [%c]\n",
1567 dst_orig_node->orig, neigh_node->addr,
1568 (full_table ? 'F' : '.'));
1569 1684
1570 batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX); 1685 batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX);
1571 1686
1572 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 1687 if (batadv_send_skb_to_orig(skb, dst_orig_node, NULL))
1573 ret = 0; 1688 ret = 0;
1574 1689
1575out: 1690out:
1576 if (neigh_node)
1577 batadv_neigh_node_free_ref(neigh_node);
1578 if (primary_if) 1691 if (primary_if)
1579 batadv_hardif_free_ref(primary_if); 1692 batadv_hardif_free_ref(primary_if);
1580 if (ret) 1693 if (ret)
@@ -1592,9 +1705,8 @@ static bool
1592batadv_send_other_tt_response(struct batadv_priv *bat_priv, 1705batadv_send_other_tt_response(struct batadv_priv *bat_priv,
1593 struct batadv_tt_query_packet *tt_request) 1706 struct batadv_tt_query_packet *tt_request)
1594{ 1707{
1595 struct batadv_orig_node *req_dst_orig_node = NULL; 1708 struct batadv_orig_node *req_dst_orig_node;
1596 struct batadv_orig_node *res_dst_orig_node = NULL; 1709 struct batadv_orig_node *res_dst_orig_node = NULL;
1597 struct batadv_neigh_node *neigh_node = NULL;
1598 struct batadv_hard_iface *primary_if = NULL; 1710 struct batadv_hard_iface *primary_if = NULL;
1599 uint8_t orig_ttvn, req_ttvn, ttvn; 1711 uint8_t orig_ttvn, req_ttvn, ttvn;
1600 int ret = false; 1712 int ret = false;
@@ -1620,10 +1732,6 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
1620 if (!res_dst_orig_node) 1732 if (!res_dst_orig_node)
1621 goto out; 1733 goto out;
1622 1734
1623 neigh_node = batadv_orig_node_get_router(res_dst_orig_node);
1624 if (!neigh_node)
1625 goto out;
1626
1627 primary_if = batadv_primary_if_get_selected(bat_priv); 1735 primary_if = batadv_primary_if_get_selected(bat_priv);
1628 if (!primary_if) 1736 if (!primary_if)
1629 goto out; 1737 goto out;
@@ -1652,11 +1760,11 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
1652 tt_tot = tt_len / sizeof(struct batadv_tt_change); 1760 tt_tot = tt_len / sizeof(struct batadv_tt_change);
1653 1761
1654 len = sizeof(*tt_response) + tt_len; 1762 len = sizeof(*tt_response) + tt_len;
1655 skb = dev_alloc_skb(len + ETH_HLEN); 1763 skb = dev_alloc_skb(len + ETH_HLEN + NET_IP_ALIGN);
1656 if (!skb) 1764 if (!skb)
1657 goto unlock; 1765 goto unlock;
1658 1766
1659 skb_reserve(skb, ETH_HLEN); 1767 skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
1660 packet_pos = skb_put(skb, len); 1768 packet_pos = skb_put(skb, len);
1661 tt_response = (struct batadv_tt_query_packet *)packet_pos; 1769 tt_response = (struct batadv_tt_query_packet *)packet_pos;
1662 tt_response->ttvn = req_ttvn; 1770 tt_response->ttvn = req_ttvn;
@@ -1695,14 +1803,13 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
1695 tt_response->flags |= BATADV_TT_FULL_TABLE; 1803 tt_response->flags |= BATADV_TT_FULL_TABLE;
1696 1804
1697 batadv_dbg(BATADV_DBG_TT, bat_priv, 1805 batadv_dbg(BATADV_DBG_TT, bat_priv,
1698 "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n", 1806 "Sending TT_RESPONSE %pM for %pM (ttvn: %u)\n",
1699 res_dst_orig_node->orig, neigh_node->addr, 1807 res_dst_orig_node->orig, req_dst_orig_node->orig, req_ttvn);
1700 req_dst_orig_node->orig, req_ttvn);
1701 1808
1702 batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX); 1809 batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
1703 1810
1704 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 1811 if (batadv_send_skb_to_orig(skb, res_dst_orig_node, NULL))
1705 ret = true; 1812 ret = true;
1706 goto out; 1813 goto out;
1707 1814
1708unlock: 1815unlock:
@@ -1713,8 +1820,6 @@ out:
1713 batadv_orig_node_free_ref(res_dst_orig_node); 1820 batadv_orig_node_free_ref(res_dst_orig_node);
1714 if (req_dst_orig_node) 1821 if (req_dst_orig_node)
1715 batadv_orig_node_free_ref(req_dst_orig_node); 1822 batadv_orig_node_free_ref(req_dst_orig_node);
1716 if (neigh_node)
1717 batadv_neigh_node_free_ref(neigh_node);
1718 if (primary_if) 1823 if (primary_if)
1719 batadv_hardif_free_ref(primary_if); 1824 batadv_hardif_free_ref(primary_if);
1720 if (!ret) 1825 if (!ret)
@@ -1727,8 +1832,7 @@ static bool
1727batadv_send_my_tt_response(struct batadv_priv *bat_priv, 1832batadv_send_my_tt_response(struct batadv_priv *bat_priv,
1728 struct batadv_tt_query_packet *tt_request) 1833 struct batadv_tt_query_packet *tt_request)
1729{ 1834{
1730 struct batadv_orig_node *orig_node = NULL; 1835 struct batadv_orig_node *orig_node;
1731 struct batadv_neigh_node *neigh_node = NULL;
1732 struct batadv_hard_iface *primary_if = NULL; 1836 struct batadv_hard_iface *primary_if = NULL;
1733 uint8_t my_ttvn, req_ttvn, ttvn; 1837 uint8_t my_ttvn, req_ttvn, ttvn;
1734 int ret = false; 1838 int ret = false;
@@ -1753,10 +1857,6 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
1753 if (!orig_node) 1857 if (!orig_node)
1754 goto out; 1858 goto out;
1755 1859
1756 neigh_node = batadv_orig_node_get_router(orig_node);
1757 if (!neigh_node)
1758 goto out;
1759
1760 primary_if = batadv_primary_if_get_selected(bat_priv); 1860 primary_if = batadv_primary_if_get_selected(bat_priv);
1761 if (!primary_if) 1861 if (!primary_if)
1762 goto out; 1862 goto out;
@@ -1779,11 +1879,11 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
1779 tt_tot = tt_len / sizeof(struct batadv_tt_change); 1879 tt_tot = tt_len / sizeof(struct batadv_tt_change);
1780 1880
1781 len = sizeof(*tt_response) + tt_len; 1881 len = sizeof(*tt_response) + tt_len;
1782 skb = dev_alloc_skb(len + ETH_HLEN); 1882 skb = dev_alloc_skb(len + ETH_HLEN + NET_IP_ALIGN);
1783 if (!skb) 1883 if (!skb)
1784 goto unlock; 1884 goto unlock;
1785 1885
1786 skb_reserve(skb, ETH_HLEN); 1886 skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
1787 packet_pos = skb_put(skb, len); 1887 packet_pos = skb_put(skb, len);
1788 tt_response = (struct batadv_tt_query_packet *)packet_pos; 1888 tt_response = (struct batadv_tt_query_packet *)packet_pos;
1789 tt_response->ttvn = req_ttvn; 1889 tt_response->ttvn = req_ttvn;
@@ -1820,14 +1920,14 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
1820 tt_response->flags |= BATADV_TT_FULL_TABLE; 1920 tt_response->flags |= BATADV_TT_FULL_TABLE;
1821 1921
1822 batadv_dbg(BATADV_DBG_TT, bat_priv, 1922 batadv_dbg(BATADV_DBG_TT, bat_priv,
1823 "Sending TT_RESPONSE to %pM via %pM [%c]\n", 1923 "Sending TT_RESPONSE to %pM [%c]\n",
1824 orig_node->orig, neigh_node->addr, 1924 orig_node->orig,
1825 (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.')); 1925 (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
1826 1926
1827 batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX); 1927 batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
1828 1928
1829 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 1929 if (batadv_send_skb_to_orig(skb, orig_node, NULL))
1830 ret = true; 1930 ret = true;
1831 goto out; 1931 goto out;
1832 1932
1833unlock: 1933unlock:
@@ -1835,8 +1935,6 @@ unlock:
1835out: 1935out:
1836 if (orig_node) 1936 if (orig_node)
1837 batadv_orig_node_free_ref(orig_node); 1937 batadv_orig_node_free_ref(orig_node);
1838 if (neigh_node)
1839 batadv_neigh_node_free_ref(neigh_node);
1840 if (primary_if) 1938 if (primary_if)
1841 batadv_hardif_free_ref(primary_if); 1939 batadv_hardif_free_ref(primary_if);
1842 if (!ret) 1940 if (!ret)
@@ -1893,7 +1991,7 @@ static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
1893static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv, 1991static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
1894 struct batadv_tt_query_packet *tt_response) 1992 struct batadv_tt_query_packet *tt_response)
1895{ 1993{
1896 struct batadv_orig_node *orig_node = NULL; 1994 struct batadv_orig_node *orig_node;
1897 1995
1898 orig_node = batadv_orig_hash_find(bat_priv, tt_response->src); 1996 orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
1899 if (!orig_node) 1997 if (!orig_node)
@@ -1935,7 +2033,7 @@ static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
1935 2033
1936bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr) 2034bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr)
1937{ 2035{
1938 struct batadv_tt_local_entry *tt_local_entry = NULL; 2036 struct batadv_tt_local_entry *tt_local_entry;
1939 bool ret = false; 2037 bool ret = false;
1940 2038
1941 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr); 2039 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
@@ -1944,7 +2042,8 @@ bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr)
1944 /* Check if the client has been logically deleted (but is kept for 2042 /* Check if the client has been logically deleted (but is kept for
1945 * consistency purpose) 2043 * consistency purpose)
1946 */ 2044 */
1947 if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING) 2045 if ((tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING) ||
2046 (tt_local_entry->common.flags & BATADV_TT_CLIENT_ROAM))
1948 goto out; 2047 goto out;
1949 ret = true; 2048 ret = true;
1950out: 2049out:
@@ -1995,10 +2094,6 @@ void batadv_handle_tt_response(struct batadv_priv *bat_priv,
1995 2094
1996 /* Recalculate the CRC for this orig_node and store it */ 2095 /* Recalculate the CRC for this orig_node and store it */
1997 orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node); 2096 orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
1998 /* Roaming phase is over: tables are in sync again. I can
1999 * unset the flag
2000 */
2001 orig_node->tt_poss_change = false;
2002out: 2097out:
2003 if (orig_node) 2098 if (orig_node)
2004 batadv_orig_node_free_ref(orig_node); 2099 batadv_orig_node_free_ref(orig_node);
@@ -2104,7 +2199,6 @@ unlock:
2104static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client, 2199static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
2105 struct batadv_orig_node *orig_node) 2200 struct batadv_orig_node *orig_node)
2106{ 2201{
2107 struct batadv_neigh_node *neigh_node = NULL;
2108 struct sk_buff *skb = NULL; 2202 struct sk_buff *skb = NULL;
2109 struct batadv_roam_adv_packet *roam_adv_packet; 2203 struct batadv_roam_adv_packet *roam_adv_packet;
2110 int ret = 1; 2204 int ret = 1;
@@ -2117,11 +2211,11 @@ static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
2117 if (!batadv_tt_check_roam_count(bat_priv, client)) 2211 if (!batadv_tt_check_roam_count(bat_priv, client))
2118 goto out; 2212 goto out;
2119 2213
2120 skb = dev_alloc_skb(sizeof(*roam_adv_packet) + ETH_HLEN); 2214 skb = dev_alloc_skb(sizeof(*roam_adv_packet) + ETH_HLEN + NET_IP_ALIGN);
2121 if (!skb) 2215 if (!skb)
2122 goto out; 2216 goto out;
2123 2217
2124 skb_reserve(skb, ETH_HLEN); 2218 skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
2125 2219
2126 roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len); 2220 roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len);
2127 2221
@@ -2137,23 +2231,17 @@ static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
2137 memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN); 2231 memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
2138 memcpy(roam_adv_packet->client, client, ETH_ALEN); 2232 memcpy(roam_adv_packet->client, client, ETH_ALEN);
2139 2233
2140 neigh_node = batadv_orig_node_get_router(orig_node);
2141 if (!neigh_node)
2142 goto out;
2143
2144 batadv_dbg(BATADV_DBG_TT, bat_priv, 2234 batadv_dbg(BATADV_DBG_TT, bat_priv,
2145 "Sending ROAMING_ADV to %pM (client %pM) via %pM\n", 2235 "Sending ROAMING_ADV to %pM (client %pM)\n",
2146 orig_node->orig, client, neigh_node->addr); 2236 orig_node->orig, client);
2147 2237
2148 batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX); 2238 batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX);
2149 2239
2150 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 2240 if (batadv_send_skb_to_orig(skb, orig_node, NULL))
2151 ret = 0; 2241 ret = 0;
2152 2242
2153out: 2243out:
2154 if (neigh_node) 2244 if (ret && skb)
2155 batadv_neigh_node_free_ref(neigh_node);
2156 if (ret)
2157 kfree_skb(skb); 2245 kfree_skb(skb);
2158 return; 2246 return;
2159} 2247}
@@ -2289,7 +2377,6 @@ static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
2289 batadv_dbg(BATADV_DBG_TT, bat_priv, 2377 batadv_dbg(BATADV_DBG_TT, bat_priv,
2290 "Local changes committed, updating to ttvn %u\n", 2378 "Local changes committed, updating to ttvn %u\n",
2291 (uint8_t)atomic_read(&bat_priv->tt.vn)); 2379 (uint8_t)atomic_read(&bat_priv->tt.vn));
2292 bat_priv->tt.poss_change = false;
2293 2380
2294 /* reset the sending counter */ 2381 /* reset the sending counter */
2295 atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX); 2382 atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
@@ -2401,11 +2488,6 @@ void batadv_tt_update_orig(struct batadv_priv *bat_priv,
2401 */ 2488 */
2402 if (orig_node->tt_crc != tt_crc) 2489 if (orig_node->tt_crc != tt_crc)
2403 goto request_table; 2490 goto request_table;
2404
2405 /* Roaming phase is over: tables are in sync again. I can
2406 * unset the flag
2407 */
2408 orig_node->tt_poss_change = false;
2409 } else { 2491 } else {
2410 /* if we missed more than one change or our tables are not 2492 /* if we missed more than one change or our tables are not
2411 * in sync anymore -> request fresh tt data 2493 * in sync anymore -> request fresh tt data
@@ -2438,18 +2520,51 @@ bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
2438 if (!tt_global_entry) 2520 if (!tt_global_entry)
2439 goto out; 2521 goto out;
2440 2522
2441 ret = tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM; 2523 ret = !!(tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM);
2442 batadv_tt_global_entry_free_ref(tt_global_entry); 2524 batadv_tt_global_entry_free_ref(tt_global_entry);
2443out: 2525out:
2444 return ret; 2526 return ret;
2445} 2527}
2446 2528
2529/**
2530 * batadv_tt_local_client_is_roaming - tells whether the client is roaming
2531 * @bat_priv: the bat priv with all the soft interface information
2532 * @addr: the MAC address of the local client to query
2533 *
2534 * Returns true if the local client is known to be roaming (it is not served by
2535 * this node anymore) or not. If yes, the client is still present in the table
2536 * to keep the latter consistent with the node TTVN
2537 */
2538bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
2539 uint8_t *addr)
2540{
2541 struct batadv_tt_local_entry *tt_local_entry;
2542 bool ret = false;
2543
2544 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
2545 if (!tt_local_entry)
2546 goto out;
2547
2548 ret = tt_local_entry->common.flags & BATADV_TT_CLIENT_ROAM;
2549 batadv_tt_local_entry_free_ref(tt_local_entry);
2550out:
2551 return ret;
2552
2553}
2554
2447bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv, 2555bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
2448 struct batadv_orig_node *orig_node, 2556 struct batadv_orig_node *orig_node,
2449 const unsigned char *addr) 2557 const unsigned char *addr)
2450{ 2558{
2451 bool ret = false; 2559 bool ret = false;
2452 2560
2561 /* if the originator is a backbone node (meaning it belongs to the same
2562 * LAN of this node) the temporary client must not be added because to
2563 * reach such destination the node must use the LAN instead of the mesh
2564 */
2565 if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
2566 goto out;
2567
2453 if (!batadv_tt_global_add(bat_priv, orig_node, addr, 2568 if (!batadv_tt_global_add(bat_priv, orig_node, addr,
2454 BATADV_TT_CLIENT_TEMP, 2569 BATADV_TT_CLIENT_TEMP,
2455 atomic_read(&orig_node->last_ttvn))) 2570 atomic_read(&orig_node->last_ttvn)))
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 811fffd4760c..46d4451a59ee 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -24,9 +24,9 @@ int batadv_tt_len(int changes_num);
24int batadv_tt_init(struct batadv_priv *bat_priv); 24int batadv_tt_init(struct batadv_priv *bat_priv);
25void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, 25void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
26 int ifindex); 26 int ifindex);
27void batadv_tt_local_remove(struct batadv_priv *bat_priv, 27uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
28 const uint8_t *addr, const char *message, 28 const uint8_t *addr, const char *message,
29 bool roaming); 29 bool roaming);
30int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset); 30int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset);
31void batadv_tt_global_add_orig(struct batadv_priv *bat_priv, 31void batadv_tt_global_add_orig(struct batadv_priv *bat_priv,
32 struct batadv_orig_node *orig_node, 32 struct batadv_orig_node *orig_node,
@@ -59,6 +59,8 @@ int batadv_tt_append_diff(struct batadv_priv *bat_priv,
59 int packet_min_len); 59 int packet_min_len);
60bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv, 60bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
61 uint8_t *addr); 61 uint8_t *addr);
62bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
63 uint8_t *addr);
62bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv, 64bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
63 struct batadv_orig_node *orig_node, 65 struct batadv_orig_node *orig_node,
64 const unsigned char *addr); 66 const unsigned char *addr);
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index ac1e07a80454..ae9ac9aca8c5 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -28,20 +28,41 @@
28 (ETH_HLEN + max(sizeof(struct batadv_unicast_packet), \ 28 (ETH_HLEN + max(sizeof(struct batadv_unicast_packet), \
29 sizeof(struct batadv_bcast_packet))) 29 sizeof(struct batadv_bcast_packet)))
30 30
31#ifdef CONFIG_BATMAN_ADV_DAT
32
33/* batadv_dat_addr_t is the type used for all DHT addresses. If it is changed,
34 * BATADV_DAT_ADDR_MAX is changed as well.
35 *
36 * *Please be careful: batadv_dat_addr_t must be UNSIGNED*
37 */
38#define batadv_dat_addr_t uint16_t
39
40#endif /* CONFIG_BATMAN_ADV_DAT */
41
42/**
43 * struct batadv_hard_iface_bat_iv - per hard interface B.A.T.M.A.N. IV data
44 * @ogm_buff: buffer holding the OGM packet
45 * @ogm_buff_len: length of the OGM packet buffer
46 * @ogm_seqno: OGM sequence number - used to identify each OGM
47 */
48struct batadv_hard_iface_bat_iv {
49 unsigned char *ogm_buff;
50 int ogm_buff_len;
51 atomic_t ogm_seqno;
52};
53
31struct batadv_hard_iface { 54struct batadv_hard_iface {
32 struct list_head list; 55 struct list_head list;
33 int16_t if_num; 56 int16_t if_num;
34 char if_status; 57 char if_status;
35 struct net_device *net_dev; 58 struct net_device *net_dev;
36 atomic_t seqno;
37 atomic_t frag_seqno; 59 atomic_t frag_seqno;
38 unsigned char *packet_buff;
39 int packet_len;
40 struct kobject *hardif_obj; 60 struct kobject *hardif_obj;
41 atomic_t refcount; 61 atomic_t refcount;
42 struct packet_type batman_adv_ptype; 62 struct packet_type batman_adv_ptype;
43 struct net_device *soft_iface; 63 struct net_device *soft_iface;
44 struct rcu_head rcu; 64 struct rcu_head rcu;
65 struct batadv_hard_iface_bat_iv bat_iv;
45}; 66};
46 67
47/** 68/**
@@ -63,6 +84,9 @@ struct batadv_orig_node {
63 uint8_t orig[ETH_ALEN]; 84 uint8_t orig[ETH_ALEN];
64 uint8_t primary_addr[ETH_ALEN]; 85 uint8_t primary_addr[ETH_ALEN];
65 struct batadv_neigh_node __rcu *router; /* rcu protected pointer */ 86 struct batadv_neigh_node __rcu *router; /* rcu protected pointer */
87#ifdef CONFIG_BATMAN_ADV_DAT
88 batadv_dat_addr_t dat_addr;
89#endif
66 unsigned long *bcast_own; 90 unsigned long *bcast_own;
67 uint8_t *bcast_own_sum; 91 uint8_t *bcast_own_sum;
68 unsigned long last_seen; 92 unsigned long last_seen;
@@ -77,13 +101,6 @@ struct batadv_orig_node {
77 spinlock_t tt_buff_lock; /* protects tt_buff */ 101 spinlock_t tt_buff_lock; /* protects tt_buff */
78 atomic_t tt_size; 102 atomic_t tt_size;
79 bool tt_initialised; 103 bool tt_initialised;
80 /* The tt_poss_change flag is used to detect an ongoing roaming phase.
81 * If true, then I sent a Roaming_adv to this orig_node and I have to
82 * inspect every packet directed to it to check whether it is still
83 * the true destination or not. This flag will be reset to false as
84 * soon as I receive a new TTVN from this orig_node
85 */
86 bool tt_poss_change;
87 uint32_t last_real_seqno; 104 uint32_t last_real_seqno;
88 uint8_t last_ttl; 105 uint8_t last_ttl;
89 DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE); 106 DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
@@ -139,7 +156,7 @@ struct batadv_neigh_node {
139#ifdef CONFIG_BATMAN_ADV_BLA 156#ifdef CONFIG_BATMAN_ADV_BLA
140struct batadv_bcast_duplist_entry { 157struct batadv_bcast_duplist_entry {
141 uint8_t orig[ETH_ALEN]; 158 uint8_t orig[ETH_ALEN];
142 uint16_t crc; 159 __be32 crc;
143 unsigned long entrytime; 160 unsigned long entrytime;
144}; 161};
145#endif 162#endif
@@ -162,6 +179,13 @@ enum batadv_counters {
162 BATADV_CNT_TT_RESPONSE_RX, 179 BATADV_CNT_TT_RESPONSE_RX,
163 BATADV_CNT_TT_ROAM_ADV_TX, 180 BATADV_CNT_TT_ROAM_ADV_TX,
164 BATADV_CNT_TT_ROAM_ADV_RX, 181 BATADV_CNT_TT_ROAM_ADV_RX,
182#ifdef CONFIG_BATMAN_ADV_DAT
183 BATADV_CNT_DAT_GET_TX,
184 BATADV_CNT_DAT_GET_RX,
185 BATADV_CNT_DAT_PUT_TX,
186 BATADV_CNT_DAT_PUT_RX,
187 BATADV_CNT_DAT_CACHED_REPLY_TX,
188#endif
165 BATADV_CNT_NUM, 189 BATADV_CNT_NUM,
166}; 190};
167 191
@@ -181,7 +205,6 @@ struct batadv_priv_tt {
181 atomic_t vn; 205 atomic_t vn;
182 atomic_t ogm_append_cnt; 206 atomic_t ogm_append_cnt;
183 atomic_t local_changes; 207 atomic_t local_changes;
184 bool poss_change;
185 struct list_head changes_list; 208 struct list_head changes_list;
186 struct batadv_hashtable *local_hash; 209 struct batadv_hashtable *local_hash;
187 struct batadv_hashtable *global_hash; 210 struct batadv_hashtable *global_hash;
@@ -228,6 +251,20 @@ struct batadv_priv_vis {
228 struct batadv_vis_info *my_info; 251 struct batadv_vis_info *my_info;
229}; 252};
230 253
254/**
255 * struct batadv_priv_dat - per mesh interface DAT private data
256 * @addr: node DAT address
257 * @hash: hashtable representing the local ARP cache
258 * @work: work queue callback item for cache purging
259 */
260#ifdef CONFIG_BATMAN_ADV_DAT
261struct batadv_priv_dat {
262 batadv_dat_addr_t addr;
263 struct batadv_hashtable *hash;
264 struct delayed_work work;
265};
266#endif
267
231struct batadv_priv { 268struct batadv_priv {
232 atomic_t mesh_state; 269 atomic_t mesh_state;
233 struct net_device_stats stats; 270 struct net_device_stats stats;
@@ -237,6 +274,9 @@ struct batadv_priv {
237 atomic_t fragmentation; /* boolean */ 274 atomic_t fragmentation; /* boolean */
238 atomic_t ap_isolation; /* boolean */ 275 atomic_t ap_isolation; /* boolean */
239 atomic_t bridge_loop_avoidance; /* boolean */ 276 atomic_t bridge_loop_avoidance; /* boolean */
277#ifdef CONFIG_BATMAN_ADV_DAT
278 atomic_t distributed_arp_table; /* boolean */
279#endif
240 atomic_t vis_mode; /* VIS_TYPE_* */ 280 atomic_t vis_mode; /* VIS_TYPE_* */
241 atomic_t gw_mode; /* GW_MODE_* */ 281 atomic_t gw_mode; /* GW_MODE_* */
242 atomic_t gw_sel_class; /* uint */ 282 atomic_t gw_sel_class; /* uint */
@@ -255,7 +295,7 @@ struct batadv_priv {
255 struct hlist_head forw_bcast_list; 295 struct hlist_head forw_bcast_list;
256 struct batadv_hashtable *orig_hash; 296 struct batadv_hashtable *orig_hash;
257 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ 297 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
258 spinlock_t forw_bcast_list_lock; /* protects */ 298 spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */
259 struct delayed_work orig_work; 299 struct delayed_work orig_work;
260 struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */ 300 struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */
261 struct batadv_algo_ops *bat_algo_ops; 301 struct batadv_algo_ops *bat_algo_ops;
@@ -265,6 +305,9 @@ struct batadv_priv {
265 struct batadv_priv_gw gw; 305 struct batadv_priv_gw gw;
266 struct batadv_priv_tt tt; 306 struct batadv_priv_tt tt;
267 struct batadv_priv_vis vis; 307 struct batadv_priv_vis vis;
308#ifdef CONFIG_BATMAN_ADV_DAT
309 struct batadv_priv_dat dat;
310#endif
268}; 311};
269 312
270struct batadv_socket_client { 313struct batadv_socket_client {
@@ -318,6 +361,7 @@ struct batadv_backbone_gw {
318 struct hlist_node hash_entry; 361 struct hlist_node hash_entry;
319 struct batadv_priv *bat_priv; 362 struct batadv_priv *bat_priv;
320 unsigned long lasttime; /* last time we heard of this backbone gw */ 363 unsigned long lasttime; /* last time we heard of this backbone gw */
364 atomic_t wait_periods;
321 atomic_t request_sent; 365 atomic_t request_sent;
322 atomic_t refcount; 366 atomic_t refcount;
323 struct rcu_head rcu; 367 struct rcu_head rcu;
@@ -437,4 +481,36 @@ struct batadv_algo_ops {
437 void (*bat_ogm_emit)(struct batadv_forw_packet *forw_packet); 481 void (*bat_ogm_emit)(struct batadv_forw_packet *forw_packet);
438}; 482};
439 483
484/**
485 * struct batadv_dat_entry - it is a single entry of batman-adv ARP backend. It
486 * is used to stored ARP entries needed for the global DAT cache
487 * @ip: the IPv4 corresponding to this DAT/ARP entry
488 * @mac_addr: the MAC address associated to the stored IPv4
489 * @last_update: time in jiffies when this entry was refreshed last time
490 * @hash_entry: hlist node for batadv_priv_dat::hash
491 * @refcount: number of contexts the object is used
492 * @rcu: struct used for freeing in an RCU-safe manner
493 */
494struct batadv_dat_entry {
495 __be32 ip;
496 uint8_t mac_addr[ETH_ALEN];
497 unsigned long last_update;
498 struct hlist_node hash_entry;
499 atomic_t refcount;
500 struct rcu_head rcu;
501};
502
503/**
504 * struct batadv_dat_candidate - candidate destination for DAT operations
505 * @type: the type of the selected candidate. It can one of the following:
506 * - BATADV_DAT_CANDIDATE_NOT_FOUND
507 * - BATADV_DAT_CANDIDATE_ORIG
508 * @orig_node: if type is BATADV_DAT_CANDIDATE_ORIG this field points to the
509 * corresponding originator node structure
510 */
511struct batadv_dat_candidate {
512 int type;
513 struct batadv_orig_node *orig_node;
514};
515
440#endif /* _NET_BATMAN_ADV_TYPES_H_ */ 516#endif /* _NET_BATMAN_ADV_TYPES_H_ */
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index f39723281ca1..10aff49fcf25 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -291,14 +291,118 @@ out:
291 return ret; 291 return ret;
292} 292}
293 293
294int batadv_unicast_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv) 294/**
295 * batadv_unicast_push_and_fill_skb - extends the buffer and initializes the
296 * common fields for unicast packets
297 * @skb: packet
298 * @hdr_size: amount of bytes to push at the beginning of the skb
299 * @orig_node: the destination node
300 *
301 * Returns false if the buffer extension was not possible or true otherwise
302 */
303static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
304 struct batadv_orig_node *orig_node)
305{
306 struct batadv_unicast_packet *unicast_packet;
307 uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
308
309 if (batadv_skb_head_push(skb, hdr_size) < 0)
310 return false;
311
312 unicast_packet = (struct batadv_unicast_packet *)skb->data;
313 unicast_packet->header.version = BATADV_COMPAT_VERSION;
314 /* batman packet type: unicast */
315 unicast_packet->header.packet_type = BATADV_UNICAST;
316 /* set unicast ttl */
317 unicast_packet->header.ttl = BATADV_TTL;
318 /* copy the destination for faster routing */
319 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
320 /* set the destination tt version number */
321 unicast_packet->ttvn = ttvn;
322
323 return true;
324}
325
326/**
327 * batadv_unicast_prepare_skb - encapsulate an skb with a unicast header
328 * @skb: the skb containing the payload to encapsulate
329 * @orig_node: the destination node
330 *
331 * Returns false if the payload could not be encapsulated or true otherwise
332 */
333static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
334 struct batadv_orig_node *orig_node)
335{
336 size_t uni_size = sizeof(struct batadv_unicast_packet);
337 return batadv_unicast_push_and_fill_skb(skb, uni_size, orig_node);
338}
339
340/**
341 * batadv_unicast_4addr_prepare_skb - encapsulate an skb with a unicast4addr
342 * header
343 * @bat_priv: the bat priv with all the soft interface information
344 * @skb: the skb containing the payload to encapsulate
345 * @orig_node: the destination node
346 * @packet_subtype: the batman 4addr packet subtype to use
347 *
348 * Returns false if the payload could not be encapsulated or true otherwise
349 */
350bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
351 struct sk_buff *skb,
352 struct batadv_orig_node *orig,
353 int packet_subtype)
354{
355 struct batadv_hard_iface *primary_if;
356 struct batadv_unicast_4addr_packet *unicast_4addr_packet;
357 bool ret = false;
358
359 primary_if = batadv_primary_if_get_selected(bat_priv);
360 if (!primary_if)
361 goto out;
362
363 /* pull the header space and fill the unicast_packet substructure.
364 * We can do that because the first member of the unicast_4addr_packet
365 * is of type struct unicast_packet
366 */
367 if (!batadv_unicast_push_and_fill_skb(skb,
368 sizeof(*unicast_4addr_packet),
369 orig))
370 goto out;
371
372 unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
373 unicast_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
374 memcpy(unicast_4addr_packet->src, primary_if->net_dev->dev_addr,
375 ETH_ALEN);
376 unicast_4addr_packet->subtype = packet_subtype;
377 unicast_4addr_packet->reserved = 0;
378
379 ret = true;
380out:
381 if (primary_if)
382 batadv_hardif_free_ref(primary_if);
383 return ret;
384}
385
386/**
387 * batadv_unicast_generic_send_skb - send an skb as unicast
388 * @bat_priv: the bat priv with all the soft interface information
389 * @skb: payload to send
390 * @packet_type: the batman unicast packet type to use
391 * @packet_subtype: the batman packet subtype. It is ignored if packet_type is
392 * not BATADV_UNICAT_4ADDR
393 *
394 * Returns 1 in case of error or 0 otherwise
395 */
396int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
397 struct sk_buff *skb, int packet_type,
398 int packet_subtype)
295{ 399{
296 struct ethhdr *ethhdr = (struct ethhdr *)skb->data; 400 struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
297 struct batadv_unicast_packet *unicast_packet; 401 struct batadv_unicast_packet *unicast_packet;
298 struct batadv_orig_node *orig_node; 402 struct batadv_orig_node *orig_node;
299 struct batadv_neigh_node *neigh_node; 403 struct batadv_neigh_node *neigh_node;
300 int data_len = skb->len; 404 int data_len = skb->len;
301 int ret = 1; 405 int ret = NET_RX_DROP;
302 unsigned int dev_mtu; 406 unsigned int dev_mtu;
303 407
304 /* get routing information */ 408 /* get routing information */
@@ -324,21 +428,23 @@ find_router:
324 if (!neigh_node) 428 if (!neigh_node)
325 goto out; 429 goto out;
326 430
327 if (batadv_skb_head_push(skb, sizeof(*unicast_packet)) < 0) 431 switch (packet_type) {
432 case BATADV_UNICAST:
433 batadv_unicast_prepare_skb(skb, orig_node);
434 break;
435 case BATADV_UNICAST_4ADDR:
436 batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
437 packet_subtype);
438 break;
439 default:
440 /* this function supports UNICAST and UNICAST_4ADDR only. It
441 * should never be invoked with any other packet type
442 */
328 goto out; 443 goto out;
444 }
329 445
330 unicast_packet = (struct batadv_unicast_packet *)skb->data; 446 unicast_packet = (struct batadv_unicast_packet *)skb->data;
331 447
332 unicast_packet->header.version = BATADV_COMPAT_VERSION;
333 /* batman packet type: unicast */
334 unicast_packet->header.packet_type = BATADV_UNICAST;
335 /* set unicast ttl */
336 unicast_packet->header.ttl = BATADV_TTL;
337 /* copy the destination for faster routing */
338 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
339 /* set the destination tt version number */
340 unicast_packet->ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
341
342 /* inform the destination node that we are still missing a correct route 448 /* inform the destination node that we are still missing a correct route
343 * for this client. The destination will receive this packet and will 449 * for this client. The destination will receive this packet and will
344 * try to reroute it because the ttvn contained in the header is less 450 * try to reroute it because the ttvn contained in the header is less
@@ -348,7 +454,9 @@ find_router:
348 unicast_packet->ttvn = unicast_packet->ttvn - 1; 454 unicast_packet->ttvn = unicast_packet->ttvn - 1;
349 455
350 dev_mtu = neigh_node->if_incoming->net_dev->mtu; 456 dev_mtu = neigh_node->if_incoming->net_dev->mtu;
351 if (atomic_read(&bat_priv->fragmentation) && 457 /* fragmentation mechanism only works for UNICAST (now) */
458 if (packet_type == BATADV_UNICAST &&
459 atomic_read(&bat_priv->fragmentation) &&
352 data_len + sizeof(*unicast_packet) > dev_mtu) { 460 data_len + sizeof(*unicast_packet) > dev_mtu) {
353 /* send frag skb decreases ttl */ 461 /* send frag skb decreases ttl */
354 unicast_packet->header.ttl++; 462 unicast_packet->header.ttl++;
@@ -358,16 +466,15 @@ find_router:
358 goto out; 466 goto out;
359 } 467 }
360 468
361 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 469 if (batadv_send_skb_to_orig(skb, orig_node, NULL))
362 ret = 0; 470 ret = 0;
363 goto out;
364 471
365out: 472out:
366 if (neigh_node) 473 if (neigh_node)
367 batadv_neigh_node_free_ref(neigh_node); 474 batadv_neigh_node_free_ref(neigh_node);
368 if (orig_node) 475 if (orig_node)
369 batadv_orig_node_free_ref(orig_node); 476 batadv_orig_node_free_ref(orig_node);
370 if (ret == 1) 477 if (ret == NET_RX_DROP)
371 kfree_skb(skb); 478 kfree_skb(skb);
372 return ret; 479 return ret;
373} 480}
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
index 1c46e2eb1ef9..61abba58bd8f 100644
--- a/net/batman-adv/unicast.h
+++ b/net/batman-adv/unicast.h
@@ -29,10 +29,44 @@ int batadv_frag_reassemble_skb(struct sk_buff *skb,
29 struct batadv_priv *bat_priv, 29 struct batadv_priv *bat_priv,
30 struct sk_buff **new_skb); 30 struct sk_buff **new_skb);
31void batadv_frag_list_free(struct list_head *head); 31void batadv_frag_list_free(struct list_head *head);
32int batadv_unicast_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv);
33int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv, 32int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
34 struct batadv_hard_iface *hard_iface, 33 struct batadv_hard_iface *hard_iface,
35 const uint8_t dstaddr[]); 34 const uint8_t dstaddr[]);
35bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
36 struct sk_buff *skb,
37 struct batadv_orig_node *orig_node,
38 int packet_subtype);
39int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
40 struct sk_buff *skb, int packet_type,
41 int packet_subtype);
42
43
44/**
45 * batadv_unicast_send_skb - send the skb encapsulated in a unicast packet
46 * @bat_priv: the bat priv with all the soft interface information
47 * @skb: the payload to send
48 */
49static inline int batadv_unicast_send_skb(struct batadv_priv *bat_priv,
50 struct sk_buff *skb)
51{
52 return batadv_unicast_generic_send_skb(bat_priv, skb, BATADV_UNICAST,
53 0);
54}
55
56/**
57 * batadv_unicast_send_skb - send the skb encapsulated in a unicast4addr packet
58 * @bat_priv: the bat priv with all the soft interface information
59 * @skb: the payload to send
60 * @packet_subtype: the batman 4addr packet subtype to use
61 */
62static inline int batadv_unicast_4addr_send_skb(struct batadv_priv *bat_priv,
63 struct sk_buff *skb,
64 int packet_subtype)
65{
66 return batadv_unicast_generic_send_skb(bat_priv, skb,
67 BATADV_UNICAST_4ADDR,
68 packet_subtype);
69}
36 70
37static inline int batadv_frag_can_reassemble(const struct sk_buff *skb, int mtu) 71static inline int batadv_frag_can_reassemble(const struct sk_buff *skb, int mtu)
38{ 72{
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index 5abd1454fb07..0f65a9de5f74 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -396,12 +396,12 @@ batadv_add_packet(struct batadv_priv *bat_priv,
396 return NULL; 396 return NULL;
397 397
398 len = sizeof(*packet) + vis_info_len; 398 len = sizeof(*packet) + vis_info_len;
399 info->skb_packet = dev_alloc_skb(len + ETH_HLEN); 399 info->skb_packet = dev_alloc_skb(len + ETH_HLEN + NET_IP_ALIGN);
400 if (!info->skb_packet) { 400 if (!info->skb_packet) {
401 kfree(info); 401 kfree(info);
402 return NULL; 402 return NULL;
403 } 403 }
404 skb_reserve(info->skb_packet, ETH_HLEN); 404 skb_reserve(info->skb_packet, ETH_HLEN + NET_IP_ALIGN);
405 packet = (struct batadv_vis_packet *)skb_put(info->skb_packet, len); 405 packet = (struct batadv_vis_packet *)skb_put(info->skb_packet, len);
406 406
407 kref_init(&info->refcount); 407 kref_init(&info->refcount);
@@ -698,15 +698,12 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
698static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv, 698static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv,
699 struct batadv_vis_info *info) 699 struct batadv_vis_info *info)
700{ 700{
701 struct batadv_neigh_node *router;
702 struct batadv_hashtable *hash = bat_priv->orig_hash; 701 struct batadv_hashtable *hash = bat_priv->orig_hash;
703 struct hlist_node *node; 702 struct hlist_node *node;
704 struct hlist_head *head; 703 struct hlist_head *head;
705 struct batadv_orig_node *orig_node; 704 struct batadv_orig_node *orig_node;
706 struct batadv_vis_packet *packet; 705 struct batadv_vis_packet *packet;
707 struct sk_buff *skb; 706 struct sk_buff *skb;
708 struct batadv_hard_iface *hard_iface;
709 uint8_t dstaddr[ETH_ALEN];
710 uint32_t i; 707 uint32_t i;
711 708
712 709
@@ -722,30 +719,20 @@ static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv,
722 if (!(orig_node->flags & BATADV_VIS_SERVER)) 719 if (!(orig_node->flags & BATADV_VIS_SERVER))
723 continue; 720 continue;
724 721
725 router = batadv_orig_node_get_router(orig_node);
726 if (!router)
727 continue;
728
729 /* don't send it if we already received the packet from 722 /* don't send it if we already received the packet from
730 * this node. 723 * this node.
731 */ 724 */
732 if (batadv_recv_list_is_in(bat_priv, &info->recv_list, 725 if (batadv_recv_list_is_in(bat_priv, &info->recv_list,
733 orig_node->orig)) { 726 orig_node->orig))
734 batadv_neigh_node_free_ref(router);
735 continue; 727 continue;
736 }
737 728
738 memcpy(packet->target_orig, orig_node->orig, ETH_ALEN); 729 memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
739 hard_iface = router->if_incoming;
740 memcpy(dstaddr, router->addr, ETH_ALEN);
741
742 batadv_neigh_node_free_ref(router);
743
744 skb = skb_clone(info->skb_packet, GFP_ATOMIC); 730 skb = skb_clone(info->skb_packet, GFP_ATOMIC);
745 if (skb) 731 if (!skb)
746 batadv_send_skb_packet(skb, hard_iface, 732 continue;
747 dstaddr);
748 733
734 if (!batadv_send_skb_to_orig(skb, orig_node, NULL))
735 kfree_skb(skb);
749 } 736 }
750 rcu_read_unlock(); 737 rcu_read_unlock();
751 } 738 }
@@ -755,7 +742,6 @@ static void batadv_unicast_vis_packet(struct batadv_priv *bat_priv,
755 struct batadv_vis_info *info) 742 struct batadv_vis_info *info)
756{ 743{
757 struct batadv_orig_node *orig_node; 744 struct batadv_orig_node *orig_node;
758 struct batadv_neigh_node *router = NULL;
759 struct sk_buff *skb; 745 struct sk_buff *skb;
760 struct batadv_vis_packet *packet; 746 struct batadv_vis_packet *packet;
761 747
@@ -765,17 +751,14 @@ static void batadv_unicast_vis_packet(struct batadv_priv *bat_priv,
765 if (!orig_node) 751 if (!orig_node)
766 goto out; 752 goto out;
767 753
768 router = batadv_orig_node_get_router(orig_node); 754 skb = skb_clone(info->skb_packet, GFP_ATOMIC);
769 if (!router) 755 if (!skb)
770 goto out; 756 goto out;
771 757
772 skb = skb_clone(info->skb_packet, GFP_ATOMIC); 758 if (!batadv_send_skb_to_orig(skb, orig_node, NULL))
773 if (skb) 759 kfree_skb(skb);
774 batadv_send_skb_packet(skb, router->if_incoming, router->addr);
775 760
776out: 761out:
777 if (router)
778 batadv_neigh_node_free_ref(router);
779 if (orig_node) 762 if (orig_node)
780 batadv_orig_node_free_ref(orig_node); 763 batadv_orig_node_free_ref(orig_node);
781} 764}
@@ -873,12 +856,13 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
873 if (!bat_priv->vis.my_info) 856 if (!bat_priv->vis.my_info)
874 goto err; 857 goto err;
875 858
876 len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN; 859 len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE;
860 len += ETH_HLEN + NET_IP_ALIGN;
877 bat_priv->vis.my_info->skb_packet = dev_alloc_skb(len); 861 bat_priv->vis.my_info->skb_packet = dev_alloc_skb(len);
878 if (!bat_priv->vis.my_info->skb_packet) 862 if (!bat_priv->vis.my_info->skb_packet)
879 goto free_info; 863 goto free_info;
880 864
881 skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN); 865 skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN + NET_IP_ALIGN);
882 tmp_skb = bat_priv->vis.my_info->skb_packet; 866 tmp_skb = bat_priv->vis.my_info->skb_packet;
883 packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet)); 867 packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet));
884 868
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 3537d385035e..d3f3f7b1d32c 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -11,6 +11,7 @@ menuconfig BT
11 select CRYPTO_BLKCIPHER 11 select CRYPTO_BLKCIPHER
12 select CRYPTO_AES 12 select CRYPTO_AES
13 select CRYPTO_ECB 13 select CRYPTO_ECB
14 select CRYPTO_SHA256
14 help 15 help
15 Bluetooth is low-cost, low-power, short-range wireless technology. 16 Bluetooth is low-cost, low-power, short-range wireless technology.
16 It was designed as a replacement for cables and other short-range 17 It was designed as a replacement for cables and other short-range
@@ -47,4 +48,3 @@ source "net/bluetooth/cmtp/Kconfig"
47source "net/bluetooth/hidp/Kconfig" 48source "net/bluetooth/hidp/Kconfig"
48 49
49source "drivers/bluetooth/Kconfig" 50source "drivers/bluetooth/Kconfig"
50
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index fa6d94a4602a..dea6a287daca 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -10,4 +10,4 @@ obj-$(CONFIG_BT_HIDP) += hidp/
10 10
11bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ 11bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
12 hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \ 12 hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
13 a2mp.o 13 a2mp.o amp.o
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 0760d1fed6f0..2f67d5ecc907 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -16,6 +16,11 @@
16#include <net/bluetooth/hci_core.h> 16#include <net/bluetooth/hci_core.h>
17#include <net/bluetooth/l2cap.h> 17#include <net/bluetooth/l2cap.h>
18#include <net/bluetooth/a2mp.h> 18#include <net/bluetooth/a2mp.h>
19#include <net/bluetooth/amp.h>
20
21/* Global AMP Manager list */
22LIST_HEAD(amp_mgr_list);
23DEFINE_MUTEX(amp_mgr_list_lock);
19 24
20/* A2MP build & send command helper functions */ 25/* A2MP build & send command helper functions */
21static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data) 26static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
@@ -37,8 +42,7 @@ static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
37 return cmd; 42 return cmd;
38} 43}
39 44
40static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, 45void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data)
41 void *data)
42{ 46{
43 struct l2cap_chan *chan = mgr->a2mp_chan; 47 struct l2cap_chan *chan = mgr->a2mp_chan;
44 struct a2mp_cmd *cmd; 48 struct a2mp_cmd *cmd;
@@ -63,6 +67,14 @@ static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len,
63 kfree(cmd); 67 kfree(cmd);
64} 68}
65 69
70u8 __next_ident(struct amp_mgr *mgr)
71{
72 if (++mgr->ident == 0)
73 mgr->ident = 1;
74
75 return mgr->ident;
76}
77
66static inline void __a2mp_cl_bredr(struct a2mp_cl *cl) 78static inline void __a2mp_cl_bredr(struct a2mp_cl *cl)
67{ 79{
68 cl->id = 0; 80 cl->id = 0;
@@ -161,6 +173,83 @@ static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
161 return 0; 173 return 0;
162} 174}
163 175
176static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
177 struct a2mp_cmd *hdr)
178{
179 struct a2mp_discov_rsp *rsp = (void *) skb->data;
180 u16 len = le16_to_cpu(hdr->len);
181 struct a2mp_cl *cl;
182 u16 ext_feat;
183 bool found = false;
184
185 if (len < sizeof(*rsp))
186 return -EINVAL;
187
188 len -= sizeof(*rsp);
189 skb_pull(skb, sizeof(*rsp));
190
191 ext_feat = le16_to_cpu(rsp->ext_feat);
192
193 BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(rsp->mtu), ext_feat);
194
195 /* check that packet is not broken for now */
196 while (ext_feat & A2MP_FEAT_EXT) {
197 if (len < sizeof(ext_feat))
198 return -EINVAL;
199
200 ext_feat = get_unaligned_le16(skb->data);
201 BT_DBG("efm 0x%4.4x", ext_feat);
202 len -= sizeof(ext_feat);
203 skb_pull(skb, sizeof(ext_feat));
204 }
205
206 cl = (void *) skb->data;
207 while (len >= sizeof(*cl)) {
208 BT_DBG("Remote AMP id %d type %d status %d", cl->id, cl->type,
209 cl->status);
210
211 if (cl->id != HCI_BREDR_ID && cl->type == HCI_AMP) {
212 struct a2mp_info_req req;
213
214 found = true;
215 req.id = cl->id;
216 a2mp_send(mgr, A2MP_GETINFO_REQ, __next_ident(mgr),
217 sizeof(req), &req);
218 }
219
220 len -= sizeof(*cl);
221 cl = (void *) skb_pull(skb, sizeof(*cl));
222 }
223
224 /* Fall back to L2CAP init sequence */
225 if (!found) {
226 struct l2cap_conn *conn = mgr->l2cap_conn;
227 struct l2cap_chan *chan;
228
229 mutex_lock(&conn->chan_lock);
230
231 list_for_each_entry(chan, &conn->chan_l, list) {
232
233 BT_DBG("chan %p state %s", chan,
234 state_to_string(chan->state));
235
236 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP)
237 continue;
238
239 l2cap_chan_lock(chan);
240
241 if (chan->state == BT_CONNECT)
242 l2cap_send_conn_req(chan);
243
244 l2cap_chan_unlock(chan);
245 }
246
247 mutex_unlock(&conn->chan_lock);
248 }
249
250 return 0;
251}
252
164static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb, 253static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
165 struct a2mp_cmd *hdr) 254 struct a2mp_cmd *hdr)
166{ 255{
@@ -181,7 +270,6 @@ static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
181 struct a2mp_cmd *hdr) 270 struct a2mp_cmd *hdr)
182{ 271{
183 struct a2mp_info_req *req = (void *) skb->data; 272 struct a2mp_info_req *req = (void *) skb->data;
184 struct a2mp_info_rsp rsp;
185 struct hci_dev *hdev; 273 struct hci_dev *hdev;
186 274
187 if (le16_to_cpu(hdr->len) < sizeof(*req)) 275 if (le16_to_cpu(hdr->len) < sizeof(*req))
@@ -189,53 +277,93 @@ static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
189 277
190 BT_DBG("id %d", req->id); 278 BT_DBG("id %d", req->id);
191 279
192 rsp.id = req->id;
193 rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
194
195 hdev = hci_dev_get(req->id); 280 hdev = hci_dev_get(req->id);
196 if (hdev && hdev->amp_type != HCI_BREDR) { 281 if (!hdev || hdev->dev_type != HCI_AMP) {
197 rsp.status = 0; 282 struct a2mp_info_rsp rsp;
198 rsp.total_bw = cpu_to_le32(hdev->amp_total_bw); 283
199 rsp.max_bw = cpu_to_le32(hdev->amp_max_bw); 284 rsp.id = req->id;
200 rsp.min_latency = cpu_to_le32(hdev->amp_min_latency); 285 rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
201 rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap); 286
202 rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size); 287 a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp),
288 &rsp);
289
290 goto done;
203 } 291 }
204 292
293 mgr->state = READ_LOC_AMP_INFO;
294 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
295
296done:
205 if (hdev) 297 if (hdev)
206 hci_dev_put(hdev); 298 hci_dev_put(hdev);
207 299
208 a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp), &rsp);
209
210 skb_pull(skb, sizeof(*req)); 300 skb_pull(skb, sizeof(*req));
211 return 0; 301 return 0;
212} 302}
213 303
304static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
305 struct a2mp_cmd *hdr)
306{
307 struct a2mp_info_rsp *rsp = (struct a2mp_info_rsp *) skb->data;
308 struct a2mp_amp_assoc_req req;
309 struct amp_ctrl *ctrl;
310
311 if (le16_to_cpu(hdr->len) < sizeof(*rsp))
312 return -EINVAL;
313
314 BT_DBG("id %d status 0x%2.2x", rsp->id, rsp->status);
315
316 if (rsp->status)
317 return -EINVAL;
318
319 ctrl = amp_ctrl_add(mgr, rsp->id);
320 if (!ctrl)
321 return -ENOMEM;
322
323 req.id = rsp->id;
324 a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req),
325 &req);
326
327 skb_pull(skb, sizeof(*rsp));
328 return 0;
329}
330
214static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb, 331static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
215 struct a2mp_cmd *hdr) 332 struct a2mp_cmd *hdr)
216{ 333{
217 struct a2mp_amp_assoc_req *req = (void *) skb->data; 334 struct a2mp_amp_assoc_req *req = (void *) skb->data;
218 struct hci_dev *hdev; 335 struct hci_dev *hdev;
336 struct amp_mgr *tmp;
219 337
220 if (le16_to_cpu(hdr->len) < sizeof(*req)) 338 if (le16_to_cpu(hdr->len) < sizeof(*req))
221 return -EINVAL; 339 return -EINVAL;
222 340
223 BT_DBG("id %d", req->id); 341 BT_DBG("id %d", req->id);
224 342
343 /* Make sure that other request is not processed */
344 tmp = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
345
225 hdev = hci_dev_get(req->id); 346 hdev = hci_dev_get(req->id);
226 if (!hdev || hdev->amp_type == HCI_BREDR) { 347 if (!hdev || hdev->amp_type == HCI_BREDR || tmp) {
227 struct a2mp_amp_assoc_rsp rsp; 348 struct a2mp_amp_assoc_rsp rsp;
228 rsp.id = req->id; 349 rsp.id = req->id;
229 rsp.status = A2MP_STATUS_INVALID_CTRL_ID; 350
351 if (tmp) {
352 rsp.status = A2MP_STATUS_COLLISION_OCCURED;
353 amp_mgr_put(tmp);
354 } else {
355 rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
356 }
230 357
231 a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp), 358 a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp),
232 &rsp); 359 &rsp);
233 goto clean; 360
361 goto done;
234 } 362 }
235 363
236 /* Placeholder for HCI Read AMP Assoc */ 364 amp_read_loc_assoc(hdev, mgr);
237 365
238clean: 366done:
239 if (hdev) 367 if (hdev)
240 hci_dev_put(hdev); 368 hci_dev_put(hdev);
241 369
@@ -243,6 +371,68 @@ clean:
243 return 0; 371 return 0;
244} 372}
245 373
374static int a2mp_getampassoc_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
375 struct a2mp_cmd *hdr)
376{
377 struct a2mp_amp_assoc_rsp *rsp = (void *) skb->data;
378 u16 len = le16_to_cpu(hdr->len);
379 struct hci_dev *hdev;
380 struct amp_ctrl *ctrl;
381 struct hci_conn *hcon;
382 size_t assoc_len;
383
384 if (len < sizeof(*rsp))
385 return -EINVAL;
386
387 assoc_len = len - sizeof(*rsp);
388
389 BT_DBG("id %d status 0x%2.2x assoc len %zu", rsp->id, rsp->status,
390 assoc_len);
391
392 if (rsp->status)
393 return -EINVAL;
394
395 /* Save remote ASSOC data */
396 ctrl = amp_ctrl_lookup(mgr, rsp->id);
397 if (ctrl) {
398 u8 *assoc;
399
400 assoc = kzalloc(assoc_len, GFP_KERNEL);
401 if (!assoc) {
402 amp_ctrl_put(ctrl);
403 return -ENOMEM;
404 }
405
406 memcpy(assoc, rsp->amp_assoc, assoc_len);
407 ctrl->assoc = assoc;
408 ctrl->assoc_len = assoc_len;
409 ctrl->assoc_rem_len = assoc_len;
410 ctrl->assoc_len_so_far = 0;
411
412 amp_ctrl_put(ctrl);
413 }
414
415 /* Create Phys Link */
416 hdev = hci_dev_get(rsp->id);
417 if (!hdev)
418 return -EINVAL;
419
420 hcon = phylink_add(hdev, mgr, rsp->id, true);
421 if (!hcon)
422 goto done;
423
424 BT_DBG("Created hcon %p: loc:%d -> rem:%d", hcon, hdev->id, rsp->id);
425
426 mgr->bredr_chan->remote_amp_id = rsp->id;
427
428 amp_create_phylink(hdev, mgr, hcon);
429
430done:
431 hci_dev_put(hdev);
432 skb_pull(skb, len);
433 return 0;
434}
435
246static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, 436static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
247 struct a2mp_cmd *hdr) 437 struct a2mp_cmd *hdr)
248{ 438{
@@ -250,6 +440,8 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
250 440
251 struct a2mp_physlink_rsp rsp; 441 struct a2mp_physlink_rsp rsp;
252 struct hci_dev *hdev; 442 struct hci_dev *hdev;
443 struct hci_conn *hcon;
444 struct amp_ctrl *ctrl;
253 445
254 if (le16_to_cpu(hdr->len) < sizeof(*req)) 446 if (le16_to_cpu(hdr->len) < sizeof(*req))
255 return -EINVAL; 447 return -EINVAL;
@@ -265,9 +457,43 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
265 goto send_rsp; 457 goto send_rsp;
266 } 458 }
267 459
268 /* TODO process physlink create */ 460 ctrl = amp_ctrl_lookup(mgr, rsp.remote_id);
461 if (!ctrl) {
462 ctrl = amp_ctrl_add(mgr, rsp.remote_id);
463 if (ctrl) {
464 amp_ctrl_get(ctrl);
465 } else {
466 rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
467 goto send_rsp;
468 }
469 }
269 470
270 rsp.status = A2MP_STATUS_SUCCESS; 471 if (ctrl) {
472 size_t assoc_len = le16_to_cpu(hdr->len) - sizeof(*req);
473 u8 *assoc;
474
475 assoc = kzalloc(assoc_len, GFP_KERNEL);
476 if (!assoc) {
477 amp_ctrl_put(ctrl);
478 return -ENOMEM;
479 }
480
481 memcpy(assoc, req->amp_assoc, assoc_len);
482 ctrl->assoc = assoc;
483 ctrl->assoc_len = assoc_len;
484 ctrl->assoc_rem_len = assoc_len;
485 ctrl->assoc_len_so_far = 0;
486
487 amp_ctrl_put(ctrl);
488 }
489
490 hcon = phylink_add(hdev, mgr, req->local_id, false);
491 if (hcon) {
492 amp_accept_phylink(hdev, mgr, hcon);
493 rsp.status = A2MP_STATUS_SUCCESS;
494 } else {
495 rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
496 }
271 497
272send_rsp: 498send_rsp:
273 if (hdev) 499 if (hdev)
@@ -286,6 +512,7 @@ static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
286 struct a2mp_physlink_req *req = (void *) skb->data; 512 struct a2mp_physlink_req *req = (void *) skb->data;
287 struct a2mp_physlink_rsp rsp; 513 struct a2mp_physlink_rsp rsp;
288 struct hci_dev *hdev; 514 struct hci_dev *hdev;
515 struct hci_conn *hcon;
289 516
290 if (le16_to_cpu(hdr->len) < sizeof(*req)) 517 if (le16_to_cpu(hdr->len) < sizeof(*req))
291 return -EINVAL; 518 return -EINVAL;
@@ -296,14 +523,22 @@ static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
296 rsp.remote_id = req->local_id; 523 rsp.remote_id = req->local_id;
297 rsp.status = A2MP_STATUS_SUCCESS; 524 rsp.status = A2MP_STATUS_SUCCESS;
298 525
299 hdev = hci_dev_get(req->local_id); 526 hdev = hci_dev_get(req->remote_id);
300 if (!hdev) { 527 if (!hdev) {
301 rsp.status = A2MP_STATUS_INVALID_CTRL_ID; 528 rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
302 goto send_rsp; 529 goto send_rsp;
303 } 530 }
304 531
532 hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, mgr->l2cap_conn->dst);
533 if (!hcon) {
534 BT_ERR("No phys link exist");
535 rsp.status = A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS;
536 goto clean;
537 }
538
305 /* TODO Disconnect Phys Link here */ 539 /* TODO Disconnect Phys Link here */
306 540
541clean:
307 hci_dev_put(hdev); 542 hci_dev_put(hdev);
308 543
309send_rsp: 544send_rsp:
@@ -377,10 +612,19 @@ static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
377 err = a2mp_discphyslink_req(mgr, skb, hdr); 612 err = a2mp_discphyslink_req(mgr, skb, hdr);
378 break; 613 break;
379 614
380 case A2MP_CHANGE_RSP:
381 case A2MP_DISCOVER_RSP: 615 case A2MP_DISCOVER_RSP:
616 err = a2mp_discover_rsp(mgr, skb, hdr);
617 break;
618
382 case A2MP_GETINFO_RSP: 619 case A2MP_GETINFO_RSP:
620 err = a2mp_getinfo_rsp(mgr, skb, hdr);
621 break;
622
383 case A2MP_GETAMPASSOC_RSP: 623 case A2MP_GETAMPASSOC_RSP:
624 err = a2mp_getampassoc_rsp(mgr, skb, hdr);
625 break;
626
627 case A2MP_CHANGE_RSP:
384 case A2MP_CREATEPHYSLINK_RSP: 628 case A2MP_CREATEPHYSLINK_RSP:
385 case A2MP_DISCONNPHYSLINK_RSP: 629 case A2MP_DISCONNPHYSLINK_RSP:
386 err = a2mp_cmd_rsp(mgr, skb, hdr); 630 err = a2mp_cmd_rsp(mgr, skb, hdr);
@@ -455,9 +699,10 @@ static struct l2cap_ops a2mp_chan_ops = {
455 .new_connection = l2cap_chan_no_new_connection, 699 .new_connection = l2cap_chan_no_new_connection,
456 .teardown = l2cap_chan_no_teardown, 700 .teardown = l2cap_chan_no_teardown,
457 .ready = l2cap_chan_no_ready, 701 .ready = l2cap_chan_no_ready,
702 .defer = l2cap_chan_no_defer,
458}; 703};
459 704
460static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn) 705static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked)
461{ 706{
462 struct l2cap_chan *chan; 707 struct l2cap_chan *chan;
463 int err; 708 int err;
@@ -492,7 +737,10 @@ static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn)
492 737
493 chan->conf_state = 0; 738 chan->conf_state = 0;
494 739
495 l2cap_chan_add(conn, chan); 740 if (locked)
741 __l2cap_chan_add(conn, chan);
742 else
743 l2cap_chan_add(conn, chan);
496 744
497 chan->remote_mps = chan->omtu; 745 chan->remote_mps = chan->omtu;
498 chan->mps = chan->omtu; 746 chan->mps = chan->omtu;
@@ -503,11 +751,13 @@ static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn)
503} 751}
504 752
505/* AMP Manager functions */ 753/* AMP Manager functions */
506void amp_mgr_get(struct amp_mgr *mgr) 754struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr)
507{ 755{
508 BT_DBG("mgr %p orig refcnt %d", mgr, atomic_read(&mgr->kref.refcount)); 756 BT_DBG("mgr %p orig refcnt %d", mgr, atomic_read(&mgr->kref.refcount));
509 757
510 kref_get(&mgr->kref); 758 kref_get(&mgr->kref);
759
760 return mgr;
511} 761}
512 762
513static void amp_mgr_destroy(struct kref *kref) 763static void amp_mgr_destroy(struct kref *kref)
@@ -516,6 +766,11 @@ static void amp_mgr_destroy(struct kref *kref)
516 766
517 BT_DBG("mgr %p", mgr); 767 BT_DBG("mgr %p", mgr);
518 768
769 mutex_lock(&amp_mgr_list_lock);
770 list_del(&mgr->list);
771 mutex_unlock(&amp_mgr_list_lock);
772
773 amp_ctrl_list_flush(mgr);
519 kfree(mgr); 774 kfree(mgr);
520} 775}
521 776
@@ -526,7 +781,7 @@ int amp_mgr_put(struct amp_mgr *mgr)
526 return kref_put(&mgr->kref, &amp_mgr_destroy); 781 return kref_put(&mgr->kref, &amp_mgr_destroy);
527} 782}
528 783
529static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn) 784static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn, bool locked)
530{ 785{
531 struct amp_mgr *mgr; 786 struct amp_mgr *mgr;
532 struct l2cap_chan *chan; 787 struct l2cap_chan *chan;
@@ -539,7 +794,7 @@ static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn)
539 794
540 mgr->l2cap_conn = conn; 795 mgr->l2cap_conn = conn;
541 796
542 chan = a2mp_chan_open(conn); 797 chan = a2mp_chan_open(conn, locked);
543 if (!chan) { 798 if (!chan) {
544 kfree(mgr); 799 kfree(mgr);
545 return NULL; 800 return NULL;
@@ -552,6 +807,14 @@ static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn)
552 807
553 kref_init(&mgr->kref); 808 kref_init(&mgr->kref);
554 809
810 /* Remote AMP ctrl list initialization */
811 INIT_LIST_HEAD(&mgr->amp_ctrls);
812 mutex_init(&mgr->amp_ctrls_lock);
813
814 mutex_lock(&amp_mgr_list_lock);
815 list_add(&mgr->list, &amp_mgr_list);
816 mutex_unlock(&amp_mgr_list_lock);
817
555 return mgr; 818 return mgr;
556} 819}
557 820
@@ -560,7 +823,7 @@ struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
560{ 823{
561 struct amp_mgr *mgr; 824 struct amp_mgr *mgr;
562 825
563 mgr = amp_mgr_create(conn); 826 mgr = amp_mgr_create(conn, false);
564 if (!mgr) { 827 if (!mgr) {
565 BT_ERR("Could not create AMP manager"); 828 BT_ERR("Could not create AMP manager");
566 return NULL; 829 return NULL;
@@ -570,3 +833,139 @@ struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
570 833
571 return mgr->a2mp_chan; 834 return mgr->a2mp_chan;
572} 835}
836
837struct amp_mgr *amp_mgr_lookup_by_state(u8 state)
838{
839 struct amp_mgr *mgr;
840
841 mutex_lock(&amp_mgr_list_lock);
842 list_for_each_entry(mgr, &amp_mgr_list, list) {
843 if (mgr->state == state) {
844 amp_mgr_get(mgr);
845 mutex_unlock(&amp_mgr_list_lock);
846 return mgr;
847 }
848 }
849 mutex_unlock(&amp_mgr_list_lock);
850
851 return NULL;
852}
853
854void a2mp_send_getinfo_rsp(struct hci_dev *hdev)
855{
856 struct amp_mgr *mgr;
857 struct a2mp_info_rsp rsp;
858
859 mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_INFO);
860 if (!mgr)
861 return;
862
863 BT_DBG("%s mgr %p", hdev->name, mgr);
864
865 rsp.id = hdev->id;
866 rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
867
868 if (hdev->amp_type != HCI_BREDR) {
869 rsp.status = 0;
870 rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
871 rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
872 rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
873 rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
874 rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
875 }
876
877 a2mp_send(mgr, A2MP_GETINFO_RSP, mgr->ident, sizeof(rsp), &rsp);
878 amp_mgr_put(mgr);
879}
880
881void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status)
882{
883 struct amp_mgr *mgr;
884 struct amp_assoc *loc_assoc = &hdev->loc_assoc;
885 struct a2mp_amp_assoc_rsp *rsp;
886 size_t len;
887
888 mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
889 if (!mgr)
890 return;
891
892 BT_DBG("%s mgr %p", hdev->name, mgr);
893
894 len = sizeof(struct a2mp_amp_assoc_rsp) + loc_assoc->len;
895 rsp = kzalloc(len, GFP_KERNEL);
896 if (!rsp) {
897 amp_mgr_put(mgr);
898 return;
899 }
900
901 rsp->id = hdev->id;
902
903 if (status) {
904 rsp->status = A2MP_STATUS_INVALID_CTRL_ID;
905 } else {
906 rsp->status = A2MP_STATUS_SUCCESS;
907 memcpy(rsp->amp_assoc, loc_assoc->data, loc_assoc->len);
908 }
909
910 a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, mgr->ident, len, rsp);
911 amp_mgr_put(mgr);
912 kfree(rsp);
913}
914
915void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status)
916{
917 struct amp_mgr *mgr;
918 struct amp_assoc *loc_assoc = &hdev->loc_assoc;
919 struct a2mp_physlink_req *req;
920 struct l2cap_chan *bredr_chan;
921 size_t len;
922
923 mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC_FINAL);
924 if (!mgr)
925 return;
926
927 len = sizeof(*req) + loc_assoc->len;
928
929 BT_DBG("%s mgr %p assoc_len %zu", hdev->name, mgr, len);
930
931 req = kzalloc(len, GFP_KERNEL);
932 if (!req) {
933 amp_mgr_put(mgr);
934 return;
935 }
936
937 bredr_chan = mgr->bredr_chan;
938 if (!bredr_chan)
939 goto clean;
940
941 req->local_id = hdev->id;
942 req->remote_id = bredr_chan->remote_amp_id;
943 memcpy(req->amp_assoc, loc_assoc->data, loc_assoc->len);
944
945 a2mp_send(mgr, A2MP_CREATEPHYSLINK_REQ, __next_ident(mgr), len, req);
946
947clean:
948 amp_mgr_put(mgr);
949 kfree(req);
950}
951
952void a2mp_discover_amp(struct l2cap_chan *chan)
953{
954 struct l2cap_conn *conn = chan->conn;
955 struct amp_mgr *mgr = conn->hcon->amp_mgr;
956 struct a2mp_discov_req req;
957
958 BT_DBG("chan %p conn %p mgr %p", chan, conn, mgr);
959
960 if (!mgr) {
961 mgr = amp_mgr_create(conn, true);
962 if (!mgr)
963 return;
964 }
965
966 mgr->bredr_chan = chan;
967
968 req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
969 req.ext_feat = 0;
970 a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req);
971}
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index ba033f09196e..5355df63d39b 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -569,7 +569,6 @@ static int bt_seq_show(struct seq_file *seq, void *v)
569{ 569{
570 struct bt_seq_state *s = seq->private; 570 struct bt_seq_state *s = seq->private;
571 struct bt_sock_list *l = s->l; 571 struct bt_sock_list *l = s->l;
572 bdaddr_t src_baswapped, dst_baswapped;
573 572
574 if (v == SEQ_START_TOKEN) { 573 if (v == SEQ_START_TOKEN) {
575 seq_puts(seq ,"sk RefCnt Rmem Wmem User Inode Src Dst Parent"); 574 seq_puts(seq ,"sk RefCnt Rmem Wmem User Inode Src Dst Parent");
@@ -583,18 +582,17 @@ static int bt_seq_show(struct seq_file *seq, void *v)
583 } else { 582 } else {
584 struct sock *sk = sk_entry(v); 583 struct sock *sk = sk_entry(v);
585 struct bt_sock *bt = bt_sk(sk); 584 struct bt_sock *bt = bt_sk(sk);
586 baswap(&src_baswapped, &bt->src);
587 baswap(&dst_baswapped, &bt->dst);
588 585
589 seq_printf(seq, "%pK %-6d %-6u %-6u %-6u %-6lu %pM %pM %-6lu", 586 seq_printf(seq,
587 "%pK %-6d %-6u %-6u %-6u %-6lu %pMR %pMR %-6lu",
590 sk, 588 sk,
591 atomic_read(&sk->sk_refcnt), 589 atomic_read(&sk->sk_refcnt),
592 sk_rmem_alloc_get(sk), 590 sk_rmem_alloc_get(sk),
593 sk_wmem_alloc_get(sk), 591 sk_wmem_alloc_get(sk),
594 from_kuid(seq_user_ns(seq), sock_i_uid(sk)), 592 from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
595 sock_i_ino(sk), 593 sock_i_ino(sk),
596 &src_baswapped, 594 &bt->src,
597 &dst_baswapped, 595 &bt->dst,
598 bt->parent? sock_i_ino(bt->parent): 0LU); 596 bt->parent? sock_i_ino(bt->parent): 0LU);
599 597
600 if (l->custom_seq_show) { 598 if (l->custom_seq_show) {
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
new file mode 100644
index 000000000000..1b0d92c0643a
--- /dev/null
+++ b/net/bluetooth/amp.c
@@ -0,0 +1,471 @@
1/*
2 Copyright (c) 2011,2012 Intel Corp.
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License version 2 and
6 only version 2 as published by the Free Software Foundation.
7
8 This program is distributed in the hope that it will be useful,
9 but WITHOUT ANY WARRANTY; without even the implied warranty of
10 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 GNU General Public License for more details.
12*/
13
14#include <net/bluetooth/bluetooth.h>
15#include <net/bluetooth/hci.h>
16#include <net/bluetooth/hci_core.h>
17#include <net/bluetooth/a2mp.h>
18#include <net/bluetooth/amp.h>
19#include <crypto/hash.h>
20
21/* Remote AMP Controllers interface */
22void amp_ctrl_get(struct amp_ctrl *ctrl)
23{
24 BT_DBG("ctrl %p orig refcnt %d", ctrl,
25 atomic_read(&ctrl->kref.refcount));
26
27 kref_get(&ctrl->kref);
28}
29
30static void amp_ctrl_destroy(struct kref *kref)
31{
32 struct amp_ctrl *ctrl = container_of(kref, struct amp_ctrl, kref);
33
34 BT_DBG("ctrl %p", ctrl);
35
36 kfree(ctrl->assoc);
37 kfree(ctrl);
38}
39
40int amp_ctrl_put(struct amp_ctrl *ctrl)
41{
42 BT_DBG("ctrl %p orig refcnt %d", ctrl,
43 atomic_read(&ctrl->kref.refcount));
44
45 return kref_put(&ctrl->kref, &amp_ctrl_destroy);
46}
47
48struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id)
49{
50 struct amp_ctrl *ctrl;
51
52 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
53 if (!ctrl)
54 return NULL;
55
56 kref_init(&ctrl->kref);
57 ctrl->id = id;
58
59 mutex_lock(&mgr->amp_ctrls_lock);
60 list_add(&ctrl->list, &mgr->amp_ctrls);
61 mutex_unlock(&mgr->amp_ctrls_lock);
62
63 BT_DBG("mgr %p ctrl %p", mgr, ctrl);
64
65 return ctrl;
66}
67
68void amp_ctrl_list_flush(struct amp_mgr *mgr)
69{
70 struct amp_ctrl *ctrl, *n;
71
72 BT_DBG("mgr %p", mgr);
73
74 mutex_lock(&mgr->amp_ctrls_lock);
75 list_for_each_entry_safe(ctrl, n, &mgr->amp_ctrls, list) {
76 list_del(&ctrl->list);
77 amp_ctrl_put(ctrl);
78 }
79 mutex_unlock(&mgr->amp_ctrls_lock);
80}
81
82struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id)
83{
84 struct amp_ctrl *ctrl;
85
86 BT_DBG("mgr %p id %d", mgr, id);
87
88 mutex_lock(&mgr->amp_ctrls_lock);
89 list_for_each_entry(ctrl, &mgr->amp_ctrls, list) {
90 if (ctrl->id == id) {
91 amp_ctrl_get(ctrl);
92 mutex_unlock(&mgr->amp_ctrls_lock);
93 return ctrl;
94 }
95 }
96 mutex_unlock(&mgr->amp_ctrls_lock);
97
98 return NULL;
99}
100
101/* Physical Link interface */
102static u8 __next_handle(struct amp_mgr *mgr)
103{
104 if (++mgr->handle == 0)
105 mgr->handle = 1;
106
107 return mgr->handle;
108}
109
110struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
111 u8 remote_id, bool out)
112{
113 bdaddr_t *dst = mgr->l2cap_conn->dst;
114 struct hci_conn *hcon;
115
116 hcon = hci_conn_add(hdev, AMP_LINK, dst);
117 if (!hcon)
118 return NULL;
119
120 BT_DBG("hcon %p dst %pMR", hcon, dst);
121
122 hcon->state = BT_CONNECT;
123 hcon->attempt++;
124 hcon->handle = __next_handle(mgr);
125 hcon->remote_id = remote_id;
126 hcon->amp_mgr = amp_mgr_get(mgr);
127 hcon->out = out;
128
129 return hcon;
130}
131
132/* AMP crypto key generation interface */
133static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output)
134{
135 int ret = 0;
136 struct crypto_shash *tfm;
137
138 if (!ksize)
139 return -EINVAL;
140
141 tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
142 if (IS_ERR(tfm)) {
143 BT_DBG("crypto_alloc_ahash failed: err %ld", PTR_ERR(tfm));
144 return PTR_ERR(tfm);
145 }
146
147 ret = crypto_shash_setkey(tfm, key, ksize);
148 if (ret) {
149 BT_DBG("crypto_ahash_setkey failed: err %d", ret);
150 } else {
151 struct {
152 struct shash_desc shash;
153 char ctx[crypto_shash_descsize(tfm)];
154 } desc;
155
156 desc.shash.tfm = tfm;
157 desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
158
159 ret = crypto_shash_digest(&desc.shash, plaintext, psize,
160 output);
161 }
162
163 crypto_free_shash(tfm);
164 return ret;
165}
166
167int phylink_gen_key(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
168{
169 struct hci_dev *hdev = conn->hdev;
170 struct link_key *key;
171 u8 keybuf[HCI_AMP_LINK_KEY_SIZE];
172 u8 gamp_key[HCI_AMP_LINK_KEY_SIZE];
173 int err;
174
175 if (!hci_conn_check_link_mode(conn))
176 return -EACCES;
177
178 BT_DBG("conn %p key_type %d", conn, conn->key_type);
179
180 /* Legacy key */
181 if (conn->key_type < 3) {
182 BT_ERR("Legacy key type %d", conn->key_type);
183 return -EACCES;
184 }
185
186 *type = conn->key_type;
187 *len = HCI_AMP_LINK_KEY_SIZE;
188
189 key = hci_find_link_key(hdev, &conn->dst);
190 if (!key) {
191 BT_DBG("No Link key for conn %p dst %pMR", conn, &conn->dst);
192 return -EACCES;
193 }
194
195 /* BR/EDR Link Key concatenated together with itself */
196 memcpy(&keybuf[0], key->val, HCI_LINK_KEY_SIZE);
197 memcpy(&keybuf[HCI_LINK_KEY_SIZE], key->val, HCI_LINK_KEY_SIZE);
198
199 /* Derive Generic AMP Link Key (gamp) */
200 err = hmac_sha256(keybuf, HCI_AMP_LINK_KEY_SIZE, "gamp", 4, gamp_key);
201 if (err) {
202 BT_ERR("Could not derive Generic AMP Key: err %d", err);
203 return err;
204 }
205
206 if (conn->key_type == HCI_LK_DEBUG_COMBINATION) {
207 BT_DBG("Use Generic AMP Key (gamp)");
208 memcpy(data, gamp_key, HCI_AMP_LINK_KEY_SIZE);
209 return err;
210 }
211
212 /* Derive Dedicated AMP Link Key: "802b" is 802.11 PAL keyID */
213 return hmac_sha256(gamp_key, HCI_AMP_LINK_KEY_SIZE, "802b", 4, data);
214}
215
216void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle)
217{
218 struct hci_cp_read_local_amp_assoc cp;
219 struct amp_assoc *loc_assoc = &hdev->loc_assoc;
220
221 BT_DBG("%s handle %d", hdev->name, phy_handle);
222
223 cp.phy_handle = phy_handle;
224 cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
225 cp.len_so_far = cpu_to_le16(loc_assoc->offset);
226
227 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
228}
229
230void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
231{
232 struct hci_cp_read_local_amp_assoc cp;
233
234 memset(&hdev->loc_assoc, 0, sizeof(struct amp_assoc));
235 memset(&cp, 0, sizeof(cp));
236
237 cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
238
239 mgr->state = READ_LOC_AMP_ASSOC;
240 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
241}
242
243void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
244 struct hci_conn *hcon)
245{
246 struct hci_cp_read_local_amp_assoc cp;
247 struct amp_mgr *mgr = hcon->amp_mgr;
248
249 cp.phy_handle = hcon->handle;
250 cp.len_so_far = cpu_to_le16(0);
251 cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
252
253 mgr->state = READ_LOC_AMP_ASSOC_FINAL;
254
255 /* Read Local AMP Assoc final link information data */
256 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
257}
258
259/* Write AMP Assoc data fragments, returns true with last fragment written*/
260static bool amp_write_rem_assoc_frag(struct hci_dev *hdev,
261 struct hci_conn *hcon)
262{
263 struct hci_cp_write_remote_amp_assoc *cp;
264 struct amp_mgr *mgr = hcon->amp_mgr;
265 struct amp_ctrl *ctrl;
266 u16 frag_len, len;
267
268 ctrl = amp_ctrl_lookup(mgr, hcon->remote_id);
269 if (!ctrl)
270 return false;
271
272 if (!ctrl->assoc_rem_len) {
273 BT_DBG("all fragments are written");
274 ctrl->assoc_rem_len = ctrl->assoc_len;
275 ctrl->assoc_len_so_far = 0;
276
277 amp_ctrl_put(ctrl);
278 return true;
279 }
280
281 frag_len = min_t(u16, 248, ctrl->assoc_rem_len);
282 len = frag_len + sizeof(*cp);
283
284 cp = kzalloc(len, GFP_KERNEL);
285 if (!cp) {
286 amp_ctrl_put(ctrl);
287 return false;
288 }
289
290 BT_DBG("hcon %p ctrl %p frag_len %u assoc_len %u rem_len %u",
291 hcon, ctrl, frag_len, ctrl->assoc_len, ctrl->assoc_rem_len);
292
293 cp->phy_handle = hcon->handle;
294 cp->len_so_far = cpu_to_le16(ctrl->assoc_len_so_far);
295 cp->rem_len = cpu_to_le16(ctrl->assoc_rem_len);
296 memcpy(cp->frag, ctrl->assoc, frag_len);
297
298 ctrl->assoc_len_so_far += frag_len;
299 ctrl->assoc_rem_len -= frag_len;
300
301 amp_ctrl_put(ctrl);
302
303 hci_send_cmd(hdev, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
304
305 kfree(cp);
306
307 return false;
308}
309
310void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle)
311{
312 struct hci_conn *hcon;
313
314 BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle);
315
316 hcon = hci_conn_hash_lookup_handle(hdev, handle);
317 if (!hcon)
318 return;
319
320 amp_write_rem_assoc_frag(hdev, hcon);
321}
322
323void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle)
324{
325 struct hci_conn *hcon;
326
327 BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle);
328
329 hcon = hci_conn_hash_lookup_handle(hdev, handle);
330 if (!hcon)
331 return;
332
333 BT_DBG("%s phy handle 0x%2.2x hcon %p", hdev->name, handle, hcon);
334
335 amp_write_rem_assoc_frag(hdev, hcon);
336}
337
338void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
339 struct hci_conn *hcon)
340{
341 struct hci_cp_create_phy_link cp;
342
343 cp.phy_handle = hcon->handle;
344
345 BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon,
346 hcon->handle);
347
348 if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len,
349 &cp.key_type)) {
350 BT_DBG("Cannot create link key");
351 return;
352 }
353
354 hci_send_cmd(hdev, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
355}
356
357void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
358 struct hci_conn *hcon)
359{
360 struct hci_cp_accept_phy_link cp;
361
362 cp.phy_handle = hcon->handle;
363
364 BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon,
365 hcon->handle);
366
367 if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len,
368 &cp.key_type)) {
369 BT_DBG("Cannot create link key");
370 return;
371 }
372
373 hci_send_cmd(hdev, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
374}
375
376void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon)
377{
378 struct hci_dev *bredr_hdev = hci_dev_hold(bredr_hcon->hdev);
379 struct amp_mgr *mgr = hs_hcon->amp_mgr;
380 struct l2cap_chan *bredr_chan;
381
382 BT_DBG("bredr_hcon %p hs_hcon %p mgr %p", bredr_hcon, hs_hcon, mgr);
383
384 if (!bredr_hdev || !mgr || !mgr->bredr_chan)
385 return;
386
387 bredr_chan = mgr->bredr_chan;
388
389 l2cap_chan_lock(bredr_chan);
390
391 set_bit(FLAG_EFS_ENABLE, &bredr_chan->flags);
392 bredr_chan->remote_amp_id = hs_hcon->remote_id;
393 bredr_chan->local_amp_id = hs_hcon->hdev->id;
394 bredr_chan->hs_hcon = hs_hcon;
395 bredr_chan->conn->mtu = hs_hcon->hdev->block_mtu;
396
397 __l2cap_physical_cfm(bredr_chan, 0);
398
399 l2cap_chan_unlock(bredr_chan);
400
401 hci_dev_put(bredr_hdev);
402}
403
404void amp_create_logical_link(struct l2cap_chan *chan)
405{
406 struct hci_cp_create_accept_logical_link cp;
407 struct hci_conn *hcon;
408 struct hci_dev *hdev;
409
410 BT_DBG("chan %p", chan);
411
412 if (!chan->hs_hcon)
413 return;
414
415 hdev = hci_dev_hold(chan->hs_hcon->hdev);
416 if (!hdev)
417 return;
418
419 BT_DBG("chan %p dst %pMR", chan, chan->conn->dst);
420
421 hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, chan->conn->dst);
422 if (!hcon)
423 goto done;
424
425 cp.phy_handle = hcon->handle;
426
427 cp.tx_flow_spec.id = chan->local_id;
428 cp.tx_flow_spec.stype = chan->local_stype;
429 cp.tx_flow_spec.msdu = cpu_to_le16(chan->local_msdu);
430 cp.tx_flow_spec.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
431 cp.tx_flow_spec.acc_lat = cpu_to_le32(chan->local_acc_lat);
432 cp.tx_flow_spec.flush_to = cpu_to_le32(chan->local_flush_to);
433
434 cp.rx_flow_spec.id = chan->remote_id;
435 cp.rx_flow_spec.stype = chan->remote_stype;
436 cp.rx_flow_spec.msdu = cpu_to_le16(chan->remote_msdu);
437 cp.rx_flow_spec.sdu_itime = cpu_to_le32(chan->remote_sdu_itime);
438 cp.rx_flow_spec.acc_lat = cpu_to_le32(chan->remote_acc_lat);
439 cp.rx_flow_spec.flush_to = cpu_to_le32(chan->remote_flush_to);
440
441 if (hcon->out)
442 hci_send_cmd(hdev, HCI_OP_CREATE_LOGICAL_LINK, sizeof(cp),
443 &cp);
444 else
445 hci_send_cmd(hdev, HCI_OP_ACCEPT_LOGICAL_LINK, sizeof(cp),
446 &cp);
447
448done:
449 hci_dev_put(hdev);
450}
451
452void amp_disconnect_logical_link(struct hci_chan *hchan)
453{
454 struct hci_conn *hcon = hchan->conn;
455 struct hci_cp_disconn_logical_link cp;
456
457 if (hcon->state != BT_CONNECTED) {
458 BT_DBG("hchan %p not connected", hchan);
459 return;
460 }
461
462 cp.log_handle = cpu_to_le16(hchan->handle);
463 hci_send_cmd(hcon->hdev, HCI_OP_DISCONN_LOGICAL_LINK, sizeof(cp), &cp);
464}
465
466void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason)
467{
468 BT_DBG("hchan %p", hchan);
469
470 hci_chan_del(hchan);
471}
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 4a6620bc1570..a5b639702637 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -182,8 +182,7 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len)
182 a2 = data; 182 a2 = data;
183 data += ETH_ALEN; 183 data += ETH_ALEN;
184 184
185 BT_DBG("mc filter %s -> %s", 185 BT_DBG("mc filter %pMR -> %pMR", a1, a2);
186 batostr((void *) a1), batostr((void *) a2));
187 186
188 /* Iterate from a1 to a2 */ 187 /* Iterate from a1 to a2 */
189 set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter); 188 set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter);
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 98f86f91d47c..e58c8b32589c 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -25,7 +25,6 @@
25 SOFTWARE IS DISCLAIMED. 25 SOFTWARE IS DISCLAIMED.
26*/ 26*/
27 27
28#include <linux/export.h>
29#include <linux/etherdevice.h> 28#include <linux/etherdevice.h>
30 29
31#include <net/bluetooth/bluetooth.h> 30#include <net/bluetooth/bluetooth.h>
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 50f0d135eb8f..a4a9d4b6816c 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -20,7 +20,7 @@
20 SOFTWARE IS DISCLAIMED. 20 SOFTWARE IS DISCLAIMED.
21*/ 21*/
22 22
23#include <linux/module.h> 23#include <linux/export.h>
24#include <linux/proc_fs.h> 24#include <linux/proc_fs.h>
25#include <linux/seq_file.h> 25#include <linux/seq_file.h>
26#include <linux/types.h> 26#include <linux/types.h>
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 6c9c1fd601ca..e0a6ebf2baa6 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -353,7 +353,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
353 353
354 BT_DBG("mtu %d", session->mtu); 354 BT_DBG("mtu %d", session->mtu);
355 355
356 sprintf(session->name, "%s", batostr(&bt_sk(sock->sk)->dst)); 356 sprintf(session->name, "%pMR", &bt_sk(sock->sk)->dst);
357 357
358 session->sock = sock; 358 session->sock = sock;
359 session->state = BT_CONFIG; 359 session->state = BT_CONFIG;
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index aacb802d1ee4..1c57482112b6 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -20,7 +20,7 @@
20 SOFTWARE IS DISCLAIMED. 20 SOFTWARE IS DISCLAIMED.
21*/ 21*/
22 22
23#include <linux/module.h> 23#include <linux/export.h>
24 24
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/capability.h> 26#include <linux/capability.h>
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b9196a44f759..25bfce0666eb 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -130,6 +130,20 @@ void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
130 hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp); 130 hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
131} 131}
132 132
133static void hci_amp_disconn(struct hci_conn *conn, __u8 reason)
134{
135 struct hci_cp_disconn_phy_link cp;
136
137 BT_DBG("hcon %p", conn);
138
139 conn->state = BT_DISCONN;
140
141 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
142 cp.reason = reason;
143 hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
144 sizeof(cp), &cp);
145}
146
133static void hci_add_sco(struct hci_conn *conn, __u16 handle) 147static void hci_add_sco(struct hci_conn *conn, __u16 handle)
134{ 148{
135 struct hci_dev *hdev = conn->hdev; 149 struct hci_dev *hdev = conn->hdev;
@@ -230,11 +244,24 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status)
230 } 244 }
231} 245}
232 246
247static void hci_conn_disconnect(struct hci_conn *conn)
248{
249 __u8 reason = hci_proto_disconn_ind(conn);
250
251 switch (conn->type) {
252 case ACL_LINK:
253 hci_acl_disconn(conn, reason);
254 break;
255 case AMP_LINK:
256 hci_amp_disconn(conn, reason);
257 break;
258 }
259}
260
233static void hci_conn_timeout(struct work_struct *work) 261static void hci_conn_timeout(struct work_struct *work)
234{ 262{
235 struct hci_conn *conn = container_of(work, struct hci_conn, 263 struct hci_conn *conn = container_of(work, struct hci_conn,
236 disc_work.work); 264 disc_work.work);
237 __u8 reason;
238 265
239 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state)); 266 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
240 267
@@ -253,8 +280,7 @@ static void hci_conn_timeout(struct work_struct *work)
253 break; 280 break;
254 case BT_CONFIG: 281 case BT_CONFIG:
255 case BT_CONNECTED: 282 case BT_CONNECTED:
256 reason = hci_proto_disconn_ind(conn); 283 hci_conn_disconnect(conn);
257 hci_acl_disconn(conn, reason);
258 break; 284 break;
259 default: 285 default:
260 conn->state = BT_CLOSED; 286 conn->state = BT_CLOSED;
@@ -320,7 +346,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
320{ 346{
321 struct hci_conn *conn; 347 struct hci_conn *conn;
322 348
323 BT_DBG("%s dst %s", hdev->name, batostr(dst)); 349 BT_DBG("%s dst %pMR", hdev->name, dst);
324 350
325 conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL); 351 conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
326 if (!conn) 352 if (!conn)
@@ -437,7 +463,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
437 int use_src = bacmp(src, BDADDR_ANY); 463 int use_src = bacmp(src, BDADDR_ANY);
438 struct hci_dev *hdev = NULL, *d; 464 struct hci_dev *hdev = NULL, *d;
439 465
440 BT_DBG("%s -> %s", batostr(src), batostr(dst)); 466 BT_DBG("%pMR -> %pMR", src, dst);
441 467
442 read_lock(&hci_dev_list_lock); 468 read_lock(&hci_dev_list_lock);
443 469
@@ -476,6 +502,9 @@ static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
476{ 502{
477 struct hci_conn *le; 503 struct hci_conn *le;
478 504
505 if (test_bit(HCI_LE_PERIPHERAL, &hdev->flags))
506 return ERR_PTR(-ENOTSUPP);
507
479 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); 508 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
480 if (!le) { 509 if (!le) {
481 le = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); 510 le = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
@@ -567,7 +596,7 @@ static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
567struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, 596struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
568 __u8 dst_type, __u8 sec_level, __u8 auth_type) 597 __u8 dst_type, __u8 sec_level, __u8 auth_type)
569{ 598{
570 BT_DBG("%s dst %s type 0x%x", hdev->name, batostr(dst), type); 599 BT_DBG("%s dst %pMR type 0x%x", hdev->name, dst, type);
571 600
572 switch (type) { 601 switch (type) {
573 case LE_LINK: 602 case LE_LINK:
@@ -933,6 +962,7 @@ struct hci_chan *hci_chan_create(struct hci_conn *conn)
933 962
934 chan->conn = conn; 963 chan->conn = conn;
935 skb_queue_head_init(&chan->data_q); 964 skb_queue_head_init(&chan->data_q);
965 chan->state = BT_CONNECTED;
936 966
937 list_add_rcu(&chan->list, &conn->chan_list); 967 list_add_rcu(&chan->list, &conn->chan_list);
938 968
@@ -950,6 +980,8 @@ void hci_chan_del(struct hci_chan *chan)
950 980
951 synchronize_rcu(); 981 synchronize_rcu();
952 982
983 hci_conn_put(conn);
984
953 skb_queue_purge(&chan->data_q); 985 skb_queue_purge(&chan->data_q);
954 kfree(chan); 986 kfree(chan);
955} 987}
@@ -963,3 +995,35 @@ void hci_chan_list_flush(struct hci_conn *conn)
963 list_for_each_entry_safe(chan, n, &conn->chan_list, list) 995 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
964 hci_chan_del(chan); 996 hci_chan_del(chan);
965} 997}
998
999static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1000 __u16 handle)
1001{
1002 struct hci_chan *hchan;
1003
1004 list_for_each_entry(hchan, &hcon->chan_list, list) {
1005 if (hchan->handle == handle)
1006 return hchan;
1007 }
1008
1009 return NULL;
1010}
1011
1012struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1013{
1014 struct hci_conn_hash *h = &hdev->conn_hash;
1015 struct hci_conn *hcon;
1016 struct hci_chan *hchan = NULL;
1017
1018 rcu_read_lock();
1019
1020 list_for_each_entry_rcu(hcon, &h->list, list) {
1021 hchan = __hci_chan_lookup_handle(hcon, handle);
1022 if (hchan)
1023 break;
1024 }
1025
1026 rcu_read_unlock();
1027
1028 return hchan;
1029}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 8a0ce706aebd..596660d37c5e 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -178,48 +178,13 @@ static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
178 178
179static void bredr_init(struct hci_dev *hdev) 179static void bredr_init(struct hci_dev *hdev)
180{ 180{
181 struct hci_cp_delete_stored_link_key cp;
182 __le16 param;
183 __u8 flt_type;
184
185 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; 181 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
186 182
187 /* Mandatory initialization */
188
189 /* Read Local Supported Features */ 183 /* Read Local Supported Features */
190 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); 184 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
191 185
192 /* Read Local Version */ 186 /* Read Local Version */
193 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL); 187 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
194
195 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
196 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
197
198 /* Read BD Address */
199 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
200
201 /* Read Class of Device */
202 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
203
204 /* Read Local Name */
205 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
206
207 /* Read Voice Setting */
208 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
209
210 /* Optional initialization */
211
212 /* Clear Event Filters */
213 flt_type = HCI_FLT_CLEAR_ALL;
214 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
215
216 /* Connection accept timeout ~20 secs */
217 param = __constant_cpu_to_le16(0x7d00);
218 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
219
220 bacpy(&cp.bdaddr, BDADDR_ANY);
221 cp.delete_all = 1;
222 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
223} 188}
224 189
225static void amp_init(struct hci_dev *hdev) 190static void amp_init(struct hci_dev *hdev)
@@ -273,14 +238,6 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
273 } 238 }
274} 239}
275 240
276static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
277{
278 BT_DBG("%s", hdev->name);
279
280 /* Read LE buffer size */
281 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
282}
283
284static void hci_scan_req(struct hci_dev *hdev, unsigned long opt) 241static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
285{ 242{
286 __u8 scan = opt; 243 __u8 scan = opt;
@@ -405,7 +362,7 @@ struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
405 struct discovery_state *cache = &hdev->discovery; 362 struct discovery_state *cache = &hdev->discovery;
406 struct inquiry_entry *e; 363 struct inquiry_entry *e;
407 364
408 BT_DBG("cache %p, %s", cache, batostr(bdaddr)); 365 BT_DBG("cache %p, %pMR", cache, bdaddr);
409 366
410 list_for_each_entry(e, &cache->all, all) { 367 list_for_each_entry(e, &cache->all, all) {
411 if (!bacmp(&e->data.bdaddr, bdaddr)) 368 if (!bacmp(&e->data.bdaddr, bdaddr))
@@ -421,7 +378,7 @@ struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
421 struct discovery_state *cache = &hdev->discovery; 378 struct discovery_state *cache = &hdev->discovery;
422 struct inquiry_entry *e; 379 struct inquiry_entry *e;
423 380
424 BT_DBG("cache %p, %s", cache, batostr(bdaddr)); 381 BT_DBG("cache %p, %pMR", cache, bdaddr);
425 382
426 list_for_each_entry(e, &cache->unknown, list) { 383 list_for_each_entry(e, &cache->unknown, list) {
427 if (!bacmp(&e->data.bdaddr, bdaddr)) 384 if (!bacmp(&e->data.bdaddr, bdaddr))
@@ -438,7 +395,7 @@ struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
438 struct discovery_state *cache = &hdev->discovery; 395 struct discovery_state *cache = &hdev->discovery;
439 struct inquiry_entry *e; 396 struct inquiry_entry *e;
440 397
441 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state); 398 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
442 399
443 list_for_each_entry(e, &cache->resolve, list) { 400 list_for_each_entry(e, &cache->resolve, list) {
444 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state) 401 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
@@ -475,7 +432,9 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
475 struct discovery_state *cache = &hdev->discovery; 432 struct discovery_state *cache = &hdev->discovery;
476 struct inquiry_entry *ie; 433 struct inquiry_entry *ie;
477 434
478 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr)); 435 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
436
437 hci_remove_remote_oob_data(hdev, &data->bdaddr);
479 438
480 if (ssp) 439 if (ssp)
481 *ssp = data->ssp_mode; 440 *ssp = data->ssp_mode;
@@ -637,6 +596,99 @@ done:
637 return err; 596 return err;
638} 597}
639 598
599static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
600{
601 u8 ad_len = 0, flags = 0;
602 size_t name_len;
603
604 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
605 flags |= LE_AD_GENERAL;
606
607 if (!lmp_bredr_capable(hdev))
608 flags |= LE_AD_NO_BREDR;
609
610 if (lmp_le_br_capable(hdev))
611 flags |= LE_AD_SIM_LE_BREDR_CTRL;
612
613 if (lmp_host_le_br_capable(hdev))
614 flags |= LE_AD_SIM_LE_BREDR_HOST;
615
616 if (flags) {
617 BT_DBG("adv flags 0x%02x", flags);
618
619 ptr[0] = 2;
620 ptr[1] = EIR_FLAGS;
621 ptr[2] = flags;
622
623 ad_len += 3;
624 ptr += 3;
625 }
626
627 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
628 ptr[0] = 2;
629 ptr[1] = EIR_TX_POWER;
630 ptr[2] = (u8) hdev->adv_tx_power;
631
632 ad_len += 3;
633 ptr += 3;
634 }
635
636 name_len = strlen(hdev->dev_name);
637 if (name_len > 0) {
638 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
639
640 if (name_len > max_len) {
641 name_len = max_len;
642 ptr[1] = EIR_NAME_SHORT;
643 } else
644 ptr[1] = EIR_NAME_COMPLETE;
645
646 ptr[0] = name_len + 1;
647
648 memcpy(ptr + 2, hdev->dev_name, name_len);
649
650 ad_len += (name_len + 2);
651 ptr += (name_len + 2);
652 }
653
654 return ad_len;
655}
656
657int hci_update_ad(struct hci_dev *hdev)
658{
659 struct hci_cp_le_set_adv_data cp;
660 u8 len;
661 int err;
662
663 hci_dev_lock(hdev);
664
665 if (!lmp_le_capable(hdev)) {
666 err = -EINVAL;
667 goto unlock;
668 }
669
670 memset(&cp, 0, sizeof(cp));
671
672 len = create_ad(hdev, cp.data);
673
674 if (hdev->adv_data_len == len &&
675 memcmp(cp.data, hdev->adv_data, len) == 0) {
676 err = 0;
677 goto unlock;
678 }
679
680 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
681 hdev->adv_data_len = len;
682
683 cp.length = len;
684 err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
685
686unlock:
687 hci_dev_unlock(hdev);
688
689 return err;
690}
691
640/* ---- HCI ioctl helpers ---- */ 692/* ---- HCI ioctl helpers ---- */
641 693
642int hci_dev_open(__u16 dev) 694int hci_dev_open(__u16 dev)
@@ -687,10 +739,6 @@ int hci_dev_open(__u16 dev)
687 739
688 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT); 740 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
689 741
690 if (lmp_host_le_capable(hdev))
691 ret = __hci_request(hdev, hci_le_init_req, 0,
692 HCI_INIT_TIMEOUT);
693
694 clear_bit(HCI_INIT, &hdev->flags); 742 clear_bit(HCI_INIT, &hdev->flags);
695 } 743 }
696 744
@@ -698,6 +746,7 @@ int hci_dev_open(__u16 dev)
698 hci_dev_hold(hdev); 746 hci_dev_hold(hdev);
699 set_bit(HCI_UP, &hdev->flags); 747 set_bit(HCI_UP, &hdev->flags);
700 hci_notify(hdev, HCI_DEV_UP); 748 hci_notify(hdev, HCI_DEV_UP);
749 hci_update_ad(hdev);
701 if (!test_bit(HCI_SETUP, &hdev->dev_flags) && 750 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
702 mgmt_valid_hdev(hdev)) { 751 mgmt_valid_hdev(hdev)) {
703 hci_dev_lock(hdev); 752 hci_dev_lock(hdev);
@@ -812,6 +861,9 @@ static int hci_dev_do_close(struct hci_dev *hdev)
812 /* Clear flags */ 861 /* Clear flags */
813 hdev->flags = 0; 862 hdev->flags = 0;
814 863
864 /* Controller radio is available but is currently powered down */
865 hdev->amp_status = 0;
866
815 memset(hdev->eir, 0, sizeof(hdev->eir)); 867 memset(hdev->eir, 0, sizeof(hdev->eir));
816 memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); 868 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
817 869
@@ -1039,10 +1091,17 @@ int hci_get_dev_info(void __user *arg)
1039 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4); 1091 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1040 di.flags = hdev->flags; 1092 di.flags = hdev->flags;
1041 di.pkt_type = hdev->pkt_type; 1093 di.pkt_type = hdev->pkt_type;
1042 di.acl_mtu = hdev->acl_mtu; 1094 if (lmp_bredr_capable(hdev)) {
1043 di.acl_pkts = hdev->acl_pkts; 1095 di.acl_mtu = hdev->acl_mtu;
1044 di.sco_mtu = hdev->sco_mtu; 1096 di.acl_pkts = hdev->acl_pkts;
1045 di.sco_pkts = hdev->sco_pkts; 1097 di.sco_mtu = hdev->sco_mtu;
1098 di.sco_pkts = hdev->sco_pkts;
1099 } else {
1100 di.acl_mtu = hdev->le_mtu;
1101 di.acl_pkts = hdev->le_pkts;
1102 di.sco_mtu = 0;
1103 di.sco_pkts = 0;
1104 }
1046 di.link_policy = hdev->link_policy; 1105 di.link_policy = hdev->link_policy;
1047 di.link_mode = hdev->link_mode; 1106 di.link_mode = hdev->link_mode;
1048 1107
@@ -1259,7 +1318,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1259 list_add(&key->list, &hdev->link_keys); 1318 list_add(&key->list, &hdev->link_keys);
1260 } 1319 }
1261 1320
1262 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type); 1321 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1263 1322
1264 /* Some buggy controller combinations generate a changed 1323 /* Some buggy controller combinations generate a changed
1265 * combination key for legacy pairing even when there's no 1324 * combination key for legacy pairing even when there's no
@@ -1338,7 +1397,7 @@ int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1338 if (!key) 1397 if (!key)
1339 return -ENOENT; 1398 return -ENOENT;
1340 1399
1341 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr)); 1400 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1342 1401
1343 list_del(&key->list); 1402 list_del(&key->list);
1344 kfree(key); 1403 kfree(key);
@@ -1354,7 +1413,7 @@ int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1354 if (bacmp(bdaddr, &k->bdaddr)) 1413 if (bacmp(bdaddr, &k->bdaddr))
1355 continue; 1414 continue;
1356 1415
1357 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr)); 1416 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1358 1417
1359 list_del(&k->list); 1418 list_del(&k->list);
1360 kfree(k); 1419 kfree(k);
@@ -1401,7 +1460,7 @@ int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1401 if (!data) 1460 if (!data)
1402 return -ENOENT; 1461 return -ENOENT;
1403 1462
1404 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr)); 1463 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1405 1464
1406 list_del(&data->list); 1465 list_del(&data->list);
1407 kfree(data); 1466 kfree(data);
@@ -1440,7 +1499,7 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1440 memcpy(data->hash, hash, sizeof(data->hash)); 1499 memcpy(data->hash, hash, sizeof(data->hash));
1441 memcpy(data->randomizer, randomizer, sizeof(data->randomizer)); 1500 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1442 1501
1443 BT_DBG("%s for %s", hdev->name, batostr(bdaddr)); 1502 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1444 1503
1445 return 0; 1504 return 0;
1446} 1505}
@@ -1617,6 +1676,9 @@ int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1617 1676
1618 BT_DBG("%s", hdev->name); 1677 BT_DBG("%s", hdev->name);
1619 1678
1679 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1680 return -ENOTSUPP;
1681
1620 if (work_busy(&hdev->le_scan)) 1682 if (work_busy(&hdev->le_scan))
1621 return -EINPROGRESS; 1683 return -EINPROGRESS;
1622 1684
@@ -1643,6 +1705,8 @@ struct hci_dev *hci_alloc_dev(void)
1643 hdev->esco_type = (ESCO_HV1); 1705 hdev->esco_type = (ESCO_HV1);
1644 hdev->link_mode = (HCI_LM_ACCEPT); 1706 hdev->link_mode = (HCI_LM_ACCEPT);
1645 hdev->io_capability = 0x03; /* No Input No Output */ 1707 hdev->io_capability = 0x03; /* No Input No Output */
1708 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
1709 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
1646 1710
1647 hdev->sniff_max_interval = 800; 1711 hdev->sniff_max_interval = 800;
1648 hdev->sniff_min_interval = 80; 1712 hdev->sniff_min_interval = 80;
@@ -1754,11 +1818,11 @@ int hci_register_dev(struct hci_dev *hdev)
1754 if (hdev->dev_type != HCI_AMP) 1818 if (hdev->dev_type != HCI_AMP)
1755 set_bit(HCI_AUTO_OFF, &hdev->dev_flags); 1819 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1756 1820
1757 schedule_work(&hdev->power_on);
1758
1759 hci_notify(hdev, HCI_DEV_REG); 1821 hci_notify(hdev, HCI_DEV_REG);
1760 hci_dev_hold(hdev); 1822 hci_dev_hold(hdev);
1761 1823
1824 schedule_work(&hdev->power_on);
1825
1762 return id; 1826 return id;
1763 1827
1764err_wqueue: 1828err_wqueue:
@@ -1793,6 +1857,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
1793 for (i = 0; i < NUM_REASSEMBLY; i++) 1857 for (i = 0; i < NUM_REASSEMBLY; i++)
1794 kfree_skb(hdev->reassembly[i]); 1858 kfree_skb(hdev->reassembly[i]);
1795 1859
1860 cancel_work_sync(&hdev->power_on);
1861
1796 if (!test_bit(HCI_INIT, &hdev->flags) && 1862 if (!test_bit(HCI_INIT, &hdev->flags) &&
1797 !test_bit(HCI_SETUP, &hdev->dev_flags)) { 1863 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1798 hci_dev_lock(hdev); 1864 hci_dev_lock(hdev);
@@ -2153,9 +2219,10 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2153 hdr->dlen = cpu_to_le16(len); 2219 hdr->dlen = cpu_to_le16(len);
2154} 2220}
2155 2221
2156static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue, 2222static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2157 struct sk_buff *skb, __u16 flags) 2223 struct sk_buff *skb, __u16 flags)
2158{ 2224{
2225 struct hci_conn *conn = chan->conn;
2159 struct hci_dev *hdev = conn->hdev; 2226 struct hci_dev *hdev = conn->hdev;
2160 struct sk_buff *list; 2227 struct sk_buff *list;
2161 2228
@@ -2163,7 +2230,18 @@ static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2163 skb->data_len = 0; 2230 skb->data_len = 0;
2164 2231
2165 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; 2232 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2166 hci_add_acl_hdr(skb, conn->handle, flags); 2233
2234 switch (hdev->dev_type) {
2235 case HCI_BREDR:
2236 hci_add_acl_hdr(skb, conn->handle, flags);
2237 break;
2238 case HCI_AMP:
2239 hci_add_acl_hdr(skb, chan->handle, flags);
2240 break;
2241 default:
2242 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2243 return;
2244 }
2167 2245
2168 list = skb_shinfo(skb)->frag_list; 2246 list = skb_shinfo(skb)->frag_list;
2169 if (!list) { 2247 if (!list) {
@@ -2202,14 +2280,13 @@ static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2202 2280
2203void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) 2281void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2204{ 2282{
2205 struct hci_conn *conn = chan->conn; 2283 struct hci_dev *hdev = chan->conn->hdev;
2206 struct hci_dev *hdev = conn->hdev;
2207 2284
2208 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags); 2285 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2209 2286
2210 skb->dev = (void *) hdev; 2287 skb->dev = (void *) hdev;
2211 2288
2212 hci_queue_acl(conn, &chan->data_q, skb, flags); 2289 hci_queue_acl(chan, &chan->data_q, skb, flags);
2213 2290
2214 queue_work(hdev->workqueue, &hdev->tx_work); 2291 queue_work(hdev->workqueue, &hdev->tx_work);
2215} 2292}
@@ -2311,8 +2388,8 @@ static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2311 /* Kill stalled connections */ 2388 /* Kill stalled connections */
2312 list_for_each_entry_rcu(c, &h->list, list) { 2389 list_for_each_entry_rcu(c, &h->list, list) {
2313 if (c->type == type && c->sent) { 2390 if (c->type == type && c->sent) {
2314 BT_ERR("%s killing stalled connection %s", 2391 BT_ERR("%s killing stalled connection %pMR",
2315 hdev->name, batostr(&c->dst)); 2392 hdev->name, &c->dst);
2316 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM); 2393 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
2317 } 2394 }
2318 } 2395 }
@@ -2381,6 +2458,9 @@ static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2381 case ACL_LINK: 2458 case ACL_LINK:
2382 cnt = hdev->acl_cnt; 2459 cnt = hdev->acl_cnt;
2383 break; 2460 break;
2461 case AMP_LINK:
2462 cnt = hdev->block_cnt;
2463 break;
2384 case SCO_LINK: 2464 case SCO_LINK:
2385 case ESCO_LINK: 2465 case ESCO_LINK:
2386 cnt = hdev->sco_cnt; 2466 cnt = hdev->sco_cnt;
@@ -2510,11 +2590,19 @@ static void hci_sched_acl_blk(struct hci_dev *hdev)
2510 struct hci_chan *chan; 2590 struct hci_chan *chan;
2511 struct sk_buff *skb; 2591 struct sk_buff *skb;
2512 int quote; 2592 int quote;
2593 u8 type;
2513 2594
2514 __check_timeout(hdev, cnt); 2595 __check_timeout(hdev, cnt);
2515 2596
2597 BT_DBG("%s", hdev->name);
2598
2599 if (hdev->dev_type == HCI_AMP)
2600 type = AMP_LINK;
2601 else
2602 type = ACL_LINK;
2603
2516 while (hdev->block_cnt > 0 && 2604 while (hdev->block_cnt > 0 &&
2517 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) { 2605 (chan = hci_chan_sent(hdev, type, &quote))) {
2518 u32 priority = (skb_peek(&chan->data_q))->priority; 2606 u32 priority = (skb_peek(&chan->data_q))->priority;
2519 while (quote > 0 && (skb = skb_peek(&chan->data_q))) { 2607 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2520 int blocks; 2608 int blocks;
@@ -2547,14 +2635,19 @@ static void hci_sched_acl_blk(struct hci_dev *hdev)
2547 } 2635 }
2548 2636
2549 if (cnt != hdev->block_cnt) 2637 if (cnt != hdev->block_cnt)
2550 hci_prio_recalculate(hdev, ACL_LINK); 2638 hci_prio_recalculate(hdev, type);
2551} 2639}
2552 2640
2553static void hci_sched_acl(struct hci_dev *hdev) 2641static void hci_sched_acl(struct hci_dev *hdev)
2554{ 2642{
2555 BT_DBG("%s", hdev->name); 2643 BT_DBG("%s", hdev->name);
2556 2644
2557 if (!hci_conn_num(hdev, ACL_LINK)) 2645 /* No ACL link over BR/EDR controller */
2646 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2647 return;
2648
2649 /* No AMP link over AMP controller */
2650 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
2558 return; 2651 return;
2559 2652
2560 switch (hdev->flow_ctl_mode) { 2653 switch (hdev->flow_ctl_mode) {
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 2022b43c7353..705078a0cc39 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -24,12 +24,13 @@
24 24
25/* Bluetooth HCI event handling. */ 25/* Bluetooth HCI event handling. */
26 26
27#include <linux/export.h>
28#include <asm/unaligned.h> 27#include <asm/unaligned.h>
29 28
30#include <net/bluetooth/bluetooth.h> 29#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h> 30#include <net/bluetooth/hci_core.h>
32#include <net/bluetooth/mgmt.h> 31#include <net/bluetooth/mgmt.h>
32#include <net/bluetooth/a2mp.h>
33#include <net/bluetooth/amp.h>
33 34
34/* Handle HCI Event packets */ 35/* Handle HCI Event packets */
35 36
@@ -201,6 +202,11 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
201 BIT(HCI_PERIODIC_INQ)); 202 BIT(HCI_PERIODIC_INQ));
202 203
203 hdev->discovery.state = DISCOVERY_STOPPED; 204 hdev->discovery.state = DISCOVERY_STOPPED;
205 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
206 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
207
208 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
209 hdev->adv_data_len = 0;
204} 210}
205 211
206static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) 212static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -223,6 +229,9 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
223 229
224 hci_dev_unlock(hdev); 230 hci_dev_unlock(hdev);
225 231
232 if (!status && !test_bit(HCI_INIT, &hdev->flags))
233 hci_update_ad(hdev);
234
226 hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status); 235 hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
227} 236}
228 237
@@ -438,7 +447,7 @@ static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
438static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) 447static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
439{ 448{
440 __u8 status = *((__u8 *) skb->data); 449 __u8 status = *((__u8 *) skb->data);
441 void *sent; 450 struct hci_cp_write_ssp_mode *sent;
442 451
443 BT_DBG("%s status 0x%2.2x", hdev->name, status); 452 BT_DBG("%s status 0x%2.2x", hdev->name, status);
444 453
@@ -446,10 +455,17 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
446 if (!sent) 455 if (!sent)
447 return; 456 return;
448 457
458 if (!status) {
459 if (sent->mode)
460 hdev->host_features[0] |= LMP_HOST_SSP;
461 else
462 hdev->host_features[0] &= ~LMP_HOST_SSP;
463 }
464
449 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 465 if (test_bit(HCI_MGMT, &hdev->dev_flags))
450 mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status); 466 mgmt_ssp_enable_complete(hdev, sent->mode, status);
451 else if (!status) { 467 else if (!status) {
452 if (*((u8 *) sent)) 468 if (sent->mode)
453 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags); 469 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
454 else 470 else
455 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags); 471 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
@@ -458,10 +474,10 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
458 474
459static u8 hci_get_inquiry_mode(struct hci_dev *hdev) 475static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
460{ 476{
461 if (hdev->features[6] & LMP_EXT_INQ) 477 if (lmp_ext_inq_capable(hdev))
462 return 2; 478 return 2;
463 479
464 if (hdev->features[3] & LMP_RSSI_INQ) 480 if (lmp_inq_rssi_capable(hdev))
465 return 1; 481 return 1;
466 482
467 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 && 483 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
@@ -505,28 +521,30 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
505 if (hdev->hci_ver < BLUETOOTH_VER_1_2) 521 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
506 return; 522 return;
507 523
508 events[4] |= 0x01; /* Flow Specification Complete */ 524 if (lmp_bredr_capable(hdev)) {
509 events[4] |= 0x02; /* Inquiry Result with RSSI */ 525 events[4] |= 0x01; /* Flow Specification Complete */
510 events[4] |= 0x04; /* Read Remote Extended Features Complete */ 526 events[4] |= 0x02; /* Inquiry Result with RSSI */
511 events[5] |= 0x08; /* Synchronous Connection Complete */ 527 events[4] |= 0x04; /* Read Remote Extended Features Complete */
512 events[5] |= 0x10; /* Synchronous Connection Changed */ 528 events[5] |= 0x08; /* Synchronous Connection Complete */
529 events[5] |= 0x10; /* Synchronous Connection Changed */
530 }
513 531
514 if (hdev->features[3] & LMP_RSSI_INQ) 532 if (lmp_inq_rssi_capable(hdev))
515 events[4] |= 0x02; /* Inquiry Result with RSSI */ 533 events[4] |= 0x02; /* Inquiry Result with RSSI */
516 534
517 if (lmp_sniffsubr_capable(hdev)) 535 if (lmp_sniffsubr_capable(hdev))
518 events[5] |= 0x20; /* Sniff Subrating */ 536 events[5] |= 0x20; /* Sniff Subrating */
519 537
520 if (hdev->features[5] & LMP_PAUSE_ENC) 538 if (lmp_pause_enc_capable(hdev))
521 events[5] |= 0x80; /* Encryption Key Refresh Complete */ 539 events[5] |= 0x80; /* Encryption Key Refresh Complete */
522 540
523 if (hdev->features[6] & LMP_EXT_INQ) 541 if (lmp_ext_inq_capable(hdev))
524 events[5] |= 0x40; /* Extended Inquiry Result */ 542 events[5] |= 0x40; /* Extended Inquiry Result */
525 543
526 if (lmp_no_flush_capable(hdev)) 544 if (lmp_no_flush_capable(hdev))
527 events[7] |= 0x01; /* Enhanced Flush Complete */ 545 events[7] |= 0x01; /* Enhanced Flush Complete */
528 546
529 if (hdev->features[7] & LMP_LSTO) 547 if (lmp_lsto_capable(hdev))
530 events[6] |= 0x80; /* Link Supervision Timeout Changed */ 548 events[6] |= 0x80; /* Link Supervision Timeout Changed */
531 549
532 if (lmp_ssp_capable(hdev)) { 550 if (lmp_ssp_capable(hdev)) {
@@ -546,6 +564,53 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
546 events[7] |= 0x20; /* LE Meta-Event */ 564 events[7] |= 0x20; /* LE Meta-Event */
547 565
548 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events); 566 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
567
568 if (lmp_le_capable(hdev)) {
569 memset(events, 0, sizeof(events));
570 events[0] = 0x1f;
571 hci_send_cmd(hdev, HCI_OP_LE_SET_EVENT_MASK,
572 sizeof(events), events);
573 }
574}
575
576static void bredr_setup(struct hci_dev *hdev)
577{
578 struct hci_cp_delete_stored_link_key cp;
579 __le16 param;
580 __u8 flt_type;
581
582 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
583 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
584
585 /* Read Class of Device */
586 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
587
588 /* Read Local Name */
589 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
590
591 /* Read Voice Setting */
592 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
593
594 /* Clear Event Filters */
595 flt_type = HCI_FLT_CLEAR_ALL;
596 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
597
598 /* Connection accept timeout ~20 secs */
599 param = __constant_cpu_to_le16(0x7d00);
600 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
601
602 bacpy(&cp.bdaddr, BDADDR_ANY);
603 cp.delete_all = 1;
604 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
605}
606
607static void le_setup(struct hci_dev *hdev)
608{
609 /* Read LE Buffer Size */
610 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
611
612 /* Read LE Advertising Channel TX Power */
613 hci_send_cmd(hdev, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
549} 614}
550 615
551static void hci_setup(struct hci_dev *hdev) 616static void hci_setup(struct hci_dev *hdev)
@@ -553,6 +618,15 @@ static void hci_setup(struct hci_dev *hdev)
553 if (hdev->dev_type != HCI_BREDR) 618 if (hdev->dev_type != HCI_BREDR)
554 return; 619 return;
555 620
621 /* Read BD Address */
622 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
623
624 if (lmp_bredr_capable(hdev))
625 bredr_setup(hdev);
626
627 if (lmp_le_capable(hdev))
628 le_setup(hdev);
629
556 hci_setup_event_mask(hdev); 630 hci_setup_event_mask(hdev);
557 631
558 if (hdev->hci_ver > BLUETOOTH_VER_1_1) 632 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
@@ -573,13 +647,13 @@ static void hci_setup(struct hci_dev *hdev)
573 } 647 }
574 } 648 }
575 649
576 if (hdev->features[3] & LMP_RSSI_INQ) 650 if (lmp_inq_rssi_capable(hdev))
577 hci_setup_inquiry_mode(hdev); 651 hci_setup_inquiry_mode(hdev);
578 652
579 if (hdev->features[7] & LMP_INQ_TX_PWR) 653 if (lmp_inq_tx_pwr_capable(hdev))
580 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL); 654 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
581 655
582 if (hdev->features[7] & LMP_EXTFEATURES) { 656 if (lmp_ext_feat_capable(hdev)) {
583 struct hci_cp_read_local_ext_features cp; 657 struct hci_cp_read_local_ext_features cp;
584 658
585 cp.page = 0x01; 659 cp.page = 0x01;
@@ -626,11 +700,11 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
626 700
627 if (lmp_rswitch_capable(hdev)) 701 if (lmp_rswitch_capable(hdev))
628 link_policy |= HCI_LP_RSWITCH; 702 link_policy |= HCI_LP_RSWITCH;
629 if (hdev->features[0] & LMP_HOLD) 703 if (lmp_hold_capable(hdev))
630 link_policy |= HCI_LP_HOLD; 704 link_policy |= HCI_LP_HOLD;
631 if (lmp_sniff_capable(hdev)) 705 if (lmp_sniff_capable(hdev))
632 link_policy |= HCI_LP_SNIFF; 706 link_policy |= HCI_LP_SNIFF;
633 if (hdev->features[1] & LMP_PARK) 707 if (lmp_park_capable(hdev))
634 link_policy |= HCI_LP_PARK; 708 link_policy |= HCI_LP_PARK;
635 709
636 cp.policy = cpu_to_le16(link_policy); 710 cp.policy = cpu_to_le16(link_policy);
@@ -720,10 +794,10 @@ static void hci_set_le_support(struct hci_dev *hdev)
720 794
721 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { 795 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
722 cp.le = 1; 796 cp.le = 1;
723 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR); 797 cp.simul = lmp_le_br_capable(hdev);
724 } 798 }
725 799
726 if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE)) 800 if (cp.le != lmp_host_le_capable(hdev))
727 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), 801 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
728 &cp); 802 &cp);
729} 803}
@@ -846,7 +920,7 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
846 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 920 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
847 921
848 if (rp->status) 922 if (rp->status)
849 return; 923 goto a2mp_rsp;
850 924
851 hdev->amp_status = rp->amp_status; 925 hdev->amp_status = rp->amp_status;
852 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw); 926 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
@@ -860,6 +934,46 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
860 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to); 934 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
861 935
862 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status); 936 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
937
938a2mp_rsp:
939 a2mp_send_getinfo_rsp(hdev);
940}
941
942static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
943 struct sk_buff *skb)
944{
945 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
946 struct amp_assoc *assoc = &hdev->loc_assoc;
947 size_t rem_len, frag_len;
948
949 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
950
951 if (rp->status)
952 goto a2mp_rsp;
953
954 frag_len = skb->len - sizeof(*rp);
955 rem_len = __le16_to_cpu(rp->rem_len);
956
957 if (rem_len > frag_len) {
958 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
959
960 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
961 assoc->offset += frag_len;
962
963 /* Read other fragments */
964 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
965
966 return;
967 }
968
969 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
970 assoc->len = assoc->offset + rem_len;
971 assoc->offset = 0;
972
973a2mp_rsp:
974 /* Send A2MP Rsp when all fragments are received */
975 a2mp_send_getampassoc_rsp(hdev, rp->status);
976 a2mp_send_create_phy_link_req(hdev, rp->status);
863} 977}
864 978
865static void hci_cc_delete_stored_link_key(struct hci_dev *hdev, 979static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
@@ -976,6 +1090,31 @@ static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
976 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status); 1090 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
977} 1091}
978 1092
1093static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1094 struct sk_buff *skb)
1095{
1096 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1097
1098 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1099
1100 if (!rp->status) {
1101 hdev->adv_tx_power = rp->tx_power;
1102 if (!test_bit(HCI_INIT, &hdev->flags))
1103 hci_update_ad(hdev);
1104 }
1105
1106 hci_req_complete(hdev, HCI_OP_LE_READ_ADV_TX_POWER, rp->status);
1107}
1108
1109static void hci_cc_le_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
1110{
1111 __u8 status = *((__u8 *) skb->data);
1112
1113 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1114
1115 hci_req_complete(hdev, HCI_OP_LE_SET_EVENT_MASK, status);
1116}
1117
979static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) 1118static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
980{ 1119{
981 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 1120 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
@@ -1051,6 +1190,33 @@ static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1051 hci_dev_unlock(hdev); 1190 hci_dev_unlock(hdev);
1052} 1191}
1053 1192
1193static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1194{
1195 __u8 *sent, status = *((__u8 *) skb->data);
1196
1197 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1198
1199 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1200 if (!sent)
1201 return;
1202
1203 hci_dev_lock(hdev);
1204
1205 if (!status) {
1206 if (*sent)
1207 set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
1208 else
1209 clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
1210 }
1211
1212 hci_dev_unlock(hdev);
1213
1214 if (!test_bit(HCI_INIT, &hdev->flags))
1215 hci_update_ad(hdev);
1216
1217 hci_req_complete(hdev, HCI_OP_LE_SET_ADV_ENABLE, status);
1218}
1219
1054static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb) 1220static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1055{ 1221{
1056 __u8 status = *((__u8 *) skb->data); 1222 __u8 status = *((__u8 *) skb->data);
@@ -1165,6 +1331,11 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1165 hdev->host_features[0] |= LMP_HOST_LE; 1331 hdev->host_features[0] |= LMP_HOST_LE;
1166 else 1332 else
1167 hdev->host_features[0] &= ~LMP_HOST_LE; 1333 hdev->host_features[0] &= ~LMP_HOST_LE;
1334
1335 if (sent->simul)
1336 hdev->host_features[0] |= LMP_HOST_LE_BREDR;
1337 else
1338 hdev->host_features[0] &= ~LMP_HOST_LE_BREDR;
1168 } 1339 }
1169 1340
1170 if (test_bit(HCI_MGMT, &hdev->dev_flags) && 1341 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
@@ -1174,6 +1345,20 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1174 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status); 1345 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1175} 1346}
1176 1347
1348static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1349 struct sk_buff *skb)
1350{
1351 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1352
1353 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1354 hdev->name, rp->status, rp->phy_handle);
1355
1356 if (rp->status)
1357 return;
1358
1359 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1360}
1361
1177static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 1362static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1178{ 1363{
1179 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1364 BT_DBG("%s status 0x%2.2x", hdev->name, status);
@@ -1210,7 +1395,7 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1210 1395
1211 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1396 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1212 1397
1213 BT_DBG("%s bdaddr %s hcon %p", hdev->name, batostr(&cp->bdaddr), conn); 1398 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1214 1399
1215 if (status) { 1400 if (status) {
1216 if (conn && conn->state == BT_CONNECT) { 1401 if (conn && conn->state == BT_CONNECT) {
@@ -1639,8 +1824,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1639 return; 1824 return;
1640 } 1825 }
1641 1826
1642 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&conn->dst), 1827 BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);
1643 conn);
1644 1828
1645 conn->state = BT_CLOSED; 1829 conn->state = BT_CLOSED;
1646 mgmt_connect_failed(hdev, &conn->dst, conn->type, 1830 mgmt_connect_failed(hdev, &conn->dst, conn->type,
@@ -1657,6 +1841,52 @@ static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1657 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1841 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1658} 1842}
1659 1843
1844static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1845{
1846 struct hci_cp_create_phy_link *cp;
1847
1848 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1849
1850 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1851 if (!cp)
1852 return;
1853
1854 hci_dev_lock(hdev);
1855
1856 if (status) {
1857 struct hci_conn *hcon;
1858
1859 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1860 if (hcon)
1861 hci_conn_del(hcon);
1862 } else {
1863 amp_write_remote_assoc(hdev, cp->phy_handle);
1864 }
1865
1866 hci_dev_unlock(hdev);
1867}
1868
1869static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1870{
1871 struct hci_cp_accept_phy_link *cp;
1872
1873 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1874
1875 if (status)
1876 return;
1877
1878 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1879 if (!cp)
1880 return;
1881
1882 amp_write_remote_assoc(hdev, cp->phy_handle);
1883}
1884
1885static void hci_cs_create_logical_link(struct hci_dev *hdev, u8 status)
1886{
1887 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1888}
1889
1660static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1890static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1661{ 1891{
1662 __u8 status = *((__u8 *) skb->data); 1892 __u8 status = *((__u8 *) skb->data);
@@ -1817,15 +2047,53 @@ unlock:
1817 hci_conn_check_pending(hdev); 2047 hci_conn_check_pending(hdev);
1818} 2048}
1819 2049
2050void hci_conn_accept(struct hci_conn *conn, int mask)
2051{
2052 struct hci_dev *hdev = conn->hdev;
2053
2054 BT_DBG("conn %p", conn);
2055
2056 conn->state = BT_CONFIG;
2057
2058 if (!lmp_esco_capable(hdev)) {
2059 struct hci_cp_accept_conn_req cp;
2060
2061 bacpy(&cp.bdaddr, &conn->dst);
2062
2063 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2064 cp.role = 0x00; /* Become master */
2065 else
2066 cp.role = 0x01; /* Remain slave */
2067
2068 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2069 } else /* lmp_esco_capable(hdev)) */ {
2070 struct hci_cp_accept_sync_conn_req cp;
2071
2072 bacpy(&cp.bdaddr, &conn->dst);
2073 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2074
2075 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
2076 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
2077 cp.max_latency = __constant_cpu_to_le16(0xffff);
2078 cp.content_format = cpu_to_le16(hdev->voice_setting);
2079 cp.retrans_effort = 0xff;
2080
2081 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
2082 sizeof(cp), &cp);
2083 }
2084}
2085
1820static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2086static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1821{ 2087{
1822 struct hci_ev_conn_request *ev = (void *) skb->data; 2088 struct hci_ev_conn_request *ev = (void *) skb->data;
1823 int mask = hdev->link_mode; 2089 int mask = hdev->link_mode;
2090 __u8 flags = 0;
1824 2091
1825 BT_DBG("%s bdaddr %s type 0x%x", hdev->name, batostr(&ev->bdaddr), 2092 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
1826 ev->link_type); 2093 ev->link_type);
1827 2094
1828 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); 2095 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2096 &flags);
1829 2097
1830 if ((mask & HCI_LM_ACCEPT) && 2098 if ((mask & HCI_LM_ACCEPT) &&
1831 !hci_blacklist_lookup(hdev, &ev->bdaddr)) { 2099 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
@@ -1851,12 +2119,13 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1851 } 2119 }
1852 2120
1853 memcpy(conn->dev_class, ev->dev_class, 3); 2121 memcpy(conn->dev_class, ev->dev_class, 3);
1854 conn->state = BT_CONNECT;
1855 2122
1856 hci_dev_unlock(hdev); 2123 hci_dev_unlock(hdev);
1857 2124
1858 if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) { 2125 if (ev->link_type == ACL_LINK ||
2126 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
1859 struct hci_cp_accept_conn_req cp; 2127 struct hci_cp_accept_conn_req cp;
2128 conn->state = BT_CONNECT;
1860 2129
1861 bacpy(&cp.bdaddr, &ev->bdaddr); 2130 bacpy(&cp.bdaddr, &ev->bdaddr);
1862 2131
@@ -1867,8 +2136,9 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1867 2136
1868 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), 2137 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1869 &cp); 2138 &cp);
1870 } else { 2139 } else if (!(flags & HCI_PROTO_DEFER)) {
1871 struct hci_cp_accept_sync_conn_req cp; 2140 struct hci_cp_accept_sync_conn_req cp;
2141 conn->state = BT_CONNECT;
1872 2142
1873 bacpy(&cp.bdaddr, &ev->bdaddr); 2143 bacpy(&cp.bdaddr, &ev->bdaddr);
1874 cp.pkt_type = cpu_to_le16(conn->pkt_type); 2144 cp.pkt_type = cpu_to_le16(conn->pkt_type);
@@ -1881,6 +2151,10 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1881 2151
1882 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, 2152 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1883 sizeof(cp), &cp); 2153 sizeof(cp), &cp);
2154 } else {
2155 conn->state = BT_CONNECT2;
2156 hci_proto_connect_cfm(conn, 0);
2157 hci_conn_put(conn);
1884 } 2158 }
1885 } else { 2159 } else {
1886 /* Connection rejected */ 2160 /* Connection rejected */
@@ -2314,6 +2588,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2314 hci_cc_read_local_amp_info(hdev, skb); 2588 hci_cc_read_local_amp_info(hdev, skb);
2315 break; 2589 break;
2316 2590
2591 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2592 hci_cc_read_local_amp_assoc(hdev, skb);
2593 break;
2594
2317 case HCI_OP_DELETE_STORED_LINK_KEY: 2595 case HCI_OP_DELETE_STORED_LINK_KEY:
2318 hci_cc_delete_stored_link_key(hdev, skb); 2596 hci_cc_delete_stored_link_key(hdev, skb);
2319 break; 2597 break;
@@ -2350,6 +2628,14 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2350 hci_cc_le_read_buffer_size(hdev, skb); 2628 hci_cc_le_read_buffer_size(hdev, skb);
2351 break; 2629 break;
2352 2630
2631 case HCI_OP_LE_READ_ADV_TX_POWER:
2632 hci_cc_le_read_adv_tx_power(hdev, skb);
2633 break;
2634
2635 case HCI_OP_LE_SET_EVENT_MASK:
2636 hci_cc_le_set_event_mask(hdev, skb);
2637 break;
2638
2353 case HCI_OP_USER_CONFIRM_REPLY: 2639 case HCI_OP_USER_CONFIRM_REPLY:
2354 hci_cc_user_confirm_reply(hdev, skb); 2640 hci_cc_user_confirm_reply(hdev, skb);
2355 break; 2641 break;
@@ -2370,6 +2656,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2370 hci_cc_le_set_scan_param(hdev, skb); 2656 hci_cc_le_set_scan_param(hdev, skb);
2371 break; 2657 break;
2372 2658
2659 case HCI_OP_LE_SET_ADV_ENABLE:
2660 hci_cc_le_set_adv_enable(hdev, skb);
2661 break;
2662
2373 case HCI_OP_LE_SET_SCAN_ENABLE: 2663 case HCI_OP_LE_SET_SCAN_ENABLE:
2374 hci_cc_le_set_scan_enable(hdev, skb); 2664 hci_cc_le_set_scan_enable(hdev, skb);
2375 break; 2665 break;
@@ -2386,6 +2676,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2386 hci_cc_write_le_host_supported(hdev, skb); 2676 hci_cc_write_le_host_supported(hdev, skb);
2387 break; 2677 break;
2388 2678
2679 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2680 hci_cc_write_remote_amp_assoc(hdev, skb);
2681 break;
2682
2389 default: 2683 default:
2390 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); 2684 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2391 break; 2685 break;
@@ -2467,6 +2761,18 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2467 hci_cs_le_start_enc(hdev, ev->status); 2761 hci_cs_le_start_enc(hdev, ev->status);
2468 break; 2762 break;
2469 2763
2764 case HCI_OP_CREATE_PHY_LINK:
2765 hci_cs_create_phylink(hdev, ev->status);
2766 break;
2767
2768 case HCI_OP_ACCEPT_PHY_LINK:
2769 hci_cs_accept_phylink(hdev, ev->status);
2770 break;
2771
2772 case HCI_OP_CREATE_LOGICAL_LINK:
2773 hci_cs_create_logical_link(hdev, ev->status);
2774 break;
2775
2470 default: 2776 default:
2471 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); 2777 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2472 break; 2778 break;
@@ -2574,6 +2880,27 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2574 queue_work(hdev->workqueue, &hdev->tx_work); 2880 queue_work(hdev->workqueue, &hdev->tx_work);
2575} 2881}
2576 2882
2883static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2884 __u16 handle)
2885{
2886 struct hci_chan *chan;
2887
2888 switch (hdev->dev_type) {
2889 case HCI_BREDR:
2890 return hci_conn_hash_lookup_handle(hdev, handle);
2891 case HCI_AMP:
2892 chan = hci_chan_lookup_handle(hdev, handle);
2893 if (chan)
2894 return chan->conn;
2895 break;
2896 default:
2897 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2898 break;
2899 }
2900
2901 return NULL;
2902}
2903
2577static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb) 2904static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2578{ 2905{
2579 struct hci_ev_num_comp_blocks *ev = (void *) skb->data; 2906 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
@@ -2595,13 +2922,13 @@ static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2595 2922
2596 for (i = 0; i < ev->num_hndl; i++) { 2923 for (i = 0; i < ev->num_hndl; i++) {
2597 struct hci_comp_blocks_info *info = &ev->handles[i]; 2924 struct hci_comp_blocks_info *info = &ev->handles[i];
2598 struct hci_conn *conn; 2925 struct hci_conn *conn = NULL;
2599 __u16 handle, block_count; 2926 __u16 handle, block_count;
2600 2927
2601 handle = __le16_to_cpu(info->handle); 2928 handle = __le16_to_cpu(info->handle);
2602 block_count = __le16_to_cpu(info->blocks); 2929 block_count = __le16_to_cpu(info->blocks);
2603 2930
2604 conn = hci_conn_hash_lookup_handle(hdev, handle); 2931 conn = __hci_conn_lookup_handle(hdev, handle);
2605 if (!conn) 2932 if (!conn)
2606 continue; 2933 continue;
2607 2934
@@ -2609,6 +2936,7 @@ static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2609 2936
2610 switch (conn->type) { 2937 switch (conn->type) {
2611 case ACL_LINK: 2938 case ACL_LINK:
2939 case AMP_LINK:
2612 hdev->block_cnt += block_count; 2940 hdev->block_cnt += block_count;
2613 if (hdev->block_cnt > hdev->num_blocks) 2941 if (hdev->block_cnt > hdev->num_blocks)
2614 hdev->block_cnt = hdev->num_blocks; 2942 hdev->block_cnt = hdev->num_blocks;
@@ -2705,13 +3033,13 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2705 3033
2706 key = hci_find_link_key(hdev, &ev->bdaddr); 3034 key = hci_find_link_key(hdev, &ev->bdaddr);
2707 if (!key) { 3035 if (!key) {
2708 BT_DBG("%s link key not found for %s", hdev->name, 3036 BT_DBG("%s link key not found for %pMR", hdev->name,
2709 batostr(&ev->bdaddr)); 3037 &ev->bdaddr);
2710 goto not_found; 3038 goto not_found;
2711 } 3039 }
2712 3040
2713 BT_DBG("%s found key type %u for %s", hdev->name, key->type, 3041 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
2714 batostr(&ev->bdaddr)); 3042 &ev->bdaddr);
2715 3043
2716 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) && 3044 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2717 key->type == HCI_LK_DEBUG_COMBINATION) { 3045 key->type == HCI_LK_DEBUG_COMBINATION) {
@@ -3419,6 +3747,130 @@ unlock:
3419 hci_dev_unlock(hdev); 3747 hci_dev_unlock(hdev);
3420} 3748}
3421 3749
3750static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3751 struct sk_buff *skb)
3752{
3753 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3754 struct hci_conn *hcon, *bredr_hcon;
3755
3756 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3757 ev->status);
3758
3759 hci_dev_lock(hdev);
3760
3761 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3762 if (!hcon) {
3763 hci_dev_unlock(hdev);
3764 return;
3765 }
3766
3767 if (ev->status) {
3768 hci_conn_del(hcon);
3769 hci_dev_unlock(hdev);
3770 return;
3771 }
3772
3773 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3774
3775 hcon->state = BT_CONNECTED;
3776 bacpy(&hcon->dst, &bredr_hcon->dst);
3777
3778 hci_conn_hold(hcon);
3779 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3780 hci_conn_put(hcon);
3781
3782 hci_conn_hold_device(hcon);
3783 hci_conn_add_sysfs(hcon);
3784
3785 amp_physical_cfm(bredr_hcon, hcon);
3786
3787 hci_dev_unlock(hdev);
3788}
3789
3790static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3791{
3792 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3793 struct hci_conn *hcon;
3794 struct hci_chan *hchan;
3795 struct amp_mgr *mgr;
3796
3797 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3798 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3799 ev->status);
3800
3801 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3802 if (!hcon)
3803 return;
3804
3805 /* Create AMP hchan */
3806 hchan = hci_chan_create(hcon);
3807 if (!hchan)
3808 return;
3809
3810 hchan->handle = le16_to_cpu(ev->handle);
3811
3812 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3813
3814 mgr = hcon->amp_mgr;
3815 if (mgr && mgr->bredr_chan) {
3816 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3817
3818 l2cap_chan_lock(bredr_chan);
3819
3820 bredr_chan->conn->mtu = hdev->block_mtu;
3821 l2cap_logical_cfm(bredr_chan, hchan, 0);
3822 hci_conn_hold(hcon);
3823
3824 l2cap_chan_unlock(bredr_chan);
3825 }
3826}
3827
3828static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3829 struct sk_buff *skb)
3830{
3831 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3832 struct hci_chan *hchan;
3833
3834 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3835 le16_to_cpu(ev->handle), ev->status);
3836
3837 if (ev->status)
3838 return;
3839
3840 hci_dev_lock(hdev);
3841
3842 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3843 if (!hchan)
3844 goto unlock;
3845
3846 amp_destroy_logical_link(hchan, ev->reason);
3847
3848unlock:
3849 hci_dev_unlock(hdev);
3850}
3851
3852static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3853 struct sk_buff *skb)
3854{
3855 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3856 struct hci_conn *hcon;
3857
3858 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3859
3860 if (ev->status)
3861 return;
3862
3863 hci_dev_lock(hdev);
3864
3865 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3866 if (hcon) {
3867 hcon->state = BT_CLOSED;
3868 hci_conn_del(hcon);
3869 }
3870
3871 hci_dev_unlock(hdev);
3872}
3873
3422static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 3874static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3423{ 3875{
3424 struct hci_ev_le_conn_complete *ev = (void *) skb->data; 3876 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
@@ -3558,6 +4010,22 @@ static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3558 } 4010 }
3559} 4011}
3560 4012
4013static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4014{
4015 struct hci_ev_channel_selected *ev = (void *) skb->data;
4016 struct hci_conn *hcon;
4017
4018 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4019
4020 skb_pull(skb, sizeof(*ev));
4021
4022 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4023 if (!hcon)
4024 return;
4025
4026 amp_read_loc_assoc_final_data(hdev, hcon);
4027}
4028
3561void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) 4029void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3562{ 4030{
3563 struct hci_event_hdr *hdr = (void *) skb->data; 4031 struct hci_event_hdr *hdr = (void *) skb->data;
@@ -3722,10 +4190,30 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3722 hci_le_meta_evt(hdev, skb); 4190 hci_le_meta_evt(hdev, skb);
3723 break; 4191 break;
3724 4192
4193 case HCI_EV_CHANNEL_SELECTED:
4194 hci_chan_selected_evt(hdev, skb);
4195 break;
4196
3725 case HCI_EV_REMOTE_OOB_DATA_REQUEST: 4197 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3726 hci_remote_oob_data_request_evt(hdev, skb); 4198 hci_remote_oob_data_request_evt(hdev, skb);
3727 break; 4199 break;
3728 4200
4201 case HCI_EV_PHY_LINK_COMPLETE:
4202 hci_phy_link_complete_evt(hdev, skb);
4203 break;
4204
4205 case HCI_EV_LOGICAL_LINK_COMPLETE:
4206 hci_loglink_complete_evt(hdev, skb);
4207 break;
4208
4209 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
4210 hci_disconn_loglink_complete_evt(hdev, skb);
4211 break;
4212
4213 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
4214 hci_disconn_phylink_complete_evt(hdev, skb);
4215 break;
4216
3729 case HCI_EV_NUM_COMP_BLOCKS: 4217 case HCI_EV_NUM_COMP_BLOCKS:
3730 hci_num_comp_blocks_evt(hdev, skb); 4218 hci_num_comp_blocks_evt(hdev, skb);
3731 break; 4219 break;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index a20e61c3653d..55cceee02a84 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -38,7 +38,7 @@ static ssize_t show_link_address(struct device *dev,
38 struct device_attribute *attr, char *buf) 38 struct device_attribute *attr, char *buf)
39{ 39{
40 struct hci_conn *conn = to_hci_conn(dev); 40 struct hci_conn *conn = to_hci_conn(dev);
41 return sprintf(buf, "%s\n", batostr(&conn->dst)); 41 return sprintf(buf, "%pMR\n", &conn->dst);
42} 42}
43 43
44static ssize_t show_link_features(struct device *dev, 44static ssize_t show_link_features(struct device *dev,
@@ -224,7 +224,7 @@ static ssize_t show_address(struct device *dev,
224 struct device_attribute *attr, char *buf) 224 struct device_attribute *attr, char *buf)
225{ 225{
226 struct hci_dev *hdev = to_hci_dev(dev); 226 struct hci_dev *hdev = to_hci_dev(dev);
227 return sprintf(buf, "%s\n", batostr(&hdev->bdaddr)); 227 return sprintf(buf, "%pMR\n", &hdev->bdaddr);
228} 228}
229 229
230static ssize_t show_features(struct device *dev, 230static ssize_t show_features(struct device *dev,
@@ -406,8 +406,8 @@ static int inquiry_cache_show(struct seq_file *f, void *p)
406 406
407 list_for_each_entry(e, &cache->all, all) { 407 list_for_each_entry(e, &cache->all, all) {
408 struct inquiry_data *data = &e->data; 408 struct inquiry_data *data = &e->data;
409 seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n", 409 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
410 batostr(&data->bdaddr), 410 &data->bdaddr,
411 data->pscan_rep_mode, data->pscan_period_mode, 411 data->pscan_rep_mode, data->pscan_period_mode,
412 data->pscan_mode, data->dev_class[2], 412 data->pscan_mode, data->dev_class[2],
413 data->dev_class[1], data->dev_class[0], 413 data->dev_class[1], data->dev_class[0],
@@ -440,7 +440,7 @@ static int blacklist_show(struct seq_file *f, void *p)
440 hci_dev_lock(hdev); 440 hci_dev_lock(hdev);
441 441
442 list_for_each_entry(b, &hdev->blacklist, list) 442 list_for_each_entry(b, &hdev->blacklist, list)
443 seq_printf(f, "%s\n", batostr(&b->bdaddr)); 443 seq_printf(f, "%pMR\n", &b->bdaddr);
444 444
445 hci_dev_unlock(hdev); 445 hci_dev_unlock(hdev);
446 446
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index ccd985da6518..b2bcbe2dc328 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -932,8 +932,12 @@ static int hidp_setup_hid(struct hidp_session *session,
932 hid->country = req->country; 932 hid->country = req->country;
933 933
934 strncpy(hid->name, req->name, 128); 934 strncpy(hid->name, req->name, 128);
935 strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64); 935
936 strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64); 936 snprintf(hid->phys, sizeof(hid->phys), "%pMR",
937 &bt_sk(session->ctrl_sock->sk)->src);
938
939 snprintf(hid->uniq, sizeof(hid->uniq), "%pMR",
940 &bt_sk(session->ctrl_sock->sk)->dst);
937 941
938 hid->dev.parent = &session->conn->dev; 942 hid->dev.parent = &session->conn->dev;
939 hid->ll_driver = &hidp_hid_driver; 943 hid->ll_driver = &hidp_hid_driver;
@@ -941,6 +945,13 @@ static int hidp_setup_hid(struct hidp_session *session,
941 hid->hid_get_raw_report = hidp_get_raw_report; 945 hid->hid_get_raw_report = hidp_get_raw_report;
942 hid->hid_output_raw_report = hidp_output_raw_report; 946 hid->hid_output_raw_report = hidp_output_raw_report;
943 947
948 /* True if device is blacklisted in drivers/hid/hid-core.c */
949 if (hid_ignore(hid)) {
950 hid_destroy_device(session->hid);
951 session->hid = NULL;
952 return -ENODEV;
953 }
954
944 return 0; 955 return 0;
945 956
946fault: 957fault:
@@ -1013,7 +1024,7 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
1013 1024
1014 if (req->rd_size > 0) { 1025 if (req->rd_size > 0) {
1015 err = hidp_setup_hid(session, req); 1026 err = hidp_setup_hid(session, req);
1016 if (err) 1027 if (err && err != -ENODEV)
1017 goto purge; 1028 goto purge;
1018 } 1029 }
1019 1030
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index a91239dcda41..2c78208d793e 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -38,6 +38,7 @@
38#include <net/bluetooth/l2cap.h> 38#include <net/bluetooth/l2cap.h>
39#include <net/bluetooth/smp.h> 39#include <net/bluetooth/smp.h>
40#include <net/bluetooth/a2mp.h> 40#include <net/bluetooth/a2mp.h>
41#include <net/bluetooth/amp.h>
41 42
42bool disable_ertm; 43bool disable_ertm;
43 44
@@ -48,19 +49,19 @@ static LIST_HEAD(chan_list);
48static DEFINE_RWLOCK(chan_list_lock); 49static DEFINE_RWLOCK(chan_list_lock);
49 50
50static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, 51static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data); 52 u8 code, u8 ident, u16 dlen, void *data);
52static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, 53static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
53 void *data); 54 void *data);
54static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data); 55static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55static void l2cap_send_disconn_req(struct l2cap_conn *conn, 56static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
56 struct l2cap_chan *chan, int err);
57 57
58static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, 58static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event); 59 struct sk_buff_head *skbs, u8 event);
60 60
61/* ---- L2CAP channels ---- */ 61/* ---- L2CAP channels ---- */
62 62
63static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) 63static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 u16 cid)
64{ 65{
65 struct l2cap_chan *c; 66 struct l2cap_chan *c;
66 67
@@ -71,7 +72,8 @@ static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16
71 return NULL; 72 return NULL;
72} 73}
73 74
74static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) 75static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 u16 cid)
75{ 77{
76 struct l2cap_chan *c; 78 struct l2cap_chan *c;
77 79
@@ -84,7 +86,8 @@ static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16
84 86
85/* Find channel with given SCID. 87/* Find channel with given SCID.
86 * Returns locked channel. */ 88 * Returns locked channel. */
87static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) 89static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
90 u16 cid)
88{ 91{
89 struct l2cap_chan *c; 92 struct l2cap_chan *c;
90 93
@@ -97,7 +100,25 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 ci
97 return c; 100 return c;
98} 101}
99 102
100static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident) 103/* Find channel with given DCID.
104 * Returns locked channel.
105 */
106static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
107 u16 cid)
108{
109 struct l2cap_chan *c;
110
111 mutex_lock(&conn->chan_lock);
112 c = __l2cap_get_chan_by_dcid(conn, cid);
113 if (c)
114 l2cap_chan_lock(c);
115 mutex_unlock(&conn->chan_lock);
116
117 return c;
118}
119
120static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
121 u8 ident)
101{ 122{
102 struct l2cap_chan *c; 123 struct l2cap_chan *c;
103 124
@@ -108,6 +129,20 @@ static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8
108 return NULL; 129 return NULL;
109} 130}
110 131
132static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
133 u8 ident)
134{
135 struct l2cap_chan *c;
136
137 mutex_lock(&conn->chan_lock);
138 c = __l2cap_get_chan_by_ident(conn, ident);
139 if (c)
140 l2cap_chan_lock(c);
141 mutex_unlock(&conn->chan_lock);
142
143 return c;
144}
145
111static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src) 146static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
112{ 147{
113 struct l2cap_chan *c; 148 struct l2cap_chan *c;
@@ -178,7 +213,7 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
178static void __l2cap_state_change(struct l2cap_chan *chan, int state) 213static void __l2cap_state_change(struct l2cap_chan *chan, int state)
179{ 214{
180 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state), 215 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
181 state_to_string(state)); 216 state_to_string(state));
182 217
183 chan->state = state; 218 chan->state = state;
184 chan->ops->state_change(chan, state); 219 chan->ops->state_change(chan, state);
@@ -361,7 +396,7 @@ static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
361static void l2cap_chan_timeout(struct work_struct *work) 396static void l2cap_chan_timeout(struct work_struct *work)
362{ 397{
363 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 398 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
364 chan_timer.work); 399 chan_timer.work);
365 struct l2cap_conn *conn = chan->conn; 400 struct l2cap_conn *conn = chan->conn;
366 int reason; 401 int reason;
367 402
@@ -373,7 +408,7 @@ static void l2cap_chan_timeout(struct work_struct *work)
373 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) 408 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
374 reason = ECONNREFUSED; 409 reason = ECONNREFUSED;
375 else if (chan->state == BT_CONNECT && 410 else if (chan->state == BT_CONNECT &&
376 chan->sec_level != BT_SECURITY_SDP) 411 chan->sec_level != BT_SECURITY_SDP)
377 reason = ECONNREFUSED; 412 reason = ECONNREFUSED;
378 else 413 else
379 reason = ETIMEDOUT; 414 reason = ETIMEDOUT;
@@ -455,7 +490,7 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
455 set_bit(FLAG_FORCE_ACTIVE, &chan->flags); 490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
456} 491}
457 492
458static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) 493void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
459{ 494{
460 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, 495 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
461 __le16_to_cpu(chan->psm), chan->dcid); 496 __le16_to_cpu(chan->psm), chan->dcid);
@@ -504,7 +539,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
504 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE; 539 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
505 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME; 540 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
506 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT; 541 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
507 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO; 542 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
508 543
509 l2cap_chan_hold(chan); 544 l2cap_chan_hold(chan);
510 545
@@ -527,6 +562,7 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
527 BT_DBG("chan %p, conn %p, err %d", chan, conn, err); 562 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
528 563
529 if (conn) { 564 if (conn) {
565 struct amp_mgr *mgr = conn->hcon->amp_mgr;
530 /* Delete from channel list */ 566 /* Delete from channel list */
531 list_del(&chan->list); 567 list_del(&chan->list);
532 568
@@ -536,10 +572,19 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
536 572
537 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP) 573 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
538 hci_conn_put(conn->hcon); 574 hci_conn_put(conn->hcon);
575
576 if (mgr && mgr->bredr_chan == chan)
577 mgr->bredr_chan = NULL;
539 } 578 }
540 579
541 if (chan->ops->teardown) 580 if (chan->hs_hchan) {
542 chan->ops->teardown(chan, err); 581 struct hci_chan *hs_hchan = chan->hs_hchan;
582
583 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
584 amp_disconnect_logical_link(hs_hchan);
585 }
586
587 chan->ops->teardown(chan, err);
543 588
544 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state)) 589 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
545 return; 590 return;
@@ -573,28 +618,27 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
573 struct l2cap_conn *conn = chan->conn; 618 struct l2cap_conn *conn = chan->conn;
574 struct sock *sk = chan->sk; 619 struct sock *sk = chan->sk;
575 620
576 BT_DBG("chan %p state %s sk %p", chan, 621 BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
577 state_to_string(chan->state), sk); 622 sk);
578 623
579 switch (chan->state) { 624 switch (chan->state) {
580 case BT_LISTEN: 625 case BT_LISTEN:
581 if (chan->ops->teardown) 626 chan->ops->teardown(chan, 0);
582 chan->ops->teardown(chan, 0);
583 break; 627 break;
584 628
585 case BT_CONNECTED: 629 case BT_CONNECTED:
586 case BT_CONFIG: 630 case BT_CONFIG:
587 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && 631 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
588 conn->hcon->type == ACL_LINK) { 632 conn->hcon->type == ACL_LINK) {
589 __set_chan_timer(chan, sk->sk_sndtimeo); 633 __set_chan_timer(chan, sk->sk_sndtimeo);
590 l2cap_send_disconn_req(conn, chan, reason); 634 l2cap_send_disconn_req(chan, reason);
591 } else 635 } else
592 l2cap_chan_del(chan, reason); 636 l2cap_chan_del(chan, reason);
593 break; 637 break;
594 638
595 case BT_CONNECT2: 639 case BT_CONNECT2:
596 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && 640 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
597 conn->hcon->type == ACL_LINK) { 641 conn->hcon->type == ACL_LINK) {
598 struct l2cap_conn_rsp rsp; 642 struct l2cap_conn_rsp rsp;
599 __u16 result; 643 __u16 result;
600 644
@@ -609,7 +653,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
609 rsp.result = cpu_to_le16(result); 653 rsp.result = cpu_to_le16(result);
610 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); 654 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
611 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 655 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
612 sizeof(rsp), &rsp); 656 sizeof(rsp), &rsp);
613 } 657 }
614 658
615 l2cap_chan_del(chan, reason); 659 l2cap_chan_del(chan, reason);
@@ -621,8 +665,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
621 break; 665 break;
622 666
623 default: 667 default:
624 if (chan->ops->teardown) 668 chan->ops->teardown(chan, 0);
625 chan->ops->teardown(chan, 0);
626 break; 669 break;
627 } 670 }
628} 671}
@@ -691,7 +734,8 @@ static u8 l2cap_get_ident(struct l2cap_conn *conn)
691 return id; 734 return id;
692} 735}
693 736
694static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data) 737static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
738 void *data)
695{ 739{
696 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); 740 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
697 u8 flags; 741 u8 flags;
@@ -712,16 +756,31 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
712 hci_send_acl(conn->hchan, skb, flags); 756 hci_send_acl(conn->hchan, skb, flags);
713} 757}
714 758
759static bool __chan_is_moving(struct l2cap_chan *chan)
760{
761 return chan->move_state != L2CAP_MOVE_STABLE &&
762 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
763}
764
715static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) 765static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
716{ 766{
717 struct hci_conn *hcon = chan->conn->hcon; 767 struct hci_conn *hcon = chan->conn->hcon;
718 u16 flags; 768 u16 flags;
719 769
720 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len, 770 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
721 skb->priority); 771 skb->priority);
772
773 if (chan->hs_hcon && !__chan_is_moving(chan)) {
774 if (chan->hs_hchan)
775 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
776 else
777 kfree_skb(skb);
778
779 return;
780 }
722 781
723 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) && 782 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
724 lmp_no_flush_capable(hcon->hdev)) 783 lmp_no_flush_capable(hcon->hdev))
725 flags = ACL_START_NO_FLUSH; 784 flags = ACL_START_NO_FLUSH;
726 else 785 else
727 flags = ACL_START; 786 flags = ACL_START;
@@ -895,6 +954,9 @@ static void l2cap_send_sframe(struct l2cap_chan *chan,
895 if (!control->sframe) 954 if (!control->sframe)
896 return; 955 return;
897 956
957 if (__chan_is_moving(chan))
958 return;
959
898 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) && 960 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
899 !control->poll) 961 !control->poll)
900 control->final = 1; 962 control->final = 1;
@@ -946,7 +1008,26 @@ static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
946 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state); 1008 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
947} 1009}
948 1010
949static void l2cap_send_conn_req(struct l2cap_chan *chan) 1011static bool __amp_capable(struct l2cap_chan *chan)
1012{
1013 struct l2cap_conn *conn = chan->conn;
1014
1015 if (enable_hs &&
1016 hci_amp_capable() &&
1017 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
1018 conn->fixed_chan_mask & L2CAP_FC_A2MP)
1019 return true;
1020 else
1021 return false;
1022}
1023
1024static bool l2cap_check_efs(struct l2cap_chan *chan)
1025{
1026 /* Check EFS parameters */
1027 return true;
1028}
1029
1030void l2cap_send_conn_req(struct l2cap_chan *chan)
950{ 1031{
951 struct l2cap_conn *conn = chan->conn; 1032 struct l2cap_conn *conn = chan->conn;
952 struct l2cap_conn_req req; 1033 struct l2cap_conn_req req;
@@ -961,6 +1042,76 @@ static void l2cap_send_conn_req(struct l2cap_chan *chan)
961 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req); 1042 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
962} 1043}
963 1044
1045static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1046{
1047 struct l2cap_create_chan_req req;
1048 req.scid = cpu_to_le16(chan->scid);
1049 req.psm = chan->psm;
1050 req.amp_id = amp_id;
1051
1052 chan->ident = l2cap_get_ident(chan->conn);
1053
1054 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1055 sizeof(req), &req);
1056}
1057
1058static void l2cap_move_setup(struct l2cap_chan *chan)
1059{
1060 struct sk_buff *skb;
1061
1062 BT_DBG("chan %p", chan);
1063
1064 if (chan->mode != L2CAP_MODE_ERTM)
1065 return;
1066
1067 __clear_retrans_timer(chan);
1068 __clear_monitor_timer(chan);
1069 __clear_ack_timer(chan);
1070
1071 chan->retry_count = 0;
1072 skb_queue_walk(&chan->tx_q, skb) {
1073 if (bt_cb(skb)->control.retries)
1074 bt_cb(skb)->control.retries = 1;
1075 else
1076 break;
1077 }
1078
1079 chan->expected_tx_seq = chan->buffer_seq;
1080
1081 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1082 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1083 l2cap_seq_list_clear(&chan->retrans_list);
1084 l2cap_seq_list_clear(&chan->srej_list);
1085 skb_queue_purge(&chan->srej_q);
1086
1087 chan->tx_state = L2CAP_TX_STATE_XMIT;
1088 chan->rx_state = L2CAP_RX_STATE_MOVE;
1089
1090 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1091}
1092
1093static void l2cap_move_done(struct l2cap_chan *chan)
1094{
1095 u8 move_role = chan->move_role;
1096 BT_DBG("chan %p", chan);
1097
1098 chan->move_state = L2CAP_MOVE_STABLE;
1099 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1100
1101 if (chan->mode != L2CAP_MODE_ERTM)
1102 return;
1103
1104 switch (move_role) {
1105 case L2CAP_MOVE_ROLE_INITIATOR:
1106 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1107 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1108 break;
1109 case L2CAP_MOVE_ROLE_RESPONDER:
1110 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1111 break;
1112 }
1113}
1114
964static void l2cap_chan_ready(struct l2cap_chan *chan) 1115static void l2cap_chan_ready(struct l2cap_chan *chan)
965{ 1116{
966 /* This clears all conf flags, including CONF_NOT_COMPLETE */ 1117 /* This clears all conf flags, including CONF_NOT_COMPLETE */
@@ -972,6 +1123,16 @@ static void l2cap_chan_ready(struct l2cap_chan *chan)
972 chan->ops->ready(chan); 1123 chan->ops->ready(chan);
973} 1124}
974 1125
1126static void l2cap_start_connection(struct l2cap_chan *chan)
1127{
1128 if (__amp_capable(chan)) {
1129 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1130 a2mp_discover_amp(chan);
1131 } else {
1132 l2cap_send_conn_req(chan);
1133 }
1134}
1135
975static void l2cap_do_start(struct l2cap_chan *chan) 1136static void l2cap_do_start(struct l2cap_chan *chan)
976{ 1137{
977 struct l2cap_conn *conn = chan->conn; 1138 struct l2cap_conn *conn = chan->conn;
@@ -986,8 +1147,9 @@ static void l2cap_do_start(struct l2cap_chan *chan)
986 return; 1147 return;
987 1148
988 if (l2cap_chan_check_security(chan) && 1149 if (l2cap_chan_check_security(chan) &&
989 __l2cap_no_conn_pending(chan)) 1150 __l2cap_no_conn_pending(chan)) {
990 l2cap_send_conn_req(chan); 1151 l2cap_start_connection(chan);
1152 }
991 } else { 1153 } else {
992 struct l2cap_info_req req; 1154 struct l2cap_info_req req;
993 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK); 1155 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
@@ -997,8 +1159,8 @@ static void l2cap_do_start(struct l2cap_chan *chan)
997 1159
998 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT); 1160 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
999 1161
1000 l2cap_send_cmd(conn, conn->info_ident, 1162 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1001 L2CAP_INFO_REQ, sizeof(req), &req); 1163 sizeof(req), &req);
1002 } 1164 }
1003} 1165}
1004 1166
@@ -1018,9 +1180,10 @@ static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1018 } 1180 }
1019} 1181}
1020 1182
1021static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err) 1183static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1022{ 1184{
1023 struct sock *sk = chan->sk; 1185 struct sock *sk = chan->sk;
1186 struct l2cap_conn *conn = chan->conn;
1024 struct l2cap_disconn_req req; 1187 struct l2cap_disconn_req req;
1025 1188
1026 if (!conn) 1189 if (!conn)
@@ -1033,14 +1196,14 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c
1033 } 1196 }
1034 1197
1035 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) { 1198 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1036 __l2cap_state_change(chan, BT_DISCONN); 1199 l2cap_state_change(chan, BT_DISCONN);
1037 return; 1200 return;
1038 } 1201 }
1039 1202
1040 req.dcid = cpu_to_le16(chan->dcid); 1203 req.dcid = cpu_to_le16(chan->dcid);
1041 req.scid = cpu_to_le16(chan->scid); 1204 req.scid = cpu_to_le16(chan->scid);
1042 l2cap_send_cmd(conn, l2cap_get_ident(conn), 1205 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1043 L2CAP_DISCONN_REQ, sizeof(req), &req); 1206 sizeof(req), &req);
1044 1207
1045 lock_sock(sk); 1208 lock_sock(sk);
1046 __l2cap_state_change(chan, BT_DISCONN); 1209 __l2cap_state_change(chan, BT_DISCONN);
@@ -1069,20 +1232,20 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
1069 1232
1070 if (chan->state == BT_CONNECT) { 1233 if (chan->state == BT_CONNECT) {
1071 if (!l2cap_chan_check_security(chan) || 1234 if (!l2cap_chan_check_security(chan) ||
1072 !__l2cap_no_conn_pending(chan)) { 1235 !__l2cap_no_conn_pending(chan)) {
1073 l2cap_chan_unlock(chan); 1236 l2cap_chan_unlock(chan);
1074 continue; 1237 continue;
1075 } 1238 }
1076 1239
1077 if (!l2cap_mode_supported(chan->mode, conn->feat_mask) 1240 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1078 && test_bit(CONF_STATE2_DEVICE, 1241 && test_bit(CONF_STATE2_DEVICE,
1079 &chan->conf_state)) { 1242 &chan->conf_state)) {
1080 l2cap_chan_close(chan, ECONNRESET); 1243 l2cap_chan_close(chan, ECONNRESET);
1081 l2cap_chan_unlock(chan); 1244 l2cap_chan_unlock(chan);
1082 continue; 1245 continue;
1083 } 1246 }
1084 1247
1085 l2cap_send_conn_req(chan); 1248 l2cap_start_connection(chan);
1086 1249
1087 } else if (chan->state == BT_CONNECT2) { 1250 } else if (chan->state == BT_CONNECT2) {
1088 struct l2cap_conn_rsp rsp; 1251 struct l2cap_conn_rsp rsp;
@@ -1094,11 +1257,9 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
1094 lock_sock(sk); 1257 lock_sock(sk);
1095 if (test_bit(BT_SK_DEFER_SETUP, 1258 if (test_bit(BT_SK_DEFER_SETUP,
1096 &bt_sk(sk)->flags)) { 1259 &bt_sk(sk)->flags)) {
1097 struct sock *parent = bt_sk(sk)->parent;
1098 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND); 1260 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1099 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND); 1261 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1100 if (parent) 1262 chan->ops->defer(chan);
1101 parent->sk_data_ready(parent, 0);
1102 1263
1103 } else { 1264 } else {
1104 __l2cap_state_change(chan, BT_CONFIG); 1265 __l2cap_state_change(chan, BT_CONFIG);
@@ -1112,17 +1273,17 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
1112 } 1273 }
1113 1274
1114 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 1275 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1115 sizeof(rsp), &rsp); 1276 sizeof(rsp), &rsp);
1116 1277
1117 if (test_bit(CONF_REQ_SENT, &chan->conf_state) || 1278 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1118 rsp.result != L2CAP_CR_SUCCESS) { 1279 rsp.result != L2CAP_CR_SUCCESS) {
1119 l2cap_chan_unlock(chan); 1280 l2cap_chan_unlock(chan);
1120 continue; 1281 continue;
1121 } 1282 }
1122 1283
1123 set_bit(CONF_REQ_SENT, &chan->conf_state); 1284 set_bit(CONF_REQ_SENT, &chan->conf_state);
1124 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 1285 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1125 l2cap_build_conf_req(chan, buf), buf); 1286 l2cap_build_conf_req(chan, buf), buf);
1126 chan->num_conf_req++; 1287 chan->num_conf_req++;
1127 } 1288 }
1128 1289
@@ -1204,8 +1365,6 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1204 bacpy(&bt_sk(sk)->src, conn->src); 1365 bacpy(&bt_sk(sk)->src, conn->src);
1205 bacpy(&bt_sk(sk)->dst, conn->dst); 1366 bacpy(&bt_sk(sk)->dst, conn->dst);
1206 1367
1207 bt_accept_enqueue(parent, sk);
1208
1209 l2cap_chan_add(conn, chan); 1368 l2cap_chan_add(conn, chan);
1210 1369
1211 l2cap_chan_ready(chan); 1370 l2cap_chan_ready(chan);
@@ -1270,7 +1429,7 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1270 1429
1271 list_for_each_entry(chan, &conn->chan_l, list) { 1430 list_for_each_entry(chan, &conn->chan_l, list) {
1272 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags)) 1431 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1273 __l2cap_chan_set_err(chan, err); 1432 l2cap_chan_set_err(chan, err);
1274 } 1433 }
1275 1434
1276 mutex_unlock(&conn->chan_lock); 1435 mutex_unlock(&conn->chan_lock);
@@ -1279,7 +1438,7 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1279static void l2cap_info_timeout(struct work_struct *work) 1438static void l2cap_info_timeout(struct work_struct *work)
1280{ 1439{
1281 struct l2cap_conn *conn = container_of(work, struct l2cap_conn, 1440 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1282 info_timer.work); 1441 info_timer.work);
1283 1442
1284 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; 1443 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1285 conn->info_ident = 0; 1444 conn->info_ident = 0;
@@ -1333,7 +1492,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
1333static void security_timeout(struct work_struct *work) 1492static void security_timeout(struct work_struct *work)
1334{ 1493{
1335 struct l2cap_conn *conn = container_of(work, struct l2cap_conn, 1494 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1336 security_timer.work); 1495 security_timer.work);
1337 1496
1338 BT_DBG("conn %p", conn); 1497 BT_DBG("conn %p", conn);
1339 1498
@@ -1355,7 +1514,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1355 if (!hchan) 1514 if (!hchan)
1356 return NULL; 1515 return NULL;
1357 1516
1358 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC); 1517 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1359 if (!conn) { 1518 if (!conn) {
1360 hci_chan_del(hchan); 1519 hci_chan_del(hchan);
1361 return NULL; 1520 return NULL;
@@ -1367,10 +1526,22 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1367 1526
1368 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan); 1527 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1369 1528
1370 if (hcon->hdev->le_mtu && hcon->type == LE_LINK) 1529 switch (hcon->type) {
1371 conn->mtu = hcon->hdev->le_mtu; 1530 case AMP_LINK:
1372 else 1531 conn->mtu = hcon->hdev->block_mtu;
1532 break;
1533
1534 case LE_LINK:
1535 if (hcon->hdev->le_mtu) {
1536 conn->mtu = hcon->hdev->le_mtu;
1537 break;
1538 }
1539 /* fall through */
1540
1541 default:
1373 conn->mtu = hcon->hdev->acl_mtu; 1542 conn->mtu = hcon->hdev->acl_mtu;
1543 break;
1544 }
1374 1545
1375 conn->src = &hcon->hdev->bdaddr; 1546 conn->src = &hcon->hdev->bdaddr;
1376 conn->dst = &hcon->dst; 1547 conn->dst = &hcon->dst;
@@ -1448,7 +1619,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1448 __u8 auth_type; 1619 __u8 auth_type;
1449 int err; 1620 int err;
1450 1621
1451 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst), 1622 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
1452 dst_type, __le16_to_cpu(psm)); 1623 dst_type, __le16_to_cpu(psm));
1453 1624
1454 hdev = hci_get_route(dst, src); 1625 hdev = hci_get_route(dst, src);
@@ -1461,7 +1632,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1461 1632
1462 /* PSM must be odd and lsb of upper byte must be 0 */ 1633 /* PSM must be odd and lsb of upper byte must be 0 */
1463 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid && 1634 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1464 chan->chan_type != L2CAP_CHAN_RAW) { 1635 chan->chan_type != L2CAP_CHAN_RAW) {
1465 err = -EINVAL; 1636 err = -EINVAL;
1466 goto done; 1637 goto done;
1467 } 1638 }
@@ -1657,6 +1828,9 @@ static void l2cap_streaming_send(struct l2cap_chan *chan,
1657 1828
1658 BT_DBG("chan %p, skbs %p", chan, skbs); 1829 BT_DBG("chan %p, skbs %p", chan, skbs);
1659 1830
1831 if (__chan_is_moving(chan))
1832 return;
1833
1660 skb_queue_splice_tail_init(skbs, &chan->tx_q); 1834 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1661 1835
1662 while (!skb_queue_empty(&chan->tx_q)) { 1836 while (!skb_queue_empty(&chan->tx_q)) {
@@ -1699,6 +1873,9 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
1699 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) 1873 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1700 return 0; 1874 return 0;
1701 1875
1876 if (__chan_is_moving(chan))
1877 return 0;
1878
1702 while (chan->tx_send_head && 1879 while (chan->tx_send_head &&
1703 chan->unacked_frames < chan->remote_tx_win && 1880 chan->unacked_frames < chan->remote_tx_win &&
1704 chan->tx_state == L2CAP_TX_STATE_XMIT) { 1881 chan->tx_state == L2CAP_TX_STATE_XMIT) {
@@ -1764,13 +1941,16 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan)
1764 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) 1941 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1765 return; 1942 return;
1766 1943
1944 if (__chan_is_moving(chan))
1945 return;
1946
1767 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) { 1947 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1768 seq = l2cap_seq_list_pop(&chan->retrans_list); 1948 seq = l2cap_seq_list_pop(&chan->retrans_list);
1769 1949
1770 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq); 1950 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1771 if (!skb) { 1951 if (!skb) {
1772 BT_DBG("Error: Can't retransmit seq %d, frame missing", 1952 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1773 seq); 1953 seq);
1774 continue; 1954 continue;
1775 } 1955 }
1776 1956
@@ -1780,7 +1960,7 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan)
1780 if (chan->max_tx != 0 && 1960 if (chan->max_tx != 0 &&
1781 bt_cb(skb)->control.retries > chan->max_tx) { 1961 bt_cb(skb)->control.retries > chan->max_tx) {
1782 BT_DBG("Retry limit exceeded (%d)", chan->max_tx); 1962 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1783 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 1963 l2cap_send_disconn_req(chan, ECONNRESET);
1784 l2cap_seq_list_clear(&chan->retrans_list); 1964 l2cap_seq_list_clear(&chan->retrans_list);
1785 break; 1965 break;
1786 } 1966 }
@@ -1795,9 +1975,9 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan)
1795 /* Cloned sk_buffs are read-only, so we need a 1975 /* Cloned sk_buffs are read-only, so we need a
1796 * writeable copy 1976 * writeable copy
1797 */ 1977 */
1798 tx_skb = skb_copy(skb, GFP_ATOMIC); 1978 tx_skb = skb_copy(skb, GFP_KERNEL);
1799 } else { 1979 } else {
1800 tx_skb = skb_clone(skb, GFP_ATOMIC); 1980 tx_skb = skb_clone(skb, GFP_KERNEL);
1801 } 1981 }
1802 1982
1803 if (!tx_skb) { 1983 if (!tx_skb) {
@@ -1855,7 +2035,7 @@ static void l2cap_retransmit_all(struct l2cap_chan *chan,
1855 if (chan->unacked_frames) { 2035 if (chan->unacked_frames) {
1856 skb_queue_walk(&chan->tx_q, skb) { 2036 skb_queue_walk(&chan->tx_q, skb) {
1857 if (bt_cb(skb)->control.txseq == control->reqseq || 2037 if (bt_cb(skb)->control.txseq == control->reqseq ||
1858 skb == chan->tx_send_head) 2038 skb == chan->tx_send_head)
1859 break; 2039 break;
1860 } 2040 }
1861 2041
@@ -2106,7 +2286,9 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
2106 /* PDU size is derived from the HCI MTU */ 2286 /* PDU size is derived from the HCI MTU */
2107 pdu_len = chan->conn->mtu; 2287 pdu_len = chan->conn->mtu;
2108 2288
2109 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD); 2289 /* Constrain PDU size for BR/EDR connections */
2290 if (!chan->hs_hcon)
2291 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2110 2292
2111 /* Adjust for largest possible L2CAP overhead. */ 2293 /* Adjust for largest possible L2CAP overhead. */
2112 if (chan->fcs) 2294 if (chan->fcs)
@@ -2156,7 +2338,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
2156} 2338}
2157 2339
2158int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, 2340int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2159 u32 priority) 2341 u32 priority)
2160{ 2342{
2161 struct sk_buff *skb; 2343 struct sk_buff *skb;
2162 int err; 2344 int err;
@@ -2484,7 +2666,7 @@ static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2484 __set_monitor_timer(chan); 2666 __set_monitor_timer(chan);
2485 chan->retry_count++; 2667 chan->retry_count++;
2486 } else { 2668 } else {
2487 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); 2669 l2cap_send_disconn_req(chan, ECONNABORTED);
2488 } 2670 }
2489 break; 2671 break;
2490 default: 2672 default:
@@ -2543,7 +2725,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2543 /* Don't send frame to the socket it came from */ 2725 /* Don't send frame to the socket it came from */
2544 if (skb->sk == sk) 2726 if (skb->sk == sk)
2545 continue; 2727 continue;
2546 nskb = skb_clone(skb, GFP_ATOMIC); 2728 nskb = skb_clone(skb, GFP_KERNEL);
2547 if (!nskb) 2729 if (!nskb)
2548 continue; 2730 continue;
2549 2731
@@ -2569,7 +2751,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2569 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen; 2751 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2570 count = min_t(unsigned int, conn->mtu, len); 2752 count = min_t(unsigned int, conn->mtu, len);
2571 2753
2572 skb = bt_skb_alloc(count, GFP_ATOMIC); 2754 skb = bt_skb_alloc(count, GFP_KERNEL);
2573 if (!skb) 2755 if (!skb)
2574 return NULL; 2756 return NULL;
2575 2757
@@ -2599,7 +2781,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2599 while (len) { 2781 while (len) {
2600 count = min_t(unsigned int, conn->mtu, len); 2782 count = min_t(unsigned int, conn->mtu, len);
2601 2783
2602 *frag = bt_skb_alloc(count, GFP_ATOMIC); 2784 *frag = bt_skb_alloc(count, GFP_KERNEL);
2603 if (!*frag) 2785 if (!*frag)
2604 goto fail; 2786 goto fail;
2605 2787
@@ -2618,7 +2800,8 @@ fail:
2618 return NULL; 2800 return NULL;
2619} 2801}
2620 2802
2621static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val) 2803static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2804 unsigned long *val)
2622{ 2805{
2623 struct l2cap_conf_opt *opt = *ptr; 2806 struct l2cap_conf_opt *opt = *ptr;
2624 int len; 2807 int len;
@@ -2692,7 +2875,7 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2692 efs.msdu = cpu_to_le16(chan->local_msdu); 2875 efs.msdu = cpu_to_le16(chan->local_msdu);
2693 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); 2876 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2694 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); 2877 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2695 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO); 2878 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2696 break; 2879 break;
2697 2880
2698 case L2CAP_MODE_STREAMING: 2881 case L2CAP_MODE_STREAMING:
@@ -2709,7 +2892,7 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2709 } 2892 }
2710 2893
2711 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs), 2894 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2712 (unsigned long) &efs); 2895 (unsigned long) &efs);
2713} 2896}
2714 2897
2715static void l2cap_ack_timeout(struct work_struct *work) 2898static void l2cap_ack_timeout(struct work_struct *work)
@@ -2749,6 +2932,11 @@ int l2cap_ertm_init(struct l2cap_chan *chan)
2749 2932
2750 skb_queue_head_init(&chan->tx_q); 2933 skb_queue_head_init(&chan->tx_q);
2751 2934
2935 chan->local_amp_id = 0;
2936 chan->move_id = 0;
2937 chan->move_state = L2CAP_MOVE_STABLE;
2938 chan->move_role = L2CAP_MOVE_ROLE_NONE;
2939
2752 if (chan->mode != L2CAP_MODE_ERTM) 2940 if (chan->mode != L2CAP_MODE_ERTM)
2753 return 0; 2941 return 0;
2754 2942
@@ -2795,16 +2983,54 @@ static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2795 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW; 2983 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2796} 2984}
2797 2985
2986static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
2987 struct l2cap_conf_rfc *rfc)
2988{
2989 if (chan->local_amp_id && chan->hs_hcon) {
2990 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
2991
2992 /* Class 1 devices have must have ERTM timeouts
2993 * exceeding the Link Supervision Timeout. The
2994 * default Link Supervision Timeout for AMP
2995 * controllers is 10 seconds.
2996 *
2997 * Class 1 devices use 0xffffffff for their
2998 * best-effort flush timeout, so the clamping logic
2999 * will result in a timeout that meets the above
3000 * requirement. ERTM timeouts are 16-bit values, so
3001 * the maximum timeout is 65.535 seconds.
3002 */
3003
3004 /* Convert timeout to milliseconds and round */
3005 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3006
3007 /* This is the recommended formula for class 2 devices
3008 * that start ERTM timers when packets are sent to the
3009 * controller.
3010 */
3011 ertm_to = 3 * ertm_to + 500;
3012
3013 if (ertm_to > 0xffff)
3014 ertm_to = 0xffff;
3015
3016 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3017 rfc->monitor_timeout = rfc->retrans_timeout;
3018 } else {
3019 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3020 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3021 }
3022}
3023
2798static inline void l2cap_txwin_setup(struct l2cap_chan *chan) 3024static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2799{ 3025{
2800 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW && 3026 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2801 __l2cap_ews_supported(chan)) { 3027 __l2cap_ews_supported(chan)) {
2802 /* use extended control field */ 3028 /* use extended control field */
2803 set_bit(FLAG_EXT_CTRL, &chan->flags); 3029 set_bit(FLAG_EXT_CTRL, &chan->flags);
2804 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; 3030 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2805 } else { 3031 } else {
2806 chan->tx_win = min_t(u16, chan->tx_win, 3032 chan->tx_win = min_t(u16, chan->tx_win,
2807 L2CAP_DEFAULT_TX_WINDOW); 3033 L2CAP_DEFAULT_TX_WINDOW);
2808 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; 3034 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2809 } 3035 }
2810 chan->ack_win = chan->tx_win; 3036 chan->ack_win = chan->tx_win;
@@ -2844,7 +3070,7 @@ done:
2844 switch (chan->mode) { 3070 switch (chan->mode) {
2845 case L2CAP_MODE_BASIC: 3071 case L2CAP_MODE_BASIC:
2846 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) && 3072 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2847 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING)) 3073 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2848 break; 3074 break;
2849 3075
2850 rfc.mode = L2CAP_MODE_BASIC; 3076 rfc.mode = L2CAP_MODE_BASIC;
@@ -2855,44 +3081,42 @@ done:
2855 rfc.max_pdu_size = 0; 3081 rfc.max_pdu_size = 0;
2856 3082
2857 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 3083 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2858 (unsigned long) &rfc); 3084 (unsigned long) &rfc);
2859 break; 3085 break;
2860 3086
2861 case L2CAP_MODE_ERTM: 3087 case L2CAP_MODE_ERTM:
2862 rfc.mode = L2CAP_MODE_ERTM; 3088 rfc.mode = L2CAP_MODE_ERTM;
2863 rfc.max_transmit = chan->max_tx; 3089 rfc.max_transmit = chan->max_tx;
2864 rfc.retrans_timeout = 0; 3090
2865 rfc.monitor_timeout = 0; 3091 __l2cap_set_ertm_timeouts(chan, &rfc);
2866 3092
2867 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - 3093 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2868 L2CAP_EXT_HDR_SIZE - 3094 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
2869 L2CAP_SDULEN_SIZE - 3095 L2CAP_FCS_SIZE);
2870 L2CAP_FCS_SIZE);
2871 rfc.max_pdu_size = cpu_to_le16(size); 3096 rfc.max_pdu_size = cpu_to_le16(size);
2872 3097
2873 l2cap_txwin_setup(chan); 3098 l2cap_txwin_setup(chan);
2874 3099
2875 rfc.txwin_size = min_t(u16, chan->tx_win, 3100 rfc.txwin_size = min_t(u16, chan->tx_win,
2876 L2CAP_DEFAULT_TX_WINDOW); 3101 L2CAP_DEFAULT_TX_WINDOW);
2877 3102
2878 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 3103 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2879 (unsigned long) &rfc); 3104 (unsigned long) &rfc);
2880 3105
2881 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) 3106 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2882 l2cap_add_opt_efs(&ptr, chan); 3107 l2cap_add_opt_efs(&ptr, chan);
2883 3108
2884 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2885 break;
2886
2887 if (chan->fcs == L2CAP_FCS_NONE ||
2888 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2889 chan->fcs = L2CAP_FCS_NONE;
2890 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2891 }
2892
2893 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 3109 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2894 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, 3110 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2895 chan->tx_win); 3111 chan->tx_win);
3112
3113 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3114 if (chan->fcs == L2CAP_FCS_NONE ||
3115 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3116 chan->fcs = L2CAP_FCS_NONE;
3117 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3118 chan->fcs);
3119 }
2896 break; 3120 break;
2897 3121
2898 case L2CAP_MODE_STREAMING: 3122 case L2CAP_MODE_STREAMING:
@@ -2904,25 +3128,23 @@ done:
2904 rfc.monitor_timeout = 0; 3128 rfc.monitor_timeout = 0;
2905 3129
2906 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - 3130 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2907 L2CAP_EXT_HDR_SIZE - 3131 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
2908 L2CAP_SDULEN_SIZE - 3132 L2CAP_FCS_SIZE);
2909 L2CAP_FCS_SIZE);
2910 rfc.max_pdu_size = cpu_to_le16(size); 3133 rfc.max_pdu_size = cpu_to_le16(size);
2911 3134
2912 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 3135 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2913 (unsigned long) &rfc); 3136 (unsigned long) &rfc);
2914 3137
2915 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) 3138 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2916 l2cap_add_opt_efs(&ptr, chan); 3139 l2cap_add_opt_efs(&ptr, chan);
2917 3140
2918 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS)) 3141 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
2919 break; 3142 if (chan->fcs == L2CAP_FCS_NONE ||
2920 3143 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
2921 if (chan->fcs == L2CAP_FCS_NONE || 3144 chan->fcs = L2CAP_FCS_NONE;
2922 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) { 3145 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
2923 chan->fcs = L2CAP_FCS_NONE; 3146 chan->fcs);
2924 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); 3147 }
2925 }
2926 break; 3148 break;
2927 } 3149 }
2928 3150
@@ -2974,7 +3196,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2974 3196
2975 case L2CAP_CONF_FCS: 3197 case L2CAP_CONF_FCS:
2976 if (val == L2CAP_FCS_NONE) 3198 if (val == L2CAP_FCS_NONE)
2977 set_bit(CONF_NO_FCS_RECV, &chan->conf_state); 3199 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
2978 break; 3200 break;
2979 3201
2980 case L2CAP_CONF_EFS: 3202 case L2CAP_CONF_EFS:
@@ -3011,7 +3233,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3011 case L2CAP_MODE_ERTM: 3233 case L2CAP_MODE_ERTM:
3012 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) { 3234 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3013 chan->mode = l2cap_select_mode(rfc.mode, 3235 chan->mode = l2cap_select_mode(rfc.mode,
3014 chan->conn->feat_mask); 3236 chan->conn->feat_mask);
3015 break; 3237 break;
3016 } 3238 }
3017 3239
@@ -3036,8 +3258,8 @@ done:
3036 if (chan->num_conf_rsp == 1) 3258 if (chan->num_conf_rsp == 1)
3037 return -ECONNREFUSED; 3259 return -ECONNREFUSED;
3038 3260
3039 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 3261 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3040 sizeof(rfc), (unsigned long) &rfc); 3262 (unsigned long) &rfc);
3041 } 3263 }
3042 3264
3043 if (result == L2CAP_CONF_SUCCESS) { 3265 if (result == L2CAP_CONF_SUCCESS) {
@@ -3054,8 +3276,8 @@ done:
3054 3276
3055 if (remote_efs) { 3277 if (remote_efs) {
3056 if (chan->local_stype != L2CAP_SERV_NOTRAFIC && 3278 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3057 efs.stype != L2CAP_SERV_NOTRAFIC && 3279 efs.stype != L2CAP_SERV_NOTRAFIC &&
3058 efs.stype != chan->local_stype) { 3280 efs.stype != chan->local_stype) {
3059 3281
3060 result = L2CAP_CONF_UNACCEPT; 3282 result = L2CAP_CONF_UNACCEPT;
3061 3283
@@ -3063,8 +3285,8 @@ done:
3063 return -ECONNREFUSED; 3285 return -ECONNREFUSED;
3064 3286
3065 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, 3287 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3066 sizeof(efs), 3288 sizeof(efs),
3067 (unsigned long) &efs); 3289 (unsigned long) &efs);
3068 } else { 3290 } else {
3069 /* Send PENDING Conf Rsp */ 3291 /* Send PENDING Conf Rsp */
3070 result = L2CAP_CONF_PENDING; 3292 result = L2CAP_CONF_PENDING;
@@ -3087,51 +3309,45 @@ done:
3087 chan->remote_max_tx = rfc.max_transmit; 3309 chan->remote_max_tx = rfc.max_transmit;
3088 3310
3089 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), 3311 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3090 chan->conn->mtu - 3312 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3091 L2CAP_EXT_HDR_SIZE - 3313 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3092 L2CAP_SDULEN_SIZE -
3093 L2CAP_FCS_SIZE);
3094 rfc.max_pdu_size = cpu_to_le16(size); 3314 rfc.max_pdu_size = cpu_to_le16(size);
3095 chan->remote_mps = size; 3315 chan->remote_mps = size;
3096 3316
3097 rfc.retrans_timeout = 3317 __l2cap_set_ertm_timeouts(chan, &rfc);
3098 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3099 rfc.monitor_timeout =
3100 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3101 3318
3102 set_bit(CONF_MODE_DONE, &chan->conf_state); 3319 set_bit(CONF_MODE_DONE, &chan->conf_state);
3103 3320
3104 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 3321 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3105 sizeof(rfc), (unsigned long) &rfc); 3322 sizeof(rfc), (unsigned long) &rfc);
3106 3323
3107 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { 3324 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3108 chan->remote_id = efs.id; 3325 chan->remote_id = efs.id;
3109 chan->remote_stype = efs.stype; 3326 chan->remote_stype = efs.stype;
3110 chan->remote_msdu = le16_to_cpu(efs.msdu); 3327 chan->remote_msdu = le16_to_cpu(efs.msdu);
3111 chan->remote_flush_to = 3328 chan->remote_flush_to =
3112 le32_to_cpu(efs.flush_to); 3329 le32_to_cpu(efs.flush_to);
3113 chan->remote_acc_lat = 3330 chan->remote_acc_lat =
3114 le32_to_cpu(efs.acc_lat); 3331 le32_to_cpu(efs.acc_lat);
3115 chan->remote_sdu_itime = 3332 chan->remote_sdu_itime =
3116 le32_to_cpu(efs.sdu_itime); 3333 le32_to_cpu(efs.sdu_itime);
3117 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, 3334 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3118 sizeof(efs), (unsigned long) &efs); 3335 sizeof(efs),
3336 (unsigned long) &efs);
3119 } 3337 }
3120 break; 3338 break;
3121 3339
3122 case L2CAP_MODE_STREAMING: 3340 case L2CAP_MODE_STREAMING:
3123 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), 3341 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3124 chan->conn->mtu - 3342 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3125 L2CAP_EXT_HDR_SIZE - 3343 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3126 L2CAP_SDULEN_SIZE -
3127 L2CAP_FCS_SIZE);
3128 rfc.max_pdu_size = cpu_to_le16(size); 3344 rfc.max_pdu_size = cpu_to_le16(size);
3129 chan->remote_mps = size; 3345 chan->remote_mps = size;
3130 3346
3131 set_bit(CONF_MODE_DONE, &chan->conf_state); 3347 set_bit(CONF_MODE_DONE, &chan->conf_state);
3132 3348
3133 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 3349 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3134 sizeof(rfc), (unsigned long) &rfc); 3350 (unsigned long) &rfc);
3135 3351
3136 break; 3352 break;
3137 3353
@@ -3152,7 +3368,8 @@ done:
3152 return ptr - data; 3368 return ptr - data;
3153} 3369}
3154 3370
3155static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result) 3371static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3372 void *data, u16 *result)
3156{ 3373{
3157 struct l2cap_conf_req *req = data; 3374 struct l2cap_conf_req *req = data;
3158 void *ptr = req->data; 3375 void *ptr = req->data;
@@ -3179,7 +3396,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
3179 case L2CAP_CONF_FLUSH_TO: 3396 case L2CAP_CONF_FLUSH_TO:
3180 chan->flush_to = val; 3397 chan->flush_to = val;
3181 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 3398 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3182 2, chan->flush_to); 3399 2, chan->flush_to);
3183 break; 3400 break;
3184 3401
3185 case L2CAP_CONF_RFC: 3402 case L2CAP_CONF_RFC:
@@ -3187,13 +3404,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
3187 memcpy(&rfc, (void *)val, olen); 3404 memcpy(&rfc, (void *)val, olen);
3188 3405
3189 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) && 3406 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3190 rfc.mode != chan->mode) 3407 rfc.mode != chan->mode)
3191 return -ECONNREFUSED; 3408 return -ECONNREFUSED;
3192 3409
3193 chan->fcs = 0; 3410 chan->fcs = 0;
3194 3411
3195 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 3412 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3196 sizeof(rfc), (unsigned long) &rfc); 3413 sizeof(rfc), (unsigned long) &rfc);
3197 break; 3414 break;
3198 3415
3199 case L2CAP_CONF_EWS: 3416 case L2CAP_CONF_EWS:
@@ -3207,12 +3424,19 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
3207 memcpy(&efs, (void *)val, olen); 3424 memcpy(&efs, (void *)val, olen);
3208 3425
3209 if (chan->local_stype != L2CAP_SERV_NOTRAFIC && 3426 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3210 efs.stype != L2CAP_SERV_NOTRAFIC && 3427 efs.stype != L2CAP_SERV_NOTRAFIC &&
3211 efs.stype != chan->local_stype) 3428 efs.stype != chan->local_stype)
3212 return -ECONNREFUSED; 3429 return -ECONNREFUSED;
3213 3430
3214 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, 3431 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3215 sizeof(efs), (unsigned long) &efs); 3432 (unsigned long) &efs);
3433 break;
3434
3435 case L2CAP_CONF_FCS:
3436 if (*result == L2CAP_CONF_PENDING)
3437 if (val == L2CAP_FCS_NONE)
3438 set_bit(CONF_RECV_NO_FCS,
3439 &chan->conf_state);
3216 break; 3440 break;
3217 } 3441 }
3218 } 3442 }
@@ -3235,10 +3459,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
3235 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { 3459 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3236 chan->local_msdu = le16_to_cpu(efs.msdu); 3460 chan->local_msdu = le16_to_cpu(efs.msdu);
3237 chan->local_sdu_itime = 3461 chan->local_sdu_itime =
3238 le32_to_cpu(efs.sdu_itime); 3462 le32_to_cpu(efs.sdu_itime);
3239 chan->local_acc_lat = le32_to_cpu(efs.acc_lat); 3463 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3240 chan->local_flush_to = 3464 chan->local_flush_to =
3241 le32_to_cpu(efs.flush_to); 3465 le32_to_cpu(efs.flush_to);
3242 } 3466 }
3243 break; 3467 break;
3244 3468
@@ -3253,7 +3477,8 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
3253 return ptr - data; 3477 return ptr - data;
3254} 3478}
3255 3479
3256static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags) 3480static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3481 u16 result, u16 flags)
3257{ 3482{
3258 struct l2cap_conf_rsp *rsp = data; 3483 struct l2cap_conf_rsp *rsp = data;
3259 void *ptr = rsp->data; 3484 void *ptr = rsp->data;
@@ -3272,19 +3497,27 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3272 struct l2cap_conn_rsp rsp; 3497 struct l2cap_conn_rsp rsp;
3273 struct l2cap_conn *conn = chan->conn; 3498 struct l2cap_conn *conn = chan->conn;
3274 u8 buf[128]; 3499 u8 buf[128];
3500 u8 rsp_code;
3275 3501
3276 rsp.scid = cpu_to_le16(chan->dcid); 3502 rsp.scid = cpu_to_le16(chan->dcid);
3277 rsp.dcid = cpu_to_le16(chan->scid); 3503 rsp.dcid = cpu_to_le16(chan->scid);
3278 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS); 3504 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3279 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); 3505 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3280 l2cap_send_cmd(conn, chan->ident, 3506
3281 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 3507 if (chan->hs_hcon)
3508 rsp_code = L2CAP_CREATE_CHAN_RSP;
3509 else
3510 rsp_code = L2CAP_CONN_RSP;
3511
3512 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3513
3514 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3282 3515
3283 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) 3516 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3284 return; 3517 return;
3285 3518
3286 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 3519 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3287 l2cap_build_conf_req(chan, buf), buf); 3520 l2cap_build_conf_req(chan, buf), buf);
3288 chan->num_conf_req++; 3521 chan->num_conf_req++;
3289} 3522}
3290 3523
@@ -3339,7 +3572,8 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3339 } 3572 }
3340} 3573}
3341 3574
3342static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 3575static inline int l2cap_command_rej(struct l2cap_conn *conn,
3576 struct l2cap_cmd_hdr *cmd, u8 *data)
3343{ 3577{
3344 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data; 3578 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3345 3579
@@ -3347,7 +3581,7 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hd
3347 return 0; 3581 return 0;
3348 3582
3349 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) && 3583 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3350 cmd->ident == conn->info_ident) { 3584 cmd->ident == conn->info_ident) {
3351 cancel_delayed_work(&conn->info_timer); 3585 cancel_delayed_work(&conn->info_timer);
3352 3586
3353 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; 3587 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
@@ -3359,7 +3593,9 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hd
3359 return 0; 3593 return 0;
3360} 3594}
3361 3595
3362static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 3596static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3597 struct l2cap_cmd_hdr *cmd,
3598 u8 *data, u8 rsp_code, u8 amp_id)
3363{ 3599{
3364 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data; 3600 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3365 struct l2cap_conn_rsp rsp; 3601 struct l2cap_conn_rsp rsp;
@@ -3386,7 +3622,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
3386 3622
3387 /* Check if the ACL is secure enough (if not SDP) */ 3623 /* Check if the ACL is secure enough (if not SDP) */
3388 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) && 3624 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3389 !hci_conn_check_link_mode(conn->hcon)) { 3625 !hci_conn_check_link_mode(conn->hcon)) {
3390 conn->disc_reason = HCI_ERROR_AUTH_FAILURE; 3626 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3391 result = L2CAP_CR_SEC_BLOCK; 3627 result = L2CAP_CR_SEC_BLOCK;
3392 goto response; 3628 goto response;
@@ -3410,8 +3646,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
3410 bacpy(&bt_sk(sk)->dst, conn->dst); 3646 bacpy(&bt_sk(sk)->dst, conn->dst);
3411 chan->psm = psm; 3647 chan->psm = psm;
3412 chan->dcid = scid; 3648 chan->dcid = scid;
3413 3649 chan->local_amp_id = amp_id;
3414 bt_accept_enqueue(parent, sk);
3415 3650
3416 __l2cap_chan_add(conn, chan); 3651 __l2cap_chan_add(conn, chan);
3417 3652
@@ -3427,10 +3662,19 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
3427 __l2cap_state_change(chan, BT_CONNECT2); 3662 __l2cap_state_change(chan, BT_CONNECT2);
3428 result = L2CAP_CR_PEND; 3663 result = L2CAP_CR_PEND;
3429 status = L2CAP_CS_AUTHOR_PEND; 3664 status = L2CAP_CS_AUTHOR_PEND;
3430 parent->sk_data_ready(parent, 0); 3665 chan->ops->defer(chan);
3431 } else { 3666 } else {
3432 __l2cap_state_change(chan, BT_CONFIG); 3667 /* Force pending result for AMP controllers.
3433 result = L2CAP_CR_SUCCESS; 3668 * The connection will succeed after the
3669 * physical link is up.
3670 */
3671 if (amp_id) {
3672 __l2cap_state_change(chan, BT_CONNECT2);
3673 result = L2CAP_CR_PEND;
3674 } else {
3675 __l2cap_state_change(chan, BT_CONFIG);
3676 result = L2CAP_CR_SUCCESS;
3677 }
3434 status = L2CAP_CS_NO_INFO; 3678 status = L2CAP_CS_NO_INFO;
3435 } 3679 }
3436 } else { 3680 } else {
@@ -3453,7 +3697,7 @@ sendresp:
3453 rsp.dcid = cpu_to_le16(dcid); 3697 rsp.dcid = cpu_to_le16(dcid);
3454 rsp.result = cpu_to_le16(result); 3698 rsp.result = cpu_to_le16(result);
3455 rsp.status = cpu_to_le16(status); 3699 rsp.status = cpu_to_le16(status);
3456 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); 3700 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3457 3701
3458 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { 3702 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3459 struct l2cap_info_req info; 3703 struct l2cap_info_req info;
@@ -3464,23 +3708,31 @@ sendresp:
3464 3708
3465 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT); 3709 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3466 3710
3467 l2cap_send_cmd(conn, conn->info_ident, 3711 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3468 L2CAP_INFO_REQ, sizeof(info), &info); 3712 sizeof(info), &info);
3469 } 3713 }
3470 3714
3471 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) && 3715 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3472 result == L2CAP_CR_SUCCESS) { 3716 result == L2CAP_CR_SUCCESS) {
3473 u8 buf[128]; 3717 u8 buf[128];
3474 set_bit(CONF_REQ_SENT, &chan->conf_state); 3718 set_bit(CONF_REQ_SENT, &chan->conf_state);
3475 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 3719 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3476 l2cap_build_conf_req(chan, buf), buf); 3720 l2cap_build_conf_req(chan, buf), buf);
3477 chan->num_conf_req++; 3721 chan->num_conf_req++;
3478 } 3722 }
3479 3723
3724 return chan;
3725}
3726
3727static int l2cap_connect_req(struct l2cap_conn *conn,
3728 struct l2cap_cmd_hdr *cmd, u8 *data)
3729{
3730 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3480 return 0; 3731 return 0;
3481} 3732}
3482 3733
3483static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 3734static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3735 struct l2cap_cmd_hdr *cmd, u8 *data)
3484{ 3736{
3485 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data; 3737 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3486 u16 scid, dcid, result, status; 3738 u16 scid, dcid, result, status;
@@ -3494,7 +3746,7 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
3494 status = __le16_to_cpu(rsp->status); 3746 status = __le16_to_cpu(rsp->status);
3495 3747
3496 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", 3748 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3497 dcid, scid, result, status); 3749 dcid, scid, result, status);
3498 3750
3499 mutex_lock(&conn->chan_lock); 3751 mutex_lock(&conn->chan_lock);
3500 3752
@@ -3527,7 +3779,7 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
3527 break; 3779 break;
3528 3780
3529 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 3781 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3530 l2cap_build_conf_req(chan, req), req); 3782 l2cap_build_conf_req(chan, req), req);
3531 chan->num_conf_req++; 3783 chan->num_conf_req++;
3532 break; 3784 break;
3533 3785
@@ -3555,11 +3807,29 @@ static inline void set_default_fcs(struct l2cap_chan *chan)
3555 */ 3807 */
3556 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING) 3808 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3557 chan->fcs = L2CAP_FCS_NONE; 3809 chan->fcs = L2CAP_FCS_NONE;
3558 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) 3810 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3559 chan->fcs = L2CAP_FCS_CRC16; 3811 chan->fcs = L2CAP_FCS_CRC16;
3560} 3812}
3561 3813
3562static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) 3814static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3815 u8 ident, u16 flags)
3816{
3817 struct l2cap_conn *conn = chan->conn;
3818
3819 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3820 flags);
3821
3822 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3823 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3824
3825 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3826 l2cap_build_conf_rsp(chan, data,
3827 L2CAP_CONF_SUCCESS, flags), data);
3828}
3829
3830static inline int l2cap_config_req(struct l2cap_conn *conn,
3831 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3832 u8 *data)
3563{ 3833{
3564 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data; 3834 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3565 u16 dcid, flags; 3835 u16 dcid, flags;
@@ -3584,7 +3854,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3584 rej.dcid = cpu_to_le16(chan->dcid); 3854 rej.dcid = cpu_to_le16(chan->dcid);
3585 3855
3586 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ, 3856 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3587 sizeof(rej), &rej); 3857 sizeof(rej), &rej);
3588 goto unlock; 3858 goto unlock;
3589 } 3859 }
3590 3860
@@ -3592,8 +3862,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3592 len = cmd_len - sizeof(*req); 3862 len = cmd_len - sizeof(*req);
3593 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) { 3863 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3594 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 3864 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3595 l2cap_build_conf_rsp(chan, rsp, 3865 l2cap_build_conf_rsp(chan, rsp,
3596 L2CAP_CONF_REJECT, flags), rsp); 3866 L2CAP_CONF_REJECT, flags), rsp);
3597 goto unlock; 3867 goto unlock;
3598 } 3868 }
3599 3869
@@ -3604,18 +3874,19 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3604 if (flags & L2CAP_CONF_FLAG_CONTINUATION) { 3874 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3605 /* Incomplete config. Send empty response. */ 3875 /* Incomplete config. Send empty response. */
3606 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 3876 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3607 l2cap_build_conf_rsp(chan, rsp, 3877 l2cap_build_conf_rsp(chan, rsp,
3608 L2CAP_CONF_SUCCESS, flags), rsp); 3878 L2CAP_CONF_SUCCESS, flags), rsp);
3609 goto unlock; 3879 goto unlock;
3610 } 3880 }
3611 3881
3612 /* Complete config. */ 3882 /* Complete config. */
3613 len = l2cap_parse_conf_req(chan, rsp); 3883 len = l2cap_parse_conf_req(chan, rsp);
3614 if (len < 0) { 3884 if (len < 0) {
3615 l2cap_send_disconn_req(conn, chan, ECONNRESET); 3885 l2cap_send_disconn_req(chan, ECONNRESET);
3616 goto unlock; 3886 goto unlock;
3617 } 3887 }
3618 3888
3889 chan->ident = cmd->ident;
3619 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); 3890 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3620 chan->num_conf_rsp++; 3891 chan->num_conf_rsp++;
3621 3892
@@ -3633,7 +3904,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3633 err = l2cap_ertm_init(chan); 3904 err = l2cap_ertm_init(chan);
3634 3905
3635 if (err < 0) 3906 if (err < 0)
3636 l2cap_send_disconn_req(chan->conn, chan, -err); 3907 l2cap_send_disconn_req(chan, -err);
3637 else 3908 else
3638 l2cap_chan_ready(chan); 3909 l2cap_chan_ready(chan);
3639 3910
@@ -3643,23 +3914,22 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3643 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) { 3914 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3644 u8 buf[64]; 3915 u8 buf[64];
3645 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 3916 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3646 l2cap_build_conf_req(chan, buf), buf); 3917 l2cap_build_conf_req(chan, buf), buf);
3647 chan->num_conf_req++; 3918 chan->num_conf_req++;
3648 } 3919 }
3649 3920
3650 /* Got Conf Rsp PENDING from remote side and asume we sent 3921 /* Got Conf Rsp PENDING from remote side and asume we sent
3651 Conf Rsp PENDING in the code above */ 3922 Conf Rsp PENDING in the code above */
3652 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) && 3923 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3653 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { 3924 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3654 3925
3655 /* check compatibility */ 3926 /* check compatibility */
3656 3927
3657 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state); 3928 /* Send rsp for BR/EDR channel */
3658 set_bit(CONF_OUTPUT_DONE, &chan->conf_state); 3929 if (!chan->hs_hcon)
3659 3930 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
3660 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 3931 else
3661 l2cap_build_conf_rsp(chan, rsp, 3932 chan->ident = cmd->ident;
3662 L2CAP_CONF_SUCCESS, flags), rsp);
3663 } 3933 }
3664 3934
3665unlock: 3935unlock:
@@ -3667,7 +3937,8 @@ unlock:
3667 return err; 3937 return err;
3668} 3938}
3669 3939
3670static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 3940static inline int l2cap_config_rsp(struct l2cap_conn *conn,
3941 struct l2cap_cmd_hdr *cmd, u8 *data)
3671{ 3942{
3672 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data; 3943 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3673 u16 scid, flags, result; 3944 u16 scid, flags, result;
@@ -3699,20 +3970,21 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3699 char buf[64]; 3970 char buf[64];
3700 3971
3701 len = l2cap_parse_conf_rsp(chan, rsp->data, len, 3972 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3702 buf, &result); 3973 buf, &result);
3703 if (len < 0) { 3974 if (len < 0) {
3704 l2cap_send_disconn_req(conn, chan, ECONNRESET); 3975 l2cap_send_disconn_req(chan, ECONNRESET);
3705 goto done; 3976 goto done;
3706 } 3977 }
3707 3978
3708 /* check compatibility */ 3979 if (!chan->hs_hcon) {
3709 3980 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
3710 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state); 3981 0);
3711 set_bit(CONF_OUTPUT_DONE, &chan->conf_state); 3982 } else {
3712 3983 if (l2cap_check_efs(chan)) {
3713 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 3984 amp_create_logical_link(chan);
3714 l2cap_build_conf_rsp(chan, buf, 3985 chan->ident = cmd->ident;
3715 L2CAP_CONF_SUCCESS, 0x0000), buf); 3986 }
3987 }
3716 } 3988 }
3717 goto done; 3989 goto done;
3718 3990
@@ -3721,21 +3993,21 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3721 char req[64]; 3993 char req[64];
3722 3994
3723 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) { 3995 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3724 l2cap_send_disconn_req(conn, chan, ECONNRESET); 3996 l2cap_send_disconn_req(chan, ECONNRESET);
3725 goto done; 3997 goto done;
3726 } 3998 }
3727 3999
3728 /* throw out any old stored conf requests */ 4000 /* throw out any old stored conf requests */
3729 result = L2CAP_CONF_SUCCESS; 4001 result = L2CAP_CONF_SUCCESS;
3730 len = l2cap_parse_conf_rsp(chan, rsp->data, len, 4002 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3731 req, &result); 4003 req, &result);
3732 if (len < 0) { 4004 if (len < 0) {
3733 l2cap_send_disconn_req(conn, chan, ECONNRESET); 4005 l2cap_send_disconn_req(chan, ECONNRESET);
3734 goto done; 4006 goto done;
3735 } 4007 }
3736 4008
3737 l2cap_send_cmd(conn, l2cap_get_ident(conn), 4009 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3738 L2CAP_CONF_REQ, len, req); 4010 L2CAP_CONF_REQ, len, req);
3739 chan->num_conf_req++; 4011 chan->num_conf_req++;
3740 if (result != L2CAP_CONF_SUCCESS) 4012 if (result != L2CAP_CONF_SUCCESS)
3741 goto done; 4013 goto done;
@@ -3746,7 +4018,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3746 l2cap_chan_set_err(chan, ECONNRESET); 4018 l2cap_chan_set_err(chan, ECONNRESET);
3747 4019
3748 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT); 4020 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3749 l2cap_send_disconn_req(conn, chan, ECONNRESET); 4021 l2cap_send_disconn_req(chan, ECONNRESET);
3750 goto done; 4022 goto done;
3751 } 4023 }
3752 4024
@@ -3763,7 +4035,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3763 err = l2cap_ertm_init(chan); 4035 err = l2cap_ertm_init(chan);
3764 4036
3765 if (err < 0) 4037 if (err < 0)
3766 l2cap_send_disconn_req(chan->conn, chan, -err); 4038 l2cap_send_disconn_req(chan, -err);
3767 else 4039 else
3768 l2cap_chan_ready(chan); 4040 l2cap_chan_ready(chan);
3769 } 4041 }
@@ -3773,7 +4045,8 @@ done:
3773 return err; 4045 return err;
3774} 4046}
3775 4047
3776static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 4048static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4049 struct l2cap_cmd_hdr *cmd, u8 *data)
3777{ 4050{
3778 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data; 4051 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3779 struct l2cap_disconn_rsp rsp; 4052 struct l2cap_disconn_rsp rsp;
@@ -3819,7 +4092,8 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
3819 return 0; 4092 return 0;
3820} 4093}
3821 4094
3822static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 4095static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4096 struct l2cap_cmd_hdr *cmd, u8 *data)
3823{ 4097{
3824 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data; 4098 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3825 u16 dcid, scid; 4099 u16 dcid, scid;
@@ -3853,7 +4127,8 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
3853 return 0; 4127 return 0;
3854} 4128}
3855 4129
3856static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 4130static inline int l2cap_information_req(struct l2cap_conn *conn,
4131 struct l2cap_cmd_hdr *cmd, u8 *data)
3857{ 4132{
3858 struct l2cap_info_req *req = (struct l2cap_info_req *) data; 4133 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3859 u16 type; 4134 u16 type;
@@ -3870,14 +4145,14 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
3870 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS); 4145 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3871 if (!disable_ertm) 4146 if (!disable_ertm)
3872 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING 4147 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3873 | L2CAP_FEAT_FCS; 4148 | L2CAP_FEAT_FCS;
3874 if (enable_hs) 4149 if (enable_hs)
3875 feat_mask |= L2CAP_FEAT_EXT_FLOW 4150 feat_mask |= L2CAP_FEAT_EXT_FLOW
3876 | L2CAP_FEAT_EXT_WINDOW; 4151 | L2CAP_FEAT_EXT_WINDOW;
3877 4152
3878 put_unaligned_le32(feat_mask, rsp->data); 4153 put_unaligned_le32(feat_mask, rsp->data);
3879 l2cap_send_cmd(conn, cmd->ident, 4154 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
3880 L2CAP_INFO_RSP, sizeof(buf), buf); 4155 buf);
3881 } else if (type == L2CAP_IT_FIXED_CHAN) { 4156 } else if (type == L2CAP_IT_FIXED_CHAN) {
3882 u8 buf[12]; 4157 u8 buf[12];
3883 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; 4158 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
@@ -3890,20 +4165,21 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
3890 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN); 4165 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3891 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS); 4166 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3892 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan)); 4167 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3893 l2cap_send_cmd(conn, cmd->ident, 4168 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
3894 L2CAP_INFO_RSP, sizeof(buf), buf); 4169 buf);
3895 } else { 4170 } else {
3896 struct l2cap_info_rsp rsp; 4171 struct l2cap_info_rsp rsp;
3897 rsp.type = cpu_to_le16(type); 4172 rsp.type = cpu_to_le16(type);
3898 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP); 4173 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3899 l2cap_send_cmd(conn, cmd->ident, 4174 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
3900 L2CAP_INFO_RSP, sizeof(rsp), &rsp); 4175 &rsp);
3901 } 4176 }
3902 4177
3903 return 0; 4178 return 0;
3904} 4179}
3905 4180
3906static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 4181static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4182 struct l2cap_cmd_hdr *cmd, u8 *data)
3907{ 4183{
3908 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data; 4184 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3909 u16 type, result; 4185 u16 type, result;
@@ -3915,7 +4191,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
3915 4191
3916 /* L2CAP Info req/rsp are unbound to channels, add extra checks */ 4192 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3917 if (cmd->ident != conn->info_ident || 4193 if (cmd->ident != conn->info_ident ||
3918 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) 4194 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3919 return 0; 4195 return 0;
3920 4196
3921 cancel_delayed_work(&conn->info_timer); 4197 cancel_delayed_work(&conn->info_timer);
@@ -3940,7 +4216,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
3940 conn->info_ident = l2cap_get_ident(conn); 4216 conn->info_ident = l2cap_get_ident(conn);
3941 4217
3942 l2cap_send_cmd(conn, conn->info_ident, 4218 l2cap_send_cmd(conn, conn->info_ident,
3943 L2CAP_INFO_REQ, sizeof(req), &req); 4219 L2CAP_INFO_REQ, sizeof(req), &req);
3944 } else { 4220 } else {
3945 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; 4221 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3946 conn->info_ident = 0; 4222 conn->info_ident = 0;
@@ -3961,12 +4237,14 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
3961 return 0; 4237 return 0;
3962} 4238}
3963 4239
3964static inline int l2cap_create_channel_req(struct l2cap_conn *conn, 4240static int l2cap_create_channel_req(struct l2cap_conn *conn,
3965 struct l2cap_cmd_hdr *cmd, u16 cmd_len, 4241 struct l2cap_cmd_hdr *cmd,
3966 void *data) 4242 u16 cmd_len, void *data)
3967{ 4243{
3968 struct l2cap_create_chan_req *req = data; 4244 struct l2cap_create_chan_req *req = data;
3969 struct l2cap_create_chan_rsp rsp; 4245 struct l2cap_create_chan_rsp rsp;
4246 struct l2cap_chan *chan;
4247 struct hci_dev *hdev;
3970 u16 psm, scid; 4248 u16 psm, scid;
3971 4249
3972 if (cmd_len != sizeof(*req)) 4250 if (cmd_len != sizeof(*req))
@@ -3980,56 +4258,119 @@ static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3980 4258
3981 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id); 4259 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
3982 4260
3983 /* Placeholder: Always reject */ 4261 /* For controller id 0 make BR/EDR connection */
4262 if (req->amp_id == HCI_BREDR_ID) {
4263 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4264 req->amp_id);
4265 return 0;
4266 }
4267
4268 /* Validate AMP controller id */
4269 hdev = hci_dev_get(req->amp_id);
4270 if (!hdev)
4271 goto error;
4272
4273 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4274 hci_dev_put(hdev);
4275 goto error;
4276 }
4277
4278 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4279 req->amp_id);
4280 if (chan) {
4281 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4282 struct hci_conn *hs_hcon;
4283
4284 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
4285 if (!hs_hcon) {
4286 hci_dev_put(hdev);
4287 return -EFAULT;
4288 }
4289
4290 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4291
4292 mgr->bredr_chan = chan;
4293 chan->hs_hcon = hs_hcon;
4294 chan->fcs = L2CAP_FCS_NONE;
4295 conn->mtu = hdev->block_mtu;
4296 }
4297
4298 hci_dev_put(hdev);
4299
4300 return 0;
4301
4302error:
3984 rsp.dcid = 0; 4303 rsp.dcid = 0;
3985 rsp.scid = cpu_to_le16(scid); 4304 rsp.scid = cpu_to_le16(scid);
3986 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM); 4305 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
3987 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); 4306 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3988 4307
3989 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP, 4308 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3990 sizeof(rsp), &rsp); 4309 sizeof(rsp), &rsp);
3991 4310
3992 return 0; 4311 return -EFAULT;
3993} 4312}
3994 4313
3995static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn, 4314static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
3996 struct l2cap_cmd_hdr *cmd, void *data)
3997{ 4315{
3998 BT_DBG("conn %p", conn); 4316 struct l2cap_move_chan_req req;
4317 u8 ident;
4318
4319 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4320
4321 ident = l2cap_get_ident(chan->conn);
4322 chan->ident = ident;
4323
4324 req.icid = cpu_to_le16(chan->scid);
4325 req.dest_amp_id = dest_amp_id;
4326
4327 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4328 &req);
3999 4329
4000 return l2cap_connect_rsp(conn, cmd, data); 4330 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4001} 4331}
4002 4332
4003static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident, 4333static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4004 u16 icid, u16 result)
4005{ 4334{
4006 struct l2cap_move_chan_rsp rsp; 4335 struct l2cap_move_chan_rsp rsp;
4007 4336
4008 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result); 4337 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4009 4338
4010 rsp.icid = cpu_to_le16(icid); 4339 rsp.icid = cpu_to_le16(chan->dcid);
4011 rsp.result = cpu_to_le16(result); 4340 rsp.result = cpu_to_le16(result);
4012 4341
4013 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp); 4342 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4343 sizeof(rsp), &rsp);
4014} 4344}
4015 4345
4016static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn, 4346static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4017 struct l2cap_chan *chan,
4018 u16 icid, u16 result)
4019{ 4347{
4020 struct l2cap_move_chan_cfm cfm; 4348 struct l2cap_move_chan_cfm cfm;
4021 u8 ident;
4022 4349
4023 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result); 4350 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4024 4351
4025 ident = l2cap_get_ident(conn); 4352 chan->ident = l2cap_get_ident(chan->conn);
4026 if (chan)
4027 chan->ident = ident;
4028 4353
4029 cfm.icid = cpu_to_le16(icid); 4354 cfm.icid = cpu_to_le16(chan->scid);
4030 cfm.result = cpu_to_le16(result); 4355 cfm.result = cpu_to_le16(result);
4031 4356
4032 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm); 4357 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4358 sizeof(cfm), &cfm);
4359
4360 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4361}
4362
4363static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4364{
4365 struct l2cap_move_chan_cfm cfm;
4366
4367 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4368
4369 cfm.icid = cpu_to_le16(icid);
4370 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4371
4372 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4373 sizeof(cfm), &cfm);
4033} 4374}
4034 4375
4035static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident, 4376static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
@@ -4043,11 +4384,289 @@ static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4043 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp); 4384 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4044} 4385}
4045 4386
4387static void __release_logical_link(struct l2cap_chan *chan)
4388{
4389 chan->hs_hchan = NULL;
4390 chan->hs_hcon = NULL;
4391
4392 /* Placeholder - release the logical link */
4393}
4394
4395static void l2cap_logical_fail(struct l2cap_chan *chan)
4396{
4397 /* Logical link setup failed */
4398 if (chan->state != BT_CONNECTED) {
4399 /* Create channel failure, disconnect */
4400 l2cap_send_disconn_req(chan, ECONNRESET);
4401 return;
4402 }
4403
4404 switch (chan->move_role) {
4405 case L2CAP_MOVE_ROLE_RESPONDER:
4406 l2cap_move_done(chan);
4407 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4408 break;
4409 case L2CAP_MOVE_ROLE_INITIATOR:
4410 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4411 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4412 /* Remote has only sent pending or
4413 * success responses, clean up
4414 */
4415 l2cap_move_done(chan);
4416 }
4417
4418 /* Other amp move states imply that the move
4419 * has already aborted
4420 */
4421 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4422 break;
4423 }
4424}
4425
4426static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4427 struct hci_chan *hchan)
4428{
4429 struct l2cap_conf_rsp rsp;
4430
4431 chan->hs_hchan = hchan;
4432 chan->hs_hcon->l2cap_data = chan->conn;
4433
4434 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4435
4436 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4437 int err;
4438
4439 set_default_fcs(chan);
4440
4441 err = l2cap_ertm_init(chan);
4442 if (err < 0)
4443 l2cap_send_disconn_req(chan, -err);
4444 else
4445 l2cap_chan_ready(chan);
4446 }
4447}
4448
4449static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4450 struct hci_chan *hchan)
4451{
4452 chan->hs_hcon = hchan->conn;
4453 chan->hs_hcon->l2cap_data = chan->conn;
4454
4455 BT_DBG("move_state %d", chan->move_state);
4456
4457 switch (chan->move_state) {
4458 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4459 /* Move confirm will be sent after a success
4460 * response is received
4461 */
4462 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4463 break;
4464 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4465 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4466 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4467 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4468 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4469 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4470 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4471 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4472 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4473 }
4474 break;
4475 default:
4476 /* Move was not in expected state, free the channel */
4477 __release_logical_link(chan);
4478
4479 chan->move_state = L2CAP_MOVE_STABLE;
4480 }
4481}
4482
4483/* Call with chan locked */
4484void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4485 u8 status)
4486{
4487 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4488
4489 if (status) {
4490 l2cap_logical_fail(chan);
4491 __release_logical_link(chan);
4492 return;
4493 }
4494
4495 if (chan->state != BT_CONNECTED) {
4496 /* Ignore logical link if channel is on BR/EDR */
4497 if (chan->local_amp_id)
4498 l2cap_logical_finish_create(chan, hchan);
4499 } else {
4500 l2cap_logical_finish_move(chan, hchan);
4501 }
4502}
4503
4504void l2cap_move_start(struct l2cap_chan *chan)
4505{
4506 BT_DBG("chan %p", chan);
4507
4508 if (chan->local_amp_id == HCI_BREDR_ID) {
4509 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4510 return;
4511 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4512 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4513 /* Placeholder - start physical link setup */
4514 } else {
4515 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4516 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4517 chan->move_id = 0;
4518 l2cap_move_setup(chan);
4519 l2cap_send_move_chan_req(chan, 0);
4520 }
4521}
4522
4523static void l2cap_do_create(struct l2cap_chan *chan, int result,
4524 u8 local_amp_id, u8 remote_amp_id)
4525{
4526 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4527 local_amp_id, remote_amp_id);
4528
4529 chan->fcs = L2CAP_FCS_NONE;
4530
4531 /* Outgoing channel on AMP */
4532 if (chan->state == BT_CONNECT) {
4533 if (result == L2CAP_CR_SUCCESS) {
4534 chan->local_amp_id = local_amp_id;
4535 l2cap_send_create_chan_req(chan, remote_amp_id);
4536 } else {
4537 /* Revert to BR/EDR connect */
4538 l2cap_send_conn_req(chan);
4539 }
4540
4541 return;
4542 }
4543
4544 /* Incoming channel on AMP */
4545 if (__l2cap_no_conn_pending(chan)) {
4546 struct l2cap_conn_rsp rsp;
4547 char buf[128];
4548 rsp.scid = cpu_to_le16(chan->dcid);
4549 rsp.dcid = cpu_to_le16(chan->scid);
4550
4551 if (result == L2CAP_CR_SUCCESS) {
4552 /* Send successful response */
4553 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4554 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4555 } else {
4556 /* Send negative response */
4557 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4558 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4559 }
4560
4561 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4562 sizeof(rsp), &rsp);
4563
4564 if (result == L2CAP_CR_SUCCESS) {
4565 __l2cap_state_change(chan, BT_CONFIG);
4566 set_bit(CONF_REQ_SENT, &chan->conf_state);
4567 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4568 L2CAP_CONF_REQ,
4569 l2cap_build_conf_req(chan, buf), buf);
4570 chan->num_conf_req++;
4571 }
4572 }
4573}
4574
4575static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4576 u8 remote_amp_id)
4577{
4578 l2cap_move_setup(chan);
4579 chan->move_id = local_amp_id;
4580 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4581
4582 l2cap_send_move_chan_req(chan, remote_amp_id);
4583}
4584
4585static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4586{
4587 struct hci_chan *hchan = NULL;
4588
4589 /* Placeholder - get hci_chan for logical link */
4590
4591 if (hchan) {
4592 if (hchan->state == BT_CONNECTED) {
4593 /* Logical link is ready to go */
4594 chan->hs_hcon = hchan->conn;
4595 chan->hs_hcon->l2cap_data = chan->conn;
4596 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4597 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4598
4599 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4600 } else {
4601 /* Wait for logical link to be ready */
4602 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4603 }
4604 } else {
4605 /* Logical link not available */
4606 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4607 }
4608}
4609
4610static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4611{
4612 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4613 u8 rsp_result;
4614 if (result == -EINVAL)
4615 rsp_result = L2CAP_MR_BAD_ID;
4616 else
4617 rsp_result = L2CAP_MR_NOT_ALLOWED;
4618
4619 l2cap_send_move_chan_rsp(chan, rsp_result);
4620 }
4621
4622 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4623 chan->move_state = L2CAP_MOVE_STABLE;
4624
4625 /* Restart data transmission */
4626 l2cap_ertm_send(chan);
4627}
4628
4629/* Invoke with locked chan */
4630void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4631{
4632 u8 local_amp_id = chan->local_amp_id;
4633 u8 remote_amp_id = chan->remote_amp_id;
4634
4635 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4636 chan, result, local_amp_id, remote_amp_id);
4637
4638 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4639 l2cap_chan_unlock(chan);
4640 return;
4641 }
4642
4643 if (chan->state != BT_CONNECTED) {
4644 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4645 } else if (result != L2CAP_MR_SUCCESS) {
4646 l2cap_do_move_cancel(chan, result);
4647 } else {
4648 switch (chan->move_role) {
4649 case L2CAP_MOVE_ROLE_INITIATOR:
4650 l2cap_do_move_initiate(chan, local_amp_id,
4651 remote_amp_id);
4652 break;
4653 case L2CAP_MOVE_ROLE_RESPONDER:
4654 l2cap_do_move_respond(chan, result);
4655 break;
4656 default:
4657 l2cap_do_move_cancel(chan, result);
4658 break;
4659 }
4660 }
4661}
4662
4046static inline int l2cap_move_channel_req(struct l2cap_conn *conn, 4663static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4047 struct l2cap_cmd_hdr *cmd, 4664 struct l2cap_cmd_hdr *cmd,
4048 u16 cmd_len, void *data) 4665 u16 cmd_len, void *data)
4049{ 4666{
4050 struct l2cap_move_chan_req *req = data; 4667 struct l2cap_move_chan_req *req = data;
4668 struct l2cap_move_chan_rsp rsp;
4669 struct l2cap_chan *chan;
4051 u16 icid = 0; 4670 u16 icid = 0;
4052 u16 result = L2CAP_MR_NOT_ALLOWED; 4671 u16 result = L2CAP_MR_NOT_ALLOWED;
4053 4672
@@ -4061,15 +4680,206 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4061 if (!enable_hs) 4680 if (!enable_hs)
4062 return -EINVAL; 4681 return -EINVAL;
4063 4682
4064 /* Placeholder: Always refuse */ 4683 chan = l2cap_get_chan_by_dcid(conn, icid);
4065 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result); 4684 if (!chan) {
4685 rsp.icid = cpu_to_le16(icid);
4686 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4687 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4688 sizeof(rsp), &rsp);
4689 return 0;
4690 }
4691
4692 chan->ident = cmd->ident;
4693
4694 if (chan->scid < L2CAP_CID_DYN_START ||
4695 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4696 (chan->mode != L2CAP_MODE_ERTM &&
4697 chan->mode != L2CAP_MODE_STREAMING)) {
4698 result = L2CAP_MR_NOT_ALLOWED;
4699 goto send_move_response;
4700 }
4701
4702 if (chan->local_amp_id == req->dest_amp_id) {
4703 result = L2CAP_MR_SAME_ID;
4704 goto send_move_response;
4705 }
4706
4707 if (req->dest_amp_id) {
4708 struct hci_dev *hdev;
4709 hdev = hci_dev_get(req->dest_amp_id);
4710 if (!hdev || hdev->dev_type != HCI_AMP ||
4711 !test_bit(HCI_UP, &hdev->flags)) {
4712 if (hdev)
4713 hci_dev_put(hdev);
4714
4715 result = L2CAP_MR_BAD_ID;
4716 goto send_move_response;
4717 }
4718 hci_dev_put(hdev);
4719 }
4720
4721 /* Detect a move collision. Only send a collision response
4722 * if this side has "lost", otherwise proceed with the move.
4723 * The winner has the larger bd_addr.
4724 */
4725 if ((__chan_is_moving(chan) ||
4726 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4727 bacmp(conn->src, conn->dst) > 0) {
4728 result = L2CAP_MR_COLLISION;
4729 goto send_move_response;
4730 }
4731
4732 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4733 l2cap_move_setup(chan);
4734 chan->move_id = req->dest_amp_id;
4735 icid = chan->dcid;
4736
4737 if (!req->dest_amp_id) {
4738 /* Moving to BR/EDR */
4739 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4740 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4741 result = L2CAP_MR_PEND;
4742 } else {
4743 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4744 result = L2CAP_MR_SUCCESS;
4745 }
4746 } else {
4747 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4748 /* Placeholder - uncomment when amp functions are available */
4749 /*amp_accept_physical(chan, req->dest_amp_id);*/
4750 result = L2CAP_MR_PEND;
4751 }
4752
4753send_move_response:
4754 l2cap_send_move_chan_rsp(chan, result);
4755
4756 l2cap_chan_unlock(chan);
4066 4757
4067 return 0; 4758 return 0;
4068} 4759}
4069 4760
/* Continue an initiator-side channel move after receiving a Move
 * Channel Response with SUCCESS or PEND.  Looks up the channel by its
 * initiator cid; if it is gone, a best-effort confirm is sent by icid.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A PEND response re-arms the extended response timer. */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link
		 * NOTE(review): hchan is initialized to NULL and never
		 * assigned, so until the AMP logical-link lookup is
		 * implemented this branch always sends UNCONFIRMED and
		 * everything below the NULL check is unreachable.
		 */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
4850
4851static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
4852 u16 result)
4853{
4854 struct l2cap_chan *chan;
4855
4856 chan = l2cap_get_chan_by_ident(conn, ident);
4857 if (!chan) {
4858 /* Could not locate channel, icid is best guess */
4859 l2cap_send_move_chan_cfm_icid(conn, icid);
4860 return;
4861 }
4862
4863 __clear_chan_timer(chan);
4864
4865 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4866 if (result == L2CAP_MR_COLLISION) {
4867 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4868 } else {
4869 /* Cleanup - cancel move */
4870 chan->move_id = chan->local_amp_id;
4871 l2cap_move_done(chan);
4872 }
4873 }
4874
4875 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4876
4877 l2cap_chan_unlock(chan);
4878}
4879
4880static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4881 struct l2cap_cmd_hdr *cmd,
4882 u16 cmd_len, void *data)
4073{ 4883{
4074 struct l2cap_move_chan_rsp *rsp = data; 4884 struct l2cap_move_chan_rsp *rsp = data;
4075 u16 icid, result; 4885 u16 icid, result;
@@ -4082,17 +4892,20 @@ static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4082 4892
4083 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result); 4893 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4084 4894
4085 /* Placeholder: Always unconfirmed */ 4895 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
4086 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED); 4896 l2cap_move_continue(conn, icid, result);
4897 else
4898 l2cap_move_fail(conn, cmd->ident, icid, result);
4087 4899
4088 return 0; 4900 return 0;
4089} 4901}
4090 4902
4091static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn, 4903static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4092 struct l2cap_cmd_hdr *cmd, 4904 struct l2cap_cmd_hdr *cmd,
4093 u16 cmd_len, void *data) 4905 u16 cmd_len, void *data)
4094{ 4906{
4095 struct l2cap_move_chan_cfm *cfm = data; 4907 struct l2cap_move_chan_cfm *cfm = data;
4908 struct l2cap_chan *chan;
4096 u16 icid, result; 4909 u16 icid, result;
4097 4910
4098 if (cmd_len != sizeof(*cfm)) 4911 if (cmd_len != sizeof(*cfm))
@@ -4103,8 +4916,29 @@ static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4103 4916
4104 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result); 4917 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4105 4918
4919 chan = l2cap_get_chan_by_dcid(conn, icid);
4920 if (!chan) {
4921 /* Spec requires a response even if the icid was not found */
4922 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4923 return 0;
4924 }
4925
4926 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
4927 if (result == L2CAP_MC_CONFIRMED) {
4928 chan->local_amp_id = chan->move_id;
4929 if (!chan->local_amp_id)
4930 __release_logical_link(chan);
4931 } else {
4932 chan->move_id = chan->local_amp_id;
4933 }
4934
4935 l2cap_move_done(chan);
4936 }
4937
4106 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid); 4938 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4107 4939
4940 l2cap_chan_unlock(chan);
4941
4108 return 0; 4942 return 0;
4109} 4943}
4110 4944
@@ -4113,6 +4947,7 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4113 u16 cmd_len, void *data) 4947 u16 cmd_len, void *data)
4114{ 4948{
4115 struct l2cap_move_chan_cfm_rsp *rsp = data; 4949 struct l2cap_move_chan_cfm_rsp *rsp = data;
4950 struct l2cap_chan *chan;
4116 u16 icid; 4951 u16 icid;
4117 4952
4118 if (cmd_len != sizeof(*rsp)) 4953 if (cmd_len != sizeof(*rsp))
@@ -4122,11 +4957,28 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4122 4957
4123 BT_DBG("icid 0x%4.4x", icid); 4958 BT_DBG("icid 0x%4.4x", icid);
4124 4959
4960 chan = l2cap_get_chan_by_scid(conn, icid);
4961 if (!chan)
4962 return 0;
4963
4964 __clear_chan_timer(chan);
4965
4966 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
4967 chan->local_amp_id = chan->move_id;
4968
4969 if (!chan->local_amp_id && chan->hs_hchan)
4970 __release_logical_link(chan);
4971
4972 l2cap_move_done(chan);
4973 }
4974
4975 l2cap_chan_unlock(chan);
4976
4125 return 0; 4977 return 0;
4126} 4978}
4127 4979
4128static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency, 4980static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4129 u16 to_multiplier) 4981 u16 to_multiplier)
4130{ 4982{
4131 u16 max_latency; 4983 u16 max_latency;
4132 4984
@@ -4147,7 +4999,8 @@ static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4147} 4999}
4148 5000
4149static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, 5001static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4150 struct l2cap_cmd_hdr *cmd, u8 *data) 5002 struct l2cap_cmd_hdr *cmd,
5003 u8 *data)
4151{ 5004{
4152 struct hci_conn *hcon = conn->hcon; 5005 struct hci_conn *hcon = conn->hcon;
4153 struct l2cap_conn_param_update_req *req; 5006 struct l2cap_conn_param_update_req *req;
@@ -4169,7 +5022,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4169 to_multiplier = __le16_to_cpu(req->to_multiplier); 5022 to_multiplier = __le16_to_cpu(req->to_multiplier);
4170 5023
4171 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x", 5024 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4172 min, max, latency, to_multiplier); 5025 min, max, latency, to_multiplier);
4173 5026
4174 memset(&rsp, 0, sizeof(rsp)); 5027 memset(&rsp, 0, sizeof(rsp));
4175 5028
@@ -4180,7 +5033,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4180 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED); 5033 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4181 5034
4182 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP, 5035 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4183 sizeof(rsp), &rsp); 5036 sizeof(rsp), &rsp);
4184 5037
4185 if (!err) 5038 if (!err)
4186 hci_le_conn_update(hcon, min, max, latency, to_multiplier); 5039 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
@@ -4189,7 +5042,8 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4189} 5042}
4190 5043
4191static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn, 5044static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4192 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) 5045 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5046 u8 *data)
4193{ 5047{
4194 int err = 0; 5048 int err = 0;
4195 5049
@@ -4203,7 +5057,8 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4203 break; 5057 break;
4204 5058
4205 case L2CAP_CONN_RSP: 5059 case L2CAP_CONN_RSP:
4206 err = l2cap_connect_rsp(conn, cmd, data); 5060 case L2CAP_CREATE_CHAN_RSP:
5061 err = l2cap_connect_create_rsp(conn, cmd, data);
4207 break; 5062 break;
4208 5063
4209 case L2CAP_CONF_REQ: 5064 case L2CAP_CONF_REQ:
@@ -4241,10 +5096,6 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4241 err = l2cap_create_channel_req(conn, cmd, cmd_len, data); 5096 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4242 break; 5097 break;
4243 5098
4244 case L2CAP_CREATE_CHAN_RSP:
4245 err = l2cap_create_channel_rsp(conn, cmd, data);
4246 break;
4247
4248 case L2CAP_MOVE_CHAN_REQ: 5099 case L2CAP_MOVE_CHAN_REQ:
4249 err = l2cap_move_channel_req(conn, cmd, cmd_len, data); 5100 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4250 break; 5101 break;
@@ -4271,7 +5122,7 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4271} 5122}
4272 5123
4273static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn, 5124static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4274 struct l2cap_cmd_hdr *cmd, u8 *data) 5125 struct l2cap_cmd_hdr *cmd, u8 *data)
4275{ 5126{
4276 switch (cmd->code) { 5127 switch (cmd->code) {
4277 case L2CAP_COMMAND_REJ: 5128 case L2CAP_COMMAND_REJ:
@@ -4290,7 +5141,7 @@ static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4290} 5141}
4291 5142
4292static inline void l2cap_sig_channel(struct l2cap_conn *conn, 5143static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4293 struct sk_buff *skb) 5144 struct sk_buff *skb)
4294{ 5145{
4295 u8 *data = skb->data; 5146 u8 *data = skb->data;
4296 int len = skb->len; 5147 int len = skb->len;
@@ -4307,7 +5158,8 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4307 5158
4308 cmd_len = le16_to_cpu(cmd.len); 5159 cmd_len = le16_to_cpu(cmd.len);
4309 5160
4310 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident); 5161 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5162 cmd.ident);
4311 5163
4312 if (cmd_len > len || !cmd.ident) { 5164 if (cmd_len > len || !cmd.ident) {
4313 BT_DBG("corrupted command"); 5165 BT_DBG("corrupted command");
@@ -4326,7 +5178,8 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4326 5178
4327 /* FIXME: Map err to a valid reason */ 5179 /* FIXME: Map err to a valid reason */
4328 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); 5180 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4329 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); 5181 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5182 sizeof(rej), &rej);
4330 } 5183 }
4331 5184
4332 data += cmd_len; 5185 data += cmd_len;
@@ -4391,8 +5244,8 @@ static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4391 } 5244 }
4392} 5245}
4393 5246
4394static void append_skb_frag(struct sk_buff *skb, 5247static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
4395 struct sk_buff *new_frag, struct sk_buff **last_frag) 5248 struct sk_buff **last_frag)
4396{ 5249{
4397 /* skb->len reflects data in skb as well as all fragments 5250 /* skb->len reflects data in skb as well as all fragments
4398 * skb->data_len reflects only data in fragments 5251 * skb->data_len reflects only data in fragments
@@ -4492,6 +5345,12 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4492 return err; 5345 return err;
4493} 5346}
4494 5347
/* Re-segment queued outbound data after the channel moved to a link
 * with a different MTU.  Placeholder: currently a no-op that reports
 * success; real resegmentation arrives with full AMP support.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5353
4495void l2cap_chan_busy(struct l2cap_chan *chan, int busy) 5354void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4496{ 5355{
4497 u8 event; 5356 u8 event;
@@ -4546,7 +5405,7 @@ static void l2cap_handle_srej(struct l2cap_chan *chan,
4546 5405
4547 if (control->reqseq == chan->next_tx_seq) { 5406 if (control->reqseq == chan->next_tx_seq) {
4548 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq); 5407 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4549 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 5408 l2cap_send_disconn_req(chan, ECONNRESET);
4550 return; 5409 return;
4551 } 5410 }
4552 5411
@@ -4560,7 +5419,7 @@ static void l2cap_handle_srej(struct l2cap_chan *chan,
4560 5419
4561 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) { 5420 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4562 BT_DBG("Retry limit exceeded (%d)", chan->max_tx); 5421 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4563 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 5422 l2cap_send_disconn_req(chan, ECONNRESET);
4564 return; 5423 return;
4565 } 5424 }
4566 5425
@@ -4604,7 +5463,7 @@ static void l2cap_handle_rej(struct l2cap_chan *chan,
4604 5463
4605 if (control->reqseq == chan->next_tx_seq) { 5464 if (control->reqseq == chan->next_tx_seq) {
4606 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq); 5465 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4607 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 5466 l2cap_send_disconn_req(chan, ECONNRESET);
4608 return; 5467 return;
4609 } 5468 }
4610 5469
@@ -4613,7 +5472,7 @@ static void l2cap_handle_rej(struct l2cap_chan *chan,
4613 if (chan->max_tx && skb && 5472 if (chan->max_tx && skb &&
4614 bt_cb(skb)->control.retries >= chan->max_tx) { 5473 bt_cb(skb)->control.retries >= chan->max_tx) {
4615 BT_DBG("Retry limit exceeded (%d)", chan->max_tx); 5474 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4616 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 5475 l2cap_send_disconn_req(chan, ECONNRESET);
4617 return; 5476 return;
4618 } 5477 }
4619 5478
@@ -4641,7 +5500,7 @@ static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4641 5500
4642 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { 5501 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4643 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= 5502 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4644 chan->tx_win) { 5503 chan->tx_win) {
4645 /* See notes below regarding "double poll" and 5504 /* See notes below regarding "double poll" and
4646 * invalid packets. 5505 * invalid packets.
4647 */ 5506 */
@@ -4682,8 +5541,7 @@ static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4682 } 5541 }
4683 5542
4684 if (__seq_offset(chan, txseq, chan->last_acked_seq) < 5543 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4685 __seq_offset(chan, chan->expected_tx_seq, 5544 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
4686 chan->last_acked_seq)){
4687 BT_DBG("Duplicate - expected_tx_seq later than txseq"); 5545 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4688 return L2CAP_TXSEQ_DUPLICATE; 5546 return L2CAP_TXSEQ_DUPLICATE;
4689 } 5547 }
@@ -4798,8 +5656,7 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4798 break; 5656 break;
4799 case L2CAP_TXSEQ_INVALID: 5657 case L2CAP_TXSEQ_INVALID:
4800 default: 5658 default:
4801 l2cap_send_disconn_req(chan->conn, chan, 5659 l2cap_send_disconn_req(chan, ECONNRESET);
4802 ECONNRESET);
4803 break; 5660 break;
4804 } 5661 }
4805 break; 5662 break;
@@ -4808,8 +5665,8 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4808 if (control->final) { 5665 if (control->final) {
4809 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 5666 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4810 5667
4811 if (!test_and_clear_bit(CONN_REJ_ACT, 5668 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
4812 &chan->conn_state)) { 5669 !__chan_is_moving(chan)) {
4813 control->final = 0; 5670 control->final = 0;
4814 l2cap_retransmit_all(chan, control); 5671 l2cap_retransmit_all(chan, control);
4815 } 5672 }
@@ -4932,8 +5789,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4932 break; 5789 break;
4933 case L2CAP_TXSEQ_INVALID: 5790 case L2CAP_TXSEQ_INVALID:
4934 default: 5791 default:
4935 l2cap_send_disconn_req(chan->conn, chan, 5792 l2cap_send_disconn_req(chan, ECONNRESET);
4936 ECONNRESET);
4937 break; 5793 break;
4938 } 5794 }
4939 break; 5795 break;
@@ -4998,6 +5854,96 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4998 return err; 5854 return err;
4999} 5855}
5000 5856
5857static int l2cap_finish_move(struct l2cap_chan *chan)
5858{
5859 BT_DBG("chan %p", chan);
5860
5861 chan->rx_state = L2CAP_RX_STATE_RECV;
5862
5863 if (chan->hs_hcon)
5864 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
5865 else
5866 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
5867
5868 return l2cap_resegment(chan);
5869}
5870
/* RX state handler while waiting for a poll (P=1) frame after a
 * channel move.  Any frame without the P bit is a protocol error.
 * On receiving the poll, the transmit side is rewound to the peer's
 * acknowledged point, the move is finished, and an F-bit response is
 * sent before resuming normal RECV-state processing.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* NOTE(review): control->poll was set above, and here an I-frame
	 * event is rejected -- presumably because the P bit is only valid
	 * on S-frames in ERTM; confirm against the ERTM spec.
	 */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* S-frame already fully consumed; pass NULL skb to RECV handler. */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
5908
5909static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
5910 struct l2cap_ctrl *control,
5911 struct sk_buff *skb, u8 event)
5912{
5913 int err;
5914
5915 if (!control->final)
5916 return -EPROTO;
5917
5918 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5919
5920 chan->rx_state = L2CAP_RX_STATE_RECV;
5921 l2cap_process_reqseq(chan, control->reqseq);
5922
5923 if (!skb_queue_empty(&chan->tx_q))
5924 chan->tx_send_head = skb_peek(&chan->tx_q);
5925 else
5926 chan->tx_send_head = NULL;
5927
5928 /* Rewind next_tx_seq to the point expected
5929 * by the receiver.
5930 */
5931 chan->next_tx_seq = control->reqseq;
5932 chan->unacked_frames = 0;
5933
5934 if (chan->hs_hcon)
5935 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
5936 else
5937 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
5938
5939 err = l2cap_resegment(chan);
5940
5941 if (!err)
5942 err = l2cap_rx_state_recv(chan, control, skb, event);
5943
5944 return err;
5945}
5946
5001static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq) 5947static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5002{ 5948{
5003 /* Make sure reqseq is for a packet that has been sent but not acked */ 5949 /* Make sure reqseq is for a packet that has been sent but not acked */
@@ -5024,6 +5970,12 @@ static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5024 err = l2cap_rx_state_srej_sent(chan, control, skb, 5970 err = l2cap_rx_state_srej_sent(chan, control, skb,
5025 event); 5971 event);
5026 break; 5972 break;
5973 case L2CAP_RX_STATE_WAIT_P:
5974 err = l2cap_rx_state_wait_p(chan, control, skb, event);
5975 break;
5976 case L2CAP_RX_STATE_WAIT_F:
5977 err = l2cap_rx_state_wait_f(chan, control, skb, event);
5978 break;
5027 default: 5979 default:
5028 /* shut it down */ 5980 /* shut it down */
5029 break; 5981 break;
@@ -5032,7 +5984,7 @@ static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5032 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d", 5984 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5033 control->reqseq, chan->next_tx_seq, 5985 control->reqseq, chan->next_tx_seq,
5034 chan->expected_ack_seq); 5986 chan->expected_ack_seq);
5035 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 5987 l2cap_send_disconn_req(chan, ECONNRESET);
5036 } 5988 }
5037 5989
5038 return err; 5990 return err;
@@ -5101,7 +6053,7 @@ static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5101 len -= L2CAP_FCS_SIZE; 6053 len -= L2CAP_FCS_SIZE;
5102 6054
5103 if (len > chan->mps) { 6055 if (len > chan->mps) {
5104 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 6056 l2cap_send_disconn_req(chan, ECONNRESET);
5105 goto drop; 6057 goto drop;
5106 } 6058 }
5107 6059
@@ -5126,8 +6078,7 @@ static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5126 } 6078 }
5127 6079
5128 if (err) 6080 if (err)
5129 l2cap_send_disconn_req(chan->conn, chan, 6081 l2cap_send_disconn_req(chan, ECONNRESET);
5130 ECONNRESET);
5131 } else { 6082 } else {
5132 const u8 rx_func_to_event[4] = { 6083 const u8 rx_func_to_event[4] = {
5133 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ, 6084 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
@@ -5143,8 +6094,8 @@ static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5143 control->super); 6094 control->super);
5144 6095
5145 if (len != 0) { 6096 if (len != 0) {
5146 BT_ERR("%d", len); 6097 BT_ERR("Trailing bytes: %d in sframe", len);
5147 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 6098 l2cap_send_disconn_req(chan, ECONNRESET);
5148 goto drop; 6099 goto drop;
5149 } 6100 }
5150 6101
@@ -5155,7 +6106,7 @@ static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5155 6106
5156 event = rx_func_to_event[control->super]; 6107 event = rx_func_to_event[control->super];
5157 if (l2cap_rx(chan, control, skb, event)) 6108 if (l2cap_rx(chan, control, skb, event))
5158 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 6109 l2cap_send_disconn_req(chan, ECONNRESET);
5159 } 6110 }
5160 6111
5161 return 0; 6112 return 0;
@@ -5323,7 +6274,7 @@ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5323 int exact = 0, lm1 = 0, lm2 = 0; 6274 int exact = 0, lm1 = 0, lm2 = 0;
5324 struct l2cap_chan *c; 6275 struct l2cap_chan *c;
5325 6276
5326 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); 6277 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
5327 6278
5328 /* Find listening sockets and check their link_mode */ 6279 /* Find listening sockets and check their link_mode */
5329 read_lock(&chan_list_lock); 6280 read_lock(&chan_list_lock);
@@ -5353,15 +6304,15 @@ void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5353{ 6304{
5354 struct l2cap_conn *conn; 6305 struct l2cap_conn *conn;
5355 6306
5356 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); 6307 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
5357 6308
5358 if (!status) { 6309 if (!status) {
5359 conn = l2cap_conn_add(hcon, status); 6310 conn = l2cap_conn_add(hcon, status);
5360 if (conn) 6311 if (conn)
5361 l2cap_conn_ready(conn); 6312 l2cap_conn_ready(conn);
5362 } else 6313 } else {
5363 l2cap_conn_del(hcon, bt_to_errno(status)); 6314 l2cap_conn_del(hcon, bt_to_errno(status));
5364 6315 }
5365} 6316}
5366 6317
5367int l2cap_disconn_ind(struct hci_conn *hcon) 6318int l2cap_disconn_ind(struct hci_conn *hcon)
@@ -5437,13 +6388,13 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5437 continue; 6388 continue;
5438 } 6389 }
5439 6390
5440 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) { 6391 if (!__l2cap_no_conn_pending(chan)) {
5441 l2cap_chan_unlock(chan); 6392 l2cap_chan_unlock(chan);
5442 continue; 6393 continue;
5443 } 6394 }
5444 6395
5445 if (!status && (chan->state == BT_CONNECTED || 6396 if (!status && (chan->state == BT_CONNECTED ||
5446 chan->state == BT_CONFIG)) { 6397 chan->state == BT_CONFIG)) {
5447 struct sock *sk = chan->sk; 6398 struct sock *sk = chan->sk;
5448 6399
5449 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); 6400 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
@@ -5456,7 +6407,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5456 6407
5457 if (chan->state == BT_CONNECT) { 6408 if (chan->state == BT_CONNECT) {
5458 if (!status) { 6409 if (!status) {
5459 l2cap_send_conn_req(chan); 6410 l2cap_start_connection(chan);
5460 } else { 6411 } else {
5461 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); 6412 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5462 } 6413 }
@@ -5470,11 +6421,9 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5470 if (!status) { 6421 if (!status) {
5471 if (test_bit(BT_SK_DEFER_SETUP, 6422 if (test_bit(BT_SK_DEFER_SETUP,
5472 &bt_sk(sk)->flags)) { 6423 &bt_sk(sk)->flags)) {
5473 struct sock *parent = bt_sk(sk)->parent;
5474 res = L2CAP_CR_PEND; 6424 res = L2CAP_CR_PEND;
5475 stat = L2CAP_CS_AUTHOR_PEND; 6425 stat = L2CAP_CS_AUTHOR_PEND;
5476 if (parent) 6426 chan->ops->defer(chan);
5477 parent->sk_data_ready(parent, 0);
5478 } else { 6427 } else {
5479 __l2cap_state_change(chan, BT_CONFIG); 6428 __l2cap_state_change(chan, BT_CONFIG);
5480 res = L2CAP_CR_SUCCESS; 6429 res = L2CAP_CR_SUCCESS;
@@ -5494,7 +6443,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5494 rsp.result = cpu_to_le16(res); 6443 rsp.result = cpu_to_le16(res);
5495 rsp.status = cpu_to_le16(stat); 6444 rsp.status = cpu_to_le16(stat);
5496 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 6445 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
5497 sizeof(rsp), &rsp); 6446 sizeof(rsp), &rsp);
5498 6447
5499 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) && 6448 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
5500 res == L2CAP_CR_SUCCESS) { 6449 res == L2CAP_CR_SUCCESS) {
@@ -5519,6 +6468,12 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5519int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) 6468int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5520{ 6469{
5521 struct l2cap_conn *conn = hcon->l2cap_data; 6470 struct l2cap_conn *conn = hcon->l2cap_data;
6471 struct l2cap_hdr *hdr;
6472 int len;
6473
6474 /* For AMP controller do not create l2cap conn */
6475 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6476 goto drop;
5522 6477
5523 if (!conn) 6478 if (!conn)
5524 conn = l2cap_conn_add(hcon, 0); 6479 conn = l2cap_conn_add(hcon, 0);
@@ -5528,10 +6483,10 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5528 6483
5529 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags); 6484 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5530 6485
5531 if (!(flags & ACL_CONT)) { 6486 switch (flags) {
5532 struct l2cap_hdr *hdr; 6487 case ACL_START:
5533 int len; 6488 case ACL_START_NO_FLUSH:
5534 6489 case ACL_COMPLETE:
5535 if (conn->rx_len) { 6490 if (conn->rx_len) {
5536 BT_ERR("Unexpected start frame (len %d)", skb->len); 6491 BT_ERR("Unexpected start frame (len %d)", skb->len);
5537 kfree_skb(conn->rx_skb); 6492 kfree_skb(conn->rx_skb);
@@ -5560,20 +6515,22 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5560 6515
5561 if (skb->len > len) { 6516 if (skb->len > len) {
5562 BT_ERR("Frame is too long (len %d, expected len %d)", 6517 BT_ERR("Frame is too long (len %d, expected len %d)",
5563 skb->len, len); 6518 skb->len, len);
5564 l2cap_conn_unreliable(conn, ECOMM); 6519 l2cap_conn_unreliable(conn, ECOMM);
5565 goto drop; 6520 goto drop;
5566 } 6521 }
5567 6522
5568 /* Allocate skb for the complete frame (with header) */ 6523 /* Allocate skb for the complete frame (with header) */
5569 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC); 6524 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
5570 if (!conn->rx_skb) 6525 if (!conn->rx_skb)
5571 goto drop; 6526 goto drop;
5572 6527
5573 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), 6528 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5574 skb->len); 6529 skb->len);
5575 conn->rx_len = len - skb->len; 6530 conn->rx_len = len - skb->len;
5576 } else { 6531 break;
6532
6533 case ACL_CONT:
5577 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len); 6534 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5578 6535
5579 if (!conn->rx_len) { 6536 if (!conn->rx_len) {
@@ -5584,7 +6541,7 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5584 6541
5585 if (skb->len > conn->rx_len) { 6542 if (skb->len > conn->rx_len) {
5586 BT_ERR("Fragment is too long (len %d, expected %d)", 6543 BT_ERR("Fragment is too long (len %d, expected %d)",
5587 skb->len, conn->rx_len); 6544 skb->len, conn->rx_len);
5588 kfree_skb(conn->rx_skb); 6545 kfree_skb(conn->rx_skb);
5589 conn->rx_skb = NULL; 6546 conn->rx_skb = NULL;
5590 conn->rx_len = 0; 6547 conn->rx_len = 0;
@@ -5593,7 +6550,7 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5593 } 6550 }
5594 6551
5595 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), 6552 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5596 skb->len); 6553 skb->len);
5597 conn->rx_len -= skb->len; 6554 conn->rx_len -= skb->len;
5598 6555
5599 if (!conn->rx_len) { 6556 if (!conn->rx_len) {
@@ -5601,6 +6558,7 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5601 l2cap_recv_frame(conn, conn->rx_skb); 6558 l2cap_recv_frame(conn, conn->rx_skb);
5602 conn->rx_skb = NULL; 6559 conn->rx_skb = NULL;
5603 } 6560 }
6561 break;
5604 } 6562 }
5605 6563
5606drop: 6564drop:
@@ -5617,12 +6575,11 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
5617 list_for_each_entry(c, &chan_list, global_l) { 6575 list_for_each_entry(c, &chan_list, global_l) {
5618 struct sock *sk = c->sk; 6576 struct sock *sk = c->sk;
5619 6577
5620 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n", 6578 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5621 batostr(&bt_sk(sk)->src), 6579 &bt_sk(sk)->src, &bt_sk(sk)->dst,
5622 batostr(&bt_sk(sk)->dst), 6580 c->state, __le16_to_cpu(c->psm),
5623 c->state, __le16_to_cpu(c->psm), 6581 c->scid, c->dcid, c->imtu, c->omtu,
5624 c->scid, c->dcid, c->imtu, c->omtu, 6582 c->sec_level, c->mode);
5625 c->sec_level, c->mode);
5626 } 6583 }
5627 6584
5628 read_unlock(&chan_list_lock); 6585 read_unlock(&chan_list_lock);
@@ -5653,8 +6610,8 @@ int __init l2cap_init(void)
5653 return err; 6610 return err;
5654 6611
5655 if (bt_debugfs) { 6612 if (bt_debugfs) {
5656 l2cap_debugfs = debugfs_create_file("l2cap", 0444, 6613 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
5657 bt_debugfs, NULL, &l2cap_debugfs_fops); 6614 NULL, &l2cap_debugfs_fops);
5658 if (!l2cap_debugfs) 6615 if (!l2cap_debugfs)
5659 BT_ERR("Failed to create L2CAP debug file"); 6616 BT_ERR("Failed to create L2CAP debug file");
5660 } 6617 }
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 083f2bf065d4..1bcfb8422fdc 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -40,7 +40,8 @@ static struct bt_sock_list l2cap_sk_list = {
40 40
41static const struct proto_ops l2cap_sock_ops; 41static const struct proto_ops l2cap_sock_ops;
42static void l2cap_sock_init(struct sock *sk, struct sock *parent); 42static void l2cap_sock_init(struct sock *sk, struct sock *parent);
43static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio); 43static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
44 int proto, gfp_t prio);
44 45
45static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) 46static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
46{ 47{
@@ -106,7 +107,8 @@ done:
106 return err; 107 return err;
107} 108}
108 109
109static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) 110static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
111 int alen, int flags)
110{ 112{
111 struct sock *sk = sock->sk; 113 struct sock *sk = sock->sk;
112 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 114 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
@@ -134,7 +136,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
134 lock_sock(sk); 136 lock_sock(sk);
135 137
136 err = bt_sock_wait_state(sk, BT_CONNECTED, 138 err = bt_sock_wait_state(sk, BT_CONNECTED,
137 sock_sndtimeo(sk, flags & O_NONBLOCK)); 139 sock_sndtimeo(sk, flags & O_NONBLOCK));
138 140
139 release_sock(sk); 141 release_sock(sk);
140 142
@@ -185,7 +187,8 @@ done:
185 return err; 187 return err;
186} 188}
187 189
188static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags) 190static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
191 int flags)
189{ 192{
190 DECLARE_WAITQUEUE(wait, current); 193 DECLARE_WAITQUEUE(wait, current);
191 struct sock *sk = sock->sk, *nsk; 194 struct sock *sk = sock->sk, *nsk;
@@ -241,7 +244,8 @@ done:
241 return err; 244 return err;
242} 245}
243 246
244static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer) 247static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr,
248 int *len, int peer)
245{ 249{
246 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr; 250 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
247 struct sock *sk = sock->sk; 251 struct sock *sk = sock->sk;
@@ -266,7 +270,8 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
266 return 0; 270 return 0;
267} 271}
268 272
269static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) 273static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
274 char __user *optval, int __user *optlen)
270{ 275{
271 struct sock *sk = sock->sk; 276 struct sock *sk = sock->sk;
272 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 277 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
@@ -309,7 +314,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
309 break; 314 break;
310 case BT_SECURITY_HIGH: 315 case BT_SECURITY_HIGH:
311 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT | 316 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
312 L2CAP_LM_SECURE; 317 L2CAP_LM_SECURE;
313 break; 318 break;
314 default: 319 default:
315 opt = 0; 320 opt = 0;
@@ -353,7 +358,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
353 return err; 358 return err;
354} 359}
355 360
356static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) 361static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname,
362 char __user *optval, int __user *optlen)
357{ 363{
358 struct sock *sk = sock->sk; 364 struct sock *sk = sock->sk;
359 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 365 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
@@ -377,19 +383,20 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
377 switch (optname) { 383 switch (optname) {
378 case BT_SECURITY: 384 case BT_SECURITY:
379 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED && 385 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
380 chan->chan_type != L2CAP_CHAN_RAW) { 386 chan->chan_type != L2CAP_CHAN_RAW) {
381 err = -EINVAL; 387 err = -EINVAL;
382 break; 388 break;
383 } 389 }
384 390
385 memset(&sec, 0, sizeof(sec)); 391 memset(&sec, 0, sizeof(sec));
386 if (chan->conn) 392 if (chan->conn) {
387 sec.level = chan->conn->hcon->sec_level; 393 sec.level = chan->conn->hcon->sec_level;
388 else
389 sec.level = chan->sec_level;
390 394
391 if (sk->sk_state == BT_CONNECTED) 395 if (sk->sk_state == BT_CONNECTED)
392 sec.key_size = chan->conn->hcon->enc_key_size; 396 sec.key_size = chan->conn->hcon->enc_key_size;
397 } else {
398 sec.level = chan->sec_level;
399 }
393 400
394 len = min_t(unsigned int, len, sizeof(sec)); 401 len = min_t(unsigned int, len, sizeof(sec));
395 if (copy_to_user(optval, (char *) &sec, len)) 402 if (copy_to_user(optval, (char *) &sec, len))
@@ -411,14 +418,14 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
411 418
412 case BT_FLUSHABLE: 419 case BT_FLUSHABLE:
413 if (put_user(test_bit(FLAG_FLUSHABLE, &chan->flags), 420 if (put_user(test_bit(FLAG_FLUSHABLE, &chan->flags),
414 (u32 __user *) optval)) 421 (u32 __user *) optval))
415 err = -EFAULT; 422 err = -EFAULT;
416 423
417 break; 424 break;
418 425
419 case BT_POWER: 426 case BT_POWER:
420 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM 427 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
421 && sk->sk_type != SOCK_RAW) { 428 && sk->sk_type != SOCK_RAW) {
422 err = -EINVAL; 429 err = -EINVAL;
423 break; 430 break;
424 } 431 }
@@ -466,7 +473,8 @@ static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu)
466 return true; 473 return true;
467} 474}
468 475
469static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen) 476static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
477 char __user *optval, unsigned int optlen)
470{ 478{
471 struct sock *sk = sock->sk; 479 struct sock *sk = sock->sk;
472 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 480 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
@@ -529,6 +537,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
529 chan->fcs = opts.fcs; 537 chan->fcs = opts.fcs;
530 chan->max_tx = opts.max_tx; 538 chan->max_tx = opts.max_tx;
531 chan->tx_win = opts.txwin_size; 539 chan->tx_win = opts.txwin_size;
540 chan->flush_to = opts.flush_to;
532 break; 541 break;
533 542
534 case L2CAP_LM: 543 case L2CAP_LM:
@@ -564,7 +573,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
564 return err; 573 return err;
565} 574}
566 575
567static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) 576static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
577 char __user *optval, unsigned int optlen)
568{ 578{
569 struct sock *sk = sock->sk; 579 struct sock *sk = sock->sk;
570 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 580 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
@@ -587,7 +597,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
587 switch (optname) { 597 switch (optname) {
588 case BT_SECURITY: 598 case BT_SECURITY:
589 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED && 599 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
590 chan->chan_type != L2CAP_CHAN_RAW) { 600 chan->chan_type != L2CAP_CHAN_RAW) {
591 err = -EINVAL; 601 err = -EINVAL;
592 break; 602 break;
593 } 603 }
@@ -601,7 +611,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
601 } 611 }
602 612
603 if (sec.level < BT_SECURITY_LOW || 613 if (sec.level < BT_SECURITY_LOW ||
604 sec.level > BT_SECURITY_HIGH) { 614 sec.level > BT_SECURITY_HIGH) {
605 err = -EINVAL; 615 err = -EINVAL;
606 break; 616 break;
607 } 617 }
@@ -627,7 +637,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
627 637
628 /* or for ACL link */ 638 /* or for ACL link */
629 } else if ((sk->sk_state == BT_CONNECT2 && 639 } else if ((sk->sk_state == BT_CONNECT2 &&
630 test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) || 640 test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) ||
631 sk->sk_state == BT_CONNECTED) { 641 sk->sk_state == BT_CONNECTED) {
632 if (!l2cap_chan_check_security(chan)) 642 if (!l2cap_chan_check_security(chan))
633 set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); 643 set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
@@ -684,7 +694,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
684 694
685 case BT_POWER: 695 case BT_POWER:
686 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED && 696 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
687 chan->chan_type != L2CAP_CHAN_RAW) { 697 chan->chan_type != L2CAP_CHAN_RAW) {
688 err = -EINVAL; 698 err = -EINVAL;
689 break; 699 break;
690 } 700 }
@@ -720,12 +730,17 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
720 } 730 }
721 731
722 if (chan->mode != L2CAP_MODE_ERTM && 732 if (chan->mode != L2CAP_MODE_ERTM &&
723 chan->mode != L2CAP_MODE_STREAMING) { 733 chan->mode != L2CAP_MODE_STREAMING) {
724 err = -EOPNOTSUPP; 734 err = -EOPNOTSUPP;
725 break; 735 break;
726 } 736 }
727 737
728 chan->chan_policy = (u8) opt; 738 chan->chan_policy = (u8) opt;
739
740 if (sk->sk_state == BT_CONNECTED &&
741 chan->move_role == L2CAP_MOVE_ROLE_NONE)
742 l2cap_move_start(chan);
743
729 break; 744 break;
730 745
731 default: 746 default:
@@ -737,7 +752,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
737 return err; 752 return err;
738} 753}
739 754
740static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) 755static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
756 struct msghdr *msg, size_t len)
741{ 757{
742 struct sock *sk = sock->sk; 758 struct sock *sk = sock->sk;
743 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 759 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
@@ -762,7 +778,8 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
762 return err; 778 return err;
763} 779}
764 780
765static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) 781static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
782 struct msghdr *msg, size_t len, int flags)
766{ 783{
767 struct sock *sk = sock->sk; 784 struct sock *sk = sock->sk;
768 struct l2cap_pinfo *pi = l2cap_pi(sk); 785 struct l2cap_pinfo *pi = l2cap_pi(sk);
@@ -866,7 +883,7 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
866 883
867 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) 884 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
868 err = bt_sock_wait_state(sk, BT_CLOSED, 885 err = bt_sock_wait_state(sk, BT_CLOSED,
869 sk->sk_lingertime); 886 sk->sk_lingertime);
870 } 887 }
871 888
872 if (!err && sk->sk_err) 889 if (!err && sk->sk_err)
@@ -930,7 +947,7 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
930 } 947 }
931 948
932 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, 949 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
933 GFP_ATOMIC); 950 GFP_ATOMIC);
934 if (!sk) 951 if (!sk)
935 return NULL; 952 return NULL;
936 953
@@ -938,6 +955,8 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
938 955
939 l2cap_sock_init(sk, parent); 956 l2cap_sock_init(sk, parent);
940 957
958 bt_accept_enqueue(parent, sk);
959
941 return l2cap_pi(sk)->chan; 960 return l2cap_pi(sk)->chan;
942} 961}
943 962
@@ -1068,6 +1087,15 @@ static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
1068 release_sock(sk); 1087 release_sock(sk);
1069} 1088}
1070 1089
1090static void l2cap_sock_defer_cb(struct l2cap_chan *chan)
1091{
1092 struct sock *sk = chan->data;
1093 struct sock *parent = bt_sk(sk)->parent;
1094
1095 if (parent)
1096 parent->sk_data_ready(parent, 0);
1097}
1098
1071static struct l2cap_ops l2cap_chan_ops = { 1099static struct l2cap_ops l2cap_chan_ops = {
1072 .name = "L2CAP Socket Interface", 1100 .name = "L2CAP Socket Interface",
1073 .new_connection = l2cap_sock_new_connection_cb, 1101 .new_connection = l2cap_sock_new_connection_cb,
@@ -1076,6 +1104,7 @@ static struct l2cap_ops l2cap_chan_ops = {
1076 .teardown = l2cap_sock_teardown_cb, 1104 .teardown = l2cap_sock_teardown_cb,
1077 .state_change = l2cap_sock_state_change_cb, 1105 .state_change = l2cap_sock_state_change_cb,
1078 .ready = l2cap_sock_ready_cb, 1106 .ready = l2cap_sock_ready_cb,
1107 .defer = l2cap_sock_defer_cb,
1079 .alloc_skb = l2cap_sock_alloc_skb_cb, 1108 .alloc_skb = l2cap_sock_alloc_skb_cb,
1080}; 1109};
1081 1110
@@ -1083,7 +1112,8 @@ static void l2cap_sock_destruct(struct sock *sk)
1083{ 1112{
1084 BT_DBG("sk %p", sk); 1113 BT_DBG("sk %p", sk);
1085 1114
1086 l2cap_chan_put(l2cap_pi(sk)->chan); 1115 if (l2cap_pi(sk)->chan)
1116 l2cap_chan_put(l2cap_pi(sk)->chan);
1087 if (l2cap_pi(sk)->rx_busy_skb) { 1117 if (l2cap_pi(sk)->rx_busy_skb) {
1088 kfree_skb(l2cap_pi(sk)->rx_busy_skb); 1118 kfree_skb(l2cap_pi(sk)->rx_busy_skb);
1089 l2cap_pi(sk)->rx_busy_skb = NULL; 1119 l2cap_pi(sk)->rx_busy_skb = NULL;
@@ -1159,7 +1189,8 @@ static struct proto l2cap_proto = {
1159 .obj_size = sizeof(struct l2cap_pinfo) 1189 .obj_size = sizeof(struct l2cap_pinfo)
1160}; 1190};
1161 1191
1162static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio) 1192static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
1193 int proto, gfp_t prio)
1163{ 1194{
1164 struct sock *sk; 1195 struct sock *sk;
1165 struct l2cap_chan *chan; 1196 struct l2cap_chan *chan;
@@ -1204,7 +1235,7 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
1204 sock->state = SS_UNCONNECTED; 1235 sock->state = SS_UNCONNECTED;
1205 1236
1206 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM && 1237 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
1207 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW) 1238 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
1208 return -ESOCKTNOSUPPORT; 1239 return -ESOCKTNOSUPPORT;
1209 1240
1210 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW)) 1241 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
@@ -1261,7 +1292,8 @@ int __init l2cap_init_sockets(void)
1261 goto error; 1292 goto error;
1262 } 1293 }
1263 1294
1264 err = bt_procfs_init(THIS_MODULE, &init_net, "l2cap", &l2cap_sk_list, NULL); 1295 err = bt_procfs_init(THIS_MODULE, &init_net, "l2cap", &l2cap_sk_list,
1296 NULL);
1265 if (err < 0) { 1297 if (err < 0) {
1266 BT_ERR("Failed to create L2CAP proc file"); 1298 BT_ERR("Failed to create L2CAP proc file");
1267 bt_sock_unregister(BTPROTO_L2CAP); 1299 bt_sock_unregister(BTPROTO_L2CAP);
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index e1c97527e16c..b3fbc73516c4 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -41,20 +41,6 @@ void baswap(bdaddr_t *dst, bdaddr_t *src)
41} 41}
42EXPORT_SYMBOL(baswap); 42EXPORT_SYMBOL(baswap);
43 43
44char *batostr(bdaddr_t *ba)
45{
46 static char str[2][18];
47 static int i = 1;
48
49 i ^= 1;
50 sprintf(str[i], "%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X",
51 ba->b[5], ba->b[4], ba->b[3],
52 ba->b[2], ba->b[1], ba->b[0]);
53
54 return str[i];
55}
56EXPORT_SYMBOL(batostr);
57
58/* Bluetooth error codes to Unix errno mapping */ 44/* Bluetooth error codes to Unix errno mapping */
59int bt_to_errno(__u16 code) 45int bt_to_errno(__u16 code)
60{ 46{
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index aa2ea0a8142c..f559b966279c 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -222,7 +222,7 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
222 222
223 hdr = (void *) skb_put(skb, sizeof(*hdr)); 223 hdr = (void *) skb_put(skb, sizeof(*hdr));
224 224
225 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS); 225 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
226 hdr->index = cpu_to_le16(index); 226 hdr->index = cpu_to_le16(index);
227 hdr->len = cpu_to_le16(sizeof(*ev)); 227 hdr->len = cpu_to_le16(sizeof(*ev));
228 228
@@ -253,7 +253,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
253 253
254 hdr = (void *) skb_put(skb, sizeof(*hdr)); 254 hdr = (void *) skb_put(skb, sizeof(*hdr));
255 255
256 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE); 256 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
257 hdr->index = cpu_to_le16(index); 257 hdr->index = cpu_to_le16(index);
258 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len); 258 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
259 259
@@ -326,7 +326,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
326 struct hci_dev *d; 326 struct hci_dev *d;
327 size_t rp_len; 327 size_t rp_len;
328 u16 count; 328 u16 count;
329 int i, err; 329 int err;
330 330
331 BT_DBG("sock %p", sk); 331 BT_DBG("sock %p", sk);
332 332
@@ -347,9 +347,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
347 return -ENOMEM; 347 return -ENOMEM;
348 } 348 }
349 349
350 rp->num_controllers = cpu_to_le16(count); 350 count = 0;
351
352 i = 0;
353 list_for_each_entry(d, &hci_dev_list, list) { 351 list_for_each_entry(d, &hci_dev_list, list) {
354 if (test_bit(HCI_SETUP, &d->dev_flags)) 352 if (test_bit(HCI_SETUP, &d->dev_flags))
355 continue; 353 continue;
@@ -357,10 +355,13 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
357 if (!mgmt_valid_hdev(d)) 355 if (!mgmt_valid_hdev(d))
358 continue; 356 continue;
359 357
360 rp->index[i++] = cpu_to_le16(d->id); 358 rp->index[count++] = cpu_to_le16(d->id);
361 BT_DBG("Added hci%u", d->id); 359 BT_DBG("Added hci%u", d->id);
362 } 360 }
363 361
362 rp->num_controllers = cpu_to_le16(count);
363 rp_len = sizeof(*rp) + (2 * count);
364
364 read_unlock(&hci_dev_list_lock); 365 read_unlock(&hci_dev_list_lock);
365 366
366 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp, 367 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
@@ -376,15 +377,15 @@ static u32 get_supported_settings(struct hci_dev *hdev)
376 u32 settings = 0; 377 u32 settings = 0;
377 378
378 settings |= MGMT_SETTING_POWERED; 379 settings |= MGMT_SETTING_POWERED;
379 settings |= MGMT_SETTING_CONNECTABLE;
380 settings |= MGMT_SETTING_FAST_CONNECTABLE;
381 settings |= MGMT_SETTING_DISCOVERABLE;
382 settings |= MGMT_SETTING_PAIRABLE; 380 settings |= MGMT_SETTING_PAIRABLE;
383 381
384 if (lmp_ssp_capable(hdev)) 382 if (lmp_ssp_capable(hdev))
385 settings |= MGMT_SETTING_SSP; 383 settings |= MGMT_SETTING_SSP;
386 384
387 if (lmp_bredr_capable(hdev)) { 385 if (lmp_bredr_capable(hdev)) {
386 settings |= MGMT_SETTING_CONNECTABLE;
387 settings |= MGMT_SETTING_FAST_CONNECTABLE;
388 settings |= MGMT_SETTING_DISCOVERABLE;
388 settings |= MGMT_SETTING_BREDR; 389 settings |= MGMT_SETTING_BREDR;
389 settings |= MGMT_SETTING_LINK_SECURITY; 390 settings |= MGMT_SETTING_LINK_SECURITY;
390 } 391 }
@@ -484,7 +485,7 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
484 ptr += (name_len + 2); 485 ptr += (name_len + 2);
485 } 486 }
486 487
487 if (hdev->inq_tx_power) { 488 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
488 ptr[0] = 2; 489 ptr[0] = 2;
489 ptr[1] = EIR_TX_POWER; 490 ptr[1] = EIR_TX_POWER;
490 ptr[2] = (u8) hdev->inq_tx_power; 491 ptr[2] = (u8) hdev->inq_tx_power;
@@ -565,7 +566,7 @@ static int update_eir(struct hci_dev *hdev)
565 if (!hdev_is_powered(hdev)) 566 if (!hdev_is_powered(hdev))
566 return 0; 567 return 0;
567 568
568 if (!(hdev->features[6] & LMP_EXT_INQ)) 569 if (!lmp_ext_inq_capable(hdev))
569 return 0; 570 return 0;
570 571
571 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) 572 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
@@ -832,7 +833,7 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
832 if (hdev) 833 if (hdev)
833 hdr->index = cpu_to_le16(hdev->id); 834 hdr->index = cpu_to_le16(hdev->id);
834 else 835 else
835 hdr->index = cpu_to_le16(MGMT_INDEX_NONE); 836 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
836 hdr->len = cpu_to_le16(data_len); 837 hdr->len = cpu_to_le16(data_len);
837 838
838 if (data) 839 if (data)
@@ -867,6 +868,10 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
867 868
868 BT_DBG("request for %s", hdev->name); 869 BT_DBG("request for %s", hdev->name);
869 870
871 if (!lmp_bredr_capable(hdev))
872 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
873 MGMT_STATUS_NOT_SUPPORTED);
874
870 timeout = __le16_to_cpu(cp->timeout); 875 timeout = __le16_to_cpu(cp->timeout);
871 if (!cp->val && timeout > 0) 876 if (!cp->val && timeout > 0)
872 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, 877 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
@@ -962,6 +967,10 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
962 967
963 BT_DBG("request for %s", hdev->name); 968 BT_DBG("request for %s", hdev->name);
964 969
970 if (!lmp_bredr_capable(hdev))
971 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
972 MGMT_STATUS_NOT_SUPPORTED);
973
965 hci_dev_lock(hdev); 974 hci_dev_lock(hdev);
966 975
967 if (!hdev_is_powered(hdev)) { 976 if (!hdev_is_powered(hdev)) {
@@ -1060,6 +1069,10 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1060 1069
1061 BT_DBG("request for %s", hdev->name); 1070 BT_DBG("request for %s", hdev->name);
1062 1071
1072 if (!lmp_bredr_capable(hdev))
1073 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1074 MGMT_STATUS_NOT_SUPPORTED);
1075
1063 hci_dev_lock(hdev); 1076 hci_dev_lock(hdev);
1064 1077
1065 if (!hdev_is_powered(hdev)) { 1078 if (!hdev_is_powered(hdev)) {
@@ -1213,7 +1226,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1213 } 1226 }
1214 1227
1215 val = !!cp->val; 1228 val = !!cp->val;
1216 enabled = !!(hdev->host_features[0] & LMP_HOST_LE); 1229 enabled = lmp_host_le_capable(hdev);
1217 1230
1218 if (!hdev_is_powered(hdev) || val == enabled) { 1231 if (!hdev_is_powered(hdev) || val == enabled) {
1219 bool changed = false; 1232 bool changed = false;
@@ -1249,7 +1262,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1249 1262
1250 if (val) { 1263 if (val) {
1251 hci_cp.le = val; 1264 hci_cp.le = val;
1252 hci_cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR); 1265 hci_cp.simul = lmp_le_br_capable(hdev);
1253 } 1266 }
1254 1267
1255 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp), 1268 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
@@ -1366,6 +1379,7 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1366 continue; 1379 continue;
1367 1380
1368 list_del(&match->list); 1381 list_del(&match->list);
1382 kfree(match);
1369 found++; 1383 found++;
1370 } 1384 }
1371 1385
@@ -2594,6 +2608,10 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2594 2608
2595 BT_DBG("%s", hdev->name); 2609 BT_DBG("%s", hdev->name);
2596 2610
2611 if (!lmp_bredr_capable(hdev))
2612 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2613 MGMT_STATUS_NOT_SUPPORTED);
2614
2597 if (!hdev_is_powered(hdev)) 2615 if (!hdev_is_powered(hdev))
2598 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 2616 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2599 MGMT_STATUS_NOT_POWERED); 2617 MGMT_STATUS_NOT_POWERED);
@@ -2871,6 +2889,21 @@ static void settings_rsp(struct pending_cmd *cmd, void *data)
2871 mgmt_pending_free(cmd); 2889 mgmt_pending_free(cmd);
2872} 2890}
2873 2891
2892static int set_bredr_scan(struct hci_dev *hdev)
2893{
2894 u8 scan = 0;
2895
2896 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2897 scan |= SCAN_PAGE;
2898 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
2899 scan |= SCAN_INQUIRY;
2900
2901 if (!scan)
2902 return 0;
2903
2904 return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2905}
2906
2874int mgmt_powered(struct hci_dev *hdev, u8 powered) 2907int mgmt_powered(struct hci_dev *hdev, u8 powered)
2875{ 2908{
2876 struct cmd_lookup match = { NULL, hdev }; 2909 struct cmd_lookup match = { NULL, hdev };
@@ -2882,17 +2915,8 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
2882 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); 2915 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
2883 2916
2884 if (powered) { 2917 if (powered) {
2885 u8 scan = 0; 2918 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
2886 2919 !lmp_host_ssp_capable(hdev)) {
2887 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
2888 scan |= SCAN_PAGE;
2889 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
2890 scan |= SCAN_INQUIRY;
2891
2892 if (scan)
2893 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2894
2895 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2896 u8 ssp = 1; 2920 u8 ssp = 1;
2897 2921
2898 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &ssp); 2922 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
@@ -2902,15 +2926,24 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
2902 struct hci_cp_write_le_host_supported cp; 2926 struct hci_cp_write_le_host_supported cp;
2903 2927
2904 cp.le = 1; 2928 cp.le = 1;
2905 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR); 2929 cp.simul = lmp_le_br_capable(hdev);
2906 2930
2907 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, 2931 /* Check first if we already have the right
2908 sizeof(cp), &cp); 2932 * host state (host features set)
2933 */
2934 if (cp.le != lmp_host_le_capable(hdev) ||
2935 cp.simul != lmp_host_le_br_capable(hdev))
2936 hci_send_cmd(hdev,
2937 HCI_OP_WRITE_LE_HOST_SUPPORTED,
2938 sizeof(cp), &cp);
2909 } 2939 }
2910 2940
2911 update_class(hdev); 2941 if (lmp_bredr_capable(hdev)) {
2912 update_name(hdev, hdev->dev_name); 2942 set_bredr_scan(hdev);
2913 update_eir(hdev); 2943 update_class(hdev);
2944 update_name(hdev, hdev->dev_name);
2945 update_eir(hdev);
2946 }
2914 } else { 2947 } else {
2915 u8 status = MGMT_STATUS_NOT_POWERED; 2948 u8 status = MGMT_STATUS_NOT_POWERED;
2916 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); 2949 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
@@ -3125,6 +3158,9 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3125 struct pending_cmd *cmd; 3158 struct pending_cmd *cmd;
3126 int err; 3159 int err;
3127 3160
3161 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3162 hdev);
3163
3128 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev); 3164 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3129 if (!cmd) 3165 if (!cmd)
3130 return -ENOENT; 3166 return -ENOENT;
@@ -3137,8 +3173,6 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3137 3173
3138 mgmt_pending_remove(cmd); 3174 mgmt_pending_remove(cmd);
3139 3175
3140 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3141 hdev);
3142 return err; 3176 return err;
3143} 3177}
3144 3178
@@ -3358,7 +3392,7 @@ static int clear_eir(struct hci_dev *hdev)
3358{ 3392{
3359 struct hci_cp_write_eir cp; 3393 struct hci_cp_write_eir cp;
3360 3394
3361 if (!(hdev->features[6] & LMP_EXT_INQ)) 3395 if (!lmp_ext_inq_capable(hdev))
3362 return 0; 3396 return 0;
3363 3397
3364 memset(hdev->eir, 0, sizeof(hdev->eir)); 3398 memset(hdev->eir, 0, sizeof(hdev->eir));
@@ -3490,7 +3524,12 @@ send_event:
3490 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, 3524 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev,
3491 sizeof(ev), cmd ? cmd->sk : NULL); 3525 sizeof(ev), cmd ? cmd->sk : NULL);
3492 3526
3493 update_eir(hdev); 3527 /* EIR is taken care of separately when powering on the
3528 * adapter so only update them here if this is a name change
3529 * unrelated to power on.
3530 */
3531 if (!test_bit(HCI_INIT, &hdev->flags))
3532 update_eir(hdev);
3494 3533
3495failed: 3534failed:
3496 if (cmd) 3535 if (cmd)
@@ -3585,9 +3624,9 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3585 ev->addr.type = link_to_bdaddr(link_type, addr_type); 3624 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3586 ev->rssi = rssi; 3625 ev->rssi = rssi;
3587 if (cfm_name) 3626 if (cfm_name)
3588 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME); 3627 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
3589 if (!ssp) 3628 if (!ssp)
3590 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING); 3629 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
3591 3630
3592 if (eir_len > 0) 3631 if (eir_len > 0)
3593 memcpy(ev->eir, eir, eir_len); 3632 memcpy(ev->eir, eir, eir_len);
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index c75107ef8920..201fdf737209 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -377,8 +377,8 @@ static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst,
377 int err = 0; 377 int err = 0;
378 u8 dlci; 378 u8 dlci;
379 379
380 BT_DBG("dlc %p state %ld %s %s channel %d", 380 BT_DBG("dlc %p state %ld %pMR -> %pMR channel %d",
381 d, d->state, batostr(src), batostr(dst), channel); 381 d, d->state, src, dst, channel);
382 382
383 if (channel < 1 || channel > 30) 383 if (channel < 1 || channel > 30)
384 return -EINVAL; 384 return -EINVAL;
@@ -676,7 +676,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
676 struct socket *sock; 676 struct socket *sock;
677 struct sock *sk; 677 struct sock *sk;
678 678
679 BT_DBG("%s %s", batostr(src), batostr(dst)); 679 BT_DBG("%pMR -> %pMR", src, dst);
680 680
681 *err = rfcomm_l2sock_create(&sock); 681 *err = rfcomm_l2sock_create(&sock);
682 if (*err < 0) 682 if (*err < 0)
@@ -709,7 +709,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
709 709
710 bacpy(&addr.l2_bdaddr, dst); 710 bacpy(&addr.l2_bdaddr, dst);
711 addr.l2_family = AF_BLUETOOTH; 711 addr.l2_family = AF_BLUETOOTH;
712 addr.l2_psm = cpu_to_le16(RFCOMM_PSM); 712 addr.l2_psm = __constant_cpu_to_le16(RFCOMM_PSM);
713 addr.l2_cid = 0; 713 addr.l2_cid = 0;
714 *err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK); 714 *err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK);
715 if (*err == 0 || *err == -EINPROGRESS) 715 if (*err == 0 || *err == -EINPROGRESS)
@@ -1987,7 +1987,7 @@ static int rfcomm_add_listener(bdaddr_t *ba)
1987 /* Bind socket */ 1987 /* Bind socket */
1988 bacpy(&addr.l2_bdaddr, ba); 1988 bacpy(&addr.l2_bdaddr, ba);
1989 addr.l2_family = AF_BLUETOOTH; 1989 addr.l2_family = AF_BLUETOOTH;
1990 addr.l2_psm = cpu_to_le16(RFCOMM_PSM); 1990 addr.l2_psm = __constant_cpu_to_le16(RFCOMM_PSM);
1991 addr.l2_cid = 0; 1991 addr.l2_cid = 0;
1992 err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr)); 1992 err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
1993 if (err < 0) { 1993 if (err < 0) {
@@ -2125,11 +2125,10 @@ static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x)
2125 list_for_each_entry(d, &s->dlcs, list) { 2125 list_for_each_entry(d, &s->dlcs, list) {
2126 struct sock *sk = s->sock->sk; 2126 struct sock *sk = s->sock->sk;
2127 2127
2128 seq_printf(f, "%s %s %ld %d %d %d %d\n", 2128 seq_printf(f, "%pMR %pMR %ld %d %d %d %d\n",
2129 batostr(&bt_sk(sk)->src), 2129 &bt_sk(sk)->src, &bt_sk(sk)->dst,
2130 batostr(&bt_sk(sk)->dst), 2130 d->state, d->dlci, d->mtu,
2131 d->state, d->dlci, d->mtu, 2131 d->rx_credits, d->tx_credits);
2132 d->rx_credits, d->tx_credits);
2133 } 2132 }
2134 } 2133 }
2135 2134
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index b3226f3658cf..ce3f6658f4b2 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -334,7 +334,7 @@ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr
334 struct sock *sk = sock->sk; 334 struct sock *sk = sock->sk;
335 int err = 0; 335 int err = 0;
336 336
337 BT_DBG("sk %p %s", sk, batostr(&sa->rc_bdaddr)); 337 BT_DBG("sk %p %pMR", sk, &sa->rc_bdaddr);
338 338
339 if (!addr || addr->sa_family != AF_BLUETOOTH) 339 if (!addr || addr->sa_family != AF_BLUETOOTH)
340 return -EINVAL; 340 return -EINVAL;
@@ -467,7 +467,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
467 long timeo; 467 long timeo;
468 int err = 0; 468 int err = 0;
469 469
470 lock_sock(sk); 470 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
471 471
472 if (sk->sk_type != SOCK_STREAM) { 472 if (sk->sk_type != SOCK_STREAM) {
473 err = -EINVAL; 473 err = -EINVAL;
@@ -504,7 +504,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
504 504
505 release_sock(sk); 505 release_sock(sk);
506 timeo = schedule_timeout(timeo); 506 timeo = schedule_timeout(timeo);
507 lock_sock(sk); 507 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
508 } 508 }
509 __set_current_state(TASK_RUNNING); 509 __set_current_state(TASK_RUNNING);
510 remove_wait_queue(sk_sleep(sk), &wait); 510 remove_wait_queue(sk_sleep(sk), &wait);
@@ -975,10 +975,9 @@ static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
975 read_lock(&rfcomm_sk_list.lock); 975 read_lock(&rfcomm_sk_list.lock);
976 976
977 sk_for_each(sk, node, &rfcomm_sk_list.head) { 977 sk_for_each(sk, node, &rfcomm_sk_list.head) {
978 seq_printf(f, "%s %s %d %d\n", 978 seq_printf(f, "%pMR %pMR %d %d\n",
979 batostr(&bt_sk(sk)->src), 979 &bt_sk(sk)->src, &bt_sk(sk)->dst,
980 batostr(&bt_sk(sk)->dst), 980 sk->sk_state, rfcomm_pi(sk)->channel);
981 sk->sk_state, rfcomm_pi(sk)->channel);
982 } 981 }
983 982
984 read_unlock(&rfcomm_sk_list.lock); 983 read_unlock(&rfcomm_sk_list.lock);
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index ccc248791d50..bd6fd0f43d2b 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -166,7 +166,7 @@ static struct device *rfcomm_get_device(struct rfcomm_dev *dev)
166static ssize_t show_address(struct device *tty_dev, struct device_attribute *attr, char *buf) 166static ssize_t show_address(struct device *tty_dev, struct device_attribute *attr, char *buf)
167{ 167{
168 struct rfcomm_dev *dev = dev_get_drvdata(tty_dev); 168 struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
169 return sprintf(buf, "%s\n", batostr(&dev->dst)); 169 return sprintf(buf, "%pMR\n", &dev->dst);
170} 170}
171 171
172static ssize_t show_channel(struct device *tty_dev, struct device_attribute *attr, char *buf) 172static ssize_t show_channel(struct device *tty_dev, struct device_attribute *attr, char *buf)
@@ -663,8 +663,8 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
663 if (!dev) 663 if (!dev)
664 return -ENODEV; 664 return -ENODEV;
665 665
666 BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst), 666 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
667 dev->channel, dev->port.count); 667 dev->channel, dev->port.count);
668 668
669 spin_lock_irqsave(&dev->port.lock, flags); 669 spin_lock_irqsave(&dev->port.lock, flags);
670 if (++dev->port.count > 1) { 670 if (++dev->port.count > 1) {
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index dc42b917aaaf..531a93d613d4 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -131,15 +131,6 @@ static int sco_conn_del(struct hci_conn *hcon, int err)
131 sco_sock_clear_timer(sk); 131 sco_sock_clear_timer(sk);
132 sco_chan_del(sk, err); 132 sco_chan_del(sk, err);
133 bh_unlock_sock(sk); 133 bh_unlock_sock(sk);
134
135 sco_conn_lock(conn);
136 conn->sk = NULL;
137 sco_pi(sk)->conn = NULL;
138 sco_conn_unlock(conn);
139
140 if (conn->hcon)
141 hci_conn_put(conn->hcon);
142
143 sco_sock_kill(sk); 134 sco_sock_kill(sk);
144 } 135 }
145 136
@@ -172,7 +163,7 @@ static int sco_connect(struct sock *sk)
172 struct hci_dev *hdev; 163 struct hci_dev *hdev;
173 int err, type; 164 int err, type;
174 165
175 BT_DBG("%s -> %s", batostr(src), batostr(dst)); 166 BT_DBG("%pMR -> %pMR", src, dst);
176 167
177 hdev = hci_get_route(dst, src); 168 hdev = hci_get_route(dst, src);
178 if (!hdev) 169 if (!hdev)
@@ -397,6 +388,7 @@ static void sco_sock_init(struct sock *sk, struct sock *parent)
397 388
398 if (parent) { 389 if (parent) {
399 sk->sk_type = parent->sk_type; 390 sk->sk_type = parent->sk_type;
391 bt_sk(sk)->flags = bt_sk(parent)->flags;
400 security_sk_clone(parent, sk); 392 security_sk_clone(parent, sk);
401 } 393 }
402} 394}
@@ -460,7 +452,7 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
460 struct sock *sk = sock->sk; 452 struct sock *sk = sock->sk;
461 int err = 0; 453 int err = 0;
462 454
463 BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr)); 455 BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
464 456
465 if (!addr || addr->sa_family != AF_BLUETOOTH) 457 if (!addr || addr->sa_family != AF_BLUETOOTH)
466 return -EINVAL; 458 return -EINVAL;
@@ -662,16 +654,57 @@ static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
662 return err; 654 return err;
663} 655}
664 656
657static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
658 struct msghdr *msg, size_t len, int flags)
659{
660 struct sock *sk = sock->sk;
661 struct sco_pinfo *pi = sco_pi(sk);
662
663 lock_sock(sk);
664
665 if (sk->sk_state == BT_CONNECT2 &&
666 test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
667 hci_conn_accept(pi->conn->hcon, 0);
668 sk->sk_state = BT_CONFIG;
669
670 release_sock(sk);
671 return 0;
672 }
673
674 release_sock(sk);
675
676 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
677}
678
665static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) 679static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
666{ 680{
667 struct sock *sk = sock->sk; 681 struct sock *sk = sock->sk;
668 int err = 0; 682 int err = 0;
683 u32 opt;
669 684
670 BT_DBG("sk %p", sk); 685 BT_DBG("sk %p", sk);
671 686
672 lock_sock(sk); 687 lock_sock(sk);
673 688
674 switch (optname) { 689 switch (optname) {
690
691 case BT_DEFER_SETUP:
692 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
693 err = -EINVAL;
694 break;
695 }
696
697 if (get_user(opt, (u32 __user *) optval)) {
698 err = -EFAULT;
699 break;
700 }
701
702 if (opt)
703 set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
704 else
705 clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
706 break;
707
675 default: 708 default:
676 err = -ENOPROTOOPT; 709 err = -ENOPROTOOPT;
677 break; 710 break;
@@ -753,6 +786,19 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char
753 lock_sock(sk); 786 lock_sock(sk);
754 787
755 switch (optname) { 788 switch (optname) {
789
790 case BT_DEFER_SETUP:
791 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
792 err = -EINVAL;
793 break;
794 }
795
796 if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
797 (u32 __user *) optval))
798 err = -EFAULT;
799
800 break;
801
756 default: 802 default:
757 err = -ENOPROTOOPT; 803 err = -ENOPROTOOPT;
758 break; 804 break;
@@ -830,6 +876,16 @@ static void sco_chan_del(struct sock *sk, int err)
830 876
831 BT_DBG("sk %p, conn %p, err %d", sk, conn, err); 877 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
832 878
879 if (conn) {
880 sco_conn_lock(conn);
881 conn->sk = NULL;
882 sco_pi(sk)->conn = NULL;
883 sco_conn_unlock(conn);
884
885 if (conn->hcon)
886 hci_conn_put(conn->hcon);
887 }
888
833 sk->sk_state = BT_CLOSED; 889 sk->sk_state = BT_CLOSED;
834 sk->sk_err = err; 890 sk->sk_err = err;
835 sk->sk_state_change(sk); 891 sk->sk_state_change(sk);
@@ -874,7 +930,10 @@ static void sco_conn_ready(struct sco_conn *conn)
874 hci_conn_hold(conn->hcon); 930 hci_conn_hold(conn->hcon);
875 __sco_chan_add(conn, sk, parent); 931 __sco_chan_add(conn, sk, parent);
876 932
877 sk->sk_state = BT_CONNECTED; 933 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
934 sk->sk_state = BT_CONNECT2;
935 else
936 sk->sk_state = BT_CONNECTED;
878 937
879 /* Wake up parent */ 938 /* Wake up parent */
880 parent->sk_data_ready(parent, 1); 939 parent->sk_data_ready(parent, 1);
@@ -887,13 +946,13 @@ done:
887} 946}
888 947
889/* ----- SCO interface with lower layer (HCI) ----- */ 948/* ----- SCO interface with lower layer (HCI) ----- */
890int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) 949int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
891{ 950{
892 struct sock *sk; 951 struct sock *sk;
893 struct hlist_node *node; 952 struct hlist_node *node;
894 int lm = 0; 953 int lm = 0;
895 954
896 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); 955 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
897 956
898 /* Find listening sockets */ 957 /* Find listening sockets */
899 read_lock(&sco_sk_list.lock); 958 read_lock(&sco_sk_list.lock);
@@ -904,6 +963,9 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
904 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) || 963 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
905 !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { 964 !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
906 lm |= HCI_LM_ACCEPT; 965 lm |= HCI_LM_ACCEPT;
966
967 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
968 *flags |= HCI_PROTO_DEFER;
907 break; 969 break;
908 } 970 }
909 } 971 }
@@ -914,7 +976,7 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
914 976
915void sco_connect_cfm(struct hci_conn *hcon, __u8 status) 977void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
916{ 978{
917 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); 979 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
918 if (!status) { 980 if (!status) {
919 struct sco_conn *conn; 981 struct sco_conn *conn;
920 982
@@ -959,8 +1021,8 @@ static int sco_debugfs_show(struct seq_file *f, void *p)
959 read_lock(&sco_sk_list.lock); 1021 read_lock(&sco_sk_list.lock);
960 1022
961 sk_for_each(sk, node, &sco_sk_list.head) { 1023 sk_for_each(sk, node, &sco_sk_list.head) {
962 seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src), 1024 seq_printf(f, "%pMR %pMR %d\n", &bt_sk(sk)->src,
963 batostr(&bt_sk(sk)->dst), sk->sk_state); 1025 &bt_sk(sk)->dst, sk->sk_state);
964 } 1026 }
965 1027
966 read_unlock(&sco_sk_list.lock); 1028 read_unlock(&sco_sk_list.lock);
@@ -992,7 +1054,7 @@ static const struct proto_ops sco_sock_ops = {
992 .accept = sco_sock_accept, 1054 .accept = sco_sock_accept,
993 .getname = sco_sock_getname, 1055 .getname = sco_sock_getname,
994 .sendmsg = sco_sock_sendmsg, 1056 .sendmsg = sco_sock_sendmsg,
995 .recvmsg = bt_sock_recvmsg, 1057 .recvmsg = sco_sock_recvmsg,
996 .poll = bt_sock_poll, 1058 .poll = bt_sock_poll,
997 .ioctl = bt_sock_ioctl, 1059 .ioctl = bt_sock_ioctl,
998 .mmap = sock_no_mmap, 1060 .mmap = sock_no_mmap,
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 2ac8d50861e0..68a9587c9694 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -167,7 +167,7 @@ static struct sk_buff *smp_build_cmd(struct l2cap_conn *conn, u8 code,
167 167
168 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 168 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
169 lh->len = cpu_to_le16(sizeof(code) + dlen); 169 lh->len = cpu_to_le16(sizeof(code) + dlen);
170 lh->cid = cpu_to_le16(L2CAP_CID_SMP); 170 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SMP);
171 171
172 memcpy(skb_put(skb, sizeof(code)), &code, sizeof(code)); 172 memcpy(skb_put(skb, sizeof(code)), &code, sizeof(code));
173 173
@@ -267,7 +267,7 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send)
267 267
268 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->flags); 268 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->flags);
269 mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type, 269 mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type,
270 hcon->dst_type, reason); 270 hcon->dst_type, HCI_ERROR_AUTH_FAILURE);
271 271
272 cancel_delayed_work_sync(&conn->security_timer); 272 cancel_delayed_work_sync(&conn->security_timer);
273 273
diff --git a/net/bridge/Makefile b/net/bridge/Makefile
index d0359ea8ee79..e859098f5ee9 100644
--- a/net/bridge/Makefile
+++ b/net/bridge/Makefile
@@ -12,6 +12,6 @@ bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o
12 12
13bridge-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o 13bridge-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o
14 14
15bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o 15bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o
16 16
17obj-$(CONFIG_BRIDGE_NF_EBTABLES) += netfilter/ 17obj-$(CONFIG_BRIDGE_NF_EBTABLES) += netfilter/
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 070e8a68cfc6..7c78e2640190 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -313,6 +313,8 @@ static const struct net_device_ops br_netdev_ops = {
313 .ndo_fdb_add = br_fdb_add, 313 .ndo_fdb_add = br_fdb_add,
314 .ndo_fdb_del = br_fdb_delete, 314 .ndo_fdb_del = br_fdb_delete,
315 .ndo_fdb_dump = br_fdb_dump, 315 .ndo_fdb_dump = br_fdb_dump,
316 .ndo_bridge_getlink = br_getlink,
317 .ndo_bridge_setlink = br_setlink,
316}; 318};
317 319
318static void br_dev_free(struct net_device *dev) 320static void br_dev_free(struct net_device *dev)
@@ -356,7 +358,7 @@ void br_dev_setup(struct net_device *dev)
356 br->bridge_id.prio[0] = 0x80; 358 br->bridge_id.prio[0] = 0x80;
357 br->bridge_id.prio[1] = 0x00; 359 br->bridge_id.prio[1] = 0x00;
358 360
359 memcpy(br->group_addr, br_group_address, ETH_ALEN); 361 memcpy(br->group_addr, eth_reserved_addr_base, ETH_ALEN);
360 362
361 br->stp_enabled = BR_NO_STP; 363 br->stp_enabled = BR_NO_STP;
362 br->group_fwd_mask = BR_GROUPFWD_DEFAULT; 364 br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 1c8fdc3558cd..37fe693471a8 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -366,11 +366,11 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
366 366
367 err = netdev_set_master(dev, br->dev); 367 err = netdev_set_master(dev, br->dev);
368 if (err) 368 if (err)
369 goto err3; 369 goto err4;
370 370
371 err = netdev_rx_handler_register(dev, br_handle_frame, p); 371 err = netdev_rx_handler_register(dev, br_handle_frame, p);
372 if (err) 372 if (err)
373 goto err4; 373 goto err5;
374 374
375 dev->priv_flags |= IFF_BRIDGE_PORT; 375 dev->priv_flags |= IFF_BRIDGE_PORT;
376 376
@@ -402,8 +402,10 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
402 402
403 return 0; 403 return 0;
404 404
405err4: 405err5:
406 netdev_set_master(dev, NULL); 406 netdev_set_master(dev, NULL);
407err4:
408 br_netpoll_disable(p);
407err3: 409err3:
408 sysfs_remove_link(br->ifobj, p->dev->name); 410 sysfs_remove_link(br->ifobj, p->dev->name);
409err2: 411err2:
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 76f15fda0212..4b34207419b1 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -19,9 +19,6 @@
19#include <linux/export.h> 19#include <linux/export.h>
20#include "br_private.h" 20#include "br_private.h"
21 21
22/* Bridge group multicast address 802.1d (pg 51). */
23const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
24
25/* Hook for brouter */ 22/* Hook for brouter */
26br_should_route_hook_t __rcu *br_should_route_hook __read_mostly; 23br_should_route_hook_t __rcu *br_should_route_hook __read_mostly;
27EXPORT_SYMBOL(br_should_route_hook); 24EXPORT_SYMBOL(br_should_route_hook);
@@ -127,18 +124,6 @@ static int br_handle_local_finish(struct sk_buff *skb)
127 return 0; /* process further */ 124 return 0; /* process further */
128} 125}
129 126
130/* Does address match the link local multicast address.
131 * 01:80:c2:00:00:0X
132 */
133static inline int is_link_local(const unsigned char *dest)
134{
135 __be16 *a = (__be16 *)dest;
136 static const __be16 *b = (const __be16 *)br_group_address;
137 static const __be16 m = cpu_to_be16(0xfff0);
138
139 return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
140}
141
142/* 127/*
143 * Return NULL if skb is handled 128 * Return NULL if skb is handled
144 * note: already called with rcu_read_lock 129 * note: already called with rcu_read_lock
@@ -162,7 +147,7 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
162 147
163 p = br_port_get_rcu(skb->dev); 148 p = br_port_get_rcu(skb->dev);
164 149
165 if (unlikely(is_link_local(dest))) { 150 if (unlikely(is_link_local_ether_addr(dest))) {
166 /* 151 /*
167 * See IEEE 802.1D Table 7-10 Reserved addresses 152 * See IEEE 802.1D Table 7-10 Reserved addresses
168 * 153 *
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 7222fe1d5460..cd8c3a44ab7d 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -85,13 +85,14 @@ static int get_fdb_entries(struct net_bridge *br, void __user *userbuf,
85/* called with RTNL */ 85/* called with RTNL */
86static int add_del_if(struct net_bridge *br, int ifindex, int isadd) 86static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
87{ 87{
88 struct net *net = dev_net(br->dev);
88 struct net_device *dev; 89 struct net_device *dev;
89 int ret; 90 int ret;
90 91
91 if (!capable(CAP_NET_ADMIN)) 92 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
92 return -EPERM; 93 return -EPERM;
93 94
94 dev = __dev_get_by_index(dev_net(br->dev), ifindex); 95 dev = __dev_get_by_index(net, ifindex);
95 if (dev == NULL) 96 if (dev == NULL)
96 return -EINVAL; 97 return -EINVAL;
97 98
@@ -178,25 +179,25 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
178 } 179 }
179 180
180 case BRCTL_SET_BRIDGE_FORWARD_DELAY: 181 case BRCTL_SET_BRIDGE_FORWARD_DELAY:
181 if (!capable(CAP_NET_ADMIN)) 182 if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
182 return -EPERM; 183 return -EPERM;
183 184
184 return br_set_forward_delay(br, args[1]); 185 return br_set_forward_delay(br, args[1]);
185 186
186 case BRCTL_SET_BRIDGE_HELLO_TIME: 187 case BRCTL_SET_BRIDGE_HELLO_TIME:
187 if (!capable(CAP_NET_ADMIN)) 188 if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
188 return -EPERM; 189 return -EPERM;
189 190
190 return br_set_hello_time(br, args[1]); 191 return br_set_hello_time(br, args[1]);
191 192
192 case BRCTL_SET_BRIDGE_MAX_AGE: 193 case BRCTL_SET_BRIDGE_MAX_AGE:
193 if (!capable(CAP_NET_ADMIN)) 194 if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
194 return -EPERM; 195 return -EPERM;
195 196
196 return br_set_max_age(br, args[1]); 197 return br_set_max_age(br, args[1]);
197 198
198 case BRCTL_SET_AGEING_TIME: 199 case BRCTL_SET_AGEING_TIME:
199 if (!capable(CAP_NET_ADMIN)) 200 if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
200 return -EPERM; 201 return -EPERM;
201 202
202 br->ageing_time = clock_t_to_jiffies(args[1]); 203 br->ageing_time = clock_t_to_jiffies(args[1]);
@@ -236,14 +237,14 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
236 } 237 }
237 238
238 case BRCTL_SET_BRIDGE_STP_STATE: 239 case BRCTL_SET_BRIDGE_STP_STATE:
239 if (!capable(CAP_NET_ADMIN)) 240 if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
240 return -EPERM; 241 return -EPERM;
241 242
242 br_stp_set_enabled(br, args[1]); 243 br_stp_set_enabled(br, args[1]);
243 return 0; 244 return 0;
244 245
245 case BRCTL_SET_BRIDGE_PRIORITY: 246 case BRCTL_SET_BRIDGE_PRIORITY:
246 if (!capable(CAP_NET_ADMIN)) 247 if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
247 return -EPERM; 248 return -EPERM;
248 249
249 spin_lock_bh(&br->lock); 250 spin_lock_bh(&br->lock);
@@ -256,7 +257,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
256 struct net_bridge_port *p; 257 struct net_bridge_port *p;
257 int ret; 258 int ret;
258 259
259 if (!capable(CAP_NET_ADMIN)) 260 if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
260 return -EPERM; 261 return -EPERM;
261 262
262 spin_lock_bh(&br->lock); 263 spin_lock_bh(&br->lock);
@@ -273,7 +274,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
273 struct net_bridge_port *p; 274 struct net_bridge_port *p;
274 int ret; 275 int ret;
275 276
276 if (!capable(CAP_NET_ADMIN)) 277 if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
277 return -EPERM; 278 return -EPERM;
278 279
279 spin_lock_bh(&br->lock); 280 spin_lock_bh(&br->lock);
@@ -330,7 +331,7 @@ static int old_deviceless(struct net *net, void __user *uarg)
330 { 331 {
331 char buf[IFNAMSIZ]; 332 char buf[IFNAMSIZ];
332 333
333 if (!capable(CAP_NET_ADMIN)) 334 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
334 return -EPERM; 335 return -EPERM;
335 336
336 if (copy_from_user(buf, (void __user *)args[1], IFNAMSIZ)) 337 if (copy_from_user(buf, (void __user *)args[1], IFNAMSIZ))
@@ -360,7 +361,7 @@ int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *uar
360 { 361 {
361 char buf[IFNAMSIZ]; 362 char buf[IFNAMSIZ];
362 363
363 if (!capable(CAP_NET_ADMIN)) 364 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
364 return -EPERM; 365 return -EPERM;
365 366
366 if (copy_from_user(buf, uarg, IFNAMSIZ)) 367 if (copy_from_user(buf, uarg, IFNAMSIZ))
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
new file mode 100644
index 000000000000..acc9f4cc18f7
--- /dev/null
+++ b/net/bridge/br_mdb.c
@@ -0,0 +1,493 @@
1#include <linux/err.h>
2#include <linux/igmp.h>
3#include <linux/kernel.h>
4#include <linux/netdevice.h>
5#include <linux/rculist.h>
6#include <linux/skbuff.h>
7#include <linux/if_ether.h>
8#include <net/ip.h>
9#include <net/netlink.h>
10#if IS_ENABLED(CONFIG_IPV6)
11#include <net/ipv6.h>
12#endif
13
14#include "br_private.h"
15
16static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
17 struct net_device *dev)
18{
19 struct net_bridge *br = netdev_priv(dev);
20 struct net_bridge_port *p;
21 struct hlist_node *n;
22 struct nlattr *nest;
23
24 if (!br->multicast_router || hlist_empty(&br->router_list))
25 return 0;
26
27 nest = nla_nest_start(skb, MDBA_ROUTER);
28 if (nest == NULL)
29 return -EMSGSIZE;
30
31 hlist_for_each_entry_rcu(p, n, &br->router_list, rlist) {
32 if (p && nla_put_u32(skb, MDBA_ROUTER_PORT, p->dev->ifindex))
33 goto fail;
34 }
35
36 nla_nest_end(skb, nest);
37 return 0;
38fail:
39 nla_nest_cancel(skb, nest);
40 return -EMSGSIZE;
41}
42
43static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
44 struct net_device *dev)
45{
46 struct net_bridge *br = netdev_priv(dev);
47 struct net_bridge_mdb_htable *mdb;
48 struct nlattr *nest, *nest2;
49 int i, err = 0;
50 int idx = 0, s_idx = cb->args[1];
51
52 if (br->multicast_disabled)
53 return 0;
54
55 mdb = rcu_dereference(br->mdb);
56 if (!mdb)
57 return 0;
58
59 nest = nla_nest_start(skb, MDBA_MDB);
60 if (nest == NULL)
61 return -EMSGSIZE;
62
63 for (i = 0; i < mdb->max; i++) {
64 struct hlist_node *h;
65 struct net_bridge_mdb_entry *mp;
66 struct net_bridge_port_group *p, **pp;
67 struct net_bridge_port *port;
68
69 hlist_for_each_entry_rcu(mp, h, &mdb->mhash[i], hlist[mdb->ver]) {
70 if (idx < s_idx)
71 goto skip;
72
73 nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
74 if (nest2 == NULL) {
75 err = -EMSGSIZE;
76 goto out;
77 }
78
79 for (pp = &mp->ports;
80 (p = rcu_dereference(*pp)) != NULL;
81 pp = &p->next) {
82 port = p->port;
83 if (port) {
84 struct br_mdb_entry e;
85 e.ifindex = port->dev->ifindex;
86 e.state = p->state;
87 if (p->addr.proto == htons(ETH_P_IP))
88 e.addr.u.ip4 = p->addr.u.ip4;
89#if IS_ENABLED(CONFIG_IPV6)
90 if (p->addr.proto == htons(ETH_P_IPV6))
91 e.addr.u.ip6 = p->addr.u.ip6;
92#endif
93 e.addr.proto = p->addr.proto;
94 if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(e), &e)) {
95 nla_nest_cancel(skb, nest2);
96 err = -EMSGSIZE;
97 goto out;
98 }
99 }
100 }
101 nla_nest_end(skb, nest2);
102 skip:
103 idx++;
104 }
105 }
106
107out:
108 cb->args[1] = idx;
109 nla_nest_end(skb, nest);
110 return err;
111}
112
113static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
114{
115 struct net_device *dev;
116 struct net *net = sock_net(skb->sk);
117 struct nlmsghdr *nlh = NULL;
118 int idx = 0, s_idx;
119
120 s_idx = cb->args[0];
121
122 rcu_read_lock();
123
124 /* In theory this could be wrapped to 0... */
125 cb->seq = net->dev_base_seq + br_mdb_rehash_seq;
126
127 for_each_netdev_rcu(net, dev) {
128 if (dev->priv_flags & IFF_EBRIDGE) {
129 struct br_port_msg *bpm;
130
131 if (idx < s_idx)
132 goto skip;
133
134 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
135 cb->nlh->nlmsg_seq, RTM_GETMDB,
136 sizeof(*bpm), NLM_F_MULTI);
137 if (nlh == NULL)
138 break;
139
140 bpm = nlmsg_data(nlh);
141 bpm->ifindex = dev->ifindex;
142 if (br_mdb_fill_info(skb, cb, dev) < 0)
143 goto out;
144 if (br_rports_fill_info(skb, cb, dev) < 0)
145 goto out;
146
147 cb->args[1] = 0;
148 nlmsg_end(skb, nlh);
149 skip:
150 idx++;
151 }
152 }
153
154out:
155 if (nlh)
156 nlmsg_end(skb, nlh);
157 rcu_read_unlock();
158 cb->args[0] = idx;
159 return skb->len;
160}
161
162static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
163 struct net_device *dev,
164 struct br_mdb_entry *entry, u32 pid,
165 u32 seq, int type, unsigned int flags)
166{
167 struct nlmsghdr *nlh;
168 struct br_port_msg *bpm;
169 struct nlattr *nest, *nest2;
170
171 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
172 if (!nlh)
173 return -EMSGSIZE;
174
175 bpm = nlmsg_data(nlh);
176 bpm->family = AF_BRIDGE;
177 bpm->ifindex = dev->ifindex;
178 nest = nla_nest_start(skb, MDBA_MDB);
179 if (nest == NULL)
180 goto cancel;
181 nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
182 if (nest2 == NULL)
183 goto end;
184
185 if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
186 goto end;
187
188 nla_nest_end(skb, nest2);
189 nla_nest_end(skb, nest);
190 return nlmsg_end(skb, nlh);
191
192end:
193 nla_nest_end(skb, nest);
194cancel:
195 nlmsg_cancel(skb, nlh);
196 return -EMSGSIZE;
197}
198
199static inline size_t rtnl_mdb_nlmsg_size(void)
200{
201 return NLMSG_ALIGN(sizeof(struct br_port_msg))
202 + nla_total_size(sizeof(struct br_mdb_entry));
203}
204
205static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
206 int type)
207{
208 struct net *net = dev_net(dev);
209 struct sk_buff *skb;
210 int err = -ENOBUFS;
211
212 skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
213 if (!skb)
214 goto errout;
215
216 err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
217 if (err < 0) {
218 kfree_skb(skb);
219 goto errout;
220 }
221
222 rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
223 return;
224errout:
225 rtnl_set_sk_err(net, RTNLGRP_MDB, err);
226}
227
228void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
229 struct br_ip *group, int type)
230{
231 struct br_mdb_entry entry;
232
233 entry.ifindex = port->dev->ifindex;
234 entry.addr.proto = group->proto;
235 entry.addr.u.ip4 = group->u.ip4;
236#if IS_ENABLED(CONFIG_IPV6)
237 entry.addr.u.ip6 = group->u.ip6;
238#endif
239 __br_mdb_notify(dev, &entry, type);
240}
241
242static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
243{
244 if (entry->ifindex == 0)
245 return false;
246
247 if (entry->addr.proto == htons(ETH_P_IP)) {
248 if (!ipv4_is_multicast(entry->addr.u.ip4))
249 return false;
250 if (ipv4_is_local_multicast(entry->addr.u.ip4))
251 return false;
252#if IS_ENABLED(CONFIG_IPV6)
253 } else if (entry->addr.proto == htons(ETH_P_IPV6)) {
254 if (!ipv6_is_transient_multicast(&entry->addr.u.ip6))
255 return false;
256#endif
257 } else
258 return false;
259 if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
260 return false;
261
262 return true;
263}
264
265static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
266 struct net_device **pdev, struct br_mdb_entry **pentry)
267{
268 struct net *net = sock_net(skb->sk);
269 struct br_mdb_entry *entry;
270 struct br_port_msg *bpm;
271 struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
272 struct net_device *dev;
273 int err;
274
275 if (!capable(CAP_NET_ADMIN))
276 return -EPERM;
277
278 err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY, NULL);
279 if (err < 0)
280 return err;
281
282 bpm = nlmsg_data(nlh);
283 if (bpm->ifindex == 0) {
284 pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
285 return -EINVAL;
286 }
287
288 dev = __dev_get_by_index(net, bpm->ifindex);
289 if (dev == NULL) {
290 pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
291 return -ENODEV;
292 }
293
294 if (!(dev->priv_flags & IFF_EBRIDGE)) {
295 pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
296 return -EOPNOTSUPP;
297 }
298
299 *pdev = dev;
300
301 if (!tb[MDBA_SET_ENTRY] ||
302 nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
303 pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
304 return -EINVAL;
305 }
306
307 entry = nla_data(tb[MDBA_SET_ENTRY]);
308 if (!is_valid_mdb_entry(entry)) {
309 pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
310 return -EINVAL;
311 }
312
313 *pentry = entry;
314 return 0;
315}
316
317static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
318 struct br_ip *group, unsigned char state)
319{
320 struct net_bridge_mdb_entry *mp;
321 struct net_bridge_port_group *p;
322 struct net_bridge_port_group __rcu **pp;
323 struct net_bridge_mdb_htable *mdb;
324 int err;
325
326 mdb = mlock_dereference(br->mdb, br);
327 mp = br_mdb_ip_get(mdb, group);
328 if (!mp) {
329 mp = br_multicast_new_group(br, port, group);
330 err = PTR_ERR(mp);
331 if (IS_ERR(mp))
332 return err;
333 }
334
335 for (pp = &mp->ports;
336 (p = mlock_dereference(*pp, br)) != NULL;
337 pp = &p->next) {
338 if (p->port == port)
339 return -EEXIST;
340 if ((unsigned long)p->port < (unsigned long)port)
341 break;
342 }
343
344 p = br_multicast_new_port_group(port, group, *pp, state);
345 if (unlikely(!p))
346 return -ENOMEM;
347 rcu_assign_pointer(*pp, p);
348
349 br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
350 return 0;
351}
352
353static int __br_mdb_add(struct net *net, struct net_bridge *br,
354 struct br_mdb_entry *entry)
355{
356 struct br_ip ip;
357 struct net_device *dev;
358 struct net_bridge_port *p;
359 int ret;
360
361 if (!netif_running(br->dev) || br->multicast_disabled)
362 return -EINVAL;
363
364 dev = __dev_get_by_index(net, entry->ifindex);
365 if (!dev)
366 return -ENODEV;
367
368 p = br_port_get_rtnl(dev);
369 if (!p || p->br != br || p->state == BR_STATE_DISABLED)
370 return -EINVAL;
371
372 ip.proto = entry->addr.proto;
373 if (ip.proto == htons(ETH_P_IP))
374 ip.u.ip4 = entry->addr.u.ip4;
375#if IS_ENABLED(CONFIG_IPV6)
376 else
377 ip.u.ip6 = entry->addr.u.ip6;
378#endif
379
380 spin_lock_bh(&br->multicast_lock);
381 ret = br_mdb_add_group(br, p, &ip, entry->state);
382 spin_unlock_bh(&br->multicast_lock);
383 return ret;
384}
385
386static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
387{
388 struct net *net = sock_net(skb->sk);
389 struct br_mdb_entry *entry;
390 struct net_device *dev;
391 struct net_bridge *br;
392 int err;
393
394 err = br_mdb_parse(skb, nlh, &dev, &entry);
395 if (err < 0)
396 return err;
397
398 br = netdev_priv(dev);
399
400 err = __br_mdb_add(net, br, entry);
401 if (!err)
402 __br_mdb_notify(dev, entry, RTM_NEWMDB);
403 return err;
404}
405
406static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
407{
408 struct net_bridge_mdb_htable *mdb;
409 struct net_bridge_mdb_entry *mp;
410 struct net_bridge_port_group *p;
411 struct net_bridge_port_group __rcu **pp;
412 struct br_ip ip;
413 int err = -EINVAL;
414
415 if (!netif_running(br->dev) || br->multicast_disabled)
416 return -EINVAL;
417
418 if (timer_pending(&br->multicast_querier_timer))
419 return -EBUSY;
420
421 ip.proto = entry->addr.proto;
422 if (ip.proto == htons(ETH_P_IP))
423 ip.u.ip4 = entry->addr.u.ip4;
424#if IS_ENABLED(CONFIG_IPV6)
425 else
426 ip.u.ip6 = entry->addr.u.ip6;
427#endif
428
429 spin_lock_bh(&br->multicast_lock);
430 mdb = mlock_dereference(br->mdb, br);
431
432 mp = br_mdb_ip_get(mdb, &ip);
433 if (!mp)
434 goto unlock;
435
436 for (pp = &mp->ports;
437 (p = mlock_dereference(*pp, br)) != NULL;
438 pp = &p->next) {
439 if (!p->port || p->port->dev->ifindex != entry->ifindex)
440 continue;
441
442 if (p->port->state == BR_STATE_DISABLED)
443 goto unlock;
444
445 rcu_assign_pointer(*pp, p->next);
446 hlist_del_init(&p->mglist);
447 del_timer(&p->timer);
448 call_rcu_bh(&p->rcu, br_multicast_free_pg);
449 err = 0;
450
451 if (!mp->ports && !mp->mglist &&
452 netif_running(br->dev))
453 mod_timer(&mp->timer, jiffies);
454 break;
455 }
456
457unlock:
458 spin_unlock_bh(&br->multicast_lock);
459 return err;
460}
461
462static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
463{
464 struct net_device *dev;
465 struct br_mdb_entry *entry;
466 struct net_bridge *br;
467 int err;
468
469 err = br_mdb_parse(skb, nlh, &dev, &entry);
470 if (err < 0)
471 return err;
472
473 br = netdev_priv(dev);
474
475 err = __br_mdb_del(br, entry);
476 if (!err)
477 __br_mdb_notify(dev, entry, RTM_DELMDB);
478 return err;
479}
480
481void br_mdb_init(void)
482{
483 rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, NULL);
484 rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, NULL);
485 rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, NULL);
486}
487
488void br_mdb_uninit(void)
489{
490 rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
491 rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
492 rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
493}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 241743417f49..6d6f26531de2 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -27,25 +27,13 @@
27#if IS_ENABLED(CONFIG_IPV6) 27#if IS_ENABLED(CONFIG_IPV6)
28#include <net/ipv6.h> 28#include <net/ipv6.h>
29#include <net/mld.h> 29#include <net/mld.h>
30#include <net/addrconf.h>
31#include <net/ip6_checksum.h> 30#include <net/ip6_checksum.h>
32#endif 31#endif
33 32
34#include "br_private.h" 33#include "br_private.h"
35 34
36#define mlock_dereference(X, br) \
37 rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
38
39static void br_multicast_start_querier(struct net_bridge *br); 35static void br_multicast_start_querier(struct net_bridge *br);
40 36unsigned int br_mdb_rehash_seq;
41#if IS_ENABLED(CONFIG_IPV6)
42static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
43{
44 if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr))
45 return 1;
46 return 0;
47}
48#endif
49 37
50static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) 38static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
51{ 39{
@@ -103,8 +91,8 @@ static struct net_bridge_mdb_entry *__br_mdb_ip_get(
103 return NULL; 91 return NULL;
104} 92}
105 93
106static struct net_bridge_mdb_entry *br_mdb_ip_get( 94struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
107 struct net_bridge_mdb_htable *mdb, struct br_ip *dst) 95 struct br_ip *dst)
108{ 96{
109 if (!mdb) 97 if (!mdb)
110 return NULL; 98 return NULL;
@@ -207,7 +195,7 @@ static int br_mdb_copy(struct net_bridge_mdb_htable *new,
207 return maxlen > elasticity ? -EINVAL : 0; 195 return maxlen > elasticity ? -EINVAL : 0;
208} 196}
209 197
210static void br_multicast_free_pg(struct rcu_head *head) 198void br_multicast_free_pg(struct rcu_head *head)
211{ 199{
212 struct net_bridge_port_group *p = 200 struct net_bridge_port_group *p =
213 container_of(head, struct net_bridge_port_group, rcu); 201 container_of(head, struct net_bridge_port_group, rcu);
@@ -291,7 +279,7 @@ static void br_multicast_port_group_expired(unsigned long data)
291 279
292 spin_lock(&br->multicast_lock); 280 spin_lock(&br->multicast_lock);
293 if (!netif_running(br->dev) || timer_pending(&pg->timer) || 281 if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
294 hlist_unhashed(&pg->mglist)) 282 hlist_unhashed(&pg->mglist) || pg->state & MDB_PERMANENT)
295 goto out; 283 goto out;
296 284
297 br_multicast_del_pg(br, pg); 285 br_multicast_del_pg(br, pg);
@@ -338,6 +326,7 @@ static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
338 return err; 326 return err;
339 } 327 }
340 328
329 br_mdb_rehash_seq++;
341 call_rcu_bh(&mdb->rcu, br_mdb_free); 330 call_rcu_bh(&mdb->rcu, br_mdb_free);
342 331
343out: 332out:
@@ -582,9 +571,8 @@ err:
582 return mp; 571 return mp;
583} 572}
584 573
585static struct net_bridge_mdb_entry *br_multicast_new_group( 574struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
586 struct net_bridge *br, struct net_bridge_port *port, 575 struct net_bridge_port *port, struct br_ip *group)
587 struct br_ip *group)
588{ 576{
589 struct net_bridge_mdb_htable *mdb; 577 struct net_bridge_mdb_htable *mdb;
590 struct net_bridge_mdb_entry *mp; 578 struct net_bridge_mdb_entry *mp;
@@ -631,6 +619,28 @@ out:
631 return mp; 619 return mp;
632} 620}
633 621
622struct net_bridge_port_group *br_multicast_new_port_group(
623 struct net_bridge_port *port,
624 struct br_ip *group,
625 struct net_bridge_port_group __rcu *next,
626 unsigned char state)
627{
628 struct net_bridge_port_group *p;
629
630 p = kzalloc(sizeof(*p), GFP_ATOMIC);
631 if (unlikely(!p))
632 return NULL;
633
634 p->addr = *group;
635 p->port = port;
636 p->state = state;
637 rcu_assign_pointer(p->next, next);
638 hlist_add_head(&p->mglist, &port->mglist);
639 setup_timer(&p->timer, br_multicast_port_group_expired,
640 (unsigned long)p);
641 return p;
642}
643
634static int br_multicast_add_group(struct net_bridge *br, 644static int br_multicast_add_group(struct net_bridge *br,
635 struct net_bridge_port *port, 645 struct net_bridge_port *port,
636 struct br_ip *group) 646 struct br_ip *group)
@@ -666,19 +676,11 @@ static int br_multicast_add_group(struct net_bridge *br,
666 break; 676 break;
667 } 677 }
668 678
669 p = kzalloc(sizeof(*p), GFP_ATOMIC); 679 p = br_multicast_new_port_group(port, group, *pp, MDB_TEMPORARY);
670 err = -ENOMEM;
671 if (unlikely(!p)) 680 if (unlikely(!p))
672 goto err; 681 goto err;
673
674 p->addr = *group;
675 p->port = port;
676 p->next = *pp;
677 hlist_add_head(&p->mglist, &port->mglist);
678 setup_timer(&p->timer, br_multicast_port_group_expired,
679 (unsigned long)p);
680
681 rcu_assign_pointer(*pp, p); 682 rcu_assign_pointer(*pp, p);
683 br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
682 684
683found: 685found:
684 mod_timer(&p->timer, now + br->multicast_membership_interval); 686 mod_timer(&p->timer, now + br->multicast_membership_interval);
@@ -1138,7 +1140,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1138 struct sk_buff *skb) 1140 struct sk_buff *skb)
1139{ 1141{
1140 const struct ipv6hdr *ip6h = ipv6_hdr(skb); 1142 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
1141 struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb); 1143 struct mld_msg *mld;
1142 struct net_bridge_mdb_entry *mp; 1144 struct net_bridge_mdb_entry *mp;
1143 struct mld2_query *mld2q; 1145 struct mld2_query *mld2q;
1144 struct net_bridge_port_group *p; 1146 struct net_bridge_port_group *p;
@@ -1172,7 +1174,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1172 mld2q = (struct mld2_query *)icmp6_hdr(skb); 1174 mld2q = (struct mld2_query *)icmp6_hdr(skb);
1173 if (!mld2q->mld2q_nsrcs) 1175 if (!mld2q->mld2q_nsrcs)
1174 group = &mld2q->mld2q_mca; 1176 group = &mld2q->mld2q_mca;
1175 max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(mld2q->mld2q_mrc) : 1; 1177 max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1;
1176 } 1178 }
1177 1179
1178 if (!group) 1180 if (!group)
@@ -1225,6 +1227,28 @@ static void br_multicast_leave_group(struct net_bridge *br,
1225 if (!mp) 1227 if (!mp)
1226 goto out; 1228 goto out;
1227 1229
1230 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
1231 struct net_bridge_port_group __rcu **pp;
1232
1233 for (pp = &mp->ports;
1234 (p = mlock_dereference(*pp, br)) != NULL;
1235 pp = &p->next) {
1236 if (p->port != port)
1237 continue;
1238
1239 rcu_assign_pointer(*pp, p->next);
1240 hlist_del_init(&p->mglist);
1241 del_timer(&p->timer);
1242 call_rcu_bh(&p->rcu, br_multicast_free_pg);
1243 br_mdb_notify(br->dev, port, group, RTM_DELMDB);
1244
1245 if (!mp->ports && !mp->mglist &&
1246 netif_running(br->dev))
1247 mod_timer(&mp->timer, jiffies);
1248 }
1249 goto out;
1250 }
1251
1228 now = jiffies; 1252 now = jiffies;
1229 time = now + br->multicast_last_member_count * 1253 time = now + br->multicast_last_member_count *
1230 br->multicast_last_member_interval; 1254 br->multicast_last_member_interval;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 093f527276a3..5dc66abcc9e2 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -20,16 +20,44 @@
20#include "br_private.h" 20#include "br_private.h"
21#include "br_private_stp.h" 21#include "br_private_stp.h"
22 22
23static inline size_t br_port_info_size(void)
24{
25 return nla_total_size(1) /* IFLA_BRPORT_STATE */
26 + nla_total_size(2) /* IFLA_BRPORT_PRIORITY */
27 + nla_total_size(4) /* IFLA_BRPORT_COST */
28 + nla_total_size(1) /* IFLA_BRPORT_MODE */
29 + nla_total_size(1) /* IFLA_BRPORT_GUARD */
30 + nla_total_size(1) /* IFLA_BRPORT_PROTECT */
31 + 0;
32}
33
23static inline size_t br_nlmsg_size(void) 34static inline size_t br_nlmsg_size(void)
24{ 35{
25 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 36 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
26 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ 37 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
27 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ 38 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
28 + nla_total_size(4) /* IFLA_MASTER */ 39 + nla_total_size(4) /* IFLA_MASTER */
29 + nla_total_size(4) /* IFLA_MTU */ 40 + nla_total_size(4) /* IFLA_MTU */
30 + nla_total_size(4) /* IFLA_LINK */ 41 + nla_total_size(4) /* IFLA_LINK */
31 + nla_total_size(1) /* IFLA_OPERSTATE */ 42 + nla_total_size(1) /* IFLA_OPERSTATE */
32 + nla_total_size(1); /* IFLA_PROTINFO */ 43 + nla_total_size(br_port_info_size()); /* IFLA_PROTINFO */
44}
45
46static int br_port_fill_attrs(struct sk_buff *skb,
47 const struct net_bridge_port *p)
48{
49 u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
50
51 if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
52 nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
53 nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
54 nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
55 nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
56 nla_put_u8(skb, IFLA_BRPORT_PROTECT, !!(p->flags & BR_ROOT_BLOCK)) ||
57 nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)))
58 return -EMSGSIZE;
59
60 return 0;
33} 61}
34 62
35/* 63/*
@@ -67,10 +95,18 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por
67 (dev->addr_len && 95 (dev->addr_len &&
68 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || 96 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
69 (dev->ifindex != dev->iflink && 97 (dev->ifindex != dev->iflink &&
70 nla_put_u32(skb, IFLA_LINK, dev->iflink)) || 98 nla_put_u32(skb, IFLA_LINK, dev->iflink)))
71 (event == RTM_NEWLINK &&
72 nla_put_u8(skb, IFLA_PROTINFO, port->state)))
73 goto nla_put_failure; 99 goto nla_put_failure;
100
101 if (event == RTM_NEWLINK) {
102 struct nlattr *nest
103 = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
104
105 if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
106 goto nla_put_failure;
107 nla_nest_end(skb, nest);
108 }
109
74 return nlmsg_end(skb, nlh); 110 return nlmsg_end(skb, nlh);
75 111
76nla_put_failure: 112nla_put_failure:
@@ -111,89 +147,134 @@ errout:
111/* 147/*
112 * Dump information about all ports, in response to GETLINK 148 * Dump information about all ports, in response to GETLINK
113 */ 149 */
114static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 150int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
151 struct net_device *dev)
152{
153 int err = 0;
154 struct net_bridge_port *port = br_port_get_rcu(dev);
155
156 /* not a bridge port */
157 if (!port)
158 goto out;
159
160 err = br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI);
161out:
162 return err;
163}
164
165static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = {
166 [IFLA_BRPORT_STATE] = { .type = NLA_U8 },
167 [IFLA_BRPORT_COST] = { .type = NLA_U32 },
168 [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
169 [IFLA_BRPORT_MODE] = { .type = NLA_U8 },
170 [IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
171 [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
172};
173
174/* Change the state of the port and notify spanning tree */
175static int br_set_port_state(struct net_bridge_port *p, u8 state)
176{
177 if (state > BR_STATE_BLOCKING)
178 return -EINVAL;
179
180 /* if kernel STP is running, don't allow changes */
181 if (p->br->stp_enabled == BR_KERNEL_STP)
182 return -EBUSY;
183
184 if (!netif_running(p->dev) ||
185 (!netif_carrier_ok(p->dev) && state != BR_STATE_DISABLED))
186 return -ENETDOWN;
187
188 p->state = state;
189 br_log_state(p);
190 br_port_state_selection(p->br);
191 return 0;
192}
193
194/* Set/clear or port flags based on attribute */
195static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
196 int attrtype, unsigned long mask)
197{
198 if (tb[attrtype]) {
199 u8 flag = nla_get_u8(tb[attrtype]);
200 if (flag)
201 p->flags |= mask;
202 else
203 p->flags &= ~mask;
204 }
205}
206
207/* Process bridge protocol info on port */
208static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
115{ 209{
116 struct net *net = sock_net(skb->sk); 210 int err;
117 struct net_device *dev; 211
118 int idx; 212 br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
119 213 br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
120 idx = 0; 214 br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
121 rcu_read_lock(); 215
122 for_each_netdev_rcu(net, dev) { 216 if (tb[IFLA_BRPORT_COST]) {
123 struct net_bridge_port *port = br_port_get_rcu(dev); 217 err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
124 218 if (err)
125 /* not a bridge port */ 219 return err;
126 if (!port || idx < cb->args[0]) 220 }
127 goto skip; 221
128 222 if (tb[IFLA_BRPORT_PRIORITY]) {
129 if (br_fill_ifinfo(skb, port, 223 err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
130 NETLINK_CB(cb->skb).portid, 224 if (err)
131 cb->nlh->nlmsg_seq, RTM_NEWLINK, 225 return err;
132 NLM_F_MULTI) < 0)
133 break;
134skip:
135 ++idx;
136 } 226 }
137 rcu_read_unlock();
138 cb->args[0] = idx;
139 227
140 return skb->len; 228 if (tb[IFLA_BRPORT_STATE]) {
229 err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
230 if (err)
231 return err;
232 }
233 return 0;
141} 234}
142 235
143/* 236/* Change state and parameters on port. */
144 * Change state of port (ie from forwarding to blocking etc) 237int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
145 * Used by spanning tree in user space.
146 */
147static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
148{ 238{
149 struct net *net = sock_net(skb->sk);
150 struct ifinfomsg *ifm; 239 struct ifinfomsg *ifm;
151 struct nlattr *protinfo; 240 struct nlattr *protinfo;
152 struct net_device *dev;
153 struct net_bridge_port *p; 241 struct net_bridge_port *p;
154 u8 new_state; 242 struct nlattr *tb[IFLA_BRPORT_MAX + 1];
155 243 int err;
156 if (nlmsg_len(nlh) < sizeof(*ifm))
157 return -EINVAL;
158 244
159 ifm = nlmsg_data(nlh); 245 ifm = nlmsg_data(nlh);
160 if (ifm->ifi_family != AF_BRIDGE)
161 return -EPFNOSUPPORT;
162 246
163 protinfo = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_PROTINFO); 247 protinfo = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_PROTINFO);
164 if (!protinfo || nla_len(protinfo) < sizeof(u8)) 248 if (!protinfo)
165 return -EINVAL; 249 return 0;
166
167 new_state = nla_get_u8(protinfo);
168 if (new_state > BR_STATE_BLOCKING)
169 return -EINVAL;
170
171 dev = __dev_get_by_index(net, ifm->ifi_index);
172 if (!dev)
173 return -ENODEV;
174 250
175 p = br_port_get_rtnl(dev); 251 p = br_port_get_rtnl(dev);
176 if (!p) 252 if (!p)
177 return -EINVAL; 253 return -EINVAL;
178 254
179 /* if kernel STP is running, don't allow changes */ 255 if (protinfo->nla_type & NLA_F_NESTED) {
180 if (p->br->stp_enabled == BR_KERNEL_STP) 256 err = nla_parse_nested(tb, IFLA_BRPORT_MAX,
181 return -EBUSY; 257 protinfo, ifla_brport_policy);
182 258 if (err)
183 if (!netif_running(dev) || 259 return err;
184 (!netif_carrier_ok(dev) && new_state != BR_STATE_DISABLED)) 260
185 return -ENETDOWN; 261 spin_lock_bh(&p->br->lock);
186 262 err = br_setport(p, tb);
187 p->state = new_state; 263 spin_unlock_bh(&p->br->lock);
188 br_log_state(p); 264 } else {
265 /* Binary compatability with old RSTP */
266 if (nla_len(protinfo) < sizeof(u8))
267 return -EINVAL;
189 268
190 spin_lock_bh(&p->br->lock); 269 spin_lock_bh(&p->br->lock);
191 br_port_state_selection(p->br); 270 err = br_set_port_state(p, nla_get_u8(protinfo));
192 spin_unlock_bh(&p->br->lock); 271 spin_unlock_bh(&p->br->lock);
272 }
193 273
194 br_ifinfo_notify(RTM_NEWLINK, p); 274 if (err == 0)
275 br_ifinfo_notify(RTM_NEWLINK, p);
195 276
196 return 0; 277 return err;
197} 278}
198 279
199static int br_validate(struct nlattr *tb[], struct nlattr *data[]) 280static int br_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -220,31 +301,19 @@ int __init br_netlink_init(void)
220{ 301{
221 int err; 302 int err;
222 303
304 br_mdb_init();
223 err = rtnl_link_register(&br_link_ops); 305 err = rtnl_link_register(&br_link_ops);
224 if (err < 0)
225 goto err1;
226
227 err = __rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL,
228 br_dump_ifinfo, NULL);
229 if (err)
230 goto err2;
231 err = __rtnl_register(PF_BRIDGE, RTM_SETLINK,
232 br_rtm_setlink, NULL, NULL);
233 if (err) 306 if (err)
234 goto err3; 307 goto out;
235 308
236 return 0; 309 return 0;
237 310out:
238err3: 311 br_mdb_uninit();
239 rtnl_unregister_all(PF_BRIDGE);
240err2:
241 rtnl_link_unregister(&br_link_ops);
242err1:
243 return err; 312 return err;
244} 313}
245 314
246void __exit br_netlink_fini(void) 315void __exit br_netlink_fini(void)
247{ 316{
317 br_mdb_uninit();
248 rtnl_link_unregister(&br_link_ops); 318 rtnl_link_unregister(&br_link_ops);
249 rtnl_unregister_all(PF_BRIDGE);
250} 319}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 9b278c4ebee1..711094aed41a 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -83,6 +83,7 @@ struct net_bridge_port_group {
83 struct rcu_head rcu; 83 struct rcu_head rcu;
84 struct timer_list timer; 84 struct timer_list timer;
85 struct br_ip addr; 85 struct br_ip addr;
86 unsigned char state;
86}; 87};
87 88
88struct net_bridge_mdb_entry 89struct net_bridge_mdb_entry
@@ -135,6 +136,9 @@ struct net_bridge_port
135 136
136 unsigned long flags; 137 unsigned long flags;
137#define BR_HAIRPIN_MODE 0x00000001 138#define BR_HAIRPIN_MODE 0x00000001
139#define BR_BPDU_GUARD 0x00000002
140#define BR_ROOT_BLOCK 0x00000004
141#define BR_MULTICAST_FAST_LEAVE 0x00000008
138 142
139#ifdef CONFIG_BRIDGE_IGMP_SNOOPING 143#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
140 u32 multicast_startup_queries_sent; 144 u32 multicast_startup_queries_sent;
@@ -158,7 +162,9 @@ struct net_bridge_port
158 162
159static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev) 163static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev)
160{ 164{
161 struct net_bridge_port *port = rcu_dereference(dev->rx_handler_data); 165 struct net_bridge_port *port =
166 rcu_dereference_rtnl(dev->rx_handler_data);
167
162 return br_port_exists(dev) ? port : NULL; 168 return br_port_exists(dev) ? port : NULL;
163} 169}
164 170
@@ -288,7 +294,6 @@ struct br_input_skb_cb {
288 pr_debug("%s: " format, (br)->dev->name, ##args) 294 pr_debug("%s: " format, (br)->dev->name, ##args)
289 295
290extern struct notifier_block br_device_notifier; 296extern struct notifier_block br_device_notifier;
291extern const u8 br_group_address[ETH_ALEN];
292 297
293/* called under bridge lock */ 298/* called under bridge lock */
294static inline int br_is_root_bridge(const struct net_bridge *br) 299static inline int br_is_root_bridge(const struct net_bridge *br)
@@ -407,6 +412,7 @@ extern int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __us
407 412
408/* br_multicast.c */ 413/* br_multicast.c */
409#ifdef CONFIG_BRIDGE_IGMP_SNOOPING 414#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
415extern unsigned int br_mdb_rehash_seq;
410extern int br_multicast_rcv(struct net_bridge *br, 416extern int br_multicast_rcv(struct net_bridge *br,
411 struct net_bridge_port *port, 417 struct net_bridge_port *port,
412 struct sk_buff *skb); 418 struct sk_buff *skb);
@@ -429,6 +435,34 @@ extern int br_multicast_set_port_router(struct net_bridge_port *p,
429extern int br_multicast_toggle(struct net_bridge *br, unsigned long val); 435extern int br_multicast_toggle(struct net_bridge *br, unsigned long val);
430extern int br_multicast_set_querier(struct net_bridge *br, unsigned long val); 436extern int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
431extern int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val); 437extern int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
438extern struct net_bridge_mdb_entry *br_mdb_ip_get(
439 struct net_bridge_mdb_htable *mdb,
440 struct br_ip *dst);
441extern struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
442 struct net_bridge_port *port, struct br_ip *group);
443extern void br_multicast_free_pg(struct rcu_head *head);
444extern struct net_bridge_port_group *br_multicast_new_port_group(
445 struct net_bridge_port *port,
446 struct br_ip *group,
447 struct net_bridge_port_group *next,
448 unsigned char state);
449extern void br_mdb_init(void);
450extern void br_mdb_uninit(void);
451extern void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
452 struct br_ip *group, int type);
453
454#define mlock_dereference(X, br) \
455 rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
456
457#if IS_ENABLED(CONFIG_IPV6)
458#include <net/addrconf.h>
459static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
460{
461 if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr))
462 return 1;
463 return 0;
464}
465#endif
432 466
433static inline bool br_multicast_is_router(struct net_bridge *br) 467static inline bool br_multicast_is_router(struct net_bridge *br)
434{ 468{
@@ -492,6 +526,12 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
492{ 526{
493 return 0; 527 return 0;
494} 528}
529static inline void br_mdb_init(void)
530{
531}
532static inline void br_mdb_uninit(void)
533{
534}
495#endif 535#endif
496 536
497/* br_netfilter.c */ 537/* br_netfilter.c */
@@ -553,6 +593,9 @@ extern struct rtnl_link_ops br_link_ops;
553extern int br_netlink_init(void); 593extern int br_netlink_init(void);
554extern void br_netlink_fini(void); 594extern void br_netlink_fini(void);
555extern void br_ifinfo_notify(int event, struct net_bridge_port *port); 595extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
596extern int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg);
597extern int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
598 struct net_device *dev);
556 599
557#ifdef CONFIG_SYSFS 600#ifdef CONFIG_SYSFS
558/* br_sysfs_if.c */ 601/* br_sysfs_if.c */
@@ -566,10 +609,10 @@ extern void br_sysfs_delbr(struct net_device *dev);
566 609
567#else 610#else
568 611
569#define br_sysfs_addif(p) (0) 612static inline int br_sysfs_addif(struct net_bridge_port *p) { return 0; }
570#define br_sysfs_renameif(p) (0) 613static inline int br_sysfs_renameif(struct net_bridge_port *p) { return 0; }
571#define br_sysfs_addbr(dev) (0) 614static inline int br_sysfs_addbr(struct net_device *dev) { return 0; }
572#define br_sysfs_delbr(dev) do { } while(0) 615static inline void br_sysfs_delbr(struct net_device *dev) { return; }
573#endif /* CONFIG_SYSFS */ 616#endif /* CONFIG_SYSFS */
574 617
575#endif 618#endif
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index af9a12099ba4..b01849a74310 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -100,6 +100,21 @@ static int br_should_become_root_port(const struct net_bridge_port *p,
100 return 0; 100 return 0;
101} 101}
102 102
103static void br_root_port_block(const struct net_bridge *br,
104 struct net_bridge_port *p)
105{
106
107 br_notice(br, "port %u(%s) tried to become root port (blocked)",
108 (unsigned int) p->port_no, p->dev->name);
109
110 p->state = BR_STATE_LISTENING;
111 br_log_state(p);
112 br_ifinfo_notify(RTM_NEWLINK, p);
113
114 if (br->forward_delay > 0)
115 mod_timer(&p->forward_delay_timer, jiffies + br->forward_delay);
116}
117
103/* called under bridge lock */ 118/* called under bridge lock */
104static void br_root_selection(struct net_bridge *br) 119static void br_root_selection(struct net_bridge *br)
105{ 120{
@@ -107,7 +122,12 @@ static void br_root_selection(struct net_bridge *br)
107 u16 root_port = 0; 122 u16 root_port = 0;
108 123
109 list_for_each_entry(p, &br->port_list, list) { 124 list_for_each_entry(p, &br->port_list, list) {
110 if (br_should_become_root_port(p, root_port)) 125 if (!br_should_become_root_port(p, root_port))
126 continue;
127
128 if (p->flags & BR_ROOT_BLOCK)
129 br_root_port_block(br, p);
130 else
111 root_port = p->port_no; 131 root_port = p->port_no;
112 } 132 }
113 133
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index fd30a6022dea..7f884e3fb955 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -170,6 +170,13 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
170 if (!ether_addr_equal(dest, br->group_addr)) 170 if (!ether_addr_equal(dest, br->group_addr))
171 goto out; 171 goto out;
172 172
173 if (p->flags & BR_BPDU_GUARD) {
174 br_notice(br, "BPDU received on blocked port %u(%s)\n",
175 (unsigned int) p->port_no, p->dev->name);
176 br_stp_disable_port(p);
177 goto out;
178 }
179
173 buf = skb_pull(skb, 3); 180 buf = skb_pull(skb, 3);
174 181
175 if (buf[0] == BPDU_TYPE_CONFIG) { 182 if (buf[0] == BPDU_TYPE_CONFIG) {
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index c5c059333eab..5913a3a0047b 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -14,6 +14,7 @@
14#include <linux/capability.h> 14#include <linux/capability.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/netdevice.h> 16#include <linux/netdevice.h>
17#include <linux/etherdevice.h>
17#include <linux/if_bridge.h> 18#include <linux/if_bridge.h>
18#include <linux/rtnetlink.h> 19#include <linux/rtnetlink.h>
19#include <linux/spinlock.h> 20#include <linux/spinlock.h>
@@ -36,7 +37,7 @@ static ssize_t store_bridge_parm(struct device *d,
36 unsigned long val; 37 unsigned long val;
37 int err; 38 int err;
38 39
39 if (!capable(CAP_NET_ADMIN)) 40 if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
40 return -EPERM; 41 return -EPERM;
41 42
42 val = simple_strtoul(buf, &endp, 0); 43 val = simple_strtoul(buf, &endp, 0);
@@ -132,7 +133,7 @@ static ssize_t store_stp_state(struct device *d,
132 char *endp; 133 char *endp;
133 unsigned long val; 134 unsigned long val;
134 135
135 if (!capable(CAP_NET_ADMIN)) 136 if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
136 return -EPERM; 137 return -EPERM;
137 138
138 val = simple_strtoul(buf, &endp, 0); 139 val = simple_strtoul(buf, &endp, 0);
@@ -165,7 +166,7 @@ static ssize_t store_group_fwd_mask(struct device *d,
165 char *endp; 166 char *endp;
166 unsigned long val; 167 unsigned long val;
167 168
168 if (!capable(CAP_NET_ADMIN)) 169 if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
169 return -EPERM; 170 return -EPERM;
170 171
171 val = simple_strtoul(buf, &endp, 0); 172 val = simple_strtoul(buf, &endp, 0);
@@ -297,23 +298,18 @@ static ssize_t store_group_addr(struct device *d,
297 const char *buf, size_t len) 298 const char *buf, size_t len)
298{ 299{
299 struct net_bridge *br = to_bridge(d); 300 struct net_bridge *br = to_bridge(d);
300 unsigned int new_addr[6]; 301 u8 new_addr[6];
301 int i; 302 int i;
302 303
303 if (!capable(CAP_NET_ADMIN)) 304 if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
304 return -EPERM; 305 return -EPERM;
305 306
306 if (sscanf(buf, "%x:%x:%x:%x:%x:%x", 307 if (sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
307 &new_addr[0], &new_addr[1], &new_addr[2], 308 &new_addr[0], &new_addr[1], &new_addr[2],
308 &new_addr[3], &new_addr[4], &new_addr[5]) != 6) 309 &new_addr[3], &new_addr[4], &new_addr[5]) != 6)
309 return -EINVAL; 310 return -EINVAL;
310 311
311 /* Must be 01:80:c2:00:00:0X */ 312 if (!is_link_local_ether_addr(new_addr))
312 for (i = 0; i < 5; i++)
313 if (new_addr[i] != br_group_address[i])
314 return -EINVAL;
315
316 if (new_addr[5] & ~0xf)
317 return -EINVAL; 313 return -EINVAL;
318 314
319 if (new_addr[5] == 1 || /* 802.3x Pause address */ 315 if (new_addr[5] == 1 || /* 802.3x Pause address */
@@ -337,7 +333,7 @@ static ssize_t store_flush(struct device *d,
337{ 333{
338 struct net_bridge *br = to_bridge(d); 334 struct net_bridge *br = to_bridge(d);
339 335
340 if (!capable(CAP_NET_ADMIN)) 336 if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
341 return -EPERM; 337 return -EPERM;
342 338
343 br_fdb_flush(br); 339 br_fdb_flush(br);
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 13b36bdc76a7..a1ef1b6e14dc 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -34,6 +34,28 @@ const struct brport_attribute brport_attr_##_name = { \
34 .store = _store, \ 34 .store = _store, \
35}; 35};
36 36
37#define BRPORT_ATTR_FLAG(_name, _mask) \
38static ssize_t show_##_name(struct net_bridge_port *p, char *buf) \
39{ \
40 return sprintf(buf, "%d\n", !!(p->flags & _mask)); \
41} \
42static int store_##_name(struct net_bridge_port *p, unsigned long v) \
43{ \
44 unsigned long flags = p->flags; \
45 if (v) \
46 flags |= _mask; \
47 else \
48 flags &= ~_mask; \
49 if (flags != p->flags) { \
50 p->flags = flags; \
51 br_ifinfo_notify(RTM_NEWLINK, p); \
52 } \
53 return 0; \
54} \
55static BRPORT_ATTR(_name, S_IRUGO | S_IWUSR, \
56 show_##_name, store_##_name)
57
58
37static ssize_t show_path_cost(struct net_bridge_port *p, char *buf) 59static ssize_t show_path_cost(struct net_bridge_port *p, char *buf)
38{ 60{
39 return sprintf(buf, "%d\n", p->path_cost); 61 return sprintf(buf, "%d\n", p->path_cost);
@@ -133,21 +155,9 @@ static int store_flush(struct net_bridge_port *p, unsigned long v)
133} 155}
134static BRPORT_ATTR(flush, S_IWUSR, NULL, store_flush); 156static BRPORT_ATTR(flush, S_IWUSR, NULL, store_flush);
135 157
136static ssize_t show_hairpin_mode(struct net_bridge_port *p, char *buf) 158BRPORT_ATTR_FLAG(hairpin_mode, BR_HAIRPIN_MODE);
137{ 159BRPORT_ATTR_FLAG(bpdu_guard, BR_BPDU_GUARD);
138 int hairpin_mode = (p->flags & BR_HAIRPIN_MODE) ? 1 : 0; 160BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK);
139 return sprintf(buf, "%d\n", hairpin_mode);
140}
141static int store_hairpin_mode(struct net_bridge_port *p, unsigned long v)
142{
143 if (v)
144 p->flags |= BR_HAIRPIN_MODE;
145 else
146 p->flags &= ~BR_HAIRPIN_MODE;
147 return 0;
148}
149static BRPORT_ATTR(hairpin_mode, S_IRUGO | S_IWUSR,
150 show_hairpin_mode, store_hairpin_mode);
151 161
152#ifdef CONFIG_BRIDGE_IGMP_SNOOPING 162#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
153static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf) 163static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
@@ -162,6 +172,8 @@ static int store_multicast_router(struct net_bridge_port *p,
162} 172}
163static BRPORT_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router, 173static BRPORT_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router,
164 store_multicast_router); 174 store_multicast_router);
175
176BRPORT_ATTR_FLAG(multicast_fast_leave, BR_MULTICAST_FAST_LEAVE);
165#endif 177#endif
166 178
167static const struct brport_attribute *brport_attrs[] = { 179static const struct brport_attribute *brport_attrs[] = {
@@ -181,8 +193,11 @@ static const struct brport_attribute *brport_attrs[] = {
181 &brport_attr_hold_timer, 193 &brport_attr_hold_timer,
182 &brport_attr_flush, 194 &brport_attr_flush,
183 &brport_attr_hairpin_mode, 195 &brport_attr_hairpin_mode,
196 &brport_attr_bpdu_guard,
197 &brport_attr_root_block,
184#ifdef CONFIG_BRIDGE_IGMP_SNOOPING 198#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
185 &brport_attr_multicast_router, 199 &brport_attr_multicast_router,
200 &brport_attr_multicast_fast_leave,
186#endif 201#endif
187 NULL 202 NULL
188}; 203};
@@ -209,7 +224,7 @@ static ssize_t brport_store(struct kobject * kobj,
209 char *endp; 224 char *endp;
210 unsigned long val; 225 unsigned long val;
211 226
212 if (!capable(CAP_NET_ADMIN)) 227 if (!ns_capable(dev_net(p->dev)->user_ns, CAP_NET_ADMIN))
213 return -EPERM; 228 return -EPERM;
214 229
215 val = simple_strtoul(buf, &endp, 0); 230 val = simple_strtoul(buf, &endp, 0);
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
index fd7cbf5aa895..3ebc8cbc91ff 100644
--- a/net/caif/caif_usb.c
+++ b/net/caif/caif_usb.c
@@ -126,20 +126,16 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
126 struct net_device *dev = arg; 126 struct net_device *dev = arg;
127 struct caif_dev_common common; 127 struct caif_dev_common common;
128 struct cflayer *layer, *link_support; 128 struct cflayer *layer, *link_support;
129 struct usbnet *usbnet = netdev_priv(dev); 129 struct usbnet *usbnet;
130 struct usb_device *usbdev = usbnet->udev; 130 struct usb_device *usbdev;
131 struct ethtool_drvinfo drvinfo;
132 131
133 /* 132 /* Check whether we have a NCM device, and find its VID/PID. */
134 * Quirks: High-jack ethtool to find if we have a NCM device, 133 if (!(dev->dev.parent && dev->dev.parent->driver &&
135 * and find it's VID/PID. 134 strcmp(dev->dev.parent->driver->name, "cdc_ncm") == 0))
136 */
137 if (dev->ethtool_ops == NULL || dev->ethtool_ops->get_drvinfo == NULL)
138 return 0; 135 return 0;
139 136
140 dev->ethtool_ops->get_drvinfo(dev, &drvinfo); 137 usbnet = netdev_priv(dev);
141 if (strncmp(drvinfo.driver, "cdc_ncm", 7) != 0) 138 usbdev = usbnet->udev;
142 return 0;
143 139
144 pr_debug("USB CDC NCM device VID:0x%4x PID:0x%4x\n", 140 pr_debug("USB CDC NCM device VID:0x%4x PID:0x%4x\n",
145 le16_to_cpu(usbdev->descriptor.idVendor), 141 le16_to_cpu(usbdev->descriptor.idVendor),
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 44f270fc2d06..a376ec1ac0a7 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -515,8 +515,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
515 client_layer : NULL); 515 client_layer : NULL);
516 } 516 }
517 517
518 if (req != NULL) 518 kfree(req);
519 kfree(req);
520 519
521 spin_unlock_bh(&cfctrl->info_list_lock); 520 spin_unlock_bh(&cfctrl->info_list_lock);
522 } 521 }
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 6f747582718e..969b7cdff59d 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1084,6 +1084,9 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1084 op->sk = sk; 1084 op->sk = sk;
1085 op->ifindex = ifindex; 1085 op->ifindex = ifindex;
1086 1086
1087 /* ifindex for timeout events w/o previous frame reception */
1088 op->rx_ifindex = ifindex;
1089
1087 /* initialize uninitialized (kzalloc) structure */ 1090 /* initialize uninitialized (kzalloc) structure */
1088 hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1091 hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1089 op->timer.function = bcm_rx_timeout_handler; 1092 op->timer.function = bcm_rx_timeout_handler;
diff --git a/net/can/gw.c b/net/can/gw.c
index 1f5c9785a262..574dda78eb0f 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -751,6 +751,9 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
751 struct cgw_job *gwj; 751 struct cgw_job *gwj;
752 int err = 0; 752 int err = 0;
753 753
754 if (!capable(CAP_NET_ADMIN))
755 return -EPERM;
756
754 if (nlmsg_len(nlh) < sizeof(*r)) 757 if (nlmsg_len(nlh) < sizeof(*r))
755 return -EINVAL; 758 return -EINVAL;
756 759
@@ -839,6 +842,9 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
839 struct can_can_gw ccgw; 842 struct can_can_gw ccgw;
840 int err = 0; 843 int err = 0;
841 844
845 if (!capable(CAP_NET_ADMIN))
846 return -EPERM;
847
842 if (nlmsg_len(nlh) < sizeof(*r)) 848 if (nlmsg_len(nlh) < sizeof(*r))
843 return -EINVAL; 849 return -EINVAL;
844 850
diff --git a/net/can/proc.c b/net/can/proc.c
index 3b6dd3180492..ae566902d2bf 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -397,7 +397,7 @@ static inline void can_rcvlist_sff_proc_show_one(struct seq_file *m,
397 int i; 397 int i;
398 int all_empty = 1; 398 int all_empty = 1;
399 399
400 /* check wether at least one list is non-empty */ 400 /* check whether at least one list is non-empty */
401 for (i = 0; i < 0x800; i++) 401 for (i = 0; i < 0x800; i++)
402 if (!hlist_empty(&d->rx_sff[i])) { 402 if (!hlist_empty(&d->rx_sff[i])) {
403 all_empty = 0; 403 all_empty = 0;
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index a8020293f342..ee71ea26777a 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -305,7 +305,6 @@ ceph_parse_options(char *options, const char *dev_name,
305 305
306 /* start with defaults */ 306 /* start with defaults */
307 opt->flags = CEPH_OPT_DEFAULT; 307 opt->flags = CEPH_OPT_DEFAULT;
308 opt->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT;
309 opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT; 308 opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
310 opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */ 309 opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */
311 opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */ 310 opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */
@@ -391,7 +390,7 @@ ceph_parse_options(char *options, const char *dev_name,
391 390
392 /* misc */ 391 /* misc */
393 case Opt_osdtimeout: 392 case Opt_osdtimeout:
394 opt->osd_timeout = intval; 393 pr_warning("ignoring deprecated osdtimeout option\n");
395 break; 394 break;
396 case Opt_osdkeepalivetimeout: 395 case Opt_osdkeepalivetimeout:
397 opt->osd_keepalive_timeout = intval; 396 opt->osd_keepalive_timeout = intval;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 3ef1759403b4..5ccf87ed8d68 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -506,6 +506,7 @@ static void reset_connection(struct ceph_connection *con)
506{ 506{
507 /* reset connection, out_queue, msg_ and connect_seq */ 507 /* reset connection, out_queue, msg_ and connect_seq */
508 /* discard existing out_queue and msg_seq */ 508 /* discard existing out_queue and msg_seq */
509 dout("reset_connection %p\n", con);
509 ceph_msg_remove_list(&con->out_queue); 510 ceph_msg_remove_list(&con->out_queue);
510 ceph_msg_remove_list(&con->out_sent); 511 ceph_msg_remove_list(&con->out_sent);
511 512
@@ -561,7 +562,7 @@ void ceph_con_open(struct ceph_connection *con,
561 mutex_lock(&con->mutex); 562 mutex_lock(&con->mutex);
562 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr)); 563 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
563 564
564 BUG_ON(con->state != CON_STATE_CLOSED); 565 WARN_ON(con->state != CON_STATE_CLOSED);
565 con->state = CON_STATE_PREOPEN; 566 con->state = CON_STATE_PREOPEN;
566 567
567 con->peer_name.type = (__u8) entity_type; 568 con->peer_name.type = (__u8) entity_type;
@@ -1506,13 +1507,6 @@ static int process_banner(struct ceph_connection *con)
1506 return 0; 1507 return 0;
1507} 1508}
1508 1509
1509static void fail_protocol(struct ceph_connection *con)
1510{
1511 reset_connection(con);
1512 BUG_ON(con->state != CON_STATE_NEGOTIATING);
1513 con->state = CON_STATE_CLOSED;
1514}
1515
1516static int process_connect(struct ceph_connection *con) 1510static int process_connect(struct ceph_connection *con)
1517{ 1511{
1518 u64 sup_feat = con->msgr->supported_features; 1512 u64 sup_feat = con->msgr->supported_features;
@@ -1530,7 +1524,7 @@ static int process_connect(struct ceph_connection *con)
1530 ceph_pr_addr(&con->peer_addr.in_addr), 1524 ceph_pr_addr(&con->peer_addr.in_addr),
1531 sup_feat, server_feat, server_feat & ~sup_feat); 1525 sup_feat, server_feat, server_feat & ~sup_feat);
1532 con->error_msg = "missing required protocol features"; 1526 con->error_msg = "missing required protocol features";
1533 fail_protocol(con); 1527 reset_connection(con);
1534 return -1; 1528 return -1;
1535 1529
1536 case CEPH_MSGR_TAG_BADPROTOVER: 1530 case CEPH_MSGR_TAG_BADPROTOVER:
@@ -1541,7 +1535,7 @@ static int process_connect(struct ceph_connection *con)
1541 le32_to_cpu(con->out_connect.protocol_version), 1535 le32_to_cpu(con->out_connect.protocol_version),
1542 le32_to_cpu(con->in_reply.protocol_version)); 1536 le32_to_cpu(con->in_reply.protocol_version));
1543 con->error_msg = "protocol version mismatch"; 1537 con->error_msg = "protocol version mismatch";
1544 fail_protocol(con); 1538 reset_connection(con);
1545 return -1; 1539 return -1;
1546 1540
1547 case CEPH_MSGR_TAG_BADAUTHORIZER: 1541 case CEPH_MSGR_TAG_BADAUTHORIZER:
@@ -1631,11 +1625,11 @@ static int process_connect(struct ceph_connection *con)
1631 ceph_pr_addr(&con->peer_addr.in_addr), 1625 ceph_pr_addr(&con->peer_addr.in_addr),
1632 req_feat, server_feat, req_feat & ~server_feat); 1626 req_feat, server_feat, req_feat & ~server_feat);
1633 con->error_msg = "missing required protocol features"; 1627 con->error_msg = "missing required protocol features";
1634 fail_protocol(con); 1628 reset_connection(con);
1635 return -1; 1629 return -1;
1636 } 1630 }
1637 1631
1638 BUG_ON(con->state != CON_STATE_NEGOTIATING); 1632 WARN_ON(con->state != CON_STATE_NEGOTIATING);
1639 con->state = CON_STATE_OPEN; 1633 con->state = CON_STATE_OPEN;
1640 1634
1641 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq); 1635 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
@@ -2132,7 +2126,6 @@ more:
2132 if (ret < 0) 2126 if (ret < 0)
2133 goto out; 2127 goto out;
2134 2128
2135 BUG_ON(con->state != CON_STATE_CONNECTING);
2136 con->state = CON_STATE_NEGOTIATING; 2129 con->state = CON_STATE_NEGOTIATING;
2137 2130
2138 /* 2131 /*
@@ -2160,7 +2153,7 @@ more:
2160 goto more; 2153 goto more;
2161 } 2154 }
2162 2155
2163 BUG_ON(con->state != CON_STATE_OPEN); 2156 WARN_ON(con->state != CON_STATE_OPEN);
2164 2157
2165 if (con->in_base_pos < 0) { 2158 if (con->in_base_pos < 0) {
2166 /* 2159 /*
@@ -2244,22 +2237,62 @@ bad_tag:
2244 2237
2245 2238
2246/* 2239/*
2247 * Atomically queue work on a connection. Bump @con reference to 2240 * Atomically queue work on a connection after the specified delay.
2248 * avoid races with connection teardown. 2241 * Bump @con reference to avoid races with connection teardown.
2242 * Returns 0 if work was queued, or an error code otherwise.
2249 */ 2243 */
2250static void queue_con(struct ceph_connection *con) 2244static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
2251{ 2245{
2252 if (!con->ops->get(con)) { 2246 if (!con->ops->get(con)) {
2253 dout("queue_con %p ref count 0\n", con); 2247 dout("%s %p ref count 0\n", __func__, con);
2254 return; 2248
2249 return -ENOENT;
2255 } 2250 }
2256 2251
2257 if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) { 2252 if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
2258 dout("queue_con %p - already queued\n", con); 2253 dout("%s %p - already queued\n", __func__, con);
2259 con->ops->put(con); 2254 con->ops->put(con);
2260 } else { 2255
2261 dout("queue_con %p\n", con); 2256 return -EBUSY;
2257 }
2258
2259 dout("%s %p %lu\n", __func__, con, delay);
2260
2261 return 0;
2262}
2263
2264static void queue_con(struct ceph_connection *con)
2265{
2266 (void) queue_con_delay(con, 0);
2267}
2268
2269static bool con_sock_closed(struct ceph_connection *con)
2270{
2271 if (!test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags))
2272 return false;
2273
2274#define CASE(x) \
2275 case CON_STATE_ ## x: \
2276 con->error_msg = "socket closed (con state " #x ")"; \
2277 break;
2278
2279 switch (con->state) {
2280 CASE(CLOSED);
2281 CASE(PREOPEN);
2282 CASE(CONNECTING);
2283 CASE(NEGOTIATING);
2284 CASE(OPEN);
2285 CASE(STANDBY);
2286 default:
2287 pr_warning("%s con %p unrecognized state %lu\n",
2288 __func__, con, con->state);
2289 con->error_msg = "unrecognized con state";
2290 BUG();
2291 break;
2262 } 2292 }
2293#undef CASE
2294
2295 return true;
2263} 2296}
2264 2297
2265/* 2298/*
@@ -2273,35 +2306,16 @@ static void con_work(struct work_struct *work)
2273 2306
2274 mutex_lock(&con->mutex); 2307 mutex_lock(&con->mutex);
2275restart: 2308restart:
2276 if (test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags)) { 2309 if (con_sock_closed(con))
2277 switch (con->state) {
2278 case CON_STATE_CONNECTING:
2279 con->error_msg = "connection failed";
2280 break;
2281 case CON_STATE_NEGOTIATING:
2282 con->error_msg = "negotiation failed";
2283 break;
2284 case CON_STATE_OPEN:
2285 con->error_msg = "socket closed";
2286 break;
2287 default:
2288 dout("unrecognized con state %d\n", (int)con->state);
2289 con->error_msg = "unrecognized con state";
2290 BUG();
2291 }
2292 goto fault; 2310 goto fault;
2293 }
2294 2311
2295 if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) { 2312 if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) {
2296 dout("con_work %p backing off\n", con); 2313 dout("con_work %p backing off\n", con);
2297 if (queue_delayed_work(ceph_msgr_wq, &con->work, 2314 ret = queue_con_delay(con, round_jiffies_relative(con->delay));
2298 round_jiffies_relative(con->delay))) { 2315 if (ret) {
2299 dout("con_work %p backoff %lu\n", con, con->delay);
2300 mutex_unlock(&con->mutex);
2301 return;
2302 } else {
2303 dout("con_work %p FAILED to back off %lu\n", con, 2316 dout("con_work %p FAILED to back off %lu\n", con,
2304 con->delay); 2317 con->delay);
2318 BUG_ON(ret == -ENOENT);
2305 set_bit(CON_FLAG_BACKOFF, &con->flags); 2319 set_bit(CON_FLAG_BACKOFF, &con->flags);
2306 } 2320 }
2307 goto done; 2321 goto done;
@@ -2356,12 +2370,12 @@ fault:
2356static void ceph_fault(struct ceph_connection *con) 2370static void ceph_fault(struct ceph_connection *con)
2357 __releases(con->mutex) 2371 __releases(con->mutex)
2358{ 2372{
2359 pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name), 2373 pr_warning("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
2360 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg); 2374 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
2361 dout("fault %p state %lu to peer %s\n", 2375 dout("fault %p state %lu to peer %s\n",
2362 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr)); 2376 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
2363 2377
2364 BUG_ON(con->state != CON_STATE_CONNECTING && 2378 WARN_ON(con->state != CON_STATE_CONNECTING &&
2365 con->state != CON_STATE_NEGOTIATING && 2379 con->state != CON_STATE_NEGOTIATING &&
2366 con->state != CON_STATE_OPEN); 2380 con->state != CON_STATE_OPEN);
2367 2381
@@ -2398,24 +2412,8 @@ static void ceph_fault(struct ceph_connection *con)
2398 con->delay = BASE_DELAY_INTERVAL; 2412 con->delay = BASE_DELAY_INTERVAL;
2399 else if (con->delay < MAX_DELAY_INTERVAL) 2413 else if (con->delay < MAX_DELAY_INTERVAL)
2400 con->delay *= 2; 2414 con->delay *= 2;
2401 con->ops->get(con); 2415 set_bit(CON_FLAG_BACKOFF, &con->flags);
2402 if (queue_delayed_work(ceph_msgr_wq, &con->work, 2416 queue_con(con);
2403 round_jiffies_relative(con->delay))) {
2404 dout("fault queued %p delay %lu\n", con, con->delay);
2405 } else {
2406 con->ops->put(con);
2407 dout("fault failed to queue %p delay %lu, backoff\n",
2408 con, con->delay);
2409 /*
2410 * In many cases we see a socket state change
2411 * while con_work is running and end up
2412 * queuing (non-delayed) work, such that we
2413 * can't backoff with a delay. Set a flag so
2414 * that when con_work restarts we schedule the
2415 * delay then.
2416 */
2417 set_bit(CON_FLAG_BACKOFF, &con->flags);
2418 }
2419 } 2417 }
2420 2418
2421out_unlock: 2419out_unlock:
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index c1d756cc7448..eb9a44478764 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -221,6 +221,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
221 kref_init(&req->r_kref); 221 kref_init(&req->r_kref);
222 init_completion(&req->r_completion); 222 init_completion(&req->r_completion);
223 init_completion(&req->r_safe_completion); 223 init_completion(&req->r_safe_completion);
224 RB_CLEAR_NODE(&req->r_node);
224 INIT_LIST_HEAD(&req->r_unsafe_item); 225 INIT_LIST_HEAD(&req->r_unsafe_item);
225 INIT_LIST_HEAD(&req->r_linger_item); 226 INIT_LIST_HEAD(&req->r_linger_item);
226 INIT_LIST_HEAD(&req->r_linger_osd); 227 INIT_LIST_HEAD(&req->r_linger_osd);
@@ -580,7 +581,7 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc,
580 581
581 dout("__kick_osd_requests osd%d\n", osd->o_osd); 582 dout("__kick_osd_requests osd%d\n", osd->o_osd);
582 err = __reset_osd(osdc, osd); 583 err = __reset_osd(osdc, osd);
583 if (err == -EAGAIN) 584 if (err)
584 return; 585 return;
585 586
586 list_for_each_entry(req, &osd->o_requests, r_osd_item) { 587 list_for_each_entry(req, &osd->o_requests, r_osd_item) {
@@ -607,14 +608,6 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc,
607 } 608 }
608} 609}
609 610
610static void kick_osd_requests(struct ceph_osd_client *osdc,
611 struct ceph_osd *kickosd)
612{
613 mutex_lock(&osdc->request_mutex);
614 __kick_osd_requests(osdc, kickosd);
615 mutex_unlock(&osdc->request_mutex);
616}
617
618/* 611/*
619 * If the osd connection drops, we need to resubmit all requests. 612 * If the osd connection drops, we need to resubmit all requests.
620 */ 613 */
@@ -628,7 +621,9 @@ static void osd_reset(struct ceph_connection *con)
628 dout("osd_reset osd%d\n", osd->o_osd); 621 dout("osd_reset osd%d\n", osd->o_osd);
629 osdc = osd->o_osdc; 622 osdc = osd->o_osdc;
630 down_read(&osdc->map_sem); 623 down_read(&osdc->map_sem);
631 kick_osd_requests(osdc, osd); 624 mutex_lock(&osdc->request_mutex);
625 __kick_osd_requests(osdc, osd);
626 mutex_unlock(&osdc->request_mutex);
632 send_queued(osdc); 627 send_queued(osdc);
633 up_read(&osdc->map_sem); 628 up_read(&osdc->map_sem);
634} 629}
@@ -647,6 +642,7 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
647 atomic_set(&osd->o_ref, 1); 642 atomic_set(&osd->o_ref, 1);
648 osd->o_osdc = osdc; 643 osd->o_osdc = osdc;
649 osd->o_osd = onum; 644 osd->o_osd = onum;
645 RB_CLEAR_NODE(&osd->o_node);
650 INIT_LIST_HEAD(&osd->o_requests); 646 INIT_LIST_HEAD(&osd->o_requests);
651 INIT_LIST_HEAD(&osd->o_linger_requests); 647 INIT_LIST_HEAD(&osd->o_linger_requests);
652 INIT_LIST_HEAD(&osd->o_osd_lru); 648 INIT_LIST_HEAD(&osd->o_osd_lru);
@@ -750,6 +746,7 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
750 if (list_empty(&osd->o_requests) && 746 if (list_empty(&osd->o_requests) &&
751 list_empty(&osd->o_linger_requests)) { 747 list_empty(&osd->o_linger_requests)) {
752 __remove_osd(osdc, osd); 748 __remove_osd(osdc, osd);
749 ret = -ENODEV;
753 } else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd], 750 } else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd],
754 &osd->o_con.peer_addr, 751 &osd->o_con.peer_addr,
755 sizeof(osd->o_con.peer_addr)) == 0 && 752 sizeof(osd->o_con.peer_addr)) == 0 &&
@@ -876,9 +873,9 @@ static void __unregister_request(struct ceph_osd_client *osdc,
876 req->r_osd = NULL; 873 req->r_osd = NULL;
877 } 874 }
878 875
876 list_del_init(&req->r_req_lru_item);
879 ceph_osdc_put_request(req); 877 ceph_osdc_put_request(req);
880 878
881 list_del_init(&req->r_req_lru_item);
882 if (osdc->num_requests == 0) { 879 if (osdc->num_requests == 0) {
883 dout(" no requests, canceling timeout\n"); 880 dout(" no requests, canceling timeout\n");
884 __cancel_osd_timeout(osdc); 881 __cancel_osd_timeout(osdc);
@@ -910,8 +907,8 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
910 struct ceph_osd_request *req) 907 struct ceph_osd_request *req)
911{ 908{
912 dout("__unregister_linger_request %p\n", req); 909 dout("__unregister_linger_request %p\n", req);
910 list_del_init(&req->r_linger_item);
913 if (req->r_osd) { 911 if (req->r_osd) {
914 list_del_init(&req->r_linger_item);
915 list_del_init(&req->r_linger_osd); 912 list_del_init(&req->r_linger_osd);
916 913
917 if (list_empty(&req->r_osd->o_requests) && 914 if (list_empty(&req->r_osd->o_requests) &&
@@ -1090,12 +1087,10 @@ static void handle_timeout(struct work_struct *work)
1090{ 1087{
1091 struct ceph_osd_client *osdc = 1088 struct ceph_osd_client *osdc =
1092 container_of(work, struct ceph_osd_client, timeout_work.work); 1089 container_of(work, struct ceph_osd_client, timeout_work.work);
1093 struct ceph_osd_request *req, *last_req = NULL; 1090 struct ceph_osd_request *req;
1094 struct ceph_osd *osd; 1091 struct ceph_osd *osd;
1095 unsigned long timeout = osdc->client->options->osd_timeout * HZ;
1096 unsigned long keepalive = 1092 unsigned long keepalive =
1097 osdc->client->options->osd_keepalive_timeout * HZ; 1093 osdc->client->options->osd_keepalive_timeout * HZ;
1098 unsigned long last_stamp = 0;
1099 struct list_head slow_osds; 1094 struct list_head slow_osds;
1100 dout("timeout\n"); 1095 dout("timeout\n");
1101 down_read(&osdc->map_sem); 1096 down_read(&osdc->map_sem);
@@ -1105,37 +1100,6 @@ static void handle_timeout(struct work_struct *work)
1105 mutex_lock(&osdc->request_mutex); 1100 mutex_lock(&osdc->request_mutex);
1106 1101
1107 /* 1102 /*
1108 * reset osds that appear to be _really_ unresponsive. this
1109 * is a failsafe measure.. we really shouldn't be getting to
1110 * this point if the system is working properly. the monitors
1111 * should mark the osd as failed and we should find out about
1112 * it from an updated osd map.
1113 */
1114 while (timeout && !list_empty(&osdc->req_lru)) {
1115 req = list_entry(osdc->req_lru.next, struct ceph_osd_request,
1116 r_req_lru_item);
1117
1118 /* hasn't been long enough since we sent it? */
1119 if (time_before(jiffies, req->r_stamp + timeout))
1120 break;
1121
1122 /* hasn't been long enough since it was acked? */
1123 if (req->r_request->ack_stamp == 0 ||
1124 time_before(jiffies, req->r_request->ack_stamp + timeout))
1125 break;
1126
1127 BUG_ON(req == last_req && req->r_stamp == last_stamp);
1128 last_req = req;
1129 last_stamp = req->r_stamp;
1130
1131 osd = req->r_osd;
1132 BUG_ON(!osd);
1133 pr_warning(" tid %llu timed out on osd%d, will reset osd\n",
1134 req->r_tid, osd->o_osd);
1135 __kick_osd_requests(osdc, osd);
1136 }
1137
1138 /*
1139 * ping osds that are a bit slow. this ensures that if there 1103 * ping osds that are a bit slow. this ensures that if there
1140 * is a break in the TCP connection we will notice, and reopen 1104 * is a break in the TCP connection we will notice, and reopen
1141 * a connection with that osd (from the fault callback). 1105 * a connection with that osd (from the fault callback).
@@ -1306,7 +1270,7 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
1306 * Requeue requests whose mapping to an OSD has changed. If requests map to 1270 * Requeue requests whose mapping to an OSD has changed. If requests map to
1307 * no osd, request a new map. 1271 * no osd, request a new map.
1308 * 1272 *
1309 * Caller should hold map_sem for read and request_mutex. 1273 * Caller should hold map_sem for read.
1310 */ 1274 */
1311static void kick_requests(struct ceph_osd_client *osdc, int force_resend) 1275static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
1312{ 1276{
@@ -1320,6 +1284,24 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
1320 for (p = rb_first(&osdc->requests); p; ) { 1284 for (p = rb_first(&osdc->requests); p; ) {
1321 req = rb_entry(p, struct ceph_osd_request, r_node); 1285 req = rb_entry(p, struct ceph_osd_request, r_node);
1322 p = rb_next(p); 1286 p = rb_next(p);
1287
1288 /*
1289 * For linger requests that have not yet been
1290 * registered, move them to the linger list; they'll
1291 * be sent to the osd in the loop below. Unregister
1292 * the request before re-registering it as a linger
1293 * request to ensure the __map_request() below
1294 * will decide it needs to be sent.
1295 */
1296 if (req->r_linger && list_empty(&req->r_linger_item)) {
1297 dout("%p tid %llu restart on osd%d\n",
1298 req, req->r_tid,
1299 req->r_osd ? req->r_osd->o_osd : -1);
1300 __unregister_request(osdc, req);
1301 __register_linger_request(osdc, req);
1302 continue;
1303 }
1304
1323 err = __map_request(osdc, req, force_resend); 1305 err = __map_request(osdc, req, force_resend);
1324 if (err < 0) 1306 if (err < 0)
1325 continue; /* error */ 1307 continue; /* error */
@@ -1334,17 +1316,6 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
1334 req->r_flags |= CEPH_OSD_FLAG_RETRY; 1316 req->r_flags |= CEPH_OSD_FLAG_RETRY;
1335 } 1317 }
1336 } 1318 }
1337 if (req->r_linger && list_empty(&req->r_linger_item)) {
1338 /*
1339 * register as a linger so that we will
1340 * re-submit below and get a new tid
1341 */
1342 dout("%p tid %llu restart on osd%d\n",
1343 req, req->r_tid,
1344 req->r_osd ? req->r_osd->o_osd : -1);
1345 __register_linger_request(osdc, req);
1346 __unregister_request(osdc, req);
1347 }
1348 } 1319 }
1349 1320
1350 list_for_each_entry_safe(req, nreq, &osdc->req_linger, 1321 list_for_each_entry_safe(req, nreq, &osdc->req_linger,
@@ -1352,6 +1323,7 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
1352 dout("linger req=%p req->r_osd=%p\n", req, req->r_osd); 1323 dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
1353 1324
1354 err = __map_request(osdc, req, force_resend); 1325 err = __map_request(osdc, req, force_resend);
1326 dout("__map_request returned %d\n", err);
1355 if (err == 0) 1327 if (err == 0)
1356 continue; /* no change and no osd was specified */ 1328 continue; /* no change and no osd was specified */
1357 if (err < 0) 1329 if (err < 0)
@@ -1364,8 +1336,8 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
1364 1336
1365 dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid, 1337 dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
1366 req->r_osd ? req->r_osd->o_osd : -1); 1338 req->r_osd ? req->r_osd->o_osd : -1);
1367 __unregister_linger_request(osdc, req);
1368 __register_request(osdc, req); 1339 __register_request(osdc, req);
1340 __unregister_linger_request(osdc, req);
1369 } 1341 }
1370 mutex_unlock(&osdc->request_mutex); 1342 mutex_unlock(&osdc->request_mutex);
1371 1343
@@ -1373,6 +1345,7 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
1373 dout("%d requests for down osds, need new map\n", needmap); 1345 dout("%d requests for down osds, need new map\n", needmap);
1374 ceph_monc_request_next_osdmap(&osdc->client->monc); 1346 ceph_monc_request_next_osdmap(&osdc->client->monc);
1375 } 1347 }
1348 reset_changed_osds(osdc);
1376} 1349}
1377 1350
1378 1351
@@ -1429,7 +1402,6 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1429 osdc->osdmap = newmap; 1402 osdc->osdmap = newmap;
1430 } 1403 }
1431 kick_requests(osdc, 0); 1404 kick_requests(osdc, 0);
1432 reset_changed_osds(osdc);
1433 } else { 1405 } else {
1434 dout("ignoring incremental map %u len %d\n", 1406 dout("ignoring incremental map %u len %d\n",
1435 epoch, maplen); 1407 epoch, maplen);
@@ -1599,6 +1571,7 @@ int ceph_osdc_create_event(struct ceph_osd_client *osdc,
1599 event->data = data; 1571 event->data = data;
1600 event->osdc = osdc; 1572 event->osdc = osdc;
1601 INIT_LIST_HEAD(&event->osd_node); 1573 INIT_LIST_HEAD(&event->osd_node);
1574 RB_CLEAR_NODE(&event->node);
1602 kref_init(&event->kref); /* one ref for us */ 1575 kref_init(&event->kref); /* one ref for us */
1603 kref_get(&event->kref); /* one ref for the caller */ 1576 kref_get(&event->kref); /* one ref for the caller */
1604 init_completion(&event->completion); 1577 init_completion(&event->completion);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 5433fb0eb3c6..de73214b5d26 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -469,6 +469,22 @@ static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
469 return NULL; 469 return NULL;
470} 470}
471 471
472const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
473{
474 struct ceph_pg_pool_info *pi;
475
476 if (id == CEPH_NOPOOL)
477 return NULL;
478
479 if (WARN_ON_ONCE(id > (u64) INT_MAX))
480 return NULL;
481
482 pi = __lookup_pg_pool(&map->pg_pools, (int) id);
483
484 return pi ? pi->name : NULL;
485}
486EXPORT_SYMBOL(ceph_pg_pool_name_by_id);
487
472int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name) 488int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
473{ 489{
474 struct rb_node *rbp; 490 struct rb_node *rbp;
@@ -645,10 +661,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
645 ceph_decode_32_safe(p, end, max, bad); 661 ceph_decode_32_safe(p, end, max, bad);
646 while (max--) { 662 while (max--) {
647 ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad); 663 ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
664 err = -ENOMEM;
648 pi = kzalloc(sizeof(*pi), GFP_NOFS); 665 pi = kzalloc(sizeof(*pi), GFP_NOFS);
649 if (!pi) 666 if (!pi)
650 goto bad; 667 goto bad;
651 pi->id = ceph_decode_32(p); 668 pi->id = ceph_decode_32(p);
669 err = -EINVAL;
652 ev = ceph_decode_8(p); /* encoding version */ 670 ev = ceph_decode_8(p); /* encoding version */
653 if (ev > CEPH_PG_POOL_VERSION) { 671 if (ev > CEPH_PG_POOL_VERSION) {
654 pr_warning("got unknown v %d > %d of ceph_pg_pool\n", 672 pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
@@ -664,8 +682,13 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
664 __insert_pg_pool(&map->pg_pools, pi); 682 __insert_pg_pool(&map->pg_pools, pi);
665 } 683 }
666 684
667 if (version >= 5 && __decode_pool_names(p, end, map) < 0) 685 if (version >= 5) {
668 goto bad; 686 err = __decode_pool_names(p, end, map);
687 if (err < 0) {
688 dout("fail to decode pool names");
689 goto bad;
690 }
691 }
669 692
670 ceph_decode_32_safe(p, end, map->pool_max, bad); 693 ceph_decode_32_safe(p, end, map->pool_max, bad);
671 694
@@ -745,7 +768,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
745 return map; 768 return map;
746 769
747bad: 770bad:
748 dout("osdmap_decode fail\n"); 771 dout("osdmap_decode fail err %d\n", err);
749 ceph_osdmap_destroy(map); 772 ceph_osdmap_destroy(map);
750 return ERR_PTR(err); 773 return ERR_PTR(err);
751} 774}
@@ -839,6 +862,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
839 if (ev > CEPH_PG_POOL_VERSION) { 862 if (ev > CEPH_PG_POOL_VERSION) {
840 pr_warning("got unknown v %d > %d of ceph_pg_pool\n", 863 pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
841 ev, CEPH_PG_POOL_VERSION); 864 ev, CEPH_PG_POOL_VERSION);
865 err = -EINVAL;
842 goto bad; 866 goto bad;
843 } 867 }
844 pi = __lookup_pg_pool(&map->pg_pools, pool); 868 pi = __lookup_pg_pool(&map->pg_pools, pool);
@@ -855,8 +879,11 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
855 if (err < 0) 879 if (err < 0)
856 goto bad; 880 goto bad;
857 } 881 }
858 if (version >= 5 && __decode_pool_names(p, end, map) < 0) 882 if (version >= 5) {
859 goto bad; 883 err = __decode_pool_names(p, end, map);
884 if (err < 0)
885 goto bad;
886 }
860 887
861 /* old_pool */ 888 /* old_pool */
862 ceph_decode_32_safe(p, end, len, bad); 889 ceph_decode_32_safe(p, end, len, bad);
@@ -932,15 +959,13 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
932 (void) __remove_pg_mapping(&map->pg_temp, pgid); 959 (void) __remove_pg_mapping(&map->pg_temp, pgid);
933 960
934 /* insert */ 961 /* insert */
935 if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) { 962 err = -EINVAL;
936 err = -EINVAL; 963 if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
937 goto bad; 964 goto bad;
938 } 965 err = -ENOMEM;
939 pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS); 966 pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
940 if (!pg) { 967 if (!pg)
941 err = -ENOMEM;
942 goto bad; 968 goto bad;
943 }
944 pg->pgid = pgid; 969 pg->pgid = pgid;
945 pg->len = pglen; 970 pg->len = pglen;
946 for (j = 0; j < pglen; j++) 971 for (j = 0; j < pglen; j++)
diff --git a/net/core/dev.c b/net/core/dev.c
index bda6d004f9f0..f64e439b4a00 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -176,8 +176,10 @@
176#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) 176#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
177 177
178static DEFINE_SPINLOCK(ptype_lock); 178static DEFINE_SPINLOCK(ptype_lock);
179static DEFINE_SPINLOCK(offload_lock);
179static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; 180static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
180static struct list_head ptype_all __read_mostly; /* Taps */ 181static struct list_head ptype_all __read_mostly; /* Taps */
182static struct list_head offload_base __read_mostly;
181 183
182/* 184/*
183 * The @dev_base_head list is protected by @dev_base_lock and the rtnl 185 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
@@ -201,6 +203,8 @@ static struct list_head ptype_all __read_mostly; /* Taps */
201DEFINE_RWLOCK(dev_base_lock); 203DEFINE_RWLOCK(dev_base_lock);
202EXPORT_SYMBOL(dev_base_lock); 204EXPORT_SYMBOL(dev_base_lock);
203 205
206seqcount_t devnet_rename_seq;
207
204static inline void dev_base_seq_inc(struct net *net) 208static inline void dev_base_seq_inc(struct net *net)
205{ 209{
206 while (++net->dev_base_seq == 0); 210 while (++net->dev_base_seq == 0);
@@ -470,6 +474,82 @@ void dev_remove_pack(struct packet_type *pt)
470} 474}
471EXPORT_SYMBOL(dev_remove_pack); 475EXPORT_SYMBOL(dev_remove_pack);
472 476
477
478/**
479 * dev_add_offload - register offload handlers
480 * @po: protocol offload declaration
481 *
482 * Add protocol offload handlers to the networking stack. The passed
483 * &proto_offload is linked into kernel lists and may not be freed until
484 * it has been removed from the kernel lists.
485 *
486 * This call does not sleep therefore it can not
487 * guarantee all CPU's that are in middle of receiving packets
488 * will see the new offload handlers (until the next received packet).
489 */
490void dev_add_offload(struct packet_offload *po)
491{
492 struct list_head *head = &offload_base;
493
494 spin_lock(&offload_lock);
495 list_add_rcu(&po->list, head);
496 spin_unlock(&offload_lock);
497}
498EXPORT_SYMBOL(dev_add_offload);
499
500/**
501 * __dev_remove_offload - remove offload handler
502 * @po: packet offload declaration
503 *
504 * Remove a protocol offload handler that was previously added to the
505 * kernel offload handlers by dev_add_offload(). The passed &offload_type
506 * is removed from the kernel lists and can be freed or reused once this
507 * function returns.
508 *
509 * The packet type might still be in use by receivers
510 * and must not be freed until after all the CPU's have gone
511 * through a quiescent state.
512 */
513void __dev_remove_offload(struct packet_offload *po)
514{
515 struct list_head *head = &offload_base;
516 struct packet_offload *po1;
517
518 spin_lock(&offload_lock);
519
520 list_for_each_entry(po1, head, list) {
521 if (po == po1) {
522 list_del_rcu(&po->list);
523 goto out;
524 }
525 }
526
527 pr_warn("dev_remove_offload: %p not found\n", po);
528out:
529 spin_unlock(&offload_lock);
530}
531EXPORT_SYMBOL(__dev_remove_offload);
532
533/**
534 * dev_remove_offload - remove packet offload handler
535 * @po: packet offload declaration
536 *
537 * Remove a packet offload handler that was previously added to the kernel
538 * offload handlers by dev_add_offload(). The passed &offload_type is
539 * removed from the kernel lists and can be freed or reused once this
540 * function returns.
541 *
542 * This call sleeps to guarantee that no CPU is looking at the packet
543 * type after return.
544 */
545void dev_remove_offload(struct packet_offload *po)
546{
547 __dev_remove_offload(po);
548
549 synchronize_net();
550}
551EXPORT_SYMBOL(dev_remove_offload);
552
473/****************************************************************************** 553/******************************************************************************
474 554
475 Device Boot-time Settings Routines 555 Device Boot-time Settings Routines
@@ -1013,22 +1093,31 @@ int dev_change_name(struct net_device *dev, const char *newname)
1013 if (dev->flags & IFF_UP) 1093 if (dev->flags & IFF_UP)
1014 return -EBUSY; 1094 return -EBUSY;
1015 1095
1016 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) 1096 write_seqcount_begin(&devnet_rename_seq);
1097
1098 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1099 write_seqcount_end(&devnet_rename_seq);
1017 return 0; 1100 return 0;
1101 }
1018 1102
1019 memcpy(oldname, dev->name, IFNAMSIZ); 1103 memcpy(oldname, dev->name, IFNAMSIZ);
1020 1104
1021 err = dev_get_valid_name(net, dev, newname); 1105 err = dev_get_valid_name(net, dev, newname);
1022 if (err < 0) 1106 if (err < 0) {
1107 write_seqcount_end(&devnet_rename_seq);
1023 return err; 1108 return err;
1109 }
1024 1110
1025rollback: 1111rollback:
1026 ret = device_rename(&dev->dev, dev->name); 1112 ret = device_rename(&dev->dev, dev->name);
1027 if (ret) { 1113 if (ret) {
1028 memcpy(dev->name, oldname, IFNAMSIZ); 1114 memcpy(dev->name, oldname, IFNAMSIZ);
1115 write_seqcount_end(&devnet_rename_seq);
1029 return ret; 1116 return ret;
1030 } 1117 }
1031 1118
1119 write_seqcount_end(&devnet_rename_seq);
1120
1032 write_lock_bh(&dev_base_lock); 1121 write_lock_bh(&dev_base_lock);
1033 hlist_del_rcu(&dev->name_hlist); 1122 hlist_del_rcu(&dev->name_hlist);
1034 write_unlock_bh(&dev_base_lock); 1123 write_unlock_bh(&dev_base_lock);
@@ -1046,6 +1135,7 @@ rollback:
1046 /* err >= 0 after dev_alloc_name() or stores the first errno */ 1135 /* err >= 0 after dev_alloc_name() or stores the first errno */
1047 if (err >= 0) { 1136 if (err >= 0) {
1048 err = ret; 1137 err = ret;
1138 write_seqcount_begin(&devnet_rename_seq);
1049 memcpy(dev->name, oldname, IFNAMSIZ); 1139 memcpy(dev->name, oldname, IFNAMSIZ);
1050 goto rollback; 1140 goto rollback;
1051 } else { 1141 } else {
@@ -1075,10 +1165,8 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1075 return -EINVAL; 1165 return -EINVAL;
1076 1166
1077 if (!len) { 1167 if (!len) {
1078 if (dev->ifalias) { 1168 kfree(dev->ifalias);
1079 kfree(dev->ifalias); 1169 dev->ifalias = NULL;
1080 dev->ifalias = NULL;
1081 }
1082 return 0; 1170 return 0;
1083 } 1171 }
1084 1172
@@ -1994,7 +2082,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb,
1994 netdev_features_t features) 2082 netdev_features_t features)
1995{ 2083{
1996 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 2084 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1997 struct packet_type *ptype; 2085 struct packet_offload *ptype;
1998 __be16 type = skb->protocol; 2086 __be16 type = skb->protocol;
1999 int vlan_depth = ETH_HLEN; 2087 int vlan_depth = ETH_HLEN;
2000 int err; 2088 int err;
@@ -2023,18 +2111,17 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb,
2023 } 2111 }
2024 2112
2025 rcu_read_lock(); 2113 rcu_read_lock();
2026 list_for_each_entry_rcu(ptype, 2114 list_for_each_entry_rcu(ptype, &offload_base, list) {
2027 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { 2115 if (ptype->type == type && ptype->callbacks.gso_segment) {
2028 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
2029 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { 2116 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2030 err = ptype->gso_send_check(skb); 2117 err = ptype->callbacks.gso_send_check(skb);
2031 segs = ERR_PTR(err); 2118 segs = ERR_PTR(err);
2032 if (err || skb_gso_ok(skb, features)) 2119 if (err || skb_gso_ok(skb, features))
2033 break; 2120 break;
2034 __skb_push(skb, (skb->data - 2121 __skb_push(skb, (skb->data -
2035 skb_network_header(skb))); 2122 skb_network_header(skb)));
2036 } 2123 }
2037 segs = ptype->gso_segment(skb, features); 2124 segs = ptype->callbacks.gso_segment(skb, features);
2038 break; 2125 break;
2039 } 2126 }
2040 } 2127 }
@@ -2237,6 +2324,13 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2237 skb->vlan_tci = 0; 2324 skb->vlan_tci = 0;
2238 } 2325 }
2239 2326
2327 /* If encapsulation offload request, verify we are testing
2328 * hardware encapsulation features instead of standard
2329 * features for the netdev
2330 */
2331 if (skb->encapsulation)
2332 features &= dev->hw_enc_features;
2333
2240 if (netif_needs_gso(skb, features)) { 2334 if (netif_needs_gso(skb, features)) {
2241 if (unlikely(dev_gso_segment(skb, features))) 2335 if (unlikely(dev_gso_segment(skb, features)))
2242 goto out_kfree_skb; 2336 goto out_kfree_skb;
@@ -2252,8 +2346,12 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2252 * checksumming here. 2346 * checksumming here.
2253 */ 2347 */
2254 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2348 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2255 skb_set_transport_header(skb, 2349 if (skb->encapsulation)
2256 skb_checksum_start_offset(skb)); 2350 skb_set_inner_transport_header(skb,
2351 skb_checksum_start_offset(skb));
2352 else
2353 skb_set_transport_header(skb,
2354 skb_checksum_start_offset(skb));
2257 if (!(features & NETIF_F_ALL_CSUM) && 2355 if (!(features & NETIF_F_ALL_CSUM) &&
2258 skb_checksum_help(skb)) 2356 skb_checksum_help(skb))
2259 goto out_kfree_skb; 2357 goto out_kfree_skb;
@@ -2818,8 +2916,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2818 if (unlikely(tcpu != next_cpu) && 2916 if (unlikely(tcpu != next_cpu) &&
2819 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || 2917 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2820 ((int)(per_cpu(softnet_data, tcpu).input_queue_head - 2918 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2821 rflow->last_qtail)) >= 0)) 2919 rflow->last_qtail)) >= 0)) {
2920 tcpu = next_cpu;
2822 rflow = set_rps_cpu(dev, skb, rflow, next_cpu); 2921 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
2922 }
2823 2923
2824 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { 2924 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2825 *rflowp = rflow; 2925 *rflowp = rflow;
@@ -3444,11 +3544,13 @@ static void flush_backlog(void *arg)
3444 3544
3445static int napi_gro_complete(struct sk_buff *skb) 3545static int napi_gro_complete(struct sk_buff *skb)
3446{ 3546{
3447 struct packet_type *ptype; 3547 struct packet_offload *ptype;
3448 __be16 type = skb->protocol; 3548 __be16 type = skb->protocol;
3449 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; 3549 struct list_head *head = &offload_base;
3450 int err = -ENOENT; 3550 int err = -ENOENT;
3451 3551
3552 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3553
3452 if (NAPI_GRO_CB(skb)->count == 1) { 3554 if (NAPI_GRO_CB(skb)->count == 1) {
3453 skb_shinfo(skb)->gso_size = 0; 3555 skb_shinfo(skb)->gso_size = 0;
3454 goto out; 3556 goto out;
@@ -3456,10 +3558,10 @@ static int napi_gro_complete(struct sk_buff *skb)
3456 3558
3457 rcu_read_lock(); 3559 rcu_read_lock();
3458 list_for_each_entry_rcu(ptype, head, list) { 3560 list_for_each_entry_rcu(ptype, head, list) {
3459 if (ptype->type != type || ptype->dev || !ptype->gro_complete) 3561 if (ptype->type != type || !ptype->callbacks.gro_complete)
3460 continue; 3562 continue;
3461 3563
3462 err = ptype->gro_complete(skb); 3564 err = ptype->callbacks.gro_complete(skb);
3463 break; 3565 break;
3464 } 3566 }
3465 rcu_read_unlock(); 3567 rcu_read_unlock();
@@ -3503,12 +3605,34 @@ void napi_gro_flush(struct napi_struct *napi, bool flush_old)
3503} 3605}
3504EXPORT_SYMBOL(napi_gro_flush); 3606EXPORT_SYMBOL(napi_gro_flush);
3505 3607
3506enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3608static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3609{
3610 struct sk_buff *p;
3611 unsigned int maclen = skb->dev->hard_header_len;
3612
3613 for (p = napi->gro_list; p; p = p->next) {
3614 unsigned long diffs;
3615
3616 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3617 diffs |= p->vlan_tci ^ skb->vlan_tci;
3618 if (maclen == ETH_HLEN)
3619 diffs |= compare_ether_header(skb_mac_header(p),
3620 skb_gro_mac_header(skb));
3621 else if (!diffs)
3622 diffs = memcmp(skb_mac_header(p),
3623 skb_gro_mac_header(skb),
3624 maclen);
3625 NAPI_GRO_CB(p)->same_flow = !diffs;
3626 NAPI_GRO_CB(p)->flush = 0;
3627 }
3628}
3629
3630static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3507{ 3631{
3508 struct sk_buff **pp = NULL; 3632 struct sk_buff **pp = NULL;
3509 struct packet_type *ptype; 3633 struct packet_offload *ptype;
3510 __be16 type = skb->protocol; 3634 __be16 type = skb->protocol;
3511 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; 3635 struct list_head *head = &offload_base;
3512 int same_flow; 3636 int same_flow;
3513 int mac_len; 3637 int mac_len;
3514 enum gro_result ret; 3638 enum gro_result ret;
@@ -3519,9 +3643,11 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3519 if (skb_is_gso(skb) || skb_has_frag_list(skb)) 3643 if (skb_is_gso(skb) || skb_has_frag_list(skb))
3520 goto normal; 3644 goto normal;
3521 3645
3646 gro_list_prepare(napi, skb);
3647
3522 rcu_read_lock(); 3648 rcu_read_lock();
3523 list_for_each_entry_rcu(ptype, head, list) { 3649 list_for_each_entry_rcu(ptype, head, list) {
3524 if (ptype->type != type || ptype->dev || !ptype->gro_receive) 3650 if (ptype->type != type || !ptype->callbacks.gro_receive)
3525 continue; 3651 continue;
3526 3652
3527 skb_set_network_header(skb, skb_gro_offset(skb)); 3653 skb_set_network_header(skb, skb_gro_offset(skb));
@@ -3531,7 +3657,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3531 NAPI_GRO_CB(skb)->flush = 0; 3657 NAPI_GRO_CB(skb)->flush = 0;
3532 NAPI_GRO_CB(skb)->free = 0; 3658 NAPI_GRO_CB(skb)->free = 0;
3533 3659
3534 pp = ptype->gro_receive(&napi->gro_list, skb); 3660 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
3535 break; 3661 break;
3536 } 3662 }
3537 rcu_read_unlock(); 3663 rcu_read_unlock();
@@ -3594,34 +3720,9 @@ normal:
3594 ret = GRO_NORMAL; 3720 ret = GRO_NORMAL;
3595 goto pull; 3721 goto pull;
3596} 3722}
3597EXPORT_SYMBOL(dev_gro_receive);
3598
3599static inline gro_result_t
3600__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3601{
3602 struct sk_buff *p;
3603 unsigned int maclen = skb->dev->hard_header_len;
3604
3605 for (p = napi->gro_list; p; p = p->next) {
3606 unsigned long diffs;
3607
3608 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3609 diffs |= p->vlan_tci ^ skb->vlan_tci;
3610 if (maclen == ETH_HLEN)
3611 diffs |= compare_ether_header(skb_mac_header(p),
3612 skb_gro_mac_header(skb));
3613 else if (!diffs)
3614 diffs = memcmp(skb_mac_header(p),
3615 skb_gro_mac_header(skb),
3616 maclen);
3617 NAPI_GRO_CB(p)->same_flow = !diffs;
3618 NAPI_GRO_CB(p)->flush = 0;
3619 }
3620 3723
3621 return dev_gro_receive(napi, skb);
3622}
3623 3724
3624gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) 3725static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
3625{ 3726{
3626 switch (ret) { 3727 switch (ret) {
3627 case GRO_NORMAL: 3728 case GRO_NORMAL:
@@ -3647,7 +3748,6 @@ gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
3647 3748
3648 return ret; 3749 return ret;
3649} 3750}
3650EXPORT_SYMBOL(napi_skb_finish);
3651 3751
3652static void skb_gro_reset_offset(struct sk_buff *skb) 3752static void skb_gro_reset_offset(struct sk_buff *skb)
3653{ 3753{
@@ -3670,7 +3770,7 @@ gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3670{ 3770{
3671 skb_gro_reset_offset(skb); 3771 skb_gro_reset_offset(skb);
3672 3772
3673 return napi_skb_finish(__napi_gro_receive(napi, skb), skb); 3773 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
3674} 3774}
3675EXPORT_SYMBOL(napi_gro_receive); 3775EXPORT_SYMBOL(napi_gro_receive);
3676 3776
@@ -3699,7 +3799,7 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi)
3699} 3799}
3700EXPORT_SYMBOL(napi_get_frags); 3800EXPORT_SYMBOL(napi_get_frags);
3701 3801
3702gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, 3802static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3703 gro_result_t ret) 3803 gro_result_t ret)
3704{ 3804{
3705 switch (ret) { 3805 switch (ret) {
@@ -3724,7 +3824,6 @@ gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3724 3824
3725 return ret; 3825 return ret;
3726} 3826}
3727EXPORT_SYMBOL(napi_frags_finish);
3728 3827
3729static struct sk_buff *napi_frags_skb(struct napi_struct *napi) 3828static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3730{ 3829{
@@ -3769,7 +3868,7 @@ gro_result_t napi_gro_frags(struct napi_struct *napi)
3769 if (!skb) 3868 if (!skb)
3770 return GRO_DROP; 3869 return GRO_DROP;
3771 3870
3772 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb)); 3871 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
3773} 3872}
3774EXPORT_SYMBOL(napi_gro_frags); 3873EXPORT_SYMBOL(napi_gro_frags);
3775 3874
@@ -4071,6 +4170,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
4071{ 4170{
4072 struct net_device *dev; 4171 struct net_device *dev;
4073 struct ifreq ifr; 4172 struct ifreq ifr;
4173 unsigned seq;
4074 4174
4075 /* 4175 /*
4076 * Fetch the caller's info block. 4176 * Fetch the caller's info block.
@@ -4079,6 +4179,8 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
4079 if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) 4179 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4080 return -EFAULT; 4180 return -EFAULT;
4081 4181
4182retry:
4183 seq = read_seqcount_begin(&devnet_rename_seq);
4082 rcu_read_lock(); 4184 rcu_read_lock();
4083 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex); 4185 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
4084 if (!dev) { 4186 if (!dev) {
@@ -4088,6 +4190,8 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
4088 4190
4089 strcpy(ifr.ifr_name, dev->name); 4191 strcpy(ifr.ifr_name, dev->name);
4090 rcu_read_unlock(); 4192 rcu_read_unlock();
4193 if (read_seqcount_retry(&devnet_rename_seq, seq))
4194 goto retry;
4091 4195
4092 if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) 4196 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
4093 return -EFAULT; 4197 return -EFAULT;
@@ -4880,7 +4984,7 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
4880 else 4984 else
4881 dev->mtu = new_mtu; 4985 dev->mtu = new_mtu;
4882 4986
4883 if (!err && dev->flags & IFF_UP) 4987 if (!err)
4884 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); 4988 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4885 return err; 4989 return err;
4886} 4990}
@@ -5200,7 +5304,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
5200 case SIOCGMIIPHY: 5304 case SIOCGMIIPHY:
5201 case SIOCGMIIREG: 5305 case SIOCGMIIREG:
5202 case SIOCSIFNAME: 5306 case SIOCSIFNAME:
5203 if (!capable(CAP_NET_ADMIN)) 5307 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
5204 return -EPERM; 5308 return -EPERM;
5205 dev_load(net, ifr.ifr_name); 5309 dev_load(net, ifr.ifr_name);
5206 rtnl_lock(); 5310 rtnl_lock();
@@ -5221,16 +5325,25 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
5221 * - require strict serialization. 5325 * - require strict serialization.
5222 * - do not return a value 5326 * - do not return a value
5223 */ 5327 */
5328 case SIOCSIFMAP:
5329 case SIOCSIFTXQLEN:
5330 if (!capable(CAP_NET_ADMIN))
5331 return -EPERM;
5332 /* fall through */
5333 /*
5334 * These ioctl calls:
5335 * - require local superuser power.
5336 * - require strict serialization.
5337 * - do not return a value
5338 */
5224 case SIOCSIFFLAGS: 5339 case SIOCSIFFLAGS:
5225 case SIOCSIFMETRIC: 5340 case SIOCSIFMETRIC:
5226 case SIOCSIFMTU: 5341 case SIOCSIFMTU:
5227 case SIOCSIFMAP:
5228 case SIOCSIFHWADDR: 5342 case SIOCSIFHWADDR:
5229 case SIOCSIFSLAVE: 5343 case SIOCSIFSLAVE:
5230 case SIOCADDMULTI: 5344 case SIOCADDMULTI:
5231 case SIOCDELMULTI: 5345 case SIOCDELMULTI:
5232 case SIOCSIFHWBROADCAST: 5346 case SIOCSIFHWBROADCAST:
5233 case SIOCSIFTXQLEN:
5234 case SIOCSMIIREG: 5347 case SIOCSMIIREG:
5235 case SIOCBONDENSLAVE: 5348 case SIOCBONDENSLAVE:
5236 case SIOCBONDRELEASE: 5349 case SIOCBONDRELEASE:
@@ -5239,7 +5352,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
5239 case SIOCBRADDIF: 5352 case SIOCBRADDIF:
5240 case SIOCBRDELIF: 5353 case SIOCBRDELIF:
5241 case SIOCSHWTSTAMP: 5354 case SIOCSHWTSTAMP:
5242 if (!capable(CAP_NET_ADMIN)) 5355 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
5243 return -EPERM; 5356 return -EPERM;
5244 /* fall through */ 5357 /* fall through */
5245 case SIOCBONDSLAVEINFOQUERY: 5358 case SIOCBONDSLAVEINFOQUERY:
@@ -6008,6 +6121,14 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
6008 6121
6009static const struct ethtool_ops default_ethtool_ops; 6122static const struct ethtool_ops default_ethtool_ops;
6010 6123
6124void netdev_set_default_ethtool_ops(struct net_device *dev,
6125 const struct ethtool_ops *ops)
6126{
6127 if (dev->ethtool_ops == &default_ethtool_ops)
6128 dev->ethtool_ops = ops;
6129}
6130EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
6131
6011/** 6132/**
6012 * alloc_netdev_mqs - allocate network device 6133 * alloc_netdev_mqs - allocate network device
6013 * @sizeof_priv: size of private data to allocate space for 6134 * @sizeof_priv: size of private data to allocate space for
@@ -6264,7 +6385,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
6264 goto out; 6385 goto out;
6265 6386
6266 /* Ensure the device has been registrered */ 6387 /* Ensure the device has been registrered */
6267 err = -EINVAL;
6268 if (dev->reg_state != NETREG_REGISTERED) 6388 if (dev->reg_state != NETREG_REGISTERED)
6269 goto out; 6389 goto out;
6270 6390
@@ -6319,6 +6439,9 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
6319 dev_uc_flush(dev); 6439 dev_uc_flush(dev);
6320 dev_mc_flush(dev); 6440 dev_mc_flush(dev);
6321 6441
6442 /* Send a netdev-removed uevent to the old namespace */
6443 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
6444
6322 /* Actually switch the network namespace */ 6445 /* Actually switch the network namespace */
6323 dev_net_set(dev, net); 6446 dev_net_set(dev, net);
6324 6447
@@ -6330,6 +6453,9 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
6330 dev->iflink = dev->ifindex; 6453 dev->iflink = dev->ifindex;
6331 } 6454 }
6332 6455
6456 /* Send a netdev-add uevent to the new namespace */
6457 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
6458
6333 /* Fixup kobjects */ 6459 /* Fixup kobjects */
6334 err = device_rename(&dev->dev, dev->name); 6460 err = device_rename(&dev->dev, dev->name);
6335 WARN_ON(err); 6461 WARN_ON(err);
@@ -6662,6 +6788,8 @@ static int __init net_dev_init(void)
6662 for (i = 0; i < PTYPE_HASH_SIZE; i++) 6788 for (i = 0; i < PTYPE_HASH_SIZE; i++)
6663 INIT_LIST_HEAD(&ptype_base[i]); 6789 INIT_LIST_HEAD(&ptype_base[i]);
6664 6790
6791 INIT_LIST_HEAD(&offload_base);
6792
6665 if (register_pernet_subsys(&netdev_net_ops)) 6793 if (register_pernet_subsys(&netdev_net_ops))
6666 goto out; 6794 goto out;
6667 6795
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 87cc17db2d56..b079c7bbc157 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -319,7 +319,8 @@ int dev_addr_del(struct net_device *dev, const unsigned char *addr,
319 */ 319 */
320 ha = list_first_entry(&dev->dev_addrs.list, 320 ha = list_first_entry(&dev->dev_addrs.list,
321 struct netdev_hw_addr, list); 321 struct netdev_hw_addr, list);
322 if (ha->addr == dev->dev_addr && ha->refcount == 1) 322 if (!memcmp(ha->addr, addr, dev->addr_len) &&
323 ha->type == addr_type && ha->refcount == 1)
323 return -ENOENT; 324 return -ENOENT;
324 325
325 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len, 326 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 4d64cc2e3fa9..a8705432e4b1 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1460,7 +1460,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1460 case ETHTOOL_GEEE: 1460 case ETHTOOL_GEEE:
1461 break; 1461 break;
1462 default: 1462 default:
1463 if (!capable(CAP_NET_ADMIN)) 1463 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1464 return -EPERM; 1464 return -EPERM;
1465 } 1465 }
1466 1466
diff --git a/net/core/filter.c b/net/core/filter.c
index 3d92ebb7fbcf..c23543cba132 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -39,6 +39,7 @@
39#include <linux/reciprocal_div.h> 39#include <linux/reciprocal_div.h>
40#include <linux/ratelimit.h> 40#include <linux/ratelimit.h>
41#include <linux/seccomp.h> 41#include <linux/seccomp.h>
42#include <linux/if_vlan.h>
42 43
43/* No hurry in this branch 44/* No hurry in this branch
44 * 45 *
@@ -341,6 +342,12 @@ load_b:
341 case BPF_S_ANC_CPU: 342 case BPF_S_ANC_CPU:
342 A = raw_smp_processor_id(); 343 A = raw_smp_processor_id();
343 continue; 344 continue;
345 case BPF_S_ANC_VLAN_TAG:
346 A = vlan_tx_tag_get(skb);
347 continue;
348 case BPF_S_ANC_VLAN_TAG_PRESENT:
349 A = !!vlan_tx_tag_present(skb);
350 continue;
344 case BPF_S_ANC_NLATTR: { 351 case BPF_S_ANC_NLATTR: {
345 struct nlattr *nla; 352 struct nlattr *nla;
346 353
@@ -600,6 +607,8 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
600 ANCILLARY(RXHASH); 607 ANCILLARY(RXHASH);
601 ANCILLARY(CPU); 608 ANCILLARY(CPU);
602 ANCILLARY(ALU_XOR_X); 609 ANCILLARY(ALU_XOR_X);
610 ANCILLARY(VLAN_TAG);
611 ANCILLARY(VLAN_TAG_PRESENT);
603 } 612 }
604 } 613 }
605 ftest->code = code; 614 ftest->code = code;
@@ -751,3 +760,133 @@ int sk_detach_filter(struct sock *sk)
751 return ret; 760 return ret;
752} 761}
753EXPORT_SYMBOL_GPL(sk_detach_filter); 762EXPORT_SYMBOL_GPL(sk_detach_filter);
763
764static void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
765{
766 static const u16 decodes[] = {
767 [BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K,
768 [BPF_S_ALU_ADD_X] = BPF_ALU|BPF_ADD|BPF_X,
769 [BPF_S_ALU_SUB_K] = BPF_ALU|BPF_SUB|BPF_K,
770 [BPF_S_ALU_SUB_X] = BPF_ALU|BPF_SUB|BPF_X,
771 [BPF_S_ALU_MUL_K] = BPF_ALU|BPF_MUL|BPF_K,
772 [BPF_S_ALU_MUL_X] = BPF_ALU|BPF_MUL|BPF_X,
773 [BPF_S_ALU_DIV_X] = BPF_ALU|BPF_DIV|BPF_X,
774 [BPF_S_ALU_MOD_K] = BPF_ALU|BPF_MOD|BPF_K,
775 [BPF_S_ALU_MOD_X] = BPF_ALU|BPF_MOD|BPF_X,
776 [BPF_S_ALU_AND_K] = BPF_ALU|BPF_AND|BPF_K,
777 [BPF_S_ALU_AND_X] = BPF_ALU|BPF_AND|BPF_X,
778 [BPF_S_ALU_OR_K] = BPF_ALU|BPF_OR|BPF_K,
779 [BPF_S_ALU_OR_X] = BPF_ALU|BPF_OR|BPF_X,
780 [BPF_S_ALU_XOR_K] = BPF_ALU|BPF_XOR|BPF_K,
781 [BPF_S_ALU_XOR_X] = BPF_ALU|BPF_XOR|BPF_X,
782 [BPF_S_ALU_LSH_K] = BPF_ALU|BPF_LSH|BPF_K,
783 [BPF_S_ALU_LSH_X] = BPF_ALU|BPF_LSH|BPF_X,
784 [BPF_S_ALU_RSH_K] = BPF_ALU|BPF_RSH|BPF_K,
785 [BPF_S_ALU_RSH_X] = BPF_ALU|BPF_RSH|BPF_X,
786 [BPF_S_ALU_NEG] = BPF_ALU|BPF_NEG,
787 [BPF_S_LD_W_ABS] = BPF_LD|BPF_W|BPF_ABS,
788 [BPF_S_LD_H_ABS] = BPF_LD|BPF_H|BPF_ABS,
789 [BPF_S_LD_B_ABS] = BPF_LD|BPF_B|BPF_ABS,
790 [BPF_S_ANC_PROTOCOL] = BPF_LD|BPF_B|BPF_ABS,
791 [BPF_S_ANC_PKTTYPE] = BPF_LD|BPF_B|BPF_ABS,
792 [BPF_S_ANC_IFINDEX] = BPF_LD|BPF_B|BPF_ABS,
793 [BPF_S_ANC_NLATTR] = BPF_LD|BPF_B|BPF_ABS,
794 [BPF_S_ANC_NLATTR_NEST] = BPF_LD|BPF_B|BPF_ABS,
795 [BPF_S_ANC_MARK] = BPF_LD|BPF_B|BPF_ABS,
796 [BPF_S_ANC_QUEUE] = BPF_LD|BPF_B|BPF_ABS,
797 [BPF_S_ANC_HATYPE] = BPF_LD|BPF_B|BPF_ABS,
798 [BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS,
799 [BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS,
800 [BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS,
801 [BPF_S_ANC_SECCOMP_LD_W] = BPF_LD|BPF_B|BPF_ABS,
802 [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS,
803 [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
804 [BPF_S_LD_W_LEN] = BPF_LD|BPF_W|BPF_LEN,
805 [BPF_S_LD_W_IND] = BPF_LD|BPF_W|BPF_IND,
806 [BPF_S_LD_H_IND] = BPF_LD|BPF_H|BPF_IND,
807 [BPF_S_LD_B_IND] = BPF_LD|BPF_B|BPF_IND,
808 [BPF_S_LD_IMM] = BPF_LD|BPF_IMM,
809 [BPF_S_LDX_W_LEN] = BPF_LDX|BPF_W|BPF_LEN,
810 [BPF_S_LDX_B_MSH] = BPF_LDX|BPF_B|BPF_MSH,
811 [BPF_S_LDX_IMM] = BPF_LDX|BPF_IMM,
812 [BPF_S_MISC_TAX] = BPF_MISC|BPF_TAX,
813 [BPF_S_MISC_TXA] = BPF_MISC|BPF_TXA,
814 [BPF_S_RET_K] = BPF_RET|BPF_K,
815 [BPF_S_RET_A] = BPF_RET|BPF_A,
816 [BPF_S_ALU_DIV_K] = BPF_ALU|BPF_DIV|BPF_K,
817 [BPF_S_LD_MEM] = BPF_LD|BPF_MEM,
818 [BPF_S_LDX_MEM] = BPF_LDX|BPF_MEM,
819 [BPF_S_ST] = BPF_ST,
820 [BPF_S_STX] = BPF_STX,
821 [BPF_S_JMP_JA] = BPF_JMP|BPF_JA,
822 [BPF_S_JMP_JEQ_K] = BPF_JMP|BPF_JEQ|BPF_K,
823 [BPF_S_JMP_JEQ_X] = BPF_JMP|BPF_JEQ|BPF_X,
824 [BPF_S_JMP_JGE_K] = BPF_JMP|BPF_JGE|BPF_K,
825 [BPF_S_JMP_JGE_X] = BPF_JMP|BPF_JGE|BPF_X,
826 [BPF_S_JMP_JGT_K] = BPF_JMP|BPF_JGT|BPF_K,
827 [BPF_S_JMP_JGT_X] = BPF_JMP|BPF_JGT|BPF_X,
828 [BPF_S_JMP_JSET_K] = BPF_JMP|BPF_JSET|BPF_K,
829 [BPF_S_JMP_JSET_X] = BPF_JMP|BPF_JSET|BPF_X,
830 };
831 u16 code;
832
833 code = filt->code;
834
835 to->code = decodes[code];
836 to->jt = filt->jt;
837 to->jf = filt->jf;
838
839 if (code == BPF_S_ALU_DIV_K) {
840 /*
841 * When loaded this rule user gave us X, which was
842 * translated into R = r(X). Now we calculate the
843 * RR = r(R) and report it back. If next time this
844 * value is loaded and RRR = r(RR) is calculated
845 * then the R == RRR will be true.
846 *
847 * One exception. X == 1 translates into R == 0 and
848 * we can't calculate RR out of it with r().
849 */
850
851 if (filt->k == 0)
852 to->k = 1;
853 else
854 to->k = reciprocal_value(filt->k);
855
856 BUG_ON(reciprocal_value(to->k) != filt->k);
857 } else
858 to->k = filt->k;
859}
860
861int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len)
862{
863 struct sk_filter *filter;
864 int i, ret;
865
866 lock_sock(sk);
867 filter = rcu_dereference_protected(sk->sk_filter,
868 sock_owned_by_user(sk));
869 ret = 0;
870 if (!filter)
871 goto out;
872 ret = filter->len;
873 if (!len)
874 goto out;
875 ret = -EINVAL;
876 if (len < filter->len)
877 goto out;
878
879 ret = -EFAULT;
880 for (i = 0; i < filter->len; i++) {
881 struct sock_filter fb;
882
883 sk_decode_filter(&filter->insns[i], &fb);
884 if (copy_to_user(&ubuf[i], &fb, sizeof(fb)))
885 goto out;
886 }
887
888 ret = filter->len;
889out:
890 release_sock(sk);
891 return ret;
892}
diff --git a/net/core/flow.c b/net/core/flow.c
index e318c7e98042..b0901ee5a002 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -327,11 +327,9 @@ static void flow_cache_flush_tasklet(unsigned long data)
327static void flow_cache_flush_per_cpu(void *data) 327static void flow_cache_flush_per_cpu(void *data)
328{ 328{
329 struct flow_flush_info *info = data; 329 struct flow_flush_info *info = data;
330 int cpu;
331 struct tasklet_struct *tasklet; 330 struct tasklet_struct *tasklet;
332 331
333 cpu = smp_processor_id(); 332 tasklet = this_cpu_ptr(&info->cache->percpu->flush_tasklet);
334 tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet;
335 tasklet->data = (unsigned long)info; 333 tasklet->data = (unsigned long)info;
336 tasklet_schedule(tasklet); 334 tasklet_schedule(tasklet);
337} 335}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 22571488730a..c815f285e5ab 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1787,8 +1787,7 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1787 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes) || 1787 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes) ||
1788 /* approximative value for deprecated QUEUE_LEN (in packets) */ 1788 /* approximative value for deprecated QUEUE_LEN (in packets) */
1789 nla_put_u32(skb, NDTPA_QUEUE_LEN, 1789 nla_put_u32(skb, NDTPA_QUEUE_LEN,
1790 DIV_ROUND_UP(parms->queue_len_bytes, 1790 parms->queue_len_bytes / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1791 SKB_TRUESIZE(ETH_FRAME_LEN))) ||
1792 nla_put_u32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen) || 1791 nla_put_u32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen) ||
1793 nla_put_u32(skb, NDTPA_APP_PROBES, parms->app_probes) || 1792 nla_put_u32(skb, NDTPA_APP_PROBES, parms->app_probes) ||
1794 nla_put_u32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes) || 1793 nla_put_u32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes) ||
@@ -2770,6 +2769,8 @@ EXPORT_SYMBOL(neigh_app_ns);
2770#endif /* CONFIG_ARPD */ 2769#endif /* CONFIG_ARPD */
2771 2770
2772#ifdef CONFIG_SYSCTL 2771#ifdef CONFIG_SYSCTL
2772static int zero;
2773static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
2773 2774
2774static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer, 2775static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
2775 size_t *lenp, loff_t *ppos) 2776 size_t *lenp, loff_t *ppos)
@@ -2777,9 +2778,13 @@ static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
2777 int size, ret; 2778 int size, ret;
2778 ctl_table tmp = *ctl; 2779 ctl_table tmp = *ctl;
2779 2780
2781 tmp.extra1 = &zero;
2782 tmp.extra2 = &unres_qlen_max;
2780 tmp.data = &size; 2783 tmp.data = &size;
2781 size = DIV_ROUND_UP(*(int *)ctl->data, SKB_TRUESIZE(ETH_FRAME_LEN)); 2784
2782 ret = proc_dointvec(&tmp, write, buffer, lenp, ppos); 2785 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
2786 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2787
2783 if (write && !ret) 2788 if (write && !ret)
2784 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN); 2789 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
2785 return ret; 2790 return ret;
@@ -2865,7 +2870,8 @@ static struct neigh_sysctl_table {
2865 .procname = "unres_qlen_bytes", 2870 .procname = "unres_qlen_bytes",
2866 .maxlen = sizeof(int), 2871 .maxlen = sizeof(int),
2867 .mode = 0644, 2872 .mode = 0644,
2868 .proc_handler = proc_dointvec, 2873 .extra1 = &zero,
2874 .proc_handler = proc_dointvec_minmax,
2869 }, 2875 },
2870 [NEIGH_VAR_PROXY_QLEN] = { 2876 [NEIGH_VAR_PROXY_QLEN] = {
2871 .procname = "proxy_qlen", 2877 .procname = "proxy_qlen",
@@ -2987,6 +2993,10 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2987 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev; 2993 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
2988 } 2994 }
2989 2995
2996 /* Don't export sysctls to unprivileged users */
2997 if (neigh_parms_net(p)->user_ns != &init_user_ns)
2998 t->neigh_vars[0].procname = NULL;
2999
2990 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s", 3000 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
2991 p_name, dev_name_source); 3001 p_name, dev_name_source);
2992 t->sysctl_header = 3002 t->sysctl_header =
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index bcf02f608cbf..28c5f5aa7ca7 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -18,11 +18,9 @@
18#include <net/sock.h> 18#include <net/sock.h>
19#include <net/net_namespace.h> 19#include <net/net_namespace.h>
20#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
21#include <linux/wireless.h>
22#include <linux/vmalloc.h> 21#include <linux/vmalloc.h>
23#include <linux/export.h> 22#include <linux/export.h>
24#include <linux/jiffies.h> 23#include <linux/jiffies.h>
25#include <net/wext.h>
26 24
27#include "net-sysfs.h" 25#include "net-sysfs.h"
28 26
@@ -73,11 +71,12 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
73 const char *buf, size_t len, 71 const char *buf, size_t len,
74 int (*set)(struct net_device *, unsigned long)) 72 int (*set)(struct net_device *, unsigned long))
75{ 73{
76 struct net_device *net = to_net_dev(dev); 74 struct net_device *netdev = to_net_dev(dev);
75 struct net *net = dev_net(netdev);
77 unsigned long new; 76 unsigned long new;
78 int ret = -EINVAL; 77 int ret = -EINVAL;
79 78
80 if (!capable(CAP_NET_ADMIN)) 79 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
81 return -EPERM; 80 return -EPERM;
82 81
83 ret = kstrtoul(buf, 0, &new); 82 ret = kstrtoul(buf, 0, &new);
@@ -87,8 +86,8 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
87 if (!rtnl_trylock()) 86 if (!rtnl_trylock())
88 return restart_syscall(); 87 return restart_syscall();
89 88
90 if (dev_isalive(net)) { 89 if (dev_isalive(netdev)) {
91 if ((ret = (*set)(net, new)) == 0) 90 if ((ret = (*set)(netdev, new)) == 0)
92 ret = len; 91 ret = len;
93 } 92 }
94 rtnl_unlock(); 93 rtnl_unlock();
@@ -264,6 +263,9 @@ static ssize_t store_tx_queue_len(struct device *dev,
264 struct device_attribute *attr, 263 struct device_attribute *attr,
265 const char *buf, size_t len) 264 const char *buf, size_t len)
266{ 265{
266 if (!capable(CAP_NET_ADMIN))
267 return -EPERM;
268
267 return netdev_store(dev, attr, buf, len, change_tx_queue_len); 269 return netdev_store(dev, attr, buf, len, change_tx_queue_len);
268} 270}
269 271
@@ -271,10 +273,11 @@ static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
271 const char *buf, size_t len) 273 const char *buf, size_t len)
272{ 274{
273 struct net_device *netdev = to_net_dev(dev); 275 struct net_device *netdev = to_net_dev(dev);
276 struct net *net = dev_net(netdev);
274 size_t count = len; 277 size_t count = len;
275 ssize_t ret; 278 ssize_t ret;
276 279
277 if (!capable(CAP_NET_ADMIN)) 280 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
278 return -EPERM; 281 return -EPERM;
279 282
280 /* ignore trailing newline */ 283 /* ignore trailing newline */
@@ -429,6 +432,17 @@ static struct attribute_group netstat_group = {
429 .name = "statistics", 432 .name = "statistics",
430 .attrs = netstat_attrs, 433 .attrs = netstat_attrs,
431}; 434};
435
436#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
437static struct attribute *wireless_attrs[] = {
438 NULL
439};
440
441static struct attribute_group wireless_group = {
442 .name = "wireless",
443 .attrs = wireless_attrs,
444};
445#endif
432#endif /* CONFIG_SYSFS */ 446#endif /* CONFIG_SYSFS */
433 447
434#ifdef CONFIG_RPS 448#ifdef CONFIG_RPS
@@ -1320,7 +1334,6 @@ struct kobj_ns_type_operations net_ns_type_operations = {
1320}; 1334};
1321EXPORT_SYMBOL_GPL(net_ns_type_operations); 1335EXPORT_SYMBOL_GPL(net_ns_type_operations);
1322 1336
1323#ifdef CONFIG_HOTPLUG
1324static int netdev_uevent(struct device *d, struct kobj_uevent_env *env) 1337static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
1325{ 1338{
1326 struct net_device *dev = to_net_dev(d); 1339 struct net_device *dev = to_net_dev(d);
@@ -1339,7 +1352,6 @@ static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
1339exit: 1352exit:
1340 return retval; 1353 return retval;
1341} 1354}
1342#endif
1343 1355
1344/* 1356/*
1345 * netdev_release -- destroy and free a dead device. 1357 * netdev_release -- destroy and free a dead device.
@@ -1368,9 +1380,7 @@ static struct class net_class = {
1368#ifdef CONFIG_SYSFS 1380#ifdef CONFIG_SYSFS
1369 .dev_attrs = net_class_attributes, 1381 .dev_attrs = net_class_attributes,
1370#endif /* CONFIG_SYSFS */ 1382#endif /* CONFIG_SYSFS */
1371#ifdef CONFIG_HOTPLUG
1372 .dev_uevent = netdev_uevent, 1383 .dev_uevent = netdev_uevent,
1373#endif
1374 .ns_type = &net_ns_type_operations, 1384 .ns_type = &net_ns_type_operations,
1375 .namespace = net_namespace, 1385 .namespace = net_namespace,
1376}; 1386};
@@ -1409,6 +1419,15 @@ int netdev_register_kobject(struct net_device *net)
1409 groups++; 1419 groups++;
1410 1420
1411 *groups++ = &netstat_group; 1421 *groups++ = &netstat_group;
1422
1423#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
1424 if (net->ieee80211_ptr)
1425 *groups++ = &wireless_group;
1426#if IS_ENABLED(CONFIG_WIRELESS_EXT)
1427 else if (net->wireless_handlers)
1428 *groups++ = &wireless_group;
1429#endif
1430#endif
1412#endif /* CONFIG_SYSFS */ 1431#endif /* CONFIG_SYSFS */
1413 1432
1414 error = device_add(dev); 1433 error = device_add(dev);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 42f1e1c7514f..8acce01b6dab 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -13,6 +13,7 @@
13#include <linux/proc_fs.h> 13#include <linux/proc_fs.h>
14#include <linux/file.h> 14#include <linux/file.h>
15#include <linux/export.h> 15#include <linux/export.h>
16#include <linux/user_namespace.h>
16#include <net/net_namespace.h> 17#include <net/net_namespace.h>
17#include <net/netns/generic.h> 18#include <net/netns/generic.h>
18 19
@@ -145,7 +146,7 @@ static void ops_free_list(const struct pernet_operations *ops,
145/* 146/*
146 * setup_net runs the initializers for the network namespace object. 147 * setup_net runs the initializers for the network namespace object.
147 */ 148 */
148static __net_init int setup_net(struct net *net) 149static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
149{ 150{
150 /* Must be called with net_mutex held */ 151 /* Must be called with net_mutex held */
151 const struct pernet_operations *ops, *saved_ops; 152 const struct pernet_operations *ops, *saved_ops;
@@ -155,6 +156,7 @@ static __net_init int setup_net(struct net *net)
155 atomic_set(&net->count, 1); 156 atomic_set(&net->count, 1);
156 atomic_set(&net->passive, 1); 157 atomic_set(&net->passive, 1);
157 net->dev_base_seq = 1; 158 net->dev_base_seq = 1;
159 net->user_ns = user_ns;
158 160
159#ifdef NETNS_REFCNT_DEBUG 161#ifdef NETNS_REFCNT_DEBUG
160 atomic_set(&net->use_count, 0); 162 atomic_set(&net->use_count, 0);
@@ -232,7 +234,8 @@ void net_drop_ns(void *p)
232 net_free(ns); 234 net_free(ns);
233} 235}
234 236
235struct net *copy_net_ns(unsigned long flags, struct net *old_net) 237struct net *copy_net_ns(unsigned long flags,
238 struct user_namespace *user_ns, struct net *old_net)
236{ 239{
237 struct net *net; 240 struct net *net;
238 int rv; 241 int rv;
@@ -243,8 +246,11 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
243 net = net_alloc(); 246 net = net_alloc();
244 if (!net) 247 if (!net)
245 return ERR_PTR(-ENOMEM); 248 return ERR_PTR(-ENOMEM);
249
250 get_user_ns(user_ns);
251
246 mutex_lock(&net_mutex); 252 mutex_lock(&net_mutex);
247 rv = setup_net(net); 253 rv = setup_net(net, user_ns);
248 if (rv == 0) { 254 if (rv == 0) {
249 rtnl_lock(); 255 rtnl_lock();
250 list_add_tail_rcu(&net->list, &net_namespace_list); 256 list_add_tail_rcu(&net->list, &net_namespace_list);
@@ -252,6 +258,7 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
252 } 258 }
253 mutex_unlock(&net_mutex); 259 mutex_unlock(&net_mutex);
254 if (rv < 0) { 260 if (rv < 0) {
261 put_user_ns(user_ns);
255 net_drop_ns(net); 262 net_drop_ns(net);
256 return ERR_PTR(rv); 263 return ERR_PTR(rv);
257 } 264 }
@@ -308,6 +315,7 @@ static void cleanup_net(struct work_struct *work)
308 /* Finally it is safe to free my network namespace structure */ 315 /* Finally it is safe to free my network namespace structure */
309 list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) { 316 list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
310 list_del_init(&net->exit_list); 317 list_del_init(&net->exit_list);
318 put_user_ns(net->user_ns);
311 net_drop_ns(net); 319 net_drop_ns(net);
312 } 320 }
313} 321}
@@ -347,13 +355,6 @@ struct net *get_net_ns_by_fd(int fd)
347} 355}
348 356
349#else 357#else
350struct net *copy_net_ns(unsigned long flags, struct net *old_net)
351{
352 if (flags & CLONE_NEWNET)
353 return ERR_PTR(-EINVAL);
354 return old_net;
355}
356
357struct net *get_net_ns_by_fd(int fd) 358struct net *get_net_ns_by_fd(int fd)
358{ 359{
359 return ERR_PTR(-EINVAL); 360 return ERR_PTR(-EINVAL);
@@ -380,6 +381,21 @@ struct net *get_net_ns_by_pid(pid_t pid)
380} 381}
381EXPORT_SYMBOL_GPL(get_net_ns_by_pid); 382EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
382 383
384static __net_init int net_ns_net_init(struct net *net)
385{
386 return proc_alloc_inum(&net->proc_inum);
387}
388
389static __net_exit void net_ns_net_exit(struct net *net)
390{
391 proc_free_inum(net->proc_inum);
392}
393
394static struct pernet_operations __net_initdata net_ns_ops = {
395 .init = net_ns_net_init,
396 .exit = net_ns_net_exit,
397};
398
383static int __init net_ns_init(void) 399static int __init net_ns_init(void)
384{ 400{
385 struct net_generic *ng; 401 struct net_generic *ng;
@@ -402,7 +418,7 @@ static int __init net_ns_init(void)
402 rcu_assign_pointer(init_net.gen, ng); 418 rcu_assign_pointer(init_net.gen, ng);
403 419
404 mutex_lock(&net_mutex); 420 mutex_lock(&net_mutex);
405 if (setup_net(&init_net)) 421 if (setup_net(&init_net, &init_user_ns))
406 panic("Could not setup the initial network namespace"); 422 panic("Could not setup the initial network namespace");
407 423
408 rtnl_lock(); 424 rtnl_lock();
@@ -411,6 +427,8 @@ static int __init net_ns_init(void)
411 427
412 mutex_unlock(&net_mutex); 428 mutex_unlock(&net_mutex);
413 429
430 register_pernet_subsys(&net_ns_ops);
431
414 return 0; 432 return 0;
415} 433}
416 434
@@ -629,16 +647,29 @@ static void netns_put(void *ns)
629 647
630static int netns_install(struct nsproxy *nsproxy, void *ns) 648static int netns_install(struct nsproxy *nsproxy, void *ns)
631{ 649{
650 struct net *net = ns;
651
652 if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
653 !nsown_capable(CAP_SYS_ADMIN))
654 return -EPERM;
655
632 put_net(nsproxy->net_ns); 656 put_net(nsproxy->net_ns);
633 nsproxy->net_ns = get_net(ns); 657 nsproxy->net_ns = get_net(net);
634 return 0; 658 return 0;
635} 659}
636 660
661static unsigned int netns_inum(void *ns)
662{
663 struct net *net = ns;
664 return net->proc_inum;
665}
666
637const struct proc_ns_operations netns_operations = { 667const struct proc_ns_operations netns_operations = {
638 .name = "net", 668 .name = "net",
639 .type = CLONE_NEWNET, 669 .type = CLONE_NEWNET,
640 .get = netns_get, 670 .get = netns_get,
641 .put = netns_put, 671 .put = netns_put,
642 .install = netns_install, 672 .install = netns_install,
673 .inum = netns_inum,
643}; 674};
644#endif 675#endif
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 77a0388fc3be..3151acf5ec13 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -674,7 +674,8 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
674 if ((delim = strchr(cur, '@')) == NULL) 674 if ((delim = strchr(cur, '@')) == NULL)
675 goto parse_failed; 675 goto parse_failed;
676 *delim = 0; 676 *delim = 0;
677 np->local_port = simple_strtol(cur, NULL, 10); 677 if (kstrtou16(cur, 10, &np->local_port))
678 goto parse_failed;
678 cur = delim; 679 cur = delim;
679 } 680 }
680 cur++; 681 cur++;
@@ -705,7 +706,8 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
705 *delim = 0; 706 *delim = 0;
706 if (*cur == ' ' || *cur == '\t') 707 if (*cur == ' ' || *cur == '\t')
707 np_info(np, "warning: whitespace is not allowed\n"); 708 np_info(np, "warning: whitespace is not allowed\n");
708 np->remote_port = simple_strtol(cur, NULL, 10); 709 if (kstrtou16(cur, 10, &np->remote_port))
710 goto parse_failed;
709 cur = delim; 711 cur = delim;
710 } 712 }
711 cur++; 713 cur++;
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 79285a36035f..5e67defe2cb0 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -27,11 +27,7 @@
27 27
28#include <linux/fdtable.h> 28#include <linux/fdtable.h>
29 29
30#define PRIOIDX_SZ 128 30#define PRIOMAP_MIN_SZ 128
31
32static unsigned long prioidx_map[PRIOIDX_SZ];
33static DEFINE_SPINLOCK(prioidx_map_lock);
34static atomic_t max_prioidx = ATOMIC_INIT(0);
35 31
36static inline struct cgroup_netprio_state *cgrp_netprio_state(struct cgroup *cgrp) 32static inline struct cgroup_netprio_state *cgrp_netprio_state(struct cgroup *cgrp)
37{ 33{
@@ -39,136 +35,157 @@ static inline struct cgroup_netprio_state *cgrp_netprio_state(struct cgroup *cgr
39 struct cgroup_netprio_state, css); 35 struct cgroup_netprio_state, css);
40} 36}
41 37
42static int get_prioidx(u32 *prio) 38/*
43{ 39 * Extend @dev->priomap so that it's large enough to accomodate
44 unsigned long flags; 40 * @target_idx. @dev->priomap.priomap_len > @target_idx after successful
45 u32 prioidx; 41 * return. Must be called under rtnl lock.
46 42 */
47 spin_lock_irqsave(&prioidx_map_lock, flags); 43static int extend_netdev_table(struct net_device *dev, u32 target_idx)
48 prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ);
49 if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ) {
50 spin_unlock_irqrestore(&prioidx_map_lock, flags);
51 return -ENOSPC;
52 }
53 set_bit(prioidx, prioidx_map);
54 if (atomic_read(&max_prioidx) < prioidx)
55 atomic_set(&max_prioidx, prioidx);
56 spin_unlock_irqrestore(&prioidx_map_lock, flags);
57 *prio = prioidx;
58 return 0;
59}
60
61static void put_prioidx(u32 idx)
62{ 44{
63 unsigned long flags; 45 struct netprio_map *old, *new;
64 46 size_t new_sz, new_len;
65 spin_lock_irqsave(&prioidx_map_lock, flags);
66 clear_bit(idx, prioidx_map);
67 spin_unlock_irqrestore(&prioidx_map_lock, flags);
68}
69 47
70static int extend_netdev_table(struct net_device *dev, u32 new_len) 48 /* is the existing priomap large enough? */
71{ 49 old = rtnl_dereference(dev->priomap);
72 size_t new_size = sizeof(struct netprio_map) + 50 if (old && old->priomap_len > target_idx)
73 ((sizeof(u32) * new_len)); 51 return 0;
74 struct netprio_map *new_priomap = kzalloc(new_size, GFP_KERNEL);
75 struct netprio_map *old_priomap;
76 52
77 old_priomap = rtnl_dereference(dev->priomap); 53 /*
54 * Determine the new size. Let's keep it power-of-two. We start
55 * from PRIOMAP_MIN_SZ and double it until it's large enough to
56 * accommodate @target_idx.
57 */
58 new_sz = PRIOMAP_MIN_SZ;
59 while (true) {
60 new_len = (new_sz - offsetof(struct netprio_map, priomap)) /
61 sizeof(new->priomap[0]);
62 if (new_len > target_idx)
63 break;
64 new_sz *= 2;
65 /* overflowed? */
66 if (WARN_ON(new_sz < PRIOMAP_MIN_SZ))
67 return -ENOSPC;
68 }
78 69
79 if (!new_priomap) { 70 /* allocate & copy */
71 new = kzalloc(new_sz, GFP_KERNEL);
72 if (!new) {
80 pr_warn("Unable to alloc new priomap!\n"); 73 pr_warn("Unable to alloc new priomap!\n");
81 return -ENOMEM; 74 return -ENOMEM;
82 } 75 }
83 76
84 if (old_priomap) 77 if (old)
85 memcpy(new_priomap->priomap, old_priomap->priomap, 78 memcpy(new->priomap, old->priomap,
86 old_priomap->priomap_len * 79 old->priomap_len * sizeof(old->priomap[0]));
87 sizeof(old_priomap->priomap[0]));
88 80
89 new_priomap->priomap_len = new_len; 81 new->priomap_len = new_len;
90 82
91 rcu_assign_pointer(dev->priomap, new_priomap); 83 /* install the new priomap */
92 if (old_priomap) 84 rcu_assign_pointer(dev->priomap, new);
93 kfree_rcu(old_priomap, rcu); 85 if (old)
86 kfree_rcu(old, rcu);
94 return 0; 87 return 0;
95} 88}
96 89
97static int write_update_netdev_table(struct net_device *dev) 90/**
91 * netprio_prio - return the effective netprio of a cgroup-net_device pair
92 * @cgrp: cgroup part of the target pair
93 * @dev: net_device part of the target pair
94 *
95 * Should be called under RCU read or rtnl lock.
96 */
97static u32 netprio_prio(struct cgroup *cgrp, struct net_device *dev)
98{
99 struct netprio_map *map = rcu_dereference_rtnl(dev->priomap);
100
101 if (map && cgrp->id < map->priomap_len)
102 return map->priomap[cgrp->id];
103 return 0;
104}
105
106/**
107 * netprio_set_prio - set netprio on a cgroup-net_device pair
108 * @cgrp: cgroup part of the target pair
109 * @dev: net_device part of the target pair
110 * @prio: prio to set
111 *
112 * Set netprio to @prio on @cgrp-@dev pair. Should be called under rtnl
113 * lock and may fail under memory pressure for non-zero @prio.
114 */
115static int netprio_set_prio(struct cgroup *cgrp, struct net_device *dev,
116 u32 prio)
98{ 117{
99 int ret = 0;
100 u32 max_len;
101 struct netprio_map *map; 118 struct netprio_map *map;
119 int ret;
102 120
103 max_len = atomic_read(&max_prioidx) + 1; 121 /* avoid extending priomap for zero writes */
104 map = rtnl_dereference(dev->priomap); 122 map = rtnl_dereference(dev->priomap);
105 if (!map || map->priomap_len < max_len) 123 if (!prio && (!map || map->priomap_len <= cgrp->id))
106 ret = extend_netdev_table(dev, max_len); 124 return 0;
107 125
108 return ret; 126 ret = extend_netdev_table(dev, cgrp->id);
127 if (ret)
128 return ret;
129
130 map = rtnl_dereference(dev->priomap);
131 map->priomap[cgrp->id] = prio;
132 return 0;
109} 133}
110 134
111static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp) 135static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp)
112{ 136{
113 struct cgroup_netprio_state *cs; 137 struct cgroup_netprio_state *cs;
114 int ret = -EINVAL;
115 138
116 cs = kzalloc(sizeof(*cs), GFP_KERNEL); 139 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
117 if (!cs) 140 if (!cs)
118 return ERR_PTR(-ENOMEM); 141 return ERR_PTR(-ENOMEM);
119 142
120 if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx)
121 goto out;
122
123 ret = get_prioidx(&cs->prioidx);
124 if (ret < 0) {
125 pr_warn("No space in priority index array\n");
126 goto out;
127 }
128
129 return &cs->css; 143 return &cs->css;
130out:
131 kfree(cs);
132 return ERR_PTR(ret);
133} 144}
134 145
135static void cgrp_destroy(struct cgroup *cgrp) 146static int cgrp_css_online(struct cgroup *cgrp)
136{ 147{
137 struct cgroup_netprio_state *cs; 148 struct cgroup *parent = cgrp->parent;
138 struct net_device *dev; 149 struct net_device *dev;
139 struct netprio_map *map; 150 int ret = 0;
151
152 if (!parent)
153 return 0;
140 154
141 cs = cgrp_netprio_state(cgrp);
142 rtnl_lock(); 155 rtnl_lock();
156 /*
157 * Inherit prios from the parent. As all prios are set during
158 * onlining, there is no need to clear them on offline.
159 */
143 for_each_netdev(&init_net, dev) { 160 for_each_netdev(&init_net, dev) {
144 map = rtnl_dereference(dev->priomap); 161 u32 prio = netprio_prio(parent, dev);
145 if (map && cs->prioidx < map->priomap_len) 162
146 map->priomap[cs->prioidx] = 0; 163 ret = netprio_set_prio(cgrp, dev, prio);
164 if (ret)
165 break;
147 } 166 }
148 rtnl_unlock(); 167 rtnl_unlock();
149 put_prioidx(cs->prioidx); 168 return ret;
150 kfree(cs); 169}
170
171static void cgrp_css_free(struct cgroup *cgrp)
172{
173 kfree(cgrp_netprio_state(cgrp));
151} 174}
152 175
153static u64 read_prioidx(struct cgroup *cgrp, struct cftype *cft) 176static u64 read_prioidx(struct cgroup *cgrp, struct cftype *cft)
154{ 177{
155 return (u64)cgrp_netprio_state(cgrp)->prioidx; 178 return cgrp->id;
156} 179}
157 180
158static int read_priomap(struct cgroup *cont, struct cftype *cft, 181static int read_priomap(struct cgroup *cont, struct cftype *cft,
159 struct cgroup_map_cb *cb) 182 struct cgroup_map_cb *cb)
160{ 183{
161 struct net_device *dev; 184 struct net_device *dev;
162 u32 prioidx = cgrp_netprio_state(cont)->prioidx;
163 u32 priority;
164 struct netprio_map *map;
165 185
166 rcu_read_lock(); 186 rcu_read_lock();
167 for_each_netdev_rcu(&init_net, dev) { 187 for_each_netdev_rcu(&init_net, dev)
168 map = rcu_dereference(dev->priomap); 188 cb->fill(cb, dev->name, netprio_prio(cont, dev));
169 priority = (map && prioidx < map->priomap_len) ? map->priomap[prioidx] : 0;
170 cb->fill(cb, dev->name, priority);
171 }
172 rcu_read_unlock(); 189 rcu_read_unlock();
173 return 0; 190 return 0;
174} 191}
@@ -176,66 +193,24 @@ static int read_priomap(struct cgroup *cont, struct cftype *cft,
176static int write_priomap(struct cgroup *cgrp, struct cftype *cft, 193static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
177 const char *buffer) 194 const char *buffer)
178{ 195{
179 char *devname = kstrdup(buffer, GFP_KERNEL); 196 char devname[IFNAMSIZ + 1];
180 int ret = -EINVAL;
181 u32 prioidx = cgrp_netprio_state(cgrp)->prioidx;
182 unsigned long priority;
183 char *priostr;
184 struct net_device *dev; 197 struct net_device *dev;
185 struct netprio_map *map; 198 u32 prio;
186 199 int ret;
187 if (!devname)
188 return -ENOMEM;
189
190 /*
191 * Minimally sized valid priomap string
192 */
193 if (strlen(devname) < 3)
194 goto out_free_devname;
195
196 priostr = strstr(devname, " ");
197 if (!priostr)
198 goto out_free_devname;
199
200 /*
201 *Separate the devname from the associated priority
202 *and advance the priostr pointer to the priority value
203 */
204 *priostr = '\0';
205 priostr++;
206
207 /*
208 * If the priostr points to NULL, we're at the end of the passed
209 * in string, and its not a valid write
210 */
211 if (*priostr == '\0')
212 goto out_free_devname;
213
214 ret = kstrtoul(priostr, 10, &priority);
215 if (ret < 0)
216 goto out_free_devname;
217 200
218 ret = -ENODEV; 201 if (sscanf(buffer, "%"__stringify(IFNAMSIZ)"s %u", devname, &prio) != 2)
202 return -EINVAL;
219 203
220 dev = dev_get_by_name(&init_net, devname); 204 dev = dev_get_by_name(&init_net, devname);
221 if (!dev) 205 if (!dev)
222 goto out_free_devname; 206 return -ENODEV;
223 207
224 rtnl_lock(); 208 rtnl_lock();
225 ret = write_update_netdev_table(dev);
226 if (ret < 0)
227 goto out_put_dev;
228 209
229 map = rtnl_dereference(dev->priomap); 210 ret = netprio_set_prio(cgrp, dev, prio);
230 if (map)
231 map->priomap[prioidx] = priority;
232 211
233out_put_dev:
234 rtnl_unlock(); 212 rtnl_unlock();
235 dev_put(dev); 213 dev_put(dev);
236
237out_free_devname:
238 kfree(devname);
239 return ret; 214 return ret;
240} 215}
241 216
@@ -248,7 +223,7 @@ static int update_netprio(const void *v, struct file *file, unsigned n)
248 return 0; 223 return 0;
249} 224}
250 225
251void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) 226static void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
252{ 227{
253 struct task_struct *p; 228 struct task_struct *p;
254 void *v; 229 void *v;
@@ -276,22 +251,13 @@ static struct cftype ss_files[] = {
276 251
277struct cgroup_subsys net_prio_subsys = { 252struct cgroup_subsys net_prio_subsys = {
278 .name = "net_prio", 253 .name = "net_prio",
279 .create = cgrp_create, 254 .css_alloc = cgrp_css_alloc,
280 .destroy = cgrp_destroy, 255 .css_online = cgrp_css_online,
256 .css_free = cgrp_css_free,
281 .attach = net_prio_attach, 257 .attach = net_prio_attach,
282 .subsys_id = net_prio_subsys_id, 258 .subsys_id = net_prio_subsys_id,
283 .base_cftypes = ss_files, 259 .base_cftypes = ss_files,
284 .module = THIS_MODULE, 260 .module = THIS_MODULE,
285
286 /*
287 * net_prio has artificial limit on the number of cgroups and
288 * disallows nesting making it impossible to co-mount it with other
289 * hierarchical subsystems. Remove the artificially low PRIOIDX_SZ
290 * limit and properly nest configuration such that children follow
291 * their parents' configurations by default and are allowed to
292 * override and remove the following.
293 */
294 .broken_hierarchy = true,
295}; 261};
296 262
297static int netprio_device_event(struct notifier_block *unused, 263static int netprio_device_event(struct notifier_block *unused,
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index d1dc14c2aac4..b29dacf900f9 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -419,20 +419,6 @@ struct pktgen_thread {
419#define REMOVE 1 419#define REMOVE 1
420#define FIND 0 420#define FIND 0
421 421
422static inline ktime_t ktime_now(void)
423{
424 struct timespec ts;
425 ktime_get_ts(&ts);
426
427 return timespec_to_ktime(ts);
428}
429
430/* This works even if 32 bit because of careful byte order choice */
431static inline int ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
432{
433 return cmp1.tv64 < cmp2.tv64;
434}
435
436static const char version[] = 422static const char version[] =
437 "Packet Generator for packet performance testing. " 423 "Packet Generator for packet performance testing. "
438 "Version: " VERSION "\n"; 424 "Version: " VERSION "\n";
@@ -675,7 +661,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
675 seq_puts(seq, "\n"); 661 seq_puts(seq, "\n");
676 662
677 /* not really stopped, more like last-running-at */ 663 /* not really stopped, more like last-running-at */
678 stopped = pkt_dev->running ? ktime_now() : pkt_dev->stopped_at; 664 stopped = pkt_dev->running ? ktime_get() : pkt_dev->stopped_at;
679 idle = pkt_dev->idle_acc; 665 idle = pkt_dev->idle_acc;
680 do_div(idle, NSEC_PER_USEC); 666 do_div(idle, NSEC_PER_USEC);
681 667
@@ -2141,12 +2127,12 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
2141 return; 2127 return;
2142 } 2128 }
2143 2129
2144 start_time = ktime_now(); 2130 start_time = ktime_get();
2145 if (remaining < 100000) { 2131 if (remaining < 100000) {
2146 /* for small delays (<100us), just loop until limit is reached */ 2132 /* for small delays (<100us), just loop until limit is reached */
2147 do { 2133 do {
2148 end_time = ktime_now(); 2134 end_time = ktime_get();
2149 } while (ktime_lt(end_time, spin_until)); 2135 } while (ktime_compare(end_time, spin_until) < 0);
2150 } else { 2136 } else {
2151 /* see do_nanosleep */ 2137 /* see do_nanosleep */
2152 hrtimer_init_sleeper(&t, current); 2138 hrtimer_init_sleeper(&t, current);
@@ -2162,7 +2148,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
2162 hrtimer_cancel(&t.timer); 2148 hrtimer_cancel(&t.timer);
2163 } while (t.task && pkt_dev->running && !signal_pending(current)); 2149 } while (t.task && pkt_dev->running && !signal_pending(current));
2164 __set_current_state(TASK_RUNNING); 2150 __set_current_state(TASK_RUNNING);
2165 end_time = ktime_now(); 2151 end_time = ktime_get();
2166 } 2152 }
2167 2153
2168 pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time)); 2154 pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
@@ -2427,11 +2413,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
2427 } 2413 }
2428 } else { /* IPV6 * */ 2414 } else { /* IPV6 * */
2429 2415
2430 if (pkt_dev->min_in6_daddr.s6_addr32[0] == 0 && 2416 if (!ipv6_addr_any(&pkt_dev->min_in6_daddr)) {
2431 pkt_dev->min_in6_daddr.s6_addr32[1] == 0 &&
2432 pkt_dev->min_in6_daddr.s6_addr32[2] == 0 &&
2433 pkt_dev->min_in6_daddr.s6_addr32[3] == 0) ;
2434 else {
2435 int i; 2417 int i;
2436 2418
2437 /* Only random destinations yet */ 2419 /* Only random destinations yet */
@@ -2916,8 +2898,7 @@ static void pktgen_run(struct pktgen_thread *t)
2916 pktgen_clear_counters(pkt_dev); 2898 pktgen_clear_counters(pkt_dev);
2917 pkt_dev->running = 1; /* Cranke yeself! */ 2899 pkt_dev->running = 1; /* Cranke yeself! */
2918 pkt_dev->skb = NULL; 2900 pkt_dev->skb = NULL;
2919 pkt_dev->started_at = 2901 pkt_dev->started_at = pkt_dev->next_tx = ktime_get();
2920 pkt_dev->next_tx = ktime_now();
2921 2902
2922 set_pkt_overhead(pkt_dev); 2903 set_pkt_overhead(pkt_dev);
2923 2904
@@ -3076,7 +3057,7 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
3076 3057
3077 kfree_skb(pkt_dev->skb); 3058 kfree_skb(pkt_dev->skb);
3078 pkt_dev->skb = NULL; 3059 pkt_dev->skb = NULL;
3079 pkt_dev->stopped_at = ktime_now(); 3060 pkt_dev->stopped_at = ktime_get();
3080 pkt_dev->running = 0; 3061 pkt_dev->running = 0;
3081 3062
3082 show_results(pkt_dev, nr_frags); 3063 show_results(pkt_dev, nr_frags);
@@ -3095,7 +3076,7 @@ static struct pktgen_dev *next_to_run(struct pktgen_thread *t)
3095 continue; 3076 continue;
3096 if (best == NULL) 3077 if (best == NULL)
3097 best = pkt_dev; 3078 best = pkt_dev;
3098 else if (ktime_lt(pkt_dev->next_tx, best->next_tx)) 3079 else if (ktime_compare(pkt_dev->next_tx, best->next_tx) < 0)
3099 best = pkt_dev; 3080 best = pkt_dev;
3100 } 3081 }
3101 if_unlock(t); 3082 if_unlock(t);
@@ -3180,14 +3161,14 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
3180 3161
3181static void pktgen_resched(struct pktgen_dev *pkt_dev) 3162static void pktgen_resched(struct pktgen_dev *pkt_dev)
3182{ 3163{
3183 ktime_t idle_start = ktime_now(); 3164 ktime_t idle_start = ktime_get();
3184 schedule(); 3165 schedule();
3185 pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start)); 3166 pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start));
3186} 3167}
3187 3168
3188static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev) 3169static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
3189{ 3170{
3190 ktime_t idle_start = ktime_now(); 3171 ktime_t idle_start = ktime_get();
3191 3172
3192 while (atomic_read(&(pkt_dev->skb->users)) != 1) { 3173 while (atomic_read(&(pkt_dev->skb->users)) != 1) {
3193 if (signal_pending(current)) 3174 if (signal_pending(current))
@@ -3198,7 +3179,7 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
3198 else 3179 else
3199 cpu_relax(); 3180 cpu_relax();
3200 } 3181 }
3201 pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start)); 3182 pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start));
3202} 3183}
3203 3184
3204static void pktgen_xmit(struct pktgen_dev *pkt_dev) 3185static void pktgen_xmit(struct pktgen_dev *pkt_dev)
@@ -3220,7 +3201,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
3220 * "never transmit" 3201 * "never transmit"
3221 */ 3202 */
3222 if (unlikely(pkt_dev->delay == ULLONG_MAX)) { 3203 if (unlikely(pkt_dev->delay == ULLONG_MAX)) {
3223 pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX); 3204 pkt_dev->next_tx = ktime_add_ns(ktime_get(), ULONG_MAX);
3224 return; 3205 return;
3225 } 3206 }
3226 3207
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index fad649ae4dec..1868625af25e 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -128,7 +128,7 @@ static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex)
128 if (tab == NULL || tab[msgindex].doit == NULL) 128 if (tab == NULL || tab[msgindex].doit == NULL)
129 tab = rtnl_msg_handlers[PF_UNSPEC]; 129 tab = rtnl_msg_handlers[PF_UNSPEC];
130 130
131 return tab ? tab[msgindex].doit : NULL; 131 return tab[msgindex].doit;
132} 132}
133 133
134static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex) 134static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
@@ -143,7 +143,7 @@ static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
143 if (tab == NULL || tab[msgindex].dumpit == NULL) 143 if (tab == NULL || tab[msgindex].dumpit == NULL)
144 tab = rtnl_msg_handlers[PF_UNSPEC]; 144 tab = rtnl_msg_handlers[PF_UNSPEC];
145 145
146 return tab ? tab[msgindex].dumpit : NULL; 146 return tab[msgindex].dumpit;
147} 147}
148 148
149static rtnl_calcit_func rtnl_get_calcit(int protocol, int msgindex) 149static rtnl_calcit_func rtnl_get_calcit(int protocol, int msgindex)
@@ -158,7 +158,7 @@ static rtnl_calcit_func rtnl_get_calcit(int protocol, int msgindex)
158 if (tab == NULL || tab[msgindex].calcit == NULL) 158 if (tab == NULL || tab[msgindex].calcit == NULL)
159 tab = rtnl_msg_handlers[PF_UNSPEC]; 159 tab = rtnl_msg_handlers[PF_UNSPEC];
160 160
161 return tab ? tab[msgindex].calcit : NULL; 161 return tab[msgindex].calcit;
162} 162}
163 163
164/** 164/**
@@ -1316,6 +1316,10 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
1316 err = PTR_ERR(net); 1316 err = PTR_ERR(net);
1317 goto errout; 1317 goto errout;
1318 } 1318 }
1319 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) {
1320 err = -EPERM;
1321 goto errout;
1322 }
1319 err = dev_change_net_namespace(dev, net, ifname); 1323 err = dev_change_net_namespace(dev, net, ifname);
1320 put_net(net); 1324 put_net(net);
1321 if (err) 1325 if (err)
@@ -1638,7 +1642,7 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
1638} 1642}
1639EXPORT_SYMBOL(rtnl_configure_link); 1643EXPORT_SYMBOL(rtnl_configure_link);
1640 1644
1641struct net_device *rtnl_create_link(struct net *src_net, struct net *net, 1645struct net_device *rtnl_create_link(struct net *net,
1642 char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]) 1646 char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[])
1643{ 1647{
1644 int err; 1648 int err;
@@ -1836,7 +1840,7 @@ replay:
1836 if (IS_ERR(dest_net)) 1840 if (IS_ERR(dest_net))
1837 return PTR_ERR(dest_net); 1841 return PTR_ERR(dest_net);
1838 1842
1839 dev = rtnl_create_link(net, dest_net, ifname, ops, tb); 1843 dev = rtnl_create_link(dest_net, ifname, ops, tb);
1840 if (IS_ERR(dev)) { 1844 if (IS_ERR(dev)) {
1841 err = PTR_ERR(dev); 1845 err = PTR_ERR(dev);
1842 goto out; 1846 goto out;
@@ -2057,6 +2061,9 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2057 u8 *addr; 2061 u8 *addr;
2058 int err; 2062 int err;
2059 2063
2064 if (!capable(CAP_NET_ADMIN))
2065 return -EPERM;
2066
2060 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL); 2067 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
2061 if (err < 0) 2068 if (err < 0)
2062 return err; 2069 return err;
@@ -2123,6 +2130,9 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2123 int err = -EINVAL; 2130 int err = -EINVAL;
2124 __u8 *addr; 2131 __u8 *addr;
2125 2132
2133 if (!capable(CAP_NET_ADMIN))
2134 return -EPERM;
2135
2126 if (nlmsg_len(nlh) < sizeof(*ndm)) 2136 if (nlmsg_len(nlh) < sizeof(*ndm))
2127 return -EINVAL; 2137 return -EINVAL;
2128 2138
@@ -2253,6 +2263,211 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
2253 return skb->len; 2263 return skb->len;
2254} 2264}
2255 2265
2266int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
2267 struct net_device *dev, u16 mode)
2268{
2269 struct nlmsghdr *nlh;
2270 struct ifinfomsg *ifm;
2271 struct nlattr *br_afspec;
2272 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
2273
2274 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), NLM_F_MULTI);
2275 if (nlh == NULL)
2276 return -EMSGSIZE;
2277
2278 ifm = nlmsg_data(nlh);
2279 ifm->ifi_family = AF_BRIDGE;
2280 ifm->__ifi_pad = 0;
2281 ifm->ifi_type = dev->type;
2282 ifm->ifi_index = dev->ifindex;
2283 ifm->ifi_flags = dev_get_flags(dev);
2284 ifm->ifi_change = 0;
2285
2286
2287 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
2288 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
2289 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
2290 (dev->master &&
2291 nla_put_u32(skb, IFLA_MASTER, dev->master->ifindex)) ||
2292 (dev->addr_len &&
2293 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
2294 (dev->ifindex != dev->iflink &&
2295 nla_put_u32(skb, IFLA_LINK, dev->iflink)))
2296 goto nla_put_failure;
2297
2298 br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
2299 if (!br_afspec)
2300 goto nla_put_failure;
2301
2302 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF) ||
2303 nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
2304 nla_nest_cancel(skb, br_afspec);
2305 goto nla_put_failure;
2306 }
2307 nla_nest_end(skb, br_afspec);
2308
2309 return nlmsg_end(skb, nlh);
2310nla_put_failure:
2311 nlmsg_cancel(skb, nlh);
2312 return -EMSGSIZE;
2313}
2314EXPORT_SYMBOL(ndo_dflt_bridge_getlink);
2315
2316static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
2317{
2318 struct net *net = sock_net(skb->sk);
2319 struct net_device *dev;
2320 int idx = 0;
2321 u32 portid = NETLINK_CB(cb->skb).portid;
2322 u32 seq = cb->nlh->nlmsg_seq;
2323
2324 rcu_read_lock();
2325 for_each_netdev_rcu(net, dev) {
2326 const struct net_device_ops *ops = dev->netdev_ops;
2327 struct net_device *master = dev->master;
2328
2329 if (master && master->netdev_ops->ndo_bridge_getlink) {
2330 if (idx >= cb->args[0] &&
2331 master->netdev_ops->ndo_bridge_getlink(
2332 skb, portid, seq, dev) < 0)
2333 break;
2334 idx++;
2335 }
2336
2337 if (ops->ndo_bridge_getlink) {
2338 if (idx >= cb->args[0] &&
2339 ops->ndo_bridge_getlink(skb, portid, seq, dev) < 0)
2340 break;
2341 idx++;
2342 }
2343 }
2344 rcu_read_unlock();
2345 cb->args[0] = idx;
2346
2347 return skb->len;
2348}
2349
2350static inline size_t bridge_nlmsg_size(void)
2351{
2352 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
2353 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
2354 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
2355 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
2356 + nla_total_size(sizeof(u32)) /* IFLA_MTU */
2357 + nla_total_size(sizeof(u32)) /* IFLA_LINK */
2358 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
2359 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
2360 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
2361 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
2362 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
2363}
2364
2365static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
2366{
2367 struct net *net = dev_net(dev);
2368 struct net_device *master = dev->master;
2369 struct sk_buff *skb;
2370 int err = -EOPNOTSUPP;
2371
2372 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
2373 if (!skb) {
2374 err = -ENOMEM;
2375 goto errout;
2376 }
2377
2378 if ((!flags || (flags & BRIDGE_FLAGS_MASTER)) &&
2379 master && master->netdev_ops->ndo_bridge_getlink) {
2380 err = master->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev);
2381 if (err < 0)
2382 goto errout;
2383 }
2384
2385 if ((flags & BRIDGE_FLAGS_SELF) &&
2386 dev->netdev_ops->ndo_bridge_getlink) {
2387 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev);
2388 if (err < 0)
2389 goto errout;
2390 }
2391
2392 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
2393 return 0;
2394errout:
2395 WARN_ON(err == -EMSGSIZE);
2396 kfree_skb(skb);
2397 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
2398 return err;
2399}
2400
2401static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2402 void *arg)
2403{
2404 struct net *net = sock_net(skb->sk);
2405 struct ifinfomsg *ifm;
2406 struct net_device *dev;
2407 struct nlattr *br_spec, *attr = NULL;
2408 int rem, err = -EOPNOTSUPP;
2409 u16 oflags, flags = 0;
2410 bool have_flags = false;
2411
2412 if (nlmsg_len(nlh) < sizeof(*ifm))
2413 return -EINVAL;
2414
2415 ifm = nlmsg_data(nlh);
2416 if (ifm->ifi_family != AF_BRIDGE)
2417 return -EPFNOSUPPORT;
2418
2419 dev = __dev_get_by_index(net, ifm->ifi_index);
2420 if (!dev) {
2421 pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n");
2422 return -ENODEV;
2423 }
2424
2425 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
2426 if (br_spec) {
2427 nla_for_each_nested(attr, br_spec, rem) {
2428 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
2429 have_flags = true;
2430 flags = nla_get_u16(attr);
2431 break;
2432 }
2433 }
2434 }
2435
2436 oflags = flags;
2437
2438 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
2439 if (!dev->master ||
2440 !dev->master->netdev_ops->ndo_bridge_setlink) {
2441 err = -EOPNOTSUPP;
2442 goto out;
2443 }
2444
2445 err = dev->master->netdev_ops->ndo_bridge_setlink(dev, nlh);
2446 if (err)
2447 goto out;
2448
2449 flags &= ~BRIDGE_FLAGS_MASTER;
2450 }
2451
2452 if ((flags & BRIDGE_FLAGS_SELF)) {
2453 if (!dev->netdev_ops->ndo_bridge_setlink)
2454 err = -EOPNOTSUPP;
2455 else
2456 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh);
2457
2458 if (!err)
2459 flags &= ~BRIDGE_FLAGS_SELF;
2460 }
2461
2462 if (have_flags)
2463 memcpy(nla_data(attr), &flags, sizeof(flags));
2464 /* Generate event to notify upper layer of bridge change */
2465 if (!err)
2466 err = rtnl_bridge_notify(dev, oflags);
2467out:
2468 return err;
2469}
2470
2256/* Protected by RTNL sempahore. */ 2471/* Protected by RTNL sempahore. */
2257static struct rtattr **rta_buf; 2472static struct rtattr **rta_buf;
2258static int rtattr_max; 2473static int rtattr_max;
@@ -2283,7 +2498,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2283 sz_idx = type>>2; 2498 sz_idx = type>>2;
2284 kind = type&3; 2499 kind = type&3;
2285 2500
2286 if (kind != 2 && !capable(CAP_NET_ADMIN)) 2501 if (kind != 2 && !ns_capable(net->user_ns, CAP_NET_ADMIN))
2287 return -EPERM; 2502 return -EPERM;
2288 2503
2289 if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) { 2504 if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
@@ -2434,5 +2649,8 @@ void __init rtnetlink_init(void)
2434 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL); 2649 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL);
2435 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL); 2650 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL);
2436 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL); 2651 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL);
2652
2653 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, NULL);
2654 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL);
2437} 2655}
2438 2656
diff --git a/net/core/scm.c b/net/core/scm.c
index ab570841a532..57fb1ee6649f 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -51,11 +51,11 @@ static __inline__ int scm_check_creds(struct ucred *creds)
51 if (!uid_valid(uid) || !gid_valid(gid)) 51 if (!uid_valid(uid) || !gid_valid(gid))
52 return -EINVAL; 52 return -EINVAL;
53 53
54 if ((creds->pid == task_tgid_vnr(current) || capable(CAP_SYS_ADMIN)) && 54 if ((creds->pid == task_tgid_vnr(current) || nsown_capable(CAP_SYS_ADMIN)) &&
55 ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || 55 ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
56 uid_eq(uid, cred->suid)) || capable(CAP_SETUID)) && 56 uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
57 ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || 57 ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
58 gid_eq(gid, cred->sgid)) || capable(CAP_SETGID))) { 58 gid_eq(gid, cred->sgid)) || nsown_capable(CAP_SETGID))) {
59 return 0; 59 return 0;
60 } 60 }
61 return -EPERM; 61 return -EPERM;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4007c1437fda..3ab989b0de42 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -519,7 +519,7 @@ static void skb_release_data(struct sk_buff *skb)
519 519
520 uarg = skb_shinfo(skb)->destructor_arg; 520 uarg = skb_shinfo(skb)->destructor_arg;
521 if (uarg->callback) 521 if (uarg->callback)
522 uarg->callback(uarg); 522 uarg->callback(uarg, true);
523 } 523 }
524 524
525 if (skb_has_frag_list(skb)) 525 if (skb_has_frag_list(skb))
@@ -635,6 +635,26 @@ void kfree_skb(struct sk_buff *skb)
635EXPORT_SYMBOL(kfree_skb); 635EXPORT_SYMBOL(kfree_skb);
636 636
637/** 637/**
638 * skb_tx_error - report an sk_buff xmit error
639 * @skb: buffer that triggered an error
640 *
641 * Report xmit error if a device callback is tracking this skb.
642 * skb must be freed afterwards.
643 */
644void skb_tx_error(struct sk_buff *skb)
645{
646 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
647 struct ubuf_info *uarg;
648
649 uarg = skb_shinfo(skb)->destructor_arg;
650 if (uarg->callback)
651 uarg->callback(uarg, false);
652 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
653 }
654}
655EXPORT_SYMBOL(skb_tx_error);
656
657/**
638 * consume_skb - free an skbuff 658 * consume_skb - free an skbuff
639 * @skb: buffer to free 659 * @skb: buffer to free
640 * 660 *
@@ -662,11 +682,14 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
662 new->transport_header = old->transport_header; 682 new->transport_header = old->transport_header;
663 new->network_header = old->network_header; 683 new->network_header = old->network_header;
664 new->mac_header = old->mac_header; 684 new->mac_header = old->mac_header;
685 new->inner_transport_header = old->inner_transport_header;
686 new->inner_network_header = old->inner_transport_header;
665 skb_dst_copy(new, old); 687 skb_dst_copy(new, old);
666 new->rxhash = old->rxhash; 688 new->rxhash = old->rxhash;
667 new->ooo_okay = old->ooo_okay; 689 new->ooo_okay = old->ooo_okay;
668 new->l4_rxhash = old->l4_rxhash; 690 new->l4_rxhash = old->l4_rxhash;
669 new->no_fcs = old->no_fcs; 691 new->no_fcs = old->no_fcs;
692 new->encapsulation = old->encapsulation;
670#ifdef CONFIG_XFRM 693#ifdef CONFIG_XFRM
671 new->sp = secpath_get(old->sp); 694 new->sp = secpath_get(old->sp);
672#endif 695#endif
@@ -797,7 +820,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
797 for (i = 0; i < num_frags; i++) 820 for (i = 0; i < num_frags; i++)
798 skb_frag_unref(skb, i); 821 skb_frag_unref(skb, i);
799 822
800 uarg->callback(uarg); 823 uarg->callback(uarg, false);
801 824
802 /* skb frags point to kernel buffers */ 825 /* skb frags point to kernel buffers */
803 for (i = num_frags - 1; i >= 0; i--) { 826 for (i = num_frags - 1; i >= 0; i--) {
@@ -872,6 +895,8 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
872 new->network_header += offset; 895 new->network_header += offset;
873 if (skb_mac_header_was_set(new)) 896 if (skb_mac_header_was_set(new))
874 new->mac_header += offset; 897 new->mac_header += offset;
898 new->inner_transport_header += offset;
899 new->inner_network_header += offset;
875#endif 900#endif
876 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; 901 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
877 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; 902 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
@@ -1069,6 +1094,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
1069 skb->network_header += off; 1094 skb->network_header += off;
1070 if (skb_mac_header_was_set(skb)) 1095 if (skb_mac_header_was_set(skb))
1071 skb->mac_header += off; 1096 skb->mac_header += off;
1097 skb->inner_transport_header += off;
1098 skb->inner_network_header += off;
1072 /* Only adjust this if it actually is csum_start rather than csum */ 1099 /* Only adjust this if it actually is csum_start rather than csum */
1073 if (skb->ip_summed == CHECKSUM_PARTIAL) 1100 if (skb->ip_summed == CHECKSUM_PARTIAL)
1074 skb->csum_start += nhead; 1101 skb->csum_start += nhead;
@@ -1168,6 +1195,8 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
1168 n->network_header += off; 1195 n->network_header += off;
1169 if (skb_mac_header_was_set(skb)) 1196 if (skb_mac_header_was_set(skb))
1170 n->mac_header += off; 1197 n->mac_header += off;
1198 n->inner_transport_header += off;
1199 n->inner_network_header += off;
1171#endif 1200#endif
1172 1201
1173 return n; 1202 return n;
@@ -2999,12 +3028,11 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2999 memcpy(skb_mac_header(nskb), skb_mac_header(p), 3028 memcpy(skb_mac_header(nskb), skb_mac_header(p),
3000 p->data - skb_mac_header(p)); 3029 p->data - skb_mac_header(p));
3001 3030
3002 *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
3003 skb_shinfo(nskb)->frag_list = p; 3031 skb_shinfo(nskb)->frag_list = p;
3004 skb_shinfo(nskb)->gso_size = pinfo->gso_size; 3032 skb_shinfo(nskb)->gso_size = pinfo->gso_size;
3005 pinfo->gso_size = 0; 3033 pinfo->gso_size = 0;
3006 skb_header_release(p); 3034 skb_header_release(p);
3007 nskb->prev = p; 3035 NAPI_GRO_CB(nskb)->last = p;
3008 3036
3009 nskb->data_len += p->len; 3037 nskb->data_len += p->len;
3010 nskb->truesize += p->truesize; 3038 nskb->truesize += p->truesize;
@@ -3030,8 +3058,8 @@ merge:
3030 3058
3031 __skb_pull(skb, offset); 3059 __skb_pull(skb, offset);
3032 3060
3033 p->prev->next = skb; 3061 NAPI_GRO_CB(p)->last->next = skb;
3034 p->prev = skb; 3062 NAPI_GRO_CB(p)->last = skb;
3035 skb_header_release(skb); 3063 skb_header_release(skb);
3036 3064
3037done: 3065done:
diff --git a/net/core/sock.c b/net/core/sock.c
index 8a146cfcc366..bc131d419683 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -505,7 +505,8 @@ struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
505} 505}
506EXPORT_SYMBOL(sk_dst_check); 506EXPORT_SYMBOL(sk_dst_check);
507 507
508static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen) 508static int sock_setbindtodevice(struct sock *sk, char __user *optval,
509 int optlen)
509{ 510{
510 int ret = -ENOPROTOOPT; 511 int ret = -ENOPROTOOPT;
511#ifdef CONFIG_NETDEVICES 512#ifdef CONFIG_NETDEVICES
@@ -515,7 +516,7 @@ static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
515 516
516 /* Sorry... */ 517 /* Sorry... */
517 ret = -EPERM; 518 ret = -EPERM;
518 if (!capable(CAP_NET_RAW)) 519 if (!ns_capable(net->user_ns, CAP_NET_RAW))
519 goto out; 520 goto out;
520 521
521 ret = -EINVAL; 522 ret = -EINVAL;
@@ -562,6 +563,59 @@ out:
562 return ret; 563 return ret;
563} 564}
564 565
566static int sock_getbindtodevice(struct sock *sk, char __user *optval,
567 int __user *optlen, int len)
568{
569 int ret = -ENOPROTOOPT;
570#ifdef CONFIG_NETDEVICES
571 struct net *net = sock_net(sk);
572 struct net_device *dev;
573 char devname[IFNAMSIZ];
574 unsigned seq;
575
576 if (sk->sk_bound_dev_if == 0) {
577 len = 0;
578 goto zero;
579 }
580
581 ret = -EINVAL;
582 if (len < IFNAMSIZ)
583 goto out;
584
585retry:
586 seq = read_seqcount_begin(&devnet_rename_seq);
587 rcu_read_lock();
588 dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
589 ret = -ENODEV;
590 if (!dev) {
591 rcu_read_unlock();
592 goto out;
593 }
594
595 strcpy(devname, dev->name);
596 rcu_read_unlock();
597 if (read_seqcount_retry(&devnet_rename_seq, seq))
598 goto retry;
599
600 len = strlen(devname) + 1;
601
602 ret = -EFAULT;
603 if (copy_to_user(optval, devname, len))
604 goto out;
605
606zero:
607 ret = -EFAULT;
608 if (put_user(len, optlen))
609 goto out;
610
611 ret = 0;
612
613out:
614#endif
615
616 return ret;
617}
618
565static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) 619static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
566{ 620{
567 if (valbool) 621 if (valbool)
@@ -589,7 +643,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
589 */ 643 */
590 644
591 if (optname == SO_BINDTODEVICE) 645 if (optname == SO_BINDTODEVICE)
592 return sock_bindtodevice(sk, optval, optlen); 646 return sock_setbindtodevice(sk, optval, optlen);
593 647
594 if (optlen < sizeof(int)) 648 if (optlen < sizeof(int))
595 return -EINVAL; 649 return -EINVAL;
@@ -696,7 +750,8 @@ set_rcvbuf:
696 break; 750 break;
697 751
698 case SO_PRIORITY: 752 case SO_PRIORITY:
699 if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN)) 753 if ((val >= 0 && val <= 6) ||
754 ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
700 sk->sk_priority = val; 755 sk->sk_priority = val;
701 else 756 else
702 ret = -EPERM; 757 ret = -EPERM;
@@ -813,7 +868,7 @@ set_rcvbuf:
813 clear_bit(SOCK_PASSSEC, &sock->flags); 868 clear_bit(SOCK_PASSSEC, &sock->flags);
814 break; 869 break;
815 case SO_MARK: 870 case SO_MARK:
816 if (!capable(CAP_NET_ADMIN)) 871 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
817 ret = -EPERM; 872 ret = -EPERM;
818 else 873 else
819 sk->sk_mark = val; 874 sk->sk_mark = val;
@@ -1074,6 +1129,17 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
1074 case SO_NOFCS: 1129 case SO_NOFCS:
1075 v.val = sock_flag(sk, SOCK_NOFCS); 1130 v.val = sock_flag(sk, SOCK_NOFCS);
1076 break; 1131 break;
1132
1133 case SO_BINDTODEVICE:
1134 return sock_getbindtodevice(sk, optval, optlen, len);
1135
1136 case SO_GET_FILTER:
1137 len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1138 if (len < 0)
1139 return len;
1140
1141 goto lenout;
1142
1077 default: 1143 default:
1078 return -ENOPROTOOPT; 1144 return -ENOPROTOOPT;
1079 } 1145 }
@@ -1214,13 +1280,11 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
1214 1280
1215#ifdef CONFIG_CGROUPS 1281#ifdef CONFIG_CGROUPS
1216#if IS_ENABLED(CONFIG_NET_CLS_CGROUP) 1282#if IS_ENABLED(CONFIG_NET_CLS_CGROUP)
1217void sock_update_classid(struct sock *sk) 1283void sock_update_classid(struct sock *sk, struct task_struct *task)
1218{ 1284{
1219 u32 classid; 1285 u32 classid;
1220 1286
1221 rcu_read_lock(); /* doing current task, which cannot vanish. */ 1287 classid = task_cls_classid(task);
1222 classid = task_cls_classid(current);
1223 rcu_read_unlock();
1224 if (classid != sk->sk_classid) 1288 if (classid != sk->sk_classid)
1225 sk->sk_classid = classid; 1289 sk->sk_classid = classid;
1226} 1290}
@@ -1263,7 +1327,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1263 sock_net_set(sk, get_net(net)); 1327 sock_net_set(sk, get_net(net));
1264 atomic_set(&sk->sk_wmem_alloc, 1); 1328 atomic_set(&sk->sk_wmem_alloc, 1);
1265 1329
1266 sock_update_classid(sk); 1330 sock_update_classid(sk, current);
1267 sock_update_netprioidx(sk, current); 1331 sock_update_netprioidx(sk, current);
1268 } 1332 }
1269 1333
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index a7c36845b123..d1b08045a9df 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -216,6 +216,11 @@ static __net_init int sysctl_core_net_init(struct net *net)
216 goto err_dup; 216 goto err_dup;
217 217
218 tbl[0].data = &net->core.sysctl_somaxconn; 218 tbl[0].data = &net->core.sysctl_somaxconn;
219
220 /* Don't export any sysctls to unprivileged users */
221 if (net->user_ns != &init_user_ns) {
222 tbl[0].procname = NULL;
223 }
219 } 224 }
220 225
221 net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl); 226 net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 70989e672304..1b588e23cf80 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1662,8 +1662,8 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1662 struct nlmsghdr *reply_nlh = NULL; 1662 struct nlmsghdr *reply_nlh = NULL;
1663 const struct reply_func *fn; 1663 const struct reply_func *fn;
1664 1664
1665 if (!net_eq(net, &init_net)) 1665 if ((nlh->nlmsg_type == RTM_SETDCB) && !capable(CAP_NET_ADMIN))
1666 return -EINVAL; 1666 return -EPERM;
1667 1667
1668 ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX, 1668 ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
1669 dcbnl_rtnl_policy); 1669 dcbnl_rtnl_policy);
@@ -1681,7 +1681,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1681 if (!tb[DCB_ATTR_IFNAME]) 1681 if (!tb[DCB_ATTR_IFNAME])
1682 return -EINVAL; 1682 return -EINVAL;
1683 1683
1684 netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME])); 1684 netdev = dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME]));
1685 if (!netdev) 1685 if (!netdev)
1686 return -ENODEV; 1686 return -ENODEV;
1687 1687
@@ -1705,7 +1705,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1705 1705
1706 nlmsg_end(reply_skb, reply_nlh); 1706 nlmsg_end(reply_skb, reply_nlh);
1707 1707
1708 ret = rtnl_unicast(reply_skb, &init_net, portid); 1708 ret = rtnl_unicast(reply_skb, net, portid);
1709out: 1709out:
1710 dev_put(netdev); 1710 dev_put(netdev);
1711 return ret; 1711 return ret;
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 176ecdba4a22..4f9f5eb478f1 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -439,8 +439,8 @@ exit:
439 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 439 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
440 return NULL; 440 return NULL;
441put_and_exit: 441put_and_exit:
442 bh_unlock_sock(newsk); 442 inet_csk_prepare_forced_close(newsk);
443 sock_put(newsk); 443 dccp_done(newsk);
444 goto exit; 444 goto exit;
445} 445}
446 446
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 56840b249f3b..6e05981f271e 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -585,7 +585,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
585 newinet->inet_rcv_saddr = LOOPBACK4_IPV6; 585 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
586 586
587 if (__inet_inherit_port(sk, newsk) < 0) { 587 if (__inet_inherit_port(sk, newsk) < 0) {
588 sock_put(newsk); 588 inet_csk_prepare_forced_close(newsk);
589 dccp_done(newsk);
589 goto out; 590 goto out;
590 } 591 }
591 __inet6_hash(newsk, NULL); 592 __inet6_hash(newsk, NULL);
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index ea850ce35d4a..662071b249cc 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -174,8 +174,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
174 * To protect against Request floods, increment retrans 174 * To protect against Request floods, increment retrans
175 * counter (backoff, monitored by dccp_response_timer). 175 * counter (backoff, monitored by dccp_response_timer).
176 */ 176 */
177 req->retrans++; 177 inet_rtx_syn_ack(sk, req);
178 req->rsk_ops->rtx_syn_ack(sk, req, NULL);
179 } 178 }
180 /* Network Duplicate, discard packet */ 179 /* Network Duplicate, discard packet */
181 return NULL; 180 return NULL;
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 7b7e561412d3..e47ba9fc4a0e 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -573,6 +573,9 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
573 struct dn_ifaddr __rcu **ifap; 573 struct dn_ifaddr __rcu **ifap;
574 int err = -EINVAL; 574 int err = -EINVAL;
575 575
576 if (!capable(CAP_NET_ADMIN))
577 return -EPERM;
578
576 if (!net_eq(net, &init_net)) 579 if (!net_eq(net, &init_net))
577 goto errout; 580 goto errout;
578 581
@@ -614,6 +617,9 @@ static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
614 struct dn_ifaddr *ifa; 617 struct dn_ifaddr *ifa;
615 int err; 618 int err;
616 619
620 if (!capable(CAP_NET_ADMIN))
621 return -EPERM;
622
617 if (!net_eq(net, &init_net)) 623 if (!net_eq(net, &init_net))
618 return -EINVAL; 624 return -EINVAL;
619 625
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 102d6106a942..e36614eccc04 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -520,6 +520,9 @@ static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *
520 struct rtattr **rta = arg; 520 struct rtattr **rta = arg;
521 struct rtmsg *r = NLMSG_DATA(nlh); 521 struct rtmsg *r = NLMSG_DATA(nlh);
522 522
523 if (!capable(CAP_NET_ADMIN))
524 return -EPERM;
525
523 if (!net_eq(net, &init_net)) 526 if (!net_eq(net, &init_net))
524 return -EINVAL; 527 return -EINVAL;
525 528
@@ -540,6 +543,9 @@ static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *
540 struct rtattr **rta = arg; 543 struct rtattr **rta = arg;
541 struct rtmsg *r = NLMSG_DATA(nlh); 544 struct rtmsg *r = NLMSG_DATA(nlh);
542 545
546 if (!capable(CAP_NET_ADMIN))
547 return -EPERM;
548
543 if (!net_eq(net, &init_net)) 549 if (!net_eq(net, &init_net))
544 return -EINVAL; 550 return -EINVAL;
545 551
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 8aa4b1115384..0a69d0757795 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -259,20 +259,16 @@ static int __init init_dns_resolver(void)
259 if (!cred) 259 if (!cred)
260 return -ENOMEM; 260 return -ENOMEM;
261 261
262 keyring = key_alloc(&key_type_keyring, ".dns_resolver", 262 keyring = keyring_alloc(".dns_resolver",
263 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred, 263 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
264 (KEY_POS_ALL & ~KEY_POS_SETATTR) | 264 (KEY_POS_ALL & ~KEY_POS_SETATTR) |
265 KEY_USR_VIEW | KEY_USR_READ, 265 KEY_USR_VIEW | KEY_USR_READ,
266 KEY_ALLOC_NOT_IN_QUOTA); 266 KEY_ALLOC_NOT_IN_QUOTA, NULL);
267 if (IS_ERR(keyring)) { 267 if (IS_ERR(keyring)) {
268 ret = PTR_ERR(keyring); 268 ret = PTR_ERR(keyring);
269 goto failed_put_cred; 269 goto failed_put_cred;
270 } 270 }
271 271
272 ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
273 if (ret < 0)
274 goto failed_put_key;
275
276 ret = register_key_type(&key_type_dns_resolver); 272 ret = register_key_type(&key_type_dns_resolver);
277 if (ret < 0) 273 if (ret < 0)
278 goto failed_put_key; 274 goto failed_put_key;
@@ -304,3 +300,4 @@ static void __exit exit_dns_resolver(void)
304module_init(init_dns_resolver) 300module_init(init_dns_resolver)
305module_exit(exit_dns_resolver) 301module_exit(exit_dns_resolver)
306MODULE_LICENSE("GPL"); 302MODULE_LICENSE("GPL");
303
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 274791cd7a35..f5eede1d6cb8 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -1,26 +1,24 @@
1config HAVE_NET_DSA
2 def_bool y
3 depends on NETDEVICES && !S390
4
5# Drivers must select NET_DSA and the appropriate tagging format
6
1config NET_DSA 7config NET_DSA
2 tristate "Distributed Switch Architecture support" 8 tristate
3 default n 9 depends on HAVE_NET_DSA
4 depends on EXPERIMENTAL && NETDEVICES && !S390
5 select PHYLIB 10 select PHYLIB
6 ---help---
7 This allows you to use hardware switch chips that use
8 the Distributed Switch Architecture.
9
10 11
11if NET_DSA 12if NET_DSA
12 13
13# tagging formats 14# tagging formats
14config NET_DSA_TAG_DSA 15config NET_DSA_TAG_DSA
15 bool 16 bool
16 default n
17 17
18config NET_DSA_TAG_EDSA 18config NET_DSA_TAG_EDSA
19 bool 19 bool
20 default n
21 20
22config NET_DSA_TAG_TRAILER 21config NET_DSA_TAG_TRAILER
23 bool 22 bool
24 default n
25 23
26endif 24endif
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 6d42c17af96b..f651da60f161 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -1047,7 +1047,8 @@ static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
1047 goto error; 1047 goto error;
1048 } 1048 }
1049 1049
1050 if (skb->len <= IEEE802154_MTU) { 1050 /* Send directly if less than the MTU minus the 2 checksum bytes. */
1051 if (skb->len <= IEEE802154_MTU - IEEE802154_MFR_SIZE) {
1051 err = dev_queue_xmit(skb); 1052 err = dev_queue_xmit(skb);
1052 goto out; 1053 goto out;
1053 } 1054 }
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 766c59658563..24b384b7903e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -346,7 +346,8 @@ lookup_protocol:
346 } 346 }
347 347
348 err = -EPERM; 348 err = -EPERM;
349 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW)) 349 if (sock->type == SOCK_RAW && !kern &&
350 !ns_capable(net->user_ns, CAP_NET_RAW))
350 goto out_rcu_unlock; 351 goto out_rcu_unlock;
351 352
352 err = -EAFNOSUPPORT; 353 err = -EAFNOSUPPORT;
@@ -473,6 +474,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
473 struct sockaddr_in *addr = (struct sockaddr_in *)uaddr; 474 struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
474 struct sock *sk = sock->sk; 475 struct sock *sk = sock->sk;
475 struct inet_sock *inet = inet_sk(sk); 476 struct inet_sock *inet = inet_sk(sk);
477 struct net *net = sock_net(sk);
476 unsigned short snum; 478 unsigned short snum;
477 int chk_addr_ret; 479 int chk_addr_ret;
478 int err; 480 int err;
@@ -496,7 +498,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
496 goto out; 498 goto out;
497 } 499 }
498 500
499 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr); 501 chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);
500 502
501 /* Not specified by any standard per-se, however it breaks too 503 /* Not specified by any standard per-se, however it breaks too
502 * many applications when removed. It is unfortunate since 504 * many applications when removed. It is unfortunate since
@@ -516,7 +518,8 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
516 518
517 snum = ntohs(addr->sin_port); 519 snum = ntohs(addr->sin_port);
518 err = -EACCES; 520 err = -EACCES;
519 if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) 521 if (snum && snum < PROT_SOCK &&
522 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
520 goto out; 523 goto out;
521 524
522 /* We keep a pair of addresses. rcv_saddr is the one 525 /* We keep a pair of addresses. rcv_saddr is the one
@@ -1251,7 +1254,7 @@ EXPORT_SYMBOL(inet_sk_rebuild_header);
1251 1254
1252static int inet_gso_send_check(struct sk_buff *skb) 1255static int inet_gso_send_check(struct sk_buff *skb)
1253{ 1256{
1254 const struct net_protocol *ops; 1257 const struct net_offload *ops;
1255 const struct iphdr *iph; 1258 const struct iphdr *iph;
1256 int proto; 1259 int proto;
1257 int ihl; 1260 int ihl;
@@ -1275,9 +1278,9 @@ static int inet_gso_send_check(struct sk_buff *skb)
1275 err = -EPROTONOSUPPORT; 1278 err = -EPROTONOSUPPORT;
1276 1279
1277 rcu_read_lock(); 1280 rcu_read_lock();
1278 ops = rcu_dereference(inet_protos[proto]); 1281 ops = rcu_dereference(inet_offloads[proto]);
1279 if (likely(ops && ops->gso_send_check)) 1282 if (likely(ops && ops->callbacks.gso_send_check))
1280 err = ops->gso_send_check(skb); 1283 err = ops->callbacks.gso_send_check(skb);
1281 rcu_read_unlock(); 1284 rcu_read_unlock();
1282 1285
1283out: 1286out:
@@ -1288,7 +1291,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1288 netdev_features_t features) 1291 netdev_features_t features)
1289{ 1292{
1290 struct sk_buff *segs = ERR_PTR(-EINVAL); 1293 struct sk_buff *segs = ERR_PTR(-EINVAL);
1291 const struct net_protocol *ops; 1294 const struct net_offload *ops;
1292 struct iphdr *iph; 1295 struct iphdr *iph;
1293 int proto; 1296 int proto;
1294 int ihl; 1297 int ihl;
@@ -1325,9 +1328,9 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1325 segs = ERR_PTR(-EPROTONOSUPPORT); 1328 segs = ERR_PTR(-EPROTONOSUPPORT);
1326 1329
1327 rcu_read_lock(); 1330 rcu_read_lock();
1328 ops = rcu_dereference(inet_protos[proto]); 1331 ops = rcu_dereference(inet_offloads[proto]);
1329 if (likely(ops && ops->gso_segment)) 1332 if (likely(ops && ops->callbacks.gso_segment))
1330 segs = ops->gso_segment(skb, features); 1333 segs = ops->callbacks.gso_segment(skb, features);
1331 rcu_read_unlock(); 1334 rcu_read_unlock();
1332 1335
1333 if (!segs || IS_ERR(segs)) 1336 if (!segs || IS_ERR(segs))
@@ -1356,7 +1359,7 @@ out:
1356static struct sk_buff **inet_gro_receive(struct sk_buff **head, 1359static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1357 struct sk_buff *skb) 1360 struct sk_buff *skb)
1358{ 1361{
1359 const struct net_protocol *ops; 1362 const struct net_offload *ops;
1360 struct sk_buff **pp = NULL; 1363 struct sk_buff **pp = NULL;
1361 struct sk_buff *p; 1364 struct sk_buff *p;
1362 const struct iphdr *iph; 1365 const struct iphdr *iph;
@@ -1378,8 +1381,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1378 proto = iph->protocol; 1381 proto = iph->protocol;
1379 1382
1380 rcu_read_lock(); 1383 rcu_read_lock();
1381 ops = rcu_dereference(inet_protos[proto]); 1384 ops = rcu_dereference(inet_offloads[proto]);
1382 if (!ops || !ops->gro_receive) 1385 if (!ops || !ops->callbacks.gro_receive)
1383 goto out_unlock; 1386 goto out_unlock;
1384 1387
1385 if (*(u8 *)iph != 0x45) 1388 if (*(u8 *)iph != 0x45)
@@ -1420,7 +1423,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1420 skb_gro_pull(skb, sizeof(*iph)); 1423 skb_gro_pull(skb, sizeof(*iph));
1421 skb_set_transport_header(skb, skb_gro_offset(skb)); 1424 skb_set_transport_header(skb, skb_gro_offset(skb));
1422 1425
1423 pp = ops->gro_receive(head, skb); 1426 pp = ops->callbacks.gro_receive(head, skb);
1424 1427
1425out_unlock: 1428out_unlock:
1426 rcu_read_unlock(); 1429 rcu_read_unlock();
@@ -1435,7 +1438,7 @@ static int inet_gro_complete(struct sk_buff *skb)
1435{ 1438{
1436 __be16 newlen = htons(skb->len - skb_network_offset(skb)); 1439 __be16 newlen = htons(skb->len - skb_network_offset(skb));
1437 struct iphdr *iph = ip_hdr(skb); 1440 struct iphdr *iph = ip_hdr(skb);
1438 const struct net_protocol *ops; 1441 const struct net_offload *ops;
1439 int proto = iph->protocol; 1442 int proto = iph->protocol;
1440 int err = -ENOSYS; 1443 int err = -ENOSYS;
1441 1444
@@ -1443,11 +1446,11 @@ static int inet_gro_complete(struct sk_buff *skb)
1443 iph->tot_len = newlen; 1446 iph->tot_len = newlen;
1444 1447
1445 rcu_read_lock(); 1448 rcu_read_lock();
1446 ops = rcu_dereference(inet_protos[proto]); 1449 ops = rcu_dereference(inet_offloads[proto]);
1447 if (WARN_ON(!ops || !ops->gro_complete)) 1450 if (WARN_ON(!ops || !ops->callbacks.gro_complete))
1448 goto out_unlock; 1451 goto out_unlock;
1449 1452
1450 err = ops->gro_complete(skb); 1453 err = ops->callbacks.gro_complete(skb);
1451 1454
1452out_unlock: 1455out_unlock:
1453 rcu_read_unlock(); 1456 rcu_read_unlock();
@@ -1558,23 +1561,33 @@ static const struct net_protocol tcp_protocol = {
1558 .early_demux = tcp_v4_early_demux, 1561 .early_demux = tcp_v4_early_demux,
1559 .handler = tcp_v4_rcv, 1562 .handler = tcp_v4_rcv,
1560 .err_handler = tcp_v4_err, 1563 .err_handler = tcp_v4_err,
1561 .gso_send_check = tcp_v4_gso_send_check,
1562 .gso_segment = tcp_tso_segment,
1563 .gro_receive = tcp4_gro_receive,
1564 .gro_complete = tcp4_gro_complete,
1565 .no_policy = 1, 1564 .no_policy = 1,
1566 .netns_ok = 1, 1565 .netns_ok = 1,
1567}; 1566};
1568 1567
1568static const struct net_offload tcp_offload = {
1569 .callbacks = {
1570 .gso_send_check = tcp_v4_gso_send_check,
1571 .gso_segment = tcp_tso_segment,
1572 .gro_receive = tcp4_gro_receive,
1573 .gro_complete = tcp4_gro_complete,
1574 },
1575};
1576
1569static const struct net_protocol udp_protocol = { 1577static const struct net_protocol udp_protocol = {
1570 .handler = udp_rcv, 1578 .handler = udp_rcv,
1571 .err_handler = udp_err, 1579 .err_handler = udp_err,
1572 .gso_send_check = udp4_ufo_send_check,
1573 .gso_segment = udp4_ufo_fragment,
1574 .no_policy = 1, 1580 .no_policy = 1,
1575 .netns_ok = 1, 1581 .netns_ok = 1,
1576}; 1582};
1577 1583
1584static const struct net_offload udp_offload = {
1585 .callbacks = {
1586 .gso_send_check = udp4_ufo_send_check,
1587 .gso_segment = udp4_ufo_fragment,
1588 },
1589};
1590
1578static const struct net_protocol icmp_protocol = { 1591static const struct net_protocol icmp_protocol = {
1579 .handler = icmp_rcv, 1592 .handler = icmp_rcv,
1580 .err_handler = ping_err, 1593 .err_handler = ping_err,
@@ -1659,13 +1672,35 @@ static int ipv4_proc_init(void);
1659 * IP protocol layer initialiser 1672 * IP protocol layer initialiser
1660 */ 1673 */
1661 1674
1675static struct packet_offload ip_packet_offload __read_mostly = {
1676 .type = cpu_to_be16(ETH_P_IP),
1677 .callbacks = {
1678 .gso_send_check = inet_gso_send_check,
1679 .gso_segment = inet_gso_segment,
1680 .gro_receive = inet_gro_receive,
1681 .gro_complete = inet_gro_complete,
1682 },
1683};
1684
1685static int __init ipv4_offload_init(void)
1686{
1687 /*
1688 * Add offloads
1689 */
1690 if (inet_add_offload(&udp_offload, IPPROTO_UDP) < 0)
1691 pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
1692 if (inet_add_offload(&tcp_offload, IPPROTO_TCP) < 0)
1693 pr_crit("%s: Cannot add TCP protocol offlaod\n", __func__);
1694
1695 dev_add_offload(&ip_packet_offload);
1696 return 0;
1697}
1698
1699fs_initcall(ipv4_offload_init);
1700
1662static struct packet_type ip_packet_type __read_mostly = { 1701static struct packet_type ip_packet_type __read_mostly = {
1663 .type = cpu_to_be16(ETH_P_IP), 1702 .type = cpu_to_be16(ETH_P_IP),
1664 .func = ip_rcv, 1703 .func = ip_rcv,
1665 .gso_send_check = inet_gso_send_check,
1666 .gso_segment = inet_gso_segment,
1667 .gro_receive = inet_gro_receive,
1668 .gro_complete = inet_gro_complete,
1669}; 1704};
1670 1705
1671static int __init inet_init(void) 1706static int __init inet_init(void)
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 47800459e4cb..9547a273b9e9 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -321,7 +321,7 @@ static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb)
321static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb) 321static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
322{ 322{
323 __be32 saddr = 0; 323 __be32 saddr = 0;
324 u8 *dst_ha = NULL; 324 u8 dst_ha[MAX_ADDR_LEN], *dst_hw = NULL;
325 struct net_device *dev = neigh->dev; 325 struct net_device *dev = neigh->dev;
326 __be32 target = *(__be32 *)neigh->primary_key; 326 __be32 target = *(__be32 *)neigh->primary_key;
327 int probes = atomic_read(&neigh->probes); 327 int probes = atomic_read(&neigh->probes);
@@ -363,8 +363,8 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
363 if (probes < 0) { 363 if (probes < 0) {
364 if (!(neigh->nud_state & NUD_VALID)) 364 if (!(neigh->nud_state & NUD_VALID))
365 pr_debug("trying to ucast probe in NUD_INVALID\n"); 365 pr_debug("trying to ucast probe in NUD_INVALID\n");
366 dst_ha = neigh->ha; 366 neigh_ha_snapshot(dst_ha, neigh, dev);
367 read_lock_bh(&neigh->lock); 367 dst_hw = dst_ha;
368 } else { 368 } else {
369 probes -= neigh->parms->app_probes; 369 probes -= neigh->parms->app_probes;
370 if (probes < 0) { 370 if (probes < 0) {
@@ -376,9 +376,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
376 } 376 }
377 377
378 arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr, 378 arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
379 dst_ha, dev->dev_addr, NULL); 379 dst_hw, dev->dev_addr, NULL);
380 if (dst_ha)
381 read_unlock_bh(&neigh->lock);
382} 380}
383 381
384static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip) 382static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
@@ -1161,7 +1159,7 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1161 switch (cmd) { 1159 switch (cmd) {
1162 case SIOCDARP: 1160 case SIOCDARP:
1163 case SIOCSARP: 1161 case SIOCSARP:
1164 if (!capable(CAP_NET_ADMIN)) 1162 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1165 return -EPERM; 1163 return -EPERM;
1166 case SIOCGARP: 1164 case SIOCGARP:
1167 err = copy_from_user(&r, arg, sizeof(struct arpreq)); 1165 err = copy_from_user(&r, arg, sizeof(struct arpreq));
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 2a6abc163ed2..a8e4f2665d5e 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -55,6 +55,7 @@
55#include <linux/sysctl.h> 55#include <linux/sysctl.h>
56#endif 56#endif
57#include <linux/kmod.h> 57#include <linux/kmod.h>
58#include <linux/netconf.h>
58 59
59#include <net/arp.h> 60#include <net/arp.h>
60#include <net/ip.h> 61#include <net/ip.h>
@@ -723,7 +724,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
723 724
724 case SIOCSIFFLAGS: 725 case SIOCSIFFLAGS:
725 ret = -EPERM; 726 ret = -EPERM;
726 if (!capable(CAP_NET_ADMIN)) 727 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
727 goto out; 728 goto out;
728 break; 729 break;
729 case SIOCSIFADDR: /* Set interface address (and family) */ 730 case SIOCSIFADDR: /* Set interface address (and family) */
@@ -731,7 +732,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
731 case SIOCSIFDSTADDR: /* Set the destination address */ 732 case SIOCSIFDSTADDR: /* Set the destination address */
732 case SIOCSIFNETMASK: /* Set the netmask for the interface */ 733 case SIOCSIFNETMASK: /* Set the netmask for the interface */
733 ret = -EPERM; 734 ret = -EPERM;
734 if (!capable(CAP_NET_ADMIN)) 735 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
735 goto out; 736 goto out;
736 ret = -EINVAL; 737 ret = -EINVAL;
737 if (sin->sin_family != AF_INET) 738 if (sin->sin_family != AF_INET)
@@ -822,9 +823,9 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
822 if (!ifa) { 823 if (!ifa) {
823 ret = -ENOBUFS; 824 ret = -ENOBUFS;
824 ifa = inet_alloc_ifa(); 825 ifa = inet_alloc_ifa();
825 INIT_HLIST_NODE(&ifa->hash);
826 if (!ifa) 826 if (!ifa)
827 break; 827 break;
828 INIT_HLIST_NODE(&ifa->hash);
828 if (colon) 829 if (colon)
829 memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ); 830 memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
830 else 831 else
@@ -1442,6 +1443,155 @@ static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
1442 return 0; 1443 return 0;
1443} 1444}
1444 1445
1446static int inet_netconf_msgsize_devconf(int type)
1447{
1448 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
1449 + nla_total_size(4); /* NETCONFA_IFINDEX */
1450
1451 /* type -1 is used for ALL */
1452 if (type == -1 || type == NETCONFA_FORWARDING)
1453 size += nla_total_size(4);
1454 if (type == -1 || type == NETCONFA_RP_FILTER)
1455 size += nla_total_size(4);
1456 if (type == -1 || type == NETCONFA_MC_FORWARDING)
1457 size += nla_total_size(4);
1458
1459 return size;
1460}
1461
1462static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
1463 struct ipv4_devconf *devconf, u32 portid,
1464 u32 seq, int event, unsigned int flags,
1465 int type)
1466{
1467 struct nlmsghdr *nlh;
1468 struct netconfmsg *ncm;
1469
1470 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
1471 flags);
1472 if (nlh == NULL)
1473 return -EMSGSIZE;
1474
1475 ncm = nlmsg_data(nlh);
1476 ncm->ncm_family = AF_INET;
1477
1478 if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
1479 goto nla_put_failure;
1480
1481 /* type -1 is used for ALL */
1482 if ((type == -1 || type == NETCONFA_FORWARDING) &&
1483 nla_put_s32(skb, NETCONFA_FORWARDING,
1484 IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
1485 goto nla_put_failure;
1486 if ((type == -1 || type == NETCONFA_RP_FILTER) &&
1487 nla_put_s32(skb, NETCONFA_RP_FILTER,
1488 IPV4_DEVCONF(*devconf, RP_FILTER)) < 0)
1489 goto nla_put_failure;
1490 if ((type == -1 || type == NETCONFA_MC_FORWARDING) &&
1491 nla_put_s32(skb, NETCONFA_MC_FORWARDING,
1492 IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
1493 goto nla_put_failure;
1494
1495 return nlmsg_end(skb, nlh);
1496
1497nla_put_failure:
1498 nlmsg_cancel(skb, nlh);
1499 return -EMSGSIZE;
1500}
1501
1502void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
1503 struct ipv4_devconf *devconf)
1504{
1505 struct sk_buff *skb;
1506 int err = -ENOBUFS;
1507
1508 skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_ATOMIC);
1509 if (skb == NULL)
1510 goto errout;
1511
1512 err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
1513 RTM_NEWNETCONF, 0, type);
1514 if (err < 0) {
1515 /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
1516 WARN_ON(err == -EMSGSIZE);
1517 kfree_skb(skb);
1518 goto errout;
1519 }
1520 rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_ATOMIC);
1521 return;
1522errout:
1523 if (err < 0)
1524 rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
1525}
1526
1527static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
1528 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
1529 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
1530 [NETCONFA_RP_FILTER] = { .len = sizeof(int) },
1531};
1532
1533static int inet_netconf_get_devconf(struct sk_buff *in_skb,
1534 struct nlmsghdr *nlh,
1535 void *arg)
1536{
1537 struct net *net = sock_net(in_skb->sk);
1538 struct nlattr *tb[NETCONFA_MAX+1];
1539 struct netconfmsg *ncm;
1540 struct sk_buff *skb;
1541 struct ipv4_devconf *devconf;
1542 struct in_device *in_dev;
1543 struct net_device *dev;
1544 int ifindex;
1545 int err;
1546
1547 err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
1548 devconf_ipv4_policy);
1549 if (err < 0)
1550 goto errout;
1551
1552 err = EINVAL;
1553 if (!tb[NETCONFA_IFINDEX])
1554 goto errout;
1555
1556 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
1557 switch (ifindex) {
1558 case NETCONFA_IFINDEX_ALL:
1559 devconf = net->ipv4.devconf_all;
1560 break;
1561 case NETCONFA_IFINDEX_DEFAULT:
1562 devconf = net->ipv4.devconf_dflt;
1563 break;
1564 default:
1565 dev = __dev_get_by_index(net, ifindex);
1566 if (dev == NULL)
1567 goto errout;
1568 in_dev = __in_dev_get_rtnl(dev);
1569 if (in_dev == NULL)
1570 goto errout;
1571 devconf = &in_dev->cnf;
1572 break;
1573 }
1574
1575 err = -ENOBUFS;
1576 skb = nlmsg_new(inet_netconf_msgsize_devconf(-1), GFP_ATOMIC);
1577 if (skb == NULL)
1578 goto errout;
1579
1580 err = inet_netconf_fill_devconf(skb, ifindex, devconf,
1581 NETLINK_CB(in_skb).portid,
1582 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
1583 -1);
1584 if (err < 0) {
1585 /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
1586 WARN_ON(err == -EMSGSIZE);
1587 kfree_skb(skb);
1588 goto errout;
1589 }
1590 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
1591errout:
1592 return err;
1593}
1594
1445#ifdef CONFIG_SYSCTL 1595#ifdef CONFIG_SYSCTL
1446 1596
1447static void devinet_copy_dflt_conf(struct net *net, int i) 1597static void devinet_copy_dflt_conf(struct net *net, int i)
@@ -1467,6 +1617,12 @@ static void inet_forward_change(struct net *net)
1467 1617
1468 IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on; 1618 IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
1469 IPV4_DEVCONF_DFLT(net, FORWARDING) = on; 1619 IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
1620 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
1621 NETCONFA_IFINDEX_ALL,
1622 net->ipv4.devconf_all);
1623 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
1624 NETCONFA_IFINDEX_DEFAULT,
1625 net->ipv4.devconf_dflt);
1470 1626
1471 for_each_netdev(net, dev) { 1627 for_each_netdev(net, dev) {
1472 struct in_device *in_dev; 1628 struct in_device *in_dev;
@@ -1474,8 +1630,11 @@ static void inet_forward_change(struct net *net)
1474 dev_disable_lro(dev); 1630 dev_disable_lro(dev);
1475 rcu_read_lock(); 1631 rcu_read_lock();
1476 in_dev = __in_dev_get_rcu(dev); 1632 in_dev = __in_dev_get_rcu(dev);
1477 if (in_dev) 1633 if (in_dev) {
1478 IN_DEV_CONF_SET(in_dev, FORWARDING, on); 1634 IN_DEV_CONF_SET(in_dev, FORWARDING, on);
1635 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
1636 dev->ifindex, &in_dev->cnf);
1637 }
1479 rcu_read_unlock(); 1638 rcu_read_unlock();
1480 } 1639 }
1481} 1640}
@@ -1501,6 +1660,23 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
1501 i == IPV4_DEVCONF_ROUTE_LOCALNET - 1) 1660 i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
1502 if ((new_value == 0) && (old_value != 0)) 1661 if ((new_value == 0) && (old_value != 0))
1503 rt_cache_flush(net); 1662 rt_cache_flush(net);
1663 if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
1664 new_value != old_value) {
1665 int ifindex;
1666
1667 if (cnf == net->ipv4.devconf_dflt)
1668 ifindex = NETCONFA_IFINDEX_DEFAULT;
1669 else if (cnf == net->ipv4.devconf_all)
1670 ifindex = NETCONFA_IFINDEX_ALL;
1671 else {
1672 struct in_device *idev =
1673 container_of(cnf, struct in_device,
1674 cnf);
1675 ifindex = idev->dev->ifindex;
1676 }
1677 inet_netconf_notify_devconf(net, NETCONFA_RP_FILTER,
1678 ifindex, cnf);
1679 }
1504 } 1680 }
1505 1681
1506 return ret; 1682 return ret;
@@ -1527,15 +1703,23 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write,
1527 } 1703 }
1528 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) { 1704 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
1529 inet_forward_change(net); 1705 inet_forward_change(net);
1530 } else if (*valp) { 1706 } else {
1531 struct ipv4_devconf *cnf = ctl->extra1; 1707 struct ipv4_devconf *cnf = ctl->extra1;
1532 struct in_device *idev = 1708 struct in_device *idev =
1533 container_of(cnf, struct in_device, cnf); 1709 container_of(cnf, struct in_device, cnf);
1534 dev_disable_lro(idev->dev); 1710 if (*valp)
1711 dev_disable_lro(idev->dev);
1712 inet_netconf_notify_devconf(net,
1713 NETCONFA_FORWARDING,
1714 idev->dev->ifindex,
1715 cnf);
1535 } 1716 }
1536 rtnl_unlock(); 1717 rtnl_unlock();
1537 rt_cache_flush(net); 1718 rt_cache_flush(net);
1538 } 1719 } else
1720 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
1721 NETCONFA_IFINDEX_DEFAULT,
1722 net->ipv4.devconf_dflt);
1539 } 1723 }
1540 1724
1541 return ret; 1725 return ret;
@@ -1809,5 +1993,7 @@ void __init devinet_init(void)
1809 rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL); 1993 rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL);
1810 rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL); 1994 rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL);
1811 rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL); 1995 rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL);
1996 rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
1997 NULL, NULL);
1812} 1998}
1813 1999
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 825c608826de..5cd75e2dab2c 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -488,7 +488,7 @@ int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg)
488 switch (cmd) { 488 switch (cmd) {
489 case SIOCADDRT: /* Add a route */ 489 case SIOCADDRT: /* Add a route */
490 case SIOCDELRT: /* Delete a route */ 490 case SIOCDELRT: /* Delete a route */
491 if (!capable(CAP_NET_ADMIN)) 491 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
492 return -EPERM; 492 return -EPERM;
493 493
494 if (copy_from_user(&rt, arg, sizeof(rt))) 494 if (copy_from_user(&rt, arg, sizeof(rt)))
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 71b125cd5db1..4797a800faf8 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -803,7 +803,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
803 unsigned int bytes; 803 unsigned int bytes;
804 804
805 if (!new_size) 805 if (!new_size)
806 new_size = 1; 806 new_size = 16;
807 bytes = new_size * sizeof(struct hlist_head *); 807 bytes = new_size * sizeof(struct hlist_head *);
808 new_info_hash = fib_info_hash_alloc(bytes); 808 new_info_hash = fib_info_hash_alloc(bytes);
809 new_laddrhash = fib_info_hash_alloc(bytes); 809 new_laddrhash = fib_info_hash_alloc(bytes);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index f2eccd531746..17ff9fd7cdda 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -257,7 +257,8 @@ static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
257 struct inet_peer *peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, 1); 257 struct inet_peer *peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, 1);
258 rc = inet_peer_xrlim_allow(peer, 258 rc = inet_peer_xrlim_allow(peer,
259 net->ipv4.sysctl_icmp_ratelimit); 259 net->ipv4.sysctl_icmp_ratelimit);
260 inet_putpeer(peer); 260 if (peer)
261 inet_putpeer(peer);
261 } 262 }
262out: 263out:
263 return rc; 264 return rc;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index d34ce2972c8f..d0670f00d524 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -521,21 +521,31 @@ static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
521 int *expire, int *resend) 521 int *expire, int *resend)
522{ 522{
523 if (!rskq_defer_accept) { 523 if (!rskq_defer_accept) {
524 *expire = req->retrans >= thresh; 524 *expire = req->num_timeout >= thresh;
525 *resend = 1; 525 *resend = 1;
526 return; 526 return;
527 } 527 }
528 *expire = req->retrans >= thresh && 528 *expire = req->num_timeout >= thresh &&
529 (!inet_rsk(req)->acked || req->retrans >= max_retries); 529 (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
530 /* 530 /*
531 * Do not resend while waiting for data after ACK, 531 * Do not resend while waiting for data after ACK,
532 * start to resend on end of deferring period to give 532 * start to resend on end of deferring period to give
533 * last chance for data or ACK to create established socket. 533 * last chance for data or ACK to create established socket.
534 */ 534 */
535 *resend = !inet_rsk(req)->acked || 535 *resend = !inet_rsk(req)->acked ||
536 req->retrans >= rskq_defer_accept - 1; 536 req->num_timeout >= rskq_defer_accept - 1;
537} 537}
538 538
539int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
540{
541 int err = req->rsk_ops->rtx_syn_ack(parent, req, NULL);
542
543 if (!err)
544 req->num_retrans++;
545 return err;
546}
547EXPORT_SYMBOL(inet_rtx_syn_ack);
548
539void inet_csk_reqsk_queue_prune(struct sock *parent, 549void inet_csk_reqsk_queue_prune(struct sock *parent,
540 const unsigned long interval, 550 const unsigned long interval,
541 const unsigned long timeout, 551 const unsigned long timeout,
@@ -599,13 +609,14 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
599 req->rsk_ops->syn_ack_timeout(parent, req); 609 req->rsk_ops->syn_ack_timeout(parent, req);
600 if (!expire && 610 if (!expire &&
601 (!resend || 611 (!resend ||
602 !req->rsk_ops->rtx_syn_ack(parent, req, NULL) || 612 !inet_rtx_syn_ack(parent, req) ||
603 inet_rsk(req)->acked)) { 613 inet_rsk(req)->acked)) {
604 unsigned long timeo; 614 unsigned long timeo;
605 615
606 if (req->retrans++ == 0) 616 if (req->num_timeout++ == 0)
607 lopt->qlen_young--; 617 lopt->qlen_young--;
608 timeo = min((timeout << req->retrans), max_rto); 618 timeo = min(timeout << req->num_timeout,
619 max_rto);
609 req->expires = now + timeo; 620 req->expires = now + timeo;
610 reqp = &req->dl_next; 621 reqp = &req->dl_next;
611 continue; 622 continue;
@@ -699,6 +710,22 @@ void inet_csk_destroy_sock(struct sock *sk)
699} 710}
700EXPORT_SYMBOL(inet_csk_destroy_sock); 711EXPORT_SYMBOL(inet_csk_destroy_sock);
701 712
713/* This function allows to force a closure of a socket after the call to
714 * tcp/dccp_create_openreq_child().
715 */
716void inet_csk_prepare_forced_close(struct sock *sk)
717{
718 /* sk_clone_lock locked the socket and set refcnt to 2 */
719 bh_unlock_sock(sk);
720 sock_put(sk);
721
722 /* The below has to be done to allow calling inet_csk_destroy_sock */
723 sock_set_flag(sk, SOCK_DEAD);
724 percpu_counter_inc(sk->sk_prot->orphan_count);
725 inet_sk(sk)->inet_num = 0;
726}
727EXPORT_SYMBOL(inet_csk_prepare_forced_close);
728
702int inet_csk_listen_start(struct sock *sk, const int nr_table_entries) 729int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
703{ 730{
704 struct inet_sock *inet = inet_sk(sk); 731 struct inet_sock *inet = inet_sk(sk);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 0c34bfabc11f..7afa2c3c788f 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -44,6 +44,10 @@ struct inet_diag_entry {
44 u16 dport; 44 u16 dport;
45 u16 family; 45 u16 family;
46 u16 userlocks; 46 u16 userlocks;
47#if IS_ENABLED(CONFIG_IPV6)
48 struct in6_addr saddr_storage; /* for IPv4-mapped-IPv6 addresses */
49 struct in6_addr daddr_storage; /* for IPv4-mapped-IPv6 addresses */
50#endif
47}; 51};
48 52
49static DEFINE_MUTEX(inet_diag_table_mutex); 53static DEFINE_MUTEX(inet_diag_table_mutex);
@@ -105,6 +109,9 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
105 r->id.idiag_src[0] = inet->inet_rcv_saddr; 109 r->id.idiag_src[0] = inet->inet_rcv_saddr;
106 r->id.idiag_dst[0] = inet->inet_daddr; 110 r->id.idiag_dst[0] = inet->inet_daddr;
107 111
112 if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
113 goto errout;
114
108 /* IPv6 dual-stack sockets use inet->tos for IPv4 connections, 115 /* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
109 * hence this needs to be included regardless of socket family. 116 * hence this needs to be included regardless of socket family.
110 */ 117 */
@@ -428,25 +435,31 @@ static int inet_diag_bc_run(const struct nlattr *_bc,
428 break; 435 break;
429 } 436 }
430 437
431 if (cond->prefix_len == 0)
432 break;
433
434 if (op->code == INET_DIAG_BC_S_COND) 438 if (op->code == INET_DIAG_BC_S_COND)
435 addr = entry->saddr; 439 addr = entry->saddr;
436 else 440 else
437 addr = entry->daddr; 441 addr = entry->daddr;
438 442
443 if (cond->family != AF_UNSPEC &&
444 cond->family != entry->family) {
445 if (entry->family == AF_INET6 &&
446 cond->family == AF_INET) {
447 if (addr[0] == 0 && addr[1] == 0 &&
448 addr[2] == htonl(0xffff) &&
449 bitstring_match(addr + 3,
450 cond->addr,
451 cond->prefix_len))
452 break;
453 }
454 yes = 0;
455 break;
456 }
457
458 if (cond->prefix_len == 0)
459 break;
439 if (bitstring_match(addr, cond->addr, 460 if (bitstring_match(addr, cond->addr,
440 cond->prefix_len)) 461 cond->prefix_len))
441 break; 462 break;
442 if (entry->family == AF_INET6 &&
443 cond->family == AF_INET) {
444 if (addr[0] == 0 && addr[1] == 0 &&
445 addr[2] == htonl(0xffff) &&
446 bitstring_match(addr + 3, cond->addr,
447 cond->prefix_len))
448 break;
449 }
450 yes = 0; 463 yes = 0;
451 break; 464 break;
452 } 465 }
@@ -509,6 +522,55 @@ static int valid_cc(const void *bc, int len, int cc)
509 return 0; 522 return 0;
510} 523}
511 524
525/* Validate an inet_diag_hostcond. */
526static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
527 int *min_len)
528{
529 int addr_len;
530 struct inet_diag_hostcond *cond;
531
532 /* Check hostcond space. */
533 *min_len += sizeof(struct inet_diag_hostcond);
534 if (len < *min_len)
535 return false;
536 cond = (struct inet_diag_hostcond *)(op + 1);
537
538 /* Check address family and address length. */
539 switch (cond->family) {
540 case AF_UNSPEC:
541 addr_len = 0;
542 break;
543 case AF_INET:
544 addr_len = sizeof(struct in_addr);
545 break;
546 case AF_INET6:
547 addr_len = sizeof(struct in6_addr);
548 break;
549 default:
550 return false;
551 }
552 *min_len += addr_len;
553 if (len < *min_len)
554 return false;
555
556 /* Check prefix length (in bits) vs address length (in bytes). */
557 if (cond->prefix_len > 8 * addr_len)
558 return false;
559
560 return true;
561}
562
563/* Validate a port comparison operator. */
564static inline bool valid_port_comparison(const struct inet_diag_bc_op *op,
565 int len, int *min_len)
566{
567 /* Port comparisons put the port in a follow-on inet_diag_bc_op. */
568 *min_len += sizeof(struct inet_diag_bc_op);
569 if (len < *min_len)
570 return false;
571 return true;
572}
573
512static int inet_diag_bc_audit(const void *bytecode, int bytecode_len) 574static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
513{ 575{
514 const void *bc = bytecode; 576 const void *bc = bytecode;
@@ -516,29 +578,39 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
516 578
517 while (len > 0) { 579 while (len > 0) {
518 const struct inet_diag_bc_op *op = bc; 580 const struct inet_diag_bc_op *op = bc;
581 int min_len = sizeof(struct inet_diag_bc_op);
519 582
520//printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len); 583//printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
521 switch (op->code) { 584 switch (op->code) {
522 case INET_DIAG_BC_AUTO:
523 case INET_DIAG_BC_S_COND: 585 case INET_DIAG_BC_S_COND:
524 case INET_DIAG_BC_D_COND: 586 case INET_DIAG_BC_D_COND:
587 if (!valid_hostcond(bc, len, &min_len))
588 return -EINVAL;
589 break;
525 case INET_DIAG_BC_S_GE: 590 case INET_DIAG_BC_S_GE:
526 case INET_DIAG_BC_S_LE: 591 case INET_DIAG_BC_S_LE:
527 case INET_DIAG_BC_D_GE: 592 case INET_DIAG_BC_D_GE:
528 case INET_DIAG_BC_D_LE: 593 case INET_DIAG_BC_D_LE:
529 case INET_DIAG_BC_JMP: 594 if (!valid_port_comparison(bc, len, &min_len))
530 if (op->no < 4 || op->no > len + 4 || op->no & 3)
531 return -EINVAL;
532 if (op->no < len &&
533 !valid_cc(bytecode, bytecode_len, len - op->no))
534 return -EINVAL; 595 return -EINVAL;
535 break; 596 break;
597 case INET_DIAG_BC_AUTO:
598 case INET_DIAG_BC_JMP:
536 case INET_DIAG_BC_NOP: 599 case INET_DIAG_BC_NOP:
537 break; 600 break;
538 default: 601 default:
539 return -EINVAL; 602 return -EINVAL;
540 } 603 }
541 if (op->yes < 4 || op->yes > len + 4 || op->yes & 3) 604
605 if (op->code != INET_DIAG_BC_NOP) {
606 if (op->no < min_len || op->no > len + 4 || op->no & 3)
607 return -EINVAL;
608 if (op->no < len &&
609 !valid_cc(bytecode, bytecode_len, len - op->no))
610 return -EINVAL;
611 }
612
613 if (op->yes < min_len || op->yes > len + 4 || op->yes & 3)
542 return -EINVAL; 614 return -EINVAL;
543 bc += op->yes; 615 bc += op->yes;
544 len -= op->yes; 616 len -= op->yes;
@@ -596,6 +668,36 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
596 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); 668 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
597} 669}
598 670
671/* Get the IPv4, IPv6, or IPv4-mapped-IPv6 local and remote addresses
672 * from a request_sock. For IPv4-mapped-IPv6 we must map IPv4 to IPv6.
673 */
674static inline void inet_diag_req_addrs(const struct sock *sk,
675 const struct request_sock *req,
676 struct inet_diag_entry *entry)
677{
678 struct inet_request_sock *ireq = inet_rsk(req);
679
680#if IS_ENABLED(CONFIG_IPV6)
681 if (sk->sk_family == AF_INET6) {
682 if (req->rsk_ops->family == AF_INET6) {
683 entry->saddr = inet6_rsk(req)->loc_addr.s6_addr32;
684 entry->daddr = inet6_rsk(req)->rmt_addr.s6_addr32;
685 } else if (req->rsk_ops->family == AF_INET) {
686 ipv6_addr_set_v4mapped(ireq->loc_addr,
687 &entry->saddr_storage);
688 ipv6_addr_set_v4mapped(ireq->rmt_addr,
689 &entry->daddr_storage);
690 entry->saddr = entry->saddr_storage.s6_addr32;
691 entry->daddr = entry->daddr_storage.s6_addr32;
692 }
693 } else
694#endif
695 {
696 entry->saddr = &ireq->loc_addr;
697 entry->daddr = &ireq->rmt_addr;
698 }
699}
700
599static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, 701static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
600 struct request_sock *req, 702 struct request_sock *req,
601 struct user_namespace *user_ns, 703 struct user_namespace *user_ns,
@@ -617,7 +719,7 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
617 r->idiag_family = sk->sk_family; 719 r->idiag_family = sk->sk_family;
618 r->idiag_state = TCP_SYN_RECV; 720 r->idiag_state = TCP_SYN_RECV;
619 r->idiag_timer = 1; 721 r->idiag_timer = 1;
620 r->idiag_retrans = req->retrans; 722 r->idiag_retrans = req->num_retrans;
621 723
622 r->id.idiag_if = sk->sk_bound_dev_if; 724 r->id.idiag_if = sk->sk_bound_dev_if;
623 sock_diag_save_cookie(req, r->id.idiag_cookie); 725 sock_diag_save_cookie(req, r->id.idiag_cookie);
@@ -637,8 +739,10 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
637 r->idiag_inode = 0; 739 r->idiag_inode = 0;
638#if IS_ENABLED(CONFIG_IPV6) 740#if IS_ENABLED(CONFIG_IPV6)
639 if (r->idiag_family == AF_INET6) { 741 if (r->idiag_family == AF_INET6) {
640 *(struct in6_addr *)r->id.idiag_src = inet6_rsk(req)->loc_addr; 742 struct inet_diag_entry entry;
641 *(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr; 743 inet_diag_req_addrs(sk, req, &entry);
744 memcpy(r->id.idiag_src, entry.saddr, sizeof(struct in6_addr));
745 memcpy(r->id.idiag_dst, entry.daddr, sizeof(struct in6_addr));
642 } 746 }
643#endif 747#endif
644 748
@@ -691,18 +795,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
691 continue; 795 continue;
692 796
693 if (bc) { 797 if (bc) {
694 entry.saddr = 798 inet_diag_req_addrs(sk, req, &entry);
695#if IS_ENABLED(CONFIG_IPV6)
696 (entry.family == AF_INET6) ?
697 inet6_rsk(req)->loc_addr.s6_addr32 :
698#endif
699 &ireq->loc_addr;
700 entry.daddr =
701#if IS_ENABLED(CONFIG_IPV6)
702 (entry.family == AF_INET6) ?
703 inet6_rsk(req)->rmt_addr.s6_addr32 :
704#endif
705 &ireq->rmt_addr;
706 entry.dport = ntohs(ireq->rmt_port); 799 entry.dport = ntohs(ireq->rmt_port);
707 800
708 if (!inet_diag_bc_run(bc, &entry)) 801 if (!inet_diag_bc_run(bc, &entry))
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 7880af970208..fa3ae8148710 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -237,12 +237,14 @@ struct sock *__inet_lookup_established(struct net *net,
237 rcu_read_lock(); 237 rcu_read_lock();
238begin: 238begin:
239 sk_nulls_for_each_rcu(sk, node, &head->chain) { 239 sk_nulls_for_each_rcu(sk, node, &head->chain) {
240 if (INET_MATCH(sk, net, hash, acookie, 240 if (sk->sk_hash != hash)
241 saddr, daddr, ports, dif)) { 241 continue;
242 if (likely(INET_MATCH(sk, net, acookie,
243 saddr, daddr, ports, dif))) {
242 if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) 244 if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
243 goto begintw; 245 goto begintw;
244 if (unlikely(!INET_MATCH(sk, net, hash, acookie, 246 if (unlikely(!INET_MATCH(sk, net, acookie,
245 saddr, daddr, ports, dif))) { 247 saddr, daddr, ports, dif))) {
246 sock_put(sk); 248 sock_put(sk);
247 goto begin; 249 goto begin;
248 } 250 }
@@ -260,14 +262,18 @@ begin:
260begintw: 262begintw:
261 /* Must check for a TIME_WAIT'er before going to listener hash. */ 263 /* Must check for a TIME_WAIT'er before going to listener hash. */
262 sk_nulls_for_each_rcu(sk, node, &head->twchain) { 264 sk_nulls_for_each_rcu(sk, node, &head->twchain) {
263 if (INET_TW_MATCH(sk, net, hash, acookie, 265 if (sk->sk_hash != hash)
264 saddr, daddr, ports, dif)) { 266 continue;
267 if (likely(INET_TW_MATCH(sk, net, acookie,
268 saddr, daddr, ports,
269 dif))) {
265 if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) { 270 if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) {
266 sk = NULL; 271 sk = NULL;
267 goto out; 272 goto out;
268 } 273 }
269 if (unlikely(!INET_TW_MATCH(sk, net, hash, acookie, 274 if (unlikely(!INET_TW_MATCH(sk, net, acookie,
270 saddr, daddr, ports, dif))) { 275 saddr, daddr, ports,
276 dif))) {
271 sock_put(sk); 277 sock_put(sk);
272 goto begintw; 278 goto begintw;
273 } 279 }
@@ -314,10 +320,12 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
314 320
315 /* Check TIME-WAIT sockets first. */ 321 /* Check TIME-WAIT sockets first. */
316 sk_nulls_for_each(sk2, node, &head->twchain) { 322 sk_nulls_for_each(sk2, node, &head->twchain) {
317 tw = inet_twsk(sk2); 323 if (sk2->sk_hash != hash)
324 continue;
318 325
319 if (INET_TW_MATCH(sk2, net, hash, acookie, 326 if (likely(INET_TW_MATCH(sk2, net, acookie,
320 saddr, daddr, ports, dif)) { 327 saddr, daddr, ports, dif))) {
328 tw = inet_twsk(sk2);
321 if (twsk_unique(sk, sk2, twp)) 329 if (twsk_unique(sk, sk2, twp))
322 goto unique; 330 goto unique;
323 else 331 else
@@ -328,8 +336,10 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
328 336
329 /* And established part... */ 337 /* And established part... */
330 sk_nulls_for_each(sk2, node, &head->chain) { 338 sk_nulls_for_each(sk2, node, &head->chain) {
331 if (INET_MATCH(sk2, net, hash, acookie, 339 if (sk2->sk_hash != hash)
332 saddr, daddr, ports, dif)) 340 continue;
341 if (likely(INET_MATCH(sk2, net, acookie,
342 saddr, daddr, ports, dif)))
333 goto not_unique; 343 goto not_unique;
334 } 344 }
335 345
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 448e68546827..eb9d63a570cd 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -707,28 +707,27 @@ EXPORT_SYMBOL(ip_defrag);
707 707
708struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) 708struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
709{ 709{
710 const struct iphdr *iph; 710 struct iphdr iph;
711 u32 len; 711 u32 len;
712 712
713 if (skb->protocol != htons(ETH_P_IP)) 713 if (skb->protocol != htons(ETH_P_IP))
714 return skb; 714 return skb;
715 715
716 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 716 if (!skb_copy_bits(skb, 0, &iph, sizeof(iph)))
717 return skb; 717 return skb;
718 718
719 iph = ip_hdr(skb); 719 if (iph.ihl < 5 || iph.version != 4)
720 if (iph->ihl < 5 || iph->version != 4)
721 return skb; 720 return skb;
722 if (!pskb_may_pull(skb, iph->ihl*4)) 721
723 return skb; 722 len = ntohs(iph.tot_len);
724 iph = ip_hdr(skb); 723 if (skb->len < len || len < (iph.ihl * 4))
725 len = ntohs(iph->tot_len);
726 if (skb->len < len || len < (iph->ihl * 4))
727 return skb; 724 return skb;
728 725
729 if (ip_is_fragment(ip_hdr(skb))) { 726 if (ip_is_fragment(&iph)) {
730 skb = skb_share_check(skb, GFP_ATOMIC); 727 skb = skb_share_check(skb, GFP_ATOMIC);
731 if (skb) { 728 if (skb) {
729 if (!pskb_may_pull(skb, iph.ihl*4))
730 return skb;
732 if (pskb_trim_rcsum(skb, len)) 731 if (pskb_trim_rcsum(skb, len))
733 return skb; 732 return skb;
734 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); 733 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
@@ -802,6 +801,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
802 table[0].data = &net->ipv4.frags.high_thresh; 801 table[0].data = &net->ipv4.frags.high_thresh;
803 table[1].data = &net->ipv4.frags.low_thresh; 802 table[1].data = &net->ipv4.frags.low_thresh;
804 table[2].data = &net->ipv4.frags.timeout; 803 table[2].data = &net->ipv4.frags.timeout;
804
805 /* Don't export sysctls to unprivileged users */
806 if (net->user_ns != &init_user_ns)
807 table[0].procname = NULL;
805 } 808 }
806 809
807 hdr = register_net_sysctl(net, "net/ipv4", table); 810 hdr = register_net_sysctl(net, "net/ipv4", table);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 7240f8e2dd45..303012adf9e6 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -164,21 +164,6 @@ struct ipgre_net {
164#define tunnels_r tunnels[2] 164#define tunnels_r tunnels[2]
165#define tunnels_l tunnels[1] 165#define tunnels_l tunnels[1]
166#define tunnels_wc tunnels[0] 166#define tunnels_wc tunnels[0]
167/*
168 * Locking : hash tables are protected by RCU and RTNL
169 */
170
171#define for_each_ip_tunnel_rcu(start) \
172 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
173
174/* often modified stats are per cpu, other are shared (netdev->stats) */
175struct pcpu_tstats {
176 u64 rx_packets;
177 u64 rx_bytes;
178 u64 tx_packets;
179 u64 tx_bytes;
180 struct u64_stats_sync syncp;
181};
182 167
183static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev, 168static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
184 struct rtnl_link_stats64 *tot) 169 struct rtnl_link_stats64 *tot)
@@ -250,7 +235,7 @@ static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
250 ARPHRD_ETHER : ARPHRD_IPGRE; 235 ARPHRD_ETHER : ARPHRD_IPGRE;
251 int score, cand_score = 4; 236 int score, cand_score = 4;
252 237
253 for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) { 238 for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
254 if (local != t->parms.iph.saddr || 239 if (local != t->parms.iph.saddr ||
255 remote != t->parms.iph.daddr || 240 remote != t->parms.iph.daddr ||
256 !(t->dev->flags & IFF_UP)) 241 !(t->dev->flags & IFF_UP))
@@ -277,7 +262,7 @@ static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
277 } 262 }
278 } 263 }
279 264
280 for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) { 265 for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
281 if (remote != t->parms.iph.daddr || 266 if (remote != t->parms.iph.daddr ||
282 !(t->dev->flags & IFF_UP)) 267 !(t->dev->flags & IFF_UP))
283 continue; 268 continue;
@@ -303,7 +288,7 @@ static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
303 } 288 }
304 } 289 }
305 290
306 for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) { 291 for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
307 if ((local != t->parms.iph.saddr && 292 if ((local != t->parms.iph.saddr &&
308 (local != t->parms.iph.daddr || 293 (local != t->parms.iph.daddr ||
309 !ipv4_is_multicast(local))) || 294 !ipv4_is_multicast(local))) ||
@@ -331,7 +316,7 @@ static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
331 } 316 }
332 } 317 }
333 318
334 for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) { 319 for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
335 if (t->parms.i_key != key || 320 if (t->parms.i_key != key ||
336 !(t->dev->flags & IFF_UP)) 321 !(t->dev->flags & IFF_UP))
337 continue; 322 continue;
@@ -753,7 +738,6 @@ drop:
753static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 738static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
754{ 739{
755 struct ip_tunnel *tunnel = netdev_priv(dev); 740 struct ip_tunnel *tunnel = netdev_priv(dev);
756 struct pcpu_tstats *tstats;
757 const struct iphdr *old_iph = ip_hdr(skb); 741 const struct iphdr *old_iph = ip_hdr(skb);
758 const struct iphdr *tiph; 742 const struct iphdr *tiph;
759 struct flowi4 fl4; 743 struct flowi4 fl4;
@@ -766,6 +750,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
766 int gre_hlen; 750 int gre_hlen;
767 __be32 dst; 751 __be32 dst;
768 int mtu; 752 int mtu;
753 u8 ttl;
769 754
770 if (skb->ip_summed == CHECKSUM_PARTIAL && 755 if (skb->ip_summed == CHECKSUM_PARTIAL &&
771 skb_checksum_help(skb)) 756 skb_checksum_help(skb))
@@ -776,7 +761,10 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
776 761
777 if (dev->header_ops && dev->type == ARPHRD_IPGRE) { 762 if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
778 gre_hlen = 0; 763 gre_hlen = 0;
779 tiph = (const struct iphdr *)skb->data; 764 if (skb->protocol == htons(ETH_P_IP))
765 tiph = (const struct iphdr *)skb->data;
766 else
767 tiph = &tunnel->parms.iph;
780 } else { 768 } else {
781 gre_hlen = tunnel->hlen; 769 gre_hlen = tunnel->hlen;
782 tiph = &tunnel->parms.iph; 770 tiph = &tunnel->parms.iph;
@@ -828,6 +816,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
828 goto tx_error; 816 goto tx_error;
829 } 817 }
830 818
819 ttl = tiph->ttl;
831 tos = tiph->tos; 820 tos = tiph->tos;
832 if (tos == 1) { 821 if (tos == 1) {
833 tos = 0; 822 tos = 0;
@@ -920,11 +909,12 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
920 dev_kfree_skb(skb); 909 dev_kfree_skb(skb);
921 skb = new_skb; 910 skb = new_skb;
922 old_iph = ip_hdr(skb); 911 old_iph = ip_hdr(skb);
912 /* Warning : tiph value might point to freed memory */
923 } 913 }
924 914
925 skb_reset_transport_header(skb);
926 skb_push(skb, gre_hlen); 915 skb_push(skb, gre_hlen);
927 skb_reset_network_header(skb); 916 skb_reset_network_header(skb);
917 skb_set_transport_header(skb, sizeof(*iph));
928 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 918 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
929 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | 919 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
930 IPSKB_REROUTED); 920 IPSKB_REROUTED);
@@ -943,8 +933,9 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
943 iph->tos = ipgre_ecn_encapsulate(tos, old_iph, skb); 933 iph->tos = ipgre_ecn_encapsulate(tos, old_iph, skb);
944 iph->daddr = fl4.daddr; 934 iph->daddr = fl4.daddr;
945 iph->saddr = fl4.saddr; 935 iph->saddr = fl4.saddr;
936 iph->ttl = ttl;
946 937
947 if ((iph->ttl = tiph->ttl) == 0) { 938 if (ttl == 0) {
948 if (skb->protocol == htons(ETH_P_IP)) 939 if (skb->protocol == htons(ETH_P_IP))
949 iph->ttl = old_iph->ttl; 940 iph->ttl = old_iph->ttl;
950#if IS_ENABLED(CONFIG_IPV6) 941#if IS_ENABLED(CONFIG_IPV6)
@@ -977,9 +968,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
977 } 968 }
978 } 969 }
979 970
980 nf_reset(skb); 971 iptunnel_xmit(skb, dev);
981 tstats = this_cpu_ptr(dev->tstats);
982 __IPTUNNEL_XMIT(tstats, &dev->stats);
983 return NETDEV_TX_OK; 972 return NETDEV_TX_OK;
984 973
985#if IS_ENABLED(CONFIG_IPV6) 974#if IS_ENABLED(CONFIG_IPV6)
@@ -1082,7 +1071,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
1082 case SIOCADDTUNNEL: 1071 case SIOCADDTUNNEL:
1083 case SIOCCHGTUNNEL: 1072 case SIOCCHGTUNNEL:
1084 err = -EPERM; 1073 err = -EPERM;
1085 if (!capable(CAP_NET_ADMIN)) 1074 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1086 goto done; 1075 goto done;
1087 1076
1088 err = -EFAULT; 1077 err = -EFAULT;
@@ -1157,7 +1146,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
1157 1146
1158 case SIOCDELTUNNEL: 1147 case SIOCDELTUNNEL:
1159 err = -EPERM; 1148 err = -EPERM;
1160 if (!capable(CAP_NET_ADMIN)) 1149 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1161 goto done; 1150 goto done;
1162 1151
1163 if (dev == ign->fb_tunnel_dev) { 1152 if (dev == ign->fb_tunnel_dev) {
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 1dc01f9793d5..f6289bf6f332 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -409,7 +409,7 @@ int ip_options_compile(struct net *net,
409 optptr[2] += 8; 409 optptr[2] += 8;
410 break; 410 break;
411 default: 411 default:
412 if (!skb && !capable(CAP_NET_RAW)) { 412 if (!skb && !ns_capable(net->user_ns, CAP_NET_RAW)) {
413 pp_ptr = optptr + 3; 413 pp_ptr = optptr + 3;
414 goto error; 414 goto error;
415 } 415 }
@@ -445,7 +445,7 @@ int ip_options_compile(struct net *net,
445 opt->router_alert = optptr - iph; 445 opt->router_alert = optptr - iph;
446 break; 446 break;
447 case IPOPT_CIPSO: 447 case IPOPT_CIPSO:
448 if ((!skb && !capable(CAP_NET_RAW)) || opt->cipso) { 448 if ((!skb && !ns_capable(net->user_ns, CAP_NET_RAW)) || opt->cipso) {
449 pp_ptr = optptr; 449 pp_ptr = optptr;
450 goto error; 450 goto error;
451 } 451 }
@@ -458,7 +458,7 @@ int ip_options_compile(struct net *net,
458 case IPOPT_SEC: 458 case IPOPT_SEC:
459 case IPOPT_SID: 459 case IPOPT_SID:
460 default: 460 default:
461 if (!skb && !capable(CAP_NET_RAW)) { 461 if (!skb && !ns_capable(net->user_ns, CAP_NET_RAW)) {
462 pp_ptr = optptr; 462 pp_ptr = optptr;
463 goto error; 463 goto error;
464 } 464 }
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 6537a408a4fb..3e98ed2bff55 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -595,6 +595,10 @@ slow_path_clean:
595 } 595 }
596 596
597slow_path: 597slow_path:
598 /* for offloaded checksums cleanup checksum before fragmentation */
599 if ((skb->ip_summed == CHECKSUM_PARTIAL) && skb_checksum_help(skb))
600 goto fail;
601
598 left = skb->len - hlen; /* Space per frame */ 602 left = skb->len - hlen; /* Space per frame */
599 ptr = hlen; /* Where to start from */ 603 ptr = hlen; /* Where to start from */
600 604
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 5eea4a811042..d9c4f113d709 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -457,19 +457,28 @@ static int do_ip_setsockopt(struct sock *sk, int level,
457 struct inet_sock *inet = inet_sk(sk); 457 struct inet_sock *inet = inet_sk(sk);
458 int val = 0, err; 458 int val = 0, err;
459 459
460 if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) | 460 switch (optname) {
461 (1<<IP_RECVOPTS) | (1<<IP_RECVTOS) | 461 case IP_PKTINFO:
462 (1<<IP_RETOPTS) | (1<<IP_TOS) | 462 case IP_RECVTTL:
463 (1<<IP_TTL) | (1<<IP_HDRINCL) | 463 case IP_RECVOPTS:
464 (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) | 464 case IP_RECVTOS:
465 (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | 465 case IP_RETOPTS:
466 (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) | 466 case IP_TOS:
467 (1<<IP_MINTTL) | (1<<IP_NODEFRAG))) || 467 case IP_TTL:
468 optname == IP_UNICAST_IF || 468 case IP_HDRINCL:
469 optname == IP_MULTICAST_TTL || 469 case IP_MTU_DISCOVER:
470 optname == IP_MULTICAST_ALL || 470 case IP_RECVERR:
471 optname == IP_MULTICAST_LOOP || 471 case IP_ROUTER_ALERT:
472 optname == IP_RECVORIGDSTADDR) { 472 case IP_FREEBIND:
473 case IP_PASSSEC:
474 case IP_TRANSPARENT:
475 case IP_MINTTL:
476 case IP_NODEFRAG:
477 case IP_UNICAST_IF:
478 case IP_MULTICAST_TTL:
479 case IP_MULTICAST_ALL:
480 case IP_MULTICAST_LOOP:
481 case IP_RECVORIGDSTADDR:
473 if (optlen >= sizeof(int)) { 482 if (optlen >= sizeof(int)) {
474 if (get_user(val, (int __user *) optval)) 483 if (get_user(val, (int __user *) optval))
475 return -EFAULT; 484 return -EFAULT;
@@ -581,7 +590,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
581 case IP_TTL: 590 case IP_TTL:
582 if (optlen < 1) 591 if (optlen < 1)
583 goto e_inval; 592 goto e_inval;
584 if (val != -1 && (val < 0 || val > 255)) 593 if (val != -1 && (val < 1 || val > 255))
585 goto e_inval; 594 goto e_inval;
586 inet->uc_ttl = val; 595 inet->uc_ttl = val;
587 break; 596 break;
@@ -980,13 +989,14 @@ mc_msf_out:
980 case IP_IPSEC_POLICY: 989 case IP_IPSEC_POLICY:
981 case IP_XFRM_POLICY: 990 case IP_XFRM_POLICY:
982 err = -EPERM; 991 err = -EPERM;
983 if (!capable(CAP_NET_ADMIN)) 992 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
984 break; 993 break;
985 err = xfrm_user_policy(sk, optname, optval, optlen); 994 err = xfrm_user_policy(sk, optname, optval, optlen);
986 break; 995 break;
987 996
988 case IP_TRANSPARENT: 997 case IP_TRANSPARENT:
989 if (!!val && !capable(CAP_NET_RAW) && !capable(CAP_NET_ADMIN)) { 998 if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
999 !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
990 err = -EPERM; 1000 err = -EPERM;
991 break; 1001 break;
992 } 1002 }
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 1831092f999f..c3a4233c0ac2 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -66,20 +66,6 @@ static void vti_tunnel_setup(struct net_device *dev);
66static void vti_dev_free(struct net_device *dev); 66static void vti_dev_free(struct net_device *dev);
67static int vti_tunnel_bind_dev(struct net_device *dev); 67static int vti_tunnel_bind_dev(struct net_device *dev);
68 68
69/* Locking : hash tables are protected by RCU and RTNL */
70
71#define for_each_ip_tunnel_rcu(start) \
72 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
73
74/* often modified stats are per cpu, other are shared (netdev->stats) */
75struct pcpu_tstats {
76 u64 rx_packets;
77 u64 rx_bytes;
78 u64 tx_packets;
79 u64 tx_bytes;
80 struct u64_stats_sync syncp;
81};
82
83#define VTI_XMIT(stats1, stats2) do { \ 69#define VTI_XMIT(stats1, stats2) do { \
84 int err; \ 70 int err; \
85 int pkt_len = skb->len; \ 71 int pkt_len = skb->len; \
@@ -142,19 +128,19 @@ static struct ip_tunnel *vti_tunnel_lookup(struct net *net,
142 struct ip_tunnel *t; 128 struct ip_tunnel *t;
143 struct vti_net *ipn = net_generic(net, vti_net_id); 129 struct vti_net *ipn = net_generic(net, vti_net_id);
144 130
145 for_each_ip_tunnel_rcu(ipn->tunnels_r_l[h0 ^ h1]) 131 for_each_ip_tunnel_rcu(t, ipn->tunnels_r_l[h0 ^ h1])
146 if (local == t->parms.iph.saddr && 132 if (local == t->parms.iph.saddr &&
147 remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) 133 remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
148 return t; 134 return t;
149 for_each_ip_tunnel_rcu(ipn->tunnels_r[h0]) 135 for_each_ip_tunnel_rcu(t, ipn->tunnels_r[h0])
150 if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) 136 if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
151 return t; 137 return t;
152 138
153 for_each_ip_tunnel_rcu(ipn->tunnels_l[h1]) 139 for_each_ip_tunnel_rcu(t, ipn->tunnels_l[h1])
154 if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP)) 140 if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
155 return t; 141 return t;
156 142
157 for_each_ip_tunnel_rcu(ipn->tunnels_wc[0]) 143 for_each_ip_tunnel_rcu(t, ipn->tunnels_wc[0])
158 if (t && (t->dev->flags&IFF_UP)) 144 if (t && (t->dev->flags&IFF_UP))
159 return t; 145 return t;
160 return NULL; 146 return NULL;
@@ -338,12 +324,17 @@ static int vti_rcv(struct sk_buff *skb)
338 if (tunnel != NULL) { 324 if (tunnel != NULL) {
339 struct pcpu_tstats *tstats; 325 struct pcpu_tstats *tstats;
340 326
327 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
328 return -1;
329
341 tstats = this_cpu_ptr(tunnel->dev->tstats); 330 tstats = this_cpu_ptr(tunnel->dev->tstats);
342 u64_stats_update_begin(&tstats->syncp); 331 u64_stats_update_begin(&tstats->syncp);
343 tstats->rx_packets++; 332 tstats->rx_packets++;
344 tstats->rx_bytes += skb->len; 333 tstats->rx_bytes += skb->len;
345 u64_stats_update_end(&tstats->syncp); 334 u64_stats_update_end(&tstats->syncp);
346 335
336 skb->mark = 0;
337 secpath_reset(skb);
347 skb->dev = tunnel->dev; 338 skb->dev = tunnel->dev;
348 return 1; 339 return 1;
349 } 340 }
@@ -497,7 +488,7 @@ vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
497 case SIOCADDTUNNEL: 488 case SIOCADDTUNNEL:
498 case SIOCCHGTUNNEL: 489 case SIOCCHGTUNNEL:
499 err = -EPERM; 490 err = -EPERM;
500 if (!capable(CAP_NET_ADMIN)) 491 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
501 goto done; 492 goto done;
502 493
503 err = -EFAULT; 494 err = -EFAULT;
@@ -562,7 +553,7 @@ vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
562 553
563 case SIOCDELTUNNEL: 554 case SIOCDELTUNNEL:
564 err = -EPERM; 555 err = -EPERM;
565 if (!capable(CAP_NET_ADMIN)) 556 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
566 goto done; 557 goto done;
567 558
568 if (dev == ipn->fb_tunnel_dev) { 559 if (dev == ipn->fb_tunnel_dev) {
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 798358b10717..a2e50ae80b53 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -136,6 +136,8 @@ __be32 ic_myaddr = NONE; /* My IP address */
136static __be32 ic_netmask = NONE; /* Netmask for local subnet */ 136static __be32 ic_netmask = NONE; /* Netmask for local subnet */
137__be32 ic_gateway = NONE; /* Gateway IP address */ 137__be32 ic_gateway = NONE; /* Gateway IP address */
138 138
139__be32 ic_addrservaddr = NONE; /* IP Address of the IP addresses'server */
140
139__be32 ic_servaddr = NONE; /* Boot server IP address */ 141__be32 ic_servaddr = NONE; /* Boot server IP address */
140 142
141__be32 root_server_addr = NONE; /* Address of NFS server */ 143__be32 root_server_addr = NONE; /* Address of NFS server */
@@ -558,6 +560,7 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
558 if (ic_myaddr == NONE) 560 if (ic_myaddr == NONE)
559 ic_myaddr = tip; 561 ic_myaddr = tip;
560 ic_servaddr = sip; 562 ic_servaddr = sip;
563 ic_addrservaddr = sip;
561 ic_got_reply = IC_RARP; 564 ic_got_reply = IC_RARP;
562 565
563drop_unlock: 566drop_unlock:
@@ -1068,7 +1071,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
1068 ic_servaddr = server_id; 1071 ic_servaddr = server_id;
1069#ifdef IPCONFIG_DEBUG 1072#ifdef IPCONFIG_DEBUG
1070 printk("DHCP: Offered address %pI4 by server %pI4\n", 1073 printk("DHCP: Offered address %pI4 by server %pI4\n",
1071 &ic_myaddr, &ic_servaddr); 1074 &ic_myaddr, &b->iph.saddr);
1072#endif 1075#endif
1073 /* The DHCP indicated server address takes 1076 /* The DHCP indicated server address takes
1074 * precedence over the bootp header one if 1077 * precedence over the bootp header one if
@@ -1113,6 +1116,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
1113 ic_dev = dev; 1116 ic_dev = dev;
1114 ic_myaddr = b->your_ip; 1117 ic_myaddr = b->your_ip;
1115 ic_servaddr = b->server_ip; 1118 ic_servaddr = b->server_ip;
1119 ic_addrservaddr = b->iph.saddr;
1116 if (ic_gateway == NONE && b->relay_ip) 1120 if (ic_gateway == NONE && b->relay_ip)
1117 ic_gateway = b->relay_ip; 1121 ic_gateway = b->relay_ip;
1118 if (ic_nameservers[0] == NONE) 1122 if (ic_nameservers[0] == NONE)
@@ -1268,7 +1272,7 @@ static int __init ic_dynamic(void)
1268 printk("IP-Config: Got %s answer from %pI4, ", 1272 printk("IP-Config: Got %s answer from %pI4, ",
1269 ((ic_got_reply & IC_RARP) ? "RARP" 1273 ((ic_got_reply & IC_RARP) ? "RARP"
1270 : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"), 1274 : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"),
1271 &ic_servaddr); 1275 &ic_addrservaddr);
1272 pr_cont("my address is %pI4\n", &ic_myaddr); 1276 pr_cont("my address is %pI4\n", &ic_myaddr);
1273 1277
1274 return 0; 1278 return 0;
@@ -1500,8 +1504,10 @@ static int __init ip_auto_config(void)
1500 * Clue in the operator. 1504 * Clue in the operator.
1501 */ 1505 */
1502 pr_info("IP-Config: Complete:\n"); 1506 pr_info("IP-Config: Complete:\n");
1503 pr_info(" device=%s, addr=%pI4, mask=%pI4, gw=%pI4\n", 1507
1504 ic_dev->name, &ic_myaddr, &ic_netmask, &ic_gateway); 1508 pr_info(" device=%s, hwaddr=%*phC, ipaddr=%pI4, mask=%pI4, gw=%pI4\n",
1509 ic_dev->name, ic_dev->addr_len, ic_dev->dev_addr,
1510 &ic_myaddr, &ic_netmask, &ic_gateway);
1505 pr_info(" host=%s, domain=%s, nis-domain=%s\n", 1511 pr_info(" host=%s, domain=%s, nis-domain=%s\n",
1506 utsname()->nodename, ic_domain, utsname()->domainname); 1512 utsname()->nodename, ic_domain, utsname()->domainname);
1507 pr_info(" bootserver=%pI4, rootserver=%pI4, rootpath=%s", 1513 pr_info(" bootserver=%pI4, rootserver=%pI4, rootpath=%s",
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index e15b45297c09..191fc24a745a 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -138,22 +138,7 @@ struct ipip_net {
138static int ipip_tunnel_init(struct net_device *dev); 138static int ipip_tunnel_init(struct net_device *dev);
139static void ipip_tunnel_setup(struct net_device *dev); 139static void ipip_tunnel_setup(struct net_device *dev);
140static void ipip_dev_free(struct net_device *dev); 140static void ipip_dev_free(struct net_device *dev);
141 141static struct rtnl_link_ops ipip_link_ops __read_mostly;
142/*
143 * Locking : hash tables are protected by RCU and RTNL
144 */
145
146#define for_each_ip_tunnel_rcu(start) \
147 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
148
149/* often modified stats are per cpu, other are shared (netdev->stats) */
150struct pcpu_tstats {
151 u64 rx_packets;
152 u64 rx_bytes;
153 u64 tx_packets;
154 u64 tx_bytes;
155 struct u64_stats_sync syncp;
156};
157 142
158static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev, 143static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev,
159 struct rtnl_link_stats64 *tot) 144 struct rtnl_link_stats64 *tot)
@@ -197,16 +182,16 @@ static struct ip_tunnel *ipip_tunnel_lookup(struct net *net,
197 struct ip_tunnel *t; 182 struct ip_tunnel *t;
198 struct ipip_net *ipn = net_generic(net, ipip_net_id); 183 struct ipip_net *ipn = net_generic(net, ipip_net_id);
199 184
200 for_each_ip_tunnel_rcu(ipn->tunnels_r_l[h0 ^ h1]) 185 for_each_ip_tunnel_rcu(t, ipn->tunnels_r_l[h0 ^ h1])
201 if (local == t->parms.iph.saddr && 186 if (local == t->parms.iph.saddr &&
202 remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) 187 remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
203 return t; 188 return t;
204 189
205 for_each_ip_tunnel_rcu(ipn->tunnels_r[h0]) 190 for_each_ip_tunnel_rcu(t, ipn->tunnels_r[h0])
206 if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) 191 if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
207 return t; 192 return t;
208 193
209 for_each_ip_tunnel_rcu(ipn->tunnels_l[h1]) 194 for_each_ip_tunnel_rcu(t, ipn->tunnels_l[h1])
210 if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP)) 195 if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
211 return t; 196 return t;
212 197
@@ -264,6 +249,32 @@ static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
264 rcu_assign_pointer(*tp, t); 249 rcu_assign_pointer(*tp, t);
265} 250}
266 251
252static int ipip_tunnel_create(struct net_device *dev)
253{
254 struct ip_tunnel *t = netdev_priv(dev);
255 struct net *net = dev_net(dev);
256 struct ipip_net *ipn = net_generic(net, ipip_net_id);
257 int err;
258
259 err = ipip_tunnel_init(dev);
260 if (err < 0)
261 goto out;
262
263 err = register_netdevice(dev);
264 if (err < 0)
265 goto out;
266
267 strcpy(t->parms.name, dev->name);
268 dev->rtnl_link_ops = &ipip_link_ops;
269
270 dev_hold(dev);
271 ipip_tunnel_link(ipn, t);
272 return 0;
273
274out:
275 return err;
276}
277
267static struct ip_tunnel *ipip_tunnel_locate(struct net *net, 278static struct ip_tunnel *ipip_tunnel_locate(struct net *net,
268 struct ip_tunnel_parm *parms, int create) 279 struct ip_tunnel_parm *parms, int create)
269{ 280{
@@ -298,16 +309,9 @@ static struct ip_tunnel *ipip_tunnel_locate(struct net *net,
298 nt = netdev_priv(dev); 309 nt = netdev_priv(dev);
299 nt->parms = *parms; 310 nt->parms = *parms;
300 311
301 if (ipip_tunnel_init(dev) < 0) 312 if (ipip_tunnel_create(dev) < 0)
302 goto failed_free; 313 goto failed_free;
303 314
304 if (register_netdevice(dev) < 0)
305 goto failed_free;
306
307 strcpy(nt->parms.name, dev->name);
308
309 dev_hold(dev);
310 ipip_tunnel_link(ipn, nt);
311 return nt; 315 return nt;
312 316
313failed_free: 317failed_free:
@@ -463,7 +467,6 @@ drop:
463static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 467static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
464{ 468{
465 struct ip_tunnel *tunnel = netdev_priv(dev); 469 struct ip_tunnel *tunnel = netdev_priv(dev);
466 struct pcpu_tstats *tstats;
467 const struct iphdr *tiph = &tunnel->parms.iph; 470 const struct iphdr *tiph = &tunnel->parms.iph;
468 u8 tos = tunnel->parms.iph.tos; 471 u8 tos = tunnel->parms.iph.tos;
469 __be16 df = tiph->frag_off; 472 __be16 df = tiph->frag_off;
@@ -479,6 +482,10 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
479 if (skb->protocol != htons(ETH_P_IP)) 482 if (skb->protocol != htons(ETH_P_IP))
480 goto tx_error; 483 goto tx_error;
481 484
485 if (skb->ip_summed == CHECKSUM_PARTIAL &&
486 skb_checksum_help(skb))
487 goto tx_error;
488
482 if (tos & 1) 489 if (tos & 1)
483 tos = old_iph->tos; 490 tos = old_iph->tos;
484 491
@@ -586,9 +593,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
586 if ((iph->ttl = tiph->ttl) == 0) 593 if ((iph->ttl = tiph->ttl) == 0)
587 iph->ttl = old_iph->ttl; 594 iph->ttl = old_iph->ttl;
588 595
589 nf_reset(skb); 596 iptunnel_xmit(skb, dev);
590 tstats = this_cpu_ptr(dev->tstats);
591 __IPTUNNEL_XMIT(tstats, &dev->stats);
592 return NETDEV_TX_OK; 597 return NETDEV_TX_OK;
593 598
594tx_error_icmp: 599tx_error_icmp:
@@ -635,6 +640,28 @@ static void ipip_tunnel_bind_dev(struct net_device *dev)
635 dev->iflink = tunnel->parms.link; 640 dev->iflink = tunnel->parms.link;
636} 641}
637 642
643static void ipip_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p)
644{
645 struct net *net = dev_net(t->dev);
646 struct ipip_net *ipn = net_generic(net, ipip_net_id);
647
648 ipip_tunnel_unlink(ipn, t);
649 synchronize_net();
650 t->parms.iph.saddr = p->iph.saddr;
651 t->parms.iph.daddr = p->iph.daddr;
652 memcpy(t->dev->dev_addr, &p->iph.saddr, 4);
653 memcpy(t->dev->broadcast, &p->iph.daddr, 4);
654 ipip_tunnel_link(ipn, t);
655 t->parms.iph.ttl = p->iph.ttl;
656 t->parms.iph.tos = p->iph.tos;
657 t->parms.iph.frag_off = p->iph.frag_off;
658 if (t->parms.link != p->link) {
659 t->parms.link = p->link;
660 ipip_tunnel_bind_dev(t->dev);
661 }
662 netdev_state_change(t->dev);
663}
664
638static int 665static int
639ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) 666ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
640{ 667{
@@ -664,7 +691,7 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
664 case SIOCADDTUNNEL: 691 case SIOCADDTUNNEL:
665 case SIOCCHGTUNNEL: 692 case SIOCCHGTUNNEL:
666 err = -EPERM; 693 err = -EPERM;
667 if (!capable(CAP_NET_ADMIN)) 694 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
668 goto done; 695 goto done;
669 696
670 err = -EFAULT; 697 err = -EFAULT;
@@ -693,29 +720,13 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
693 break; 720 break;
694 } 721 }
695 t = netdev_priv(dev); 722 t = netdev_priv(dev);
696 ipip_tunnel_unlink(ipn, t);
697 synchronize_net();
698 t->parms.iph.saddr = p.iph.saddr;
699 t->parms.iph.daddr = p.iph.daddr;
700 memcpy(dev->dev_addr, &p.iph.saddr, 4);
701 memcpy(dev->broadcast, &p.iph.daddr, 4);
702 ipip_tunnel_link(ipn, t);
703 netdev_state_change(dev);
704 } 723 }
724
725 ipip_tunnel_update(t, &p);
705 } 726 }
706 727
707 if (t) { 728 if (t) {
708 err = 0; 729 err = 0;
709 if (cmd == SIOCCHGTUNNEL) {
710 t->parms.iph.ttl = p.iph.ttl;
711 t->parms.iph.tos = p.iph.tos;
712 t->parms.iph.frag_off = p.iph.frag_off;
713 if (t->parms.link != p.link) {
714 t->parms.link = p.link;
715 ipip_tunnel_bind_dev(dev);
716 netdev_state_change(dev);
717 }
718 }
719 if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p))) 730 if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
720 err = -EFAULT; 731 err = -EFAULT;
721 } else 732 } else
@@ -724,7 +735,7 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
724 735
725 case SIOCDELTUNNEL: 736 case SIOCDELTUNNEL:
726 err = -EPERM; 737 err = -EPERM;
727 if (!capable(CAP_NET_ADMIN)) 738 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
728 goto done; 739 goto done;
729 740
730 if (dev == ipn->fb_tunnel_dev) { 741 if (dev == ipn->fb_tunnel_dev) {
@@ -773,6 +784,11 @@ static void ipip_dev_free(struct net_device *dev)
773 free_netdev(dev); 784 free_netdev(dev);
774} 785}
775 786
787#define IPIP_FEATURES (NETIF_F_SG | \
788 NETIF_F_FRAGLIST | \
789 NETIF_F_HIGHDMA | \
790 NETIF_F_HW_CSUM)
791
776static void ipip_tunnel_setup(struct net_device *dev) 792static void ipip_tunnel_setup(struct net_device *dev)
777{ 793{
778 dev->netdev_ops = &ipip_netdev_ops; 794 dev->netdev_ops = &ipip_netdev_ops;
@@ -787,6 +803,9 @@ static void ipip_tunnel_setup(struct net_device *dev)
787 dev->features |= NETIF_F_NETNS_LOCAL; 803 dev->features |= NETIF_F_NETNS_LOCAL;
788 dev->features |= NETIF_F_LLTX; 804 dev->features |= NETIF_F_LLTX;
789 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 805 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
806
807 dev->features |= IPIP_FEATURES;
808 dev->hw_features |= IPIP_FEATURES;
790} 809}
791 810
792static int ipip_tunnel_init(struct net_device *dev) 811static int ipip_tunnel_init(struct net_device *dev)
@@ -829,6 +848,142 @@ static int __net_init ipip_fb_tunnel_init(struct net_device *dev)
829 return 0; 848 return 0;
830} 849}
831 850
851static void ipip_netlink_parms(struct nlattr *data[],
852 struct ip_tunnel_parm *parms)
853{
854 memset(parms, 0, sizeof(*parms));
855
856 parms->iph.version = 4;
857 parms->iph.protocol = IPPROTO_IPIP;
858 parms->iph.ihl = 5;
859
860 if (!data)
861 return;
862
863 if (data[IFLA_IPTUN_LINK])
864 parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
865
866 if (data[IFLA_IPTUN_LOCAL])
867 parms->iph.saddr = nla_get_be32(data[IFLA_IPTUN_LOCAL]);
868
869 if (data[IFLA_IPTUN_REMOTE])
870 parms->iph.daddr = nla_get_be32(data[IFLA_IPTUN_REMOTE]);
871
872 if (data[IFLA_IPTUN_TTL]) {
873 parms->iph.ttl = nla_get_u8(data[IFLA_IPTUN_TTL]);
874 if (parms->iph.ttl)
875 parms->iph.frag_off = htons(IP_DF);
876 }
877
878 if (data[IFLA_IPTUN_TOS])
879 parms->iph.tos = nla_get_u8(data[IFLA_IPTUN_TOS]);
880
881 if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC]))
882 parms->iph.frag_off = htons(IP_DF);
883}
884
885static int ipip_newlink(struct net *src_net, struct net_device *dev,
886 struct nlattr *tb[], struct nlattr *data[])
887{
888 struct net *net = dev_net(dev);
889 struct ip_tunnel *nt;
890
891 nt = netdev_priv(dev);
892 ipip_netlink_parms(data, &nt->parms);
893
894 if (ipip_tunnel_locate(net, &nt->parms, 0))
895 return -EEXIST;
896
897 return ipip_tunnel_create(dev);
898}
899
900static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
901 struct nlattr *data[])
902{
903 struct ip_tunnel *t;
904 struct ip_tunnel_parm p;
905 struct net *net = dev_net(dev);
906 struct ipip_net *ipn = net_generic(net, ipip_net_id);
907
908 if (dev == ipn->fb_tunnel_dev)
909 return -EINVAL;
910
911 ipip_netlink_parms(data, &p);
912
913 if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) ||
914 (!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr))
915 return -EINVAL;
916
917 t = ipip_tunnel_locate(net, &p, 0);
918
919 if (t) {
920 if (t->dev != dev)
921 return -EEXIST;
922 } else
923 t = netdev_priv(dev);
924
925 ipip_tunnel_update(t, &p);
926 return 0;
927}
928
929static size_t ipip_get_size(const struct net_device *dev)
930{
931 return
932 /* IFLA_IPTUN_LINK */
933 nla_total_size(4) +
934 /* IFLA_IPTUN_LOCAL */
935 nla_total_size(4) +
936 /* IFLA_IPTUN_REMOTE */
937 nla_total_size(4) +
938 /* IFLA_IPTUN_TTL */
939 nla_total_size(1) +
940 /* IFLA_IPTUN_TOS */
941 nla_total_size(1) +
942 /* IFLA_IPTUN_PMTUDISC */
943 nla_total_size(1) +
944 0;
945}
946
947static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev)
948{
949 struct ip_tunnel *tunnel = netdev_priv(dev);
950 struct ip_tunnel_parm *parm = &tunnel->parms;
951
952 if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
953 nla_put_be32(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
954 nla_put_be32(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) ||
955 nla_put_u8(skb, IFLA_IPTUN_TTL, parm->iph.ttl) ||
956 nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) ||
957 nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
958 !!(parm->iph.frag_off & htons(IP_DF))))
959 goto nla_put_failure;
960 return 0;
961
962nla_put_failure:
963 return -EMSGSIZE;
964}
965
966static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
967 [IFLA_IPTUN_LINK] = { .type = NLA_U32 },
968 [IFLA_IPTUN_LOCAL] = { .type = NLA_U32 },
969 [IFLA_IPTUN_REMOTE] = { .type = NLA_U32 },
970 [IFLA_IPTUN_TTL] = { .type = NLA_U8 },
971 [IFLA_IPTUN_TOS] = { .type = NLA_U8 },
972 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
973};
974
975static struct rtnl_link_ops ipip_link_ops __read_mostly = {
976 .kind = "ipip",
977 .maxtype = IFLA_IPTUN_MAX,
978 .policy = ipip_policy,
979 .priv_size = sizeof(struct ip_tunnel),
980 .setup = ipip_tunnel_setup,
981 .newlink = ipip_newlink,
982 .changelink = ipip_changelink,
983 .get_size = ipip_get_size,
984 .fill_info = ipip_fill_info,
985};
986
832static struct xfrm_tunnel ipip_handler __read_mostly = { 987static struct xfrm_tunnel ipip_handler __read_mostly = {
833 .handler = ipip_rcv, 988 .handler = ipip_rcv,
834 .err_handler = ipip_err, 989 .err_handler = ipip_err,
@@ -925,14 +1080,26 @@ static int __init ipip_init(void)
925 return err; 1080 return err;
926 err = xfrm4_tunnel_register(&ipip_handler, AF_INET); 1081 err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
927 if (err < 0) { 1082 if (err < 0) {
928 unregister_pernet_device(&ipip_net_ops);
929 pr_info("%s: can't register tunnel\n", __func__); 1083 pr_info("%s: can't register tunnel\n", __func__);
1084 goto xfrm_tunnel_failed;
930 } 1085 }
1086 err = rtnl_link_register(&ipip_link_ops);
1087 if (err < 0)
1088 goto rtnl_link_failed;
1089
1090out:
931 return err; 1091 return err;
1092
1093rtnl_link_failed:
1094 xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
1095xfrm_tunnel_failed:
1096 unregister_pernet_device(&ipip_net_ops);
1097 goto out;
932} 1098}
933 1099
934static void __exit ipip_fini(void) 1100static void __exit ipip_fini(void)
935{ 1101{
1102 rtnl_link_unregister(&ipip_link_ops);
936 if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET)) 1103 if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET))
937 pr_info("%s: can't deregister tunnel\n", __func__); 1104 pr_info("%s: can't deregister tunnel\n", __func__);
938 1105
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 6168c4dc58b1..a9454cbd953c 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -65,6 +65,7 @@
65#include <net/checksum.h> 65#include <net/checksum.h>
66#include <net/netlink.h> 66#include <net/netlink.h>
67#include <net/fib_rules.h> 67#include <net/fib_rules.h>
68#include <linux/netconf.h>
68 69
69#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) 70#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
70#define CONFIG_IP_PIMSM 1 71#define CONFIG_IP_PIMSM 1
@@ -83,8 +84,8 @@ struct mr_table {
83 struct vif_device vif_table[MAXVIFS]; 84 struct vif_device vif_table[MAXVIFS];
84 int maxvif; 85 int maxvif;
85 atomic_t cache_resolve_queue_len; 86 atomic_t cache_resolve_queue_len;
86 int mroute_do_assert; 87 bool mroute_do_assert;
87 int mroute_do_pim; 88 bool mroute_do_pim;
88#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) 89#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
89 int mroute_reg_vif_num; 90 int mroute_reg_vif_num;
90#endif 91#endif
@@ -133,6 +134,8 @@ static int ipmr_cache_report(struct mr_table *mrt,
133 struct sk_buff *pkt, vifi_t vifi, int assert); 134 struct sk_buff *pkt, vifi_t vifi, int assert);
134static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 135static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
135 struct mfc_cache *c, struct rtmsg *rtm); 136 struct mfc_cache *c, struct rtmsg *rtm);
137static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
138 int cmd);
136static void mroute_clean_tables(struct mr_table *mrt); 139static void mroute_clean_tables(struct mr_table *mrt);
137static void ipmr_expire_process(unsigned long arg); 140static void ipmr_expire_process(unsigned long arg);
138 141
@@ -582,6 +585,9 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify,
582 in_dev = __in_dev_get_rtnl(dev); 585 in_dev = __in_dev_get_rtnl(dev);
583 if (in_dev) { 586 if (in_dev) {
584 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--; 587 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
588 inet_netconf_notify_devconf(dev_net(dev),
589 NETCONFA_MC_FORWARDING,
590 dev->ifindex, &in_dev->cnf);
585 ip_rt_multicast_event(in_dev); 591 ip_rt_multicast_event(in_dev);
586 } 592 }
587 593
@@ -665,6 +671,7 @@ static void ipmr_expire_process(unsigned long arg)
665 } 671 }
666 672
667 list_del(&c->list); 673 list_del(&c->list);
674 mroute_netlink_event(mrt, c, RTM_DELROUTE);
668 ipmr_destroy_unres(mrt, c); 675 ipmr_destroy_unres(mrt, c);
669 } 676 }
670 677
@@ -772,6 +779,8 @@ static int vif_add(struct net *net, struct mr_table *mrt,
772 return -EADDRNOTAVAIL; 779 return -EADDRNOTAVAIL;
773 } 780 }
774 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++; 781 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
782 inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, dev->ifindex,
783 &in_dev->cnf);
775 ip_rt_multicast_event(in_dev); 784 ip_rt_multicast_event(in_dev);
776 785
777 /* Fill in the VIF structures */ 786 /* Fill in the VIF structures */
@@ -1020,6 +1029,7 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
1020 1029
1021 atomic_inc(&mrt->cache_resolve_queue_len); 1030 atomic_inc(&mrt->cache_resolve_queue_len);
1022 list_add(&c->list, &mrt->mfc_unres_queue); 1031 list_add(&c->list, &mrt->mfc_unres_queue);
1032 mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1023 1033
1024 if (atomic_read(&mrt->cache_resolve_queue_len) == 1) 1034 if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
1025 mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires); 1035 mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
@@ -1054,7 +1064,7 @@ static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
1054 if (c->mfc_origin == mfc->mfcc_origin.s_addr && 1064 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
1055 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { 1065 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
1056 list_del_rcu(&c->list); 1066 list_del_rcu(&c->list);
1057 1067 mroute_netlink_event(mrt, c, RTM_DELROUTE);
1058 ipmr_cache_free(c); 1068 ipmr_cache_free(c);
1059 return 0; 1069 return 0;
1060 } 1070 }
@@ -1089,6 +1099,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
1089 if (!mrtsock) 1099 if (!mrtsock)
1090 c->mfc_flags |= MFC_STATIC; 1100 c->mfc_flags |= MFC_STATIC;
1091 write_unlock_bh(&mrt_lock); 1101 write_unlock_bh(&mrt_lock);
1102 mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1092 return 0; 1103 return 0;
1093 } 1104 }
1094 1105
@@ -1131,6 +1142,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
1131 ipmr_cache_resolve(net, mrt, uc, c); 1142 ipmr_cache_resolve(net, mrt, uc, c);
1132 ipmr_cache_free(uc); 1143 ipmr_cache_free(uc);
1133 } 1144 }
1145 mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1134 return 0; 1146 return 0;
1135} 1147}
1136 1148
@@ -1159,6 +1171,7 @@ static void mroute_clean_tables(struct mr_table *mrt)
1159 if (c->mfc_flags & MFC_STATIC) 1171 if (c->mfc_flags & MFC_STATIC)
1160 continue; 1172 continue;
1161 list_del_rcu(&c->list); 1173 list_del_rcu(&c->list);
1174 mroute_netlink_event(mrt, c, RTM_DELROUTE);
1162 ipmr_cache_free(c); 1175 ipmr_cache_free(c);
1163 } 1176 }
1164 } 1177 }
@@ -1167,6 +1180,7 @@ static void mroute_clean_tables(struct mr_table *mrt)
1167 spin_lock_bh(&mfc_unres_lock); 1180 spin_lock_bh(&mfc_unres_lock);
1168 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { 1181 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
1169 list_del(&c->list); 1182 list_del(&c->list);
1183 mroute_netlink_event(mrt, c, RTM_DELROUTE);
1170 ipmr_destroy_unres(mrt, c); 1184 ipmr_destroy_unres(mrt, c);
1171 } 1185 }
1172 spin_unlock_bh(&mfc_unres_lock); 1186 spin_unlock_bh(&mfc_unres_lock);
@@ -1185,6 +1199,9 @@ static void mrtsock_destruct(struct sock *sk)
1185 ipmr_for_each_table(mrt, net) { 1199 ipmr_for_each_table(mrt, net) {
1186 if (sk == rtnl_dereference(mrt->mroute_sk)) { 1200 if (sk == rtnl_dereference(mrt->mroute_sk)) {
1187 IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; 1201 IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
1202 inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
1203 NETCONFA_IFINDEX_ALL,
1204 net->ipv4.devconf_all);
1188 RCU_INIT_POINTER(mrt->mroute_sk, NULL); 1205 RCU_INIT_POINTER(mrt->mroute_sk, NULL);
1189 mroute_clean_tables(mrt); 1206 mroute_clean_tables(mrt);
1190 } 1207 }
@@ -1207,23 +1224,24 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
1207 struct net *net = sock_net(sk); 1224 struct net *net = sock_net(sk);
1208 struct mr_table *mrt; 1225 struct mr_table *mrt;
1209 1226
1227 if (sk->sk_type != SOCK_RAW ||
1228 inet_sk(sk)->inet_num != IPPROTO_IGMP)
1229 return -EOPNOTSUPP;
1230
1210 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); 1231 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1211 if (mrt == NULL) 1232 if (mrt == NULL)
1212 return -ENOENT; 1233 return -ENOENT;
1213 1234
1214 if (optname != MRT_INIT) { 1235 if (optname != MRT_INIT) {
1215 if (sk != rcu_access_pointer(mrt->mroute_sk) && 1236 if (sk != rcu_access_pointer(mrt->mroute_sk) &&
1216 !capable(CAP_NET_ADMIN)) 1237 !ns_capable(net->user_ns, CAP_NET_ADMIN))
1217 return -EACCES; 1238 return -EACCES;
1218 } 1239 }
1219 1240
1220 switch (optname) { 1241 switch (optname) {
1221 case MRT_INIT: 1242 case MRT_INIT:
1222 if (sk->sk_type != SOCK_RAW ||
1223 inet_sk(sk)->inet_num != IPPROTO_IGMP)
1224 return -EOPNOTSUPP;
1225 if (optlen != sizeof(int)) 1243 if (optlen != sizeof(int))
1226 return -ENOPROTOOPT; 1244 return -EINVAL;
1227 1245
1228 rtnl_lock(); 1246 rtnl_lock();
1229 if (rtnl_dereference(mrt->mroute_sk)) { 1247 if (rtnl_dereference(mrt->mroute_sk)) {
@@ -1235,6 +1253,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
1235 if (ret == 0) { 1253 if (ret == 0) {
1236 rcu_assign_pointer(mrt->mroute_sk, sk); 1254 rcu_assign_pointer(mrt->mroute_sk, sk);
1237 IPV4_DEVCONF_ALL(net, MC_FORWARDING)++; 1255 IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
1256 inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
1257 NETCONFA_IFINDEX_ALL,
1258 net->ipv4.devconf_all);
1238 } 1259 }
1239 rtnl_unlock(); 1260 rtnl_unlock();
1240 return ret; 1261 return ret;
@@ -1284,9 +1305,11 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
1284 case MRT_ASSERT: 1305 case MRT_ASSERT:
1285 { 1306 {
1286 int v; 1307 int v;
1308 if (optlen != sizeof(v))
1309 return -EINVAL;
1287 if (get_user(v, (int __user *)optval)) 1310 if (get_user(v, (int __user *)optval))
1288 return -EFAULT; 1311 return -EFAULT;
1289 mrt->mroute_do_assert = (v) ? 1 : 0; 1312 mrt->mroute_do_assert = v;
1290 return 0; 1313 return 0;
1291 } 1314 }
1292#ifdef CONFIG_IP_PIMSM 1315#ifdef CONFIG_IP_PIMSM
@@ -1294,9 +1317,11 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
1294 { 1317 {
1295 int v; 1318 int v;
1296 1319
1320 if (optlen != sizeof(v))
1321 return -EINVAL;
1297 if (get_user(v, (int __user *)optval)) 1322 if (get_user(v, (int __user *)optval))
1298 return -EFAULT; 1323 return -EFAULT;
1299 v = (v) ? 1 : 0; 1324 v = !!v;
1300 1325
1301 rtnl_lock(); 1326 rtnl_lock();
1302 ret = 0; 1327 ret = 0;
@@ -1318,6 +1343,10 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
1318 if (get_user(v, (u32 __user *)optval)) 1343 if (get_user(v, (u32 __user *)optval))
1319 return -EFAULT; 1344 return -EFAULT;
1320 1345
1346 /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
1347 if (v != RT_TABLE_DEFAULT && v >= 1000000000)
1348 return -EINVAL;
1349
1321 rtnl_lock(); 1350 rtnl_lock();
1322 ret = 0; 1351 ret = 0;
1323 if (sk == rtnl_dereference(mrt->mroute_sk)) { 1352 if (sk == rtnl_dereference(mrt->mroute_sk)) {
@@ -1325,7 +1354,8 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
1325 } else { 1354 } else {
1326 if (!ipmr_new_table(net, v)) 1355 if (!ipmr_new_table(net, v))
1327 ret = -ENOMEM; 1356 ret = -ENOMEM;
1328 raw_sk(sk)->ipmr_table = v; 1357 else
1358 raw_sk(sk)->ipmr_table = v;
1329 } 1359 }
1330 rtnl_unlock(); 1360 rtnl_unlock();
1331 return ret; 1361 return ret;
@@ -1351,6 +1381,10 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
1351 struct net *net = sock_net(sk); 1381 struct net *net = sock_net(sk);
1352 struct mr_table *mrt; 1382 struct mr_table *mrt;
1353 1383
1384 if (sk->sk_type != SOCK_RAW ||
1385 inet_sk(sk)->inet_num != IPPROTO_IGMP)
1386 return -EOPNOTSUPP;
1387
1354 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); 1388 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1355 if (mrt == NULL) 1389 if (mrt == NULL)
1356 return -ENOENT; 1390 return -ENOENT;
@@ -2020,6 +2054,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2020 int ct; 2054 int ct;
2021 struct rtnexthop *nhp; 2055 struct rtnexthop *nhp;
2022 struct nlattr *mp_attr; 2056 struct nlattr *mp_attr;
2057 struct rta_mfc_stats mfcs;
2023 2058
2024 /* If cache is unresolved, don't try to parse IIF and OIF */ 2059 /* If cache is unresolved, don't try to parse IIF and OIF */
2025 if (c->mfc_parent >= MAXVIFS) 2060 if (c->mfc_parent >= MAXVIFS)
@@ -2048,6 +2083,12 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2048 2083
2049 nla_nest_end(skb, mp_attr); 2084 nla_nest_end(skb, mp_attr);
2050 2085
2086 mfcs.mfcs_packets = c->mfc_un.res.pkt;
2087 mfcs.mfcs_bytes = c->mfc_un.res.bytes;
2088 mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
2089 if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
2090 return -EMSGSIZE;
2091
2051 rtm->rtm_type = RTN_MULTICAST; 2092 rtm->rtm_type = RTN_MULTICAST;
2052 return 1; 2093 return 1;
2053} 2094}
@@ -2117,12 +2158,13 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
2117} 2158}
2118 2159
2119static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 2160static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2120 u32 portid, u32 seq, struct mfc_cache *c) 2161 u32 portid, u32 seq, struct mfc_cache *c, int cmd)
2121{ 2162{
2122 struct nlmsghdr *nlh; 2163 struct nlmsghdr *nlh;
2123 struct rtmsg *rtm; 2164 struct rtmsg *rtm;
2165 int err;
2124 2166
2125 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI); 2167 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI);
2126 if (nlh == NULL) 2168 if (nlh == NULL)
2127 return -EMSGSIZE; 2169 return -EMSGSIZE;
2128 2170
@@ -2136,13 +2178,18 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2136 goto nla_put_failure; 2178 goto nla_put_failure;
2137 rtm->rtm_type = RTN_MULTICAST; 2179 rtm->rtm_type = RTN_MULTICAST;
2138 rtm->rtm_scope = RT_SCOPE_UNIVERSE; 2180 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2139 rtm->rtm_protocol = RTPROT_UNSPEC; 2181 if (c->mfc_flags & MFC_STATIC)
2182 rtm->rtm_protocol = RTPROT_STATIC;
2183 else
2184 rtm->rtm_protocol = RTPROT_MROUTED;
2140 rtm->rtm_flags = 0; 2185 rtm->rtm_flags = 0;
2141 2186
2142 if (nla_put_be32(skb, RTA_SRC, c->mfc_origin) || 2187 if (nla_put_be32(skb, RTA_SRC, c->mfc_origin) ||
2143 nla_put_be32(skb, RTA_DST, c->mfc_mcastgrp)) 2188 nla_put_be32(skb, RTA_DST, c->mfc_mcastgrp))
2144 goto nla_put_failure; 2189 goto nla_put_failure;
2145 if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0) 2190 err = __ipmr_fill_mroute(mrt, skb, c, rtm);
2191 /* do not break the dump if cache is unresolved */
2192 if (err < 0 && err != -ENOENT)
2146 goto nla_put_failure; 2193 goto nla_put_failure;
2147 2194
2148 return nlmsg_end(skb, nlh); 2195 return nlmsg_end(skb, nlh);
@@ -2152,6 +2199,52 @@ nla_put_failure:
2152 return -EMSGSIZE; 2199 return -EMSGSIZE;
2153} 2200}
2154 2201
2202static size_t mroute_msgsize(bool unresolved, int maxvif)
2203{
2204 size_t len =
2205 NLMSG_ALIGN(sizeof(struct rtmsg))
2206 + nla_total_size(4) /* RTA_TABLE */
2207 + nla_total_size(4) /* RTA_SRC */
2208 + nla_total_size(4) /* RTA_DST */
2209 ;
2210
2211 if (!unresolved)
2212 len = len
2213 + nla_total_size(4) /* RTA_IIF */
2214 + nla_total_size(0) /* RTA_MULTIPATH */
2215 + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2216 /* RTA_MFC_STATS */
2217 + nla_total_size(sizeof(struct rta_mfc_stats))
2218 ;
2219
2220 return len;
2221}
2222
2223static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
2224 int cmd)
2225{
2226 struct net *net = read_pnet(&mrt->net);
2227 struct sk_buff *skb;
2228 int err = -ENOBUFS;
2229
2230 skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
2231 GFP_ATOMIC);
2232 if (skb == NULL)
2233 goto errout;
2234
2235 err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd);
2236 if (err < 0)
2237 goto errout;
2238
2239 rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
2240 return;
2241
2242errout:
2243 kfree_skb(skb);
2244 if (err < 0)
2245 rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
2246}
2247
2155static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) 2248static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2156{ 2249{
2157 struct net *net = sock_net(skb->sk); 2250 struct net *net = sock_net(skb->sk);
@@ -2178,13 +2271,29 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2178 if (ipmr_fill_mroute(mrt, skb, 2271 if (ipmr_fill_mroute(mrt, skb,
2179 NETLINK_CB(cb->skb).portid, 2272 NETLINK_CB(cb->skb).portid,
2180 cb->nlh->nlmsg_seq, 2273 cb->nlh->nlmsg_seq,
2181 mfc) < 0) 2274 mfc, RTM_NEWROUTE) < 0)
2182 goto done; 2275 goto done;
2183next_entry: 2276next_entry:
2184 e++; 2277 e++;
2185 } 2278 }
2186 e = s_e = 0; 2279 e = s_e = 0;
2187 } 2280 }
2281 spin_lock_bh(&mfc_unres_lock);
2282 list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
2283 if (e < s_e)
2284 goto next_entry2;
2285 if (ipmr_fill_mroute(mrt, skb,
2286 NETLINK_CB(cb->skb).portid,
2287 cb->nlh->nlmsg_seq,
2288 mfc, RTM_NEWROUTE) < 0) {
2289 spin_unlock_bh(&mfc_unres_lock);
2290 goto done;
2291 }
2292next_entry2:
2293 e++;
2294 }
2295 spin_unlock_bh(&mfc_unres_lock);
2296 e = s_e = 0;
2188 s_h = 0; 2297 s_h = 0;
2189next_table: 2298next_table:
2190 t++; 2299 t++;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 97e61eadf580..3ea4127404d6 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -1533,7 +1533,7 @@ static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user,
1533{ 1533{
1534 int ret; 1534 int ret;
1535 1535
1536 if (!capable(CAP_NET_ADMIN)) 1536 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1537 return -EPERM; 1537 return -EPERM;
1538 1538
1539 switch (cmd) { 1539 switch (cmd) {
@@ -1677,7 +1677,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
1677{ 1677{
1678 int ret; 1678 int ret;
1679 1679
1680 if (!capable(CAP_NET_ADMIN)) 1680 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1681 return -EPERM; 1681 return -EPERM;
1682 1682
1683 switch (cmd) { 1683 switch (cmd) {
@@ -1698,7 +1698,7 @@ static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned
1698{ 1698{
1699 int ret; 1699 int ret;
1700 1700
1701 if (!capable(CAP_NET_ADMIN)) 1701 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1702 return -EPERM; 1702 return -EPERM;
1703 1703
1704 switch (cmd) { 1704 switch (cmd) {
@@ -1722,7 +1722,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
1722{ 1722{
1723 int ret; 1723 int ret;
1724 1724
1725 if (!capable(CAP_NET_ADMIN)) 1725 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1726 return -EPERM; 1726 return -EPERM;
1727 1727
1728 switch (cmd) { 1728 switch (cmd) {
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 170b1fdd6b72..17c5e06da662 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1846,7 +1846,7 @@ compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1846{ 1846{
1847 int ret; 1847 int ret;
1848 1848
1849 if (!capable(CAP_NET_ADMIN)) 1849 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1850 return -EPERM; 1850 return -EPERM;
1851 1851
1852 switch (cmd) { 1852 switch (cmd) {
@@ -1961,7 +1961,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1961{ 1961{
1962 int ret; 1962 int ret;
1963 1963
1964 if (!capable(CAP_NET_ADMIN)) 1964 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1965 return -EPERM; 1965 return -EPERM;
1966 1966
1967 switch (cmd) { 1967 switch (cmd) {
@@ -1983,7 +1983,7 @@ do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1983{ 1983{
1984 int ret; 1984 int ret;
1985 1985
1986 if (!capable(CAP_NET_ADMIN)) 1986 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1987 return -EPERM; 1987 return -EPERM;
1988 1988
1989 switch (cmd) { 1989 switch (cmd) {
@@ -2008,7 +2008,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2008{ 2008{
2009 int ret; 2009 int ret;
2010 2010
2011 if (!capable(CAP_NET_ADMIN)) 2011 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2012 return -EPERM; 2012 return -EPERM;
2013 2013
2014 switch (cmd) { 2014 switch (cmd) {
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index fe5daea5214d..75e33a7048f8 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -661,6 +661,7 @@ static ssize_t clusterip_proc_write(struct file *file, const char __user *input,
661#define PROC_WRITELEN 10 661#define PROC_WRITELEN 10
662 char buffer[PROC_WRITELEN+1]; 662 char buffer[PROC_WRITELEN+1];
663 unsigned long nodenum; 663 unsigned long nodenum;
664 int rc;
664 665
665 if (size > PROC_WRITELEN) 666 if (size > PROC_WRITELEN)
666 return -EIO; 667 return -EIO;
@@ -669,11 +670,15 @@ static ssize_t clusterip_proc_write(struct file *file, const char __user *input,
669 buffer[size] = 0; 670 buffer[size] = 0;
670 671
671 if (*buffer == '+') { 672 if (*buffer == '+') {
672 nodenum = simple_strtoul(buffer+1, NULL, 10); 673 rc = kstrtoul(buffer+1, 10, &nodenum);
674 if (rc)
675 return rc;
673 if (clusterip_add_node(c, nodenum)) 676 if (clusterip_add_node(c, nodenum))
674 return -ENOMEM; 677 return -ENOMEM;
675 } else if (*buffer == '-') { 678 } else if (*buffer == '-') {
676 nodenum = simple_strtoul(buffer+1, NULL,10); 679 rc = kstrtoul(buffer+1, 10, &nodenum);
680 if (rc)
681 return rc;
677 if (clusterip_del_node(c, nodenum)) 682 if (clusterip_del_node(c, nodenum))
678 return -ENOENT; 683 return -ENOENT;
679 } else 684 } else
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 51f13f8ec724..04b18c1ac345 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -81,6 +81,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
81 niph->saddr = oiph->daddr; 81 niph->saddr = oiph->daddr;
82 niph->daddr = oiph->saddr; 82 niph->daddr = oiph->saddr;
83 83
84 skb_reset_transport_header(nskb);
84 tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); 85 tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
85 memset(tcph, 0, sizeof(*tcph)); 86 memset(tcph, 0, sizeof(*tcph));
86 tcph->source = oth->dest; 87 tcph->source = oth->dest;
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index a82047282dbb..eeaff7e4acb5 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -124,19 +124,28 @@ nf_nat_ipv4_fn(unsigned int hooknum,
124 ret = nf_nat_rule_find(skb, hooknum, in, out, ct); 124 ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
125 if (ret != NF_ACCEPT) 125 if (ret != NF_ACCEPT)
126 return ret; 126 return ret;
127 } else 127 } else {
128 pr_debug("Already setup manip %s for ct %p\n", 128 pr_debug("Already setup manip %s for ct %p\n",
129 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST", 129 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
130 ct); 130 ct);
131 if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
132 goto oif_changed;
133 }
131 break; 134 break;
132 135
133 default: 136 default:
134 /* ESTABLISHED */ 137 /* ESTABLISHED */
135 NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || 138 NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
136 ctinfo == IP_CT_ESTABLISHED_REPLY); 139 ctinfo == IP_CT_ESTABLISHED_REPLY);
140 if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
141 goto oif_changed;
137 } 142 }
138 143
139 return nf_nat_packet(ct, ctinfo, hooknum, skb); 144 return nf_nat_packet(ct, ctinfo, hooknum, skb);
145
146oif_changed:
147 nf_ct_kill_acct(ct, ctinfo, skb);
148 return NF_DROP;
140} 149}
141 150
142static unsigned int 151static unsigned int
@@ -276,9 +285,7 @@ static int __net_init iptable_nat_net_init(struct net *net)
276 return -ENOMEM; 285 return -ENOMEM;
277 net->ipv4.nat_table = ipt_register_table(net, &nf_nat_ipv4_table, repl); 286 net->ipv4.nat_table = ipt_register_table(net, &nf_nat_ipv4_table, repl);
278 kfree(repl); 287 kfree(repl);
279 if (IS_ERR(net->ipv4.nat_table)) 288 return PTR_RET(net->ipv4.nat_table);
280 return PTR_ERR(net->ipv4.nat_table);
281 return 0;
282} 289}
283 290
284static void __net_exit iptable_nat_net_exit(struct net *net) 291static void __net_exit iptable_nat_net_exit(struct net *net)
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 8918eff1426d..0f9d09f54bd9 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -29,6 +29,7 @@
29#include <net/protocol.h> 29#include <net/protocol.h>
30 30
31const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly; 31const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
32const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly;
32 33
33/* 34/*
34 * Add a protocol handler to the hash tables 35 * Add a protocol handler to the hash tables
@@ -41,6 +42,13 @@ int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
41} 42}
42EXPORT_SYMBOL(inet_add_protocol); 43EXPORT_SYMBOL(inet_add_protocol);
43 44
45int inet_add_offload(const struct net_offload *prot, unsigned char protocol)
46{
47 return !cmpxchg((const struct net_offload **)&inet_offloads[protocol],
48 NULL, prot) ? 0 : -1;
49}
50EXPORT_SYMBOL(inet_add_offload);
51
44/* 52/*
45 * Remove a protocol from the hash tables. 53 * Remove a protocol from the hash tables.
46 */ 54 */
@@ -57,3 +65,16 @@ int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
57 return ret; 65 return ret;
58} 66}
59EXPORT_SYMBOL(inet_del_protocol); 67EXPORT_SYMBOL(inet_del_protocol);
68
69int inet_del_offload(const struct net_offload *prot, unsigned char protocol)
70{
71 int ret;
72
73 ret = (cmpxchg((const struct net_offload **)&inet_offloads[protocol],
74 prot, NULL) == prot) ? 0 : -1;
75
76 synchronize_net();
77
78 return ret;
79}
80EXPORT_SYMBOL(inet_del_offload);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a8c651216fa6..844a9ef60dbd 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1785,6 +1785,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
1785 if (dev_out->flags & IFF_LOOPBACK) 1785 if (dev_out->flags & IFF_LOOPBACK)
1786 flags |= RTCF_LOCAL; 1786 flags |= RTCF_LOCAL;
1787 1787
1788 do_cache = true;
1788 if (type == RTN_BROADCAST) { 1789 if (type == RTN_BROADCAST) {
1789 flags |= RTCF_BROADCAST | RTCF_LOCAL; 1790 flags |= RTCF_BROADCAST | RTCF_LOCAL;
1790 fi = NULL; 1791 fi = NULL;
@@ -1793,6 +1794,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
1793 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr, 1794 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
1794 fl4->flowi4_proto)) 1795 fl4->flowi4_proto))
1795 flags &= ~RTCF_LOCAL; 1796 flags &= ~RTCF_LOCAL;
1797 else
1798 do_cache = false;
1796 /* If multicast route do not exist use 1799 /* If multicast route do not exist use
1797 * default one, but do not gateway in this case. 1800 * default one, but do not gateway in this case.
1798 * Yes, it is hack. 1801 * Yes, it is hack.
@@ -1802,8 +1805,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
1802 } 1805 }
1803 1806
1804 fnhe = NULL; 1807 fnhe = NULL;
1805 do_cache = fi != NULL; 1808 do_cache &= fi != NULL;
1806 if (fi) { 1809 if (do_cache) {
1807 struct rtable __rcu **prth; 1810 struct rtable __rcu **prth;
1808 struct fib_nh *nh = &FIB_RES_NH(*res); 1811 struct fib_nh *nh = &FIB_RES_NH(*res);
1809 1812
@@ -2229,8 +2232,27 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2229 error = rt->dst.error; 2232 error = rt->dst.error;
2230 2233
2231 if (rt_is_input_route(rt)) { 2234 if (rt_is_input_route(rt)) {
2232 if (nla_put_u32(skb, RTA_IIF, rt->rt_iif)) 2235#ifdef CONFIG_IP_MROUTE
2233 goto nla_put_failure; 2236 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2237 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2238 int err = ipmr_get_route(net, skb,
2239 fl4->saddr, fl4->daddr,
2240 r, nowait);
2241 if (err <= 0) {
2242 if (!nowait) {
2243 if (err == 0)
2244 return 0;
2245 goto nla_put_failure;
2246 } else {
2247 if (err == -EMSGSIZE)
2248 goto nla_put_failure;
2249 error = err;
2250 }
2251 }
2252 } else
2253#endif
2254 if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
2255 goto nla_put_failure;
2234 } 2256 }
2235 2257
2236 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0) 2258 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
@@ -2493,6 +2515,10 @@ static __net_init int sysctl_route_net_init(struct net *net)
2493 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL); 2515 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
2494 if (tbl == NULL) 2516 if (tbl == NULL)
2495 goto err_dup; 2517 goto err_dup;
2518
2519 /* Don't export sysctls to unprivileged users */
2520 if (net->user_ns != &init_user_ns)
2521 tbl[0].procname = NULL;
2496 } 2522 }
2497 tbl[0].extra1 = net; 2523 tbl[0].extra1 = net;
2498 2524
@@ -2597,7 +2623,7 @@ int __init ip_rt_init(void)
2597 pr_err("Unable to create route proc files\n"); 2623 pr_err("Unable to create route proc files\n");
2598#ifdef CONFIG_XFRM 2624#ifdef CONFIG_XFRM
2599 xfrm_init(); 2625 xfrm_init();
2600 xfrm4_init(ip_rt_max_size); 2626 xfrm4_init();
2601#endif 2627#endif
2602 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL); 2628 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
2603 2629
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index ba48e799b031..b236ef04914f 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -340,7 +340,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
340 } 340 }
341 341
342 req->expires = 0UL; 342 req->expires = 0UL;
343 req->retrans = 0; 343 req->num_retrans = 0;
344 344
345 /* 345 /*
346 * We need to lookup the route here to get at the correct 346 * We need to lookup the route here to get at the correct
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 63d4eccc674d..d84400b65049 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -883,6 +883,9 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
883 table[6].data = 883 table[6].data =
884 &net->ipv4.sysctl_ping_group_range; 884 &net->ipv4.sysctl_ping_group_range;
885 885
886 /* Don't export sysctls to unprivileged users */
887 if (net->user_ns != &init_user_ns)
888 table[0].procname = NULL;
886 } 889 }
887 890
888 /* 891 /*
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 197c0008503c..2aa69c8ae60c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -536,13 +536,14 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
536{ 536{
537 struct tcp_sock *tp = tcp_sk(sk); 537 struct tcp_sock *tp = tcp_sk(sk);
538 int answ; 538 int answ;
539 bool slow;
539 540
540 switch (cmd) { 541 switch (cmd) {
541 case SIOCINQ: 542 case SIOCINQ:
542 if (sk->sk_state == TCP_LISTEN) 543 if (sk->sk_state == TCP_LISTEN)
543 return -EINVAL; 544 return -EINVAL;
544 545
545 lock_sock(sk); 546 slow = lock_sock_fast(sk);
546 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) 547 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
547 answ = 0; 548 answ = 0;
548 else if (sock_flag(sk, SOCK_URGINLINE) || 549 else if (sock_flag(sk, SOCK_URGINLINE) ||
@@ -557,7 +558,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
557 answ--; 558 answ--;
558 } else 559 } else
559 answ = tp->urg_seq - tp->copied_seq; 560 answ = tp->urg_seq - tp->copied_seq;
560 release_sock(sk); 561 unlock_sock_fast(sk, slow);
561 break; 562 break;
562 case SIOCATMARK: 563 case SIOCATMARK:
563 answ = tp->urg_data && tp->urg_seq == tp->copied_seq; 564 answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
@@ -830,8 +831,8 @@ static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
830 return mss_now; 831 return mss_now;
831} 832}
832 833
833static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, 834static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
834 size_t psize, int flags) 835 size_t size, int flags)
835{ 836{
836 struct tcp_sock *tp = tcp_sk(sk); 837 struct tcp_sock *tp = tcp_sk(sk);
837 int mss_now, size_goal; 838 int mss_now, size_goal;
@@ -858,12 +859,9 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
858 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 859 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
859 goto out_err; 860 goto out_err;
860 861
861 while (psize > 0) { 862 while (size > 0) {
862 struct sk_buff *skb = tcp_write_queue_tail(sk); 863 struct sk_buff *skb = tcp_write_queue_tail(sk);
863 struct page *page = pages[poffset / PAGE_SIZE];
864 int copy, i; 864 int copy, i;
865 int offset = poffset % PAGE_SIZE;
866 int size = min_t(size_t, psize, PAGE_SIZE - offset);
867 bool can_coalesce; 865 bool can_coalesce;
868 866
869 if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) { 867 if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
@@ -912,8 +910,8 @@ new_segment:
912 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; 910 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
913 911
914 copied += copy; 912 copied += copy;
915 poffset += copy; 913 offset += copy;
916 if (!(psize -= copy)) 914 if (!(size -= copy))
917 goto out; 915 goto out;
918 916
919 if (skb->len < size_goal || (flags & MSG_OOB)) 917 if (skb->len < size_goal || (flags & MSG_OOB))
@@ -960,7 +958,7 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset,
960 flags); 958 flags);
961 959
962 lock_sock(sk); 960 lock_sock(sk);
963 res = do_tcp_sendpages(sk, &page, offset, size, flags); 961 res = do_tcp_sendpages(sk, page, offset, size, flags);
964 release_sock(sk); 962 release_sock(sk);
965 return res; 963 return res;
966} 964}
@@ -1212,7 +1210,7 @@ new_segment:
1212wait_for_sndbuf: 1210wait_for_sndbuf:
1213 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1211 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1214wait_for_memory: 1212wait_for_memory:
1215 if (copied && likely(!tp->repair)) 1213 if (copied)
1216 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); 1214 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
1217 1215
1218 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) 1216 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
@@ -1223,7 +1221,7 @@ wait_for_memory:
1223 } 1221 }
1224 1222
1225out: 1223out:
1226 if (copied && likely(!tp->repair)) 1224 if (copied)
1227 tcp_push(sk, flags, mss_now, tp->nonagle); 1225 tcp_push(sk, flags, mss_now, tp->nonagle);
1228 release_sock(sk); 1226 release_sock(sk);
1229 return copied + copied_syn; 1227 return copied + copied_syn;
@@ -1430,12 +1428,12 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
1430} 1428}
1431#endif 1429#endif
1432 1430
1433static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) 1431static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1434{ 1432{
1435 struct sk_buff *skb; 1433 struct sk_buff *skb;
1436 u32 offset; 1434 u32 offset;
1437 1435
1438 skb_queue_walk(&sk->sk_receive_queue, skb) { 1436 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
1439 offset = seq - TCP_SKB_CB(skb)->seq; 1437 offset = seq - TCP_SKB_CB(skb)->seq;
1440 if (tcp_hdr(skb)->syn) 1438 if (tcp_hdr(skb)->syn)
1441 offset--; 1439 offset--;
@@ -1443,6 +1441,11 @@ static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1443 *off = offset; 1441 *off = offset;
1444 return skb; 1442 return skb;
1445 } 1443 }
1444 /* This looks weird, but this can happen if TCP collapsing
1445 * splitted a fat GRO packet, while we released socket lock
1446 * in skb_splice_bits()
1447 */
1448 sk_eat_skb(sk, skb, false);
1446 } 1449 }
1447 return NULL; 1450 return NULL;
1448} 1451}
@@ -1484,7 +1487,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1484 break; 1487 break;
1485 } 1488 }
1486 used = recv_actor(desc, skb, offset, len); 1489 used = recv_actor(desc, skb, offset, len);
1487 if (used < 0) { 1490 if (used <= 0) {
1488 if (!copied) 1491 if (!copied)
1489 copied = used; 1492 copied = used;
1490 break; 1493 break;
@@ -1493,15 +1496,19 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1493 copied += used; 1496 copied += used;
1494 offset += used; 1497 offset += used;
1495 } 1498 }
1496 /* 1499 /* If recv_actor drops the lock (e.g. TCP splice
1497 * If recv_actor drops the lock (e.g. TCP splice
1498 * receive) the skb pointer might be invalid when 1500 * receive) the skb pointer might be invalid when
1499 * getting here: tcp_collapse might have deleted it 1501 * getting here: tcp_collapse might have deleted it
1500 * while aggregating skbs from the socket queue. 1502 * while aggregating skbs from the socket queue.
1501 */ 1503 */
1502 skb = tcp_recv_skb(sk, seq-1, &offset); 1504 skb = tcp_recv_skb(sk, seq - 1, &offset);
1503 if (!skb || (offset+1 != skb->len)) 1505 if (!skb)
1504 break; 1506 break;
1507 /* TCP coalescing might have appended data to the skb.
1508 * Try to splice more frags
1509 */
1510 if (offset + 1 != skb->len)
1511 continue;
1505 } 1512 }
1506 if (tcp_hdr(skb)->fin) { 1513 if (tcp_hdr(skb)->fin) {
1507 sk_eat_skb(sk, skb, false); 1514 sk_eat_skb(sk, skb, false);
@@ -1518,8 +1525,10 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1518 tcp_rcv_space_adjust(sk); 1525 tcp_rcv_space_adjust(sk);
1519 1526
1520 /* Clean up data we have read: This will do ACK frames. */ 1527 /* Clean up data we have read: This will do ACK frames. */
1521 if (copied > 0) 1528 if (copied > 0) {
1529 tcp_recv_skb(sk, seq, &offset);
1522 tcp_cleanup_rbuf(sk, copied); 1530 tcp_cleanup_rbuf(sk, copied);
1531 }
1523 return copied; 1532 return copied;
1524} 1533}
1525EXPORT_SYMBOL(tcp_read_sock); 1534EXPORT_SYMBOL(tcp_read_sock);
@@ -2303,7 +2312,7 @@ void tcp_sock_destruct(struct sock *sk)
2303 2312
2304static inline bool tcp_can_repair_sock(const struct sock *sk) 2313static inline bool tcp_can_repair_sock(const struct sock *sk)
2305{ 2314{
2306 return capable(CAP_NET_ADMIN) && 2315 return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
2307 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED)); 2316 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
2308} 2317}
2309 2318
@@ -3589,8 +3598,7 @@ void __init tcp_init(void)
3589 alloc_large_system_hash("TCP established", 3598 alloc_large_system_hash("TCP established",
3590 sizeof(struct inet_ehash_bucket), 3599 sizeof(struct inet_ehash_bucket),
3591 thash_entries, 3600 thash_entries,
3592 (totalram_pages >= 128 * 1024) ? 3601 17, /* one slot per 128 KB of memory */
3593 13 : 15,
3594 0, 3602 0,
3595 NULL, 3603 NULL,
3596 &tcp_hashinfo.ehash_mask, 3604 &tcp_hashinfo.ehash_mask,
@@ -3606,8 +3614,7 @@ void __init tcp_init(void)
3606 alloc_large_system_hash("TCP bind", 3614 alloc_large_system_hash("TCP bind",
3607 sizeof(struct inet_bind_hashbucket), 3615 sizeof(struct inet_bind_hashbucket),
3608 tcp_hashinfo.ehash_mask + 1, 3616 tcp_hashinfo.ehash_mask + 1,
3609 (totalram_pages >= 128 * 1024) ? 3617 17, /* one slot per 128 KB of memory */
3610 13 : 15,
3611 0, 3618 0,
3612 &tcp_hashinfo.bhash_size, 3619 &tcp_hashinfo.bhash_size,
3613 NULL, 3620 NULL,
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 1432cdb0644c..291f2ed7cc31 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Plugable TCP congestion control support and newReno 2 * Plugable TCP congestion control support and newReno
3 * congestion control. 3 * congestion control.
4 * Based on ideas from I/O scheduler suport and Web100. 4 * Based on ideas from I/O scheduler support and Web100.
5 * 5 *
6 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org> 6 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
7 */ 7 */
@@ -259,7 +259,8 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
259 if (!ca) 259 if (!ca)
260 err = -ENOENT; 260 err = -ENOENT;
261 261
262 else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || capable(CAP_NET_ADMIN))) 262 else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
263 ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)))
263 err = -EPERM; 264 err = -EPERM;
264 265
265 else if (!try_module_get(ca->owner)) 266 else if (!try_module_get(ca->owner))
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2c2b13a999ea..18f97ca76b00 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3552,6 +3552,24 @@ static bool tcp_process_frto(struct sock *sk, int flag)
3552 return false; 3552 return false;
3553} 3553}
3554 3554
3555/* RFC 5961 7 [ACK Throttling] */
3556static void tcp_send_challenge_ack(struct sock *sk)
3557{
3558 /* unprotected vars, we dont care of overwrites */
3559 static u32 challenge_timestamp;
3560 static unsigned int challenge_count;
3561 u32 now = jiffies / HZ;
3562
3563 if (now != challenge_timestamp) {
3564 challenge_timestamp = now;
3565 challenge_count = 0;
3566 }
3567 if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
3568 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
3569 tcp_send_ack(sk);
3570 }
3571}
3572
3555/* This routine deals with incoming acks, but not outgoing ones. */ 3573/* This routine deals with incoming acks, but not outgoing ones. */
3556static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) 3574static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3557{ 3575{
@@ -3571,8 +3589,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3571 /* If the ack is older than previous acks 3589 /* If the ack is older than previous acks
3572 * then we can probably ignore it. 3590 * then we can probably ignore it.
3573 */ 3591 */
3574 if (before(ack, prior_snd_una)) 3592 if (before(ack, prior_snd_una)) {
3593 /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
3594 if (before(ack, prior_snd_una - tp->max_window)) {
3595 tcp_send_challenge_ack(sk);
3596 return -1;
3597 }
3575 goto old_ack; 3598 goto old_ack;
3599 }
3576 3600
3577 /* If the ack includes data we haven't sent yet, discard 3601 /* If the ack includes data we haven't sent yet, discard
3578 * this segment (RFC793 Section 3.9). 3602 * this segment (RFC793 Section 3.9).
@@ -5244,23 +5268,6 @@ out:
5244} 5268}
5245#endif /* CONFIG_NET_DMA */ 5269#endif /* CONFIG_NET_DMA */
5246 5270
5247static void tcp_send_challenge_ack(struct sock *sk)
5248{
5249 /* unprotected vars, we dont care of overwrites */
5250 static u32 challenge_timestamp;
5251 static unsigned int challenge_count;
5252 u32 now = jiffies / HZ;
5253
5254 if (now != challenge_timestamp) {
5255 challenge_timestamp = now;
5256 challenge_count = 0;
5257 }
5258 if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
5259 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
5260 tcp_send_ack(sk);
5261 }
5262}
5263
5264/* Does PAWS and seqno based validation of an incoming segment, flags will 5271/* Does PAWS and seqno based validation of an incoming segment, flags will
5265 * play significant role here. 5272 * play significant role here.
5266 */ 5273 */
@@ -5313,11 +5320,6 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
5313 goto discard; 5320 goto discard;
5314 } 5321 }
5315 5322
5316 /* ts_recent update must be made after we are sure that the packet
5317 * is in window.
5318 */
5319 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
5320
5321 /* step 3: check security and precedence [ignored] */ 5323 /* step 3: check security and precedence [ignored] */
5322 5324
5323 /* step 4: Check for a SYN 5325 /* step 4: Check for a SYN
@@ -5541,6 +5543,9 @@ slow_path:
5541 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) 5543 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
5542 goto csum_error; 5544 goto csum_error;
5543 5545
5546 if (!th->ack && !th->rst)
5547 goto discard;
5548
5544 /* 5549 /*
5545 * Standard slow path. 5550 * Standard slow path.
5546 */ 5551 */
@@ -5549,9 +5554,14 @@ slow_path:
5549 return 0; 5554 return 0;
5550 5555
5551step5: 5556step5:
5552 if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) 5557 if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
5553 goto discard; 5558 goto discard;
5554 5559
5560 /* ts_recent update must be made after we are sure that the packet
5561 * is in window.
5562 */
5563 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
5564
5555 tcp_rcv_rtt_measure_ts(sk, skb); 5565 tcp_rcv_rtt_measure_ts(sk, skb);
5556 5566
5557 /* Process urgent data. */ 5567 /* Process urgent data. */
@@ -5645,7 +5655,11 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
5645 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop); 5655 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
5646 5656
5647 if (data) { /* Retransmit unacked data in SYN */ 5657 if (data) { /* Retransmit unacked data in SYN */
5648 tcp_retransmit_skb(sk, data); 5658 tcp_for_write_queue_from(data, sk) {
5659 if (data == tcp_send_head(sk) ||
5660 __tcp_retransmit_skb(sk, data))
5661 break;
5662 }
5649 tcp_rearm_rto(sk); 5663 tcp_rearm_rto(sk);
5650 return true; 5664 return true;
5651 } 5665 }
@@ -5973,11 +5987,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5973 if (tcp_check_req(sk, skb, req, NULL, true) == NULL) 5987 if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
5974 goto discard; 5988 goto discard;
5975 } 5989 }
5990
5991 if (!th->ack && !th->rst)
5992 goto discard;
5993
5976 if (!tcp_validate_incoming(sk, skb, th, 0)) 5994 if (!tcp_validate_incoming(sk, skb, th, 0))
5977 return 0; 5995 return 0;
5978 5996
5979 /* step 5: check the ACK field */ 5997 /* step 5: check the ACK field */
5980 if (th->ack) { 5998 if (true) {
5981 int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0; 5999 int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
5982 6000
5983 switch (sk->sk_state) { 6001 switch (sk->sk_state) {
@@ -5988,7 +6006,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5988 */ 6006 */
5989 if (req) { 6007 if (req) {
5990 tcp_synack_rtt_meas(sk, req); 6008 tcp_synack_rtt_meas(sk, req);
5991 tp->total_retrans = req->retrans; 6009 tp->total_retrans = req->num_retrans;
5992 6010
5993 reqsk_fastopen_remove(sk, req, false); 6011 reqsk_fastopen_remove(sk, req, false);
5994 } else { 6012 } else {
@@ -6127,8 +6145,12 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
6127 } 6145 }
6128 break; 6146 break;
6129 } 6147 }
6130 } else 6148 }
6131 goto discard; 6149
6150 /* ts_recent update must be made after we are sure that the packet
6151 * is in window.
6152 */
6153 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
6132 6154
6133 /* step 6: check the URG bit */ 6155 /* step 6: check the URG bit */
6134 tcp_urg(sk, skb, th); 6156 tcp_urg(sk, skb, th);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0c4a64355603..54139fa514e6 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -138,14 +138,6 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
138} 138}
139EXPORT_SYMBOL_GPL(tcp_twsk_unique); 139EXPORT_SYMBOL_GPL(tcp_twsk_unique);
140 140
141static int tcp_repair_connect(struct sock *sk)
142{
143 tcp_connect_init(sk);
144 tcp_finish_connect(sk, NULL);
145
146 return 0;
147}
148
149/* This will initiate an outgoing connection. */ 141/* This will initiate an outgoing connection. */
150int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 142int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
151{ 143{
@@ -250,10 +242,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
250 242
251 inet->inet_id = tp->write_seq ^ jiffies; 243 inet->inet_id = tp->write_seq ^ jiffies;
252 244
253 if (likely(!tp->repair)) 245 err = tcp_connect(sk);
254 err = tcp_connect(sk);
255 else
256 err = tcp_repair_connect(sk);
257 246
258 rt = NULL; 247 rt = NULL;
259 if (err) 248 if (err)
@@ -877,10 +866,13 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
877} 866}
878 867
879static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req, 868static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
880 struct request_values *rvp) 869 struct request_values *rvp)
881{ 870{
882 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); 871 int res = tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
883 return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false); 872
873 if (!res)
874 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
875 return res;
884} 876}
885 877
886/* 878/*
@@ -1070,7 +1062,7 @@ int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1070} 1062}
1071EXPORT_SYMBOL(tcp_md5_do_del); 1063EXPORT_SYMBOL(tcp_md5_do_del);
1072 1064
1073void tcp_clear_md5_list(struct sock *sk) 1065static void tcp_clear_md5_list(struct sock *sk)
1074{ 1066{
1075 struct tcp_sock *tp = tcp_sk(sk); 1067 struct tcp_sock *tp = tcp_sk(sk);
1076 struct tcp_md5sig_key *key; 1068 struct tcp_md5sig_key *key;
@@ -1386,7 +1378,8 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
1386 struct sock *child; 1378 struct sock *child;
1387 int err; 1379 int err;
1388 1380
1389 req->retrans = 0; 1381 req->num_retrans = 0;
1382 req->num_timeout = 0;
1390 req->sk = NULL; 1383 req->sk = NULL;
1391 1384
1392 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); 1385 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
@@ -1741,7 +1734,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1741 1734
1742 tcp_initialize_rcv_mss(newsk); 1735 tcp_initialize_rcv_mss(newsk);
1743 tcp_synack_rtt_meas(newsk, req); 1736 tcp_synack_rtt_meas(newsk, req);
1744 newtp->total_retrans = req->retrans; 1737 newtp->total_retrans = req->num_retrans;
1745 1738
1746#ifdef CONFIG_TCP_MD5SIG 1739#ifdef CONFIG_TCP_MD5SIG
1747 /* Copy over the MD5 key from the original socket */ 1740 /* Copy over the MD5 key from the original socket */
@@ -1774,10 +1767,8 @@ exit:
1774 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 1767 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1775 return NULL; 1768 return NULL;
1776put_and_exit: 1769put_and_exit:
1777 tcp_clear_xmit_timers(newsk); 1770 inet_csk_prepare_forced_close(newsk);
1778 tcp_cleanup_congestion_control(newsk); 1771 tcp_done(newsk);
1779 bh_unlock_sock(newsk);
1780 sock_put(newsk);
1781 goto exit; 1772 goto exit;
1782} 1773}
1783EXPORT_SYMBOL(tcp_v4_syn_recv_sock); 1774EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
@@ -1919,7 +1910,6 @@ EXPORT_SYMBOL(tcp_v4_do_rcv);
1919 1910
1920void tcp_v4_early_demux(struct sk_buff *skb) 1911void tcp_v4_early_demux(struct sk_buff *skb)
1921{ 1912{
1922 struct net *net = dev_net(skb->dev);
1923 const struct iphdr *iph; 1913 const struct iphdr *iph;
1924 const struct tcphdr *th; 1914 const struct tcphdr *th;
1925 struct sock *sk; 1915 struct sock *sk;
@@ -1927,16 +1917,16 @@ void tcp_v4_early_demux(struct sk_buff *skb)
1927 if (skb->pkt_type != PACKET_HOST) 1917 if (skb->pkt_type != PACKET_HOST)
1928 return; 1918 return;
1929 1919
1930 if (!pskb_may_pull(skb, ip_hdrlen(skb) + sizeof(struct tcphdr))) 1920 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1931 return; 1921 return;
1932 1922
1933 iph = ip_hdr(skb); 1923 iph = ip_hdr(skb);
1934 th = (struct tcphdr *) ((char *)iph + ip_hdrlen(skb)); 1924 th = tcp_hdr(skb);
1935 1925
1936 if (th->doff < sizeof(struct tcphdr) / 4) 1926 if (th->doff < sizeof(struct tcphdr) / 4)
1937 return; 1927 return;
1938 1928
1939 sk = __inet_lookup_established(net, &tcp_hashinfo, 1929 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1940 iph->saddr, th->source, 1930 iph->saddr, th->source,
1941 iph->daddr, ntohs(th->dest), 1931 iph->daddr, ntohs(th->dest),
1942 skb->skb_iif); 1932 skb->skb_iif);
@@ -2640,7 +2630,7 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2640 0, 0, /* could print option size, but that is af dependent. */ 2630 0, 0, /* could print option size, but that is af dependent. */
2641 1, /* timers active (only the expire timer) */ 2631 1, /* timers active (only the expire timer) */
2642 jiffies_delta_to_clock_t(delta), 2632 jiffies_delta_to_clock_t(delta),
2643 req->retrans, 2633 req->num_timeout,
2644 from_kuid_munged(seq_user_ns(f), uid), 2634 from_kuid_munged(seq_user_ns(f), uid),
2645 0, /* non standard timer */ 2635 0, /* non standard timer */
2646 0, /* open_requests have no inode */ 2636 0, /* open_requests have no inode */
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 53bc5847bfa8..f696d7c2e9fa 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -1,7 +1,6 @@
1#include <linux/rcupdate.h> 1#include <linux/rcupdate.h>
2#include <linux/spinlock.h> 2#include <linux/spinlock.h>
3#include <linux/jiffies.h> 3#include <linux/jiffies.h>
4#include <linux/bootmem.h>
5#include <linux/module.h> 4#include <linux/module.h>
6#include <linux/cache.h> 5#include <linux/cache.h>
7#include <linux/slab.h> 6#include <linux/slab.h>
@@ -9,6 +8,7 @@
9#include <linux/tcp.h> 8#include <linux/tcp.h>
10#include <linux/hash.h> 9#include <linux/hash.h>
11#include <linux/tcp_metrics.h> 10#include <linux/tcp_metrics.h>
11#include <linux/vmalloc.h>
12 12
13#include <net/inet_connection_sock.h> 13#include <net/inet_connection_sock.h>
14#include <net/net_namespace.h> 14#include <net/net_namespace.h>
@@ -1034,7 +1034,10 @@ static int __net_init tcp_net_metrics_init(struct net *net)
1034 net->ipv4.tcp_metrics_hash_log = order_base_2(slots); 1034 net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
1035 size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log; 1035 size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;
1036 1036
1037 net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL); 1037 net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1038 if (!net->ipv4.tcp_metrics_hash)
1039 net->ipv4.tcp_metrics_hash = vzalloc(size);
1040
1038 if (!net->ipv4.tcp_metrics_hash) 1041 if (!net->ipv4.tcp_metrics_hash)
1039 return -ENOMEM; 1042 return -ENOMEM;
1040 1043
@@ -1055,7 +1058,10 @@ static void __net_exit tcp_net_metrics_exit(struct net *net)
1055 tm = next; 1058 tm = next;
1056 } 1059 }
1057 } 1060 }
1058 kfree(net->ipv4.tcp_metrics_hash); 1061 if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash))
1062 vfree(net->ipv4.tcp_metrics_hash);
1063 else
1064 kfree(net->ipv4.tcp_metrics_hash);
1059} 1065}
1060 1066
1061static __net_initdata struct pernet_operations tcp_net_metrics_ops = { 1067static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index a7302d974f32..f35f2dfb6401 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -553,7 +553,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
553 * it can be estimated (approximately) 553 * it can be estimated (approximately)
554 * from another data. 554 * from another data.
555 */ 555 */
556 tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans); 556 tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
557 paws_reject = tcp_paws_reject(&tmp_opt, th->rst); 557 paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
558 } 558 }
559 } 559 }
@@ -582,7 +582,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
582 * Note that even if there is new data in the SYN packet 582 * Note that even if there is new data in the SYN packet
583 * they will be thrown away too. 583 * they will be thrown away too.
584 */ 584 */
585 req->rsk_ops->rtx_syn_ack(sk, req, NULL); 585 inet_rtx_syn_ack(sk, req);
586 return NULL; 586 return NULL;
587 } 587 }
588 588
@@ -696,7 +696,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
696 /* Got ACK for our SYNACK, so update baseline for SYNACK RTT sample. */ 696 /* Got ACK for our SYNACK, so update baseline for SYNACK RTT sample. */
697 if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr) 697 if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
698 tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr; 698 tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
699 else if (req->retrans) /* don't take RTT sample if retrans && ~TS */ 699 else if (req->num_retrans) /* don't take RTT sample if retrans && ~TS */
700 tcp_rsk(req)->snt_synack = 0; 700 tcp_rsk(req)->snt_synack = 0;
701 701
702 /* For Fast Open no more processing is needed (sk is the 702 /* For Fast Open no more processing is needed (sk is the
@@ -706,7 +706,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
706 return sk; 706 return sk;
707 707
708 /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */ 708 /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
709 if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && 709 if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
710 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { 710 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
711 inet_rsk(req)->acked = 1; 711 inet_rsk(req)->acked = 1;
712 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP); 712 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index cfe6ffe1c177..5d451593ef16 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1986,6 +1986,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1986 tso_segs = tcp_init_tso_segs(sk, skb, mss_now); 1986 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1987 BUG_ON(!tso_segs); 1987 BUG_ON(!tso_segs);
1988 1988
1989 if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE)
1990 goto repair; /* Skip network transmission */
1991
1989 cwnd_quota = tcp_cwnd_test(tp, skb); 1992 cwnd_quota = tcp_cwnd_test(tp, skb);
1990 if (!cwnd_quota) 1993 if (!cwnd_quota)
1991 break; 1994 break;
@@ -2026,6 +2029,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2026 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) 2029 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
2027 break; 2030 break;
2028 2031
2032repair:
2029 /* Advance the send_head. This one is sent out. 2033 /* Advance the send_head. This one is sent out.
2030 * This call will increment packets_out. 2034 * This call will increment packets_out.
2031 */ 2035 */
@@ -2305,12 +2309,11 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
2305 * state updates are done by the caller. Returns non-zero if an 2309 * state updates are done by the caller. Returns non-zero if an
2306 * error occurred which prevented the send. 2310 * error occurred which prevented the send.
2307 */ 2311 */
2308int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) 2312int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2309{ 2313{
2310 struct tcp_sock *tp = tcp_sk(sk); 2314 struct tcp_sock *tp = tcp_sk(sk);
2311 struct inet_connection_sock *icsk = inet_csk(sk); 2315 struct inet_connection_sock *icsk = inet_csk(sk);
2312 unsigned int cur_mss; 2316 unsigned int cur_mss;
2313 int err;
2314 2317
2315 /* Inconslusive MTU probe */ 2318 /* Inconslusive MTU probe */
2316 if (icsk->icsk_mtup.probe_size) { 2319 if (icsk->icsk_mtup.probe_size) {
@@ -2383,11 +2386,17 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2383 if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) { 2386 if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
2384 struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, 2387 struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
2385 GFP_ATOMIC); 2388 GFP_ATOMIC);
2386 err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : 2389 return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2387 -ENOBUFS; 2390 -ENOBUFS;
2388 } else { 2391 } else {
2389 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2392 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2390 } 2393 }
2394}
2395
2396int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2397{
2398 struct tcp_sock *tp = tcp_sk(sk);
2399 int err = __tcp_retransmit_skb(sk, skb);
2391 2400
2392 if (err == 0) { 2401 if (err == 0) {
2393 /* Update global TCP statistics. */ 2402 /* Update global TCP statistics. */
@@ -2983,6 +2992,11 @@ int tcp_connect(struct sock *sk)
2983 2992
2984 tcp_connect_init(sk); 2993 tcp_connect_init(sk);
2985 2994
2995 if (unlikely(tp->repair)) {
2996 tcp_finish_connect(sk, NULL);
2997 return 0;
2998 }
2999
2986 buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation); 3000 buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
2987 if (unlikely(buff == NULL)) 3001 if (unlikely(buff == NULL))
2988 return -ENOBUFS; 3002 return -ENOBUFS;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index d47c1b4421a3..b78aac30c498 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -318,7 +318,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk)
318 req = tcp_sk(sk)->fastopen_rsk; 318 req = tcp_sk(sk)->fastopen_rsk;
319 req->rsk_ops->syn_ack_timeout(sk, req); 319 req->rsk_ops->syn_ack_timeout(sk, req);
320 320
321 if (req->retrans >= max_retries) { 321 if (req->num_timeout >= max_retries) {
322 tcp_write_err(sk); 322 tcp_write_err(sk);
323 return; 323 return;
324 } 324 }
@@ -327,10 +327,10 @@ static void tcp_fastopen_synack_timer(struct sock *sk)
327 * regular retransmit because if the child socket has been accepted 327 * regular retransmit because if the child socket has been accepted
328 * it's not good to give up too easily. 328 * it's not good to give up too easily.
329 */ 329 */
330 req->rsk_ops->rtx_syn_ack(sk, req, NULL); 330 inet_rtx_syn_ack(sk, req);
331 req->retrans++; 331 req->num_timeout++;
332 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 332 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
333 TCP_TIMEOUT_INIT << req->retrans, TCP_RTO_MAX); 333 TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
334} 334}
335 335
336/* 336/*
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 05c5ab8d983c..3be0ac2c1920 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -279,19 +279,8 @@ static void __exit xfrm4_policy_fini(void)
279 xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo); 279 xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo);
280} 280}
281 281
282void __init xfrm4_init(int rt_max_size) 282void __init xfrm4_init(void)
283{ 283{
284 /*
285 * Select a default value for the gc_thresh based on the main route
286 * table hash size. It seems to me the worst case scenario is when
287 * we have ipsec operating in transport mode, in which we create a
288 * dst_entry per socket. The xfrm gc algorithm starts trying to remove
289 * entries at gc_thresh, and prevents new allocations as 2*gc_thresh
290 * so lets set an initial xfrm gc_thresh value at the rt_max_size/2.
291 * That will let us store an ipsec connection per route table entry,
292 * and start cleaning when were 1/2 full
293 */
294 xfrm4_dst_ops.gc_thresh = rt_max_size/2;
295 dst_entries_init(&xfrm4_dst_ops); 284 dst_entries_init(&xfrm4_dst_ops);
296 285
297 xfrm4_state_init(); 286 xfrm4_state_init();
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index b6d3f79151e2..4ea244891b58 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -7,9 +7,11 @@ obj-$(CONFIG_IPV6) += ipv6.o
7ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \ 7ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \
8 addrlabel.o \ 8 addrlabel.o \
9 route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \ 9 route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \
10 raw.o protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \ 10 raw.o icmp.o mcast.o reassembly.o tcp_ipv6.o \
11 exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o 11 exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o
12 12
13ipv6-offload := ip6_offload.o tcpv6_offload.o udp_offload.o exthdrs_offload.o
14
13ipv6-$(CONFIG_SYSCTL) = sysctl_net_ipv6.o 15ipv6-$(CONFIG_SYSCTL) = sysctl_net_ipv6.o
14ipv6-$(CONFIG_IPV6_MROUTE) += ip6mr.o 16ipv6-$(CONFIG_IPV6_MROUTE) += ip6mr.o
15 17
@@ -39,5 +41,6 @@ obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
39obj-$(CONFIG_IPV6_GRE) += ip6_gre.o 41obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
40 42
41obj-y += addrconf_core.o exthdrs_core.o 43obj-y += addrconf_core.o exthdrs_core.o
44obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload)
42 45
43obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o 46obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 0424e4e27414..420e56326384 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -81,6 +81,7 @@
81#include <net/pkt_sched.h> 81#include <net/pkt_sched.h>
82#include <linux/if_tunnel.h> 82#include <linux/if_tunnel.h>
83#include <linux/rtnetlink.h> 83#include <linux/rtnetlink.h>
84#include <linux/netconf.h>
84 85
85#ifdef CONFIG_IPV6_PRIVACY 86#ifdef CONFIG_IPV6_PRIVACY
86#include <linux/random.h> 87#include <linux/random.h>
@@ -153,6 +154,11 @@ static void addrconf_type_change(struct net_device *dev,
153 unsigned long event); 154 unsigned long event);
154static int addrconf_ifdown(struct net_device *dev, int how); 155static int addrconf_ifdown(struct net_device *dev, int how);
155 156
157static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
158 int plen,
159 const struct net_device *dev,
160 u32 flags, u32 noflags);
161
156static void addrconf_dad_start(struct inet6_ifaddr *ifp); 162static void addrconf_dad_start(struct inet6_ifaddr *ifp);
157static void addrconf_dad_timer(unsigned long data); 163static void addrconf_dad_timer(unsigned long data);
158static void addrconf_dad_completed(struct inet6_ifaddr *ifp); 164static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
@@ -249,12 +255,6 @@ static inline bool addrconf_qdisc_ok(const struct net_device *dev)
249 return !qdisc_tx_is_noop(dev); 255 return !qdisc_tx_is_noop(dev);
250} 256}
251 257
252/* Check if a route is valid prefix route */
253static inline int addrconf_is_prefix_route(const struct rt6_info *rt)
254{
255 return (rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0;
256}
257
258static void addrconf_del_timer(struct inet6_ifaddr *ifp) 258static void addrconf_del_timer(struct inet6_ifaddr *ifp)
259{ 259{
260 if (del_timer(&ifp->timer)) 260 if (del_timer(&ifp->timer))
@@ -401,7 +401,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
401 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) 401 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
402 ndev->cnf.accept_dad = -1; 402 ndev->cnf.accept_dad = -1;
403 403
404#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 404#if IS_ENABLED(CONFIG_IPV6_SIT)
405 if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) { 405 if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
406 pr_info("%s: Disabled Multicast RS\n", dev->name); 406 pr_info("%s: Disabled Multicast RS\n", dev->name);
407 ndev->cnf.rtr_solicits = 0; 407 ndev->cnf.rtr_solicits = 0;
@@ -460,6 +460,149 @@ static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
460 return idev; 460 return idev;
461} 461}
462 462
463static int inet6_netconf_msgsize_devconf(int type)
464{
465 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
466 + nla_total_size(4); /* NETCONFA_IFINDEX */
467
468 /* type -1 is used for ALL */
469 if (type == -1 || type == NETCONFA_FORWARDING)
470 size += nla_total_size(4);
471#ifdef CONFIG_IPV6_MROUTE
472 if (type == -1 || type == NETCONFA_MC_FORWARDING)
473 size += nla_total_size(4);
474#endif
475
476 return size;
477}
478
479static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
480 struct ipv6_devconf *devconf, u32 portid,
481 u32 seq, int event, unsigned int flags,
482 int type)
483{
484 struct nlmsghdr *nlh;
485 struct netconfmsg *ncm;
486
487 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
488 flags);
489 if (nlh == NULL)
490 return -EMSGSIZE;
491
492 ncm = nlmsg_data(nlh);
493 ncm->ncm_family = AF_INET6;
494
495 if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
496 goto nla_put_failure;
497
498 /* type -1 is used for ALL */
499 if ((type == -1 || type == NETCONFA_FORWARDING) &&
500 nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
501 goto nla_put_failure;
502#ifdef CONFIG_IPV6_MROUTE
503 if ((type == -1 || type == NETCONFA_MC_FORWARDING) &&
504 nla_put_s32(skb, NETCONFA_MC_FORWARDING,
505 devconf->mc_forwarding) < 0)
506 goto nla_put_failure;
507#endif
508 return nlmsg_end(skb, nlh);
509
510nla_put_failure:
511 nlmsg_cancel(skb, nlh);
512 return -EMSGSIZE;
513}
514
515void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
516 struct ipv6_devconf *devconf)
517{
518 struct sk_buff *skb;
519 int err = -ENOBUFS;
520
521 skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_ATOMIC);
522 if (skb == NULL)
523 goto errout;
524
525 err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
526 RTM_NEWNETCONF, 0, type);
527 if (err < 0) {
528 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
529 WARN_ON(err == -EMSGSIZE);
530 kfree_skb(skb);
531 goto errout;
532 }
533 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_ATOMIC);
534 return;
535errout:
536 rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
537}
538
539static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
540 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
541 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
542};
543
544static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
545 struct nlmsghdr *nlh,
546 void *arg)
547{
548 struct net *net = sock_net(in_skb->sk);
549 struct nlattr *tb[NETCONFA_MAX+1];
550 struct netconfmsg *ncm;
551 struct sk_buff *skb;
552 struct ipv6_devconf *devconf;
553 struct inet6_dev *in6_dev;
554 struct net_device *dev;
555 int ifindex;
556 int err;
557
558 err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
559 devconf_ipv6_policy);
560 if (err < 0)
561 goto errout;
562
563 err = EINVAL;
564 if (!tb[NETCONFA_IFINDEX])
565 goto errout;
566
567 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
568 switch (ifindex) {
569 case NETCONFA_IFINDEX_ALL:
570 devconf = net->ipv6.devconf_all;
571 break;
572 case NETCONFA_IFINDEX_DEFAULT:
573 devconf = net->ipv6.devconf_dflt;
574 break;
575 default:
576 dev = __dev_get_by_index(net, ifindex);
577 if (dev == NULL)
578 goto errout;
579 in6_dev = __in6_dev_get(dev);
580 if (in6_dev == NULL)
581 goto errout;
582 devconf = &in6_dev->cnf;
583 break;
584 }
585
586 err = -ENOBUFS;
587 skb = nlmsg_new(inet6_netconf_msgsize_devconf(-1), GFP_ATOMIC);
588 if (skb == NULL)
589 goto errout;
590
591 err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
592 NETLINK_CB(in_skb).portid,
593 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
594 -1);
595 if (err < 0) {
596 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
597 WARN_ON(err == -EMSGSIZE);
598 kfree_skb(skb);
599 goto errout;
600 }
601 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
602errout:
603 return err;
604}
605
463#ifdef CONFIG_SYSCTL 606#ifdef CONFIG_SYSCTL
464static void dev_forward_change(struct inet6_dev *idev) 607static void dev_forward_change(struct inet6_dev *idev)
465{ 608{
@@ -471,7 +614,7 @@ static void dev_forward_change(struct inet6_dev *idev)
471 dev = idev->dev; 614 dev = idev->dev;
472 if (idev->cnf.forwarding) 615 if (idev->cnf.forwarding)
473 dev_disable_lro(dev); 616 dev_disable_lro(dev);
474 if (dev && (dev->flags & IFF_MULTICAST)) { 617 if (dev->flags & IFF_MULTICAST) {
475 if (idev->cnf.forwarding) 618 if (idev->cnf.forwarding)
476 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters); 619 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
477 else 620 else
@@ -486,6 +629,8 @@ static void dev_forward_change(struct inet6_dev *idev)
486 else 629 else
487 addrconf_leave_anycast(ifa); 630 addrconf_leave_anycast(ifa);
488 } 631 }
632 inet6_netconf_notify_devconf(dev_net(dev), NETCONFA_FORWARDING,
633 dev->ifindex, &idev->cnf);
489} 634}
490 635
491 636
@@ -518,6 +663,10 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
518 *p = newf; 663 *p = newf;
519 664
520 if (p == &net->ipv6.devconf_dflt->forwarding) { 665 if (p == &net->ipv6.devconf_dflt->forwarding) {
666 if ((!newf) ^ (!old))
667 inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
668 NETCONFA_IFINDEX_DEFAULT,
669 net->ipv6.devconf_dflt);
521 rtnl_unlock(); 670 rtnl_unlock();
522 return 0; 671 return 0;
523 } 672 }
@@ -525,6 +674,10 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
525 if (p == &net->ipv6.devconf_all->forwarding) { 674 if (p == &net->ipv6.devconf_all->forwarding) {
526 net->ipv6.devconf_dflt->forwarding = newf; 675 net->ipv6.devconf_dflt->forwarding = newf;
527 addrconf_forward_change(net, newf); 676 addrconf_forward_change(net, newf);
677 if ((!newf) ^ (!old))
678 inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
679 NETCONFA_IFINDEX_ALL,
680 net->ipv6.devconf_all);
528 } else if ((!newf) ^ (!old)) 681 } else if ((!newf) ^ (!old))
529 dev_forward_change((struct inet6_dev *)table->extra1); 682 dev_forward_change((struct inet6_dev *)table->extra1);
530 rtnl_unlock(); 683 rtnl_unlock();
@@ -553,7 +706,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
553 pr_warn("Freeing alive inet6 address %p\n", ifp); 706 pr_warn("Freeing alive inet6 address %p\n", ifp);
554 return; 707 return;
555 } 708 }
556 dst_release(&ifp->rt->dst); 709 ip6_rt_put(ifp->rt);
557 710
558 kfree_rcu(ifp, rcu); 711 kfree_rcu(ifp, rcu);
559} 712}
@@ -787,17 +940,15 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
787 if ((ifp->flags & IFA_F_PERMANENT) && onlink < 1) { 940 if ((ifp->flags & IFA_F_PERMANENT) && onlink < 1) {
788 struct in6_addr prefix; 941 struct in6_addr prefix;
789 struct rt6_info *rt; 942 struct rt6_info *rt;
790 struct net *net = dev_net(ifp->idev->dev);
791 struct flowi6 fl6 = {};
792 943
793 ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len); 944 ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len);
794 fl6.flowi6_oif = ifp->idev->dev->ifindex;
795 fl6.daddr = prefix;
796 rt = (struct rt6_info *)ip6_route_lookup(net, &fl6,
797 RT6_LOOKUP_F_IFACE);
798 945
799 if (rt != net->ipv6.ip6_null_entry && 946 rt = addrconf_get_prefix_route(&prefix,
800 addrconf_is_prefix_route(rt)) { 947 ifp->prefix_len,
948 ifp->idev->dev,
949 0, RTF_GATEWAY | RTF_DEFAULT);
950
951 if (rt) {
801 if (onlink == 0) { 952 if (onlink == 0) {
802 ip6_del_rt(rt); 953 ip6_del_rt(rt);
803 rt = NULL; 954 rt = NULL;
@@ -805,7 +956,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
805 rt6_set_expires(rt, expires); 956 rt6_set_expires(rt, expires);
806 } 957 }
807 } 958 }
808 dst_release(&rt->dst); 959 ip6_rt_put(rt);
809 } 960 }
810 961
811 /* clean up prefsrc entries */ 962 /* clean up prefsrc entries */
@@ -1692,7 +1843,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
1692 This thing is done here expecting that the whole 1843 This thing is done here expecting that the whole
1693 class of non-broadcast devices need not cloning. 1844 class of non-broadcast devices need not cloning.
1694 */ 1845 */
1695#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 1846#if IS_ENABLED(CONFIG_IPV6_SIT)
1696 if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT)) 1847 if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
1697 cfg.fc_flags |= RTF_NONEXTHOP; 1848 cfg.fc_flags |= RTF_NONEXTHOP;
1698#endif 1849#endif
@@ -1723,7 +1874,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
1723 continue; 1874 continue;
1724 if ((rt->rt6i_flags & flags) != flags) 1875 if ((rt->rt6i_flags & flags) != flags)
1725 continue; 1876 continue;
1726 if ((noflags != 0) && ((rt->rt6i_flags & flags) != 0)) 1877 if ((rt->rt6i_flags & noflags) != 0)
1727 continue; 1878 continue;
1728 dst_hold(&rt->dst); 1879 dst_hold(&rt->dst);
1729 break; 1880 break;
@@ -1752,7 +1903,7 @@ static void addrconf_add_mroute(struct net_device *dev)
1752 ip6_route_add(&cfg); 1903 ip6_route_add(&cfg);
1753} 1904}
1754 1905
1755#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 1906#if IS_ENABLED(CONFIG_IPV6_SIT)
1756static void sit_route_add(struct net_device *dev) 1907static void sit_route_add(struct net_device *dev)
1757{ 1908{
1758 struct fib6_config cfg = { 1909 struct fib6_config cfg = {
@@ -1881,8 +2032,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
1881 addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len, 2032 addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
1882 dev, expires, flags); 2033 dev, expires, flags);
1883 } 2034 }
1884 if (rt) 2035 ip6_rt_put(rt);
1885 dst_release(&rt->dst);
1886 } 2036 }
1887 2037
1888 /* Try to figure out our local address for this prefix */ 2038 /* Try to figure out our local address for this prefix */
@@ -2104,7 +2254,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
2104 if (dev == NULL) 2254 if (dev == NULL)
2105 goto err_exit; 2255 goto err_exit;
2106 2256
2107#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 2257#if IS_ENABLED(CONFIG_IPV6_SIT)
2108 if (dev->type == ARPHRD_SIT) { 2258 if (dev->type == ARPHRD_SIT) {
2109 const struct net_device_ops *ops = dev->netdev_ops; 2259 const struct net_device_ops *ops = dev->netdev_ops;
2110 struct ifreq ifr; 2260 struct ifreq ifr;
@@ -2268,7 +2418,7 @@ int addrconf_add_ifaddr(struct net *net, void __user *arg)
2268 struct in6_ifreq ireq; 2418 struct in6_ifreq ireq;
2269 int err; 2419 int err;
2270 2420
2271 if (!capable(CAP_NET_ADMIN)) 2421 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2272 return -EPERM; 2422 return -EPERM;
2273 2423
2274 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq))) 2424 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
@@ -2287,7 +2437,7 @@ int addrconf_del_ifaddr(struct net *net, void __user *arg)
2287 struct in6_ifreq ireq; 2437 struct in6_ifreq ireq;
2288 int err; 2438 int err;
2289 2439
2290 if (!capable(CAP_NET_ADMIN)) 2440 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2291 return -EPERM; 2441 return -EPERM;
2292 2442
2293 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq))) 2443 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
@@ -2315,7 +2465,7 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
2315 } 2465 }
2316} 2466}
2317 2467
2318#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 2468#if IS_ENABLED(CONFIG_IPV6_SIT)
2319static void sit_add_v4_addrs(struct inet6_dev *idev) 2469static void sit_add_v4_addrs(struct inet6_dev *idev)
2320{ 2470{
2321 struct in6_addr addr; 2471 struct in6_addr addr;
@@ -2434,7 +2584,7 @@ static void addrconf_dev_config(struct net_device *dev)
2434 addrconf_add_linklocal(idev, &addr); 2584 addrconf_add_linklocal(idev, &addr);
2435} 2585}
2436 2586
2437#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 2587#if IS_ENABLED(CONFIG_IPV6_SIT)
2438static void addrconf_sit_config(struct net_device *dev) 2588static void addrconf_sit_config(struct net_device *dev)
2439{ 2589{
2440 struct inet6_dev *idev; 2590 struct inet6_dev *idev;
@@ -2471,7 +2621,7 @@ static void addrconf_sit_config(struct net_device *dev)
2471} 2621}
2472#endif 2622#endif
2473 2623
2474#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE) 2624#if IS_ENABLED(CONFIG_NET_IPGRE)
2475static void addrconf_gre_config(struct net_device *dev) 2625static void addrconf_gre_config(struct net_device *dev)
2476{ 2626{
2477 struct inet6_dev *idev; 2627 struct inet6_dev *idev;
@@ -2601,12 +2751,12 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2601 } 2751 }
2602 2752
2603 switch (dev->type) { 2753 switch (dev->type) {
2604#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 2754#if IS_ENABLED(CONFIG_IPV6_SIT)
2605 case ARPHRD_SIT: 2755 case ARPHRD_SIT:
2606 addrconf_sit_config(dev); 2756 addrconf_sit_config(dev);
2607 break; 2757 break;
2608#endif 2758#endif
2609#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE) 2759#if IS_ENABLED(CONFIG_NET_IPGRE)
2610 case ARPHRD_IPGRE: 2760 case ARPHRD_IPGRE:
2611 addrconf_gre_config(dev); 2761 addrconf_gre_config(dev);
2612 break; 2762 break;
@@ -2843,7 +2993,7 @@ static void addrconf_rs_timer(unsigned long data)
2843 if (idev->dead || !(idev->if_flags & IF_READY)) 2993 if (idev->dead || !(idev->if_flags & IF_READY))
2844 goto out; 2994 goto out;
2845 2995
2846 if (idev->cnf.forwarding) 2996 if (!ipv6_accept_ra(idev))
2847 goto out; 2997 goto out;
2848 2998
2849 /* Announcement received after solicitation was sent */ 2999 /* Announcement received after solicitation was sent */
@@ -3005,8 +3155,7 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
3005 router advertisements, start sending router solicitations. 3155 router advertisements, start sending router solicitations.
3006 */ 3156 */
3007 3157
3008 if (((ifp->idev->cnf.accept_ra == 1 && !ifp->idev->cnf.forwarding) || 3158 if (ipv6_accept_ra(ifp->idev) &&
3009 ifp->idev->cnf.accept_ra == 2) &&
3010 ifp->idev->cnf.rtr_solicits > 0 && 3159 ifp->idev->cnf.rtr_solicits > 0 &&
3011 (dev->flags&IFF_LOOPBACK) == 0 && 3160 (dev->flags&IFF_LOOPBACK) == 0 &&
3012 (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) { 3161 (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
@@ -3194,7 +3343,7 @@ void if6_proc_exit(void)
3194} 3343}
3195#endif /* CONFIG_PROC_FS */ 3344#endif /* CONFIG_PROC_FS */
3196 3345
3197#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 3346#if IS_ENABLED(CONFIG_IPV6_MIP6)
3198/* Check if address is a home address configured on any interface. */ 3347/* Check if address is a home address configured on any interface. */
3199int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr) 3348int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
3200{ 3349{
@@ -3892,6 +4041,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
3892 array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6; 4041 array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
3893 array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad; 4042 array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
3894 array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao; 4043 array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
4044 array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
3895} 4045}
3896 4046
3897static inline size_t inet6_ifla6_size(void) 4047static inline size_t inet6_ifla6_size(void)
@@ -4560,6 +4710,13 @@ static struct addrconf_sysctl_table
4560 .proc_handler = proc_dointvec 4710 .proc_handler = proc_dointvec
4561 }, 4711 },
4562 { 4712 {
4713 .procname = "ndisc_notify",
4714 .data = &ipv6_devconf.ndisc_notify,
4715 .maxlen = sizeof(int),
4716 .mode = 0644,
4717 .proc_handler = proc_dointvec
4718 },
4719 {
4563 /* sentinel */ 4720 /* sentinel */
4564 } 4721 }
4565 }, 4722 },
@@ -4784,6 +4941,8 @@ int __init addrconf_init(void)
4784 inet6_dump_ifmcaddr, NULL); 4941 inet6_dump_ifmcaddr, NULL);
4785 __rtnl_register(PF_INET6, RTM_GETANYCAST, NULL, 4942 __rtnl_register(PF_INET6, RTM_GETANYCAST, NULL,
4786 inet6_dump_ifacaddr, NULL); 4943 inet6_dump_ifacaddr, NULL);
4944 __rtnl_register(PF_INET6, RTM_GETNETCONF, inet6_netconf_get_devconf,
4945 NULL, NULL);
4787 4946
4788 ipv6_addr_label_rtnl_register(); 4947 ipv6_addr_label_rtnl_register();
4789 4948
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index a974247a9ae4..b043c60429bd 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -160,7 +160,8 @@ lookup_protocol:
160 } 160 }
161 161
162 err = -EPERM; 162 err = -EPERM;
163 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW)) 163 if (sock->type == SOCK_RAW && !kern &&
164 !ns_capable(net->user_ns, CAP_NET_RAW))
164 goto out_rcu_unlock; 165 goto out_rcu_unlock;
165 166
166 sock->ops = answer->ops; 167 sock->ops = answer->ops;
@@ -282,7 +283,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
282 return -EINVAL; 283 return -EINVAL;
283 284
284 snum = ntohs(addr->sin6_port); 285 snum = ntohs(addr->sin6_port);
285 if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) 286 if (snum && snum < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
286 return -EACCES; 287 return -EACCES;
287 288
288 lock_sock(sk); 289 lock_sock(sk);
@@ -699,249 +700,9 @@ bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb)
699} 700}
700EXPORT_SYMBOL_GPL(ipv6_opt_accepted); 701EXPORT_SYMBOL_GPL(ipv6_opt_accepted);
701 702
702static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
703{
704 const struct inet6_protocol *ops = NULL;
705
706 for (;;) {
707 struct ipv6_opt_hdr *opth;
708 int len;
709
710 if (proto != NEXTHDR_HOP) {
711 ops = rcu_dereference(inet6_protos[proto]);
712
713 if (unlikely(!ops))
714 break;
715
716 if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
717 break;
718 }
719
720 if (unlikely(!pskb_may_pull(skb, 8)))
721 break;
722
723 opth = (void *)skb->data;
724 len = ipv6_optlen(opth);
725
726 if (unlikely(!pskb_may_pull(skb, len)))
727 break;
728
729 proto = opth->nexthdr;
730 __skb_pull(skb, len);
731 }
732
733 return proto;
734}
735
736static int ipv6_gso_send_check(struct sk_buff *skb)
737{
738 const struct ipv6hdr *ipv6h;
739 const struct inet6_protocol *ops;
740 int err = -EINVAL;
741
742 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
743 goto out;
744
745 ipv6h = ipv6_hdr(skb);
746 __skb_pull(skb, sizeof(*ipv6h));
747 err = -EPROTONOSUPPORT;
748
749 rcu_read_lock();
750 ops = rcu_dereference(inet6_protos[
751 ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
752
753 if (likely(ops && ops->gso_send_check)) {
754 skb_reset_transport_header(skb);
755 err = ops->gso_send_check(skb);
756 }
757 rcu_read_unlock();
758
759out:
760 return err;
761}
762
763static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
764 netdev_features_t features)
765{
766 struct sk_buff *segs = ERR_PTR(-EINVAL);
767 struct ipv6hdr *ipv6h;
768 const struct inet6_protocol *ops;
769 int proto;
770 struct frag_hdr *fptr;
771 unsigned int unfrag_ip6hlen;
772 u8 *prevhdr;
773 int offset = 0;
774
775 if (!(features & NETIF_F_V6_CSUM))
776 features &= ~NETIF_F_SG;
777
778 if (unlikely(skb_shinfo(skb)->gso_type &
779 ~(SKB_GSO_UDP |
780 SKB_GSO_DODGY |
781 SKB_GSO_TCP_ECN |
782 SKB_GSO_TCPV6 |
783 0)))
784 goto out;
785
786 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
787 goto out;
788
789 ipv6h = ipv6_hdr(skb);
790 __skb_pull(skb, sizeof(*ipv6h));
791 segs = ERR_PTR(-EPROTONOSUPPORT);
792
793 proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
794 rcu_read_lock();
795 ops = rcu_dereference(inet6_protos[proto]);
796 if (likely(ops && ops->gso_segment)) {
797 skb_reset_transport_header(skb);
798 segs = ops->gso_segment(skb, features);
799 }
800 rcu_read_unlock();
801
802 if (IS_ERR(segs))
803 goto out;
804
805 for (skb = segs; skb; skb = skb->next) {
806 ipv6h = ipv6_hdr(skb);
807 ipv6h->payload_len = htons(skb->len - skb->mac_len -
808 sizeof(*ipv6h));
809 if (proto == IPPROTO_UDP) {
810 unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
811 fptr = (struct frag_hdr *)(skb_network_header(skb) +
812 unfrag_ip6hlen);
813 fptr->frag_off = htons(offset);
814 if (skb->next != NULL)
815 fptr->frag_off |= htons(IP6_MF);
816 offset += (ntohs(ipv6h->payload_len) -
817 sizeof(struct frag_hdr));
818 }
819 }
820
821out:
822 return segs;
823}
824
825static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
826 struct sk_buff *skb)
827{
828 const struct inet6_protocol *ops;
829 struct sk_buff **pp = NULL;
830 struct sk_buff *p;
831 struct ipv6hdr *iph;
832 unsigned int nlen;
833 unsigned int hlen;
834 unsigned int off;
835 int flush = 1;
836 int proto;
837 __wsum csum;
838
839 off = skb_gro_offset(skb);
840 hlen = off + sizeof(*iph);
841 iph = skb_gro_header_fast(skb, off);
842 if (skb_gro_header_hard(skb, hlen)) {
843 iph = skb_gro_header_slow(skb, hlen, off);
844 if (unlikely(!iph))
845 goto out;
846 }
847
848 skb_gro_pull(skb, sizeof(*iph));
849 skb_set_transport_header(skb, skb_gro_offset(skb));
850
851 flush += ntohs(iph->payload_len) != skb_gro_len(skb);
852
853 rcu_read_lock();
854 proto = iph->nexthdr;
855 ops = rcu_dereference(inet6_protos[proto]);
856 if (!ops || !ops->gro_receive) {
857 __pskb_pull(skb, skb_gro_offset(skb));
858 proto = ipv6_gso_pull_exthdrs(skb, proto);
859 skb_gro_pull(skb, -skb_transport_offset(skb));
860 skb_reset_transport_header(skb);
861 __skb_push(skb, skb_gro_offset(skb));
862
863 ops = rcu_dereference(inet6_protos[proto]);
864 if (!ops || !ops->gro_receive)
865 goto out_unlock;
866
867 iph = ipv6_hdr(skb);
868 }
869
870 NAPI_GRO_CB(skb)->proto = proto;
871
872 flush--;
873 nlen = skb_network_header_len(skb);
874
875 for (p = *head; p; p = p->next) {
876 const struct ipv6hdr *iph2;
877 __be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */
878
879 if (!NAPI_GRO_CB(p)->same_flow)
880 continue;
881
882 iph2 = ipv6_hdr(p);
883 first_word = *(__be32 *)iph ^ *(__be32 *)iph2 ;
884
885 /* All fields must match except length and Traffic Class. */
886 if (nlen != skb_network_header_len(p) ||
887 (first_word & htonl(0xF00FFFFF)) ||
888 memcmp(&iph->nexthdr, &iph2->nexthdr,
889 nlen - offsetof(struct ipv6hdr, nexthdr))) {
890 NAPI_GRO_CB(p)->same_flow = 0;
891 continue;
892 }
893 /* flush if Traffic Class fields are different */
894 NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
895 NAPI_GRO_CB(p)->flush |= flush;
896 }
897
898 NAPI_GRO_CB(skb)->flush |= flush;
899
900 csum = skb->csum;
901 skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
902
903 pp = ops->gro_receive(head, skb);
904
905 skb->csum = csum;
906
907out_unlock:
908 rcu_read_unlock();
909
910out:
911 NAPI_GRO_CB(skb)->flush |= flush;
912
913 return pp;
914}
915
916static int ipv6_gro_complete(struct sk_buff *skb)
917{
918 const struct inet6_protocol *ops;
919 struct ipv6hdr *iph = ipv6_hdr(skb);
920 int err = -ENOSYS;
921
922 iph->payload_len = htons(skb->len - skb_network_offset(skb) -
923 sizeof(*iph));
924
925 rcu_read_lock();
926 ops = rcu_dereference(inet6_protos[NAPI_GRO_CB(skb)->proto]);
927 if (WARN_ON(!ops || !ops->gro_complete))
928 goto out_unlock;
929
930 err = ops->gro_complete(skb);
931
932out_unlock:
933 rcu_read_unlock();
934
935 return err;
936}
937
938static struct packet_type ipv6_packet_type __read_mostly = { 703static struct packet_type ipv6_packet_type __read_mostly = {
939 .type = cpu_to_be16(ETH_P_IPV6), 704 .type = cpu_to_be16(ETH_P_IPV6),
940 .func = ipv6_rcv, 705 .func = ipv6_rcv,
941 .gso_send_check = ipv6_gso_send_check,
942 .gso_segment = ipv6_gso_segment,
943 .gro_receive = ipv6_gro_receive,
944 .gro_complete = ipv6_gro_complete,
945}; 706};
946 707
947static int __init ipv6_packet_init(void) 708static int __init ipv6_packet_init(void)
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 7e6139508ee7..ecc35b93314b 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -44,7 +44,7 @@
44#define IPV6HDR_BASELEN 8 44#define IPV6HDR_BASELEN 8
45 45
46struct tmp_ext { 46struct tmp_ext {
47#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 47#if IS_ENABLED(CONFIG_IPV6_MIP6)
48 struct in6_addr saddr; 48 struct in6_addr saddr;
49#endif 49#endif
50 struct in6_addr daddr; 50 struct in6_addr daddr;
@@ -152,7 +152,7 @@ bad:
152 return false; 152 return false;
153} 153}
154 154
155#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 155#if IS_ENABLED(CONFIG_IPV6_MIP6)
156/** 156/**
157 * ipv6_rearrange_destopt - rearrange IPv6 destination options header 157 * ipv6_rearrange_destopt - rearrange IPv6 destination options header
158 * @iph: IPv6 header 158 * @iph: IPv6 header
@@ -320,7 +320,7 @@ static void ah6_output_done(struct crypto_async_request *base, int err)
320 memcpy(top_iph, iph_base, IPV6HDR_BASELEN); 320 memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
321 321
322 if (extlen) { 322 if (extlen) {
323#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 323#if IS_ENABLED(CONFIG_IPV6_MIP6)
324 memcpy(&top_iph->saddr, iph_ext, extlen); 324 memcpy(&top_iph->saddr, iph_ext, extlen);
325#else 325#else
326 memcpy(&top_iph->daddr, iph_ext, extlen); 326 memcpy(&top_iph->daddr, iph_ext, extlen);
@@ -385,7 +385,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
385 memcpy(iph_base, top_iph, IPV6HDR_BASELEN); 385 memcpy(iph_base, top_iph, IPV6HDR_BASELEN);
386 386
387 if (extlen) { 387 if (extlen) {
388#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 388#if IS_ENABLED(CONFIG_IPV6_MIP6)
389 memcpy(iph_ext, &top_iph->saddr, extlen); 389 memcpy(iph_ext, &top_iph->saddr, extlen);
390#else 390#else
391 memcpy(iph_ext, &top_iph->daddr, extlen); 391 memcpy(iph_ext, &top_iph->daddr, extlen);
@@ -434,7 +434,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
434 memcpy(top_iph, iph_base, IPV6HDR_BASELEN); 434 memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
435 435
436 if (extlen) { 436 if (extlen) {
437#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 437#if IS_ENABLED(CONFIG_IPV6_MIP6)
438 memcpy(&top_iph->saddr, iph_ext, extlen); 438 memcpy(&top_iph->saddr, iph_ext, extlen);
439#else 439#else
440 memcpy(&top_iph->daddr, iph_ext, extlen); 440 memcpy(&top_iph->daddr, iph_ext, extlen);
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index cdf02be5f191..757a810d8f15 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -64,7 +64,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
64 int ishost = !net->ipv6.devconf_all->forwarding; 64 int ishost = !net->ipv6.devconf_all->forwarding;
65 int err = 0; 65 int err = 0;
66 66
67 if (!capable(CAP_NET_ADMIN)) 67 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
68 return -EPERM; 68 return -EPERM;
69 if (ipv6_addr_is_multicast(addr)) 69 if (ipv6_addr_is_multicast(addr))
70 return -EINVAL; 70 return -EINVAL;
@@ -84,7 +84,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
84 rt = rt6_lookup(net, addr, NULL, 0, 0); 84 rt = rt6_lookup(net, addr, NULL, 0, 0);
85 if (rt) { 85 if (rt) {
86 dev = rt->dst.dev; 86 dev = rt->dst.dev;
87 dst_release(&rt->dst); 87 ip6_rt_put(rt);
88 } else if (ishost) { 88 } else if (ishost) {
89 err = -EADDRNOTAVAIL; 89 err = -EADDRNOTAVAIL;
90 goto error; 90 goto error;
@@ -189,6 +189,9 @@ void ipv6_sock_ac_close(struct sock *sk)
189 struct net *net = sock_net(sk); 189 struct net *net = sock_net(sk);
190 int prev_index; 190 int prev_index;
191 191
192 if (!np->ipv6_ac_list)
193 return;
194
192 write_lock_bh(&ipv6_sk_ac_lock); 195 write_lock_bh(&ipv6_sk_ac_lock);
193 pac = np->ipv6_ac_list; 196 pac = np->ipv6_ac_list;
194 np->ipv6_ac_list = NULL; 197 np->ipv6_ac_list = NULL;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index be2b67d631e5..8edf2601065a 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -701,7 +701,7 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
701 err = -EINVAL; 701 err = -EINVAL;
702 goto exit_f; 702 goto exit_f;
703 } 703 }
704 if (!capable(CAP_NET_RAW)) { 704 if (!ns_capable(net->user_ns, CAP_NET_RAW)) {
705 err = -EPERM; 705 err = -EPERM;
706 goto exit_f; 706 goto exit_f;
707 } 707 }
@@ -721,7 +721,7 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
721 err = -EINVAL; 721 err = -EINVAL;
722 goto exit_f; 722 goto exit_f;
723 } 723 }
724 if (!capable(CAP_NET_RAW)) { 724 if (!ns_capable(net->user_ns, CAP_NET_RAW)) {
725 err = -EPERM; 725 err = -EPERM;
726 goto exit_f; 726 goto exit_f;
727 } 727 }
@@ -746,7 +746,7 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
746 err = -EINVAL; 746 err = -EINVAL;
747 goto exit_f; 747 goto exit_f;
748 } 748 }
749 if (!capable(CAP_NET_RAW)) { 749 if (!ns_capable(net->user_ns, CAP_NET_RAW)) {
750 err = -EPERM; 750 err = -EPERM;
751 goto exit_f; 751 goto exit_f;
752 } 752 }
@@ -769,7 +769,7 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
769 rthdr = (struct ipv6_rt_hdr *)CMSG_DATA(cmsg); 769 rthdr = (struct ipv6_rt_hdr *)CMSG_DATA(cmsg);
770 770
771 switch (rthdr->type) { 771 switch (rthdr->type) {
772#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 772#if IS_ENABLED(CONFIG_IPV6_MIP6)
773 case IPV6_SRCRT_TYPE_2: 773 case IPV6_SRCRT_TYPE_2:
774 if (rthdr->hdrlen != 2 || 774 if (rthdr->hdrlen != 2 ||
775 rthdr->segments_left != 1) { 775 rthdr->segments_left != 1) {
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index fa3d9c328092..473f628f9f20 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -43,56 +43,12 @@
43#include <net/ndisc.h> 43#include <net/ndisc.h>
44#include <net/ip6_route.h> 44#include <net/ip6_route.h>
45#include <net/addrconf.h> 45#include <net/addrconf.h>
46#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 46#if IS_ENABLED(CONFIG_IPV6_MIP6)
47#include <net/xfrm.h> 47#include <net/xfrm.h>
48#endif 48#endif
49 49
50#include <asm/uaccess.h> 50#include <asm/uaccess.h>
51 51
52int ipv6_find_tlv(struct sk_buff *skb, int offset, int type)
53{
54 const unsigned char *nh = skb_network_header(skb);
55 int packet_len = skb->tail - skb->network_header;
56 struct ipv6_opt_hdr *hdr;
57 int len;
58
59 if (offset + 2 > packet_len)
60 goto bad;
61 hdr = (struct ipv6_opt_hdr *)(nh + offset);
62 len = ((hdr->hdrlen + 1) << 3);
63
64 if (offset + len > packet_len)
65 goto bad;
66
67 offset += 2;
68 len -= 2;
69
70 while (len > 0) {
71 int opttype = nh[offset];
72 int optlen;
73
74 if (opttype == type)
75 return offset;
76
77 switch (opttype) {
78 case IPV6_TLV_PAD1:
79 optlen = 1;
80 break;
81 default:
82 optlen = nh[offset + 1] + 2;
83 if (optlen > len)
84 goto bad;
85 break;
86 }
87 offset += optlen;
88 len -= optlen;
89 }
90 /* not_found */
91 bad:
92 return -1;
93}
94EXPORT_SYMBOL_GPL(ipv6_find_tlv);
95
96/* 52/*
97 * Parsing tlv encoded headers. 53 * Parsing tlv encoded headers.
98 * 54 *
@@ -224,7 +180,7 @@ bad:
224 Destination options header. 180 Destination options header.
225 *****************************/ 181 *****************************/
226 182
227#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 183#if IS_ENABLED(CONFIG_IPV6_MIP6)
228static bool ipv6_dest_hao(struct sk_buff *skb, int optoff) 184static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
229{ 185{
230 struct ipv6_destopt_hao *hao; 186 struct ipv6_destopt_hao *hao;
@@ -288,7 +244,7 @@ static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
288#endif 244#endif
289 245
290static const struct tlvtype_proc tlvprocdestopt_lst[] = { 246static const struct tlvtype_proc tlvprocdestopt_lst[] = {
291#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 247#if IS_ENABLED(CONFIG_IPV6_MIP6)
292 { 248 {
293 .type = IPV6_TLV_HAO, 249 .type = IPV6_TLV_HAO,
294 .func = ipv6_dest_hao, 250 .func = ipv6_dest_hao,
@@ -300,7 +256,7 @@ static const struct tlvtype_proc tlvprocdestopt_lst[] = {
300static int ipv6_destopt_rcv(struct sk_buff *skb) 256static int ipv6_destopt_rcv(struct sk_buff *skb)
301{ 257{
302 struct inet6_skb_parm *opt = IP6CB(skb); 258 struct inet6_skb_parm *opt = IP6CB(skb);
303#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 259#if IS_ENABLED(CONFIG_IPV6_MIP6)
304 __u16 dstbuf; 260 __u16 dstbuf;
305#endif 261#endif
306 struct dst_entry *dst = skb_dst(skb); 262 struct dst_entry *dst = skb_dst(skb);
@@ -315,14 +271,14 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
315 } 271 }
316 272
317 opt->lastopt = opt->dst1 = skb_network_header_len(skb); 273 opt->lastopt = opt->dst1 = skb_network_header_len(skb);
318#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 274#if IS_ENABLED(CONFIG_IPV6_MIP6)
319 dstbuf = opt->dst1; 275 dstbuf = opt->dst1;
320#endif 276#endif
321 277
322 if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) { 278 if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) {
323 skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3; 279 skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
324 opt = IP6CB(skb); 280 opt = IP6CB(skb);
325#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 281#if IS_ENABLED(CONFIG_IPV6_MIP6)
326 opt->nhoff = dstbuf; 282 opt->nhoff = dstbuf;
327#else 283#else
328 opt->nhoff = opt->dst1; 284 opt->nhoff = opt->dst1;
@@ -378,7 +334,7 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb)
378looped_back: 334looped_back:
379 if (hdr->segments_left == 0) { 335 if (hdr->segments_left == 0) {
380 switch (hdr->type) { 336 switch (hdr->type) {
381#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 337#if IS_ENABLED(CONFIG_IPV6_MIP6)
382 case IPV6_SRCRT_TYPE_2: 338 case IPV6_SRCRT_TYPE_2:
383 /* Silently discard type 2 header unless it was 339 /* Silently discard type 2 header unless it was
384 * processed by own 340 * processed by own
@@ -404,7 +360,7 @@ looped_back:
404 } 360 }
405 361
406 switch (hdr->type) { 362 switch (hdr->type) {
407#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 363#if IS_ENABLED(CONFIG_IPV6_MIP6)
408 case IPV6_SRCRT_TYPE_2: 364 case IPV6_SRCRT_TYPE_2:
409 if (accept_source_route < 0) 365 if (accept_source_route < 0)
410 goto unknown_rh; 366 goto unknown_rh;
@@ -461,7 +417,7 @@ looped_back:
461 addr += i - 1; 417 addr += i - 1;
462 418
463 switch (hdr->type) { 419 switch (hdr->type) {
464#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 420#if IS_ENABLED(CONFIG_IPV6_MIP6)
465 case IPV6_SRCRT_TYPE_2: 421 case IPV6_SRCRT_TYPE_2:
466 if (xfrm6_input_addr(skb, (xfrm_address_t *)addr, 422 if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
467 (xfrm_address_t *)&ipv6_hdr(skb)->saddr, 423 (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
@@ -528,12 +484,12 @@ unknown_rh:
528 484
529static const struct inet6_protocol rthdr_protocol = { 485static const struct inet6_protocol rthdr_protocol = {
530 .handler = ipv6_rthdr_rcv, 486 .handler = ipv6_rthdr_rcv,
531 .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR, 487 .flags = INET6_PROTO_NOPOLICY,
532}; 488};
533 489
534static const struct inet6_protocol destopt_protocol = { 490static const struct inet6_protocol destopt_protocol = {
535 .handler = ipv6_destopt_rcv, 491 .handler = ipv6_destopt_rcv,
536 .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR, 492 .flags = INET6_PROTO_NOPOLICY,
537}; 493};
538 494
539static const struct inet6_protocol nodata_protocol = { 495static const struct inet6_protocol nodata_protocol = {
@@ -559,10 +515,10 @@ int __init ipv6_exthdrs_init(void)
559 515
560out: 516out:
561 return ret; 517 return ret;
562out_rthdr:
563 inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
564out_destopt: 518out_destopt:
565 inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS); 519 inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
520out_rthdr:
521 inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
566 goto out; 522 goto out;
567}; 523};
568 524
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index f73d59a14131..c5e83fae4df4 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -111,3 +111,171 @@ int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp,
111 return start; 111 return start;
112} 112}
113EXPORT_SYMBOL(ipv6_skip_exthdr); 113EXPORT_SYMBOL(ipv6_skip_exthdr);
114
115int ipv6_find_tlv(struct sk_buff *skb, int offset, int type)
116{
117 const unsigned char *nh = skb_network_header(skb);
118 int packet_len = skb->tail - skb->network_header;
119 struct ipv6_opt_hdr *hdr;
120 int len;
121
122 if (offset + 2 > packet_len)
123 goto bad;
124 hdr = (struct ipv6_opt_hdr *)(nh + offset);
125 len = ((hdr->hdrlen + 1) << 3);
126
127 if (offset + len > packet_len)
128 goto bad;
129
130 offset += 2;
131 len -= 2;
132
133 while (len > 0) {
134 int opttype = nh[offset];
135 int optlen;
136
137 if (opttype == type)
138 return offset;
139
140 switch (opttype) {
141 case IPV6_TLV_PAD1:
142 optlen = 1;
143 break;
144 default:
145 optlen = nh[offset + 1] + 2;
146 if (optlen > len)
147 goto bad;
148 break;
149 }
150 offset += optlen;
151 len -= optlen;
152 }
153 /* not_found */
154 bad:
155 return -1;
156}
157EXPORT_SYMBOL_GPL(ipv6_find_tlv);
158
159/*
160 * find the offset to specified header or the protocol number of last header
161 * if target < 0. "last header" is transport protocol header, ESP, or
162 * "No next header".
163 *
164 * Note that *offset is used as input/output parameter. an if it is not zero,
165 * then it must be a valid offset to an inner IPv6 header. This can be used
166 * to explore inner IPv6 header, eg. ICMPv6 error messages.
167 *
168 * If target header is found, its offset is set in *offset and return protocol
169 * number. Otherwise, return -1.
170 *
171 * If the first fragment doesn't contain the final protocol header or
172 * NEXTHDR_NONE it is considered invalid.
173 *
174 * Note that non-1st fragment is special case that "the protocol number
175 * of last header" is "next header" field in Fragment header. In this case,
176 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
177 * isn't NULL.
178 *
179 * if flags is not NULL and it's a fragment, then the frag flag
180 * IP6_FH_F_FRAG will be set. If it's an AH header, the
181 * IP6_FH_F_AUTH flag is set and target < 0, then this function will
182 * stop at the AH header. If IP6_FH_F_SKIP_RH flag was passed, then this
183 * function will skip all those routing headers, where segements_left was 0.
184 */
185int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
186 int target, unsigned short *fragoff, int *flags)
187{
188 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
189 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
190 unsigned int len;
191 bool found;
192
193 if (fragoff)
194 *fragoff = 0;
195
196 if (*offset) {
197 struct ipv6hdr _ip6, *ip6;
198
199 ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6);
200 if (!ip6 || (ip6->version != 6)) {
201 printk(KERN_ERR "IPv6 header not found\n");
202 return -EBADMSG;
203 }
204 start = *offset + sizeof(struct ipv6hdr);
205 nexthdr = ip6->nexthdr;
206 }
207 len = skb->len - start;
208
209 do {
210 struct ipv6_opt_hdr _hdr, *hp;
211 unsigned int hdrlen;
212 found = (nexthdr == target);
213
214 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
215 if (target < 0)
216 break;
217 return -ENOENT;
218 }
219
220 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
221 if (hp == NULL)
222 return -EBADMSG;
223
224 if (nexthdr == NEXTHDR_ROUTING) {
225 struct ipv6_rt_hdr _rh, *rh;
226
227 rh = skb_header_pointer(skb, start, sizeof(_rh),
228 &_rh);
229 if (rh == NULL)
230 return -EBADMSG;
231
232 if (flags && (*flags & IP6_FH_F_SKIP_RH) &&
233 rh->segments_left == 0)
234 found = false;
235 }
236
237 if (nexthdr == NEXTHDR_FRAGMENT) {
238 unsigned short _frag_off;
239 __be16 *fp;
240
241 if (flags) /* Indicate that this is a fragment */
242 *flags |= IP6_FH_F_FRAG;
243 fp = skb_header_pointer(skb,
244 start+offsetof(struct frag_hdr,
245 frag_off),
246 sizeof(_frag_off),
247 &_frag_off);
248 if (fp == NULL)
249 return -EBADMSG;
250
251 _frag_off = ntohs(*fp) & ~0x7;
252 if (_frag_off) {
253 if (target < 0 &&
254 ((!ipv6_ext_hdr(hp->nexthdr)) ||
255 hp->nexthdr == NEXTHDR_NONE)) {
256 if (fragoff)
257 *fragoff = _frag_off;
258 return hp->nexthdr;
259 }
260 return -ENOENT;
261 }
262 hdrlen = 8;
263 } else if (nexthdr == NEXTHDR_AUTH) {
264 if (flags && (*flags & IP6_FH_F_AUTH) && (target < 0))
265 break;
266 hdrlen = (hp->hdrlen + 2) << 2;
267 } else
268 hdrlen = ipv6_optlen(hp);
269
270 if (!found) {
271 nexthdr = hp->nexthdr;
272 len -= hdrlen;
273 start += hdrlen;
274 }
275 } while (!found);
276
277 *offset = start;
278 return nexthdr;
279}
280EXPORT_SYMBOL(ipv6_find_hdr);
281
diff --git a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c
new file mode 100644
index 000000000000..cf77f3abfd06
--- /dev/null
+++ b/net/ipv6/exthdrs_offload.c
@@ -0,0 +1,41 @@
1/*
2 * IPV6 GSO/GRO offload support
3 * Linux INET6 implementation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 *
10 * IPV6 Extension Header GSO/GRO support
11 */
12#include <net/protocol.h>
13#include "ip6_offload.h"
14
15static const struct net_offload rthdr_offload = {
16 .flags = INET6_PROTO_GSO_EXTHDR,
17};
18
19static const struct net_offload dstopt_offload = {
20 .flags = INET6_PROTO_GSO_EXTHDR,
21};
22
23int __init ipv6_exthdrs_offload_init(void)
24{
25 int ret;
26
27 ret = inet6_add_offload(&rthdr_offload, IPPROTO_ROUTING);
28 if (!ret)
29 goto out;
30
31 ret = inet6_add_offload(&dstopt_offload, IPPROTO_DSTOPTS);
32 if (!ret)
33 goto out_rt;
34
35out:
36 return ret;
37
38out_rt:
39 inet_del_offload(&rthdr_offload, IPPROTO_ROUTING);
40 goto out;
41}
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index d9fb9110f607..2e1a432867c0 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -100,7 +100,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
100 goto out; 100 goto out;
101 } 101 }
102again: 102again:
103 dst_release(&rt->dst); 103 ip6_rt_put(rt);
104 rt = NULL; 104 rt = NULL;
105 goto out; 105 goto out;
106 106
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 24d69dbca4d6..b4a9fd51dae7 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -280,7 +280,7 @@ static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, st
280 return 0; 280 return 0;
281} 281}
282 282
283#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 283#if IS_ENABLED(CONFIG_IPV6_MIP6)
284static void mip6_addr_swap(struct sk_buff *skb) 284static void mip6_addr_swap(struct sk_buff *skb)
285{ 285{
286 struct ipv6hdr *iph = ipv6_hdr(skb); 286 struct ipv6hdr *iph = ipv6_hdr(skb);
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index c4f934176cab..30647857a375 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -252,6 +252,7 @@ struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
252 return NULL; 252 return NULL;
253 dst->ops->update_pmtu(dst, sk, NULL, mtu); 253 dst->ops->update_pmtu(dst, sk, NULL, mtu);
254 254
255 return inet6_csk_route_socket(sk, &fl6); 255 dst = inet6_csk_route_socket(sk, &fl6);
256 return IS_ERR(dst) ? NULL : dst;
256} 257}
257EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu); 258EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu);
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 73f1a00a96af..dea17fd28e50 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -87,11 +87,13 @@ struct sock *__inet6_lookup_established(struct net *net,
87 rcu_read_lock(); 87 rcu_read_lock();
88begin: 88begin:
89 sk_nulls_for_each_rcu(sk, node, &head->chain) { 89 sk_nulls_for_each_rcu(sk, node, &head->chain) {
90 /* For IPV6 do the cheaper port and family tests first. */ 90 if (sk->sk_hash != hash)
91 if (INET6_MATCH(sk, net, hash, saddr, daddr, ports, dif)) { 91 continue;
92 if (likely(INET6_MATCH(sk, net, saddr, daddr, ports, dif))) {
92 if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) 93 if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
93 goto begintw; 94 goto begintw;
94 if (!INET6_MATCH(sk, net, hash, saddr, daddr, ports, dif)) { 95 if (unlikely(!INET6_MATCH(sk, net, saddr, daddr,
96 ports, dif))) {
95 sock_put(sk); 97 sock_put(sk);
96 goto begin; 98 goto begin;
97 } 99 }
@@ -104,12 +106,16 @@ begin:
104begintw: 106begintw:
105 /* Must check for a TIME_WAIT'er before going to listener hash. */ 107 /* Must check for a TIME_WAIT'er before going to listener hash. */
106 sk_nulls_for_each_rcu(sk, node, &head->twchain) { 108 sk_nulls_for_each_rcu(sk, node, &head->twchain) {
107 if (INET6_TW_MATCH(sk, net, hash, saddr, daddr, ports, dif)) { 109 if (sk->sk_hash != hash)
110 continue;
111 if (likely(INET6_TW_MATCH(sk, net, saddr, daddr,
112 ports, dif))) {
108 if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) { 113 if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) {
109 sk = NULL; 114 sk = NULL;
110 goto out; 115 goto out;
111 } 116 }
112 if (!INET6_TW_MATCH(sk, net, hash, saddr, daddr, ports, dif)) { 117 if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr,
118 ports, dif))) {
113 sock_put(sk); 119 sock_put(sk);
114 goto begintw; 120 goto begintw;
115 } 121 }
@@ -236,9 +242,12 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
236 242
237 /* Check TIME-WAIT sockets first. */ 243 /* Check TIME-WAIT sockets first. */
238 sk_nulls_for_each(sk2, node, &head->twchain) { 244 sk_nulls_for_each(sk2, node, &head->twchain) {
239 tw = inet_twsk(sk2); 245 if (sk2->sk_hash != hash)
246 continue;
240 247
241 if (INET6_TW_MATCH(sk2, net, hash, saddr, daddr, ports, dif)) { 248 if (likely(INET6_TW_MATCH(sk2, net, saddr, daddr,
249 ports, dif))) {
250 tw = inet_twsk(sk2);
242 if (twsk_unique(sk, sk2, twp)) 251 if (twsk_unique(sk, sk2, twp))
243 goto unique; 252 goto unique;
244 else 253 else
@@ -249,7 +258,9 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
249 258
250 /* And established part... */ 259 /* And established part... */
251 sk_nulls_for_each(sk2, node, &head->chain) { 260 sk_nulls_for_each(sk2, node, &head->chain) {
252 if (INET6_MATCH(sk2, net, hash, saddr, daddr, ports, dif)) 261 if (sk2->sk_hash != hash)
262 continue;
263 if (likely(INET6_MATCH(sk2, net, saddr, daddr, ports, dif)))
253 goto not_unique; 264 goto not_unique;
254 } 265 }
255 266
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 24995a93ef8c..710cafd2e1a9 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -672,6 +672,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
672 iter->rt6i_idev == rt->rt6i_idev && 672 iter->rt6i_idev == rt->rt6i_idev &&
673 ipv6_addr_equal(&iter->rt6i_gateway, 673 ipv6_addr_equal(&iter->rt6i_gateway,
674 &rt->rt6i_gateway)) { 674 &rt->rt6i_gateway)) {
675 if (rt->rt6i_nsiblings)
676 rt->rt6i_nsiblings = 0;
675 if (!(iter->rt6i_flags & RTF_EXPIRES)) 677 if (!(iter->rt6i_flags & RTF_EXPIRES))
676 return -EEXIST; 678 return -EEXIST;
677 if (!(rt->rt6i_flags & RTF_EXPIRES)) 679 if (!(rt->rt6i_flags & RTF_EXPIRES))
@@ -680,6 +682,21 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
680 rt6_set_expires(iter, rt->dst.expires); 682 rt6_set_expires(iter, rt->dst.expires);
681 return -EEXIST; 683 return -EEXIST;
682 } 684 }
685 /* If we have the same destination and the same metric,
686 * but not the same gateway, then the route we try to
687 * add is sibling to this route, increment our counter
688 * of siblings, and later we will add our route to the
689 * list.
690 * Only static routes (which don't have flag
691 * RTF_EXPIRES) are used for ECMPv6.
692 *
693 * To avoid long list, we only had siblings if the
694 * route have a gateway.
695 */
696 if (rt->rt6i_flags & RTF_GATEWAY &&
697 !(rt->rt6i_flags & RTF_EXPIRES) &&
698 !(iter->rt6i_flags & RTF_EXPIRES))
699 rt->rt6i_nsiblings++;
683 } 700 }
684 701
685 if (iter->rt6i_metric > rt->rt6i_metric) 702 if (iter->rt6i_metric > rt->rt6i_metric)
@@ -692,6 +709,35 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
692 if (ins == &fn->leaf) 709 if (ins == &fn->leaf)
693 fn->rr_ptr = NULL; 710 fn->rr_ptr = NULL;
694 711
712 /* Link this route to others same route. */
713 if (rt->rt6i_nsiblings) {
714 unsigned int rt6i_nsiblings;
715 struct rt6_info *sibling, *temp_sibling;
716
717 /* Find the first route that have the same metric */
718 sibling = fn->leaf;
719 while (sibling) {
720 if (sibling->rt6i_metric == rt->rt6i_metric) {
721 list_add_tail(&rt->rt6i_siblings,
722 &sibling->rt6i_siblings);
723 break;
724 }
725 sibling = sibling->dst.rt6_next;
726 }
727 /* For each sibling in the list, increment the counter of
728 * siblings. BUG() if counters does not match, list of siblings
729 * is broken!
730 */
731 rt6i_nsiblings = 0;
732 list_for_each_entry_safe(sibling, temp_sibling,
733 &rt->rt6i_siblings, rt6i_siblings) {
734 sibling->rt6i_nsiblings++;
735 BUG_ON(sibling->rt6i_nsiblings != rt->rt6i_nsiblings);
736 rt6i_nsiblings++;
737 }
738 BUG_ON(rt6i_nsiblings != rt->rt6i_nsiblings);
739 }
740
695 /* 741 /*
696 * insert node 742 * insert node
697 */ 743 */
@@ -1193,6 +1239,17 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
1193 if (fn->rr_ptr == rt) 1239 if (fn->rr_ptr == rt)
1194 fn->rr_ptr = NULL; 1240 fn->rr_ptr = NULL;
1195 1241
1242 /* Remove this entry from other siblings */
1243 if (rt->rt6i_nsiblings) {
1244 struct rt6_info *sibling, *next_sibling;
1245
1246 list_for_each_entry_safe(sibling, next_sibling,
1247 &rt->rt6i_siblings, rt6i_siblings)
1248 sibling->rt6i_nsiblings--;
1249 rt->rt6i_nsiblings = 0;
1250 list_del_init(&rt->rt6i_siblings);
1251 }
1252
1196 /* Adjust walkers */ 1253 /* Adjust walkers */
1197 read_lock(&fib6_walker_lock); 1254 read_lock(&fib6_walker_lock);
1198 FOR_WALKERS(w) { 1255 FOR_WALKERS(w) {
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 90bbefb57943..29124b7a04c8 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -519,7 +519,8 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
519 } 519 }
520 read_unlock_bh(&ip6_sk_fl_lock); 520 read_unlock_bh(&ip6_sk_fl_lock);
521 521
522 if (freq.flr_share == IPV6_FL_S_NONE && capable(CAP_NET_ADMIN)) { 522 if (freq.flr_share == IPV6_FL_S_NONE &&
523 ns_capable(net->user_ns, CAP_NET_ADMIN)) {
523 fl = fl_lookup(net, freq.flr_label); 524 fl = fl_lookup(net, freq.flr_label);
524 if (fl) { 525 if (fl) {
525 err = fl6_renew(fl, freq.flr_linger, freq.flr_expires); 526 err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index d5cb3c4e66f8..c727e4712751 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -109,21 +109,6 @@ static u32 HASH_ADDR(const struct in6_addr *addr)
109#define tunnels_r tunnels[2] 109#define tunnels_r tunnels[2]
110#define tunnels_l tunnels[1] 110#define tunnels_l tunnels[1]
111#define tunnels_wc tunnels[0] 111#define tunnels_wc tunnels[0]
112/*
113 * Locking : hash tables are protected by RCU and RTNL
114 */
115
116#define for_each_ip_tunnel_rcu(start) \
117 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
118
119/* often modified stats are per cpu, other are shared (netdev->stats) */
120struct pcpu_tstats {
121 u64 rx_packets;
122 u64 rx_bytes;
123 u64 tx_packets;
124 u64 tx_bytes;
125 struct u64_stats_sync syncp;
126};
127 112
128static struct rtnl_link_stats64 *ip6gre_get_stats64(struct net_device *dev, 113static struct rtnl_link_stats64 *ip6gre_get_stats64(struct net_device *dev,
129 struct rtnl_link_stats64 *tot) 114 struct rtnl_link_stats64 *tot)
@@ -181,7 +166,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
181 ARPHRD_ETHER : ARPHRD_IP6GRE; 166 ARPHRD_ETHER : ARPHRD_IP6GRE;
182 int score, cand_score = 4; 167 int score, cand_score = 4;
183 168
184 for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) { 169 for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
185 if (!ipv6_addr_equal(local, &t->parms.laddr) || 170 if (!ipv6_addr_equal(local, &t->parms.laddr) ||
186 !ipv6_addr_equal(remote, &t->parms.raddr) || 171 !ipv6_addr_equal(remote, &t->parms.raddr) ||
187 key != t->parms.i_key || 172 key != t->parms.i_key ||
@@ -206,7 +191,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
206 } 191 }
207 } 192 }
208 193
209 for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) { 194 for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
210 if (!ipv6_addr_equal(remote, &t->parms.raddr) || 195 if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
211 key != t->parms.i_key || 196 key != t->parms.i_key ||
212 !(t->dev->flags & IFF_UP)) 197 !(t->dev->flags & IFF_UP))
@@ -230,7 +215,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
230 } 215 }
231 } 216 }
232 217
233 for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) { 218 for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
234 if ((!ipv6_addr_equal(local, &t->parms.laddr) && 219 if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
235 (!ipv6_addr_equal(local, &t->parms.raddr) || 220 (!ipv6_addr_equal(local, &t->parms.raddr) ||
236 !ipv6_addr_is_multicast(local))) || 221 !ipv6_addr_is_multicast(local))) ||
@@ -256,7 +241,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
256 } 241 }
257 } 242 }
258 243
259 for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) { 244 for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
260 if (t->parms.i_key != key || 245 if (t->parms.i_key != key ||
261 !(t->dev->flags & IFF_UP)) 246 !(t->dev->flags & IFF_UP))
262 continue; 247 continue;
@@ -773,8 +758,6 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
773 skb_dst_set_noref(skb, dst); 758 skb_dst_set_noref(skb, dst);
774 } 759 }
775 760
776 skb->transport_header = skb->network_header;
777
778 proto = NEXTHDR_GRE; 761 proto = NEXTHDR_GRE;
779 if (encap_limit >= 0) { 762 if (encap_limit >= 0) {
780 init_tel_txopt(&opt, encap_limit); 763 init_tel_txopt(&opt, encap_limit);
@@ -783,6 +766,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
783 766
784 skb_push(skb, gre_hlen); 767 skb_push(skb, gre_hlen);
785 skb_reset_network_header(skb); 768 skb_reset_network_header(skb);
769 skb_set_transport_header(skb, sizeof(*ipv6h));
786 770
787 /* 771 /*
788 * Push down and install the IP header. 772 * Push down and install the IP header.
@@ -1069,7 +1053,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
1069 dev->mtu = IPV6_MIN_MTU; 1053 dev->mtu = IPV6_MIN_MTU;
1070 } 1054 }
1071 } 1055 }
1072 dst_release(&rt->dst); 1056 ip6_rt_put(rt);
1073 } 1057 }
1074 1058
1075 t->hlen = addend; 1059 t->hlen = addend;
@@ -1161,7 +1145,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
1161 case SIOCADDTUNNEL: 1145 case SIOCADDTUNNEL:
1162 case SIOCCHGTUNNEL: 1146 case SIOCCHGTUNNEL:
1163 err = -EPERM; 1147 err = -EPERM;
1164 if (!capable(CAP_NET_ADMIN)) 1148 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1165 goto done; 1149 goto done;
1166 1150
1167 err = -EFAULT; 1151 err = -EFAULT;
@@ -1209,7 +1193,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
1209 1193
1210 case SIOCDELTUNNEL: 1194 case SIOCDELTUNNEL:
1211 err = -EPERM; 1195 err = -EPERM;
1212 if (!capable(CAP_NET_ADMIN)) 1196 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1213 goto done; 1197 goto done;
1214 1198
1215 if (dev == ign->fb_tunnel_dev) { 1199 if (dev == ign->fb_tunnel_dev) {
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
new file mode 100644
index 000000000000..f26f0da7f095
--- /dev/null
+++ b/net/ipv6/ip6_offload.c
@@ -0,0 +1,282 @@
1/*
2 * IPV6 GSO/GRO offload support
3 * Linux INET6 implementation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10
11#include <linux/kernel.h>
12#include <linux/socket.h>
13#include <linux/netdevice.h>
14#include <linux/skbuff.h>
15#include <linux/printk.h>
16
17#include <net/protocol.h>
18#include <net/ipv6.h>
19
20#include "ip6_offload.h"
21
22static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
23{
24 const struct net_offload *ops = NULL;
25
26 for (;;) {
27 struct ipv6_opt_hdr *opth;
28 int len;
29
30 if (proto != NEXTHDR_HOP) {
31 ops = rcu_dereference(inet6_offloads[proto]);
32
33 if (unlikely(!ops))
34 break;
35
36 if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
37 break;
38 }
39
40 if (unlikely(!pskb_may_pull(skb, 8)))
41 break;
42
43 opth = (void *)skb->data;
44 len = ipv6_optlen(opth);
45
46 if (unlikely(!pskb_may_pull(skb, len)))
47 break;
48
49 proto = opth->nexthdr;
50 __skb_pull(skb, len);
51 }
52
53 return proto;
54}
55
56static int ipv6_gso_send_check(struct sk_buff *skb)
57{
58 const struct ipv6hdr *ipv6h;
59 const struct net_offload *ops;
60 int err = -EINVAL;
61
62 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
63 goto out;
64
65 ipv6h = ipv6_hdr(skb);
66 __skb_pull(skb, sizeof(*ipv6h));
67 err = -EPROTONOSUPPORT;
68
69 rcu_read_lock();
70 ops = rcu_dereference(inet6_offloads[
71 ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
72
73 if (likely(ops && ops->callbacks.gso_send_check)) {
74 skb_reset_transport_header(skb);
75 err = ops->callbacks.gso_send_check(skb);
76 }
77 rcu_read_unlock();
78
79out:
80 return err;
81}
82
83static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
84 netdev_features_t features)
85{
86 struct sk_buff *segs = ERR_PTR(-EINVAL);
87 struct ipv6hdr *ipv6h;
88 const struct net_offload *ops;
89 int proto;
90 struct frag_hdr *fptr;
91 unsigned int unfrag_ip6hlen;
92 u8 *prevhdr;
93 int offset = 0;
94
95 if (!(features & NETIF_F_V6_CSUM))
96 features &= ~NETIF_F_SG;
97
98 if (unlikely(skb_shinfo(skb)->gso_type &
99 ~(SKB_GSO_UDP |
100 SKB_GSO_DODGY |
101 SKB_GSO_TCP_ECN |
102 SKB_GSO_TCPV6 |
103 0)))
104 goto out;
105
106 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
107 goto out;
108
109 ipv6h = ipv6_hdr(skb);
110 __skb_pull(skb, sizeof(*ipv6h));
111 segs = ERR_PTR(-EPROTONOSUPPORT);
112
113 proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
114 rcu_read_lock();
115 ops = rcu_dereference(inet6_offloads[proto]);
116 if (likely(ops && ops->callbacks.gso_segment)) {
117 skb_reset_transport_header(skb);
118 segs = ops->callbacks.gso_segment(skb, features);
119 }
120 rcu_read_unlock();
121
122 if (IS_ERR(segs))
123 goto out;
124
125 for (skb = segs; skb; skb = skb->next) {
126 ipv6h = ipv6_hdr(skb);
127 ipv6h->payload_len = htons(skb->len - skb->mac_len -
128 sizeof(*ipv6h));
129 if (proto == IPPROTO_UDP) {
130 unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
131 fptr = (struct frag_hdr *)(skb_network_header(skb) +
132 unfrag_ip6hlen);
133 fptr->frag_off = htons(offset);
134 if (skb->next != NULL)
135 fptr->frag_off |= htons(IP6_MF);
136 offset += (ntohs(ipv6h->payload_len) -
137 sizeof(struct frag_hdr));
138 }
139 }
140
141out:
142 return segs;
143}
144
145static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
146 struct sk_buff *skb)
147{
148 const struct net_offload *ops;
149 struct sk_buff **pp = NULL;
150 struct sk_buff *p;
151 struct ipv6hdr *iph;
152 unsigned int nlen;
153 unsigned int hlen;
154 unsigned int off;
155 int flush = 1;
156 int proto;
157 __wsum csum;
158
159 off = skb_gro_offset(skb);
160 hlen = off + sizeof(*iph);
161 iph = skb_gro_header_fast(skb, off);
162 if (skb_gro_header_hard(skb, hlen)) {
163 iph = skb_gro_header_slow(skb, hlen, off);
164 if (unlikely(!iph))
165 goto out;
166 }
167
168 skb_gro_pull(skb, sizeof(*iph));
169 skb_set_transport_header(skb, skb_gro_offset(skb));
170
171 flush += ntohs(iph->payload_len) != skb_gro_len(skb);
172
173 rcu_read_lock();
174 proto = iph->nexthdr;
175 ops = rcu_dereference(inet6_offloads[proto]);
176 if (!ops || !ops->callbacks.gro_receive) {
177 __pskb_pull(skb, skb_gro_offset(skb));
178 proto = ipv6_gso_pull_exthdrs(skb, proto);
179 skb_gro_pull(skb, -skb_transport_offset(skb));
180 skb_reset_transport_header(skb);
181 __skb_push(skb, skb_gro_offset(skb));
182
183 ops = rcu_dereference(inet6_offloads[proto]);
184 if (!ops || !ops->callbacks.gro_receive)
185 goto out_unlock;
186
187 iph = ipv6_hdr(skb);
188 }
189
190 NAPI_GRO_CB(skb)->proto = proto;
191
192 flush--;
193 nlen = skb_network_header_len(skb);
194
195 for (p = *head; p; p = p->next) {
196 const struct ipv6hdr *iph2;
197 __be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */
198
199 if (!NAPI_GRO_CB(p)->same_flow)
200 continue;
201
202 iph2 = ipv6_hdr(p);
203 first_word = *(__be32 *)iph ^ *(__be32 *)iph2 ;
204
205 /* All fields must match except length and Traffic Class. */
206 if (nlen != skb_network_header_len(p) ||
207 (first_word & htonl(0xF00FFFFF)) ||
208 memcmp(&iph->nexthdr, &iph2->nexthdr,
209 nlen - offsetof(struct ipv6hdr, nexthdr))) {
210 NAPI_GRO_CB(p)->same_flow = 0;
211 continue;
212 }
213 /* flush if Traffic Class fields are different */
214 NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
215 NAPI_GRO_CB(p)->flush |= flush;
216 }
217
218 NAPI_GRO_CB(skb)->flush |= flush;
219
220 csum = skb->csum;
221 skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
222
223 pp = ops->callbacks.gro_receive(head, skb);
224
225 skb->csum = csum;
226
227out_unlock:
228 rcu_read_unlock();
229
230out:
231 NAPI_GRO_CB(skb)->flush |= flush;
232
233 return pp;
234}
235
236static int ipv6_gro_complete(struct sk_buff *skb)
237{
238 const struct net_offload *ops;
239 struct ipv6hdr *iph = ipv6_hdr(skb);
240 int err = -ENOSYS;
241
242 iph->payload_len = htons(skb->len - skb_network_offset(skb) -
243 sizeof(*iph));
244
245 rcu_read_lock();
246 ops = rcu_dereference(inet6_offloads[NAPI_GRO_CB(skb)->proto]);
247 if (WARN_ON(!ops || !ops->callbacks.gro_complete))
248 goto out_unlock;
249
250 err = ops->callbacks.gro_complete(skb);
251
252out_unlock:
253 rcu_read_unlock();
254
255 return err;
256}
257
258static struct packet_offload ipv6_packet_offload __read_mostly = {
259 .type = cpu_to_be16(ETH_P_IPV6),
260 .callbacks = {
261 .gso_send_check = ipv6_gso_send_check,
262 .gso_segment = ipv6_gso_segment,
263 .gro_receive = ipv6_gro_receive,
264 .gro_complete = ipv6_gro_complete,
265 },
266};
267
268static int __init ipv6_offload_init(void)
269{
270
271 if (tcpv6_offload_init() < 0)
272 pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
273 if (udp_offload_init() < 0)
274 pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
275 if (ipv6_exthdrs_offload_init() < 0)
276 pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);
277
278 dev_add_offload(&ipv6_packet_offload);
279 return 0;
280}
281
282fs_initcall(ipv6_offload_init);
diff --git a/net/ipv6/ip6_offload.h b/net/ipv6/ip6_offload.h
new file mode 100644
index 000000000000..2e155c651b35
--- /dev/null
+++ b/net/ipv6/ip6_offload.h
@@ -0,0 +1,18 @@
1/*
2 * IPV6 GSO/GRO offload support
3 * Linux INET6 implementation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10
11#ifndef __ip6_offload_h
12#define __ip6_offload_h
13
14int ipv6_exthdrs_offload_init(void);
15int udp_offload_init(void);
16int tcpv6_offload_init(void);
17
18#endif
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index aece3e792f84..5552d13ae92f 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -538,78 +538,12 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
538 to->tc_index = from->tc_index; 538 to->tc_index = from->tc_index;
539#endif 539#endif
540 nf_copy(to, from); 540 nf_copy(to, from);
541#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 541#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
542 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
543 to->nf_trace = from->nf_trace; 542 to->nf_trace = from->nf_trace;
544#endif 543#endif
545 skb_copy_secmark(to, from); 544 skb_copy_secmark(to, from);
546} 545}
547 546
548int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
549{
550 u16 offset = sizeof(struct ipv6hdr);
551 struct ipv6_opt_hdr *exthdr =
552 (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
553 unsigned int packet_len = skb->tail - skb->network_header;
554 int found_rhdr = 0;
555 *nexthdr = &ipv6_hdr(skb)->nexthdr;
556
557 while (offset + 1 <= packet_len) {
558
559 switch (**nexthdr) {
560
561 case NEXTHDR_HOP:
562 break;
563 case NEXTHDR_ROUTING:
564 found_rhdr = 1;
565 break;
566 case NEXTHDR_DEST:
567#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
568 if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
569 break;
570#endif
571 if (found_rhdr)
572 return offset;
573 break;
574 default :
575 return offset;
576 }
577
578 offset += ipv6_optlen(exthdr);
579 *nexthdr = &exthdr->nexthdr;
580 exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
581 offset);
582 }
583
584 return offset;
585}
586
587void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
588{
589 static atomic_t ipv6_fragmentation_id;
590 int old, new;
591
592 if (rt && !(rt->dst.flags & DST_NOPEER)) {
593 struct inet_peer *peer;
594 struct net *net;
595
596 net = dev_net(rt->dst.dev);
597 peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
598 if (peer) {
599 fhdr->identification = htonl(inet_getid(peer, 0));
600 inet_putpeer(peer);
601 return;
602 }
603 }
604 do {
605 old = atomic_read(&ipv6_fragmentation_id);
606 new = old + 1;
607 if (!new)
608 new = 1;
609 } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
610 fhdr->identification = htonl(new);
611}
612
613int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) 547int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
614{ 548{
615 struct sk_buff *frag; 549 struct sk_buff *frag;
@@ -756,7 +690,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
756 if (err == 0) { 690 if (err == 0) {
757 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), 691 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
758 IPSTATS_MIB_FRAGOKS); 692 IPSTATS_MIB_FRAGOKS);
759 dst_release(&rt->dst); 693 ip6_rt_put(rt);
760 return 0; 694 return 0;
761 } 695 }
762 696
@@ -768,7 +702,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
768 702
769 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), 703 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
770 IPSTATS_MIB_FRAGFAILS); 704 IPSTATS_MIB_FRAGFAILS);
771 dst_release(&rt->dst); 705 ip6_rt_put(rt);
772 return err; 706 return err;
773 707
774slow_path_clean: 708slow_path_clean:
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index cb7e2ded6f08..a14f28b280f5 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -74,6 +74,10 @@ MODULE_ALIAS_NETDEV("ip6tnl0");
74#define HASH_SIZE_SHIFT 5 74#define HASH_SIZE_SHIFT 5
75#define HASH_SIZE (1 << HASH_SIZE_SHIFT) 75#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
76 76
77static bool log_ecn_error = true;
78module_param(log_ecn_error, bool, 0644);
79MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
80
77static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2) 81static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
78{ 82{
79 u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2); 83 u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
@@ -83,6 +87,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
83 87
84static int ip6_tnl_dev_init(struct net_device *dev); 88static int ip6_tnl_dev_init(struct net_device *dev);
85static void ip6_tnl_dev_setup(struct net_device *dev); 89static void ip6_tnl_dev_setup(struct net_device *dev);
90static struct rtnl_link_ops ip6_link_ops __read_mostly;
86 91
87static int ip6_tnl_net_id __read_mostly; 92static int ip6_tnl_net_id __read_mostly;
88struct ip6_tnl_net { 93struct ip6_tnl_net {
@@ -94,14 +99,6 @@ struct ip6_tnl_net {
94 struct ip6_tnl __rcu **tnls[2]; 99 struct ip6_tnl __rcu **tnls[2];
95}; 100};
96 101
97/* often modified stats are per cpu, other are shared (netdev->stats) */
98struct pcpu_tstats {
99 unsigned long rx_packets;
100 unsigned long rx_bytes;
101 unsigned long tx_packets;
102 unsigned long tx_bytes;
103} __attribute__((aligned(4*sizeof(unsigned long))));
104
105static struct net_device_stats *ip6_get_stats(struct net_device *dev) 102static struct net_device_stats *ip6_get_stats(struct net_device *dev)
106{ 103{
107 struct pcpu_tstats sum = { 0 }; 104 struct pcpu_tstats sum = { 0 };
@@ -258,6 +255,33 @@ static void ip6_dev_free(struct net_device *dev)
258 free_netdev(dev); 255 free_netdev(dev);
259} 256}
260 257
258static int ip6_tnl_create2(struct net_device *dev)
259{
260 struct ip6_tnl *t = netdev_priv(dev);
261 struct net *net = dev_net(dev);
262 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
263 int err;
264
265 t = netdev_priv(dev);
266 err = ip6_tnl_dev_init(dev);
267 if (err < 0)
268 goto out;
269
270 err = register_netdevice(dev);
271 if (err < 0)
272 goto out;
273
274 strcpy(t->parms.name, dev->name);
275 dev->rtnl_link_ops = &ip6_link_ops;
276
277 dev_hold(dev);
278 ip6_tnl_link(ip6n, t);
279 return 0;
280
281out:
282 return err;
283}
284
261/** 285/**
262 * ip6_tnl_create - create a new tunnel 286 * ip6_tnl_create - create a new tunnel
263 * @p: tunnel parameters 287 * @p: tunnel parameters
@@ -276,7 +300,6 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
276 struct ip6_tnl *t; 300 struct ip6_tnl *t;
277 char name[IFNAMSIZ]; 301 char name[IFNAMSIZ];
278 int err; 302 int err;
279 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
280 303
281 if (p->name[0]) 304 if (p->name[0])
282 strlcpy(name, p->name, IFNAMSIZ); 305 strlcpy(name, p->name, IFNAMSIZ);
@@ -291,17 +314,10 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
291 314
292 t = netdev_priv(dev); 315 t = netdev_priv(dev);
293 t->parms = *p; 316 t->parms = *p;
294 err = ip6_tnl_dev_init(dev); 317 err = ip6_tnl_create2(dev);
295 if (err < 0) 318 if (err < 0)
296 goto failed_free; 319 goto failed_free;
297 320
298 if ((err = register_netdevice(dev)) < 0)
299 goto failed_free;
300
301 strcpy(t->parms.name, dev->name);
302
303 dev_hold(dev);
304 ip6_tnl_link(ip6n, t);
305 return t; 321 return t;
306 322
307failed_free: 323failed_free:
@@ -663,8 +679,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
663 679
664 icmpv6_send(skb2, rel_type, rel_code, rel_info); 680 icmpv6_send(skb2, rel_type, rel_code, rel_info);
665 681
666 if (rt) 682 ip6_rt_put(rt);
667 dst_release(&rt->dst);
668 683
669 kfree_skb(skb2); 684 kfree_skb(skb2);
670 } 685 }
@@ -672,28 +687,26 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
672 return 0; 687 return 0;
673} 688}
674 689
675static void ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t, 690static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
676 const struct ipv6hdr *ipv6h, 691 const struct ipv6hdr *ipv6h,
677 struct sk_buff *skb) 692 struct sk_buff *skb)
678{ 693{
679 __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK; 694 __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
680 695
681 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY) 696 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
682 ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield); 697 ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
683 698
684 if (INET_ECN_is_ce(dsfield)) 699 return IP6_ECN_decapsulate(ipv6h, skb);
685 IP_ECN_set_ce(ip_hdr(skb));
686} 700}
687 701
688static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t, 702static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
689 const struct ipv6hdr *ipv6h, 703 const struct ipv6hdr *ipv6h,
690 struct sk_buff *skb) 704 struct sk_buff *skb)
691{ 705{
692 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY) 706 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
693 ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb)); 707 ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
694 708
695 if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h))) 709 return IP6_ECN_decapsulate(ipv6h, skb);
696 IP6_ECN_set_ce(ipv6_hdr(skb));
697} 710}
698 711
699__u32 ip6_tnl_get_cap(struct ip6_tnl *t, 712__u32 ip6_tnl_get_cap(struct ip6_tnl *t,
@@ -757,12 +770,13 @@ EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
757 770
758static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, 771static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
759 __u8 ipproto, 772 __u8 ipproto,
760 void (*dscp_ecn_decapsulate)(const struct ip6_tnl *t, 773 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
761 const struct ipv6hdr *ipv6h, 774 const struct ipv6hdr *ipv6h,
762 struct sk_buff *skb)) 775 struct sk_buff *skb))
763{ 776{
764 struct ip6_tnl *t; 777 struct ip6_tnl *t;
765 const struct ipv6hdr *ipv6h = ipv6_hdr(skb); 778 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
779 int err;
766 780
767 rcu_read_lock(); 781 rcu_read_lock();
768 782
@@ -792,14 +806,26 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
792 skb->pkt_type = PACKET_HOST; 806 skb->pkt_type = PACKET_HOST;
793 memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); 807 memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
794 808
809 __skb_tunnel_rx(skb, t->dev);
810
811 err = dscp_ecn_decapsulate(t, ipv6h, skb);
812 if (unlikely(err)) {
813 if (log_ecn_error)
814 net_info_ratelimited("non-ECT from %pI6 with dsfield=%#x\n",
815 &ipv6h->saddr,
816 ipv6_get_dsfield(ipv6h));
817 if (err > 1) {
818 ++t->dev->stats.rx_frame_errors;
819 ++t->dev->stats.rx_errors;
820 rcu_read_unlock();
821 goto discard;
822 }
823 }
824
795 tstats = this_cpu_ptr(t->dev->tstats); 825 tstats = this_cpu_ptr(t->dev->tstats);
796 tstats->rx_packets++; 826 tstats->rx_packets++;
797 tstats->rx_bytes += skb->len; 827 tstats->rx_bytes += skb->len;
798 828
799 __skb_tunnel_rx(skb, t->dev);
800
801 dscp_ecn_decapsulate(t, ipv6h, skb);
802
803 netif_rx(skb); 829 netif_rx(skb);
804 830
805 rcu_read_unlock(); 831 rcu_read_unlock();
@@ -1208,7 +1234,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
1208 if (dev->mtu < IPV6_MIN_MTU) 1234 if (dev->mtu < IPV6_MIN_MTU)
1209 dev->mtu = IPV6_MIN_MTU; 1235 dev->mtu = IPV6_MIN_MTU;
1210 } 1236 }
1211 dst_release(&rt->dst); 1237 ip6_rt_put(rt);
1212 } 1238 }
1213} 1239}
1214 1240
@@ -1237,6 +1263,20 @@ ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
1237 return 0; 1263 return 0;
1238} 1264}
1239 1265
1266static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1267{
1268 struct net *net = dev_net(t->dev);
1269 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1270 int err;
1271
1272 ip6_tnl_unlink(ip6n, t);
1273 synchronize_net();
1274 err = ip6_tnl_change(t, p);
1275 ip6_tnl_link(ip6n, t);
1276 netdev_state_change(t->dev);
1277 return err;
1278}
1279
1240static void 1280static void
1241ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u) 1281ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
1242{ 1282{
@@ -1325,7 +1365,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1325 case SIOCADDTUNNEL: 1365 case SIOCADDTUNNEL:
1326 case SIOCCHGTUNNEL: 1366 case SIOCCHGTUNNEL:
1327 err = -EPERM; 1367 err = -EPERM;
1328 if (!capable(CAP_NET_ADMIN)) 1368 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1329 break; 1369 break;
1330 err = -EFAULT; 1370 err = -EFAULT;
1331 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) 1371 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
@@ -1345,11 +1385,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1345 } else 1385 } else
1346 t = netdev_priv(dev); 1386 t = netdev_priv(dev);
1347 1387
1348 ip6_tnl_unlink(ip6n, t); 1388 err = ip6_tnl_update(t, &p1);
1349 synchronize_net();
1350 err = ip6_tnl_change(t, &p1);
1351 ip6_tnl_link(ip6n, t);
1352 netdev_state_change(dev);
1353 } 1389 }
1354 if (t) { 1390 if (t) {
1355 err = 0; 1391 err = 0;
@@ -1362,7 +1398,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1362 break; 1398 break;
1363 case SIOCDELTUNNEL: 1399 case SIOCDELTUNNEL:
1364 err = -EPERM; 1400 err = -EPERM;
1365 if (!capable(CAP_NET_ADMIN)) 1401 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1366 break; 1402 break;
1367 1403
1368 if (dev == ip6n->fb_tnl_dev) { 1404 if (dev == ip6n->fb_tnl_dev) {
@@ -1505,6 +1541,164 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1505 return 0; 1541 return 0;
1506} 1542}
1507 1543
1544static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
1545{
1546 u8 proto;
1547
1548 if (!data)
1549 return 0;
1550
1551 proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1552 if (proto != IPPROTO_IPV6 &&
1553 proto != IPPROTO_IPIP &&
1554 proto != 0)
1555 return -EINVAL;
1556
1557 return 0;
1558}
1559
1560static void ip6_tnl_netlink_parms(struct nlattr *data[],
1561 struct __ip6_tnl_parm *parms)
1562{
1563 memset(parms, 0, sizeof(*parms));
1564
1565 if (!data)
1566 return;
1567
1568 if (data[IFLA_IPTUN_LINK])
1569 parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
1570
1571 if (data[IFLA_IPTUN_LOCAL])
1572 nla_memcpy(&parms->laddr, data[IFLA_IPTUN_LOCAL],
1573 sizeof(struct in6_addr));
1574
1575 if (data[IFLA_IPTUN_REMOTE])
1576 nla_memcpy(&parms->raddr, data[IFLA_IPTUN_REMOTE],
1577 sizeof(struct in6_addr));
1578
1579 if (data[IFLA_IPTUN_TTL])
1580 parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);
1581
1582 if (data[IFLA_IPTUN_ENCAP_LIMIT])
1583 parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);
1584
1585 if (data[IFLA_IPTUN_FLOWINFO])
1586 parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);
1587
1588 if (data[IFLA_IPTUN_FLAGS])
1589 parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);
1590
1591 if (data[IFLA_IPTUN_PROTO])
1592 parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1593}
1594
1595static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
1596 struct nlattr *tb[], struct nlattr *data[])
1597{
1598 struct net *net = dev_net(dev);
1599 struct ip6_tnl *nt;
1600
1601 nt = netdev_priv(dev);
1602 ip6_tnl_netlink_parms(data, &nt->parms);
1603
1604 if (ip6_tnl_locate(net, &nt->parms, 0))
1605 return -EEXIST;
1606
1607 return ip6_tnl_create2(dev);
1608}
1609
1610static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
1611 struct nlattr *data[])
1612{
1613 struct ip6_tnl *t;
1614 struct __ip6_tnl_parm p;
1615 struct net *net = dev_net(dev);
1616 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1617
1618 if (dev == ip6n->fb_tnl_dev)
1619 return -EINVAL;
1620
1621 ip6_tnl_netlink_parms(data, &p);
1622
1623 t = ip6_tnl_locate(net, &p, 0);
1624
1625 if (t) {
1626 if (t->dev != dev)
1627 return -EEXIST;
1628 } else
1629 t = netdev_priv(dev);
1630
1631 return ip6_tnl_update(t, &p);
1632}
1633
1634static size_t ip6_tnl_get_size(const struct net_device *dev)
1635{
1636 return
1637 /* IFLA_IPTUN_LINK */
1638 nla_total_size(4) +
1639 /* IFLA_IPTUN_LOCAL */
1640 nla_total_size(sizeof(struct in6_addr)) +
1641 /* IFLA_IPTUN_REMOTE */
1642 nla_total_size(sizeof(struct in6_addr)) +
1643 /* IFLA_IPTUN_TTL */
1644 nla_total_size(1) +
1645 /* IFLA_IPTUN_ENCAP_LIMIT */
1646 nla_total_size(1) +
1647 /* IFLA_IPTUN_FLOWINFO */
1648 nla_total_size(4) +
1649 /* IFLA_IPTUN_FLAGS */
1650 nla_total_size(4) +
1651 /* IFLA_IPTUN_PROTO */
1652 nla_total_size(1) +
1653 0;
1654}
1655
1656static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
1657{
1658 struct ip6_tnl *tunnel = netdev_priv(dev);
1659 struct __ip6_tnl_parm *parm = &tunnel->parms;
1660
1661 if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
1662 nla_put(skb, IFLA_IPTUN_LOCAL, sizeof(struct in6_addr),
1663 &parm->raddr) ||
1664 nla_put(skb, IFLA_IPTUN_REMOTE, sizeof(struct in6_addr),
1665 &parm->laddr) ||
1666 nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
1667 nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
1668 nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
1669 nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
1670 nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto))
1671 goto nla_put_failure;
1672 return 0;
1673
1674nla_put_failure:
1675 return -EMSGSIZE;
1676}
1677
1678static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
1679 [IFLA_IPTUN_LINK] = { .type = NLA_U32 },
1680 [IFLA_IPTUN_LOCAL] = { .len = sizeof(struct in6_addr) },
1681 [IFLA_IPTUN_REMOTE] = { .len = sizeof(struct in6_addr) },
1682 [IFLA_IPTUN_TTL] = { .type = NLA_U8 },
1683 [IFLA_IPTUN_ENCAP_LIMIT] = { .type = NLA_U8 },
1684 [IFLA_IPTUN_FLOWINFO] = { .type = NLA_U32 },
1685 [IFLA_IPTUN_FLAGS] = { .type = NLA_U32 },
1686 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
1687};
1688
1689static struct rtnl_link_ops ip6_link_ops __read_mostly = {
1690 .kind = "ip6tnl",
1691 .maxtype = IFLA_IPTUN_MAX,
1692 .policy = ip6_tnl_policy,
1693 .priv_size = sizeof(struct ip6_tnl),
1694 .setup = ip6_tnl_dev_setup,
1695 .validate = ip6_tnl_validate,
1696 .newlink = ip6_tnl_newlink,
1697 .changelink = ip6_tnl_changelink,
1698 .get_size = ip6_tnl_get_size,
1699 .fill_info = ip6_tnl_fill_info,
1700};
1701
1508static struct xfrm6_tunnel ip4ip6_handler __read_mostly = { 1702static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
1509 .handler = ip4ip6_rcv, 1703 .handler = ip4ip6_rcv,
1510 .err_handler = ip4ip6_err, 1704 .err_handler = ip4ip6_err,
@@ -1613,9 +1807,14 @@ static int __init ip6_tunnel_init(void)
1613 pr_err("%s: can't register ip6ip6\n", __func__); 1807 pr_err("%s: can't register ip6ip6\n", __func__);
1614 goto out_ip6ip6; 1808 goto out_ip6ip6;
1615 } 1809 }
1810 err = rtnl_link_register(&ip6_link_ops);
1811 if (err < 0)
1812 goto rtnl_link_failed;
1616 1813
1617 return 0; 1814 return 0;
1618 1815
1816rtnl_link_failed:
1817 xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
1619out_ip6ip6: 1818out_ip6ip6:
1620 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET); 1819 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
1621out_ip4ip6: 1820out_ip4ip6:
@@ -1630,6 +1829,7 @@ out_pernet:
1630 1829
1631static void __exit ip6_tunnel_cleanup(void) 1830static void __exit ip6_tunnel_cleanup(void)
1632{ 1831{
1832 rtnl_link_unregister(&ip6_link_ops);
1633 if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET)) 1833 if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
1634 pr_info("%s: can't deregister ip4ip6\n", __func__); 1834 pr_info("%s: can't deregister ip4ip6\n", __func__);
1635 1835
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index f7c7c6319720..26dcdec9e3a5 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -52,6 +52,7 @@
52#include <linux/netfilter_ipv6.h> 52#include <linux/netfilter_ipv6.h>
53#include <linux/export.h> 53#include <linux/export.h>
54#include <net/ip6_checksum.h> 54#include <net/ip6_checksum.h>
55#include <linux/netconf.h>
55 56
56struct mr6_table { 57struct mr6_table {
57 struct list_head list; 58 struct list_head list;
@@ -66,8 +67,8 @@ struct mr6_table {
66 struct mif_device vif6_table[MAXMIFS]; 67 struct mif_device vif6_table[MAXMIFS];
67 int maxvif; 68 int maxvif;
68 atomic_t cache_resolve_queue_len; 69 atomic_t cache_resolve_queue_len;
69 int mroute_do_assert; 70 bool mroute_do_assert;
70 int mroute_do_pim; 71 bool mroute_do_pim;
71#ifdef CONFIG_IPV6_PIMSM_V2 72#ifdef CONFIG_IPV6_PIMSM_V2
72 int mroute_reg_vif_num; 73 int mroute_reg_vif_num;
73#endif 74#endif
@@ -115,6 +116,8 @@ static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
115 mifi_t mifi, int assert); 116 mifi_t mifi, int assert);
116static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, 117static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
117 struct mfc6_cache *c, struct rtmsg *rtm); 118 struct mfc6_cache *c, struct rtmsg *rtm);
119static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
120 int cmd);
118static int ip6mr_rtm_dumproute(struct sk_buff *skb, 121static int ip6mr_rtm_dumproute(struct sk_buff *skb,
119 struct netlink_callback *cb); 122 struct netlink_callback *cb);
120static void mroute_clean_tables(struct mr6_table *mrt); 123static void mroute_clean_tables(struct mr6_table *mrt);
@@ -805,8 +808,12 @@ static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
805 dev_set_allmulti(dev, -1); 808 dev_set_allmulti(dev, -1);
806 809
807 in6_dev = __in6_dev_get(dev); 810 in6_dev = __in6_dev_get(dev);
808 if (in6_dev) 811 if (in6_dev) {
809 in6_dev->cnf.mc_forwarding--; 812 in6_dev->cnf.mc_forwarding--;
813 inet6_netconf_notify_devconf(dev_net(dev),
814 NETCONFA_MC_FORWARDING,
815 dev->ifindex, &in6_dev->cnf);
816 }
810 817
811 if (v->flags & MIFF_REGISTER) 818 if (v->flags & MIFF_REGISTER)
812 unregister_netdevice_queue(dev, head); 819 unregister_netdevice_queue(dev, head);
@@ -865,6 +872,7 @@ static void ipmr_do_expire_process(struct mr6_table *mrt)
865 } 872 }
866 873
867 list_del(&c->list); 874 list_del(&c->list);
875 mr6_netlink_event(mrt, c, RTM_DELROUTE);
868 ip6mr_destroy_unres(mrt, c); 876 ip6mr_destroy_unres(mrt, c);
869 } 877 }
870 878
@@ -958,8 +966,12 @@ static int mif6_add(struct net *net, struct mr6_table *mrt,
958 } 966 }
959 967
960 in6_dev = __in6_dev_get(dev); 968 in6_dev = __in6_dev_get(dev);
961 if (in6_dev) 969 if (in6_dev) {
962 in6_dev->cnf.mc_forwarding++; 970 in6_dev->cnf.mc_forwarding++;
971 inet6_netconf_notify_devconf(dev_net(dev),
972 NETCONFA_MC_FORWARDING,
973 dev->ifindex, &in6_dev->cnf);
974 }
963 975
964 /* 976 /*
965 * Fill in the VIF structures 977 * Fill in the VIF structures
@@ -1211,6 +1223,7 @@ ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
1211 1223
1212 atomic_inc(&mrt->cache_resolve_queue_len); 1224 atomic_inc(&mrt->cache_resolve_queue_len);
1213 list_add(&c->list, &mrt->mfc6_unres_queue); 1225 list_add(&c->list, &mrt->mfc6_unres_queue);
1226 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1214 1227
1215 ipmr_do_expire_process(mrt); 1228 ipmr_do_expire_process(mrt);
1216 } 1229 }
@@ -1248,6 +1261,7 @@ static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc)
1248 list_del(&c->list); 1261 list_del(&c->list);
1249 write_unlock_bh(&mrt_lock); 1262 write_unlock_bh(&mrt_lock);
1250 1263
1264 mr6_netlink_event(mrt, c, RTM_DELROUTE);
1251 ip6mr_cache_free(c); 1265 ip6mr_cache_free(c);
1252 return 0; 1266 return 0;
1253 } 1267 }
@@ -1412,6 +1426,7 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
1412 if (!mrtsock) 1426 if (!mrtsock)
1413 c->mfc_flags |= MFC_STATIC; 1427 c->mfc_flags |= MFC_STATIC;
1414 write_unlock_bh(&mrt_lock); 1428 write_unlock_bh(&mrt_lock);
1429 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1415 return 0; 1430 return 0;
1416 } 1431 }
1417 1432
@@ -1456,6 +1471,7 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
1456 ip6mr_cache_resolve(net, mrt, uc, c); 1471 ip6mr_cache_resolve(net, mrt, uc, c);
1457 ip6mr_cache_free(uc); 1472 ip6mr_cache_free(uc);
1458 } 1473 }
1474 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1459 return 0; 1475 return 0;
1460} 1476}
1461 1477
@@ -1489,6 +1505,7 @@ static void mroute_clean_tables(struct mr6_table *mrt)
1489 list_del(&c->list); 1505 list_del(&c->list);
1490 write_unlock_bh(&mrt_lock); 1506 write_unlock_bh(&mrt_lock);
1491 1507
1508 mr6_netlink_event(mrt, c, RTM_DELROUTE);
1492 ip6mr_cache_free(c); 1509 ip6mr_cache_free(c);
1493 } 1510 }
1494 } 1511 }
@@ -1497,6 +1514,7 @@ static void mroute_clean_tables(struct mr6_table *mrt)
1497 spin_lock_bh(&mfc_unres_lock); 1514 spin_lock_bh(&mfc_unres_lock);
1498 list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) { 1515 list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
1499 list_del(&c->list); 1516 list_del(&c->list);
1517 mr6_netlink_event(mrt, c, RTM_DELROUTE);
1500 ip6mr_destroy_unres(mrt, c); 1518 ip6mr_destroy_unres(mrt, c);
1501 } 1519 }
1502 spin_unlock_bh(&mfc_unres_lock); 1520 spin_unlock_bh(&mfc_unres_lock);
@@ -1513,6 +1531,9 @@ static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
1513 if (likely(mrt->mroute6_sk == NULL)) { 1531 if (likely(mrt->mroute6_sk == NULL)) {
1514 mrt->mroute6_sk = sk; 1532 mrt->mroute6_sk = sk;
1515 net->ipv6.devconf_all->mc_forwarding++; 1533 net->ipv6.devconf_all->mc_forwarding++;
1534 inet6_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
1535 NETCONFA_IFINDEX_ALL,
1536 net->ipv6.devconf_all);
1516 } 1537 }
1517 else 1538 else
1518 err = -EADDRINUSE; 1539 err = -EADDRINUSE;
@@ -1535,6 +1556,10 @@ int ip6mr_sk_done(struct sock *sk)
1535 write_lock_bh(&mrt_lock); 1556 write_lock_bh(&mrt_lock);
1536 mrt->mroute6_sk = NULL; 1557 mrt->mroute6_sk = NULL;
1537 net->ipv6.devconf_all->mc_forwarding--; 1558 net->ipv6.devconf_all->mc_forwarding--;
1559 inet6_netconf_notify_devconf(net,
1560 NETCONFA_MC_FORWARDING,
1561 NETCONFA_IFINDEX_ALL,
1562 net->ipv6.devconf_all);
1538 write_unlock_bh(&mrt_lock); 1563 write_unlock_bh(&mrt_lock);
1539 1564
1540 mroute_clean_tables(mrt); 1565 mroute_clean_tables(mrt);
@@ -1583,7 +1608,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
1583 return -ENOENT; 1608 return -ENOENT;
1584 1609
1585 if (optname != MRT6_INIT) { 1610 if (optname != MRT6_INIT) {
1586 if (sk != mrt->mroute6_sk && !capable(CAP_NET_ADMIN)) 1611 if (sk != mrt->mroute6_sk && !ns_capable(net->user_ns, CAP_NET_ADMIN))
1587 return -EACCES; 1612 return -EACCES;
1588 } 1613 }
1589 1614
@@ -1646,9 +1671,12 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
1646 case MRT6_ASSERT: 1671 case MRT6_ASSERT:
1647 { 1672 {
1648 int v; 1673 int v;
1674
1675 if (optlen != sizeof(v))
1676 return -EINVAL;
1649 if (get_user(v, (int __user *)optval)) 1677 if (get_user(v, (int __user *)optval))
1650 return -EFAULT; 1678 return -EFAULT;
1651 mrt->mroute_do_assert = !!v; 1679 mrt->mroute_do_assert = v;
1652 return 0; 1680 return 0;
1653 } 1681 }
1654 1682
@@ -1656,6 +1684,9 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
1656 case MRT6_PIM: 1684 case MRT6_PIM:
1657 { 1685 {
1658 int v; 1686 int v;
1687
1688 if (optlen != sizeof(v))
1689 return -EINVAL;
1659 if (get_user(v, (int __user *)optval)) 1690 if (get_user(v, (int __user *)optval))
1660 return -EFAULT; 1691 return -EFAULT;
1661 v = !!v; 1692 v = !!v;
@@ -2097,8 +2128,8 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2097{ 2128{
2098 int ct; 2129 int ct;
2099 struct rtnexthop *nhp; 2130 struct rtnexthop *nhp;
2100 u8 *b = skb_tail_pointer(skb); 2131 struct nlattr *mp_attr;
2101 struct rtattr *mp_head; 2132 struct rta_mfc_stats mfcs;
2102 2133
2103 /* If cache is unresolved, don't try to parse IIF and OIF */ 2134 /* If cache is unresolved, don't try to parse IIF and OIF */
2104 if (c->mf6c_parent >= MAXMIFS) 2135 if (c->mf6c_parent >= MAXMIFS)
@@ -2107,28 +2138,35 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2107 if (MIF_EXISTS(mrt, c->mf6c_parent) && 2138 if (MIF_EXISTS(mrt, c->mf6c_parent) &&
2108 nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0) 2139 nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
2109 return -EMSGSIZE; 2140 return -EMSGSIZE;
2110 2141 mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
2111 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); 2142 if (mp_attr == NULL)
2143 return -EMSGSIZE;
2112 2144
2113 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { 2145 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
2114 if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) { 2146 if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
2115 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) 2147 nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
2116 goto rtattr_failure; 2148 if (nhp == NULL) {
2117 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); 2149 nla_nest_cancel(skb, mp_attr);
2150 return -EMSGSIZE;
2151 }
2152
2118 nhp->rtnh_flags = 0; 2153 nhp->rtnh_flags = 0;
2119 nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; 2154 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
2120 nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex; 2155 nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
2121 nhp->rtnh_len = sizeof(*nhp); 2156 nhp->rtnh_len = sizeof(*nhp);
2122 } 2157 }
2123 } 2158 }
2124 mp_head->rta_type = RTA_MULTIPATH; 2159
2125 mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head; 2160 nla_nest_end(skb, mp_attr);
2161
2162 mfcs.mfcs_packets = c->mfc_un.res.pkt;
2163 mfcs.mfcs_bytes = c->mfc_un.res.bytes;
2164 mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
2165 if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
2166 return -EMSGSIZE;
2167
2126 rtm->rtm_type = RTN_MULTICAST; 2168 rtm->rtm_type = RTN_MULTICAST;
2127 return 1; 2169 return 1;
2128
2129rtattr_failure:
2130 nlmsg_trim(skb, b);
2131 return -EMSGSIZE;
2132} 2170}
2133 2171
2134int ip6mr_get_route(struct net *net, 2172int ip6mr_get_route(struct net *net,
@@ -2202,31 +2240,38 @@ int ip6mr_get_route(struct net *net,
2202} 2240}
2203 2241
2204static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, 2242static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2205 u32 portid, u32 seq, struct mfc6_cache *c) 2243 u32 portid, u32 seq, struct mfc6_cache *c, int cmd)
2206{ 2244{
2207 struct nlmsghdr *nlh; 2245 struct nlmsghdr *nlh;
2208 struct rtmsg *rtm; 2246 struct rtmsg *rtm;
2247 int err;
2209 2248
2210 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI); 2249 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI);
2211 if (nlh == NULL) 2250 if (nlh == NULL)
2212 return -EMSGSIZE; 2251 return -EMSGSIZE;
2213 2252
2214 rtm = nlmsg_data(nlh); 2253 rtm = nlmsg_data(nlh);
2215 rtm->rtm_family = RTNL_FAMILY_IPMR; 2254 rtm->rtm_family = RTNL_FAMILY_IP6MR;
2216 rtm->rtm_dst_len = 128; 2255 rtm->rtm_dst_len = 128;
2217 rtm->rtm_src_len = 128; 2256 rtm->rtm_src_len = 128;
2218 rtm->rtm_tos = 0; 2257 rtm->rtm_tos = 0;
2219 rtm->rtm_table = mrt->id; 2258 rtm->rtm_table = mrt->id;
2220 if (nla_put_u32(skb, RTA_TABLE, mrt->id)) 2259 if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2221 goto nla_put_failure; 2260 goto nla_put_failure;
2261 rtm->rtm_type = RTN_MULTICAST;
2222 rtm->rtm_scope = RT_SCOPE_UNIVERSE; 2262 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2223 rtm->rtm_protocol = RTPROT_UNSPEC; 2263 if (c->mfc_flags & MFC_STATIC)
2264 rtm->rtm_protocol = RTPROT_STATIC;
2265 else
2266 rtm->rtm_protocol = RTPROT_MROUTED;
2224 rtm->rtm_flags = 0; 2267 rtm->rtm_flags = 0;
2225 2268
2226 if (nla_put(skb, RTA_SRC, 16, &c->mf6c_origin) || 2269 if (nla_put(skb, RTA_SRC, 16, &c->mf6c_origin) ||
2227 nla_put(skb, RTA_DST, 16, &c->mf6c_mcastgrp)) 2270 nla_put(skb, RTA_DST, 16, &c->mf6c_mcastgrp))
2228 goto nla_put_failure; 2271 goto nla_put_failure;
2229 if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0) 2272 err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
2273 /* do not break the dump if cache is unresolved */
2274 if (err < 0 && err != -ENOENT)
2230 goto nla_put_failure; 2275 goto nla_put_failure;
2231 2276
2232 return nlmsg_end(skb, nlh); 2277 return nlmsg_end(skb, nlh);
@@ -2236,6 +2281,52 @@ nla_put_failure:
2236 return -EMSGSIZE; 2281 return -EMSGSIZE;
2237} 2282}
2238 2283
2284static int mr6_msgsize(bool unresolved, int maxvif)
2285{
2286 size_t len =
2287 NLMSG_ALIGN(sizeof(struct rtmsg))
2288 + nla_total_size(4) /* RTA_TABLE */
2289 + nla_total_size(sizeof(struct in6_addr)) /* RTA_SRC */
2290 + nla_total_size(sizeof(struct in6_addr)) /* RTA_DST */
2291 ;
2292
2293 if (!unresolved)
2294 len = len
2295 + nla_total_size(4) /* RTA_IIF */
2296 + nla_total_size(0) /* RTA_MULTIPATH */
2297 + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2298 /* RTA_MFC_STATS */
2299 + nla_total_size(sizeof(struct rta_mfc_stats))
2300 ;
2301
2302 return len;
2303}
2304
2305static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
2306 int cmd)
2307{
2308 struct net *net = read_pnet(&mrt->net);
2309 struct sk_buff *skb;
2310 int err = -ENOBUFS;
2311
2312 skb = nlmsg_new(mr6_msgsize(mfc->mf6c_parent >= MAXMIFS, mrt->maxvif),
2313 GFP_ATOMIC);
2314 if (skb == NULL)
2315 goto errout;
2316
2317 err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd);
2318 if (err < 0)
2319 goto errout;
2320
2321 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
2322 return;
2323
2324errout:
2325 kfree_skb(skb);
2326 if (err < 0)
2327 rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
2328}
2329
2239static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) 2330static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2240{ 2331{
2241 struct net *net = sock_net(skb->sk); 2332 struct net *net = sock_net(skb->sk);
@@ -2262,13 +2353,29 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2262 if (ip6mr_fill_mroute(mrt, skb, 2353 if (ip6mr_fill_mroute(mrt, skb,
2263 NETLINK_CB(cb->skb).portid, 2354 NETLINK_CB(cb->skb).portid,
2264 cb->nlh->nlmsg_seq, 2355 cb->nlh->nlmsg_seq,
2265 mfc) < 0) 2356 mfc, RTM_NEWROUTE) < 0)
2266 goto done; 2357 goto done;
2267next_entry: 2358next_entry:
2268 e++; 2359 e++;
2269 } 2360 }
2270 e = s_e = 0; 2361 e = s_e = 0;
2271 } 2362 }
2363 spin_lock_bh(&mfc_unres_lock);
2364 list_for_each_entry(mfc, &mrt->mfc6_unres_queue, list) {
2365 if (e < s_e)
2366 goto next_entry2;
2367 if (ip6mr_fill_mroute(mrt, skb,
2368 NETLINK_CB(cb->skb).portid,
2369 cb->nlh->nlmsg_seq,
2370 mfc, RTM_NEWROUTE) < 0) {
2371 spin_unlock_bh(&mfc_unres_lock);
2372 goto done;
2373 }
2374next_entry2:
2375 e++;
2376 }
2377 spin_unlock_bh(&mfc_unres_lock);
2378 e = s_e = 0;
2272 s_h = 0; 2379 s_h = 0;
2273next_table: 2380next_table:
2274 t++; 2381 t++;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index ba6d13d1f1e1..ee94d31c9d4d 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -343,7 +343,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
343 break; 343 break;
344 344
345 case IPV6_TRANSPARENT: 345 case IPV6_TRANSPARENT:
346 if (valbool && !capable(CAP_NET_ADMIN) && !capable(CAP_NET_RAW)) { 346 if (valbool && !ns_capable(net->user_ns, CAP_NET_ADMIN) &&
347 !ns_capable(net->user_ns, CAP_NET_RAW)) {
347 retv = -EPERM; 348 retv = -EPERM;
348 break; 349 break;
349 } 350 }
@@ -381,7 +382,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
381 382
382 /* hop-by-hop / destination options are privileged option */ 383 /* hop-by-hop / destination options are privileged option */
383 retv = -EPERM; 384 retv = -EPERM;
384 if (optname != IPV6_RTHDR && !capable(CAP_NET_RAW)) 385 if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
385 break; 386 break;
386 387
387 opt = ipv6_renew_options(sk, np->opt, optname, 388 opt = ipv6_renew_options(sk, np->opt, optname,
@@ -397,7 +398,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
397 if (optname == IPV6_RTHDR && opt && opt->srcrt) { 398 if (optname == IPV6_RTHDR && opt && opt->srcrt) {
398 struct ipv6_rt_hdr *rthdr = opt->srcrt; 399 struct ipv6_rt_hdr *rthdr = opt->srcrt;
399 switch (rthdr->type) { 400 switch (rthdr->type) {
400#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 401#if IS_ENABLED(CONFIG_IPV6_MIP6)
401 case IPV6_SRCRT_TYPE_2: 402 case IPV6_SRCRT_TYPE_2:
402 if (rthdr->hdrlen != 2 || 403 if (rthdr->hdrlen != 2 ||
403 rthdr->segments_left != 1) 404 rthdr->segments_left != 1)
@@ -754,7 +755,7 @@ done:
754 case IPV6_IPSEC_POLICY: 755 case IPV6_IPSEC_POLICY:
755 case IPV6_XFRM_POLICY: 756 case IPV6_XFRM_POLICY:
756 retv = -EPERM; 757 retv = -EPERM;
757 if (!capable(CAP_NET_ADMIN)) 758 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
758 break; 759 break;
759 retv = xfrm_user_policy(sk, optname, optval, optlen); 760 retv = xfrm_user_policy(sk, optname, optval, optlen);
760 break; 761 break;
@@ -827,6 +828,7 @@ pref_skip_coa:
827 if (val < 0 || val > 255) 828 if (val < 0 || val > 255)
828 goto e_inval; 829 goto e_inval;
829 np->min_hopcount = val; 830 np->min_hopcount = val;
831 retv = 0;
830 break; 832 break;
831 case IPV6_DONTFRAG: 833 case IPV6_DONTFRAG:
832 np->dontfrag = valbool; 834 np->dontfrag = valbool;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 92f8e48e4ba4..28dfa5f3801f 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -163,7 +163,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
163 rt = rt6_lookup(net, addr, NULL, 0, 0); 163 rt = rt6_lookup(net, addr, NULL, 0, 0);
164 if (rt) { 164 if (rt) {
165 dev = rt->dst.dev; 165 dev = rt->dst.dev;
166 dst_release(&rt->dst); 166 ip6_rt_put(rt);
167 } 167 }
168 } else 168 } else
169 dev = dev_get_by_index_rcu(net, ifindex); 169 dev = dev_get_by_index_rcu(net, ifindex);
@@ -260,7 +260,7 @@ static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
260 260
261 if (rt) { 261 if (rt) {
262 dev = rt->dst.dev; 262 dev = rt->dst.dev;
263 dst_release(&rt->dst); 263 ip6_rt_put(rt);
264 } 264 }
265 } else 265 } else
266 dev = dev_get_by_index_rcu(net, ifindex); 266 dev = dev_get_by_index_rcu(net, ifindex);
@@ -284,6 +284,9 @@ void ipv6_sock_mc_close(struct sock *sk)
284 struct ipv6_mc_socklist *mc_lst; 284 struct ipv6_mc_socklist *mc_lst;
285 struct net *net = sock_net(sk); 285 struct net *net = sock_net(sk);
286 286
287 if (!rcu_access_pointer(np->ipv6_mc_list))
288 return;
289
287 spin_lock(&ipv6_sk_mc_lock); 290 spin_lock(&ipv6_sk_mc_lock);
288 while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list, 291 while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list,
289 lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) { 292 lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) {
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 2edce30ef733..6574175795df 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -151,8 +151,8 @@ static inline int ndisc_opt_addr_space(struct net_device *dev)
151static u8 *ndisc_fill_addr_option(u8 *opt, int type, void *data, int data_len, 151static u8 *ndisc_fill_addr_option(u8 *opt, int type, void *data, int data_len,
152 unsigned short addr_type) 152 unsigned short addr_type)
153{ 153{
154 int space = NDISC_OPT_SPACE(data_len);
155 int pad = ndisc_addr_option_pad(addr_type); 154 int pad = ndisc_addr_option_pad(addr_type);
155 int space = NDISC_OPT_SPACE(data_len + pad);
156 156
157 opt[0] = type; 157 opt[0] = type;
158 opt[1] = space>>3; 158 opt[1] = space>>3;
@@ -370,12 +370,12 @@ static void pndisc_destructor(struct pneigh_entry *n)
370 ipv6_dev_mc_dec(dev, &maddr); 370 ipv6_dev_mc_dec(dev, &maddr);
371} 371}
372 372
373struct sk_buff *ndisc_build_skb(struct net_device *dev, 373static struct sk_buff *ndisc_build_skb(struct net_device *dev,
374 const struct in6_addr *daddr, 374 const struct in6_addr *daddr,
375 const struct in6_addr *saddr, 375 const struct in6_addr *saddr,
376 struct icmp6hdr *icmp6h, 376 struct icmp6hdr *icmp6h,
377 const struct in6_addr *target, 377 const struct in6_addr *target,
378 int llinfo) 378 int llinfo)
379{ 379{
380 struct net *net = dev_net(dev); 380 struct net *net = dev_net(dev);
381 struct sock *sk = net->ipv6.ndisc_sk; 381 struct sock *sk = net->ipv6.ndisc_sk;
@@ -431,14 +431,11 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
431 return skb; 431 return skb;
432} 432}
433 433
434EXPORT_SYMBOL(ndisc_build_skb); 434static void ndisc_send_skb(struct sk_buff *skb, struct net_device *dev,
435 435 struct neighbour *neigh,
436void ndisc_send_skb(struct sk_buff *skb, 436 const struct in6_addr *daddr,
437 struct net_device *dev, 437 const struct in6_addr *saddr,
438 struct neighbour *neigh, 438 struct icmp6hdr *icmp6h)
439 const struct in6_addr *daddr,
440 const struct in6_addr *saddr,
441 struct icmp6hdr *icmp6h)
442{ 439{
443 struct flowi6 fl6; 440 struct flowi6 fl6;
444 struct dst_entry *dst; 441 struct dst_entry *dst;
@@ -473,8 +470,6 @@ void ndisc_send_skb(struct sk_buff *skb,
473 rcu_read_unlock(); 470 rcu_read_unlock();
474} 471}
475 472
476EXPORT_SYMBOL(ndisc_send_skb);
477
478/* 473/*
479 * Send a Neighbour Discover packet 474 * Send a Neighbour Discover packet
480 */ 475 */
@@ -535,7 +530,6 @@ static void ndisc_send_unsol_na(struct net_device *dev)
535{ 530{
536 struct inet6_dev *idev; 531 struct inet6_dev *idev;
537 struct inet6_ifaddr *ifa; 532 struct inet6_ifaddr *ifa;
538 struct in6_addr mcaddr = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
539 533
540 idev = in6_dev_get(dev); 534 idev = in6_dev_get(dev);
541 if (!idev) 535 if (!idev)
@@ -543,7 +537,7 @@ static void ndisc_send_unsol_na(struct net_device *dev)
543 537
544 read_lock_bh(&idev->lock); 538 read_lock_bh(&idev->lock);
545 list_for_each_entry(ifa, &idev->addr_list, if_list) { 539 list_for_each_entry(ifa, &idev->addr_list, if_list) {
546 ndisc_send_na(dev, NULL, &mcaddr, &ifa->addr, 540 ndisc_send_na(dev, NULL, &in6addr_linklocal_allnodes, &ifa->addr,
547 /*router=*/ !!idev->cnf.forwarding, 541 /*router=*/ !!idev->cnf.forwarding,
548 /*solicited=*/ false, /*override=*/ true, 542 /*solicited=*/ false, /*override=*/ true,
549 /*inc_opt=*/ true); 543 /*inc_opt=*/ true);
@@ -905,7 +899,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
905 if (lladdr && !memcmp(lladdr, dev->dev_addr, dev->addr_len) && 899 if (lladdr && !memcmp(lladdr, dev->dev_addr, dev->addr_len) &&
906 net->ipv6.devconf_all->forwarding && net->ipv6.devconf_all->proxy_ndp && 900 net->ipv6.devconf_all->forwarding && net->ipv6.devconf_all->proxy_ndp &&
907 pneigh_lookup(&nd_tbl, net, &msg->target, dev, 0)) { 901 pneigh_lookup(&nd_tbl, net, &msg->target, dev, 0)) {
908 /* XXX: idev->cnf.prixy_ndp */ 902 /* XXX: idev->cnf.proxy_ndp */
909 goto out; 903 goto out;
910 } 904 }
911 905
@@ -1033,18 +1027,6 @@ errout:
1033 rtnl_set_sk_err(net, RTNLGRP_ND_USEROPT, err); 1027 rtnl_set_sk_err(net, RTNLGRP_ND_USEROPT, err);
1034} 1028}
1035 1029
1036static inline int accept_ra(struct inet6_dev *in6_dev)
1037{
1038 /*
1039 * If forwarding is enabled, RA are not accepted unless the special
1040 * hybrid mode (accept_ra=2) is enabled.
1041 */
1042 if (in6_dev->cnf.forwarding && in6_dev->cnf.accept_ra < 2)
1043 return 0;
1044
1045 return in6_dev->cnf.accept_ra;
1046}
1047
1048static void ndisc_router_discovery(struct sk_buff *skb) 1030static void ndisc_router_discovery(struct sk_buff *skb)
1049{ 1031{
1050 struct ra_msg *ra_msg = (struct ra_msg *)skb_transport_header(skb); 1032 struct ra_msg *ra_msg = (struct ra_msg *)skb_transport_header(skb);
@@ -1092,7 +1074,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1092 return; 1074 return;
1093 } 1075 }
1094 1076
1095 if (!accept_ra(in6_dev)) 1077 if (!ipv6_accept_ra(in6_dev))
1096 goto skip_linkparms; 1078 goto skip_linkparms;
1097 1079
1098#ifdef CONFIG_IPV6_NDISC_NODETYPE 1080#ifdef CONFIG_IPV6_NDISC_NODETYPE
@@ -1144,7 +1126,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1144 ND_PRINTK(0, err, 1126 ND_PRINTK(0, err,
1145 "RA: %s got default router without neighbour\n", 1127 "RA: %s got default router without neighbour\n",
1146 __func__); 1128 __func__);
1147 dst_release(&rt->dst); 1129 ip6_rt_put(rt);
1148 return; 1130 return;
1149 } 1131 }
1150 } 1132 }
@@ -1169,7 +1151,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1169 ND_PRINTK(0, err, 1151 ND_PRINTK(0, err,
1170 "RA: %s got default router without neighbour\n", 1152 "RA: %s got default router without neighbour\n",
1171 __func__); 1153 __func__);
1172 dst_release(&rt->dst); 1154 ip6_rt_put(rt);
1173 return; 1155 return;
1174 } 1156 }
1175 neigh->flags |= NTF_ROUTER; 1157 neigh->flags |= NTF_ROUTER;
@@ -1248,7 +1230,7 @@ skip_linkparms:
1248 NEIGH_UPDATE_F_ISROUTER); 1230 NEIGH_UPDATE_F_ISROUTER);
1249 } 1231 }
1250 1232
1251 if (!accept_ra(in6_dev)) 1233 if (!ipv6_accept_ra(in6_dev))
1252 goto out; 1234 goto out;
1253 1235
1254#ifdef CONFIG_IPV6_ROUTE_INFO 1236#ifdef CONFIG_IPV6_ROUTE_INFO
@@ -1325,14 +1307,19 @@ skip_routeinfo:
1325 ND_PRINTK(2, warn, "RA: invalid RA options\n"); 1307 ND_PRINTK(2, warn, "RA: invalid RA options\n");
1326 } 1308 }
1327out: 1309out:
1328 if (rt) 1310 ip6_rt_put(rt);
1329 dst_release(&rt->dst);
1330 if (neigh) 1311 if (neigh)
1331 neigh_release(neigh); 1312 neigh_release(neigh);
1332} 1313}
1333 1314
1334static void ndisc_redirect_rcv(struct sk_buff *skb) 1315static void ndisc_redirect_rcv(struct sk_buff *skb)
1335{ 1316{
1317 u8 *hdr;
1318 struct ndisc_options ndopts;
1319 struct rd_msg *msg = (struct rd_msg *)skb_transport_header(skb);
1320 u32 ndoptlen = skb->tail - (skb->transport_header +
1321 offsetof(struct rd_msg, opt));
1322
1336#ifdef CONFIG_IPV6_NDISC_NODETYPE 1323#ifdef CONFIG_IPV6_NDISC_NODETYPE
1337 switch (skb->ndisc_nodetype) { 1324 switch (skb->ndisc_nodetype) {
1338 case NDISC_NODETYPE_HOST: 1325 case NDISC_NODETYPE_HOST:
@@ -1349,6 +1336,17 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1349 return; 1336 return;
1350 } 1337 }
1351 1338
1339 if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts))
1340 return;
1341
1342 if (!ndopts.nd_opts_rh)
1343 return;
1344
1345 hdr = (u8 *)ndopts.nd_opts_rh;
1346 hdr += 8;
1347 if (!pskb_pull(skb, hdr - skb_transport_header(skb)))
1348 return;
1349
1352 icmpv6_notify(skb, NDISC_REDIRECT, 0, 0); 1350 icmpv6_notify(skb, NDISC_REDIRECT, 0, 0);
1353} 1351}
1354 1352
@@ -1574,11 +1572,18 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
1574{ 1572{
1575 struct net_device *dev = ptr; 1573 struct net_device *dev = ptr;
1576 struct net *net = dev_net(dev); 1574 struct net *net = dev_net(dev);
1575 struct inet6_dev *idev;
1577 1576
1578 switch (event) { 1577 switch (event) {
1579 case NETDEV_CHANGEADDR: 1578 case NETDEV_CHANGEADDR:
1580 neigh_changeaddr(&nd_tbl, dev); 1579 neigh_changeaddr(&nd_tbl, dev);
1581 fib6_run_gc(~0UL, net); 1580 fib6_run_gc(~0UL, net);
1581 idev = in6_dev_get(dev);
1582 if (!idev)
1583 break;
1584 if (idev->cnf.ndisc_notify)
1585 ndisc_send_unsol_na(dev);
1586 in6_dev_put(idev);
1582 break; 1587 break;
1583 case NETDEV_DOWN: 1588 case NETDEV_DOWN:
1584 neigh_ifdown(&nd_tbl, dev); 1589 neigh_ifdown(&nd_tbl, dev);
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index d7cb04506c3d..125a90d6a795 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -207,8 +207,7 @@ ip6t_get_target_c(const struct ip6t_entry *e)
207 return ip6t_get_target((struct ip6t_entry *)e); 207 return ip6t_get_target((struct ip6t_entry *)e);
208} 208}
209 209
210#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 210#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
211 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
212/* This cries for unification! */ 211/* This cries for unification! */
213static const char *const hooknames[] = { 212static const char *const hooknames[] = {
214 [NF_INET_PRE_ROUTING] = "PREROUTING", 213 [NF_INET_PRE_ROUTING] = "PREROUTING",
@@ -381,8 +380,7 @@ ip6t_do_table(struct sk_buff *skb,
381 t = ip6t_get_target_c(e); 380 t = ip6t_get_target_c(e);
382 IP_NF_ASSERT(t->u.kernel.target); 381 IP_NF_ASSERT(t->u.kernel.target);
383 382
384#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 383#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
385 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
386 /* The packet is traced: log it */ 384 /* The packet is traced: log it */
387 if (unlikely(skb->nf_trace)) 385 if (unlikely(skb->nf_trace))
388 trace_packet(skb, hook, in, out, 386 trace_packet(skb, hook, in, out,
@@ -1856,7 +1854,7 @@ compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1856{ 1854{
1857 int ret; 1855 int ret;
1858 1856
1859 if (!capable(CAP_NET_ADMIN)) 1857 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1860 return -EPERM; 1858 return -EPERM;
1861 1859
1862 switch (cmd) { 1860 switch (cmd) {
@@ -1971,7 +1969,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1971{ 1969{
1972 int ret; 1970 int ret;
1973 1971
1974 if (!capable(CAP_NET_ADMIN)) 1972 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1975 return -EPERM; 1973 return -EPERM;
1976 1974
1977 switch (cmd) { 1975 switch (cmd) {
@@ -1993,7 +1991,7 @@ do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1993{ 1991{
1994 int ret; 1992 int ret;
1995 1993
1996 if (!capable(CAP_NET_ADMIN)) 1994 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1997 return -EPERM; 1995 return -EPERM;
1998 1996
1999 switch (cmd) { 1997 switch (cmd) {
@@ -2018,7 +2016,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2018{ 2016{
2019 int ret; 2017 int ret;
2020 2018
2021 if (!capable(CAP_NET_ADMIN)) 2019 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2022 return -EPERM; 2020 return -EPERM;
2023 2021
2024 switch (cmd) { 2022 switch (cmd) {
@@ -2273,112 +2271,9 @@ static void __exit ip6_tables_fini(void)
2273 unregister_pernet_subsys(&ip6_tables_net_ops); 2271 unregister_pernet_subsys(&ip6_tables_net_ops);
2274} 2272}
2275 2273
2276/*
2277 * find the offset to specified header or the protocol number of last header
2278 * if target < 0. "last header" is transport protocol header, ESP, or
2279 * "No next header".
2280 *
2281 * Note that *offset is used as input/output parameter. an if it is not zero,
2282 * then it must be a valid offset to an inner IPv6 header. This can be used
2283 * to explore inner IPv6 header, eg. ICMPv6 error messages.
2284 *
2285 * If target header is found, its offset is set in *offset and return protocol
2286 * number. Otherwise, return -1.
2287 *
2288 * If the first fragment doesn't contain the final protocol header or
2289 * NEXTHDR_NONE it is considered invalid.
2290 *
2291 * Note that non-1st fragment is special case that "the protocol number
2292 * of last header" is "next header" field in Fragment header. In this case,
2293 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2294 * isn't NULL.
2295 *
2296 * if flags is not NULL and it's a fragment, then the frag flag IP6T_FH_F_FRAG
2297 * will be set. If it's an AH header, the IP6T_FH_F_AUTH flag is set and
2298 * target < 0, then this function will stop at the AH header.
2299 */
2300int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2301 int target, unsigned short *fragoff, int *flags)
2302{
2303 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2304 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2305 unsigned int len;
2306
2307 if (fragoff)
2308 *fragoff = 0;
2309
2310 if (*offset) {
2311 struct ipv6hdr _ip6, *ip6;
2312
2313 ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6);
2314 if (!ip6 || (ip6->version != 6)) {
2315 printk(KERN_ERR "IPv6 header not found\n");
2316 return -EBADMSG;
2317 }
2318 start = *offset + sizeof(struct ipv6hdr);
2319 nexthdr = ip6->nexthdr;
2320 }
2321 len = skb->len - start;
2322
2323 while (nexthdr != target) {
2324 struct ipv6_opt_hdr _hdr, *hp;
2325 unsigned int hdrlen;
2326
2327 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2328 if (target < 0)
2329 break;
2330 return -ENOENT;
2331 }
2332
2333 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2334 if (hp == NULL)
2335 return -EBADMSG;
2336 if (nexthdr == NEXTHDR_FRAGMENT) {
2337 unsigned short _frag_off;
2338 __be16 *fp;
2339
2340 if (flags) /* Indicate that this is a fragment */
2341 *flags |= IP6T_FH_F_FRAG;
2342 fp = skb_header_pointer(skb,
2343 start+offsetof(struct frag_hdr,
2344 frag_off),
2345 sizeof(_frag_off),
2346 &_frag_off);
2347 if (fp == NULL)
2348 return -EBADMSG;
2349
2350 _frag_off = ntohs(*fp) & ~0x7;
2351 if (_frag_off) {
2352 if (target < 0 &&
2353 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2354 hp->nexthdr == NEXTHDR_NONE)) {
2355 if (fragoff)
2356 *fragoff = _frag_off;
2357 return hp->nexthdr;
2358 }
2359 return -ENOENT;
2360 }
2361 hdrlen = 8;
2362 } else if (nexthdr == NEXTHDR_AUTH) {
2363 if (flags && (*flags & IP6T_FH_F_AUTH) && (target < 0))
2364 break;
2365 hdrlen = (hp->hdrlen + 2) << 2;
2366 } else
2367 hdrlen = ipv6_optlen(hp);
2368
2369 nexthdr = hp->nexthdr;
2370 len -= hdrlen;
2371 start += hdrlen;
2372 }
2373
2374 *offset = start;
2375 return nexthdr;
2376}
2377
2378EXPORT_SYMBOL(ip6t_register_table); 2274EXPORT_SYMBOL(ip6t_register_table);
2379EXPORT_SYMBOL(ip6t_unregister_table); 2275EXPORT_SYMBOL(ip6t_unregister_table);
2380EXPORT_SYMBOL(ip6t_do_table); 2276EXPORT_SYMBOL(ip6t_do_table);
2381EXPORT_SYMBOL(ipv6_find_hdr);
2382 2277
2383module_init(ip6_tables_init); 2278module_init(ip6_tables_init);
2384module_exit(ip6_tables_fini); 2279module_exit(ip6_tables_fini);
diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
index e9486915eff6..7302b0b7b642 100644
--- a/net/ipv6/netfilter/ip6t_NPT.c
+++ b/net/ipv6/netfilter/ip6t_NPT.c
@@ -14,42 +14,23 @@
14#include <linux/netfilter_ipv6/ip6t_NPT.h> 14#include <linux/netfilter_ipv6/ip6t_NPT.h>
15#include <linux/netfilter/x_tables.h> 15#include <linux/netfilter/x_tables.h>
16 16
17static __sum16 csum16_complement(__sum16 a)
18{
19 return (__force __sum16)(0xffff - (__force u16)a);
20}
21
22static __sum16 csum16_add(__sum16 a, __sum16 b)
23{
24 u16 sum;
25
26 sum = (__force u16)a + (__force u16)b;
27 sum += (__force u16)a < (__force u16)b;
28 return (__force __sum16)sum;
29}
30
31static __sum16 csum16_sub(__sum16 a, __sum16 b)
32{
33 return csum16_add(a, csum16_complement(b));
34}
35
36static int ip6t_npt_checkentry(const struct xt_tgchk_param *par) 17static int ip6t_npt_checkentry(const struct xt_tgchk_param *par)
37{ 18{
38 struct ip6t_npt_tginfo *npt = par->targinfo; 19 struct ip6t_npt_tginfo *npt = par->targinfo;
39 __sum16 src_sum = 0, dst_sum = 0; 20 __wsum src_sum = 0, dst_sum = 0;
40 unsigned int i; 21 unsigned int i;
41 22
42 if (npt->src_pfx_len > 64 || npt->dst_pfx_len > 64) 23 if (npt->src_pfx_len > 64 || npt->dst_pfx_len > 64)
43 return -EINVAL; 24 return -EINVAL;
44 25
45 for (i = 0; i < ARRAY_SIZE(npt->src_pfx.in6.s6_addr16); i++) { 26 for (i = 0; i < ARRAY_SIZE(npt->src_pfx.in6.s6_addr16); i++) {
46 src_sum = csum16_add(src_sum, 27 src_sum = csum_add(src_sum,
47 (__force __sum16)npt->src_pfx.in6.s6_addr16[i]); 28 (__force __wsum)npt->src_pfx.in6.s6_addr16[i]);
48 dst_sum = csum16_add(dst_sum, 29 dst_sum = csum_add(dst_sum,
49 (__force __sum16)npt->dst_pfx.in6.s6_addr16[i]); 30 (__force __wsum)npt->dst_pfx.in6.s6_addr16[i]);
50 } 31 }
51 32
52 npt->adjustment = csum16_sub(src_sum, dst_sum); 33 npt->adjustment = (__force __sum16) csum_sub(src_sum, dst_sum);
53 return 0; 34 return 0;
54} 35}
55 36
@@ -85,7 +66,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
85 return false; 66 return false;
86 } 67 }
87 68
88 sum = csum16_add((__force __sum16)addr->s6_addr16[idx], 69 sum = (__force __sum16) csum_add((__force __wsum)addr->s6_addr16[idx],
89 npt->adjustment); 70 npt->adjustment);
90 if (sum == CSUM_MANGLED_0) 71 if (sum == CSUM_MANGLED_0)
91 sum = 0; 72 sum = 0;
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index fd4fb34c51c7..029623dbd411 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -132,6 +132,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
132 ip6h->saddr = oip6h->daddr; 132 ip6h->saddr = oip6h->daddr;
133 ip6h->daddr = oip6h->saddr; 133 ip6h->daddr = oip6h->saddr;
134 134
135 skb_reset_transport_header(nskb);
135 tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); 136 tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
136 /* Truncate to length (no data) */ 137 /* Truncate to length (no data) */
137 tcph->doff = sizeof(struct tcphdr)/4; 138 tcph->doff = sizeof(struct tcphdr)/4;
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
index 5d1d8b04d694..5060d54199ab 100644
--- a/net/ipv6/netfilter/ip6t_rpfilter.c
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -67,7 +67,7 @@ static bool rpfilter_lookup_reverse6(const struct sk_buff *skb,
67 if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE)) 67 if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
68 ret = true; 68 ret = true;
69 out: 69 out:
70 dst_release(&rt->dst); 70 ip6_rt_put(rt);
71 return ret; 71 return ret;
72} 72}
73 73
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index d57dab17a182..e0e788d25b14 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -127,19 +127,28 @@ nf_nat_ipv6_fn(unsigned int hooknum,
127 ret = nf_nat_rule_find(skb, hooknum, in, out, ct); 127 ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
128 if (ret != NF_ACCEPT) 128 if (ret != NF_ACCEPT)
129 return ret; 129 return ret;
130 } else 130 } else {
131 pr_debug("Already setup manip %s for ct %p\n", 131 pr_debug("Already setup manip %s for ct %p\n",
132 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST", 132 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
133 ct); 133 ct);
134 if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
135 goto oif_changed;
136 }
134 break; 137 break;
135 138
136 default: 139 default:
137 /* ESTABLISHED */ 140 /* ESTABLISHED */
138 NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || 141 NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
139 ctinfo == IP_CT_ESTABLISHED_REPLY); 142 ctinfo == IP_CT_ESTABLISHED_REPLY);
143 if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
144 goto oif_changed;
140 } 145 }
141 146
142 return nf_nat_packet(ct, ctinfo, hooknum, skb); 147 return nf_nat_packet(ct, ctinfo, hooknum, skb);
148
149oif_changed:
150 nf_ct_kill_acct(ct, ctinfo, skb);
151 return NF_DROP;
143} 152}
144 153
145static unsigned int 154static unsigned int
@@ -277,9 +286,7 @@ static int __net_init ip6table_nat_net_init(struct net *net)
277 return -ENOMEM; 286 return -ENOMEM;
278 net->ipv6.ip6table_nat = ip6t_register_table(net, &nf_nat_ipv6_table, repl); 287 net->ipv6.ip6table_nat = ip6t_register_table(net, &nf_nat_ipv6_table, repl);
279 kfree(repl); 288 kfree(repl);
280 if (IS_ERR(net->ipv6.ip6table_nat)) 289 return PTR_RET(net->ipv6.ip6table_nat);
281 return PTR_ERR(net->ipv6.ip6table_nat);
282 return 0;
283} 290}
284 291
285static void __net_exit ip6table_nat_net_exit(struct net *net) 292static void __net_exit ip6table_nat_net_exit(struct net *net)
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 8860d23e61cf..137e245860ab 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/netfilter_bridge.h> 22#include <linux/netfilter_bridge.h>
23#include <linux/netfilter_ipv6.h> 23#include <linux/netfilter_ipv6.h>
24#include <linux/netfilter_ipv6/ip6_tables.h>
24#include <net/netfilter/nf_conntrack.h> 25#include <net/netfilter/nf_conntrack.h>
25#include <net/netfilter/nf_conntrack_helper.h> 26#include <net/netfilter/nf_conntrack_helper.h>
26#include <net/netfilter/nf_conntrack_l4proto.h> 27#include <net/netfilter/nf_conntrack_l4proto.h>
@@ -80,8 +81,8 @@ static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
80 } 81 }
81 protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off); 82 protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
82 /* 83 /*
83 * (protoff == skb->len) mean that the packet doesn't have no data 84 * (protoff == skb->len) means the packet has not data, just
84 * except of IPv6 & ext headers. but it's tracked anyway. - YK 85 * IPv6 and possibly extensions headers, but it is tracked anyway
85 */ 86 */
86 if (protoff < 0 || (frag_off & htons(~0x7)) != 0) { 87 if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
87 pr_debug("ip6_conntrack_core: can't find proto in pkt\n"); 88 pr_debug("ip6_conntrack_core: can't find proto in pkt\n");
@@ -295,7 +296,56 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
295 }, 296 },
296}; 297};
297 298
298#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 299static int
300ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
301{
302 const struct inet_sock *inet = inet_sk(sk);
303 const struct ipv6_pinfo *inet6 = inet6_sk(sk);
304 const struct nf_conntrack_tuple_hash *h;
305 struct sockaddr_in6 sin6;
306 struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
307 struct nf_conn *ct;
308
309 tuple.src.u3.in6 = inet6->rcv_saddr;
310 tuple.src.u.tcp.port = inet->inet_sport;
311 tuple.dst.u3.in6 = inet6->daddr;
312 tuple.dst.u.tcp.port = inet->inet_dport;
313 tuple.dst.protonum = sk->sk_protocol;
314
315 if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP)
316 return -ENOPROTOOPT;
317
318 if (*len < 0 || (unsigned int) *len < sizeof(sin6))
319 return -EINVAL;
320
321 h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple);
322 if (!h) {
323 pr_debug("IP6T_SO_ORIGINAL_DST: Can't find %pI6c/%u-%pI6c/%u.\n",
324 &tuple.src.u3.ip6, ntohs(tuple.src.u.tcp.port),
325 &tuple.dst.u3.ip6, ntohs(tuple.dst.u.tcp.port));
326 return -ENOENT;
327 }
328
329 ct = nf_ct_tuplehash_to_ctrack(h);
330
331 sin6.sin6_family = AF_INET6;
332 sin6.sin6_port = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port;
333 sin6.sin6_flowinfo = inet6->flow_label & IPV6_FLOWINFO_MASK;
334 memcpy(&sin6.sin6_addr,
335 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6,
336 sizeof(sin6.sin6_addr));
337
338 nf_ct_put(ct);
339
340 if (ipv6_addr_type(&sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
341 sin6.sin6_scope_id = sk->sk_bound_dev_if;
342 else
343 sin6.sin6_scope_id = 0;
344
345 return copy_to_user(user, &sin6, sizeof(sin6)) ? -EFAULT : 0;
346}
347
348#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
299 349
300#include <linux/netfilter/nfnetlink.h> 350#include <linux/netfilter/nfnetlink.h>
301#include <linux/netfilter/nfnetlink_conntrack.h> 351#include <linux/netfilter/nfnetlink_conntrack.h>
@@ -346,7 +396,7 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 __read_mostly = {
346 .invert_tuple = ipv6_invert_tuple, 396 .invert_tuple = ipv6_invert_tuple,
347 .print_tuple = ipv6_print_tuple, 397 .print_tuple = ipv6_print_tuple,
348 .get_l4proto = ipv6_get_l4proto, 398 .get_l4proto = ipv6_get_l4proto,
349#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 399#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
350 .tuple_to_nlattr = ipv6_tuple_to_nlattr, 400 .tuple_to_nlattr = ipv6_tuple_to_nlattr,
351 .nlattr_tuple_size = ipv6_nlattr_tuple_size, 401 .nlattr_tuple_size = ipv6_nlattr_tuple_size,
352 .nlattr_to_tuple = ipv6_nlattr_to_tuple, 402 .nlattr_to_tuple = ipv6_nlattr_to_tuple,
@@ -359,6 +409,14 @@ MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET6));
359MODULE_LICENSE("GPL"); 409MODULE_LICENSE("GPL");
360MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>"); 410MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>");
361 411
412static struct nf_sockopt_ops so_getorigdst6 = {
413 .pf = NFPROTO_IPV6,
414 .get_optmin = IP6T_SO_ORIGINAL_DST,
415 .get_optmax = IP6T_SO_ORIGINAL_DST + 1,
416 .get = ipv6_getorigdst,
417 .owner = THIS_MODULE,
418};
419
362static int ipv6_net_init(struct net *net) 420static int ipv6_net_init(struct net *net)
363{ 421{
364 int ret = 0; 422 int ret = 0;
@@ -425,6 +483,12 @@ static int __init nf_conntrack_l3proto_ipv6_init(void)
425 need_conntrack(); 483 need_conntrack();
426 nf_defrag_ipv6_enable(); 484 nf_defrag_ipv6_enable();
427 485
486 ret = nf_register_sockopt(&so_getorigdst6);
487 if (ret < 0) {
488 pr_err("Unable to register netfilter socket option\n");
489 return ret;
490 }
491
428 ret = register_pernet_subsys(&ipv6_net_ops); 492 ret = register_pernet_subsys(&ipv6_net_ops);
429 if (ret < 0) 493 if (ret < 0)
430 goto cleanup_pernet; 494 goto cleanup_pernet;
@@ -440,6 +504,7 @@ static int __init nf_conntrack_l3proto_ipv6_init(void)
440 cleanup_ipv6: 504 cleanup_ipv6:
441 unregister_pernet_subsys(&ipv6_net_ops); 505 unregister_pernet_subsys(&ipv6_net_ops);
442 cleanup_pernet: 506 cleanup_pernet:
507 nf_unregister_sockopt(&so_getorigdst6);
443 return ret; 508 return ret;
444} 509}
445 510
@@ -448,6 +513,7 @@ static void __exit nf_conntrack_l3proto_ipv6_fini(void)
448 synchronize_net(); 513 synchronize_net();
449 nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops)); 514 nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops));
450 unregister_pernet_subsys(&ipv6_net_ops); 515 unregister_pernet_subsys(&ipv6_net_ops);
516 nf_unregister_sockopt(&so_getorigdst6);
451} 517}
452 518
453module_init(nf_conntrack_l3proto_ipv6_init); 519module_init(nf_conntrack_l3proto_ipv6_init);
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 2d54b2061d68..24df3dde0076 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -232,7 +232,7 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
232 return icmpv6_error_message(net, tmpl, skb, dataoff, ctinfo, hooknum); 232 return icmpv6_error_message(net, tmpl, skb, dataoff, ctinfo, hooknum);
233} 233}
234 234
235#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 235#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
236 236
237#include <linux/netfilter/nfnetlink.h> 237#include <linux/netfilter/nfnetlink.h>
238#include <linux/netfilter/nfnetlink_conntrack.h> 238#include <linux/netfilter/nfnetlink_conntrack.h>
@@ -375,7 +375,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
375 .get_timeouts = icmpv6_get_timeouts, 375 .get_timeouts = icmpv6_get_timeouts,
376 .new = icmpv6_new, 376 .new = icmpv6_new,
377 .error = icmpv6_error, 377 .error = icmpv6_error,
378#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 378#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
379 .tuple_to_nlattr = icmpv6_tuple_to_nlattr, 379 .tuple_to_nlattr = icmpv6_tuple_to_nlattr,
380 .nlattr_tuple_size = icmpv6_nlattr_tuple_size, 380 .nlattr_tuple_size = icmpv6_nlattr_tuple_size,
381 .nlattr_to_tuple = icmpv6_nlattr_to_tuple, 381 .nlattr_to_tuple = icmpv6_nlattr_to_tuple,
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 22c8ea951185..3dacecc99065 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -311,7 +311,10 @@ found:
311 else 311 else
312 fq->q.fragments = skb; 312 fq->q.fragments = skb;
313 313
314 skb->dev = NULL; 314 if (skb->dev) {
315 fq->iif = skb->dev->ifindex;
316 skb->dev = NULL;
317 }
315 fq->q.stamp = skb->tstamp; 318 fq->q.stamp = skb->tstamp;
316 fq->q.meat += skb->len; 319 fq->q.meat += skb->len;
317 if (payload_len > fq->q.max_size) 320 if (payload_len > fq->q.max_size)
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index cdd6d045e42e..aacd121fe8c5 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -19,7 +19,7 @@
19 19
20#include <linux/netfilter_ipv6.h> 20#include <linux/netfilter_ipv6.h>
21#include <linux/netfilter_bridge.h> 21#include <linux/netfilter_bridge.h>
22#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 22#if IS_ENABLED(CONFIG_NF_CONNTRACK)
23#include <net/netfilter/nf_conntrack.h> 23#include <net/netfilter/nf_conntrack.h>
24#include <net/netfilter/nf_conntrack_helper.h> 24#include <net/netfilter/nf_conntrack_helper.h>
25#include <net/netfilter/nf_conntrack_l4proto.h> 25#include <net/netfilter/nf_conntrack_l4proto.h>
@@ -35,7 +35,7 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
35{ 35{
36 u16 zone = NF_CT_DEFAULT_ZONE; 36 u16 zone = NF_CT_DEFAULT_ZONE;
37 37
38#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 38#if IS_ENABLED(CONFIG_NF_CONNTRACK)
39 if (skb->nfct) 39 if (skb->nfct)
40 zone = nf_ct_zone((struct nf_conn *)skb->nfct); 40 zone = nf_ct_zone((struct nf_conn *)skb->nfct);
41#endif 41#endif
@@ -60,7 +60,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
60{ 60{
61 struct sk_buff *reasm; 61 struct sk_buff *reasm;
62 62
63#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 63#if IS_ENABLED(CONFIG_NF_CONNTRACK)
64 /* Previously seen (loopback)? */ 64 /* Previously seen (loopback)? */
65 if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct)) 65 if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
66 return NF_ACCEPT; 66 return NF_ACCEPT;
diff --git a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
index 5d6da784305b..61aaf70f376e 100644
--- a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
@@ -84,7 +84,7 @@ const struct nf_nat_l4proto nf_nat_l4proto_icmpv6 = {
84 .manip_pkt = icmpv6_manip_pkt, 84 .manip_pkt = icmpv6_manip_pkt,
85 .in_range = icmpv6_in_range, 85 .in_range = icmpv6_in_range,
86 .unique_tuple = icmpv6_unique_tuple, 86 .unique_tuple = icmpv6_unique_tuple,
87#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 87#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
88 .nlattr_to_range = nf_nat_l4proto_nlattr_to_range, 88 .nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
89#endif 89#endif
90}; 90};
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
new file mode 100644
index 000000000000..c2e73e647e44
--- /dev/null
+++ b/net/ipv6/output_core.c
@@ -0,0 +1,76 @@
1/*
2 * IPv6 library code, needed by static components when full IPv6 support is
3 * not configured or static. These functions are needed by GSO/GRO implementation.
4 */
5#include <linux/export.h>
6#include <net/ipv6.h>
7#include <net/ip6_fib.h>
8
9void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
10{
11 static atomic_t ipv6_fragmentation_id;
12 int old, new;
13
14#if IS_ENABLED(CONFIG_IPV6)
15 if (rt && !(rt->dst.flags & DST_NOPEER)) {
16 struct inet_peer *peer;
17 struct net *net;
18
19 net = dev_net(rt->dst.dev);
20 peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
21 if (peer) {
22 fhdr->identification = htonl(inet_getid(peer, 0));
23 inet_putpeer(peer);
24 return;
25 }
26 }
27#endif
28 do {
29 old = atomic_read(&ipv6_fragmentation_id);
30 new = old + 1;
31 if (!new)
32 new = 1;
33 } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
34 fhdr->identification = htonl(new);
35}
36EXPORT_SYMBOL(ipv6_select_ident);
37
38int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
39{
40 u16 offset = sizeof(struct ipv6hdr);
41 struct ipv6_opt_hdr *exthdr =
42 (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
43 unsigned int packet_len = skb->tail - skb->network_header;
44 int found_rhdr = 0;
45 *nexthdr = &ipv6_hdr(skb)->nexthdr;
46
47 while (offset + 1 <= packet_len) {
48
49 switch (**nexthdr) {
50
51 case NEXTHDR_HOP:
52 break;
53 case NEXTHDR_ROUTING:
54 found_rhdr = 1;
55 break;
56 case NEXTHDR_DEST:
57#if IS_ENABLED(CONFIG_IPV6_MIP6)
58 if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
59 break;
60#endif
61 if (found_rhdr)
62 return offset;
63 break;
64 default :
65 return offset;
66 }
67
68 offset += ipv6_optlen(exthdr);
69 *nexthdr = &exthdr->nexthdr;
70 exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
71 offset);
72 }
73
74 return offset;
75}
76EXPORT_SYMBOL(ip6_find_1stfragopt);
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index 053082dfc93e..22d1bd4670da 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -25,7 +25,9 @@
25#include <linux/spinlock.h> 25#include <linux/spinlock.h>
26#include <net/protocol.h> 26#include <net/protocol.h>
27 27
28#if IS_ENABLED(CONFIG_IPV6)
28const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly; 29const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly;
30EXPORT_SYMBOL(inet6_protos);
29 31
30int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol) 32int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol)
31{ 33{
@@ -50,3 +52,26 @@ int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol
50 return ret; 52 return ret;
51} 53}
52EXPORT_SYMBOL(inet6_del_protocol); 54EXPORT_SYMBOL(inet6_del_protocol);
55#endif
56
57const struct net_offload __rcu *inet6_offloads[MAX_INET_PROTOS] __read_mostly;
58
59int inet6_add_offload(const struct net_offload *prot, unsigned char protocol)
60{
61 return !cmpxchg((const struct net_offload **)&inet6_offloads[protocol],
62 NULL, prot) ? 0 : -1;
63}
64EXPORT_SYMBOL(inet6_add_offload);
65
66int inet6_del_offload(const struct net_offload *prot, unsigned char protocol)
67{
68 int ret;
69
70 ret = (cmpxchg((const struct net_offload **)&inet6_offloads[protocol],
71 prot, NULL) == prot) ? 0 : -1;
72
73 synchronize_net();
74
75 return ret;
76}
77EXPORT_SYMBOL(inet6_del_offload);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index d8e95c77db99..6cd29b1e8b92 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -50,7 +50,7 @@
50#include <net/udp.h> 50#include <net/udp.h>
51#include <net/inet_common.h> 51#include <net/inet_common.h>
52#include <net/tcp_states.h> 52#include <net/tcp_states.h>
53#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 53#if IS_ENABLED(CONFIG_IPV6_MIP6)
54#include <net/mip6.h> 54#include <net/mip6.h>
55#endif 55#endif
56#include <linux/mroute6.h> 56#include <linux/mroute6.h>
@@ -123,7 +123,7 @@ static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
123 return 1; 123 return 1;
124} 124}
125 125
126#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 126#if IS_ENABLED(CONFIG_IPV6_MIP6)
127typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb); 127typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb);
128 128
129static mh_filter_t __rcu *mh_filter __read_mostly; 129static mh_filter_t __rcu *mh_filter __read_mostly;
@@ -184,7 +184,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
184 filtered = icmpv6_filter(sk, skb); 184 filtered = icmpv6_filter(sk, skb);
185 break; 185 break;
186 186
187#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 187#if IS_ENABLED(CONFIG_IPV6_MIP6)
188 case IPPROTO_MH: 188 case IPPROTO_MH:
189 { 189 {
190 /* XXX: To validate MH only once for each packet, 190 /* XXX: To validate MH only once for each packet,
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index da8a4e301b1b..e5253ec9e0fc 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -616,6 +616,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
616 table[0].data = &net->ipv6.frags.high_thresh; 616 table[0].data = &net->ipv6.frags.high_thresh;
617 table[1].data = &net->ipv6.frags.low_thresh; 617 table[1].data = &net->ipv6.frags.low_thresh;
618 table[2].data = &net->ipv6.frags.timeout; 618 table[2].data = &net->ipv6.frags.timeout;
619
620 /* Don't export sysctls to unprivileged users */
621 if (net->user_ns != &init_user_ns)
622 table[0].procname = NULL;
619 } 623 }
620 624
621 hdr = register_net_sysctl(net, "net/ipv6", table); 625 hdr = register_net_sysctl(net, "net/ipv6", table);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index b1e6cf0b95fd..e229a3bc345d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -57,6 +57,7 @@
57#include <net/xfrm.h> 57#include <net/xfrm.h>
58#include <net/netevent.h> 58#include <net/netevent.h>
59#include <net/netlink.h> 59#include <net/netlink.h>
60#include <net/nexthop.h>
60 61
61#include <asm/uaccess.h> 62#include <asm/uaccess.h>
62 63
@@ -289,6 +290,8 @@ static inline struct rt6_info *ip6_dst_alloc(struct net *net,
289 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); 290 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
290 rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers); 291 rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
291 rt->rt6i_genid = rt_genid(net); 292 rt->rt6i_genid = rt_genid(net);
293 INIT_LIST_HEAD(&rt->rt6i_siblings);
294 rt->rt6i_nsiblings = 0;
292 } 295 }
293 return rt; 296 return rt;
294} 297}
@@ -318,13 +321,6 @@ static void ip6_dst_destroy(struct dst_entry *dst)
318 } 321 }
319} 322}
320 323
321static atomic_t __rt6_peer_genid = ATOMIC_INIT(0);
322
323static u32 rt6_peer_genid(void)
324{
325 return atomic_read(&__rt6_peer_genid);
326}
327
328void rt6_bind_peer(struct rt6_info *rt, int create) 324void rt6_bind_peer(struct rt6_info *rt, int create)
329{ 325{
330 struct inet_peer_base *base; 326 struct inet_peer_base *base;
@@ -338,8 +334,6 @@ void rt6_bind_peer(struct rt6_info *rt, int create)
338 if (peer) { 334 if (peer) {
339 if (!rt6_set_peer(rt, peer)) 335 if (!rt6_set_peer(rt, peer))
340 inet_putpeer(peer); 336 inet_putpeer(peer);
341 else
342 rt->rt6i_peer_genid = rt6_peer_genid();
343 } 337 }
344} 338}
345 339
@@ -385,6 +379,69 @@ static bool rt6_need_strict(const struct in6_addr *daddr)
385 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); 379 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
386} 380}
387 381
382/* Multipath route selection:
383 * Hash based function using packet header and flowlabel.
384 * Adapted from fib_info_hashfn()
385 */
386static int rt6_info_hash_nhsfn(unsigned int candidate_count,
387 const struct flowi6 *fl6)
388{
389 unsigned int val = fl6->flowi6_proto;
390
391 val ^= (__force u32)fl6->daddr.s6_addr32[0];
392 val ^= (__force u32)fl6->daddr.s6_addr32[1];
393 val ^= (__force u32)fl6->daddr.s6_addr32[2];
394 val ^= (__force u32)fl6->daddr.s6_addr32[3];
395
396 val ^= (__force u32)fl6->saddr.s6_addr32[0];
397 val ^= (__force u32)fl6->saddr.s6_addr32[1];
398 val ^= (__force u32)fl6->saddr.s6_addr32[2];
399 val ^= (__force u32)fl6->saddr.s6_addr32[3];
400
401 /* Work only if this not encapsulated */
402 switch (fl6->flowi6_proto) {
403 case IPPROTO_UDP:
404 case IPPROTO_TCP:
405 case IPPROTO_SCTP:
406 val ^= (__force u16)fl6->fl6_sport;
407 val ^= (__force u16)fl6->fl6_dport;
408 break;
409
410 case IPPROTO_ICMPV6:
411 val ^= (__force u16)fl6->fl6_icmp_type;
412 val ^= (__force u16)fl6->fl6_icmp_code;
413 break;
414 }
415 /* RFC6438 recommands to use flowlabel */
416 val ^= (__force u32)fl6->flowlabel;
417
418 /* Perhaps, we need to tune, this function? */
419 val = val ^ (val >> 7) ^ (val >> 12);
420 return val % candidate_count;
421}
422
423static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
424 struct flowi6 *fl6)
425{
426 struct rt6_info *sibling, *next_sibling;
427 int route_choosen;
428
429 route_choosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
430 /* Don't change the route, if route_choosen == 0
431 * (siblings does not include ourself)
432 */
433 if (route_choosen)
434 list_for_each_entry_safe(sibling, next_sibling,
435 &match->rt6i_siblings, rt6i_siblings) {
436 route_choosen--;
437 if (route_choosen == 0) {
438 match = sibling;
439 break;
440 }
441 }
442 return match;
443}
444
388/* 445/*
389 * Route lookup. Any table->tb6_lock is implied. 446 * Route lookup. Any table->tb6_lock is implied.
390 */ 447 */
@@ -487,35 +544,32 @@ static inline int rt6_check_dev(struct rt6_info *rt, int oif)
487 return 0; 544 return 0;
488} 545}
489 546
490static inline int rt6_check_neigh(struct rt6_info *rt) 547static inline bool rt6_check_neigh(struct rt6_info *rt)
491{ 548{
492 struct neighbour *neigh; 549 struct neighbour *neigh;
493 int m; 550 bool ret = false;
494 551
495 neigh = rt->n; 552 neigh = rt->n;
496 if (rt->rt6i_flags & RTF_NONEXTHOP || 553 if (rt->rt6i_flags & RTF_NONEXTHOP ||
497 !(rt->rt6i_flags & RTF_GATEWAY)) 554 !(rt->rt6i_flags & RTF_GATEWAY))
498 m = 1; 555 ret = true;
499 else if (neigh) { 556 else if (neigh) {
500 read_lock_bh(&neigh->lock); 557 read_lock_bh(&neigh->lock);
501 if (neigh->nud_state & NUD_VALID) 558 if (neigh->nud_state & NUD_VALID)
502 m = 2; 559 ret = true;
503#ifdef CONFIG_IPV6_ROUTER_PREF 560#ifdef CONFIG_IPV6_ROUTER_PREF
504 else if (neigh->nud_state & NUD_FAILED) 561 else if (!(neigh->nud_state & NUD_FAILED))
505 m = 0; 562 ret = true;
506#endif 563#endif
507 else
508 m = 1;
509 read_unlock_bh(&neigh->lock); 564 read_unlock_bh(&neigh->lock);
510 } else 565 }
511 m = 0; 566 return ret;
512 return m;
513} 567}
514 568
515static int rt6_score_route(struct rt6_info *rt, int oif, 569static int rt6_score_route(struct rt6_info *rt, int oif,
516 int strict) 570 int strict)
517{ 571{
518 int m, n; 572 int m;
519 573
520 m = rt6_check_dev(rt, oif); 574 m = rt6_check_dev(rt, oif);
521 if (!m && (strict & RT6_LOOKUP_F_IFACE)) 575 if (!m && (strict & RT6_LOOKUP_F_IFACE))
@@ -523,8 +577,7 @@ static int rt6_score_route(struct rt6_info *rt, int oif,
523#ifdef CONFIG_IPV6_ROUTER_PREF 577#ifdef CONFIG_IPV6_ROUTER_PREF
524 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2; 578 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
525#endif 579#endif
526 n = rt6_check_neigh(rt); 580 if (!rt6_check_neigh(rt) && (strict & RT6_LOOKUP_F_REACHABLE))
527 if (!n && (strict & RT6_LOOKUP_F_REACHABLE))
528 return -1; 581 return -1;
529 return m; 582 return m;
530} 583}
@@ -666,7 +719,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
666 else 719 else
667 rt6_set_expires(rt, jiffies + HZ * lifetime); 720 rt6_set_expires(rt, jiffies + HZ * lifetime);
668 721
669 dst_release(&rt->dst); 722 ip6_rt_put(rt);
670 } 723 }
671 return 0; 724 return 0;
672} 725}
@@ -702,6 +755,8 @@ static struct rt6_info *ip6_pol_route_lookup(struct net *net,
702restart: 755restart:
703 rt = fn->leaf; 756 rt = fn->leaf;
704 rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags); 757 rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
758 if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
759 rt = rt6_multipath_select(rt, fl6);
705 BACKTRACK(net, &fl6->saddr); 760 BACKTRACK(net, &fl6->saddr);
706out: 761out:
707 dst_use(&rt->dst, jiffies); 762 dst_use(&rt->dst, jiffies);
@@ -863,7 +918,8 @@ restart_2:
863 918
864restart: 919restart:
865 rt = rt6_select(fn, oif, strict | reachable); 920 rt = rt6_select(fn, oif, strict | reachable);
866 921 if (rt->rt6i_nsiblings && oif == 0)
922 rt = rt6_multipath_select(rt, fl6);
867 BACKTRACK(net, &fl6->saddr); 923 BACKTRACK(net, &fl6->saddr);
868 if (rt == net->ipv6.ip6_null_entry || 924 if (rt == net->ipv6.ip6_null_entry ||
869 rt->rt6i_flags & RTF_CACHE) 925 rt->rt6i_flags & RTF_CACHE)
@@ -879,7 +935,7 @@ restart:
879 else 935 else
880 goto out2; 936 goto out2;
881 937
882 dst_release(&rt->dst); 938 ip6_rt_put(rt);
883 rt = nrt ? : net->ipv6.ip6_null_entry; 939 rt = nrt ? : net->ipv6.ip6_null_entry;
884 940
885 dst_hold(&rt->dst); 941 dst_hold(&rt->dst);
@@ -896,7 +952,7 @@ restart:
896 * Race condition! In the gap, when table->tb6_lock was 952 * Race condition! In the gap, when table->tb6_lock was
897 * released someone could insert this route. Relookup. 953 * released someone could insert this route. Relookup.
898 */ 954 */
899 dst_release(&rt->dst); 955 ip6_rt_put(rt);
900 goto relookup; 956 goto relookup;
901 957
902out: 958out:
@@ -1030,14 +1086,9 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
1030 if (rt->rt6i_genid != rt_genid(dev_net(rt->dst.dev))) 1086 if (rt->rt6i_genid != rt_genid(dev_net(rt->dst.dev)))
1031 return NULL; 1087 return NULL;
1032 1088
1033 if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) { 1089 if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
1034 if (rt->rt6i_peer_genid != rt6_peer_genid()) {
1035 if (!rt6_has_peer(rt))
1036 rt6_bind_peer(rt, 0);
1037 rt->rt6i_peer_genid = rt6_peer_genid();
1038 }
1039 return dst; 1090 return dst;
1040 } 1091
1041 return NULL; 1092 return NULL;
1042} 1093}
1043 1094
@@ -1316,12 +1367,6 @@ out:
1316 return entries > rt_max_size; 1367 return entries > rt_max_size;
1317} 1368}
1318 1369
1319/* Clean host part of a prefix. Not necessary in radix tree,
1320 but results in cleaner routing tables.
1321
1322 Remove it only when all the things will work!
1323 */
1324
1325int ip6_dst_hoplimit(struct dst_entry *dst) 1370int ip6_dst_hoplimit(struct dst_entry *dst)
1326{ 1371{
1327 int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT); 1372 int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
@@ -1507,7 +1552,7 @@ int ip6_route_add(struct fib6_config *cfg)
1507 goto out; 1552 goto out;
1508 if (dev) { 1553 if (dev) {
1509 if (dev != grt->dst.dev) { 1554 if (dev != grt->dst.dev) {
1510 dst_release(&grt->dst); 1555 ip6_rt_put(grt);
1511 goto out; 1556 goto out;
1512 } 1557 }
1513 } else { 1558 } else {
@@ -1518,7 +1563,7 @@ int ip6_route_add(struct fib6_config *cfg)
1518 } 1563 }
1519 if (!(grt->rt6i_flags & RTF_GATEWAY)) 1564 if (!(grt->rt6i_flags & RTF_GATEWAY))
1520 err = 0; 1565 err = 0;
1521 dst_release(&grt->dst); 1566 ip6_rt_put(grt);
1522 1567
1523 if (err) 1568 if (err)
1524 goto out; 1569 goto out;
@@ -1604,7 +1649,7 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1604 write_unlock_bh(&table->tb6_lock); 1649 write_unlock_bh(&table->tb6_lock);
1605 1650
1606out: 1651out:
1607 dst_release(&rt->dst); 1652 ip6_rt_put(rt);
1608 return err; 1653 return err;
1609} 1654}
1610 1655
@@ -1987,7 +2032,7 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1987 switch(cmd) { 2032 switch(cmd) {
1988 case SIOCADDRT: /* Add a route */ 2033 case SIOCADDRT: /* Add a route */
1989 case SIOCDELRT: /* Delete a route */ 2034 case SIOCDELRT: /* Delete a route */
1990 if (!capable(CAP_NET_ADMIN)) 2035 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1991 return -EPERM; 2036 return -EPERM;
1992 err = copy_from_user(&rtmsg, arg, 2037 err = copy_from_user(&rtmsg, arg,
1993 sizeof(struct in6_rtmsg)); 2038 sizeof(struct in6_rtmsg));
@@ -2249,6 +2294,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2249 [RTA_IIF] = { .type = NLA_U32 }, 2294 [RTA_IIF] = { .type = NLA_U32 },
2250 [RTA_PRIORITY] = { .type = NLA_U32 }, 2295 [RTA_PRIORITY] = { .type = NLA_U32 },
2251 [RTA_METRICS] = { .type = NLA_NESTED }, 2296 [RTA_METRICS] = { .type = NLA_NESTED },
2297 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
2252}; 2298};
2253 2299
2254static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, 2300static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -2326,11 +2372,71 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2326 if (tb[RTA_TABLE]) 2372 if (tb[RTA_TABLE])
2327 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]); 2373 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2328 2374
2375 if (tb[RTA_MULTIPATH]) {
2376 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
2377 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
2378 }
2379
2329 err = 0; 2380 err = 0;
2330errout: 2381errout:
2331 return err; 2382 return err;
2332} 2383}
2333 2384
2385static int ip6_route_multipath(struct fib6_config *cfg, int add)
2386{
2387 struct fib6_config r_cfg;
2388 struct rtnexthop *rtnh;
2389 int remaining;
2390 int attrlen;
2391 int err = 0, last_err = 0;
2392
2393beginning:
2394 rtnh = (struct rtnexthop *)cfg->fc_mp;
2395 remaining = cfg->fc_mp_len;
2396
2397 /* Parse a Multipath Entry */
2398 while (rtnh_ok(rtnh, remaining)) {
2399 memcpy(&r_cfg, cfg, sizeof(*cfg));
2400 if (rtnh->rtnh_ifindex)
2401 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
2402
2403 attrlen = rtnh_attrlen(rtnh);
2404 if (attrlen > 0) {
2405 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
2406
2407 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
2408 if (nla) {
2409 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
2410 r_cfg.fc_flags |= RTF_GATEWAY;
2411 }
2412 }
2413 err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
2414 if (err) {
2415 last_err = err;
2416 /* If we are trying to remove a route, do not stop the
2417 * loop when ip6_route_del() fails (because next hop is
2418 * already gone), we should try to remove all next hops.
2419 */
2420 if (add) {
2421 /* If add fails, we should try to delete all
2422 * next hops that have been already added.
2423 */
2424 add = 0;
2425 goto beginning;
2426 }
2427 }
2428 /* Because each route is added like a single route we remove
2429 * this flag after the first nexthop (if there is a collision,
2430 * we have already fail to add the first nexthop:
2431 * fib6_add_rt2node() has reject it).
2432 */
2433 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~NLM_F_EXCL;
2434 rtnh = rtnh_next(rtnh, &remaining);
2435 }
2436
2437 return last_err;
2438}
2439
2334static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) 2440static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2335{ 2441{
2336 struct fib6_config cfg; 2442 struct fib6_config cfg;
@@ -2340,7 +2446,10 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *a
2340 if (err < 0) 2446 if (err < 0)
2341 return err; 2447 return err;
2342 2448
2343 return ip6_route_del(&cfg); 2449 if (cfg.fc_mp)
2450 return ip6_route_multipath(&cfg, 0);
2451 else
2452 return ip6_route_del(&cfg);
2344} 2453}
2345 2454
2346static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) 2455static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
@@ -2352,7 +2461,10 @@ static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *a
2352 if (err < 0) 2461 if (err < 0)
2353 return err; 2462 return err;
2354 2463
2355 return ip6_route_add(&cfg); 2464 if (cfg.fc_mp)
2465 return ip6_route_multipath(&cfg, 1);
2466 else
2467 return ip6_route_add(&cfg);
2356} 2468}
2357 2469
2358static inline size_t rt6_nlmsg_size(void) 2470static inline size_t rt6_nlmsg_size(void)
@@ -2596,7 +2708,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2596 2708
2597 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2709 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2598 if (!skb) { 2710 if (!skb) {
2599 dst_release(&rt->dst); 2711 ip6_rt_put(rt);
2600 err = -ENOBUFS; 2712 err = -ENOBUFS;
2601 goto errout; 2713 goto errout;
2602 } 2714 }
@@ -2873,6 +2985,10 @@ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2873 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires; 2985 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
2874 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss; 2986 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
2875 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; 2987 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2988
2989 /* Don't export sysctls to unprivileged users */
2990 if (net->user_ns != &init_user_ns)
2991 table[0].procname = NULL;
2876 } 2992 }
2877 2993
2878 return table; 2994 return table;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 3ed54ffd8d50..cfba99b2c2a4 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -65,9 +65,14 @@
65#define HASH_SIZE 16 65#define HASH_SIZE 16
66#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) 66#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
67 67
68static bool log_ecn_error = true;
69module_param(log_ecn_error, bool, 0644);
70MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
71
68static int ipip6_tunnel_init(struct net_device *dev); 72static int ipip6_tunnel_init(struct net_device *dev);
69static void ipip6_tunnel_setup(struct net_device *dev); 73static void ipip6_tunnel_setup(struct net_device *dev);
70static void ipip6_dev_free(struct net_device *dev); 74static void ipip6_dev_free(struct net_device *dev);
75static struct rtnl_link_ops sit_link_ops __read_mostly;
71 76
72static int sit_net_id __read_mostly; 77static int sit_net_id __read_mostly;
73struct sit_net { 78struct sit_net {
@@ -80,22 +85,6 @@ struct sit_net {
80 struct net_device *fb_tunnel_dev; 85 struct net_device *fb_tunnel_dev;
81}; 86};
82 87
83/*
84 * Locking : hash tables are protected by RCU and RTNL
85 */
86
87#define for_each_ip_tunnel_rcu(start) \
88 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
89
90/* often modified stats are per cpu, other are shared (netdev->stats) */
91struct pcpu_tstats {
92 u64 rx_packets;
93 u64 rx_bytes;
94 u64 tx_packets;
95 u64 tx_bytes;
96 struct u64_stats_sync syncp;
97};
98
99static struct rtnl_link_stats64 *ipip6_get_stats64(struct net_device *dev, 88static struct rtnl_link_stats64 *ipip6_get_stats64(struct net_device *dev,
100 struct rtnl_link_stats64 *tot) 89 struct rtnl_link_stats64 *tot)
101{ 90{
@@ -121,6 +110,7 @@ static struct rtnl_link_stats64 *ipip6_get_stats64(struct net_device *dev,
121 } 110 }
122 111
123 tot->rx_errors = dev->stats.rx_errors; 112 tot->rx_errors = dev->stats.rx_errors;
113 tot->rx_frame_errors = dev->stats.rx_frame_errors;
124 tot->tx_fifo_errors = dev->stats.tx_fifo_errors; 114 tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
125 tot->tx_carrier_errors = dev->stats.tx_carrier_errors; 115 tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
126 tot->tx_dropped = dev->stats.tx_dropped; 116 tot->tx_dropped = dev->stats.tx_dropped;
@@ -141,20 +131,20 @@ static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net,
141 struct ip_tunnel *t; 131 struct ip_tunnel *t;
142 struct sit_net *sitn = net_generic(net, sit_net_id); 132 struct sit_net *sitn = net_generic(net, sit_net_id);
143 133
144 for_each_ip_tunnel_rcu(sitn->tunnels_r_l[h0 ^ h1]) { 134 for_each_ip_tunnel_rcu(t, sitn->tunnels_r_l[h0 ^ h1]) {
145 if (local == t->parms.iph.saddr && 135 if (local == t->parms.iph.saddr &&
146 remote == t->parms.iph.daddr && 136 remote == t->parms.iph.daddr &&
147 (!dev || !t->parms.link || dev->iflink == t->parms.link) && 137 (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
148 (t->dev->flags & IFF_UP)) 138 (t->dev->flags & IFF_UP))
149 return t; 139 return t;
150 } 140 }
151 for_each_ip_tunnel_rcu(sitn->tunnels_r[h0]) { 141 for_each_ip_tunnel_rcu(t, sitn->tunnels_r[h0]) {
152 if (remote == t->parms.iph.daddr && 142 if (remote == t->parms.iph.daddr &&
153 (!dev || !t->parms.link || dev->iflink == t->parms.link) && 143 (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
154 (t->dev->flags & IFF_UP)) 144 (t->dev->flags & IFF_UP))
155 return t; 145 return t;
156 } 146 }
157 for_each_ip_tunnel_rcu(sitn->tunnels_l[h1]) { 147 for_each_ip_tunnel_rcu(t, sitn->tunnels_l[h1]) {
158 if (local == t->parms.iph.saddr && 148 if (local == t->parms.iph.saddr &&
159 (!dev || !t->parms.link || dev->iflink == t->parms.link) && 149 (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
160 (t->dev->flags & IFF_UP)) 150 (t->dev->flags & IFF_UP))
@@ -231,6 +221,37 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
231#endif 221#endif
232} 222}
233 223
224static int ipip6_tunnel_create(struct net_device *dev)
225{
226 struct ip_tunnel *t = netdev_priv(dev);
227 struct net *net = dev_net(dev);
228 struct sit_net *sitn = net_generic(net, sit_net_id);
229 int err;
230
231 err = ipip6_tunnel_init(dev);
232 if (err < 0)
233 goto out;
234 ipip6_tunnel_clone_6rd(dev, sitn);
235
236 if ((__force u16)t->parms.i_flags & SIT_ISATAP)
237 dev->priv_flags |= IFF_ISATAP;
238
239 err = register_netdevice(dev);
240 if (err < 0)
241 goto out;
242
243 strcpy(t->parms.name, dev->name);
244 dev->rtnl_link_ops = &sit_link_ops;
245
246 dev_hold(dev);
247
248 ipip6_tunnel_link(sitn, t);
249 return 0;
250
251out:
252 return err;
253}
254
234static struct ip_tunnel *ipip6_tunnel_locate(struct net *net, 255static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
235 struct ip_tunnel_parm *parms, int create) 256 struct ip_tunnel_parm *parms, int create)
236{ 257{
@@ -271,21 +292,9 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
271 nt = netdev_priv(dev); 292 nt = netdev_priv(dev);
272 293
273 nt->parms = *parms; 294 nt->parms = *parms;
274 if (ipip6_tunnel_init(dev) < 0) 295 if (ipip6_tunnel_create(dev) < 0)
275 goto failed_free; 296 goto failed_free;
276 ipip6_tunnel_clone_6rd(dev, sitn);
277 297
278 if (parms->i_flags & SIT_ISATAP)
279 dev->priv_flags |= IFF_ISATAP;
280
281 if (register_netdevice(dev) < 0)
282 goto failed_free;
283
284 strcpy(nt->parms.name, dev->name);
285
286 dev_hold(dev);
287
288 ipip6_tunnel_link(sitn, nt);
289 return nt; 298 return nt;
290 299
291failed_free: 300failed_free:
@@ -581,16 +590,11 @@ out:
581 return err; 590 return err;
582} 591}
583 592
584static inline void ipip6_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
585{
586 if (INET_ECN_is_ce(iph->tos))
587 IP6_ECN_set_ce(ipv6_hdr(skb));
588}
589
590static int ipip6_rcv(struct sk_buff *skb) 593static int ipip6_rcv(struct sk_buff *skb)
591{ 594{
592 const struct iphdr *iph; 595 const struct iphdr *iph;
593 struct ip_tunnel *tunnel; 596 struct ip_tunnel *tunnel;
597 int err;
594 598
595 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 599 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
596 goto out; 600 goto out;
@@ -612,18 +616,27 @@ static int ipip6_rcv(struct sk_buff *skb)
612 if ((tunnel->dev->priv_flags & IFF_ISATAP) && 616 if ((tunnel->dev->priv_flags & IFF_ISATAP) &&
613 !isatap_chksrc(skb, iph, tunnel)) { 617 !isatap_chksrc(skb, iph, tunnel)) {
614 tunnel->dev->stats.rx_errors++; 618 tunnel->dev->stats.rx_errors++;
615 kfree_skb(skb); 619 goto out;
616 return 0; 620 }
621
622 __skb_tunnel_rx(skb, tunnel->dev);
623
624 err = IP_ECN_decapsulate(iph, skb);
625 if (unlikely(err)) {
626 if (log_ecn_error)
627 net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
628 &iph->saddr, iph->tos);
629 if (err > 1) {
630 ++tunnel->dev->stats.rx_frame_errors;
631 ++tunnel->dev->stats.rx_errors;
632 goto out;
633 }
617 } 634 }
618 635
619 tstats = this_cpu_ptr(tunnel->dev->tstats); 636 tstats = this_cpu_ptr(tunnel->dev->tstats);
620 tstats->rx_packets++; 637 tstats->rx_packets++;
621 tstats->rx_bytes += skb->len; 638 tstats->rx_bytes += skb->len;
622 639
623 __skb_tunnel_rx(skb, tunnel->dev);
624
625 ipip6_ecn_decapsulate(iph, skb);
626
627 netif_rx(skb); 640 netif_rx(skb);
628 641
629 return 0; 642 return 0;
@@ -683,7 +696,6 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
683 struct net_device *dev) 696 struct net_device *dev)
684{ 697{
685 struct ip_tunnel *tunnel = netdev_priv(dev); 698 struct ip_tunnel *tunnel = netdev_priv(dev);
686 struct pcpu_tstats *tstats;
687 const struct iphdr *tiph = &tunnel->parms.iph; 699 const struct iphdr *tiph = &tunnel->parms.iph;
688 const struct ipv6hdr *iph6 = ipv6_hdr(skb); 700 const struct ipv6hdr *iph6 = ipv6_hdr(skb);
689 u8 tos = tunnel->parms.iph.tos; 701 u8 tos = tunnel->parms.iph.tos;
@@ -864,9 +876,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
864 if ((iph->ttl = tiph->ttl) == 0) 876 if ((iph->ttl = tiph->ttl) == 0)
865 iph->ttl = iph6->hop_limit; 877 iph->ttl = iph6->hop_limit;
866 878
867 nf_reset(skb); 879 iptunnel_xmit(skb, dev);
868 tstats = this_cpu_ptr(dev->tstats);
869 __IPTUNNEL_XMIT(tstats, &dev->stats);
870 return NETDEV_TX_OK; 880 return NETDEV_TX_OK;
871 881
872tx_error_icmp: 882tx_error_icmp:
@@ -914,6 +924,59 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
914 dev->iflink = tunnel->parms.link; 924 dev->iflink = tunnel->parms.link;
915} 925}
916 926
927static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p)
928{
929 struct net *net = dev_net(t->dev);
930 struct sit_net *sitn = net_generic(net, sit_net_id);
931
932 ipip6_tunnel_unlink(sitn, t);
933 synchronize_net();
934 t->parms.iph.saddr = p->iph.saddr;
935 t->parms.iph.daddr = p->iph.daddr;
936 memcpy(t->dev->dev_addr, &p->iph.saddr, 4);
937 memcpy(t->dev->broadcast, &p->iph.daddr, 4);
938 ipip6_tunnel_link(sitn, t);
939 t->parms.iph.ttl = p->iph.ttl;
940 t->parms.iph.tos = p->iph.tos;
941 if (t->parms.link != p->link) {
942 t->parms.link = p->link;
943 ipip6_tunnel_bind_dev(t->dev);
944 }
945 netdev_state_change(t->dev);
946}
947
948#ifdef CONFIG_IPV6_SIT_6RD
949static int ipip6_tunnel_update_6rd(struct ip_tunnel *t,
950 struct ip_tunnel_6rd *ip6rd)
951{
952 struct in6_addr prefix;
953 __be32 relay_prefix;
954
955 if (ip6rd->relay_prefixlen > 32 ||
956 ip6rd->prefixlen + (32 - ip6rd->relay_prefixlen) > 64)
957 return -EINVAL;
958
959 ipv6_addr_prefix(&prefix, &ip6rd->prefix, ip6rd->prefixlen);
960 if (!ipv6_addr_equal(&prefix, &ip6rd->prefix))
961 return -EINVAL;
962 if (ip6rd->relay_prefixlen)
963 relay_prefix = ip6rd->relay_prefix &
964 htonl(0xffffffffUL <<
965 (32 - ip6rd->relay_prefixlen));
966 else
967 relay_prefix = 0;
968 if (relay_prefix != ip6rd->relay_prefix)
969 return -EINVAL;
970
971 t->ip6rd.prefix = prefix;
972 t->ip6rd.relay_prefix = relay_prefix;
973 t->ip6rd.prefixlen = ip6rd->prefixlen;
974 t->ip6rd.relay_prefixlen = ip6rd->relay_prefixlen;
975 netdev_state_change(t->dev);
976 return 0;
977}
978#endif
979
917static int 980static int
918ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) 981ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
919{ 982{
@@ -966,7 +1029,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
966 case SIOCADDTUNNEL: 1029 case SIOCADDTUNNEL:
967 case SIOCCHGTUNNEL: 1030 case SIOCCHGTUNNEL:
968 err = -EPERM; 1031 err = -EPERM;
969 if (!capable(CAP_NET_ADMIN)) 1032 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
970 goto done; 1033 goto done;
971 1034
972 err = -EFAULT; 1035 err = -EFAULT;
@@ -995,28 +1058,13 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
995 break; 1058 break;
996 } 1059 }
997 t = netdev_priv(dev); 1060 t = netdev_priv(dev);
998 ipip6_tunnel_unlink(sitn, t);
999 synchronize_net();
1000 t->parms.iph.saddr = p.iph.saddr;
1001 t->parms.iph.daddr = p.iph.daddr;
1002 memcpy(dev->dev_addr, &p.iph.saddr, 4);
1003 memcpy(dev->broadcast, &p.iph.daddr, 4);
1004 ipip6_tunnel_link(sitn, t);
1005 netdev_state_change(dev);
1006 } 1061 }
1062
1063 ipip6_tunnel_update(t, &p);
1007 } 1064 }
1008 1065
1009 if (t) { 1066 if (t) {
1010 err = 0; 1067 err = 0;
1011 if (cmd == SIOCCHGTUNNEL) {
1012 t->parms.iph.ttl = p.iph.ttl;
1013 t->parms.iph.tos = p.iph.tos;
1014 if (t->parms.link != p.link) {
1015 t->parms.link = p.link;
1016 ipip6_tunnel_bind_dev(dev);
1017 netdev_state_change(dev);
1018 }
1019 }
1020 if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p))) 1068 if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
1021 err = -EFAULT; 1069 err = -EFAULT;
1022 } else 1070 } else
@@ -1025,7 +1073,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
1025 1073
1026 case SIOCDELTUNNEL: 1074 case SIOCDELTUNNEL:
1027 err = -EPERM; 1075 err = -EPERM;
1028 if (!capable(CAP_NET_ADMIN)) 1076 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1029 goto done; 1077 goto done;
1030 1078
1031 if (dev == sitn->fb_tunnel_dev) { 1079 if (dev == sitn->fb_tunnel_dev) {
@@ -1058,7 +1106,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
1058 case SIOCDELPRL: 1106 case SIOCDELPRL:
1059 case SIOCCHGPRL: 1107 case SIOCCHGPRL:
1060 err = -EPERM; 1108 err = -EPERM;
1061 if (!capable(CAP_NET_ADMIN)) 1109 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1062 goto done; 1110 goto done;
1063 err = -EINVAL; 1111 err = -EINVAL;
1064 if (dev == sitn->fb_tunnel_dev) 1112 if (dev == sitn->fb_tunnel_dev)
@@ -1087,7 +1135,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
1087 case SIOCCHG6RD: 1135 case SIOCCHG6RD:
1088 case SIOCDEL6RD: 1136 case SIOCDEL6RD:
1089 err = -EPERM; 1137 err = -EPERM;
1090 if (!capable(CAP_NET_ADMIN)) 1138 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1091 goto done; 1139 goto done;
1092 1140
1093 err = -EFAULT; 1141 err = -EFAULT;
@@ -1098,31 +1146,9 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
1098 t = netdev_priv(dev); 1146 t = netdev_priv(dev);
1099 1147
1100 if (cmd != SIOCDEL6RD) { 1148 if (cmd != SIOCDEL6RD) {
1101 struct in6_addr prefix; 1149 err = ipip6_tunnel_update_6rd(t, &ip6rd);
1102 __be32 relay_prefix; 1150 if (err < 0)
1103
1104 err = -EINVAL;
1105 if (ip6rd.relay_prefixlen > 32 ||
1106 ip6rd.prefixlen + (32 - ip6rd.relay_prefixlen) > 64)
1107 goto done;
1108
1109 ipv6_addr_prefix(&prefix, &ip6rd.prefix,
1110 ip6rd.prefixlen);
1111 if (!ipv6_addr_equal(&prefix, &ip6rd.prefix))
1112 goto done;
1113 if (ip6rd.relay_prefixlen)
1114 relay_prefix = ip6rd.relay_prefix &
1115 htonl(0xffffffffUL <<
1116 (32 - ip6rd.relay_prefixlen));
1117 else
1118 relay_prefix = 0;
1119 if (relay_prefix != ip6rd.relay_prefix)
1120 goto done; 1151 goto done;
1121
1122 t->ip6rd.prefix = prefix;
1123 t->ip6rd.relay_prefix = relay_prefix;
1124 t->ip6rd.prefixlen = ip6rd.prefixlen;
1125 t->ip6rd.relay_prefixlen = ip6rd.relay_prefixlen;
1126 } else 1152 } else
1127 ipip6_tunnel_clone_6rd(dev, sitn); 1153 ipip6_tunnel_clone_6rd(dev, sitn);
1128 1154
@@ -1216,6 +1242,239 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
1216 return 0; 1242 return 0;
1217} 1243}
1218 1244
1245static void ipip6_netlink_parms(struct nlattr *data[],
1246 struct ip_tunnel_parm *parms)
1247{
1248 memset(parms, 0, sizeof(*parms));
1249
1250 parms->iph.version = 4;
1251 parms->iph.protocol = IPPROTO_IPV6;
1252 parms->iph.ihl = 5;
1253 parms->iph.ttl = 64;
1254
1255 if (!data)
1256 return;
1257
1258 if (data[IFLA_IPTUN_LINK])
1259 parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
1260
1261 if (data[IFLA_IPTUN_LOCAL])
1262 parms->iph.saddr = nla_get_be32(data[IFLA_IPTUN_LOCAL]);
1263
1264 if (data[IFLA_IPTUN_REMOTE])
1265 parms->iph.daddr = nla_get_be32(data[IFLA_IPTUN_REMOTE]);
1266
1267 if (data[IFLA_IPTUN_TTL]) {
1268 parms->iph.ttl = nla_get_u8(data[IFLA_IPTUN_TTL]);
1269 if (parms->iph.ttl)
1270 parms->iph.frag_off = htons(IP_DF);
1271 }
1272
1273 if (data[IFLA_IPTUN_TOS])
1274 parms->iph.tos = nla_get_u8(data[IFLA_IPTUN_TOS]);
1275
1276 if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC]))
1277 parms->iph.frag_off = htons(IP_DF);
1278
1279 if (data[IFLA_IPTUN_FLAGS])
1280 parms->i_flags = nla_get_be16(data[IFLA_IPTUN_FLAGS]);
1281}
1282
1283#ifdef CONFIG_IPV6_SIT_6RD
1284/* This function returns true when 6RD attributes are present in the nl msg */
1285static bool ipip6_netlink_6rd_parms(struct nlattr *data[],
1286 struct ip_tunnel_6rd *ip6rd)
1287{
1288 bool ret = false;
1289 memset(ip6rd, 0, sizeof(*ip6rd));
1290
1291 if (!data)
1292 return ret;
1293
1294 if (data[IFLA_IPTUN_6RD_PREFIX]) {
1295 ret = true;
1296 nla_memcpy(&ip6rd->prefix, data[IFLA_IPTUN_6RD_PREFIX],
1297 sizeof(struct in6_addr));
1298 }
1299
1300 if (data[IFLA_IPTUN_6RD_RELAY_PREFIX]) {
1301 ret = true;
1302 ip6rd->relay_prefix =
1303 nla_get_be32(data[IFLA_IPTUN_6RD_RELAY_PREFIX]);
1304 }
1305
1306 if (data[IFLA_IPTUN_6RD_PREFIXLEN]) {
1307 ret = true;
1308 ip6rd->prefixlen = nla_get_u16(data[IFLA_IPTUN_6RD_PREFIXLEN]);
1309 }
1310
1311 if (data[IFLA_IPTUN_6RD_RELAY_PREFIXLEN]) {
1312 ret = true;
1313 ip6rd->relay_prefixlen =
1314 nla_get_u16(data[IFLA_IPTUN_6RD_RELAY_PREFIXLEN]);
1315 }
1316
1317 return ret;
1318}
1319#endif
1320
1321static int ipip6_newlink(struct net *src_net, struct net_device *dev,
1322 struct nlattr *tb[], struct nlattr *data[])
1323{
1324 struct net *net = dev_net(dev);
1325 struct ip_tunnel *nt;
1326#ifdef CONFIG_IPV6_SIT_6RD
1327 struct ip_tunnel_6rd ip6rd;
1328#endif
1329 int err;
1330
1331 nt = netdev_priv(dev);
1332 ipip6_netlink_parms(data, &nt->parms);
1333
1334 if (ipip6_tunnel_locate(net, &nt->parms, 0))
1335 return -EEXIST;
1336
1337 err = ipip6_tunnel_create(dev);
1338 if (err < 0)
1339 return err;
1340
1341#ifdef CONFIG_IPV6_SIT_6RD
1342 if (ipip6_netlink_6rd_parms(data, &ip6rd))
1343 err = ipip6_tunnel_update_6rd(nt, &ip6rd);
1344#endif
1345
1346 return err;
1347}
1348
1349static int ipip6_changelink(struct net_device *dev, struct nlattr *tb[],
1350 struct nlattr *data[])
1351{
1352 struct ip_tunnel *t;
1353 struct ip_tunnel_parm p;
1354 struct net *net = dev_net(dev);
1355 struct sit_net *sitn = net_generic(net, sit_net_id);
1356#ifdef CONFIG_IPV6_SIT_6RD
1357 struct ip_tunnel_6rd ip6rd;
1358#endif
1359
1360 if (dev == sitn->fb_tunnel_dev)
1361 return -EINVAL;
1362
1363 ipip6_netlink_parms(data, &p);
1364
1365 if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) ||
1366 (!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr))
1367 return -EINVAL;
1368
1369 t = ipip6_tunnel_locate(net, &p, 0);
1370
1371 if (t) {
1372 if (t->dev != dev)
1373 return -EEXIST;
1374 } else
1375 t = netdev_priv(dev);
1376
1377 ipip6_tunnel_update(t, &p);
1378
1379#ifdef CONFIG_IPV6_SIT_6RD
1380 if (ipip6_netlink_6rd_parms(data, &ip6rd))
1381 return ipip6_tunnel_update_6rd(t, &ip6rd);
1382#endif
1383
1384 return 0;
1385}
1386
1387static size_t ipip6_get_size(const struct net_device *dev)
1388{
1389 return
1390 /* IFLA_IPTUN_LINK */
1391 nla_total_size(4) +
1392 /* IFLA_IPTUN_LOCAL */
1393 nla_total_size(4) +
1394 /* IFLA_IPTUN_REMOTE */
1395 nla_total_size(4) +
1396 /* IFLA_IPTUN_TTL */
1397 nla_total_size(1) +
1398 /* IFLA_IPTUN_TOS */
1399 nla_total_size(1) +
1400 /* IFLA_IPTUN_PMTUDISC */
1401 nla_total_size(1) +
1402 /* IFLA_IPTUN_FLAGS */
1403 nla_total_size(2) +
1404#ifdef CONFIG_IPV6_SIT_6RD
1405 /* IFLA_IPTUN_6RD_PREFIX */
1406 nla_total_size(sizeof(struct in6_addr)) +
1407 /* IFLA_IPTUN_6RD_RELAY_PREFIX */
1408 nla_total_size(4) +
1409 /* IFLA_IPTUN_6RD_PREFIXLEN */
1410 nla_total_size(2) +
1411 /* IFLA_IPTUN_6RD_RELAY_PREFIXLEN */
1412 nla_total_size(2) +
1413#endif
1414 0;
1415}
1416
1417static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
1418{
1419 struct ip_tunnel *tunnel = netdev_priv(dev);
1420 struct ip_tunnel_parm *parm = &tunnel->parms;
1421
1422 if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
1423 nla_put_be32(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
1424 nla_put_be32(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) ||
1425 nla_put_u8(skb, IFLA_IPTUN_TTL, parm->iph.ttl) ||
1426 nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) ||
1427 nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
1428 !!(parm->iph.frag_off & htons(IP_DF))) ||
1429 nla_put_be16(skb, IFLA_IPTUN_FLAGS, parm->i_flags))
1430 goto nla_put_failure;
1431
1432#ifdef CONFIG_IPV6_SIT_6RD
1433 if (nla_put(skb, IFLA_IPTUN_6RD_PREFIX, sizeof(struct in6_addr),
1434 &tunnel->ip6rd.prefix) ||
1435 nla_put_be32(skb, IFLA_IPTUN_6RD_RELAY_PREFIX,
1436 tunnel->ip6rd.relay_prefix) ||
1437 nla_put_u16(skb, IFLA_IPTUN_6RD_PREFIXLEN,
1438 tunnel->ip6rd.prefixlen) ||
1439 nla_put_u16(skb, IFLA_IPTUN_6RD_RELAY_PREFIXLEN,
1440 tunnel->ip6rd.relay_prefixlen))
1441 goto nla_put_failure;
1442#endif
1443
1444 return 0;
1445
1446nla_put_failure:
1447 return -EMSGSIZE;
1448}
1449
1450static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
1451 [IFLA_IPTUN_LINK] = { .type = NLA_U32 },
1452 [IFLA_IPTUN_LOCAL] = { .type = NLA_U32 },
1453 [IFLA_IPTUN_REMOTE] = { .type = NLA_U32 },
1454 [IFLA_IPTUN_TTL] = { .type = NLA_U8 },
1455 [IFLA_IPTUN_TOS] = { .type = NLA_U8 },
1456 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
1457 [IFLA_IPTUN_FLAGS] = { .type = NLA_U16 },
1458#ifdef CONFIG_IPV6_SIT_6RD
1459 [IFLA_IPTUN_6RD_PREFIX] = { .len = sizeof(struct in6_addr) },
1460 [IFLA_IPTUN_6RD_RELAY_PREFIX] = { .type = NLA_U32 },
1461 [IFLA_IPTUN_6RD_PREFIXLEN] = { .type = NLA_U16 },
1462 [IFLA_IPTUN_6RD_RELAY_PREFIXLEN] = { .type = NLA_U16 },
1463#endif
1464};
1465
1466static struct rtnl_link_ops sit_link_ops __read_mostly = {
1467 .kind = "sit",
1468 .maxtype = IFLA_IPTUN_MAX,
1469 .policy = ipip6_policy,
1470 .priv_size = sizeof(struct ip_tunnel),
1471 .setup = ipip6_tunnel_setup,
1472 .newlink = ipip6_newlink,
1473 .changelink = ipip6_changelink,
1474 .get_size = ipip6_get_size,
1475 .fill_info = ipip6_fill_info,
1476};
1477
1219static struct xfrm_tunnel sit_handler __read_mostly = { 1478static struct xfrm_tunnel sit_handler __read_mostly = {
1220 .handler = ipip6_rcv, 1479 .handler = ipip6_rcv,
1221 .err_handler = ipip6_err, 1480 .err_handler = ipip6_err,
@@ -1302,6 +1561,7 @@ static struct pernet_operations sit_net_ops = {
1302 1561
1303static void __exit sit_cleanup(void) 1562static void __exit sit_cleanup(void)
1304{ 1563{
1564 rtnl_link_unregister(&sit_link_ops);
1305 xfrm4_tunnel_deregister(&sit_handler, AF_INET6); 1565 xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
1306 1566
1307 unregister_pernet_device(&sit_net_ops); 1567 unregister_pernet_device(&sit_net_ops);
@@ -1319,10 +1579,21 @@ static int __init sit_init(void)
1319 return err; 1579 return err;
1320 err = xfrm4_tunnel_register(&sit_handler, AF_INET6); 1580 err = xfrm4_tunnel_register(&sit_handler, AF_INET6);
1321 if (err < 0) { 1581 if (err < 0) {
1322 unregister_pernet_device(&sit_net_ops);
1323 pr_info("%s: can't add protocol\n", __func__); 1582 pr_info("%s: can't add protocol\n", __func__);
1583 goto xfrm_tunnel_failed;
1324 } 1584 }
1585 err = rtnl_link_register(&sit_link_ops);
1586 if (err < 0)
1587 goto rtnl_link_failed;
1588
1589out:
1325 return err; 1590 return err;
1591
1592rtnl_link_failed:
1593 xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
1594xfrm_tunnel_failed:
1595 unregister_pernet_device(&sit_net_ops);
1596 goto out;
1326} 1597}
1327 1598
1328module_init(sit_init); 1599module_init(sit_init);
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 182ab9a85d6c..40161977f7cf 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -214,7 +214,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
214 ireq6->iif = inet6_iif(skb); 214 ireq6->iif = inet6_iif(skb);
215 215
216 req->expires = 0UL; 216 req->expires = 0UL;
217 req->retrans = 0; 217 req->num_retrans = 0;
218 ireq->ecn_ok = ecn_ok; 218 ireq->ecn_ok = ecn_ok;
219 ireq->snd_wscale = tcp_opt.snd_wscale; 219 ireq->snd_wscale = tcp_opt.snd_wscale;
220 ireq->sack_ok = tcp_opt.sack_ok; 220 ireq->sack_ok = tcp_opt.sack_ok;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 26175bffbaa0..93825dd3a7c0 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -77,9 +77,6 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
77 struct request_sock *req); 77 struct request_sock *req);
78 78
79static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); 79static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
80static void __tcp_v6_send_check(struct sk_buff *skb,
81 const struct in6_addr *saddr,
82 const struct in6_addr *daddr);
83 80
84static const struct inet_connection_sock_af_ops ipv6_mapped; 81static const struct inet_connection_sock_af_ops ipv6_mapped;
85static const struct inet_connection_sock_af_ops ipv6_specific; 82static const struct inet_connection_sock_af_ops ipv6_specific;
@@ -119,14 +116,6 @@ static void tcp_v6_hash(struct sock *sk)
119 } 116 }
120} 117}
121 118
122static __inline__ __sum16 tcp_v6_check(int len,
123 const struct in6_addr *saddr,
124 const struct in6_addr *daddr,
125 __wsum base)
126{
127 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
128}
129
130static __u32 tcp_v6_init_sequence(const struct sk_buff *skb) 119static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
131{ 120{
132 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32, 121 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
@@ -306,7 +295,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
306 if (err) 295 if (err)
307 goto late_failure; 296 goto late_failure;
308 297
309 if (!tp->write_seq) 298 if (!tp->write_seq && likely(!tp->repair))
310 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, 299 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
311 np->daddr.s6_addr32, 300 np->daddr.s6_addr32,
312 inet->inet_sport, 301 inet->inet_sport,
@@ -495,9 +484,12 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
495 struct request_values *rvp) 484 struct request_values *rvp)
496{ 485{
497 struct flowi6 fl6; 486 struct flowi6 fl6;
487 int res;
498 488
499 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); 489 res = tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0);
500 return tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0); 490 if (!res)
491 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
492 return res;
501} 493}
502 494
503static void tcp_v6_reqsk_destructor(struct request_sock *req) 495static void tcp_v6_reqsk_destructor(struct request_sock *req)
@@ -719,94 +711,6 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
719}; 711};
720#endif 712#endif
721 713
722static void __tcp_v6_send_check(struct sk_buff *skb,
723 const struct in6_addr *saddr, const struct in6_addr *daddr)
724{
725 struct tcphdr *th = tcp_hdr(skb);
726
727 if (skb->ip_summed == CHECKSUM_PARTIAL) {
728 th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
729 skb->csum_start = skb_transport_header(skb) - skb->head;
730 skb->csum_offset = offsetof(struct tcphdr, check);
731 } else {
732 th->check = tcp_v6_check(skb->len, saddr, daddr,
733 csum_partial(th, th->doff << 2,
734 skb->csum));
735 }
736}
737
738static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
739{
740 struct ipv6_pinfo *np = inet6_sk(sk);
741
742 __tcp_v6_send_check(skb, &np->saddr, &np->daddr);
743}
744
745static int tcp_v6_gso_send_check(struct sk_buff *skb)
746{
747 const struct ipv6hdr *ipv6h;
748 struct tcphdr *th;
749
750 if (!pskb_may_pull(skb, sizeof(*th)))
751 return -EINVAL;
752
753 ipv6h = ipv6_hdr(skb);
754 th = tcp_hdr(skb);
755
756 th->check = 0;
757 skb->ip_summed = CHECKSUM_PARTIAL;
758 __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
759 return 0;
760}
761
762static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
763 struct sk_buff *skb)
764{
765 const struct ipv6hdr *iph = skb_gro_network_header(skb);
766 __wsum wsum;
767 __sum16 sum;
768
769 switch (skb->ip_summed) {
770 case CHECKSUM_COMPLETE:
771 if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
772 skb->csum)) {
773 skb->ip_summed = CHECKSUM_UNNECESSARY;
774 break;
775 }
776flush:
777 NAPI_GRO_CB(skb)->flush = 1;
778 return NULL;
779
780 case CHECKSUM_NONE:
781 wsum = ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
782 skb_gro_len(skb),
783 IPPROTO_TCP, 0));
784 sum = csum_fold(skb_checksum(skb,
785 skb_gro_offset(skb),
786 skb_gro_len(skb),
787 wsum));
788 if (sum)
789 goto flush;
790
791 skb->ip_summed = CHECKSUM_UNNECESSARY;
792 break;
793 }
794
795 return tcp_gro_receive(head, skb);
796}
797
798static int tcp6_gro_complete(struct sk_buff *skb)
799{
800 const struct ipv6hdr *iph = ipv6_hdr(skb);
801 struct tcphdr *th = tcp_hdr(skb);
802
803 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
804 &iph->saddr, &iph->daddr, 0);
805 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
806
807 return tcp_gro_complete(skb);
808}
809
810static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, 714static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
811 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass) 715 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
812{ 716{
@@ -1364,7 +1268,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1364 1268
1365 tcp_initialize_rcv_mss(newsk); 1269 tcp_initialize_rcv_mss(newsk);
1366 tcp_synack_rtt_meas(newsk, req); 1270 tcp_synack_rtt_meas(newsk, req);
1367 newtp->total_retrans = req->retrans; 1271 newtp->total_retrans = req->num_retrans;
1368 1272
1369 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; 1273 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1370 newinet->inet_rcv_saddr = LOOPBACK4_IPV6; 1274 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
@@ -1384,7 +1288,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1384#endif 1288#endif
1385 1289
1386 if (__inet_inherit_port(sk, newsk) < 0) { 1290 if (__inet_inherit_port(sk, newsk) < 0) {
1387 sock_put(newsk); 1291 inet_csk_prepare_forced_close(newsk);
1292 tcp_done(newsk);
1388 goto out; 1293 goto out;
1389 } 1294 }
1390 __inet6_hash(newsk, NULL); 1295 __inet6_hash(newsk, NULL);
@@ -1741,11 +1646,11 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
1741 skb->destructor = sock_edemux; 1646 skb->destructor = sock_edemux;
1742 if (sk->sk_state != TCP_TIME_WAIT) { 1647 if (sk->sk_state != TCP_TIME_WAIT) {
1743 struct dst_entry *dst = sk->sk_rx_dst; 1648 struct dst_entry *dst = sk->sk_rx_dst;
1744 struct inet_sock *icsk = inet_sk(sk); 1649
1745 if (dst) 1650 if (dst)
1746 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); 1651 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1747 if (dst && 1652 if (dst &&
1748 icsk->rx_dst_ifindex == skb->skb_iif) 1653 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1749 skb_dst_set_noref(skb, dst); 1654 skb_dst_set_noref(skb, dst);
1750 } 1655 }
1751 } 1656 }
@@ -1866,7 +1771,7 @@ static void get_openreq6(struct seq_file *seq,
1866 0,0, /* could print option size, but that is af dependent. */ 1771 0,0, /* could print option size, but that is af dependent. */
1867 1, /* timers active (only the expire timer) */ 1772 1, /* timers active (only the expire timer) */
1868 jiffies_to_clock_t(ttd), 1773 jiffies_to_clock_t(ttd),
1869 req->retrans, 1774 req->num_timeout,
1870 from_kuid_munged(seq_user_ns(seq), uid), 1775 from_kuid_munged(seq_user_ns(seq), uid),
1871 0, /* non standard timer */ 1776 0, /* non standard timer */
1872 0, /* open_requests have no inode */ 1777 0, /* open_requests have no inode */
@@ -2063,10 +1968,6 @@ static const struct inet6_protocol tcpv6_protocol = {
2063 .early_demux = tcp_v6_early_demux, 1968 .early_demux = tcp_v6_early_demux,
2064 .handler = tcp_v6_rcv, 1969 .handler = tcp_v6_rcv,
2065 .err_handler = tcp_v6_err, 1970 .err_handler = tcp_v6_err,
2066 .gso_send_check = tcp_v6_gso_send_check,
2067 .gso_segment = tcp_tso_segment,
2068 .gro_receive = tcp6_gro_receive,
2069 .gro_complete = tcp6_gro_complete,
2070 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 1971 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2071}; 1972};
2072 1973
@@ -2121,10 +2022,10 @@ int __init tcpv6_init(void)
2121out: 2022out:
2122 return ret; 2023 return ret;
2123 2024
2124out_tcpv6_protocol:
2125 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2126out_tcpv6_protosw: 2025out_tcpv6_protosw:
2127 inet6_unregister_protosw(&tcpv6_protosw); 2026 inet6_unregister_protosw(&tcpv6_protosw);
2027out_tcpv6_protocol:
2028 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2128 goto out; 2029 goto out;
2129} 2030}
2130 2031
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
new file mode 100644
index 000000000000..2ec6bf6a0aa0
--- /dev/null
+++ b/net/ipv6/tcpv6_offload.c
@@ -0,0 +1,95 @@
1/*
2 * IPV6 GSO/GRO offload support
3 * Linux INET6 implementation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 *
10 * TCPv6 GSO/GRO support
11 */
12#include <linux/skbuff.h>
13#include <net/protocol.h>
14#include <net/tcp.h>
15#include <net/ip6_checksum.h>
16#include "ip6_offload.h"
17
18static int tcp_v6_gso_send_check(struct sk_buff *skb)
19{
20 const struct ipv6hdr *ipv6h;
21 struct tcphdr *th;
22
23 if (!pskb_may_pull(skb, sizeof(*th)))
24 return -EINVAL;
25
26 ipv6h = ipv6_hdr(skb);
27 th = tcp_hdr(skb);
28
29 th->check = 0;
30 skb->ip_summed = CHECKSUM_PARTIAL;
31 __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
32 return 0;
33}
34
35static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
36 struct sk_buff *skb)
37{
38 const struct ipv6hdr *iph = skb_gro_network_header(skb);
39 __wsum wsum;
40 __sum16 sum;
41
42 switch (skb->ip_summed) {
43 case CHECKSUM_COMPLETE:
44 if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
45 skb->csum)) {
46 skb->ip_summed = CHECKSUM_UNNECESSARY;
47 break;
48 }
49flush:
50 NAPI_GRO_CB(skb)->flush = 1;
51 return NULL;
52
53 case CHECKSUM_NONE:
54 wsum = ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
55 skb_gro_len(skb),
56 IPPROTO_TCP, 0));
57 sum = csum_fold(skb_checksum(skb,
58 skb_gro_offset(skb),
59 skb_gro_len(skb),
60 wsum));
61 if (sum)
62 goto flush;
63
64 skb->ip_summed = CHECKSUM_UNNECESSARY;
65 break;
66 }
67
68 return tcp_gro_receive(head, skb);
69}
70
71static int tcp6_gro_complete(struct sk_buff *skb)
72{
73 const struct ipv6hdr *iph = ipv6_hdr(skb);
74 struct tcphdr *th = tcp_hdr(skb);
75
76 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
77 &iph->saddr, &iph->daddr, 0);
78 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
79
80 return tcp_gro_complete(skb);
81}
82
83static const struct net_offload tcpv6_offload = {
84 .callbacks = {
85 .gso_send_check = tcp_v6_gso_send_check,
86 .gso_segment = tcp_tso_segment,
87 .gro_receive = tcp6_gro_receive,
88 .gro_complete = tcp6_gro_complete,
89 },
90};
91
92int __init tcpv6_offload_init(void)
93{
94 return inet6_add_offload(&tcpv6_offload, IPPROTO_TCP);
95}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index fc9997260a6b..dfaa29b8b293 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1343,103 +1343,9 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
1343} 1343}
1344#endif 1344#endif
1345 1345
1346static int udp6_ufo_send_check(struct sk_buff *skb)
1347{
1348 const struct ipv6hdr *ipv6h;
1349 struct udphdr *uh;
1350
1351 if (!pskb_may_pull(skb, sizeof(*uh)))
1352 return -EINVAL;
1353
1354 ipv6h = ipv6_hdr(skb);
1355 uh = udp_hdr(skb);
1356
1357 uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
1358 IPPROTO_UDP, 0);
1359 skb->csum_start = skb_transport_header(skb) - skb->head;
1360 skb->csum_offset = offsetof(struct udphdr, check);
1361 skb->ip_summed = CHECKSUM_PARTIAL;
1362 return 0;
1363}
1364
1365static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
1366 netdev_features_t features)
1367{
1368 struct sk_buff *segs = ERR_PTR(-EINVAL);
1369 unsigned int mss;
1370 unsigned int unfrag_ip6hlen, unfrag_len;
1371 struct frag_hdr *fptr;
1372 u8 *mac_start, *prevhdr;
1373 u8 nexthdr;
1374 u8 frag_hdr_sz = sizeof(struct frag_hdr);
1375 int offset;
1376 __wsum csum;
1377
1378 mss = skb_shinfo(skb)->gso_size;
1379 if (unlikely(skb->len <= mss))
1380 goto out;
1381
1382 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
1383 /* Packet is from an untrusted source, reset gso_segs. */
1384 int type = skb_shinfo(skb)->gso_type;
1385
1386 if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) ||
1387 !(type & (SKB_GSO_UDP))))
1388 goto out;
1389
1390 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
1391
1392 segs = NULL;
1393 goto out;
1394 }
1395
1396 /* Do software UFO. Complete and fill in the UDP checksum as HW cannot
1397 * do checksum of UDP packets sent as multiple IP fragments.
1398 */
1399 offset = skb_checksum_start_offset(skb);
1400 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1401 offset += skb->csum_offset;
1402 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
1403 skb->ip_summed = CHECKSUM_NONE;
1404
1405 /* Check if there is enough headroom to insert fragment header. */
1406 if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) &&
1407 pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
1408 goto out;
1409
1410 /* Find the unfragmentable header and shift it left by frag_hdr_sz
1411 * bytes to insert fragment header.
1412 */
1413 unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
1414 nexthdr = *prevhdr;
1415 *prevhdr = NEXTHDR_FRAGMENT;
1416 unfrag_len = skb_network_header(skb) - skb_mac_header(skb) +
1417 unfrag_ip6hlen;
1418 mac_start = skb_mac_header(skb);
1419 memmove(mac_start-frag_hdr_sz, mac_start, unfrag_len);
1420
1421 skb->mac_header -= frag_hdr_sz;
1422 skb->network_header -= frag_hdr_sz;
1423
1424 fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
1425 fptr->nexthdr = nexthdr;
1426 fptr->reserved = 0;
1427 ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
1428
1429 /* Fragment the skb. ipv6 header and the remaining fields of the
1430 * fragment header are updated in ipv6_gso_segment()
1431 */
1432 segs = skb_segment(skb, features);
1433
1434out:
1435 return segs;
1436}
1437
1438static const struct inet6_protocol udpv6_protocol = { 1346static const struct inet6_protocol udpv6_protocol = {
1439 .handler = udpv6_rcv, 1347 .handler = udpv6_rcv,
1440 .err_handler = udpv6_err, 1348 .err_handler = udpv6_err,
1441 .gso_send_check = udp6_ufo_send_check,
1442 .gso_segment = udp6_ufo_fragment,
1443 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 1349 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1444}; 1350};
1445 1351
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
new file mode 100644
index 000000000000..0c8934a317c2
--- /dev/null
+++ b/net/ipv6/udp_offload.c
@@ -0,0 +1,120 @@
1/*
2 * IPV6 GSO/GRO offload support
3 * Linux INET6 implementation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 *
10 * UDPv6 GSO support
11 */
12#include <linux/skbuff.h>
13#include <net/protocol.h>
14#include <net/ipv6.h>
15#include <net/udp.h>
16#include <net/ip6_checksum.h>
17#include "ip6_offload.h"
18
19static int udp6_ufo_send_check(struct sk_buff *skb)
20{
21 const struct ipv6hdr *ipv6h;
22 struct udphdr *uh;
23
24 if (!pskb_may_pull(skb, sizeof(*uh)))
25 return -EINVAL;
26
27 ipv6h = ipv6_hdr(skb);
28 uh = udp_hdr(skb);
29
30 uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
31 IPPROTO_UDP, 0);
32 skb->csum_start = skb_transport_header(skb) - skb->head;
33 skb->csum_offset = offsetof(struct udphdr, check);
34 skb->ip_summed = CHECKSUM_PARTIAL;
35 return 0;
36}
37
38static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
39 netdev_features_t features)
40{
41 struct sk_buff *segs = ERR_PTR(-EINVAL);
42 unsigned int mss;
43 unsigned int unfrag_ip6hlen, unfrag_len;
44 struct frag_hdr *fptr;
45 u8 *mac_start, *prevhdr;
46 u8 nexthdr;
47 u8 frag_hdr_sz = sizeof(struct frag_hdr);
48 int offset;
49 __wsum csum;
50
51 mss = skb_shinfo(skb)->gso_size;
52 if (unlikely(skb->len <= mss))
53 goto out;
54
55 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
56 /* Packet is from an untrusted source, reset gso_segs. */
57 int type = skb_shinfo(skb)->gso_type;
58
59 if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) ||
60 !(type & (SKB_GSO_UDP))))
61 goto out;
62
63 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
64
65 segs = NULL;
66 goto out;
67 }
68
69 /* Do software UFO. Complete and fill in the UDP checksum as HW cannot
70 * do checksum of UDP packets sent as multiple IP fragments.
71 */
72 offset = skb_checksum_start_offset(skb);
73 csum = skb_checksum(skb, offset, skb->len - offset, 0);
74 offset += skb->csum_offset;
75 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
76 skb->ip_summed = CHECKSUM_NONE;
77
78 /* Check if there is enough headroom to insert fragment header. */
79 if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) &&
80 pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
81 goto out;
82
83 /* Find the unfragmentable header and shift it left by frag_hdr_sz
84 * bytes to insert fragment header.
85 */
86 unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
87 nexthdr = *prevhdr;
88 *prevhdr = NEXTHDR_FRAGMENT;
89 unfrag_len = skb_network_header(skb) - skb_mac_header(skb) +
90 unfrag_ip6hlen;
91 mac_start = skb_mac_header(skb);
92 memmove(mac_start-frag_hdr_sz, mac_start, unfrag_len);
93
94 skb->mac_header -= frag_hdr_sz;
95 skb->network_header -= frag_hdr_sz;
96
97 fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
98 fptr->nexthdr = nexthdr;
99 fptr->reserved = 0;
100 ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
101
102 /* Fragment the skb. ipv6 header and the remaining fields of the
103 * fragment header are updated in ipv6_gso_segment()
104 */
105 segs = skb_segment(skb, features);
106
107out:
108 return segs;
109}
110static const struct net_offload udpv6_offload = {
111 .callbacks = {
112 .gso_send_check = udp6_ufo_send_check,
113 .gso_segment = udp6_ufo_fragment,
114 },
115};
116
117int __init udp_offload_init(void)
118{
119 return inet6_add_offload(&udpv6_offload, IPPROTO_UDP);
120}
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index f8c4c08ffb60..c9844135c9ca 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -20,7 +20,7 @@
20#include <net/ip.h> 20#include <net/ip.h>
21#include <net/ipv6.h> 21#include <net/ipv6.h>
22#include <net/ip6_route.h> 22#include <net/ip6_route.h>
23#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 23#if IS_ENABLED(CONFIG_IPV6_MIP6)
24#include <net/mip6.h> 24#include <net/mip6.h>
25#endif 25#endif
26 26
@@ -182,7 +182,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
182 fl6->flowi6_proto = nexthdr; 182 fl6->flowi6_proto = nexthdr;
183 return; 183 return;
184 184
185#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 185#if IS_ENABLED(CONFIG_IPV6_MIP6)
186 case IPPROTO_MH: 186 case IPPROTO_MH:
187 if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) { 187 if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
188 struct ip6_mh *mh; 188 struct ip6_mh *mh;
@@ -327,21 +327,7 @@ static struct ctl_table_header *sysctl_hdr;
327int __init xfrm6_init(void) 327int __init xfrm6_init(void)
328{ 328{
329 int ret; 329 int ret;
330 unsigned int gc_thresh; 330
331
332 /*
333 * We need a good default value for the xfrm6 gc threshold.
334 * In ipv4 we set it to the route hash table size * 8, which
335 * is half the size of the maximaum route cache for ipv4. It
336 * would be good to do the same thing for v6, except the table is
337 * constructed differently here. Here each table for a net namespace
338 * can have FIB_TABLE_HASHSZ entries, so lets go with the same
339 * computation that we used for ipv4 here. Also, lets keep the initial
340 * gc_thresh to a minimum of 1024, since, the ipv6 route cache defaults
341 * to that as a minimum as well
342 */
343 gc_thresh = FIB6_TABLE_HASHSZ * 8;
344 xfrm6_dst_ops.gc_thresh = (gc_thresh < 1024) ? 1024 : gc_thresh;
345 dst_entries_init(&xfrm6_dst_ops); 331 dst_entries_init(&xfrm6_dst_ops);
346 332
347 ret = xfrm6_policy_init(); 333 ret = xfrm6_policy_init();
@@ -370,7 +356,6 @@ void xfrm6_fini(void)
370 if (sysctl_hdr) 356 if (sysctl_hdr)
371 unregister_net_sysctl_table(sysctl_hdr); 357 unregister_net_sysctl_table(sysctl_hdr);
372#endif 358#endif
373 //xfrm6_input_fini();
374 xfrm6_policy_fini(); 359 xfrm6_policy_fini();
375 xfrm6_state_fini(); 360 xfrm6_state_fini();
376 dst_entries_destroy(&xfrm6_dst_ops); 361 dst_entries_destroy(&xfrm6_dst_ops);
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index 3f2f7c4ab721..d8c70b8efc24 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -101,7 +101,7 @@ static int __xfrm6_state_sort_cmp(void *p)
101 return 1; 101 return 1;
102 else 102 else
103 return 3; 103 return 3;
104#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 104#if IS_ENABLED(CONFIG_IPV6_MIP6)
105 case XFRM_MODE_ROUTEOPTIMIZATION: 105 case XFRM_MODE_ROUTEOPTIMIZATION:
106 case XFRM_MODE_IN_TRIGGER: 106 case XFRM_MODE_IN_TRIGGER:
107 return 2; 107 return 2;
@@ -134,7 +134,7 @@ static int __xfrm6_tmpl_sort_cmp(void *p)
134 switch (v->mode) { 134 switch (v->mode) {
135 case XFRM_MODE_TRANSPORT: 135 case XFRM_MODE_TRANSPORT:
136 return 1; 136 return 1;
137#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 137#if IS_ENABLED(CONFIG_IPV6_MIP6)
138 case XFRM_MODE_ROUTEOPTIMIZATION: 138 case XFRM_MODE_ROUTEOPTIMIZATION:
139 case XFRM_MODE_IN_TRIGGER: 139 case XFRM_MODE_IN_TRIGGER:
140 return 2; 140 return 2;
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 496ce2cebcd7..a68c88cdec6e 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -183,6 +183,7 @@ static void __exit __ircomm_tty_cleanup(struct ircomm_tty_cb *self)
183 ircomm_tty_shutdown(self); 183 ircomm_tty_shutdown(self);
184 184
185 self->magic = 0; 185 self->magic = 0;
186 tty_port_destroy(&self->port);
186 kfree(self); 187 kfree(self);
187} 188}
188 189
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 1002e3396f72..ae43c62f9045 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -441,6 +441,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
441 lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0); 441 lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0);
442 if (lsap == NULL) { 442 if (lsap == NULL) {
443 IRDA_DEBUG(0, "%s: unable to allocate LSAP!!\n", __func__); 443 IRDA_DEBUG(0, "%s: unable to allocate LSAP!!\n", __func__);
444 __irttp_close_tsap(self);
444 return NULL; 445 return NULL;
445 } 446 }
446 447
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 3ad1f9db5f8b..df082508362d 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1806,7 +1806,7 @@ static void iucv_external_interrupt(struct ext_code ext_code,
1806 struct iucv_irq_data *p; 1806 struct iucv_irq_data *p;
1807 struct iucv_irq_list *work; 1807 struct iucv_irq_list *work;
1808 1808
1809 kstat_cpu(smp_processor_id()).irqs[EXTINT_IUC]++; 1809 inc_irq_stat(IRQEXT_IUC);
1810 p = iucv_irq_data[smp_processor_id()]; 1810 p = iucv_irq_data[smp_processor_id()];
1811 if (p->ippathid >= iucv_max_pathid) { 1811 if (p->ippathid >= iucv_max_pathid) {
1812 WARN_ON(p->ippathid >= iucv_max_pathid); 1812 WARN_ON(p->ippathid >= iucv_max_pathid);
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 08897a3c7ec7..5b426a646544 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -141,7 +141,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
141 struct sock *sk; 141 struct sock *sk;
142 int err; 142 int err;
143 143
144 if (!capable(CAP_NET_ADMIN)) 144 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
145 return -EPERM; 145 return -EPERM;
146 if (sock->type != SOCK_RAW) 146 if (sock->type != SOCK_RAW)
147 return -ESOCKTNOSUPPORT; 147 return -ESOCKTNOSUPPORT;
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 6c4cc12c7414..bbba3a19e944 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -632,7 +632,7 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
632 nla_put_u16(skb, L2TP_ATTR_MRU, session->mru))) 632 nla_put_u16(skb, L2TP_ATTR_MRU, session->mru)))
633 goto nla_put_failure; 633 goto nla_put_failure;
634 634
635 if ((session->ifname && session->ifname[0] && 635 if ((session->ifname[0] &&
636 nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) || 636 nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
637 (session->cookie_len && 637 (session->cookie_len &&
638 nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len, 638 nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index c2190005a114..88709882c464 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -160,7 +160,7 @@ static int llc_ui_create(struct net *net, struct socket *sock, int protocol,
160 struct sock *sk; 160 struct sock *sk;
161 int rc = -ESOCKTNOSUPPORT; 161 int rc = -ESOCKTNOSUPPORT;
162 162
163 if (!capable(CAP_NET_RAW)) 163 if (!ns_capable(net->user_ns, CAP_NET_RAW))
164 return -EPERM; 164 return -EPERM;
165 165
166 if (!net_eq(net, &init_net)) 166 if (!net_eq(net, &init_net))
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 63af25458fda..b4ecf267a34b 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -248,7 +248,7 @@ config MAC80211_MHWMP_DEBUG
248 Do not select this option. 248 Do not select this option.
249 249
250config MAC80211_MESH_SYNC_DEBUG 250config MAC80211_MESH_SYNC_DEBUG
251 bool "Verbose mesh mesh synchronization debugging" 251 bool "Verbose mesh synchronization debugging"
252 depends on MAC80211_DEBUG_MENU 252 depends on MAC80211_DEBUG_MENU
253 depends on MAC80211_MESH 253 depends on MAC80211_MESH
254 ---help--- 254 ---help---
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index a7dd110faafa..4911202334d9 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -8,6 +8,7 @@ mac80211-y := \
8 wpa.o \ 8 wpa.o \
9 scan.o offchannel.o \ 9 scan.o offchannel.o \
10 ht.o agg-tx.o agg-rx.o \ 10 ht.o agg-tx.o agg-rx.o \
11 vht.o \
11 ibss.o \ 12 ibss.o \
12 iface.o \ 13 iface.o \
13 rate.o \ 14 rate.o \
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c
index a04752e91023..537488cbf941 100644
--- a/net/mac80211/aes_cmac.c
+++ b/net/mac80211/aes_cmac.c
@@ -10,6 +10,7 @@
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/crypto.h> 12#include <linux/crypto.h>
13#include <linux/export.h>
13#include <linux/err.h> 14#include <linux/err.h>
14#include <crypto/aes.h> 15#include <crypto/aes.h>
15 16
@@ -126,3 +127,20 @@ void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm)
126{ 127{
127 crypto_free_cipher(tfm); 128 crypto_free_cipher(tfm);
128} 129}
130
131void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
132 u8 *k1, u8 *k2)
133{
134 u8 l[AES_BLOCK_SIZE] = {};
135 struct ieee80211_key *key =
136 container_of(keyconf, struct ieee80211_key, conf);
137
138 crypto_cipher_encrypt_one(key->u.aes_cmac.tfm, l, l);
139
140 memcpy(k1, l, AES_BLOCK_SIZE);
141 gf_mulx(k1);
142
143 memcpy(k2, k1, AES_BLOCK_SIZE);
144 gf_mulx(k2);
145}
146EXPORT_SYMBOL(ieee80211_aes_cmac_calculate_k1_k2);
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 186d9919b043..808338a1bce5 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -118,7 +118,7 @@ void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
118 return; 118 return;
119 } 119 }
120 120
121 for (i = 0; i < STA_TID_NUM; i++) 121 for (i = 0; i < IEEE80211_NUM_TIDS; i++)
122 if (ba_rx_bitmap & BIT(i)) 122 if (ba_rx_bitmap & BIT(i))
123 set_bit(i, sta->ampdu_mlme.tid_rx_stop_requested); 123 set_bit(i, sta->ampdu_mlme.tid_rx_stop_requested);
124 124
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 3195a6307f50..eb9df22418f0 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -445,10 +445,10 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
445 445
446 trace_api_start_tx_ba_session(pubsta, tid); 446 trace_api_start_tx_ba_session(pubsta, tid);
447 447
448 if (WARN_ON(!local->ops->ampdu_action)) 448 if (WARN_ON_ONCE(!local->ops->ampdu_action))
449 return -EINVAL; 449 return -EINVAL;
450 450
451 if ((tid >= STA_TID_NUM) || 451 if ((tid >= IEEE80211_NUM_TIDS) ||
452 !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) || 452 !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) ||
453 (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)) 453 (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
454 return -EINVAL; 454 return -EINVAL;
@@ -605,9 +605,9 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
605 605
606 trace_api_start_tx_ba_cb(sdata, ra, tid); 606 trace_api_start_tx_ba_cb(sdata, ra, tid);
607 607
608 if (tid >= STA_TID_NUM) { 608 if (tid >= IEEE80211_NUM_TIDS) {
609 ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n", 609 ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
610 tid, STA_TID_NUM); 610 tid, IEEE80211_NUM_TIDS);
611 return; 611 return;
612 } 612 }
613 613
@@ -687,7 +687,7 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
687 if (!local->ops->ampdu_action) 687 if (!local->ops->ampdu_action)
688 return -EINVAL; 688 return -EINVAL;
689 689
690 if (tid >= STA_TID_NUM) 690 if (tid >= IEEE80211_NUM_TIDS)
691 return -EINVAL; 691 return -EINVAL;
692 692
693 spin_lock_bh(&sta->lock); 693 spin_lock_bh(&sta->lock);
@@ -722,9 +722,9 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
722 722
723 trace_api_stop_tx_ba_cb(sdata, ra, tid); 723 trace_api_stop_tx_ba_cb(sdata, ra, tid);
724 724
725 if (tid >= STA_TID_NUM) { 725 if (tid >= IEEE80211_NUM_TIDS) {
726 ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n", 726 ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
727 tid, STA_TID_NUM); 727 tid, IEEE80211_NUM_TIDS);
728 return; 728 return;
729 } 729 }
730 730
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 05f3a313db88..47e0aca614b7 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -370,29 +370,64 @@ static int ieee80211_config_default_mgmt_key(struct wiphy *wiphy,
370 return 0; 370 return 0;
371} 371}
372 372
373static void rate_idx_to_bitrate(struct rate_info *rate, struct sta_info *sta, int idx)
374{
375 if (!(rate->flags & RATE_INFO_FLAGS_MCS)) {
376 struct ieee80211_supported_band *sband;
377 sband = sta->local->hw.wiphy->bands[
378 sta->local->oper_channel->band];
379 rate->legacy = sband->bitrates[idx].bitrate;
380 } else
381 rate->mcs = idx;
382}
383
384void sta_set_rate_info_tx(struct sta_info *sta, 373void sta_set_rate_info_tx(struct sta_info *sta,
385 const struct ieee80211_tx_rate *rate, 374 const struct ieee80211_tx_rate *rate,
386 struct rate_info *rinfo) 375 struct rate_info *rinfo)
387{ 376{
388 rinfo->flags = 0; 377 rinfo->flags = 0;
389 if (rate->flags & IEEE80211_TX_RC_MCS) 378 if (rate->flags & IEEE80211_TX_RC_MCS) {
390 rinfo->flags |= RATE_INFO_FLAGS_MCS; 379 rinfo->flags |= RATE_INFO_FLAGS_MCS;
380 rinfo->mcs = rate->idx;
381 } else if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
382 rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
383 rinfo->mcs = ieee80211_rate_get_vht_mcs(rate);
384 rinfo->nss = ieee80211_rate_get_vht_nss(rate);
385 } else {
386 struct ieee80211_supported_band *sband;
387 sband = sta->local->hw.wiphy->bands[
388 ieee80211_get_sdata_band(sta->sdata)];
389 rinfo->legacy = sband->bitrates[rate->idx].bitrate;
390 }
391 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 391 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
392 rinfo->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; 392 rinfo->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
393 if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
394 rinfo->flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH;
395 if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
396 rinfo->flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH;
393 if (rate->flags & IEEE80211_TX_RC_SHORT_GI) 397 if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
394 rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; 398 rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
395 rate_idx_to_bitrate(rinfo, sta, rate->idx); 399}
400
401void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
402{
403 rinfo->flags = 0;
404
405 if (sta->last_rx_rate_flag & RX_FLAG_HT) {
406 rinfo->flags |= RATE_INFO_FLAGS_MCS;
407 rinfo->mcs = sta->last_rx_rate_idx;
408 } else if (sta->last_rx_rate_flag & RX_FLAG_VHT) {
409 rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
410 rinfo->nss = sta->last_rx_rate_vht_nss;
411 rinfo->mcs = sta->last_rx_rate_idx;
412 } else {
413 struct ieee80211_supported_band *sband;
414
415 sband = sta->local->hw.wiphy->bands[
416 ieee80211_get_sdata_band(sta->sdata)];
417 rinfo->legacy =
418 sband->bitrates[sta->last_rx_rate_idx].bitrate;
419 }
420
421 if (sta->last_rx_rate_flag & RX_FLAG_40MHZ)
422 rinfo->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
423 if (sta->last_rx_rate_flag & RX_FLAG_SHORT_GI)
424 rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
425 if (sta->last_rx_rate_flag & RX_FLAG_80MHZ)
426 rinfo->flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH;
427 if (sta->last_rx_rate_flag & RX_FLAG_80P80MHZ)
428 rinfo->flags |= RATE_INFO_FLAGS_80P80_MHZ_WIDTH;
429 if (sta->last_rx_rate_flag & RX_FLAG_160MHZ)
430 rinfo->flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH;
396} 431}
397 432
398static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) 433static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
@@ -441,15 +476,7 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
441 } 476 }
442 477
443 sta_set_rate_info_tx(sta, &sta->last_tx_rate, &sinfo->txrate); 478 sta_set_rate_info_tx(sta, &sta->last_tx_rate, &sinfo->txrate);
444 479 sta_set_rate_info_rx(sta, &sinfo->rxrate);
445 sinfo->rxrate.flags = 0;
446 if (sta->last_rx_rate_flag & RX_FLAG_HT)
447 sinfo->rxrate.flags |= RATE_INFO_FLAGS_MCS;
448 if (sta->last_rx_rate_flag & RX_FLAG_40MHZ)
449 sinfo->rxrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
450 if (sta->last_rx_rate_flag & RX_FLAG_SHORT_GI)
451 sinfo->rxrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
452 rate_idx_to_bitrate(&sinfo->rxrate, sta, sta->last_rx_rate_idx);
453 480
454 if (ieee80211_vif_is_mesh(&sdata->vif)) { 481 if (ieee80211_vif_is_mesh(&sdata->vif)) {
455#ifdef CONFIG_MAC80211_MESH 482#ifdef CONFIG_MAC80211_MESH
@@ -532,6 +559,8 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy,
532 u64 *data) 559 u64 *data)
533{ 560{
534 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 561 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
562 struct ieee80211_chanctx_conf *chanctx_conf;
563 struct ieee80211_channel *channel;
535 struct sta_info *sta; 564 struct sta_info *sta;
536 struct ieee80211_local *local = sdata->local; 565 struct ieee80211_local *local = sdata->local;
537 struct station_info sinfo; 566 struct station_info sinfo;
@@ -607,19 +636,26 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy,
607do_survey: 636do_survey:
608 i = STA_STATS_LEN - STA_STATS_SURVEY_LEN; 637 i = STA_STATS_LEN - STA_STATS_SURVEY_LEN;
609 /* Get survey stats for current channel */ 638 /* Get survey stats for current channel */
610 q = 0; 639 survey.filled = 0;
611 while (true) {
612 survey.filled = 0;
613 if (drv_get_survey(local, q, &survey) != 0) {
614 survey.filled = 0;
615 break;
616 }
617 640
618 if (survey.channel && 641 rcu_read_lock();
619 (local->oper_channel->center_freq == 642 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
620 survey.channel->center_freq)) 643 if (chanctx_conf)
621 break; 644 channel = chanctx_conf->def.chan;
622 q++; 645 else
646 channel = NULL;
647 rcu_read_unlock();
648
649 if (channel) {
650 q = 0;
651 do {
652 survey.filled = 0;
653 if (drv_get_survey(local, q, &survey) != 0) {
654 survey.filled = 0;
655 break;
656 }
657 q++;
658 } while (channel != survey.channel);
623 } 659 }
624 660
625 if (survey.filled) 661 if (survey.filled)
@@ -724,47 +760,37 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
724 return ret; 760 return ret;
725} 761}
726 762
727static int ieee80211_set_channel(struct wiphy *wiphy, 763static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
728 struct net_device *netdev, 764 struct cfg80211_chan_def *chandef)
729 struct ieee80211_channel *chan,
730 enum nl80211_channel_type channel_type)
731{ 765{
732 struct ieee80211_local *local = wiphy_priv(wiphy); 766 struct ieee80211_local *local = wiphy_priv(wiphy);
733 struct ieee80211_sub_if_data *sdata = NULL; 767 struct ieee80211_sub_if_data *sdata;
734 768 int ret = 0;
735 if (netdev)
736 sdata = IEEE80211_DEV_TO_SUB_IF(netdev);
737
738 switch (ieee80211_get_channel_mode(local, NULL)) {
739 case CHAN_MODE_HOPPING:
740 return -EBUSY;
741 case CHAN_MODE_FIXED:
742 if (local->oper_channel != chan ||
743 (!sdata && local->_oper_channel_type != channel_type))
744 return -EBUSY;
745 if (!sdata && local->_oper_channel_type == channel_type)
746 return 0;
747 break;
748 case CHAN_MODE_UNDEFINED:
749 break;
750 }
751
752 if (!ieee80211_set_channel_type(local, sdata, channel_type))
753 return -EBUSY;
754 769
755 local->oper_channel = chan; 770 if (cfg80211_chandef_identical(&local->monitor_chandef, chandef))
771 return 0;
756 772
757 /* auto-detects changes */ 773 mutex_lock(&local->iflist_mtx);
758 ieee80211_hw_config(local, 0); 774 if (local->use_chanctx) {
775 sdata = rcu_dereference_protected(
776 local->monitor_sdata,
777 lockdep_is_held(&local->iflist_mtx));
778 if (sdata) {
779 ieee80211_vif_release_channel(sdata);
780 ret = ieee80211_vif_use_channel(sdata, chandef,
781 IEEE80211_CHANCTX_EXCLUSIVE);
782 }
783 } else if (local->open_count == local->monitors) {
784 local->_oper_channel = chandef->chan;
785 local->_oper_channel_type = cfg80211_get_chandef_type(chandef);
786 ieee80211_hw_config(local, 0);
787 }
759 788
760 return 0; 789 if (ret == 0)
761} 790 local->monitor_chandef = *chandef;
791 mutex_unlock(&local->iflist_mtx);
762 792
763static int ieee80211_set_monitor_channel(struct wiphy *wiphy, 793 return ret;
764 struct ieee80211_channel *chan,
765 enum nl80211_channel_type channel_type)
766{
767 return ieee80211_set_channel(wiphy, NULL, chan, channel_type);
768} 794}
769 795
770static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata, 796static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
@@ -872,15 +898,20 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
872 u32 changed = BSS_CHANGED_BEACON_INT | 898 u32 changed = BSS_CHANGED_BEACON_INT |
873 BSS_CHANGED_BEACON_ENABLED | 899 BSS_CHANGED_BEACON_ENABLED |
874 BSS_CHANGED_BEACON | 900 BSS_CHANGED_BEACON |
875 BSS_CHANGED_SSID; 901 BSS_CHANGED_SSID |
902 BSS_CHANGED_P2P_PS;
876 int err; 903 int err;
877 904
878 old = rtnl_dereference(sdata->u.ap.beacon); 905 old = rtnl_dereference(sdata->u.ap.beacon);
879 if (old) 906 if (old)
880 return -EALREADY; 907 return -EALREADY;
881 908
882 err = ieee80211_set_channel(wiphy, dev, params->channel, 909 /* TODO: make hostapd tell us what it wants */
883 params->channel_type); 910 sdata->smps_mode = IEEE80211_SMPS_OFF;
911 sdata->needed_rx_chains = sdata->local->rx_chains;
912
913 err = ieee80211_vif_use_channel(sdata, &params->chandef,
914 IEEE80211_CHANCTX_SHARED);
884 if (err) 915 if (err)
885 return err; 916 return err;
886 917
@@ -907,11 +938,23 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
907 sdata->vif.bss_conf.hidden_ssid = 938 sdata->vif.bss_conf.hidden_ssid =
908 (params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE); 939 (params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE);
909 940
941 sdata->vif.bss_conf.p2p_ctwindow = params->p2p_ctwindow;
942 sdata->vif.bss_conf.p2p_oppps = params->p2p_opp_ps;
943
910 err = ieee80211_assign_beacon(sdata, &params->beacon); 944 err = ieee80211_assign_beacon(sdata, &params->beacon);
911 if (err < 0) 945 if (err < 0)
912 return err; 946 return err;
913 changed |= err; 947 changed |= err;
914 948
949 err = drv_start_ap(sdata->local, sdata);
950 if (err) {
951 old = rtnl_dereference(sdata->u.ap.beacon);
952 if (old)
953 kfree_rcu(old, rcu_head);
954 RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
955 return err;
956 }
957
915 ieee80211_bss_info_change_notify(sdata, changed); 958 ieee80211_bss_info_change_notify(sdata, changed);
916 959
917 netif_carrier_on(dev); 960 netif_carrier_on(dev);
@@ -943,26 +986,42 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
943 986
944static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) 987static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
945{ 988{
946 struct ieee80211_sub_if_data *sdata, *vlan; 989 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
947 struct beacon_data *old; 990 struct ieee80211_sub_if_data *vlan;
948 991 struct ieee80211_local *local = sdata->local;
949 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 992 struct beacon_data *old_beacon;
993 struct probe_resp *old_probe_resp;
950 994
951 old = rtnl_dereference(sdata->u.ap.beacon); 995 old_beacon = rtnl_dereference(sdata->u.ap.beacon);
952 if (!old) 996 if (!old_beacon)
953 return -ENOENT; 997 return -ENOENT;
998 old_probe_resp = rtnl_dereference(sdata->u.ap.probe_resp);
954 999
1000 /* turn off carrier for this interface and dependent VLANs */
955 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) 1001 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
956 netif_carrier_off(vlan->dev); 1002 netif_carrier_off(vlan->dev);
957 netif_carrier_off(dev); 1003 netif_carrier_off(dev);
958 1004
1005 /* remove beacon and probe response */
959 RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); 1006 RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
1007 RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL);
1008 kfree_rcu(old_beacon, rcu_head);
1009 if (old_probe_resp)
1010 kfree_rcu(old_probe_resp, rcu_head);
960 1011
961 kfree_rcu(old, rcu_head); 1012 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
962 1013 sta_info_flush(local, vlan);
963 sta_info_flush(sdata->local, sdata); 1014 sta_info_flush(local, sdata);
964 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); 1015 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
965 1016
1017 drv_stop_ap(sdata->local, sdata);
1018
1019 /* free all potentially still buffered bcast frames */
1020 local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf);
1021 skb_queue_purge(&sdata->u.ap.ps.bc_buf);
1022
1023 ieee80211_vif_release_channel(sdata);
1024
966 return 0; 1025 return 0;
967} 1026}
968 1027
@@ -1019,9 +1078,10 @@ static int sta_apply_parameters(struct ieee80211_local *local,
1019 int i, j; 1078 int i, j;
1020 struct ieee80211_supported_band *sband; 1079 struct ieee80211_supported_band *sband;
1021 struct ieee80211_sub_if_data *sdata = sta->sdata; 1080 struct ieee80211_sub_if_data *sdata = sta->sdata;
1081 enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
1022 u32 mask, set; 1082 u32 mask, set;
1023 1083
1024 sband = local->hw.wiphy->bands[local->oper_channel->band]; 1084 sband = local->hw.wiphy->bands[band];
1025 1085
1026 mask = params->sta_flags_mask; 1086 mask = params->sta_flags_mask;
1027 set = params->sta_flags_set; 1087 set = params->sta_flags_set;
@@ -1136,7 +1196,7 @@ static int sta_apply_parameters(struct ieee80211_local *local,
1136 rates |= BIT(j); 1196 rates |= BIT(j);
1137 } 1197 }
1138 } 1198 }
1139 sta->sta.supp_rates[local->oper_channel->band] = rates; 1199 sta->sta.supp_rates[band] = rates;
1140 } 1200 }
1141 1201
1142 if (params->ht_capa) 1202 if (params->ht_capa)
@@ -1144,6 +1204,11 @@ static int sta_apply_parameters(struct ieee80211_local *local,
1144 params->ht_capa, 1204 params->ht_capa,
1145 &sta->sta.ht_cap); 1205 &sta->sta.ht_cap);
1146 1206
1207 if (params->vht_capa)
1208 ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
1209 params->vht_capa,
1210 &sta->sta.vht_cap);
1211
1147 if (ieee80211_vif_is_mesh(&sdata->vif)) { 1212 if (ieee80211_vif_is_mesh(&sdata->vif)) {
1148#ifdef CONFIG_MAC80211_MESH 1213#ifdef CONFIG_MAC80211_MESH
1149 if (sdata->u.mesh.security & IEEE80211_MESH_SEC_SECURED) 1214 if (sdata->u.mesh.security & IEEE80211_MESH_SEC_SECURED)
@@ -1664,8 +1729,12 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
1664 if (err) 1729 if (err)
1665 return err; 1730 return err;
1666 1731
1667 err = ieee80211_set_channel(wiphy, dev, setup->channel, 1732 /* can mesh use other SMPS modes? */
1668 setup->channel_type); 1733 sdata->smps_mode = IEEE80211_SMPS_OFF;
1734 sdata->needed_rx_chains = sdata->local->rx_chains;
1735
1736 err = ieee80211_vif_use_channel(sdata, &setup->chandef,
1737 IEEE80211_CHANCTX_SHARED);
1669 if (err) 1738 if (err)
1670 return err; 1739 return err;
1671 1740
@@ -1679,6 +1748,7 @@ static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev)
1679 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1748 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1680 1749
1681 ieee80211_stop_mesh(sdata); 1750 ieee80211_stop_mesh(sdata);
1751 ieee80211_vif_release_channel(sdata);
1682 1752
1683 return 0; 1753 return 0;
1684} 1754}
@@ -1688,10 +1758,14 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
1688 struct net_device *dev, 1758 struct net_device *dev,
1689 struct bss_parameters *params) 1759 struct bss_parameters *params)
1690{ 1760{
1691 struct ieee80211_sub_if_data *sdata; 1761 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1762 enum ieee80211_band band;
1692 u32 changed = 0; 1763 u32 changed = 0;
1693 1764
1694 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1765 if (!rtnl_dereference(sdata->u.ap.beacon))
1766 return -ENOENT;
1767
1768 band = ieee80211_get_sdata_band(sdata);
1695 1769
1696 if (params->use_cts_prot >= 0) { 1770 if (params->use_cts_prot >= 0) {
1697 sdata->vif.bss_conf.use_cts_prot = params->use_cts_prot; 1771 sdata->vif.bss_conf.use_cts_prot = params->use_cts_prot;
@@ -1704,7 +1778,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
1704 } 1778 }
1705 1779
1706 if (!sdata->vif.bss_conf.use_short_slot && 1780 if (!sdata->vif.bss_conf.use_short_slot &&
1707 sdata->local->oper_channel->band == IEEE80211_BAND_5GHZ) { 1781 band == IEEE80211_BAND_5GHZ) {
1708 sdata->vif.bss_conf.use_short_slot = true; 1782 sdata->vif.bss_conf.use_short_slot = true;
1709 changed |= BSS_CHANGED_ERP_SLOT; 1783 changed |= BSS_CHANGED_ERP_SLOT;
1710 } 1784 }
@@ -1718,9 +1792,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
1718 if (params->basic_rates) { 1792 if (params->basic_rates) {
1719 int i, j; 1793 int i, j;
1720 u32 rates = 0; 1794 u32 rates = 0;
1721 struct ieee80211_local *local = wiphy_priv(wiphy); 1795 struct ieee80211_supported_band *sband = wiphy->bands[band];
1722 struct ieee80211_supported_band *sband =
1723 wiphy->bands[local->oper_channel->band];
1724 1796
1725 for (i = 0; i < params->basic_rates_len; i++) { 1797 for (i = 0; i < params->basic_rates_len; i++) {
1726 int rate = (params->basic_rates[i] & 0x7f) * 5; 1798 int rate = (params->basic_rates[i] & 0x7f) * 5;
@@ -1746,6 +1818,16 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
1746 changed |= BSS_CHANGED_HT; 1818 changed |= BSS_CHANGED_HT;
1747 } 1819 }
1748 1820
1821 if (params->p2p_ctwindow >= 0) {
1822 sdata->vif.bss_conf.p2p_ctwindow = params->p2p_ctwindow;
1823 changed |= BSS_CHANGED_P2P_PS;
1824 }
1825
1826 if (params->p2p_opp_ps >= 0) {
1827 sdata->vif.bss_conf.p2p_oppps = params->p2p_opp_ps;
1828 changed |= BSS_CHANGED_P2P_PS;
1829 }
1830
1749 ieee80211_bss_info_change_notify(sdata, changed); 1831 ieee80211_bss_info_change_notify(sdata, changed);
1750 1832
1751 return 0; 1833 return 0;
@@ -1829,7 +1911,16 @@ static int ieee80211_scan(struct wiphy *wiphy,
1829 * beaconing hasn't been configured yet 1911 * beaconing hasn't been configured yet
1830 */ 1912 */
1831 case NL80211_IFTYPE_AP: 1913 case NL80211_IFTYPE_AP:
1832 if (sdata->u.ap.beacon) 1914 /*
1915 * If the scan has been forced (and the driver supports
1916 * forcing), don't care about being beaconing already.
1917 * This will create problems to the attached stations (e.g. all
1918 * the frames sent while scanning on other channel will be
1919 * lost)
1920 */
1921 if (sdata->u.ap.beacon &&
1922 (!(wiphy->features & NL80211_FEATURE_AP_SCAN) ||
1923 !(req->flags & NL80211_SCAN_FLAG_AP)))
1833 return -EOPNOTSUPP; 1924 return -EOPNOTSUPP;
1834 break; 1925 break;
1835 default: 1926 default:
@@ -1872,20 +1963,6 @@ static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev,
1872static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev, 1963static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev,
1873 struct cfg80211_assoc_request *req) 1964 struct cfg80211_assoc_request *req)
1874{ 1965{
1875 struct ieee80211_local *local = wiphy_priv(wiphy);
1876 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1877
1878 switch (ieee80211_get_channel_mode(local, sdata)) {
1879 case CHAN_MODE_HOPPING:
1880 return -EBUSY;
1881 case CHAN_MODE_FIXED:
1882 if (local->oper_channel == req->bss->channel)
1883 break;
1884 return -EBUSY;
1885 case CHAN_MODE_UNDEFINED:
1886 break;
1887 }
1888
1889 return ieee80211_mgd_assoc(IEEE80211_DEV_TO_SUB_IF(dev), req); 1966 return ieee80211_mgd_assoc(IEEE80211_DEV_TO_SUB_IF(dev), req);
1890} 1967}
1891 1968
@@ -1904,30 +1981,22 @@ static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev,
1904static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, 1981static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
1905 struct cfg80211_ibss_params *params) 1982 struct cfg80211_ibss_params *params)
1906{ 1983{
1907 struct ieee80211_local *local = wiphy_priv(wiphy); 1984 return ieee80211_ibss_join(IEEE80211_DEV_TO_SUB_IF(dev), params);
1908 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1909
1910 switch (ieee80211_get_channel_mode(local, sdata)) {
1911 case CHAN_MODE_HOPPING:
1912 return -EBUSY;
1913 case CHAN_MODE_FIXED:
1914 if (!params->channel_fixed)
1915 return -EBUSY;
1916 if (local->oper_channel == params->channel)
1917 break;
1918 return -EBUSY;
1919 case CHAN_MODE_UNDEFINED:
1920 break;
1921 }
1922
1923 return ieee80211_ibss_join(sdata, params);
1924} 1985}
1925 1986
1926static int ieee80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) 1987static int ieee80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
1927{ 1988{
1989 return ieee80211_ibss_leave(IEEE80211_DEV_TO_SUB_IF(dev));
1990}
1991
1992static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
1993 int rate[IEEE80211_NUM_BANDS])
1994{
1928 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1995 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1929 1996
1930 return ieee80211_ibss_leave(sdata); 1997 memcpy(sdata->vif.bss_conf.mcast_rate, rate, sizeof(rate));
1998
1999 return 0;
1931} 2000}
1932 2001
1933static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) 2002static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
@@ -1956,10 +2025,16 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
1956 return err; 2025 return err;
1957 } 2026 }
1958 2027
1959 if (changed & WIPHY_PARAM_RETRY_SHORT) 2028 if (changed & WIPHY_PARAM_RETRY_SHORT) {
2029 if (wiphy->retry_short > IEEE80211_MAX_TX_RETRY)
2030 return -EINVAL;
1960 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; 2031 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
1961 if (changed & WIPHY_PARAM_RETRY_LONG) 2032 }
2033 if (changed & WIPHY_PARAM_RETRY_LONG) {
2034 if (wiphy->retry_long > IEEE80211_MAX_TX_RETRY)
2035 return -EINVAL;
1962 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; 2036 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
2037 }
1963 if (changed & 2038 if (changed &
1964 (WIPHY_PARAM_RETRY_SHORT | WIPHY_PARAM_RETRY_LONG)) 2039 (WIPHY_PARAM_RETRY_SHORT | WIPHY_PARAM_RETRY_LONG))
1965 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_RETRY_LIMITS); 2040 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_RETRY_LIMITS);
@@ -1968,41 +2043,65 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
1968} 2043}
1969 2044
1970static int ieee80211_set_tx_power(struct wiphy *wiphy, 2045static int ieee80211_set_tx_power(struct wiphy *wiphy,
2046 struct wireless_dev *wdev,
1971 enum nl80211_tx_power_setting type, int mbm) 2047 enum nl80211_tx_power_setting type, int mbm)
1972{ 2048{
1973 struct ieee80211_local *local = wiphy_priv(wiphy); 2049 struct ieee80211_local *local = wiphy_priv(wiphy);
1974 struct ieee80211_channel *chan = local->oper_channel; 2050 struct ieee80211_sub_if_data *sdata;
1975 u32 changes = 0; 2051
2052 if (wdev) {
2053 sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
2054
2055 switch (type) {
2056 case NL80211_TX_POWER_AUTOMATIC:
2057 sdata->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
2058 break;
2059 case NL80211_TX_POWER_LIMITED:
2060 case NL80211_TX_POWER_FIXED:
2061 if (mbm < 0 || (mbm % 100))
2062 return -EOPNOTSUPP;
2063 sdata->user_power_level = MBM_TO_DBM(mbm);
2064 break;
2065 }
2066
2067 ieee80211_recalc_txpower(sdata);
2068
2069 return 0;
2070 }
1976 2071
1977 switch (type) { 2072 switch (type) {
1978 case NL80211_TX_POWER_AUTOMATIC: 2073 case NL80211_TX_POWER_AUTOMATIC:
1979 local->user_power_level = -1; 2074 local->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
1980 break; 2075 break;
1981 case NL80211_TX_POWER_LIMITED: 2076 case NL80211_TX_POWER_LIMITED:
1982 if (mbm < 0 || (mbm % 100))
1983 return -EOPNOTSUPP;
1984 local->user_power_level = MBM_TO_DBM(mbm);
1985 break;
1986 case NL80211_TX_POWER_FIXED: 2077 case NL80211_TX_POWER_FIXED:
1987 if (mbm < 0 || (mbm % 100)) 2078 if (mbm < 0 || (mbm % 100))
1988 return -EOPNOTSUPP; 2079 return -EOPNOTSUPP;
1989 /* TODO: move to cfg80211 when it knows the channel */
1990 if (MBM_TO_DBM(mbm) > chan->max_power)
1991 return -EINVAL;
1992 local->user_power_level = MBM_TO_DBM(mbm); 2080 local->user_power_level = MBM_TO_DBM(mbm);
1993 break; 2081 break;
1994 } 2082 }
1995 2083
1996 ieee80211_hw_config(local, changes); 2084 mutex_lock(&local->iflist_mtx);
2085 list_for_each_entry(sdata, &local->interfaces, list)
2086 sdata->user_power_level = local->user_power_level;
2087 list_for_each_entry(sdata, &local->interfaces, list)
2088 ieee80211_recalc_txpower(sdata);
2089 mutex_unlock(&local->iflist_mtx);
1997 2090
1998 return 0; 2091 return 0;
1999} 2092}
2000 2093
2001static int ieee80211_get_tx_power(struct wiphy *wiphy, int *dbm) 2094static int ieee80211_get_tx_power(struct wiphy *wiphy,
2095 struct wireless_dev *wdev,
2096 int *dbm)
2002{ 2097{
2003 struct ieee80211_local *local = wiphy_priv(wiphy); 2098 struct ieee80211_local *local = wiphy_priv(wiphy);
2099 struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
2004 2100
2005 *dbm = local->hw.conf.power_level; 2101 if (!local->use_chanctx)
2102 *dbm = local->hw.conf.power_level;
2103 else
2104 *dbm = sdata->vif.bss_conf.txpower;
2006 2105
2007 return 0; 2106 return 0;
2008} 2107}
@@ -2067,13 +2166,12 @@ int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
2067 2166
2068 /* 2167 /*
2069 * If not associated, or current association is not an HT 2168 * If not associated, or current association is not an HT
2070 * association, there's no need to send an action frame. 2169 * association, there's no need to do anything, just store
2170 * the new value until we associate.
2071 */ 2171 */
2072 if (!sdata->u.mgd.associated || 2172 if (!sdata->u.mgd.associated ||
2073 sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT) { 2173 sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
2074 ieee80211_recalc_smps(sdata->local);
2075 return 0; 2174 return 0;
2076 }
2077 2175
2078 ap = sdata->u.mgd.associated->bssid; 2176 ap = sdata->u.mgd.associated->bssid;
2079 2177
@@ -2179,7 +2277,6 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
2179static int ieee80211_start_roc_work(struct ieee80211_local *local, 2277static int ieee80211_start_roc_work(struct ieee80211_local *local,
2180 struct ieee80211_sub_if_data *sdata, 2278 struct ieee80211_sub_if_data *sdata,
2181 struct ieee80211_channel *channel, 2279 struct ieee80211_channel *channel,
2182 enum nl80211_channel_type channel_type,
2183 unsigned int duration, u64 *cookie, 2280 unsigned int duration, u64 *cookie,
2184 struct sk_buff *txskb) 2281 struct sk_buff *txskb)
2185{ 2282{
@@ -2189,12 +2286,14 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
2189 2286
2190 lockdep_assert_held(&local->mtx); 2287 lockdep_assert_held(&local->mtx);
2191 2288
2289 if (local->use_chanctx && !local->ops->remain_on_channel)
2290 return -EOPNOTSUPP;
2291
2192 roc = kzalloc(sizeof(*roc), GFP_KERNEL); 2292 roc = kzalloc(sizeof(*roc), GFP_KERNEL);
2193 if (!roc) 2293 if (!roc)
2194 return -ENOMEM; 2294 return -ENOMEM;
2195 2295
2196 roc->chan = channel; 2296 roc->chan = channel;
2197 roc->chan_type = channel_type;
2198 roc->duration = duration; 2297 roc->duration = duration;
2199 roc->req_duration = duration; 2298 roc->req_duration = duration;
2200 roc->frame = txskb; 2299 roc->frame = txskb;
@@ -2227,7 +2326,7 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
2227 if (!duration) 2326 if (!duration)
2228 duration = 10; 2327 duration = 10;
2229 2328
2230 ret = drv_remain_on_channel(local, channel, channel_type, duration); 2329 ret = drv_remain_on_channel(local, sdata, channel, duration);
2231 if (ret) { 2330 if (ret) {
2232 kfree(roc); 2331 kfree(roc);
2233 return ret; 2332 return ret;
@@ -2238,7 +2337,7 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
2238 2337
2239 out_check_combine: 2338 out_check_combine:
2240 list_for_each_entry(tmp, &local->roc_list, list) { 2339 list_for_each_entry(tmp, &local->roc_list, list) {
2241 if (tmp->chan != channel || tmp->chan_type != channel_type) 2340 if (tmp->chan != channel || tmp->sdata != sdata)
2242 continue; 2341 continue;
2243 2342
2244 /* 2343 /*
@@ -2332,13 +2431,22 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
2332 list_add_tail(&roc->list, &local->roc_list); 2431 list_add_tail(&roc->list, &local->roc_list);
2333 2432
2334 /* 2433 /*
2335 * cookie is either the roc (for normal roc) 2434 * cookie is either the roc cookie (for normal roc)
2336 * or the SKB (for mgmt TX) 2435 * or the SKB (for mgmt TX)
2337 */ 2436 */
2338 if (txskb) 2437 if (!txskb) {
2438 /* local->mtx protects this */
2439 local->roc_cookie_counter++;
2440 roc->cookie = local->roc_cookie_counter;
2441 /* wow, you wrapped 64 bits ... more likely a bug */
2442 if (WARN_ON(roc->cookie == 0)) {
2443 roc->cookie = 1;
2444 local->roc_cookie_counter++;
2445 }
2446 *cookie = roc->cookie;
2447 } else {
2339 *cookie = (unsigned long)txskb; 2448 *cookie = (unsigned long)txskb;
2340 else 2449 }
2341 *cookie = (unsigned long)roc;
2342 2450
2343 return 0; 2451 return 0;
2344} 2452}
@@ -2346,7 +2454,6 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
2346static int ieee80211_remain_on_channel(struct wiphy *wiphy, 2454static int ieee80211_remain_on_channel(struct wiphy *wiphy,
2347 struct wireless_dev *wdev, 2455 struct wireless_dev *wdev,
2348 struct ieee80211_channel *chan, 2456 struct ieee80211_channel *chan,
2349 enum nl80211_channel_type channel_type,
2350 unsigned int duration, 2457 unsigned int duration,
2351 u64 *cookie) 2458 u64 *cookie)
2352{ 2459{
@@ -2355,7 +2462,7 @@ static int ieee80211_remain_on_channel(struct wiphy *wiphy,
2355 int ret; 2462 int ret;
2356 2463
2357 mutex_lock(&local->mtx); 2464 mutex_lock(&local->mtx);
2358 ret = ieee80211_start_roc_work(local, sdata, chan, channel_type, 2465 ret = ieee80211_start_roc_work(local, sdata, chan,
2359 duration, cookie, NULL); 2466 duration, cookie, NULL);
2360 mutex_unlock(&local->mtx); 2467 mutex_unlock(&local->mtx);
2361 2468
@@ -2373,7 +2480,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
2373 struct ieee80211_roc_work *dep, *tmp2; 2480 struct ieee80211_roc_work *dep, *tmp2;
2374 2481
2375 list_for_each_entry_safe(dep, tmp2, &roc->dependents, list) { 2482 list_for_each_entry_safe(dep, tmp2, &roc->dependents, list) {
2376 if (!mgmt_tx && (unsigned long)dep != cookie) 2483 if (!mgmt_tx && dep->cookie != cookie)
2377 continue; 2484 continue;
2378 else if (mgmt_tx && dep->mgmt_tx_cookie != cookie) 2485 else if (mgmt_tx && dep->mgmt_tx_cookie != cookie)
2379 continue; 2486 continue;
@@ -2385,7 +2492,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
2385 return 0; 2492 return 0;
2386 } 2493 }
2387 2494
2388 if (!mgmt_tx && (unsigned long)roc != cookie) 2495 if (!mgmt_tx && roc->cookie != cookie)
2389 continue; 2496 continue;
2390 else if (mgmt_tx && roc->mgmt_tx_cookie != cookie) 2497 else if (mgmt_tx && roc->mgmt_tx_cookie != cookie)
2391 continue; 2498 continue;
@@ -2448,10 +2555,8 @@ static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
2448 2555
2449static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, 2556static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
2450 struct ieee80211_channel *chan, bool offchan, 2557 struct ieee80211_channel *chan, bool offchan,
2451 enum nl80211_channel_type channel_type, 2558 unsigned int wait, const u8 *buf, size_t len,
2452 bool channel_type_valid, unsigned int wait, 2559 bool no_cck, bool dont_wait_for_ack, u64 *cookie)
2453 const u8 *buf, size_t len, bool no_cck,
2454 bool dont_wait_for_ack, u64 *cookie)
2455{ 2560{
2456 struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); 2561 struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
2457 struct ieee80211_local *local = sdata->local; 2562 struct ieee80211_local *local = sdata->local;
@@ -2515,10 +2620,16 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
2515 2620
2516 /* Check if the operating channel is the requested channel */ 2621 /* Check if the operating channel is the requested channel */
2517 if (!need_offchan) { 2622 if (!need_offchan) {
2518 need_offchan = chan != local->oper_channel; 2623 struct ieee80211_chanctx_conf *chanctx_conf;
2519 if (channel_type_valid && 2624
2520 channel_type != local->_oper_channel_type) 2625 rcu_read_lock();
2626 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
2627
2628 if (chanctx_conf)
2629 need_offchan = chan != chanctx_conf->def.chan;
2630 else
2521 need_offchan = true; 2631 need_offchan = true;
2632 rcu_read_unlock();
2522 } 2633 }
2523 2634
2524 if (need_offchan && !offchan) { 2635 if (need_offchan && !offchan) {
@@ -2552,7 +2663,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
2552 local->hw.offchannel_tx_hw_queue; 2663 local->hw.offchannel_tx_hw_queue;
2553 2664
2554 /* This will handle all kinds of coalescing and immediate TX */ 2665 /* This will handle all kinds of coalescing and immediate TX */
2555 ret = ieee80211_start_roc_work(local, sdata, chan, channel_type, 2666 ret = ieee80211_start_roc_work(local, sdata, chan,
2556 wait, cookie, skb); 2667 wait, cookie, skb);
2557 if (ret) 2668 if (ret)
2558 kfree_skb(skb); 2669 kfree_skb(skb);
@@ -2594,6 +2705,9 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
2594 else 2705 else
2595 local->probe_req_reg--; 2706 local->probe_req_reg--;
2596 2707
2708 if (!local->open_count)
2709 break;
2710
2597 ieee80211_queue_work(&local->hw, &local->reconfig_filter); 2711 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
2598 break; 2712 break;
2599 default: 2713 default:
@@ -2667,7 +2781,7 @@ static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata)
2667 u16 capab; 2781 u16 capab;
2668 2782
2669 capab = 0; 2783 capab = 0;
2670 if (local->oper_channel->band != IEEE80211_BAND_2GHZ) 2784 if (ieee80211_get_sdata_band(sdata) != IEEE80211_BAND_2GHZ)
2671 return capab; 2785 return capab;
2672 2786
2673 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE)) 2787 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
@@ -2699,7 +2813,7 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
2699 u16 status_code, struct sk_buff *skb) 2813 u16 status_code, struct sk_buff *skb)
2700{ 2814{
2701 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2815 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2702 struct ieee80211_local *local = sdata->local; 2816 enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
2703 struct ieee80211_tdls_data *tf; 2817 struct ieee80211_tdls_data *tf;
2704 2818
2705 tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u)); 2819 tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
@@ -2719,10 +2833,8 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
2719 tf->u.setup_req.capability = 2833 tf->u.setup_req.capability =
2720 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); 2834 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
2721 2835
2722 ieee80211_add_srates_ie(sdata, skb, false, 2836 ieee80211_add_srates_ie(sdata, skb, false, band);
2723 local->oper_channel->band); 2837 ieee80211_add_ext_srates_ie(sdata, skb, false, band);
2724 ieee80211_add_ext_srates_ie(sdata, skb, false,
2725 local->oper_channel->band);
2726 ieee80211_tdls_add_ext_capab(skb); 2838 ieee80211_tdls_add_ext_capab(skb);
2727 break; 2839 break;
2728 case WLAN_TDLS_SETUP_RESPONSE: 2840 case WLAN_TDLS_SETUP_RESPONSE:
@@ -2735,10 +2847,8 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
2735 tf->u.setup_resp.capability = 2847 tf->u.setup_resp.capability =
2736 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); 2848 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
2737 2849
2738 ieee80211_add_srates_ie(sdata, skb, false, 2850 ieee80211_add_srates_ie(sdata, skb, false, band);
2739 local->oper_channel->band); 2851 ieee80211_add_ext_srates_ie(sdata, skb, false, band);
2740 ieee80211_add_ext_srates_ie(sdata, skb, false,
2741 local->oper_channel->band);
2742 ieee80211_tdls_add_ext_capab(skb); 2852 ieee80211_tdls_add_ext_capab(skb);
2743 break; 2853 break;
2744 case WLAN_TDLS_SETUP_CONFIRM: 2854 case WLAN_TDLS_SETUP_CONFIRM:
@@ -2776,7 +2886,7 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
2776 u16 status_code, struct sk_buff *skb) 2886 u16 status_code, struct sk_buff *skb)
2777{ 2887{
2778 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2888 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2779 struct ieee80211_local *local = sdata->local; 2889 enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
2780 struct ieee80211_mgmt *mgmt; 2890 struct ieee80211_mgmt *mgmt;
2781 2891
2782 mgmt = (void *)skb_put(skb, 24); 2892 mgmt = (void *)skb_put(skb, 24);
@@ -2799,10 +2909,8 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
2799 mgmt->u.action.u.tdls_discover_resp.capability = 2909 mgmt->u.action.u.tdls_discover_resp.capability =
2800 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); 2910 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
2801 2911
2802 ieee80211_add_srates_ie(sdata, skb, false, 2912 ieee80211_add_srates_ie(sdata, skb, false, band);
2803 local->oper_channel->band); 2913 ieee80211_add_ext_srates_ie(sdata, skb, false, band);
2804 ieee80211_add_ext_srates_ie(sdata, skb, false,
2805 local->oper_channel->band);
2806 ieee80211_tdls_add_ext_capab(skb); 2914 ieee80211_tdls_add_ext_capab(skb);
2807 break; 2915 break;
2808 default: 2916 default:
@@ -2819,7 +2927,6 @@ static int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
2819{ 2927{
2820 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2928 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2821 struct ieee80211_local *local = sdata->local; 2929 struct ieee80211_local *local = sdata->local;
2822 struct ieee80211_tx_info *info;
2823 struct sk_buff *skb = NULL; 2930 struct sk_buff *skb = NULL;
2824 bool send_direct; 2931 bool send_direct;
2825 int ret; 2932 int ret;
@@ -2845,7 +2952,6 @@ static int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
2845 if (!skb) 2952 if (!skb)
2846 return -ENOMEM; 2953 return -ENOMEM;
2847 2954
2848 info = IEEE80211_SKB_CB(skb);
2849 skb_reserve(skb, local->hw.extra_tx_headroom); 2955 skb_reserve(skb, local->hw.extra_tx_headroom);
2850 2956
2851 switch (action_code) { 2957 switch (action_code) {
@@ -2982,12 +3088,19 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
2982 bool qos; 3088 bool qos;
2983 struct ieee80211_tx_info *info; 3089 struct ieee80211_tx_info *info;
2984 struct sta_info *sta; 3090 struct sta_info *sta;
3091 struct ieee80211_chanctx_conf *chanctx_conf;
3092 enum ieee80211_band band;
2985 3093
2986 rcu_read_lock(); 3094 rcu_read_lock();
3095 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
3096 if (WARN_ON(!chanctx_conf)) {
3097 rcu_read_unlock();
3098 return -EINVAL;
3099 }
3100 band = chanctx_conf->def.chan->band;
2987 sta = sta_info_get(sdata, peer); 3101 sta = sta_info_get(sdata, peer);
2988 if (sta) { 3102 if (sta) {
2989 qos = test_sta_flag(sta, WLAN_STA_WME); 3103 qos = test_sta_flag(sta, WLAN_STA_WME);
2990 rcu_read_unlock();
2991 } else { 3104 } else {
2992 rcu_read_unlock(); 3105 rcu_read_unlock();
2993 return -ENOLINK; 3106 return -ENOLINK;
@@ -3005,8 +3118,10 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
3005 } 3118 }
3006 3119
3007 skb = dev_alloc_skb(local->hw.extra_tx_headroom + size); 3120 skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
3008 if (!skb) 3121 if (!skb) {
3122 rcu_read_unlock();
3009 return -ENOMEM; 3123 return -ENOMEM;
3124 }
3010 3125
3011 skb->dev = dev; 3126 skb->dev = dev;
3012 3127
@@ -3031,21 +3146,31 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
3031 nullfunc->qos_ctrl = cpu_to_le16(7); 3146 nullfunc->qos_ctrl = cpu_to_le16(7);
3032 3147
3033 local_bh_disable(); 3148 local_bh_disable();
3034 ieee80211_xmit(sdata, skb); 3149 ieee80211_xmit(sdata, skb, band);
3035 local_bh_enable(); 3150 local_bh_enable();
3151 rcu_read_unlock();
3036 3152
3037 *cookie = (unsigned long) skb; 3153 *cookie = (unsigned long) skb;
3038 return 0; 3154 return 0;
3039} 3155}
3040 3156
3041static struct ieee80211_channel * 3157static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
3042ieee80211_cfg_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev, 3158 struct wireless_dev *wdev,
3043 enum nl80211_channel_type *type) 3159 struct cfg80211_chan_def *chandef)
3044{ 3160{
3045 struct ieee80211_local *local = wiphy_priv(wiphy); 3161 struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
3162 struct ieee80211_chanctx_conf *chanctx_conf;
3163 int ret = -ENODATA;
3164
3165 rcu_read_lock();
3166 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
3167 if (chanctx_conf) {
3168 *chandef = chanctx_conf->def;
3169 ret = 0;
3170 }
3171 rcu_read_unlock();
3046 3172
3047 *type = local->_oper_channel_type; 3173 return ret;
3048 return local->oper_channel;
3049} 3174}
3050 3175
3051#ifdef CONFIG_PM 3176#ifdef CONFIG_PM
@@ -3100,6 +3225,7 @@ struct cfg80211_ops mac80211_config_ops = {
3100 .disassoc = ieee80211_disassoc, 3225 .disassoc = ieee80211_disassoc,
3101 .join_ibss = ieee80211_join_ibss, 3226 .join_ibss = ieee80211_join_ibss,
3102 .leave_ibss = ieee80211_leave_ibss, 3227 .leave_ibss = ieee80211_leave_ibss,
3228 .set_mcast_rate = ieee80211_set_mcast_rate,
3103 .set_wiphy_params = ieee80211_set_wiphy_params, 3229 .set_wiphy_params = ieee80211_set_wiphy_params,
3104 .set_tx_power = ieee80211_set_tx_power, 3230 .set_tx_power = ieee80211_set_tx_power,
3105 .get_tx_power = ieee80211_get_tx_power, 3231 .get_tx_power = ieee80211_get_tx_power,
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 0bfc914ddd15..80e55527504b 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -3,168 +3,385 @@
3 */ 3 */
4 4
5#include <linux/nl80211.h> 5#include <linux/nl80211.h>
6#include <linux/export.h>
7#include <linux/rtnetlink.h>
6#include <net/cfg80211.h> 8#include <net/cfg80211.h>
7#include "ieee80211_i.h" 9#include "ieee80211_i.h"
10#include "driver-ops.h"
8 11
9static enum ieee80211_chan_mode 12static void ieee80211_change_chandef(struct ieee80211_local *local,
10__ieee80211_get_channel_mode(struct ieee80211_local *local, 13 struct ieee80211_chanctx *ctx,
11 struct ieee80211_sub_if_data *ignore) 14 const struct cfg80211_chan_def *chandef)
12{ 15{
16 if (cfg80211_chandef_identical(&ctx->conf.def, chandef))
17 return;
18
19 WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef));
20
21 ctx->conf.def = *chandef;
22 drv_change_chanctx(local, ctx, IEEE80211_CHANCTX_CHANGE_WIDTH);
23
24 if (!local->use_chanctx) {
25 local->_oper_channel_type = cfg80211_get_chandef_type(chandef);
26 ieee80211_hw_config(local, 0);
27 }
28}
29
30static struct ieee80211_chanctx *
31ieee80211_find_chanctx(struct ieee80211_local *local,
32 const struct cfg80211_chan_def *chandef,
33 enum ieee80211_chanctx_mode mode)
34{
35 struct ieee80211_chanctx *ctx;
36
37 lockdep_assert_held(&local->chanctx_mtx);
38
39 if (mode == IEEE80211_CHANCTX_EXCLUSIVE)
40 return NULL;
41
42 list_for_each_entry(ctx, &local->chanctx_list, list) {
43 const struct cfg80211_chan_def *compat;
44
45 if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE)
46 continue;
47
48 compat = cfg80211_chandef_compatible(&ctx->conf.def, chandef);
49 if (!compat)
50 continue;
51
52 ieee80211_change_chandef(local, ctx, compat);
53
54 return ctx;
55 }
56
57 return NULL;
58}
59
60static struct ieee80211_chanctx *
61ieee80211_new_chanctx(struct ieee80211_local *local,
62 const struct cfg80211_chan_def *chandef,
63 enum ieee80211_chanctx_mode mode)
64{
65 struct ieee80211_chanctx *ctx;
66 int err;
67
68 lockdep_assert_held(&local->chanctx_mtx);
69
70 ctx = kzalloc(sizeof(*ctx) + local->hw.chanctx_data_size, GFP_KERNEL);
71 if (!ctx)
72 return ERR_PTR(-ENOMEM);
73
74 ctx->conf.def = *chandef;
75 ctx->conf.rx_chains_static = 1;
76 ctx->conf.rx_chains_dynamic = 1;
77 ctx->mode = mode;
78
79 if (!local->use_chanctx) {
80 local->_oper_channel_type =
81 cfg80211_get_chandef_type(chandef);
82 local->_oper_channel = chandef->chan;
83 ieee80211_hw_config(local, 0);
84 } else {
85 err = drv_add_chanctx(local, ctx);
86 if (err) {
87 kfree(ctx);
88 return ERR_PTR(err);
89 }
90 }
91
92 list_add_rcu(&ctx->list, &local->chanctx_list);
93
94 return ctx;
95}
96
97static void ieee80211_free_chanctx(struct ieee80211_local *local,
98 struct ieee80211_chanctx *ctx)
99{
100 lockdep_assert_held(&local->chanctx_mtx);
101
102 WARN_ON_ONCE(ctx->refcount != 0);
103
104 if (!local->use_chanctx) {
105 local->_oper_channel_type = NL80211_CHAN_NO_HT;
106 ieee80211_hw_config(local, 0);
107 } else {
108 drv_remove_chanctx(local, ctx);
109 }
110
111 list_del_rcu(&ctx->list);
112 kfree_rcu(ctx, rcu_head);
113}
114
115static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
116 struct ieee80211_chanctx *ctx)
117{
118 struct ieee80211_local *local = sdata->local;
119 int ret;
120
121 lockdep_assert_held(&local->chanctx_mtx);
122
123 ret = drv_assign_vif_chanctx(local, sdata, ctx);
124 if (ret)
125 return ret;
126
127 rcu_assign_pointer(sdata->vif.chanctx_conf, &ctx->conf);
128 ctx->refcount++;
129
130 ieee80211_recalc_txpower(sdata);
131
132 return 0;
133}
134
135static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
136 struct ieee80211_chanctx *ctx)
137{
138 struct ieee80211_chanctx_conf *conf = &ctx->conf;
13 struct ieee80211_sub_if_data *sdata; 139 struct ieee80211_sub_if_data *sdata;
140 const struct cfg80211_chan_def *compat = NULL;
141
142 lockdep_assert_held(&local->chanctx_mtx);
14 143
15 lockdep_assert_held(&local->iflist_mtx); 144 rcu_read_lock();
145 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
16 146
17 list_for_each_entry(sdata, &local->interfaces, list) { 147 if (!ieee80211_sdata_running(sdata))
18 if (sdata == ignore) 148 continue;
149 if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf)
19 continue; 150 continue;
20 151
152 if (!compat)
153 compat = &sdata->vif.bss_conf.chandef;
154
155 compat = cfg80211_chandef_compatible(
156 &sdata->vif.bss_conf.chandef, compat);
157 if (!compat)
158 break;
159 }
160 rcu_read_unlock();
161
162 if (WARN_ON_ONCE(!compat))
163 return;
164
165 ieee80211_change_chandef(local, ctx, compat);
166}
167
168static void ieee80211_unassign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
169 struct ieee80211_chanctx *ctx)
170{
171 struct ieee80211_local *local = sdata->local;
172
173 lockdep_assert_held(&local->chanctx_mtx);
174
175 ctx->refcount--;
176 rcu_assign_pointer(sdata->vif.chanctx_conf, NULL);
177
178 drv_unassign_vif_chanctx(local, sdata, ctx);
179
180 if (ctx->refcount > 0) {
181 ieee80211_recalc_chanctx_chantype(sdata->local, ctx);
182 ieee80211_recalc_smps_chanctx(local, ctx);
183 }
184}
185
186static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
187{
188 struct ieee80211_local *local = sdata->local;
189 struct ieee80211_chanctx_conf *conf;
190 struct ieee80211_chanctx *ctx;
191
192 lockdep_assert_held(&local->chanctx_mtx);
193
194 conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
195 lockdep_is_held(&local->chanctx_mtx));
196 if (!conf)
197 return;
198
199 ctx = container_of(conf, struct ieee80211_chanctx, conf);
200
201 if (sdata->vif.type == NL80211_IFTYPE_AP) {
202 struct ieee80211_sub_if_data *vlan;
203
204 /* for the VLAN list */
205 ASSERT_RTNL();
206 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
207 rcu_assign_pointer(vlan->vif.chanctx_conf, NULL);
208 }
209
210 ieee80211_unassign_vif_chanctx(sdata, ctx);
211 if (ctx->refcount == 0)
212 ieee80211_free_chanctx(local, ctx);
213}
214
215void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
216 struct ieee80211_chanctx *chanctx)
217{
218 struct ieee80211_sub_if_data *sdata;
219 u8 rx_chains_static, rx_chains_dynamic;
220
221 lockdep_assert_held(&local->chanctx_mtx);
222
223 rx_chains_static = 1;
224 rx_chains_dynamic = 1;
225
226 rcu_read_lock();
227 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
228 u8 needed_static, needed_dynamic;
229
21 if (!ieee80211_sdata_running(sdata)) 230 if (!ieee80211_sdata_running(sdata))
22 continue; 231 continue;
23 232
233 if (rcu_access_pointer(sdata->vif.chanctx_conf) !=
234 &chanctx->conf)
235 continue;
236
24 switch (sdata->vif.type) { 237 switch (sdata->vif.type) {
25 case NL80211_IFTYPE_MONITOR: 238 case NL80211_IFTYPE_P2P_DEVICE:
26 continue; 239 continue;
27 case NL80211_IFTYPE_STATION: 240 case NL80211_IFTYPE_STATION:
28 if (!sdata->u.mgd.associated) 241 if (!sdata->u.mgd.associated)
29 continue; 242 continue;
30 break; 243 break;
31 case NL80211_IFTYPE_ADHOC:
32 if (!sdata->u.ibss.ssid_len)
33 continue;
34 if (!sdata->u.ibss.fixed_channel)
35 return CHAN_MODE_HOPPING;
36 break;
37 case NL80211_IFTYPE_AP_VLAN: 244 case NL80211_IFTYPE_AP_VLAN:
38 /* will also have _AP interface */
39 continue; 245 continue;
40 case NL80211_IFTYPE_AP: 246 case NL80211_IFTYPE_AP:
41 if (!sdata->u.ap.beacon) 247 case NL80211_IFTYPE_ADHOC:
42 continue; 248 case NL80211_IFTYPE_WDS:
43 break;
44 case NL80211_IFTYPE_MESH_POINT: 249 case NL80211_IFTYPE_MESH_POINT:
45 if (!sdata->wdev.mesh_id_len)
46 continue;
47 break; 250 break;
48 default: 251 default:
252 WARN_ON_ONCE(1);
253 }
254
255 switch (sdata->smps_mode) {
256 default:
257 WARN_ONCE(1, "Invalid SMPS mode %d\n",
258 sdata->smps_mode);
259 /* fall through */
260 case IEEE80211_SMPS_OFF:
261 needed_static = sdata->needed_rx_chains;
262 needed_dynamic = sdata->needed_rx_chains;
263 break;
264 case IEEE80211_SMPS_DYNAMIC:
265 needed_static = 1;
266 needed_dynamic = sdata->needed_rx_chains;
267 break;
268 case IEEE80211_SMPS_STATIC:
269 needed_static = 1;
270 needed_dynamic = 1;
49 break; 271 break;
50 } 272 }
51 273
52 return CHAN_MODE_FIXED; 274 rx_chains_static = max(rx_chains_static, needed_static);
275 rx_chains_dynamic = max(rx_chains_dynamic, needed_dynamic);
53 } 276 }
277 rcu_read_unlock();
54 278
55 return CHAN_MODE_UNDEFINED; 279 if (!local->use_chanctx) {
280 if (rx_chains_static > 1)
281 local->smps_mode = IEEE80211_SMPS_OFF;
282 else if (rx_chains_dynamic > 1)
283 local->smps_mode = IEEE80211_SMPS_DYNAMIC;
284 else
285 local->smps_mode = IEEE80211_SMPS_STATIC;
286 ieee80211_hw_config(local, 0);
287 }
288
289 if (rx_chains_static == chanctx->conf.rx_chains_static &&
290 rx_chains_dynamic == chanctx->conf.rx_chains_dynamic)
291 return;
292
293 chanctx->conf.rx_chains_static = rx_chains_static;
294 chanctx->conf.rx_chains_dynamic = rx_chains_dynamic;
295 drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RX_CHAINS);
56} 296}
57 297
58enum ieee80211_chan_mode 298int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
59ieee80211_get_channel_mode(struct ieee80211_local *local, 299 const struct cfg80211_chan_def *chandef,
60 struct ieee80211_sub_if_data *ignore) 300 enum ieee80211_chanctx_mode mode)
61{ 301{
62 enum ieee80211_chan_mode mode; 302 struct ieee80211_local *local = sdata->local;
303 struct ieee80211_chanctx *ctx;
304 int ret;
63 305
64 mutex_lock(&local->iflist_mtx); 306 WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev));
65 mode = __ieee80211_get_channel_mode(local, ignore);
66 mutex_unlock(&local->iflist_mtx);
67 307
68 return mode; 308 mutex_lock(&local->chanctx_mtx);
69} 309 __ieee80211_vif_release_channel(sdata);
70 310
71static enum nl80211_channel_type 311 ctx = ieee80211_find_chanctx(local, chandef, mode);
72ieee80211_get_superchan(struct ieee80211_local *local, 312 if (!ctx)
73 struct ieee80211_sub_if_data *sdata) 313 ctx = ieee80211_new_chanctx(local, chandef, mode);
74{ 314 if (IS_ERR(ctx)) {
75 enum nl80211_channel_type superchan = NL80211_CHAN_NO_HT; 315 ret = PTR_ERR(ctx);
76 struct ieee80211_sub_if_data *tmp; 316 goto out;
317 }
77 318
78 mutex_lock(&local->iflist_mtx); 319 sdata->vif.bss_conf.chandef = *chandef;
79 list_for_each_entry(tmp, &local->interfaces, list) {
80 if (tmp == sdata)
81 continue;
82 320
83 if (!ieee80211_sdata_running(tmp)) 321 ret = ieee80211_assign_vif_chanctx(sdata, ctx);
84 continue; 322 if (ret) {
323 /* if assign fails refcount stays the same */
324 if (ctx->refcount == 0)
325 ieee80211_free_chanctx(local, ctx);
326 goto out;
327 }
85 328
86 switch (tmp->vif.bss_conf.channel_type) { 329 if (sdata->vif.type == NL80211_IFTYPE_AP) {
87 case NL80211_CHAN_NO_HT: 330 struct ieee80211_sub_if_data *vlan;
88 case NL80211_CHAN_HT20:
89 if (superchan > tmp->vif.bss_conf.channel_type)
90 break;
91 331
92 superchan = tmp->vif.bss_conf.channel_type; 332 /* for the VLAN list */
93 break; 333 ASSERT_RTNL();
94 case NL80211_CHAN_HT40PLUS: 334 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
95 WARN_ON(superchan == NL80211_CHAN_HT40MINUS); 335 rcu_assign_pointer(vlan->vif.chanctx_conf, &ctx->conf);
96 superchan = NL80211_CHAN_HT40PLUS;
97 break;
98 case NL80211_CHAN_HT40MINUS:
99 WARN_ON(superchan == NL80211_CHAN_HT40PLUS);
100 superchan = NL80211_CHAN_HT40MINUS;
101 break;
102 }
103 } 336 }
104 mutex_unlock(&local->iflist_mtx);
105 337
106 return superchan; 338 ieee80211_recalc_smps_chanctx(local, ctx);
339 out:
340 mutex_unlock(&local->chanctx_mtx);
341 return ret;
107} 342}
108 343
109static bool 344void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
110ieee80211_channel_types_are_compatible(enum nl80211_channel_type chantype1,
111 enum nl80211_channel_type chantype2,
112 enum nl80211_channel_type *compat)
113{ 345{
114 /* 346 WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev));
115 * start out with chantype1 being the result,
116 * overwriting later if needed
117 */
118 if (compat)
119 *compat = chantype1;
120
121 switch (chantype1) {
122 case NL80211_CHAN_NO_HT:
123 if (compat)
124 *compat = chantype2;
125 break;
126 case NL80211_CHAN_HT20:
127 /*
128 * allow any change that doesn't go to no-HT
129 * (if it already is no-HT no change is needed)
130 */
131 if (chantype2 == NL80211_CHAN_NO_HT)
132 break;
133 if (compat)
134 *compat = chantype2;
135 break;
136 case NL80211_CHAN_HT40PLUS:
137 case NL80211_CHAN_HT40MINUS:
138 /* allow smaller bandwidth and same */
139 if (chantype2 == NL80211_CHAN_NO_HT)
140 break;
141 if (chantype2 == NL80211_CHAN_HT20)
142 break;
143 if (chantype2 == chantype1)
144 break;
145 return false;
146 }
147 347
148 return true; 348 mutex_lock(&sdata->local->chanctx_mtx);
349 __ieee80211_vif_release_channel(sdata);
350 mutex_unlock(&sdata->local->chanctx_mtx);
149} 351}
150 352
151bool ieee80211_set_channel_type(struct ieee80211_local *local, 353void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata)
152 struct ieee80211_sub_if_data *sdata,
153 enum nl80211_channel_type chantype)
154{ 354{
155 enum nl80211_channel_type superchan; 355 struct ieee80211_local *local = sdata->local;
156 enum nl80211_channel_type compatchan; 356 struct ieee80211_sub_if_data *ap;
357 struct ieee80211_chanctx_conf *conf;
157 358
158 superchan = ieee80211_get_superchan(local, sdata); 359 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->bss))
159 if (!ieee80211_channel_types_are_compatible(superchan, chantype, 360 return;
160 &compatchan))
161 return false;
162 361
163 local->_oper_channel_type = compatchan; 362 ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);
164 363
165 if (sdata) 364 mutex_lock(&local->chanctx_mtx);
166 sdata->vif.bss_conf.channel_type = chantype;
167 365
168 return true; 366 conf = rcu_dereference_protected(ap->vif.chanctx_conf,
367 lockdep_is_held(&local->chanctx_mtx));
368 rcu_assign_pointer(sdata->vif.chanctx_conf, conf);
369 mutex_unlock(&local->chanctx_mtx);
370}
371
372void ieee80211_iter_chan_contexts_atomic(
373 struct ieee80211_hw *hw,
374 void (*iter)(struct ieee80211_hw *hw,
375 struct ieee80211_chanctx_conf *chanctx_conf,
376 void *data),
377 void *iter_data)
378{
379 struct ieee80211_local *local = hw_to_local(hw);
380 struct ieee80211_chanctx *ctx;
169 381
382 rcu_read_lock();
383 list_for_each_entry_rcu(ctx, &local->chanctx_list, list)
384 iter(hw, &ctx->conf, iter_data);
385 rcu_read_unlock();
170} 386}
387EXPORT_SYMBOL_GPL(ieee80211_iter_chan_contexts_atomic);
diff --git a/net/mac80211/debugfs.h b/net/mac80211/debugfs.h
index 9be4e6d71d00..214ed4ecd739 100644
--- a/net/mac80211/debugfs.h
+++ b/net/mac80211/debugfs.h
@@ -2,9 +2,9 @@
2#define __MAC80211_DEBUGFS_H 2#define __MAC80211_DEBUGFS_H
3 3
4#ifdef CONFIG_MAC80211_DEBUGFS 4#ifdef CONFIG_MAC80211_DEBUGFS
5extern void debugfs_hw_add(struct ieee80211_local *local); 5void debugfs_hw_add(struct ieee80211_local *local);
6extern int mac80211_format_buffer(char __user *userbuf, size_t count, 6int __printf(4, 5) mac80211_format_buffer(char __user *userbuf, size_t count,
7 loff_t *ppos, char *fmt, ...); 7 loff_t *ppos, char *fmt, ...);
8#else 8#else
9static inline void debugfs_hw_add(struct ieee80211_local *local) 9static inline void debugfs_hw_add(struct ieee80211_local *local)
10{ 10{
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 090d08ff22c4..c3a3082b72e5 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -116,7 +116,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
116 size_t count, loff_t *ppos) 116 size_t count, loff_t *ppos)
117{ 117{
118 struct ieee80211_key *key = file->private_data; 118 struct ieee80211_key *key = file->private_data;
119 char buf[14*NUM_RX_DATA_QUEUES+1], *p = buf; 119 char buf[14*IEEE80211_NUM_TIDS+1], *p = buf;
120 int i, len; 120 int i, len;
121 const u8 *rpn; 121 const u8 *rpn;
122 122
@@ -126,7 +126,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
126 len = scnprintf(buf, sizeof(buf), "\n"); 126 len = scnprintf(buf, sizeof(buf), "\n");
127 break; 127 break;
128 case WLAN_CIPHER_SUITE_TKIP: 128 case WLAN_CIPHER_SUITE_TKIP:
129 for (i = 0; i < NUM_RX_DATA_QUEUES; i++) 129 for (i = 0; i < IEEE80211_NUM_TIDS; i++)
130 p += scnprintf(p, sizeof(buf)+buf-p, 130 p += scnprintf(p, sizeof(buf)+buf-p,
131 "%08x %04x\n", 131 "%08x %04x\n",
132 key->u.tkip.rx[i].iv32, 132 key->u.tkip.rx[i].iv32,
@@ -134,7 +134,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
134 len = p - buf; 134 len = p - buf;
135 break; 135 break;
136 case WLAN_CIPHER_SUITE_CCMP: 136 case WLAN_CIPHER_SUITE_CCMP:
137 for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++) { 137 for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) {
138 rpn = key->u.ccmp.rx_pn[i]; 138 rpn = key->u.ccmp.rx_pn[i];
139 p += scnprintf(p, sizeof(buf)+buf-p, 139 p += scnprintf(p, sizeof(buf)+buf-p,
140 "%02x%02x%02x%02x%02x%02x\n", 140 "%02x%02x%02x%02x%02x%02x\n",
@@ -199,6 +199,22 @@ static ssize_t key_icverrors_read(struct file *file, char __user *userbuf,
199} 199}
200KEY_OPS(icverrors); 200KEY_OPS(icverrors);
201 201
202static ssize_t key_mic_failures_read(struct file *file, char __user *userbuf,
203 size_t count, loff_t *ppos)
204{
205 struct ieee80211_key *key = file->private_data;
206 char buf[20];
207 int len;
208
209 if (key->conf.cipher != WLAN_CIPHER_SUITE_TKIP)
210 return -EINVAL;
211
212 len = scnprintf(buf, sizeof(buf), "%u\n", key->u.tkip.mic_failures);
213
214 return simple_read_from_buffer(userbuf, count, ppos, buf, len);
215}
216KEY_OPS(mic_failures);
217
202static ssize_t key_key_read(struct file *file, char __user *userbuf, 218static ssize_t key_key_read(struct file *file, char __user *userbuf,
203 size_t count, loff_t *ppos) 219 size_t count, loff_t *ppos)
204{ 220{
@@ -260,6 +276,7 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key)
260 DEBUGFS_ADD(rx_spec); 276 DEBUGFS_ADD(rx_spec);
261 DEBUGFS_ADD(replays); 277 DEBUGFS_ADD(replays);
262 DEBUGFS_ADD(icverrors); 278 DEBUGFS_ADD(icverrors);
279 DEBUGFS_ADD(mic_failures);
263 DEBUGFS_ADD(key); 280 DEBUGFS_ADD(key);
264 DEBUGFS_ADD(ifindex); 281 DEBUGFS_ADD(ifindex);
265}; 282};
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 6d5aec9418ee..cbde5cc49a40 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -10,6 +10,7 @@
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/device.h> 11#include <linux/device.h>
12#include <linux/if.h> 12#include <linux/if.h>
13#include <linux/if_ether.h>
13#include <linux/interrupt.h> 14#include <linux/interrupt.h>
14#include <linux/netdevice.h> 15#include <linux/netdevice.h>
15#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
@@ -167,7 +168,29 @@ IEEE80211_IF_FILE(rc_rateidx_mcs_mask_5ghz,
167 168
168IEEE80211_IF_FILE(flags, flags, HEX); 169IEEE80211_IF_FILE(flags, flags, HEX);
169IEEE80211_IF_FILE(state, state, LHEX); 170IEEE80211_IF_FILE(state, state, LHEX);
170IEEE80211_IF_FILE(channel_type, vif.bss_conf.channel_type, DEC); 171IEEE80211_IF_FILE(txpower, vif.bss_conf.txpower, DEC);
172IEEE80211_IF_FILE(ap_power_level, ap_power_level, DEC);
173IEEE80211_IF_FILE(user_power_level, user_power_level, DEC);
174
175static ssize_t
176ieee80211_if_fmt_hw_queues(const struct ieee80211_sub_if_data *sdata,
177 char *buf, int buflen)
178{
179 int len;
180
181 len = scnprintf(buf, buflen, "AC queues: VO:%d VI:%d BE:%d BK:%d\n",
182 sdata->vif.hw_queue[IEEE80211_AC_VO],
183 sdata->vif.hw_queue[IEEE80211_AC_VI],
184 sdata->vif.hw_queue[IEEE80211_AC_BE],
185 sdata->vif.hw_queue[IEEE80211_AC_BK]);
186
187 if (sdata->vif.type == NL80211_IFTYPE_AP)
188 len += scnprintf(buf + len, buflen - len, "cab queue: %d\n",
189 sdata->vif.cab_queue);
190
191 return len;
192}
193__IEEE80211_IF_FILE(hw_queues, NULL);
171 194
172/* STA attributes */ 195/* STA attributes */
173IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC); 196IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
@@ -217,7 +240,7 @@ static ssize_t ieee80211_if_fmt_smps(const struct ieee80211_sub_if_data *sdata,
217 240
218 return snprintf(buf, buflen, "request: %s\nused: %s\n", 241 return snprintf(buf, buflen, "request: %s\nused: %s\n",
219 smps_modes[sdata->u.mgd.req_smps], 242 smps_modes[sdata->u.mgd.req_smps],
220 smps_modes[sdata->u.mgd.ap_smps]); 243 smps_modes[sdata->smps_mode]);
221} 244}
222 245
223static ssize_t ieee80211_if_parse_smps(struct ieee80211_sub_if_data *sdata, 246static ssize_t ieee80211_if_parse_smps(struct ieee80211_sub_if_data *sdata,
@@ -245,27 +268,6 @@ static ssize_t ieee80211_if_fmt_tkip_mic_test(
245 return -EOPNOTSUPP; 268 return -EOPNOTSUPP;
246} 269}
247 270
248static int hwaddr_aton(const char *txt, u8 *addr)
249{
250 int i;
251
252 for (i = 0; i < ETH_ALEN; i++) {
253 int a, b;
254
255 a = hex_to_bin(*txt++);
256 if (a < 0)
257 return -1;
258 b = hex_to_bin(*txt++);
259 if (b < 0)
260 return -1;
261 *addr++ = (a << 4) | b;
262 if (i < 5 && *txt++ != ':')
263 return -1;
264 }
265
266 return 0;
267}
268
269static ssize_t ieee80211_if_parse_tkip_mic_test( 271static ssize_t ieee80211_if_parse_tkip_mic_test(
270 struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) 272 struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
271{ 273{
@@ -275,13 +277,7 @@ static ssize_t ieee80211_if_parse_tkip_mic_test(
275 struct ieee80211_hdr *hdr; 277 struct ieee80211_hdr *hdr;
276 __le16 fc; 278 __le16 fc;
277 279
278 /* 280 if (!mac_pton(buf, addr))
279 * Assume colon-delimited MAC address with possible white space
280 * following.
281 */
282 if (buflen < 3 * ETH_ALEN - 1)
283 return -EINVAL;
284 if (hwaddr_aton(buf, addr) < 0)
285 return -EINVAL; 281 return -EINVAL;
286 282
287 if (!ieee80211_sdata_running(sdata)) 283 if (!ieee80211_sdata_running(sdata))
@@ -307,13 +303,16 @@ static ssize_t ieee80211_if_parse_tkip_mic_test(
307 case NL80211_IFTYPE_STATION: 303 case NL80211_IFTYPE_STATION:
308 fc |= cpu_to_le16(IEEE80211_FCTL_TODS); 304 fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
309 /* BSSID SA DA */ 305 /* BSSID SA DA */
310 if (sdata->vif.bss_conf.bssid == NULL) { 306 mutex_lock(&sdata->u.mgd.mtx);
307 if (!sdata->u.mgd.associated) {
308 mutex_unlock(&sdata->u.mgd.mtx);
311 dev_kfree_skb(skb); 309 dev_kfree_skb(skb);
312 return -ENOTCONN; 310 return -ENOTCONN;
313 } 311 }
314 memcpy(hdr->addr1, sdata->vif.bss_conf.bssid, ETH_ALEN); 312 memcpy(hdr->addr1, sdata->u.mgd.associated->bssid, ETH_ALEN);
315 memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); 313 memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
316 memcpy(hdr->addr3, addr, ETH_ALEN); 314 memcpy(hdr->addr3, addr, ETH_ALEN);
315 mutex_unlock(&sdata->u.mgd.mtx);
317 break; 316 break;
318 default: 317 default:
319 dev_kfree_skb(skb); 318 dev_kfree_skb(skb);
@@ -395,14 +394,14 @@ __IEEE80211_IF_FILE_W(uapsd_max_sp_len);
395 394
396/* AP attributes */ 395/* AP attributes */
397IEEE80211_IF_FILE(num_mcast_sta, u.ap.num_mcast_sta, ATOMIC); 396IEEE80211_IF_FILE(num_mcast_sta, u.ap.num_mcast_sta, ATOMIC);
398IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); 397IEEE80211_IF_FILE(num_sta_ps, u.ap.ps.num_sta_ps, ATOMIC);
399IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC); 398IEEE80211_IF_FILE(dtim_count, u.ap.ps.dtim_count, DEC);
400 399
401static ssize_t ieee80211_if_fmt_num_buffered_multicast( 400static ssize_t ieee80211_if_fmt_num_buffered_multicast(
402 const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) 401 const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
403{ 402{
404 return scnprintf(buf, buflen, "%u\n", 403 return scnprintf(buf, buflen, "%u\n",
405 skb_queue_len(&sdata->u.ap.ps_bc_buf)); 404 skb_queue_len(&sdata->u.ap.ps.bc_buf));
406} 405}
407__IEEE80211_IF_FILE(num_buffered_multicast, NULL); 406__IEEE80211_IF_FILE(num_buffered_multicast, NULL);
408 407
@@ -443,7 +442,7 @@ static ssize_t ieee80211_if_parse_tsf(
443 } 442 }
444 ret = kstrtoull(buf, 10, &tsf); 443 ret = kstrtoull(buf, 10, &tsf);
445 if (ret < 0) 444 if (ret < 0)
446 return -EINVAL; 445 return ret;
447 if (tsf_is_delta) 446 if (tsf_is_delta)
448 tsf = drv_get_tsf(local, sdata) + tsf_is_delta * tsf; 447 tsf = drv_get_tsf(local, sdata) + tsf_is_delta * tsf;
449 if (local->ops->set_tsf) { 448 if (local->ops->set_tsf) {
@@ -471,7 +470,7 @@ IEEE80211_IF_FILE(dropped_frames_congestion,
471 u.mesh.mshstats.dropped_frames_congestion, DEC); 470 u.mesh.mshstats.dropped_frames_congestion, DEC);
472IEEE80211_IF_FILE(dropped_frames_no_route, 471IEEE80211_IF_FILE(dropped_frames_no_route,
473 u.mesh.mshstats.dropped_frames_no_route, DEC); 472 u.mesh.mshstats.dropped_frames_no_route, DEC);
474IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC); 473IEEE80211_IF_FILE(estab_plinks, u.mesh.estab_plinks, ATOMIC);
475 474
476/* Mesh parameters */ 475/* Mesh parameters */
477IEEE80211_IF_FILE(dot11MeshMaxRetries, 476IEEE80211_IF_FILE(dot11MeshMaxRetries,
@@ -531,6 +530,7 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata)
531 DEBUGFS_ADD(rc_rateidx_mask_5ghz); 530 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
532 DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz); 531 DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
533 DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz); 532 DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
533 DEBUGFS_ADD(hw_queues);
534} 534}
535 535
536static void add_sta_files(struct ieee80211_sub_if_data *sdata) 536static void add_sta_files(struct ieee80211_sub_if_data *sdata)
@@ -631,7 +631,9 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
631 631
632 DEBUGFS_ADD(flags); 632 DEBUGFS_ADD(flags);
633 DEBUGFS_ADD(state); 633 DEBUGFS_ADD(state);
634 DEBUGFS_ADD(channel_type); 634 DEBUGFS_ADD(txpower);
635 DEBUGFS_ADD(user_power_level);
636 DEBUGFS_ADD(ap_power_level);
635 637
636 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) 638 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
637 add_common_files(sdata); 639 add_common_files(sdata);
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 5ccec2c1e9f6..6fb1168b9f16 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -14,6 +14,7 @@
14#include "debugfs.h" 14#include "debugfs.h"
15#include "debugfs_sta.h" 15#include "debugfs_sta.h"
16#include "sta_info.h" 16#include "sta_info.h"
17#include "driver-ops.h"
17 18
18/* sta attributtes */ 19/* sta attributtes */
19 20
@@ -52,6 +53,7 @@ static const struct file_operations sta_ ##name## _ops = { \
52STA_FILE(aid, sta.aid, D); 53STA_FILE(aid, sta.aid, D);
53STA_FILE(dev, sdata->name, S); 54STA_FILE(dev, sdata->name, S);
54STA_FILE(last_signal, last_signal, D); 55STA_FILE(last_signal, last_signal, D);
56STA_FILE(last_ack_signal, last_ack_signal, D);
55 57
56static ssize_t sta_flags_read(struct file *file, char __user *userbuf, 58static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
57 size_t count, loff_t *ppos) 59 size_t count, loff_t *ppos)
@@ -131,10 +133,10 @@ STA_OPS(connected_time);
131static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf, 133static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf,
132 size_t count, loff_t *ppos) 134 size_t count, loff_t *ppos)
133{ 135{
134 char buf[15*NUM_RX_DATA_QUEUES], *p = buf; 136 char buf[15*IEEE80211_NUM_TIDS], *p = buf;
135 int i; 137 int i;
136 struct sta_info *sta = file->private_data; 138 struct sta_info *sta = file->private_data;
137 for (i = 0; i < NUM_RX_DATA_QUEUES; i++) 139 for (i = 0; i < IEEE80211_NUM_TIDS; i++)
138 p += scnprintf(p, sizeof(buf)+buf-p, "%x ", 140 p += scnprintf(p, sizeof(buf)+buf-p, "%x ",
139 le16_to_cpu(sta->last_seq_ctrl[i])); 141 le16_to_cpu(sta->last_seq_ctrl[i]));
140 p += scnprintf(p, sizeof(buf)+buf-p, "\n"); 142 p += scnprintf(p, sizeof(buf)+buf-p, "\n");
@@ -145,7 +147,7 @@ STA_OPS(last_seq_ctrl);
145static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, 147static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
146 size_t count, loff_t *ppos) 148 size_t count, loff_t *ppos)
147{ 149{
148 char buf[71 + STA_TID_NUM * 40], *p = buf; 150 char buf[71 + IEEE80211_NUM_TIDS * 40], *p = buf;
149 int i; 151 int i;
150 struct sta_info *sta = file->private_data; 152 struct sta_info *sta = file->private_data;
151 struct tid_ampdu_rx *tid_rx; 153 struct tid_ampdu_rx *tid_rx;
@@ -158,7 +160,7 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
158 p += scnprintf(p, sizeof(buf) + buf - p, 160 p += scnprintf(p, sizeof(buf) + buf - p,
159 "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n"); 161 "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n");
160 162
161 for (i = 0; i < STA_TID_NUM; i++) { 163 for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
162 tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[i]); 164 tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[i]);
163 tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[i]); 165 tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[i]);
164 166
@@ -218,9 +220,11 @@ static ssize_t sta_agg_status_write(struct file *file, const char __user *userbu
218 } else 220 } else
219 return -EINVAL; 221 return -EINVAL;
220 222
221 tid = simple_strtoul(buf, NULL, 0); 223 ret = kstrtoul(buf, 0, &tid);
224 if (ret)
225 return ret;
222 226
223 if (tid >= STA_TID_NUM) 227 if (tid >= IEEE80211_NUM_TIDS)
224 return -EINVAL; 228 return -EINVAL;
225 229
226 if (tx) { 230 if (tx) {
@@ -320,6 +324,38 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
320} 324}
321STA_OPS(ht_capa); 325STA_OPS(ht_capa);
322 326
327static ssize_t sta_current_tx_rate_read(struct file *file, char __user *userbuf,
328 size_t count, loff_t *ppos)
329{
330 struct sta_info *sta = file->private_data;
331 struct rate_info rinfo;
332 u16 rate;
333 sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo);
334 rate = cfg80211_calculate_bitrate(&rinfo);
335
336 return mac80211_format_buffer(userbuf, count, ppos,
337 "%d.%d MBit/s\n",
338 rate/10, rate%10);
339}
340STA_OPS(current_tx_rate);
341
342static ssize_t sta_last_rx_rate_read(struct file *file, char __user *userbuf,
343 size_t count, loff_t *ppos)
344{
345 struct sta_info *sta = file->private_data;
346 struct rate_info rinfo;
347 u16 rate;
348
349 sta_set_rate_info_rx(sta, &rinfo);
350
351 rate = cfg80211_calculate_bitrate(&rinfo);
352
353 return mac80211_format_buffer(userbuf, count, ppos,
354 "%d.%d MBit/s\n",
355 rate/10, rate%10);
356}
357STA_OPS(last_rx_rate);
358
323#define DEBUGFS_ADD(name) \ 359#define DEBUGFS_ADD(name) \
324 debugfs_create_file(#name, 0400, \ 360 debugfs_create_file(#name, 0400, \
325 sta->debugfs.dir, sta, &sta_ ##name## _ops); 361 sta->debugfs.dir, sta, &sta_ ##name## _ops);
@@ -334,6 +370,8 @@ STA_OPS(ht_capa);
334 370
335void ieee80211_sta_debugfs_add(struct sta_info *sta) 371void ieee80211_sta_debugfs_add(struct sta_info *sta)
336{ 372{
373 struct ieee80211_local *local = sta->local;
374 struct ieee80211_sub_if_data *sdata = sta->sdata;
337 struct dentry *stations_dir = sta->sdata->debugfs.subdir_stations; 375 struct dentry *stations_dir = sta->sdata->debugfs.subdir_stations;
338 u8 mac[3*ETH_ALEN]; 376 u8 mac[3*ETH_ALEN];
339 377
@@ -366,6 +404,9 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
366 DEBUGFS_ADD(dev); 404 DEBUGFS_ADD(dev);
367 DEBUGFS_ADD(last_signal); 405 DEBUGFS_ADD(last_signal);
368 DEBUGFS_ADD(ht_capa); 406 DEBUGFS_ADD(ht_capa);
407 DEBUGFS_ADD(last_ack_signal);
408 DEBUGFS_ADD(current_tx_rate);
409 DEBUGFS_ADD(last_rx_rate);
369 410
370 DEBUGFS_ADD_COUNTER(rx_packets, rx_packets); 411 DEBUGFS_ADD_COUNTER(rx_packets, rx_packets);
371 DEBUGFS_ADD_COUNTER(tx_packets, tx_packets); 412 DEBUGFS_ADD_COUNTER(tx_packets, tx_packets);
@@ -379,10 +420,16 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
379 DEBUGFS_ADD_COUNTER(tx_retry_failed, tx_retry_failed); 420 DEBUGFS_ADD_COUNTER(tx_retry_failed, tx_retry_failed);
380 DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count); 421 DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count);
381 DEBUGFS_ADD_COUNTER(wep_weak_iv_count, wep_weak_iv_count); 422 DEBUGFS_ADD_COUNTER(wep_weak_iv_count, wep_weak_iv_count);
423
424 drv_sta_add_debugfs(local, sdata, &sta->sta, sta->debugfs.dir);
382} 425}
383 426
384void ieee80211_sta_debugfs_remove(struct sta_info *sta) 427void ieee80211_sta_debugfs_remove(struct sta_info *sta)
385{ 428{
429 struct ieee80211_local *local = sta->local;
430 struct ieee80211_sub_if_data *sdata = sta->sdata;
431
432 drv_sta_remove_debugfs(local, sdata, &sta->sta, sta->debugfs.dir);
386 debugfs_remove_recursive(sta->debugfs.dir); 433 debugfs_remove_recursive(sta->debugfs.dir);
387 sta->debugfs.dir = NULL; 434 sta->debugfs.dir = NULL;
388} 435}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index da9003b20004..698dc7e6f309 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -490,6 +490,38 @@ static inline void drv_sta_remove(struct ieee80211_local *local,
490 trace_drv_return_void(local); 490 trace_drv_return_void(local);
491} 491}
492 492
493#ifdef CONFIG_MAC80211_DEBUGFS
494static inline void drv_sta_add_debugfs(struct ieee80211_local *local,
495 struct ieee80211_sub_if_data *sdata,
496 struct ieee80211_sta *sta,
497 struct dentry *dir)
498{
499 might_sleep();
500
501 sdata = get_bss_sdata(sdata);
502 check_sdata_in_driver(sdata);
503
504 if (local->ops->sta_add_debugfs)
505 local->ops->sta_add_debugfs(&local->hw, &sdata->vif,
506 sta, dir);
507}
508
509static inline void drv_sta_remove_debugfs(struct ieee80211_local *local,
510 struct ieee80211_sub_if_data *sdata,
511 struct ieee80211_sta *sta,
512 struct dentry *dir)
513{
514 might_sleep();
515
516 sdata = get_bss_sdata(sdata);
517 check_sdata_in_driver(sdata);
518
519 if (local->ops->sta_remove_debugfs)
520 local->ops->sta_remove_debugfs(&local->hw, &sdata->vif,
521 sta, dir);
522}
523#endif
524
493static inline __must_check 525static inline __must_check
494int drv_sta_state(struct ieee80211_local *local, 526int drv_sta_state(struct ieee80211_local *local,
495 struct ieee80211_sub_if_data *sdata, 527 struct ieee80211_sub_if_data *sdata,
@@ -602,7 +634,7 @@ static inline void drv_reset_tsf(struct ieee80211_local *local,
602 634
603static inline int drv_tx_last_beacon(struct ieee80211_local *local) 635static inline int drv_tx_last_beacon(struct ieee80211_local *local)
604{ 636{
605 int ret = 0; /* default unsuported op for less congestion */ 637 int ret = 0; /* default unsupported op for less congestion */
606 638
607 might_sleep(); 639 might_sleep();
608 640
@@ -704,17 +736,17 @@ static inline int drv_get_antenna(struct ieee80211_local *local,
704} 736}
705 737
706static inline int drv_remain_on_channel(struct ieee80211_local *local, 738static inline int drv_remain_on_channel(struct ieee80211_local *local,
739 struct ieee80211_sub_if_data *sdata,
707 struct ieee80211_channel *chan, 740 struct ieee80211_channel *chan,
708 enum nl80211_channel_type chantype,
709 unsigned int duration) 741 unsigned int duration)
710{ 742{
711 int ret; 743 int ret;
712 744
713 might_sleep(); 745 might_sleep();
714 746
715 trace_drv_remain_on_channel(local, chan, chantype, duration); 747 trace_drv_remain_on_channel(local, sdata, chan, duration);
716 ret = local->ops->remain_on_channel(&local->hw, chan, chantype, 748 ret = local->ops->remain_on_channel(&local->hw, &sdata->vif,
717 duration); 749 chan, duration);
718 trace_drv_return_int(local, ret); 750 trace_drv_return_int(local, ret);
719 751
720 return ret; 752 return ret;
@@ -871,4 +903,104 @@ static inline void drv_mgd_prepare_tx(struct ieee80211_local *local,
871 local->ops->mgd_prepare_tx(&local->hw, &sdata->vif); 903 local->ops->mgd_prepare_tx(&local->hw, &sdata->vif);
872 trace_drv_return_void(local); 904 trace_drv_return_void(local);
873} 905}
906
907static inline int drv_add_chanctx(struct ieee80211_local *local,
908 struct ieee80211_chanctx *ctx)
909{
910 int ret = -EOPNOTSUPP;
911
912 trace_drv_add_chanctx(local, ctx);
913 if (local->ops->add_chanctx)
914 ret = local->ops->add_chanctx(&local->hw, &ctx->conf);
915 trace_drv_return_int(local, ret);
916
917 return ret;
918}
919
920static inline void drv_remove_chanctx(struct ieee80211_local *local,
921 struct ieee80211_chanctx *ctx)
922{
923 trace_drv_remove_chanctx(local, ctx);
924 if (local->ops->remove_chanctx)
925 local->ops->remove_chanctx(&local->hw, &ctx->conf);
926 trace_drv_return_void(local);
927}
928
929static inline void drv_change_chanctx(struct ieee80211_local *local,
930 struct ieee80211_chanctx *ctx,
931 u32 changed)
932{
933 trace_drv_change_chanctx(local, ctx, changed);
934 if (local->ops->change_chanctx)
935 local->ops->change_chanctx(&local->hw, &ctx->conf, changed);
936 trace_drv_return_void(local);
937}
938
939static inline int drv_assign_vif_chanctx(struct ieee80211_local *local,
940 struct ieee80211_sub_if_data *sdata,
941 struct ieee80211_chanctx *ctx)
942{
943 int ret = 0;
944
945 check_sdata_in_driver(sdata);
946
947 trace_drv_assign_vif_chanctx(local, sdata, ctx);
948 if (local->ops->assign_vif_chanctx)
949 ret = local->ops->assign_vif_chanctx(&local->hw,
950 &sdata->vif,
951 &ctx->conf);
952 trace_drv_return_int(local, ret);
953
954 return ret;
955}
956
957static inline void drv_unassign_vif_chanctx(struct ieee80211_local *local,
958 struct ieee80211_sub_if_data *sdata,
959 struct ieee80211_chanctx *ctx)
960{
961 check_sdata_in_driver(sdata);
962
963 trace_drv_unassign_vif_chanctx(local, sdata, ctx);
964 if (local->ops->unassign_vif_chanctx)
965 local->ops->unassign_vif_chanctx(&local->hw,
966 &sdata->vif,
967 &ctx->conf);
968 trace_drv_return_void(local);
969}
970
971static inline int drv_start_ap(struct ieee80211_local *local,
972 struct ieee80211_sub_if_data *sdata)
973{
974 int ret = 0;
975
976 check_sdata_in_driver(sdata);
977
978 trace_drv_start_ap(local, sdata, &sdata->vif.bss_conf);
979 if (local->ops->start_ap)
980 ret = local->ops->start_ap(&local->hw, &sdata->vif);
981 trace_drv_return_int(local, ret);
982 return ret;
983}
984
985static inline void drv_stop_ap(struct ieee80211_local *local,
986 struct ieee80211_sub_if_data *sdata)
987{
988 check_sdata_in_driver(sdata);
989
990 trace_drv_stop_ap(local, sdata);
991 if (local->ops->stop_ap)
992 local->ops->stop_ap(&local->hw, &sdata->vif);
993 trace_drv_return_void(local);
994}
995
996static inline void drv_restart_complete(struct ieee80211_local *local)
997{
998 might_sleep();
999
1000 trace_drv_restart_complete(local);
1001 if (local->ops->restart_complete)
1002 local->ops->restart_complete(&local->hw);
1003 trace_drv_return_void(local);
1004}
1005
874#endif /* __MAC80211_DRIVER_OPS */ 1006#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 4b4538d63925..a71d891794a4 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -185,7 +185,7 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, bool tx)
185 185
186 cancel_work_sync(&sta->ampdu_mlme.work); 186 cancel_work_sync(&sta->ampdu_mlme.work);
187 187
188 for (i = 0; i < STA_TID_NUM; i++) { 188 for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
189 __ieee80211_stop_tx_ba_session(sta, i, WLAN_BACK_INITIATOR, tx); 189 __ieee80211_stop_tx_ba_session(sta, i, WLAN_BACK_INITIATOR, tx);
190 __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, 190 __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
191 WLAN_REASON_QSTA_LEAVE_QBSS, tx); 191 WLAN_REASON_QSTA_LEAVE_QBSS, tx);
@@ -209,7 +209,7 @@ void ieee80211_ba_session_work(struct work_struct *work)
209 return; 209 return;
210 210
211 mutex_lock(&sta->ampdu_mlme.mtx); 211 mutex_lock(&sta->ampdu_mlme.mtx);
212 for (tid = 0; tid < STA_TID_NUM; tid++) { 212 for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
213 if (test_and_clear_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired)) 213 if (test_and_clear_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired))
214 ___ieee80211_stop_rx_ba_session( 214 ___ieee80211_stop_rx_ba_session(
215 sta, tid, WLAN_BACK_RECIPIENT, 215 sta, tid, WLAN_BACK_RECIPIENT,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index bf87c70ac6c5..6b7644e818d8 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -26,7 +26,6 @@
26#include "rate.h" 26#include "rate.h"
27 27
28#define IEEE80211_SCAN_INTERVAL (2 * HZ) 28#define IEEE80211_SCAN_INTERVAL (2 * HZ)
29#define IEEE80211_SCAN_INTERVAL_SLOW (15 * HZ)
30#define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ) 29#define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ)
31 30
32#define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ) 31#define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ)
@@ -39,7 +38,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
39 const u8 *bssid, const int beacon_int, 38 const u8 *bssid, const int beacon_int,
40 struct ieee80211_channel *chan, 39 struct ieee80211_channel *chan,
41 const u32 basic_rates, 40 const u32 basic_rates,
42 const u16 capability, u64 tsf) 41 const u16 capability, u64 tsf,
42 bool creator)
43{ 43{
44 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 44 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
45 struct ieee80211_local *local = sdata->local; 45 struct ieee80211_local *local = sdata->local;
@@ -51,7 +51,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
51 struct cfg80211_bss *bss; 51 struct cfg80211_bss *bss;
52 u32 bss_change; 52 u32 bss_change;
53 u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; 53 u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
54 enum nl80211_channel_type channel_type; 54 struct cfg80211_chan_def chandef;
55 55
56 lockdep_assert_held(&ifibss->mtx); 56 lockdep_assert_held(&ifibss->mtx);
57 57
@@ -72,25 +72,29 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
72 /* if merging, indicate to driver that we leave the old IBSS */ 72 /* if merging, indicate to driver that we leave the old IBSS */
73 if (sdata->vif.bss_conf.ibss_joined) { 73 if (sdata->vif.bss_conf.ibss_joined) {
74 sdata->vif.bss_conf.ibss_joined = false; 74 sdata->vif.bss_conf.ibss_joined = false;
75 sdata->vif.bss_conf.ibss_creator = false;
75 netif_carrier_off(sdata->dev); 76 netif_carrier_off(sdata->dev);
76 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IBSS); 77 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IBSS);
77 } 78 }
78 79
79 memcpy(ifibss->bssid, bssid, ETH_ALEN);
80
81 sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0; 80 sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
82 81
83 local->oper_channel = chan; 82 cfg80211_chandef_create(&chandef, chan, ifibss->channel_type);
84 channel_type = ifibss->channel_type; 83 if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
85 if (!cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type)) 84 chandef.width = NL80211_CHAN_WIDTH_20;
86 channel_type = NL80211_CHAN_HT20; 85 chandef.center_freq1 = chan->center_freq;
87 if (!ieee80211_set_channel_type(local, sdata, channel_type)) { 86 }
88 /* can only fail due to HT40+/- mismatch */ 87
89 channel_type = NL80211_CHAN_HT20; 88 ieee80211_vif_release_channel(sdata);
90 WARN_ON(!ieee80211_set_channel_type(local, sdata, 89 if (ieee80211_vif_use_channel(sdata, &chandef,
91 NL80211_CHAN_HT20)); 90 ifibss->fixed_channel ?
91 IEEE80211_CHANCTX_SHARED :
92 IEEE80211_CHANCTX_EXCLUSIVE)) {
93 sdata_info(sdata, "Failed to join IBSS, no channel context\n");
94 return;
92 } 95 }
93 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 96
97 memcpy(ifibss->bssid, bssid, ETH_ALEN);
94 98
95 sband = local->hw.wiphy->bands[chan->band]; 99 sband = local->hw.wiphy->bands[chan->band];
96 100
@@ -156,7 +160,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
156 ifibss->ie, ifibss->ie_len); 160 ifibss->ie, ifibss->ie_len);
157 161
158 /* add HT capability and information IEs */ 162 /* add HT capability and information IEs */
159 if (channel_type && sband->ht_cap.ht_supported) { 163 if (chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
164 sband->ht_cap.ht_supported) {
160 pos = skb_put(skb, 4 + 165 pos = skb_put(skb, 4 +
161 sizeof(struct ieee80211_ht_cap) + 166 sizeof(struct ieee80211_ht_cap) +
162 sizeof(struct ieee80211_ht_operation)); 167 sizeof(struct ieee80211_ht_operation));
@@ -168,7 +173,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
168 * keep them at 0 173 * keep them at 0
169 */ 174 */
170 pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap, 175 pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap,
171 chan, channel_type, 0); 176 &chandef, 0);
172 } 177 }
173 178
174 if (local->hw.queues >= IEEE80211_NUM_ACS) { 179 if (local->hw.queues >= IEEE80211_NUM_ACS) {
@@ -196,7 +201,22 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
196 bss_change |= BSS_CHANGED_BASIC_RATES; 201 bss_change |= BSS_CHANGED_BASIC_RATES;
197 bss_change |= BSS_CHANGED_HT; 202 bss_change |= BSS_CHANGED_HT;
198 bss_change |= BSS_CHANGED_IBSS; 203 bss_change |= BSS_CHANGED_IBSS;
204
205 /*
206 * In 5 GHz/802.11a, we can always use short slot time.
207 * (IEEE 802.11-2012 18.3.8.7)
208 *
209 * In 2.4GHz, we must always use long slots in IBSS for compatibility
210 * reasons.
211 * (IEEE 802.11-2012 19.4.5)
212 *
213 * HT follows these specifications (IEEE 802.11-2012 20.3.18)
214 */
215 sdata->vif.bss_conf.use_short_slot = chan->band == IEEE80211_BAND_5GHZ;
216 bss_change |= BSS_CHANGED_ERP_SLOT;
217
199 sdata->vif.bss_conf.ibss_joined = true; 218 sdata->vif.bss_conf.ibss_joined = true;
219 sdata->vif.bss_conf.ibss_creator = creator;
200 ieee80211_bss_info_change_notify(sdata, bss_change); 220 ieee80211_bss_info_change_notify(sdata, bss_change);
201 221
202 ieee80211_sta_def_wmm_params(sdata, sband->n_bitrates, supp_rates); 222 ieee80211_sta_def_wmm_params(sdata, sband->n_bitrates, supp_rates);
@@ -249,7 +269,8 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
249 cbss->channel, 269 cbss->channel,
250 basic_rates, 270 basic_rates,
251 cbss->capability, 271 cbss->capability,
252 cbss->tsf); 272 cbss->tsf,
273 false);
253} 274}
254 275
255static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta, 276static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
@@ -279,7 +300,7 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
279 ibss_dbg(sdata, 300 ibss_dbg(sdata,
280 "TX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=1)\n", 301 "TX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=1)\n",
281 sdata->vif.addr, addr, sdata->u.ibss.bssid); 302 sdata->vif.addr, addr, sdata->u.ibss.bssid);
282 ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, NULL, 0, 303 ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, 0, NULL, 0,
283 addr, sdata->u.ibss.bssid, NULL, 0, 0); 304 addr, sdata->u.ibss.bssid, NULL, 0, 0);
284 } 305 }
285 return sta; 306 return sta;
@@ -294,7 +315,8 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
294 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 315 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
295 struct ieee80211_local *local = sdata->local; 316 struct ieee80211_local *local = sdata->local;
296 struct sta_info *sta; 317 struct sta_info *sta;
297 int band = local->oper_channel->band; 318 struct ieee80211_chanctx_conf *chanctx_conf;
319 int band;
298 320
299 /* 321 /*
300 * XXX: Consider removing the least recently used entry and 322 * XXX: Consider removing the least recently used entry and
@@ -317,6 +339,13 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
317 return NULL; 339 return NULL;
318 } 340 }
319 341
342 rcu_read_lock();
343 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
344 if (WARN_ON_ONCE(!chanctx_conf))
345 return NULL;
346 band = chanctx_conf->def.chan->band;
347 rcu_read_unlock();
348
320 sta = sta_info_alloc(sdata, addr, GFP_KERNEL); 349 sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
321 if (!sta) { 350 if (!sta) {
322 rcu_read_lock(); 351 rcu_read_lock();
@@ -362,11 +391,13 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
362 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); 391 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
363 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); 392 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
364 393
365 if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
366 return;
367 ibss_dbg(sdata, 394 ibss_dbg(sdata,
368 "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n", 395 "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
369 mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction); 396 mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
397
398 if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
399 return;
400
370 sta_info_destroy_addr(sdata, mgmt->sa); 401 sta_info_destroy_addr(sdata, mgmt->sa);
371 sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 0, false); 402 sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 0, false);
372 rcu_read_unlock(); 403 rcu_read_unlock();
@@ -389,7 +420,7 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
389 * However, try to reply to authentication attempts if someone 420 * However, try to reply to authentication attempts if someone
390 * has actually implemented this. 421 * has actually implemented this.
391 */ 422 */
392 ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, NULL, 0, 423 ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, 0, NULL, 0,
393 mgmt->sa, sdata->u.ibss.bssid, NULL, 0, 0); 424 mgmt->sa, sdata->u.ibss.bssid, NULL, 0, 0);
394} 425}
395 426
@@ -461,9 +492,11 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
461 sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) { 492 sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) {
462 /* we both use HT */ 493 /* we both use HT */
463 struct ieee80211_sta_ht_cap sta_ht_cap_new; 494 struct ieee80211_sta_ht_cap sta_ht_cap_new;
464 enum nl80211_channel_type channel_type = 495 struct cfg80211_chan_def chandef;
465 ieee80211_ht_oper_to_channel_type( 496
466 elems->ht_operation); 497 ieee80211_ht_oper_to_chandef(channel,
498 elems->ht_operation,
499 &chandef);
467 500
468 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 501 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
469 elems->ht_cap_elem, 502 elems->ht_cap_elem,
@@ -473,9 +506,9 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
473 * fall back to HT20 if we don't use or use 506 * fall back to HT20 if we don't use or use
474 * the other extension channel 507 * the other extension channel
475 */ 508 */
476 if (!(channel_type == NL80211_CHAN_HT40MINUS || 509 if (chandef.width != NL80211_CHAN_WIDTH_40 ||
477 channel_type == NL80211_CHAN_HT40PLUS) || 510 cfg80211_get_chandef_type(&chandef) !=
478 channel_type != sdata->u.ibss.channel_type) 511 sdata->u.ibss.channel_type)
479 sta_ht_cap_new.cap &= 512 sta_ht_cap_new.cap &=
480 ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; 513 ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
481 514
@@ -517,7 +550,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
517 goto put_bss; 550 goto put_bss;
518 551
519 /* different channel */ 552 /* different channel */
520 if (cbss->channel != local->oper_channel) 553 if (sdata->u.ibss.fixed_channel &&
554 sdata->u.ibss.channel != cbss->channel)
521 goto put_bss; 555 goto put_bss;
522 556
523 /* different SSID */ 557 /* different SSID */
@@ -530,30 +564,11 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
530 if (ether_addr_equal(cbss->bssid, sdata->u.ibss.bssid)) 564 if (ether_addr_equal(cbss->bssid, sdata->u.ibss.bssid))
531 goto put_bss; 565 goto put_bss;
532 566
533 if (rx_status->flag & RX_FLAG_MACTIME_MPDU) { 567 if (ieee80211_have_rx_timestamp(rx_status)) {
534 /* 568 /* time when timestamp field was received */
535 * For correct IBSS merging we need mactime; since mactime is 569 rx_timestamp =
536 * defined as the time the first data symbol of the frame hits 570 ieee80211_calculate_rx_timestamp(local, rx_status,
537 * the PHY, and the timestamp of the beacon is defined as "the 571 len + FCS_LEN, 24);
538 * time that the data symbol containing the first bit of the
539 * timestamp is transmitted to the PHY plus the transmitting
540 * STA's delays through its local PHY from the MAC-PHY
541 * interface to its interface with the WM" (802.11 11.1.2)
542 * - equals the time this bit arrives at the receiver - we have
543 * to take into account the offset between the two.
544 *
545 * E.g. at 1 MBit that means mactime is 192 usec earlier
546 * (=24 bytes * 8 usecs/byte) than the beacon timestamp.
547 */
548 int rate;
549
550 if (rx_status->flag & RX_FLAG_HT)
551 rate = 65; /* TODO: HT rates */
552 else
553 rate = local->hw.wiphy->bands[band]->
554 bitrates[rx_status->rate_idx].bitrate;
555
556 rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate);
557 } else { 572 } else {
558 /* 573 /*
559 * second best option: get current TSF 574 * second best option: get current TSF
@@ -592,7 +607,8 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
592 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 607 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
593 struct ieee80211_local *local = sdata->local; 608 struct ieee80211_local *local = sdata->local;
594 struct sta_info *sta; 609 struct sta_info *sta;
595 int band = local->oper_channel->band; 610 struct ieee80211_chanctx_conf *chanctx_conf;
611 int band;
596 612
597 /* 613 /*
598 * XXX: Consider removing the least recently used entry and 614 * XXX: Consider removing the least recently used entry and
@@ -610,6 +626,15 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
610 if (!ether_addr_equal(bssid, sdata->u.ibss.bssid)) 626 if (!ether_addr_equal(bssid, sdata->u.ibss.bssid))
611 return; 627 return;
612 628
629 rcu_read_lock();
630 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
631 if (WARN_ON_ONCE(!chanctx_conf)) {
632 rcu_read_unlock();
633 return;
634 }
635 band = chanctx_conf->def.chan->band;
636 rcu_read_unlock();
637
613 sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); 638 sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
614 if (!sta) 639 if (!sta)
615 return; 640 return;
@@ -678,8 +703,8 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
678 sdata_info(sdata, 703 sdata_info(sdata,
679 "No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n"); 704 "No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n");
680 705
681 ieee80211_request_internal_scan(sdata, 706 ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len,
682 ifibss->ssid, ifibss->ssid_len, NULL); 707 NULL);
683} 708}
684 709
685static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) 710static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
@@ -715,7 +740,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
715 740
716 __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, 741 __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int,
717 ifibss->channel, ifibss->basic_rates, 742 ifibss->channel, ifibss->basic_rates,
718 capability, 0); 743 capability, 0, true);
719} 744}
720 745
721/* 746/*
@@ -777,25 +802,14 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
777 IEEE80211_SCAN_INTERVAL)) { 802 IEEE80211_SCAN_INTERVAL)) {
778 sdata_info(sdata, "Trigger new scan to find an IBSS to join\n"); 803 sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
779 804
780 ieee80211_request_internal_scan(sdata, 805 ieee80211_request_ibss_scan(sdata, ifibss->ssid,
781 ifibss->ssid, ifibss->ssid_len, 806 ifibss->ssid_len, chan);
782 ifibss->fixed_channel ? ifibss->channel : NULL);
783 } else { 807 } else {
784 int interval = IEEE80211_SCAN_INTERVAL; 808 int interval = IEEE80211_SCAN_INTERVAL;
785 809
786 if (time_after(jiffies, ifibss->ibss_join_req + 810 if (time_after(jiffies, ifibss->ibss_join_req +
787 IEEE80211_IBSS_JOIN_TIMEOUT)) { 811 IEEE80211_IBSS_JOIN_TIMEOUT))
788 if (!(local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS)) { 812 ieee80211_sta_create_ibss(sdata);
789 ieee80211_sta_create_ibss(sdata);
790 return;
791 }
792 sdata_info(sdata, "IBSS not allowed on %d MHz\n",
793 local->oper_channel->center_freq);
794
795 /* No IBSS found - decrease scan interval and continue
796 * scanning. */
797 interval = IEEE80211_SCAN_INTERVAL_SLOW;
798 }
799 813
800 mod_timer(&ifibss->timer, 814 mod_timer(&ifibss->timer,
801 round_jiffies(jiffies + interval)); 815 round_jiffies(jiffies + interval));
@@ -1082,21 +1096,11 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
1082 1096
1083 sdata->vif.bss_conf.beacon_int = params->beacon_interval; 1097 sdata->vif.bss_conf.beacon_int = params->beacon_interval;
1084 1098
1085 sdata->u.ibss.channel = params->channel; 1099 sdata->u.ibss.channel = params->chandef.chan;
1086 sdata->u.ibss.channel_type = params->channel_type; 1100 sdata->u.ibss.channel_type =
1101 cfg80211_get_chandef_type(&params->chandef);
1087 sdata->u.ibss.fixed_channel = params->channel_fixed; 1102 sdata->u.ibss.fixed_channel = params->channel_fixed;
1088 1103
1089 /* fix ourselves to that channel now already */
1090 if (params->channel_fixed) {
1091 sdata->local->oper_channel = params->channel;
1092 if (!ieee80211_set_channel_type(sdata->local, sdata,
1093 params->channel_type)) {
1094 mutex_unlock(&sdata->u.ibss.mtx);
1095 kfree_skb(skb);
1096 return -EINVAL;
1097 }
1098 }
1099
1100 if (params->ie) { 1104 if (params->ie) {
1101 sdata->u.ibss.ie = kmemdup(params->ie, params->ie_len, 1105 sdata->u.ibss.ie = kmemdup(params->ie, params->ie_len,
1102 GFP_KERNEL); 1106 GFP_KERNEL);
@@ -1134,6 +1138,9 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
1134 changed |= BSS_CHANGED_HT; 1138 changed |= BSS_CHANGED_HT;
1135 ieee80211_bss_info_change_notify(sdata, changed); 1139 ieee80211_bss_info_change_notify(sdata, changed);
1136 1140
1141 sdata->smps_mode = IEEE80211_SMPS_OFF;
1142 sdata->needed_rx_chains = sdata->local->rx_chains;
1143
1137 ieee80211_queue_work(&sdata->local->hw, &sdata->work); 1144 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
1138 1145
1139 return 0; 1146 return 0;
@@ -1151,10 +1158,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
1151 1158
1152 mutex_lock(&sdata->u.ibss.mtx); 1159 mutex_lock(&sdata->u.ibss.mtx);
1153 1160
1154 sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
1155 memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
1156 sdata->u.ibss.ssid_len = 0;
1157
1158 active_ibss = ieee80211_sta_active_ibss(sdata); 1161 active_ibss = ieee80211_sta_active_ibss(sdata);
1159 1162
1160 if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) { 1163 if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
@@ -1175,6 +1178,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
1175 } 1178 }
1176 } 1179 }
1177 1180
1181 ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
1182 memset(ifibss->bssid, 0, ETH_ALEN);
1183 ifibss->ssid_len = 0;
1184
1178 sta_info_flush(sdata->local, sdata); 1185 sta_info_flush(sdata->local, sdata);
1179 1186
1180 spin_lock_bh(&ifibss->incomplete_lock); 1187 spin_lock_bh(&ifibss->incomplete_lock);
@@ -1197,6 +1204,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
1197 lockdep_is_held(&sdata->u.ibss.mtx)); 1204 lockdep_is_held(&sdata->u.ibss.mtx));
1198 RCU_INIT_POINTER(sdata->u.ibss.presp, NULL); 1205 RCU_INIT_POINTER(sdata->u.ibss.presp, NULL);
1199 sdata->vif.bss_conf.ibss_joined = false; 1206 sdata->vif.bss_conf.ibss_joined = false;
1207 sdata->vif.bss_conf.ibss_creator = false;
1200 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | 1208 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
1201 BSS_CHANGED_IBSS); 1209 BSS_CHANGED_IBSS);
1202 synchronize_rcu(); 1210 synchronize_rcu();
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 8c804550465b..8563b9a5cac3 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -56,6 +56,9 @@ struct ieee80211_local;
56#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) 56#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024))
57#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) 57#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x))
58 58
59/* power level hasn't been configured (or set to automatic) */
60#define IEEE80211_UNSET_POWER_LEVEL INT_MIN
61
59/* 62/*
60 * Some APs experience problems when working with U-APSD. Decrease the 63 * Some APs experience problems when working with U-APSD. Decrease the
61 * probability of that happening by using legacy mode for all ACs but VO. 64 * probability of that happening by using legacy mode for all ACs but VO.
@@ -89,8 +92,6 @@ struct ieee80211_bss {
89 92
90 u32 device_ts; 93 u32 device_ts;
91 94
92 u8 dtim_period;
93
94 bool wmm_used; 95 bool wmm_used;
95 bool uapsd_supported; 96 bool uapsd_supported;
96 97
@@ -137,7 +138,6 @@ enum ieee80211_bss_corrupt_data_flags {
137 138
138/** 139/**
139 * enum ieee80211_valid_data_flags - BSS valid data flags 140 * enum ieee80211_valid_data_flags - BSS valid data flags
140 * @IEEE80211_BSS_VALID_DTIM: DTIM data was gathered from non-corrupt IE
141 * @IEEE80211_BSS_VALID_WMM: WMM/UAPSD data was gathered from non-corrupt IE 141 * @IEEE80211_BSS_VALID_WMM: WMM/UAPSD data was gathered from non-corrupt IE
142 * @IEEE80211_BSS_VALID_RATES: Supported rates were gathered from non-corrupt IE 142 * @IEEE80211_BSS_VALID_RATES: Supported rates were gathered from non-corrupt IE
143 * @IEEE80211_BSS_VALID_ERP: ERP flag was gathered from non-corrupt IE 143 * @IEEE80211_BSS_VALID_ERP: ERP flag was gathered from non-corrupt IE
@@ -148,7 +148,6 @@ enum ieee80211_bss_corrupt_data_flags {
148 * beacon/probe response. 148 * beacon/probe response.
149 */ 149 */
150enum ieee80211_bss_valid_data_flags { 150enum ieee80211_bss_valid_data_flags {
151 IEEE80211_BSS_VALID_DTIM = BIT(0),
152 IEEE80211_BSS_VALID_WMM = BIT(1), 151 IEEE80211_BSS_VALID_WMM = BIT(1),
153 IEEE80211_BSS_VALID_RATES = BIT(2), 152 IEEE80211_BSS_VALID_RATES = BIT(2),
154 IEEE80211_BSS_VALID_ERP = BIT(3) 153 IEEE80211_BSS_VALID_ERP = BIT(3)
@@ -280,23 +279,27 @@ struct probe_resp {
280 u8 data[0]; 279 u8 data[0];
281}; 280};
282 281
283struct ieee80211_if_ap { 282struct ps_data {
284 struct beacon_data __rcu *beacon;
285 struct probe_resp __rcu *probe_resp;
286
287 struct list_head vlans;
288
289 /* yes, this looks ugly, but guarantees that we can later use 283 /* yes, this looks ugly, but guarantees that we can later use
290 * bitmap_empty :) 284 * bitmap_empty :)
291 * NB: don't touch this bitmap, use sta_info_{set,clear}_tim_bit */ 285 * NB: don't touch this bitmap, use sta_info_{set,clear}_tim_bit */
292 u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)]; 286 u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)];
293 struct sk_buff_head ps_bc_buf; 287 struct sk_buff_head bc_buf;
294 atomic_t num_sta_ps; /* number of stations in PS mode */ 288 atomic_t num_sta_ps; /* number of stations in PS mode */
295 atomic_t num_mcast_sta; /* number of stations receiving multicast */
296 int dtim_count; 289 int dtim_count;
297 bool dtim_bc_mc; 290 bool dtim_bc_mc;
298}; 291};
299 292
293struct ieee80211_if_ap {
294 struct beacon_data __rcu *beacon;
295 struct probe_resp __rcu *probe_resp;
296
297 struct list_head vlans;
298
299 struct ps_data ps;
300 atomic_t num_mcast_sta; /* number of stations receiving multicast */
301};
302
300struct ieee80211_if_wds { 303struct ieee80211_if_wds {
301 struct sta_info *sta; 304 struct sta_info *sta;
302 u8 remote_addr[ETH_ALEN]; 305 u8 remote_addr[ETH_ALEN];
@@ -316,7 +319,6 @@ struct mesh_stats {
316 __u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0*/ 319 __u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0*/
317 __u32 dropped_frames_no_route; /* Not transmitted, no route found */ 320 __u32 dropped_frames_no_route; /* Not transmitted, no route found */
318 __u32 dropped_frames_congestion;/* Not forwarded due to congestion */ 321 __u32 dropped_frames_congestion;/* Not forwarded due to congestion */
319 atomic_t estab_plinks;
320}; 322};
321 323
322#define PREQ_Q_F_START 0x1 324#define PREQ_Q_F_START 0x1
@@ -342,7 +344,6 @@ struct ieee80211_roc_work {
342 struct ieee80211_sub_if_data *sdata; 344 struct ieee80211_sub_if_data *sdata;
343 345
344 struct ieee80211_channel *chan; 346 struct ieee80211_channel *chan;
345 enum nl80211_channel_type chan_type;
346 347
347 bool started, abort, hw_begun, notified; 348 bool started, abort, hw_begun, notified;
348 349
@@ -350,7 +351,7 @@ struct ieee80211_roc_work {
350 351
351 u32 duration, req_duration; 352 u32 duration, req_duration;
352 struct sk_buff *frame; 353 struct sk_buff *frame;
353 u64 mgmt_tx_cookie; 354 u64 cookie, mgmt_tx_cookie;
354}; 355};
355 356
356/* flags used in struct ieee80211_if_managed.flags */ 357/* flags used in struct ieee80211_if_managed.flags */
@@ -358,7 +359,7 @@ enum ieee80211_sta_flags {
358 IEEE80211_STA_BEACON_POLL = BIT(0), 359 IEEE80211_STA_BEACON_POLL = BIT(0),
359 IEEE80211_STA_CONNECTION_POLL = BIT(1), 360 IEEE80211_STA_CONNECTION_POLL = BIT(1),
360 IEEE80211_STA_CONTROL_PORT = BIT(2), 361 IEEE80211_STA_CONTROL_PORT = BIT(2),
361 IEEE80211_STA_DISABLE_11N = BIT(4), 362 IEEE80211_STA_DISABLE_HT = BIT(4),
362 IEEE80211_STA_CSA_RECEIVED = BIT(5), 363 IEEE80211_STA_CSA_RECEIVED = BIT(5),
363 IEEE80211_STA_MFP_ENABLED = BIT(6), 364 IEEE80211_STA_MFP_ENABLED = BIT(6),
364 IEEE80211_STA_UAPSD_ENABLED = BIT(7), 365 IEEE80211_STA_UAPSD_ENABLED = BIT(7),
@@ -366,6 +367,8 @@ enum ieee80211_sta_flags {
366 IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9), 367 IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9),
367 IEEE80211_STA_DISABLE_40MHZ = BIT(10), 368 IEEE80211_STA_DISABLE_40MHZ = BIT(10),
368 IEEE80211_STA_DISABLE_VHT = BIT(11), 369 IEEE80211_STA_DISABLE_VHT = BIT(11),
370 IEEE80211_STA_DISABLE_80P80MHZ = BIT(12),
371 IEEE80211_STA_DISABLE_160MHZ = BIT(13),
369}; 372};
370 373
371struct ieee80211_mgd_auth_data { 374struct ieee80211_mgd_auth_data {
@@ -378,8 +381,9 @@ struct ieee80211_mgd_auth_data {
378 u8 key_len, key_idx; 381 u8 key_len, key_idx;
379 bool done; 382 bool done;
380 383
381 size_t ie_len; 384 u16 sae_trans, sae_status;
382 u8 ie[]; 385 size_t data_len;
386 u8 data[];
383}; 387};
384 388
385struct ieee80211_mgd_assoc_data { 389struct ieee80211_mgd_assoc_data {
@@ -432,8 +436,8 @@ struct ieee80211_if_managed {
432 unsigned long timers_running; /* used for quiesce/restart */ 436 unsigned long timers_running; /* used for quiesce/restart */
433 bool powersave; /* powersave requested for this iface */ 437 bool powersave; /* powersave requested for this iface */
434 bool broken_ap; /* AP is broken -- turn off powersave */ 438 bool broken_ap; /* AP is broken -- turn off powersave */
439 u8 dtim_period;
435 enum ieee80211_smps_mode req_smps, /* requested smps mode */ 440 enum ieee80211_smps_mode req_smps, /* requested smps mode */
436 ap_smps, /* smps mode AP thinks we're in */
437 driver_smps_mode; /* smps mode request */ 441 driver_smps_mode; /* smps mode request */
438 442
439 struct work_struct request_smps_work; 443 struct work_struct request_smps_work;
@@ -467,6 +471,8 @@ struct ieee80211_if_managed {
467 471
468 u8 use_4addr; 472 u8 use_4addr;
469 473
474 u8 p2p_noa_index;
475
470 /* Signal strength from the last Beacon frame in the current BSS. */ 476 /* Signal strength from the last Beacon frame in the current BSS. */
471 int last_beacon_signal; 477 int last_beacon_signal;
472 478
@@ -599,6 +605,7 @@ struct ieee80211_if_mesh {
599 int preq_queue_len; 605 int preq_queue_len;
600 struct mesh_stats mshstats; 606 struct mesh_stats mshstats;
601 struct mesh_config mshcfg; 607 struct mesh_config mshcfg;
608 atomic_t estab_plinks;
602 u32 mesh_seqnum; 609 u32 mesh_seqnum;
603 bool accepting_plinks; 610 bool accepting_plinks;
604 int num_gates; 611 int num_gates;
@@ -610,7 +617,7 @@ struct ieee80211_if_mesh {
610 IEEE80211_MESH_SEC_SECURED = 0x2, 617 IEEE80211_MESH_SEC_SECURED = 0x2,
611 } security; 618 } security;
612 /* Extensible Synchronization Framework */ 619 /* Extensible Synchronization Framework */
613 struct ieee80211_mesh_sync_ops *sync_ops; 620 const struct ieee80211_mesh_sync_ops *sync_ops;
614 s64 sync_offset_clockdrift_max; 621 s64 sync_offset_clockdrift_max;
615 spinlock_t sync_offset_lock; 622 spinlock_t sync_offset_lock;
616 bool adjusting_tbtt; 623 bool adjusting_tbtt;
@@ -658,6 +665,30 @@ enum ieee80211_sdata_state_bits {
658 SDATA_STATE_OFFCHANNEL, 665 SDATA_STATE_OFFCHANNEL,
659}; 666};
660 667
668/**
669 * enum ieee80211_chanctx_mode - channel context configuration mode
670 *
671 * @IEEE80211_CHANCTX_SHARED: channel context may be used by
672 * multiple interfaces
673 * @IEEE80211_CHANCTX_EXCLUSIVE: channel context can be used
674 * only by a single interface. This can be used for example for
675 * non-fixed channel IBSS.
676 */
677enum ieee80211_chanctx_mode {
678 IEEE80211_CHANCTX_SHARED,
679 IEEE80211_CHANCTX_EXCLUSIVE
680};
681
682struct ieee80211_chanctx {
683 struct list_head list;
684 struct rcu_head rcu_head;
685
686 enum ieee80211_chanctx_mode mode;
687 int refcount;
688
689 struct ieee80211_chanctx_conf conf;
690};
691
661struct ieee80211_sub_if_data { 692struct ieee80211_sub_if_data {
662 struct list_head list; 693 struct list_head list;
663 694
@@ -704,11 +735,20 @@ struct ieee80211_sub_if_data {
704 735
705 struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS]; 736 struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
706 737
738 /* used to reconfigure hardware SM PS */
739 struct work_struct recalc_smps;
740
707 struct work_struct work; 741 struct work_struct work;
708 struct sk_buff_head skb_queue; 742 struct sk_buff_head skb_queue;
709 743
710 bool arp_filter_state; 744 bool arp_filter_state;
711 745
746 u8 needed_rx_chains;
747 enum ieee80211_smps_mode smps_mode;
748
749 int user_power_level; /* in dBm */
750 int ap_power_level; /* in dBm */
751
712 /* 752 /*
713 * AP this belongs to: self in AP mode and 753 * AP this belongs to: self in AP mode and
714 * corresponding AP in VLAN mode, NULL for 754 * corresponding AP in VLAN mode, NULL for
@@ -730,6 +770,10 @@ struct ieee80211_sub_if_data {
730 u32 mntr_flags; 770 u32 mntr_flags;
731 } u; 771 } u;
732 772
773 spinlock_t cleanup_stations_lock;
774 struct list_head cleanup_stations;
775 struct work_struct cleanup_stations_wk;
776
733#ifdef CONFIG_MAC80211_DEBUGFS 777#ifdef CONFIG_MAC80211_DEBUGFS
734 struct { 778 struct {
735 struct dentry *dir; 779 struct dentry *dir;
@@ -749,6 +793,21 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p)
749 return container_of(p, struct ieee80211_sub_if_data, vif); 793 return container_of(p, struct ieee80211_sub_if_data, vif);
750} 794}
751 795
796static inline enum ieee80211_band
797ieee80211_get_sdata_band(struct ieee80211_sub_if_data *sdata)
798{
799 enum ieee80211_band band = IEEE80211_BAND_2GHZ;
800 struct ieee80211_chanctx_conf *chanctx_conf;
801
802 rcu_read_lock();
803 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
804 if (!WARN_ON(!chanctx_conf))
805 band = chanctx_conf->def.chan->band;
806 rcu_read_unlock();
807
808 return band;
809}
810
752enum sdata_queue_type { 811enum sdata_queue_type {
753 IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0, 812 IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0,
754 IEEE80211_SDATA_QUEUE_AGG_START = 1, 813 IEEE80211_SDATA_QUEUE_AGG_START = 1,
@@ -821,6 +880,7 @@ enum {
821 * @SCAN_SUSPEND: Suspend the scan and go back to operating channel to 880 * @SCAN_SUSPEND: Suspend the scan and go back to operating channel to
822 * send out data 881 * send out data
823 * @SCAN_RESUME: Resume the scan and scan the next channel 882 * @SCAN_RESUME: Resume the scan and scan the next channel
883 * @SCAN_ABORT: Abort the scan and go back to operating channel
824 */ 884 */
825enum mac80211_scan_state { 885enum mac80211_scan_state {
826 SCAN_DECISION, 886 SCAN_DECISION,
@@ -828,6 +888,7 @@ enum mac80211_scan_state {
828 SCAN_SEND_PROBE, 888 SCAN_SEND_PROBE,
829 SCAN_SUSPEND, 889 SCAN_SUSPEND,
830 SCAN_RESUME, 890 SCAN_RESUME,
891 SCAN_ABORT,
831}; 892};
832 893
833struct ieee80211_local { 894struct ieee80211_local {
@@ -858,15 +919,14 @@ struct ieee80211_local {
858 919
859 bool wiphy_ciphers_allocated; 920 bool wiphy_ciphers_allocated;
860 921
922 bool use_chanctx;
923
861 /* protects the aggregated multicast list and filter calls */ 924 /* protects the aggregated multicast list and filter calls */
862 spinlock_t filter_lock; 925 spinlock_t filter_lock;
863 926
864 /* used for uploading changed mc list */ 927 /* used for uploading changed mc list */
865 struct work_struct reconfig_filter; 928 struct work_struct reconfig_filter;
866 929
867 /* used to reconfigure hardware SM PS */
868 struct work_struct recalc_smps;
869
870 /* aggregated multicast list */ 930 /* aggregated multicast list */
871 struct netdev_hw_addr_list mc_list; 931 struct netdev_hw_addr_list mc_list;
872 932
@@ -903,6 +963,9 @@ struct ieee80211_local {
903 /* wowlan is enabled -- don't reconfig on resume */ 963 /* wowlan is enabled -- don't reconfig on resume */
904 bool wowlan; 964 bool wowlan;
905 965
966 /* number of RX chains the hardware has */
967 u8 rx_chains;
968
906 int tx_headroom; /* required headroom for hardware/radiotap */ 969 int tx_headroom; /* required headroom for hardware/radiotap */
907 970
908 /* Tasklet and skb queue to process calls from IRQ mode. All frames 971 /* Tasklet and skb queue to process calls from IRQ mode. All frames
@@ -972,6 +1035,7 @@ struct ieee80211_local {
972 enum ieee80211_band hw_scan_band; 1035 enum ieee80211_band hw_scan_band;
973 int scan_channel_idx; 1036 int scan_channel_idx;
974 int scan_ies_len; 1037 int scan_ies_len;
1038 int hw_scan_ies_bufsize;
975 1039
976 struct work_struct sched_scan_stopped_work; 1040 struct work_struct sched_scan_stopped_work;
977 struct ieee80211_sub_if_data __rcu *sched_scan_sdata; 1041 struct ieee80211_sub_if_data __rcu *sched_scan_sdata;
@@ -980,12 +1044,17 @@ struct ieee80211_local {
980 enum mac80211_scan_state next_scan_state; 1044 enum mac80211_scan_state next_scan_state;
981 struct delayed_work scan_work; 1045 struct delayed_work scan_work;
982 struct ieee80211_sub_if_data __rcu *scan_sdata; 1046 struct ieee80211_sub_if_data __rcu *scan_sdata;
1047 struct ieee80211_channel *csa_channel;
1048 /* For backward compatibility only -- do not use */
1049 struct ieee80211_channel *_oper_channel;
983 enum nl80211_channel_type _oper_channel_type; 1050 enum nl80211_channel_type _oper_channel_type;
984 struct ieee80211_channel *oper_channel, *csa_channel;
985 1051
986 /* Temporary remain-on-channel for off-channel operations */ 1052 /* Temporary remain-on-channel for off-channel operations */
987 struct ieee80211_channel *tmp_channel; 1053 struct ieee80211_channel *tmp_channel;
988 enum nl80211_channel_type tmp_channel_type; 1054
1055 /* channel contexts */
1056 struct list_head chanctx_list;
1057 struct mutex chanctx_mtx;
989 1058
990 /* SNMP counters */ 1059 /* SNMP counters */
991 /* dot11CountersTable */ 1060 /* dot11CountersTable */
@@ -1058,8 +1127,7 @@ struct ieee80211_local {
1058 int dynamic_ps_user_timeout; 1127 int dynamic_ps_user_timeout;
1059 bool disable_dynamic_ps; 1128 bool disable_dynamic_ps;
1060 1129
1061 int user_power_level; /* in dBm */ 1130 int user_power_level; /* in dBm, for all interfaces */
1062 int ap_power_level; /* in dBm */
1063 1131
1064 enum ieee80211_smps_mode smps_mode; 1132 enum ieee80211_smps_mode smps_mode;
1065 1133
@@ -1078,6 +1146,7 @@ struct ieee80211_local {
1078 struct list_head roc_list; 1146 struct list_head roc_list;
1079 struct work_struct hw_roc_start, hw_roc_done; 1147 struct work_struct hw_roc_start, hw_roc_done;
1080 unsigned long hw_roc_start_time; 1148 unsigned long hw_roc_start_time;
1149 u64 roc_cookie_counter;
1081 1150
1082 struct idr ack_status_frames; 1151 struct idr ack_status_frames;
1083 spinlock_t ack_status_lock; 1152 spinlock_t ack_status_lock;
@@ -1091,6 +1160,7 @@ struct ieee80211_local {
1091 1160
1092 /* virtual monitor interface */ 1161 /* virtual monitor interface */
1093 struct ieee80211_sub_if_data __rcu *monitor_sdata; 1162 struct ieee80211_sub_if_data __rcu *monitor_sdata;
1163 struct cfg80211_chan_def monitor_chandef;
1094}; 1164};
1095 1165
1096static inline struct ieee80211_sub_if_data * 1166static inline struct ieee80211_sub_if_data *
@@ -1133,6 +1203,8 @@ struct ieee802_11_elems {
1133 u8 *wmm_param; 1203 u8 *wmm_param;
1134 struct ieee80211_ht_cap *ht_cap_elem; 1204 struct ieee80211_ht_cap *ht_cap_elem;
1135 struct ieee80211_ht_operation *ht_operation; 1205 struct ieee80211_ht_operation *ht_operation;
1206 struct ieee80211_vht_cap *vht_cap_elem;
1207 struct ieee80211_vht_operation *vht_operation;
1136 struct ieee80211_meshconf_ie *mesh_config; 1208 struct ieee80211_meshconf_ie *mesh_config;
1137 u8 *mesh_id; 1209 u8 *mesh_id;
1138 u8 *peering; 1210 u8 *peering;
@@ -1188,7 +1260,18 @@ static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
1188 is_broadcast_ether_addr(raddr); 1260 is_broadcast_ether_addr(raddr);
1189} 1261}
1190 1262
1263static inline bool
1264ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status)
1265{
1266 WARN_ON_ONCE(status->flag & RX_FLAG_MACTIME_START &&
1267 status->flag & RX_FLAG_MACTIME_END);
1268 return status->flag & (RX_FLAG_MACTIME_START | RX_FLAG_MACTIME_END);
1269}
1191 1270
1271u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
1272 struct ieee80211_rx_status *status,
1273 unsigned int mpdu_len,
1274 unsigned int mpdu_offset);
1192int ieee80211_hw_config(struct ieee80211_local *local, u32 changed); 1275int ieee80211_hw_config(struct ieee80211_local *local, u32 changed);
1193void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx); 1276void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx);
1194void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, 1277void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
@@ -1247,9 +1330,9 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1247 1330
1248/* scan/BSS handling */ 1331/* scan/BSS handling */
1249void ieee80211_scan_work(struct work_struct *work); 1332void ieee80211_scan_work(struct work_struct *work);
1250int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata, 1333int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
1251 const u8 *ssid, u8 ssid_len, 1334 const u8 *ssid, u8 ssid_len,
1252 struct ieee80211_channel *chan); 1335 struct ieee80211_channel *chan);
1253int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, 1336int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
1254 struct cfg80211_scan_request *req); 1337 struct cfg80211_scan_request *req);
1255void ieee80211_scan_cancel(struct ieee80211_local *local); 1338void ieee80211_scan_cancel(struct ieee80211_local *local);
@@ -1302,6 +1385,9 @@ void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
1302int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up); 1385int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up);
1303void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata); 1386void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata);
1304 1387
1388bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
1389void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
1390
1305static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata) 1391static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
1306{ 1392{
1307 return test_bit(SDATA_STATE_RUNNING, &sdata->state); 1393 return test_bit(SDATA_STATE_RUNNING, &sdata->state);
@@ -1314,6 +1400,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1314 struct net_device *dev); 1400 struct net_device *dev);
1315netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, 1401netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1316 struct net_device *dev); 1402 struct net_device *dev);
1403void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
1404 struct sk_buff_head *skbs);
1317 1405
1318/* HT */ 1406/* HT */
1319void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, 1407void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
@@ -1359,6 +1447,13 @@ void ieee80211_ba_session_work(struct work_struct *work);
1359void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid); 1447void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
1360void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid); 1448void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
1361 1449
1450u8 ieee80211_mcs_to_chains(const struct ieee80211_mcs_info *mcs);
1451
1452/* VHT */
1453void ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
1454 struct ieee80211_supported_band *sband,
1455 struct ieee80211_vht_cap *vht_cap_ie,
1456 struct ieee80211_sta_vht_cap *vht_cap);
1362/* Spectrum management */ 1457/* Spectrum management */
1363void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, 1458void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
1364 struct ieee80211_mgmt *mgmt, 1459 struct ieee80211_mgmt *mgmt,
@@ -1393,11 +1488,42 @@ void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int ke
1393 gfp_t gfp); 1488 gfp_t gfp);
1394void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, 1489void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
1395 bool bss_notify); 1490 bool bss_notify);
1396void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); 1491void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
1492 enum ieee80211_band band);
1493
1494void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
1495 struct sk_buff *skb, int tid,
1496 enum ieee80211_band band);
1397 1497
1398void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, 1498static inline void
1399 struct sk_buff *skb, int tid); 1499ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
1400static void inline ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, 1500 struct sk_buff *skb, int tid,
1501 enum ieee80211_band band)
1502{
1503 rcu_read_lock();
1504 __ieee80211_tx_skb_tid_band(sdata, skb, tid, band);
1505 rcu_read_unlock();
1506}
1507
1508static inline void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
1509 struct sk_buff *skb, int tid)
1510{
1511 struct ieee80211_chanctx_conf *chanctx_conf;
1512
1513 rcu_read_lock();
1514 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
1515 if (WARN_ON(!chanctx_conf)) {
1516 rcu_read_unlock();
1517 kfree_skb(skb);
1518 return;
1519 }
1520
1521 __ieee80211_tx_skb_tid_band(sdata, skb, tid,
1522 chanctx_conf->def.chan->band);
1523 rcu_read_unlock();
1524}
1525
1526static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
1401 struct sk_buff *skb) 1527 struct sk_buff *skb)
1402{ 1528{
1403 /* Send all internal mgmt frames on VO. Accordingly set TID to 7. */ 1529 /* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
@@ -1444,14 +1570,14 @@ static inline void ieee80211_add_pending_skbs(struct ieee80211_local *local,
1444} 1570}
1445 1571
1446void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, 1572void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1447 u16 transaction, u16 auth_alg, 1573 u16 transaction, u16 auth_alg, u16 status,
1448 u8 *extra, size_t extra_len, const u8 *bssid, 1574 u8 *extra, size_t extra_len, const u8 *bssid,
1449 const u8 *da, const u8 *key, u8 key_len, u8 key_idx); 1575 const u8 *da, const u8 *key, u8 key_len, u8 key_idx);
1450void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, 1576void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
1451 const u8 *bssid, u16 stype, u16 reason, 1577 const u8 *bssid, u16 stype, u16 reason,
1452 bool send_frame, u8 *frame_buf); 1578 bool send_frame, u8 *frame_buf);
1453int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, 1579int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
1454 const u8 *ie, size_t ie_len, 1580 size_t buffer_len, const u8 *ie, size_t ie_len,
1455 enum ieee80211_band band, u32 rate_mask, 1581 enum ieee80211_band band, u32 rate_mask,
1456 u8 channel); 1582 u8 channel);
1457struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, 1583struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
@@ -1464,7 +1590,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1464 const u8 *ssid, size_t ssid_len, 1590 const u8 *ssid, size_t ssid_len,
1465 const u8 *ie, size_t ie_len, 1591 const u8 *ie, size_t ie_len,
1466 u32 ratemask, bool directed, bool no_cck, 1592 u32 ratemask, bool directed, bool no_cck,
1467 struct ieee80211_channel *channel); 1593 struct ieee80211_channel *channel, bool scan);
1468 1594
1469void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, 1595void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
1470 const size_t supp_rates_len, 1596 const size_t supp_rates_len,
@@ -1474,7 +1600,7 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
1474 enum ieee80211_band band, u32 *basic_rates); 1600 enum ieee80211_band band, u32 *basic_rates);
1475int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata, 1601int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
1476 enum ieee80211_smps_mode smps_mode); 1602 enum ieee80211_smps_mode smps_mode);
1477void ieee80211_recalc_smps(struct ieee80211_local *local); 1603void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata);
1478 1604
1479size_t ieee80211_ie_split(const u8 *ies, size_t ielen, 1605size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
1480 const u8 *ids, int n_ids, size_t offset); 1606 const u8 *ids, int n_ids, size_t offset);
@@ -1482,8 +1608,7 @@ size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset);
1482u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, 1608u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1483 u16 cap); 1609 u16 cap);
1484u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, 1610u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1485 struct ieee80211_channel *channel, 1611 const struct cfg80211_chan_def *chandef,
1486 enum nl80211_channel_type channel_type,
1487 u16 prot_mode); 1612 u16 prot_mode);
1488u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, 1613u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
1489 u32 cap); 1614 u32 cap);
@@ -1495,20 +1620,19 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
1495 enum ieee80211_band band); 1620 enum ieee80211_band band);
1496 1621
1497/* channel management */ 1622/* channel management */
1498enum ieee80211_chan_mode { 1623void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
1499 CHAN_MODE_UNDEFINED, 1624 struct ieee80211_ht_operation *ht_oper,
1500 CHAN_MODE_HOPPING, 1625 struct cfg80211_chan_def *chandef);
1501 CHAN_MODE_FIXED, 1626
1502}; 1627int __must_check
1503 1628ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
1504enum ieee80211_chan_mode 1629 const struct cfg80211_chan_def *chandef,
1505ieee80211_get_channel_mode(struct ieee80211_local *local, 1630 enum ieee80211_chanctx_mode mode);
1506 struct ieee80211_sub_if_data *ignore); 1631void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata);
1507bool ieee80211_set_channel_type(struct ieee80211_local *local, 1632void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata);
1508 struct ieee80211_sub_if_data *sdata, 1633
1509 enum nl80211_channel_type chantype); 1634void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
1510enum nl80211_channel_type 1635 struct ieee80211_chanctx *chanctx);
1511ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper);
1512 1636
1513#ifdef CONFIG_MAC80211_NOINLINE 1637#ifdef CONFIG_MAC80211_NOINLINE
1514#define debug_noinline noinline 1638#define debug_noinline noinline
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 7de7717ad67d..8be854e86cd9 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -42,6 +42,41 @@
42 * by either the RTNL, the iflist_mtx or RCU. 42 * by either the RTNL, the iflist_mtx or RCU.
43 */ 43 */
44 44
45bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
46{
47 struct ieee80211_chanctx_conf *chanctx_conf;
48 int power;
49
50 rcu_read_lock();
51 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
52 if (!chanctx_conf) {
53 rcu_read_unlock();
54 return false;
55 }
56
57 power = chanctx_conf->def.chan->max_power;
58 rcu_read_unlock();
59
60 if (sdata->user_power_level != IEEE80211_UNSET_POWER_LEVEL)
61 power = min(power, sdata->user_power_level);
62
63 if (sdata->ap_power_level != IEEE80211_UNSET_POWER_LEVEL)
64 power = min(power, sdata->ap_power_level);
65
66 if (power != sdata->vif.bss_conf.txpower) {
67 sdata->vif.bss_conf.txpower = power;
68 ieee80211_hw_config(sdata->local, 0);
69 return true;
70 }
71
72 return false;
73}
74
75void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
76{
77 if (__ieee80211_recalc_txpower(sdata))
78 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER);
79}
45 80
46static u32 ieee80211_idle_off(struct ieee80211_local *local, 81static u32 ieee80211_idle_off(struct ieee80211_local *local,
47 const char *reason) 82 const char *reason)
@@ -172,22 +207,54 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)
172 207
173static int ieee80211_change_mtu(struct net_device *dev, int new_mtu) 208static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
174{ 209{
175 int meshhdrlen; 210 if (new_mtu < 256 || new_mtu > IEEE80211_MAX_DATA_LEN)
176 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
177
178 meshhdrlen = (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) ? 5 : 0;
179
180 /* FIX: what would be proper limits for MTU?
181 * This interface uses 802.3 frames. */
182 if (new_mtu < 256 ||
183 new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
184 return -EINVAL; 211 return -EINVAL;
185 }
186 212
187 dev->mtu = new_mtu; 213 dev->mtu = new_mtu;
188 return 0; 214 return 0;
189} 215}
190 216
217static int ieee80211_verify_mac(struct ieee80211_local *local, u8 *addr)
218{
219 struct ieee80211_sub_if_data *sdata;
220 u64 new, mask, tmp;
221 u8 *m;
222 int ret = 0;
223
224 if (is_zero_ether_addr(local->hw.wiphy->addr_mask))
225 return 0;
226
227 m = addr;
228 new = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) |
229 ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) |
230 ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8);
231
232 m = local->hw.wiphy->addr_mask;
233 mask = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) |
234 ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) |
235 ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8);
236
237
238 mutex_lock(&local->iflist_mtx);
239 list_for_each_entry(sdata, &local->interfaces, list) {
240 if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
241 continue;
242
243 m = sdata->vif.addr;
244 tmp = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) |
245 ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) |
246 ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8);
247
248 if ((new & ~mask) != (tmp & ~mask)) {
249 ret = -EINVAL;
250 break;
251 }
252 }
253 mutex_unlock(&local->iflist_mtx);
254
255 return ret;
256}
257
191static int ieee80211_change_mac(struct net_device *dev, void *addr) 258static int ieee80211_change_mac(struct net_device *dev, void *addr)
192{ 259{
193 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 260 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -197,6 +264,10 @@ static int ieee80211_change_mac(struct net_device *dev, void *addr)
197 if (ieee80211_sdata_running(sdata)) 264 if (ieee80211_sdata_running(sdata))
198 return -EBUSY; 265 return -EBUSY;
199 266
267 ret = ieee80211_verify_mac(sdata->local, sa->sa_data);
268 if (ret)
269 return ret;
270
200 ret = eth_mac_addr(dev, sa); 271 ret = eth_mac_addr(dev, sa);
201 272
202 if (ret == 0) 273 if (ret == 0)
@@ -380,6 +451,14 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
380 goto out_unlock; 451 goto out_unlock;
381 } 452 }
382 453
454 ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef,
455 IEEE80211_CHANCTX_EXCLUSIVE);
456 if (ret) {
457 drv_remove_interface(local, sdata);
458 kfree(sdata);
459 goto out_unlock;
460 }
461
383 rcu_assign_pointer(local->monitor_sdata, sdata); 462 rcu_assign_pointer(local->monitor_sdata, sdata);
384 out_unlock: 463 out_unlock:
385 mutex_unlock(&local->iflist_mtx); 464 mutex_unlock(&local->iflist_mtx);
@@ -403,6 +482,8 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
403 rcu_assign_pointer(local->monitor_sdata, NULL); 482 rcu_assign_pointer(local->monitor_sdata, NULL);
404 synchronize_net(); 483 synchronize_net();
405 484
485 ieee80211_vif_release_channel(sdata);
486
406 drv_remove_interface(local, sdata); 487 drv_remove_interface(local, sdata);
407 488
408 kfree(sdata); 489 kfree(sdata);
@@ -496,11 +577,13 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
496 577
497 switch (sdata->vif.type) { 578 switch (sdata->vif.type) {
498 case NL80211_IFTYPE_AP_VLAN: 579 case NL80211_IFTYPE_AP_VLAN:
499 /* no need to tell driver, but set carrier */ 580 /* no need to tell driver, but set carrier and chanctx */
500 if (rtnl_dereference(sdata->bss->beacon)) 581 if (rtnl_dereference(sdata->bss->beacon)) {
582 ieee80211_vif_vlan_copy_chanctx(sdata);
501 netif_carrier_on(dev); 583 netif_carrier_on(dev);
502 else 584 } else {
503 netif_carrier_off(dev); 585 netif_carrier_off(dev);
586 }
504 break; 587 break;
505 case NL80211_IFTYPE_MONITOR: 588 case NL80211_IFTYPE_MONITOR:
506 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) { 589 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
@@ -665,7 +748,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
665 struct sk_buff *skb, *tmp; 748 struct sk_buff *skb, *tmp;
666 u32 hw_reconf_flags = 0; 749 u32 hw_reconf_flags = 0;
667 int i; 750 int i;
668 enum nl80211_channel_type orig_ct;
669 751
670 clear_bit(SDATA_STATE_RUNNING, &sdata->state); 752 clear_bit(SDATA_STATE_RUNNING, &sdata->state);
671 753
@@ -729,34 +811,17 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
729 del_timer_sync(&local->dynamic_ps_timer); 811 del_timer_sync(&local->dynamic_ps_timer);
730 cancel_work_sync(&local->dynamic_ps_enable_work); 812 cancel_work_sync(&local->dynamic_ps_enable_work);
731 813
814 cancel_work_sync(&sdata->recalc_smps);
815
732 /* APs need special treatment */ 816 /* APs need special treatment */
733 if (sdata->vif.type == NL80211_IFTYPE_AP) { 817 if (sdata->vif.type == NL80211_IFTYPE_AP) {
734 struct ieee80211_sub_if_data *vlan, *tmpsdata; 818 struct ieee80211_sub_if_data *vlan, *tmpsdata;
735 struct beacon_data *old_beacon =
736 rtnl_dereference(sdata->u.ap.beacon);
737 struct probe_resp *old_probe_resp =
738 rtnl_dereference(sdata->u.ap.probe_resp);
739
740 /* sdata_running will return false, so this will disable */
741 ieee80211_bss_info_change_notify(sdata,
742 BSS_CHANGED_BEACON_ENABLED);
743
744 /* remove beacon and probe response */
745 RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
746 RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL);
747 synchronize_rcu();
748 kfree(old_beacon);
749 kfree(old_probe_resp);
750 819
751 /* down all dependent devices, that is VLANs */ 820 /* down all dependent devices, that is VLANs */
752 list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans, 821 list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
753 u.vlan.list) 822 u.vlan.list)
754 dev_close(vlan->dev); 823 dev_close(vlan->dev);
755 WARN_ON(!list_empty(&sdata->u.ap.vlans)); 824 WARN_ON(!list_empty(&sdata->u.ap.vlans));
756
757 /* free all potentially still buffered bcast frames */
758 local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf);
759 skb_queue_purge(&sdata->u.ap.ps_bc_buf);
760 } else if (sdata->vif.type == NL80211_IFTYPE_STATION) { 825 } else if (sdata->vif.type == NL80211_IFTYPE_STATION) {
761 ieee80211_mgd_stop(sdata); 826 ieee80211_mgd_stop(sdata);
762 } 827 }
@@ -767,6 +832,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
767 switch (sdata->vif.type) { 832 switch (sdata->vif.type) {
768 case NL80211_IFTYPE_AP_VLAN: 833 case NL80211_IFTYPE_AP_VLAN:
769 list_del(&sdata->u.vlan.list); 834 list_del(&sdata->u.vlan.list);
835 rcu_assign_pointer(sdata->vif.chanctx_conf, NULL);
770 /* no need to tell driver */ 836 /* no need to tell driver */
771 break; 837 break;
772 case NL80211_IFTYPE_MONITOR: 838 case NL80211_IFTYPE_MONITOR:
@@ -790,23 +856,14 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
790 rcu_assign_pointer(local->p2p_sdata, NULL); 856 rcu_assign_pointer(local->p2p_sdata, NULL);
791 /* fall through */ 857 /* fall through */
792 default: 858 default:
793 flush_work(&sdata->work); 859 cancel_work_sync(&sdata->work);
794 /* 860 /*
795 * When we get here, the interface is marked down. 861 * When we get here, the interface is marked down.
796 * Call rcu_barrier() to wait both for the RX path 862 * Call synchronize_rcu() to wait for the RX path
797 * should it be using the interface and enqueuing 863 * should it be using the interface and enqueuing
798 * frames at this very time on another CPU, and 864 * frames at this very time on another CPU.
799 * for the sta free call_rcu callbacks.
800 */
801 rcu_barrier();
802
803 /*
804 * free_sta_rcu() enqueues a work for the actual
805 * sta cleanup, so we need to flush it while
806 * sdata is still valid.
807 */ 865 */
808 flush_workqueue(local->workqueue); 866 synchronize_rcu();
809
810 skb_queue_purge(&sdata->skb_queue); 867 skb_queue_purge(&sdata->skb_queue);
811 868
812 /* 869 /*
@@ -837,14 +894,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
837 hw_reconf_flags = 0; 894 hw_reconf_flags = 0;
838 } 895 }
839 896
840 /* Re-calculate channel-type, in case there are multiple vifs
841 * on different channel types.
842 */
843 orig_ct = local->_oper_channel_type;
844 ieee80211_set_channel_type(local, NULL, NL80211_CHAN_NO_HT);
845
846 /* do after stop to avoid reconfiguring when we stop anyway */ 897 /* do after stop to avoid reconfiguring when we stop anyway */
847 if (hw_reconf_flags || (orig_ct != local->_oper_channel_type)) 898 if (hw_reconf_flags)
848 ieee80211_hw_config(local, hw_reconf_flags); 899 ieee80211_hw_config(local, hw_reconf_flags);
849 900
850 spin_lock_irqsave(&local->queue_stop_reason_lock, flags); 901 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
@@ -1121,6 +1172,13 @@ static void ieee80211_iface_work(struct work_struct *work)
1121 } 1172 }
1122} 1173}
1123 1174
1175static void ieee80211_recalc_smps_work(struct work_struct *work)
1176{
1177 struct ieee80211_sub_if_data *sdata =
1178 container_of(work, struct ieee80211_sub_if_data, recalc_smps);
1179
1180 ieee80211_recalc_smps(sdata);
1181}
1124 1182
1125/* 1183/*
1126 * Helper function to initialise an interface to a specific type. 1184 * Helper function to initialise an interface to a specific type.
@@ -1149,6 +1207,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
1149 1207
1150 skb_queue_head_init(&sdata->skb_queue); 1208 skb_queue_head_init(&sdata->skb_queue);
1151 INIT_WORK(&sdata->work, ieee80211_iface_work); 1209 INIT_WORK(&sdata->work, ieee80211_iface_work);
1210 INIT_WORK(&sdata->recalc_smps, ieee80211_recalc_smps_work);
1152 1211
1153 switch (type) { 1212 switch (type) {
1154 case NL80211_IFTYPE_P2P_GO: 1213 case NL80211_IFTYPE_P2P_GO:
@@ -1157,7 +1216,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
1157 sdata->vif.p2p = true; 1216 sdata->vif.p2p = true;
1158 /* fall through */ 1217 /* fall through */
1159 case NL80211_IFTYPE_AP: 1218 case NL80211_IFTYPE_AP:
1160 skb_queue_head_init(&sdata->u.ap.ps_bc_buf); 1219 skb_queue_head_init(&sdata->u.ap.ps.bc_buf);
1161 INIT_LIST_HEAD(&sdata->u.ap.vlans); 1220 INIT_LIST_HEAD(&sdata->u.ap.vlans);
1162 break; 1221 break;
1163 case NL80211_IFTYPE_P2P_CLIENT: 1222 case NL80211_IFTYPE_P2P_CLIENT:
@@ -1282,11 +1341,6 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
1282 if (type == ieee80211_vif_type_p2p(&sdata->vif)) 1341 if (type == ieee80211_vif_type_p2p(&sdata->vif))
1283 return 0; 1342 return 0;
1284 1343
1285 /* Setting ad-hoc mode on non-IBSS channel is not supported. */
1286 if (sdata->local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS &&
1287 type == NL80211_IFTYPE_ADHOC)
1288 return -EOPNOTSUPP;
1289
1290 if (ieee80211_sdata_running(sdata)) { 1344 if (ieee80211_sdata_running(sdata)) {
1291 ret = ieee80211_runtime_change_iftype(sdata, type); 1345 ret = ieee80211_runtime_change_iftype(sdata, type);
1292 if (ret) 1346 if (ret)
@@ -1298,9 +1352,6 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
1298 } 1352 }
1299 1353
1300 /* reset some values that shouldn't be kept across type changes */ 1354 /* reset some values that shouldn't be kept across type changes */
1301 sdata->vif.bss_conf.basic_rates =
1302 ieee80211_mandatory_rates(sdata->local,
1303 sdata->local->oper_channel->band);
1304 sdata->drop_unencrypted = 0; 1355 sdata->drop_unencrypted = 0;
1305 if (type == NL80211_IFTYPE_STATION) 1356 if (type == NL80211_IFTYPE_STATION)
1306 sdata->u.mgd.use_4addr = false; 1357 sdata->u.mgd.use_4addr = false;
@@ -1432,6 +1483,15 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
1432 mutex_unlock(&local->iflist_mtx); 1483 mutex_unlock(&local->iflist_mtx);
1433} 1484}
1434 1485
1486static void ieee80211_cleanup_sdata_stas_wk(struct work_struct *wk)
1487{
1488 struct ieee80211_sub_if_data *sdata;
1489
1490 sdata = container_of(wk, struct ieee80211_sub_if_data, cleanup_stations_wk);
1491
1492 ieee80211_cleanup_sdata_stas(sdata);
1493}
1494
1435int ieee80211_if_add(struct ieee80211_local *local, const char *name, 1495int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1436 struct wireless_dev **new_wdev, enum nl80211_iftype type, 1496 struct wireless_dev **new_wdev, enum nl80211_iftype type,
1437 struct vif_params *params) 1497 struct vif_params *params)
@@ -1507,6 +1567,10 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1507 1567
1508 INIT_LIST_HEAD(&sdata->key_list); 1568 INIT_LIST_HEAD(&sdata->key_list);
1509 1569
1570 spin_lock_init(&sdata->cleanup_stations_lock);
1571 INIT_LIST_HEAD(&sdata->cleanup_stations);
1572 INIT_WORK(&sdata->cleanup_stations_wk, ieee80211_cleanup_sdata_stas_wk);
1573
1510 for (i = 0; i < IEEE80211_NUM_BANDS; i++) { 1574 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
1511 struct ieee80211_supported_band *sband; 1575 struct ieee80211_supported_band *sband;
1512 sband = local->hw.wiphy->bands[i]; 1576 sband = local->hw.wiphy->bands[i];
@@ -1523,6 +1587,9 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1523 1587
1524 ieee80211_set_default_queues(sdata); 1588 ieee80211_set_default_queues(sdata);
1525 1589
1590 sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
1591 sdata->user_power_level = local->user_power_level;
1592
1526 /* setup type-dependent data */ 1593 /* setup type-dependent data */
1527 ieee80211_setup_sdata(sdata, type); 1594 ieee80211_setup_sdata(sdata, type);
1528 1595
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index d27e61aaa71b..619c5d697999 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -339,7 +339,7 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
339 key->conf.iv_len = TKIP_IV_LEN; 339 key->conf.iv_len = TKIP_IV_LEN;
340 key->conf.icv_len = TKIP_ICV_LEN; 340 key->conf.icv_len = TKIP_ICV_LEN;
341 if (seq) { 341 if (seq) {
342 for (i = 0; i < NUM_RX_DATA_QUEUES; i++) { 342 for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
343 key->u.tkip.rx[i].iv32 = 343 key->u.tkip.rx[i].iv32 =
344 get_unaligned_le32(&seq[2]); 344 get_unaligned_le32(&seq[2]);
345 key->u.tkip.rx[i].iv16 = 345 key->u.tkip.rx[i].iv16 =
@@ -352,7 +352,7 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
352 key->conf.iv_len = CCMP_HDR_LEN; 352 key->conf.iv_len = CCMP_HDR_LEN;
353 key->conf.icv_len = CCMP_MIC_LEN; 353 key->conf.icv_len = CCMP_MIC_LEN;
354 if (seq) { 354 if (seq) {
355 for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++) 355 for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++)
356 for (j = 0; j < CCMP_PN_LEN; j++) 356 for (j = 0; j < CCMP_PN_LEN; j++)
357 key->u.ccmp.rx_pn[i][j] = 357 key->u.ccmp.rx_pn[i][j] =
358 seq[CCMP_PN_LEN - j - 1]; 358 seq[CCMP_PN_LEN - j - 1];
@@ -372,8 +372,9 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
372 key->conf.iv_len = 0; 372 key->conf.iv_len = 0;
373 key->conf.icv_len = sizeof(struct ieee80211_mmie); 373 key->conf.icv_len = sizeof(struct ieee80211_mmie);
374 if (seq) 374 if (seq)
375 for (j = 0; j < 6; j++) 375 for (j = 0; j < CMAC_PN_LEN; j++)
376 key->u.aes_cmac.rx_pn[j] = seq[6 - j - 1]; 376 key->u.aes_cmac.rx_pn[j] =
377 seq[CMAC_PN_LEN - j - 1];
377 /* 378 /*
378 * Initialize AES key state here as an optimization so that 379 * Initialize AES key state here as an optimization so that
379 * it does not need to be initialized for every packet. 380 * it does not need to be initialized for every packet.
@@ -654,16 +655,16 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
654 655
655 switch (key->conf.cipher) { 656 switch (key->conf.cipher) {
656 case WLAN_CIPHER_SUITE_TKIP: 657 case WLAN_CIPHER_SUITE_TKIP:
657 if (WARN_ON(tid < 0 || tid >= NUM_RX_DATA_QUEUES)) 658 if (WARN_ON(tid < 0 || tid >= IEEE80211_NUM_TIDS))
658 return; 659 return;
659 seq->tkip.iv32 = key->u.tkip.rx[tid].iv32; 660 seq->tkip.iv32 = key->u.tkip.rx[tid].iv32;
660 seq->tkip.iv16 = key->u.tkip.rx[tid].iv16; 661 seq->tkip.iv16 = key->u.tkip.rx[tid].iv16;
661 break; 662 break;
662 case WLAN_CIPHER_SUITE_CCMP: 663 case WLAN_CIPHER_SUITE_CCMP:
663 if (WARN_ON(tid < -1 || tid >= NUM_RX_DATA_QUEUES)) 664 if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS))
664 return; 665 return;
665 if (tid < 0) 666 if (tid < 0)
666 pn = key->u.ccmp.rx_pn[NUM_RX_DATA_QUEUES]; 667 pn = key->u.ccmp.rx_pn[IEEE80211_NUM_TIDS];
667 else 668 else
668 pn = key->u.ccmp.rx_pn[tid]; 669 pn = key->u.ccmp.rx_pn[tid];
669 memcpy(seq->ccmp.pn, pn, CCMP_PN_LEN); 670 memcpy(seq->ccmp.pn, pn, CCMP_PN_LEN);
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 7d4e31f037d7..382dc44ed330 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -30,8 +30,6 @@
30#define TKIP_ICV_LEN 4 30#define TKIP_ICV_LEN 4
31#define CMAC_PN_LEN 6 31#define CMAC_PN_LEN 6
32 32
33#define NUM_RX_DATA_QUEUES 16
34
35struct ieee80211_local; 33struct ieee80211_local;
36struct ieee80211_sub_if_data; 34struct ieee80211_sub_if_data;
37struct sta_info; 35struct sta_info;
@@ -82,17 +80,20 @@ struct ieee80211_key {
82 struct tkip_ctx tx; 80 struct tkip_ctx tx;
83 81
84 /* last received RSC */ 82 /* last received RSC */
85 struct tkip_ctx rx[NUM_RX_DATA_QUEUES]; 83 struct tkip_ctx rx[IEEE80211_NUM_TIDS];
84
85 /* number of mic failures */
86 u32 mic_failures;
86 } tkip; 87 } tkip;
87 struct { 88 struct {
88 atomic64_t tx_pn; 89 atomic64_t tx_pn;
89 /* 90 /*
90 * Last received packet number. The first 91 * Last received packet number. The first
91 * NUM_RX_DATA_QUEUES counters are used with Data 92 * IEEE80211_NUM_TIDS counters are used with Data
92 * frames and the last counter is used with Robust 93 * frames and the last counter is used with Robust
93 * Management frames. 94 * Management frames.
94 */ 95 */
95 u8 rx_pn[NUM_RX_DATA_QUEUES + 1][CCMP_PN_LEN]; 96 u8 rx_pn[IEEE80211_NUM_TIDS + 1][CCMP_PN_LEN];
96 struct crypto_cipher *tfm; 97 struct crypto_cipher *tfm;
97 u32 replays; /* dot11RSNAStatsCCMPReplays */ 98 u32 replays; /* dot11RSNAStatsCCMPReplays */
98 } ccmp; 99 } ccmp;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index c80c4490351c..1b087fff93e7 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -93,15 +93,15 @@ static void ieee80211_reconfig_filter(struct work_struct *work)
93 ieee80211_configure_filter(local); 93 ieee80211_configure_filter(local);
94} 94}
95 95
96int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) 96static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
97{ 97{
98 struct ieee80211_sub_if_data *sdata;
98 struct ieee80211_channel *chan; 99 struct ieee80211_channel *chan;
99 int ret = 0; 100 u32 changed = 0;
100 int power; 101 int power;
101 enum nl80211_channel_type channel_type; 102 enum nl80211_channel_type channel_type;
102 u32 offchannel_flag; 103 u32 offchannel_flag;
103 104 bool scanning = false;
104 might_sleep();
105 105
106 offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; 106 offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
107 if (local->scan_channel) { 107 if (local->scan_channel) {
@@ -109,19 +109,19 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
109 /* If scanning on oper channel, use whatever channel-type 109 /* If scanning on oper channel, use whatever channel-type
110 * is currently in use. 110 * is currently in use.
111 */ 111 */
112 if (chan == local->oper_channel) 112 if (chan == local->_oper_channel)
113 channel_type = local->_oper_channel_type; 113 channel_type = local->_oper_channel_type;
114 else 114 else
115 channel_type = NL80211_CHAN_NO_HT; 115 channel_type = NL80211_CHAN_NO_HT;
116 } else if (local->tmp_channel) { 116 } else if (local->tmp_channel) {
117 chan = local->tmp_channel; 117 chan = local->tmp_channel;
118 channel_type = local->tmp_channel_type; 118 channel_type = NL80211_CHAN_NO_HT;
119 } else { 119 } else {
120 chan = local->oper_channel; 120 chan = local->_oper_channel;
121 channel_type = local->_oper_channel_type; 121 channel_type = local->_oper_channel_type;
122 } 122 }
123 123
124 if (chan != local->oper_channel || 124 if (chan != local->_oper_channel ||
125 channel_type != local->_oper_channel_type) 125 channel_type != local->_oper_channel_type)
126 local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL; 126 local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
127 else 127 else
@@ -148,22 +148,39 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
148 changed |= IEEE80211_CONF_CHANGE_SMPS; 148 changed |= IEEE80211_CONF_CHANGE_SMPS;
149 } 149 }
150 150
151 if (test_bit(SCAN_SW_SCANNING, &local->scanning) || 151 scanning = test_bit(SCAN_SW_SCANNING, &local->scanning) ||
152 test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) || 152 test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
153 test_bit(SCAN_HW_SCANNING, &local->scanning) || 153 test_bit(SCAN_HW_SCANNING, &local->scanning);
154 !local->ap_power_level) 154 power = chan->max_power;
155 power = chan->max_power;
156 else
157 power = min(chan->max_power, local->ap_power_level);
158 155
159 if (local->user_power_level >= 0) 156 rcu_read_lock();
160 power = min(power, local->user_power_level); 157 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
158 if (!rcu_access_pointer(sdata->vif.chanctx_conf))
159 continue;
160 power = min(power, sdata->vif.bss_conf.txpower);
161 }
162 rcu_read_unlock();
161 163
162 if (local->hw.conf.power_level != power) { 164 if (local->hw.conf.power_level != power) {
163 changed |= IEEE80211_CONF_CHANGE_POWER; 165 changed |= IEEE80211_CONF_CHANGE_POWER;
164 local->hw.conf.power_level = power; 166 local->hw.conf.power_level = power;
165 } 167 }
166 168
169 return changed;
170}
171
172int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
173{
174 int ret = 0;
175
176 might_sleep();
177
178 if (!local->use_chanctx)
179 changed |= ieee80211_hw_conf_chan(local);
180 else
181 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
182 IEEE80211_CONF_CHANGE_POWER);
183
167 if (changed && local->open_count) { 184 if (changed && local->open_count) {
168 ret = drv_config(local, changed); 185 ret = drv_config(local, changed);
169 /* 186 /*
@@ -359,14 +376,6 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
359} 376}
360EXPORT_SYMBOL(ieee80211_restart_hw); 377EXPORT_SYMBOL(ieee80211_restart_hw);
361 378
362static void ieee80211_recalc_smps_work(struct work_struct *work)
363{
364 struct ieee80211_local *local =
365 container_of(work, struct ieee80211_local, recalc_smps);
366
367 ieee80211_recalc_smps(local);
368}
369
370#ifdef CONFIG_INET 379#ifdef CONFIG_INET
371static int ieee80211_ifa_changed(struct notifier_block *nb, 380static int ieee80211_ifa_changed(struct notifier_block *nb,
372 unsigned long data, void *arg) 381 unsigned long data, void *arg)
@@ -465,7 +474,8 @@ ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
465 .tx = 0xffff, 474 .tx = 0xffff,
466 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | 475 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
467 BIT(IEEE80211_STYPE_AUTH >> 4) | 476 BIT(IEEE80211_STYPE_AUTH >> 4) |
468 BIT(IEEE80211_STYPE_DEAUTH >> 4), 477 BIT(IEEE80211_STYPE_DEAUTH >> 4) |
478 BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
469 }, 479 },
470 [NL80211_IFTYPE_STATION] = { 480 [NL80211_IFTYPE_STATION] = {
471 .tx = 0xffff, 481 .tx = 0xffff,
@@ -540,6 +550,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
540 struct ieee80211_local *local; 550 struct ieee80211_local *local;
541 int priv_size, i; 551 int priv_size, i;
542 struct wiphy *wiphy; 552 struct wiphy *wiphy;
553 bool use_chanctx;
543 554
544 if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config || 555 if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config ||
545 !ops->add_interface || !ops->remove_interface || 556 !ops->add_interface || !ops->remove_interface ||
@@ -549,6 +560,14 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
549 if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove))) 560 if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove)))
550 return NULL; 561 return NULL;
551 562
563 /* check all or no channel context operations exist */
564 i = !!ops->add_chanctx + !!ops->remove_chanctx +
565 !!ops->change_chanctx + !!ops->assign_vif_chanctx +
566 !!ops->unassign_vif_chanctx;
567 if (WARN_ON(i != 0 && i != 5))
568 return NULL;
569 use_chanctx = i == 5;
570
552 /* Ensure 32-byte alignment of our private data and hw private data. 571 /* Ensure 32-byte alignment of our private data and hw private data.
553 * We use the wiphy priv data for both our ieee80211_local and for 572 * We use the wiphy priv data for both our ieee80211_local and for
554 * the driver's private data 573 * the driver's private data
@@ -584,8 +603,15 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
584 if (ops->remain_on_channel) 603 if (ops->remain_on_channel)
585 wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; 604 wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
586 605
587 wiphy->features = NL80211_FEATURE_SK_TX_STATUS | 606 wiphy->features |= NL80211_FEATURE_SK_TX_STATUS |
588 NL80211_FEATURE_HT_IBSS; 607 NL80211_FEATURE_SAE |
608 NL80211_FEATURE_HT_IBSS |
609 NL80211_FEATURE_VIF_TXPOWER;
610
611 if (!ops->hw_scan)
612 wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN |
613 NL80211_FEATURE_AP_SCAN;
614
589 615
590 if (!ops->set_key) 616 if (!ops->set_key)
591 wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 617 wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -599,6 +625,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
599 local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN); 625 local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
600 626
601 local->ops = ops; 627 local->ops = ops;
628 local->use_chanctx = use_chanctx;
602 629
603 /* set up some defaults */ 630 /* set up some defaults */
604 local->hw.queues = 1; 631 local->hw.queues = 1;
@@ -612,7 +639,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
612 local->hw.radiotap_mcs_details = IEEE80211_RADIOTAP_MCS_HAVE_MCS | 639 local->hw.radiotap_mcs_details = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
613 IEEE80211_RADIOTAP_MCS_HAVE_GI | 640 IEEE80211_RADIOTAP_MCS_HAVE_GI |
614 IEEE80211_RADIOTAP_MCS_HAVE_BW; 641 IEEE80211_RADIOTAP_MCS_HAVE_BW;
615 local->user_power_level = -1; 642 local->hw.radiotap_vht_details = IEEE80211_RADIOTAP_VHT_KNOWN_GI |
643 IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH;
644 local->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
616 wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask; 645 wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask;
617 646
618 INIT_LIST_HEAD(&local->interfaces); 647 INIT_LIST_HEAD(&local->interfaces);
@@ -626,6 +655,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
626 spin_lock_init(&local->filter_lock); 655 spin_lock_init(&local->filter_lock);
627 spin_lock_init(&local->queue_stop_reason_lock); 656 spin_lock_init(&local->queue_stop_reason_lock);
628 657
658 INIT_LIST_HEAD(&local->chanctx_list);
659 mutex_init(&local->chanctx_mtx);
660
629 /* 661 /*
630 * The rx_skb_queue is only accessed from tasklets, 662 * The rx_skb_queue is only accessed from tasklets,
631 * but other SKB queues are used from within IRQ 663 * but other SKB queues are used from within IRQ
@@ -641,7 +673,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
641 INIT_WORK(&local->restart_work, ieee80211_restart_work); 673 INIT_WORK(&local->restart_work, ieee80211_restart_work);
642 674
643 INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter); 675 INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
644 INIT_WORK(&local->recalc_smps, ieee80211_recalc_smps_work);
645 local->smps_mode = IEEE80211_SMPS_OFF; 676 local->smps_mode = IEEE80211_SMPS_OFF;
646 677
647 INIT_WORK(&local->dynamic_ps_enable_work, 678 INIT_WORK(&local->dynamic_ps_enable_work,
@@ -719,6 +750,25 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
719 if ((hw->flags & IEEE80211_HW_SCAN_WHILE_IDLE) && !local->ops->hw_scan) 750 if ((hw->flags & IEEE80211_HW_SCAN_WHILE_IDLE) && !local->ops->hw_scan)
720 return -EINVAL; 751 return -EINVAL;
721 752
753 if (!local->use_chanctx) {
754 for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) {
755 const struct ieee80211_iface_combination *comb;
756
757 comb = &local->hw.wiphy->iface_combinations[i];
758
759 if (comb->num_different_channels > 1)
760 return -EINVAL;
761 }
762 } else {
763 /*
764 * WDS is currently prohibited when channel contexts are used
765 * because there's no clear definition of which channel WDS
766 * type interfaces use
767 */
768 if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_WDS))
769 return -EINVAL;
770 }
771
722 /* Only HW csum features are currently compatible with mac80211 */ 772 /* Only HW csum features are currently compatible with mac80211 */
723 feature_whitelist = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 773 feature_whitelist = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
724 NETIF_F_HW_CSUM; 774 NETIF_F_HW_CSUM;
@@ -728,6 +778,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
728 if (hw->max_report_rates == 0) 778 if (hw->max_report_rates == 0)
729 hw->max_report_rates = hw->max_rates; 779 hw->max_report_rates = hw->max_rates;
730 780
781 local->rx_chains = 1;
782
731 /* 783 /*
732 * generic code guarantees at least one band, 784 * generic code guarantees at least one band,
733 * set this very early because much code assumes 785 * set this very early because much code assumes
@@ -743,18 +795,28 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
743 sband = local->hw.wiphy->bands[band]; 795 sband = local->hw.wiphy->bands[band];
744 if (!sband) 796 if (!sband)
745 continue; 797 continue;
746 if (!local->oper_channel) { 798 if (!local->use_chanctx && !local->_oper_channel) {
747 /* init channel we're on */ 799 /* init channel we're on */
748 local->hw.conf.channel = 800 local->hw.conf.channel =
749 local->oper_channel = &sband->channels[0]; 801 local->_oper_channel = &sband->channels[0];
750 local->hw.conf.channel_type = NL80211_CHAN_NO_HT; 802 local->hw.conf.channel_type = NL80211_CHAN_NO_HT;
751 } 803 }
804 cfg80211_chandef_create(&local->monitor_chandef,
805 &sband->channels[0],
806 NL80211_CHAN_NO_HT);
752 channels += sband->n_channels; 807 channels += sband->n_channels;
753 808
754 if (max_bitrates < sband->n_bitrates) 809 if (max_bitrates < sband->n_bitrates)
755 max_bitrates = sband->n_bitrates; 810 max_bitrates = sband->n_bitrates;
756 supp_ht = supp_ht || sband->ht_cap.ht_supported; 811 supp_ht = supp_ht || sband->ht_cap.ht_supported;
757 supp_vht = supp_vht || sband->vht_cap.vht_supported; 812 supp_vht = supp_vht || sband->vht_cap.vht_supported;
813
814 if (sband->ht_cap.ht_supported)
815 local->rx_chains =
816 max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
817 local->rx_chains);
818
819 /* TODO: consider VHT for RX chains, hopefully it's the same */
758 } 820 }
759 821
760 local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) + 822 local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) +
@@ -778,19 +840,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
778 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); 840 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
779 hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR); 841 hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR);
780 842
781 /* 843 /* mac80211 doesn't support more than one IBSS interface right now */
782 * mac80211 doesn't support more than 1 channel, and also not more
783 * than one IBSS interface
784 */
785 for (i = 0; i < hw->wiphy->n_iface_combinations; i++) { 844 for (i = 0; i < hw->wiphy->n_iface_combinations; i++) {
786 const struct ieee80211_iface_combination *c; 845 const struct ieee80211_iface_combination *c;
787 int j; 846 int j;
788 847
789 c = &hw->wiphy->iface_combinations[i]; 848 c = &hw->wiphy->iface_combinations[i];
790 849
791 if (c->num_different_channels > 1)
792 return -EINVAL;
793
794 for (j = 0; j < c->n_limits; j++) 850 for (j = 0; j < c->n_limits; j++)
795 if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) && 851 if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) &&
796 c->limits[j].max > 1) 852 c->limits[j].max > 1)
@@ -830,9 +886,21 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
830 if (supp_ht) 886 if (supp_ht)
831 local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap); 887 local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap);
832 888
833 if (supp_vht) 889 if (supp_vht) {
834 local->scan_ies_len += 890 local->scan_ies_len +=
835 2 + sizeof(struct ieee80211_vht_capabilities); 891 2 + sizeof(struct ieee80211_vht_cap);
892
893 /*
894 * (for now at least), drivers wanting to use VHT must
895 * support channel contexts, as they contain all the
896 * necessary VHT information and the global hw config
897 * doesn't (yet)
898 */
899 if (WARN_ON(!local->use_chanctx)) {
900 result = -EINVAL;
901 goto fail_wiphy_register;
902 }
903 }
836 904
837 if (!local->ops->hw_scan) { 905 if (!local->ops->hw_scan) {
838 /* For hw_scan, driver needs to set these up. */ 906 /* For hw_scan, driver needs to set these up. */
@@ -871,8 +939,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
871 local->hw.wiphy->cipher_suites, 939 local->hw.wiphy->cipher_suites,
872 sizeof(u32) * local->hw.wiphy->n_cipher_suites, 940 sizeof(u32) * local->hw.wiphy->n_cipher_suites,
873 GFP_KERNEL); 941 GFP_KERNEL);
874 if (!suites) 942 if (!suites) {
875 return -ENOMEM; 943 result = -ENOMEM;
944 goto fail_wiphy_register;
945 }
876 for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) { 946 for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) {
877 u32 suite = local->hw.wiphy->cipher_suites[r]; 947 u32 suite = local->hw.wiphy->cipher_suites[r];
878 if (suite == WLAN_CIPHER_SUITE_WEP40 || 948 if (suite == WLAN_CIPHER_SUITE_WEP40 ||
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index ff0296c7bab8..649ad513547f 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -76,7 +76,7 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
76 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 76 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
77 struct ieee80211_local *local = sdata->local; 77 struct ieee80211_local *local = sdata->local;
78 u32 basic_rates = 0; 78 u32 basic_rates = 0;
79 enum nl80211_channel_type sta_channel_type = NL80211_CHAN_NO_HT; 79 struct cfg80211_chan_def sta_chan_def;
80 80
81 /* 81 /*
82 * As support for each feature is added, check for matching 82 * As support for each feature is added, check for matching
@@ -97,23 +97,17 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
97 (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth))) 97 (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)))
98 goto mismatch; 98 goto mismatch;
99 99
100 ieee80211_sta_get_rates(local, ie, local->oper_channel->band, 100 ieee80211_sta_get_rates(local, ie, ieee80211_get_sdata_band(sdata),
101 &basic_rates); 101 &basic_rates);
102 102
103 if (sdata->vif.bss_conf.basic_rates != basic_rates) 103 if (sdata->vif.bss_conf.basic_rates != basic_rates)
104 goto mismatch; 104 goto mismatch;
105 105
106 if (ie->ht_operation) 106 ieee80211_ht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan,
107 sta_channel_type = 107 ie->ht_operation, &sta_chan_def);
108 ieee80211_ht_oper_to_channel_type(ie->ht_operation); 108
109 109 if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chandef,
110 /* Disallow HT40+/- mismatch */ 110 &sta_chan_def))
111 if (ie->ht_operation &&
112 (sdata->vif.bss_conf.channel_type == NL80211_CHAN_HT40MINUS ||
113 sdata->vif.bss_conf.channel_type == NL80211_CHAN_HT40PLUS) &&
114 (sta_channel_type == NL80211_CHAN_HT40MINUS ||
115 sta_channel_type == NL80211_CHAN_HT40PLUS) &&
116 sdata->vif.bss_conf.channel_type != sta_channel_type)
117 goto mismatch; 111 goto mismatch;
118 112
119 return true; 113 return true;
@@ -129,7 +123,7 @@ mismatch:
129bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie) 123bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
130{ 124{
131 return (ie->mesh_config->meshconf_cap & 125 return (ie->mesh_config->meshconf_cap &
132 MESHCONF_CAPAB_ACCEPT_PLINKS) != 0; 126 IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS) != 0;
133} 127}
134 128
135/** 129/**
@@ -169,7 +163,7 @@ int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
169 return -ENOMEM; 163 return -ENOMEM;
170 sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1; 164 sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1;
171 for (i = 0; i < RMC_BUCKETS; i++) 165 for (i = 0; i < RMC_BUCKETS; i++)
172 INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i].list); 166 INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i]);
173 return 0; 167 return 0;
174} 168}
175 169
@@ -183,7 +177,7 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata)
183 return; 177 return;
184 178
185 for (i = 0; i < RMC_BUCKETS; i++) 179 for (i = 0; i < RMC_BUCKETS; i++)
186 list_for_each_entry_safe(p, n, &rmc->bucket[i].list, list) { 180 list_for_each_entry_safe(p, n, &rmc->bucket[i], list) {
187 list_del(&p->list); 181 list_del(&p->list);
188 kmem_cache_free(rm_cache, p); 182 kmem_cache_free(rm_cache, p);
189 } 183 }
@@ -216,7 +210,7 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
216 /* Don't care about endianness since only match matters */ 210 /* Don't care about endianness since only match matters */
217 memcpy(&seqnum, &mesh_hdr->seqnum, sizeof(mesh_hdr->seqnum)); 211 memcpy(&seqnum, &mesh_hdr->seqnum, sizeof(mesh_hdr->seqnum));
218 idx = le32_to_cpu(mesh_hdr->seqnum) & rmc->idx_mask; 212 idx = le32_to_cpu(mesh_hdr->seqnum) & rmc->idx_mask;
219 list_for_each_entry_safe(p, n, &rmc->bucket[idx].list, list) { 213 list_for_each_entry_safe(p, n, &rmc->bucket[idx], list) {
220 ++entries; 214 ++entries;
221 if (time_after(jiffies, p->exp_time) || 215 if (time_after(jiffies, p->exp_time) ||
222 (entries == RMC_QUEUE_MAX_LEN)) { 216 (entries == RMC_QUEUE_MAX_LEN)) {
@@ -235,7 +229,7 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
235 p->seqnum = seqnum; 229 p->seqnum = seqnum;
236 p->exp_time = jiffies + RMC_TIMEOUT; 230 p->exp_time = jiffies + RMC_TIMEOUT;
237 memcpy(p->sa, sa, ETH_ALEN); 231 memcpy(p->sa, sa, ETH_ALEN);
238 list_add(&p->list, &rmc->bucket[idx].list); 232 list_add(&p->list, &rmc->bucket[idx]);
239 return 0; 233 return 0;
240} 234}
241 235
@@ -264,16 +258,16 @@ mesh_add_meshconf_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
264 /* Authentication Protocol identifier */ 258 /* Authentication Protocol identifier */
265 *pos++ = ifmsh->mesh_auth_id; 259 *pos++ = ifmsh->mesh_auth_id;
266 /* Mesh Formation Info - number of neighbors */ 260 /* Mesh Formation Info - number of neighbors */
267 neighbors = atomic_read(&ifmsh->mshstats.estab_plinks); 261 neighbors = atomic_read(&ifmsh->estab_plinks);
268 /* Number of neighbor mesh STAs or 15 whichever is smaller */ 262 /* Number of neighbor mesh STAs or 15 whichever is smaller */
269 neighbors = (neighbors > 15) ? 15 : neighbors; 263 neighbors = (neighbors > 15) ? 15 : neighbors;
270 *pos++ = neighbors << 1; 264 *pos++ = neighbors << 1;
271 /* Mesh capability */ 265 /* Mesh capability */
272 *pos = MESHCONF_CAPAB_FORWARDING; 266 *pos = IEEE80211_MESHCONF_CAPAB_FORWARDING;
273 *pos |= ifmsh->accepting_plinks ? 267 *pos |= ifmsh->accepting_plinks ?
274 MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; 268 IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
275 *pos++ |= ifmsh->adjusting_tbtt ? 269 *pos++ |= ifmsh->adjusting_tbtt ?
276 MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00; 270 IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00;
277 *pos++ = 0x00; 271 *pos++ = 0x00;
278 272
279 return 0; 273 return 0;
@@ -355,12 +349,22 @@ int mesh_add_ds_params_ie(struct sk_buff *skb,
355{ 349{
356 struct ieee80211_local *local = sdata->local; 350 struct ieee80211_local *local = sdata->local;
357 struct ieee80211_supported_band *sband; 351 struct ieee80211_supported_band *sband;
358 struct ieee80211_channel *chan = local->oper_channel; 352 struct ieee80211_chanctx_conf *chanctx_conf;
353 struct ieee80211_channel *chan;
359 u8 *pos; 354 u8 *pos;
360 355
361 if (skb_tailroom(skb) < 3) 356 if (skb_tailroom(skb) < 3)
362 return -ENOMEM; 357 return -ENOMEM;
363 358
359 rcu_read_lock();
360 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
361 if (WARN_ON(!chanctx_conf)) {
362 rcu_read_unlock();
363 return -EINVAL;
364 }
365 chan = chanctx_conf->def.chan;
366 rcu_read_unlock();
367
364 sband = local->hw.wiphy->bands[chan->band]; 368 sband = local->hw.wiphy->bands[chan->band];
365 if (sband->band == IEEE80211_BAND_2GHZ) { 369 if (sband->band == IEEE80211_BAND_2GHZ) {
366 pos = skb_put(skb, 2 + 1); 370 pos = skb_put(skb, 2 + 1);
@@ -376,12 +380,13 @@ int mesh_add_ht_cap_ie(struct sk_buff *skb,
376 struct ieee80211_sub_if_data *sdata) 380 struct ieee80211_sub_if_data *sdata)
377{ 381{
378 struct ieee80211_local *local = sdata->local; 382 struct ieee80211_local *local = sdata->local;
383 enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
379 struct ieee80211_supported_band *sband; 384 struct ieee80211_supported_band *sband;
380 u8 *pos; 385 u8 *pos;
381 386
382 sband = local->hw.wiphy->bands[local->oper_channel->band]; 387 sband = local->hw.wiphy->bands[band];
383 if (!sband->ht_cap.ht_supported || 388 if (!sband->ht_cap.ht_supported ||
384 sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT) 389 sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
385 return 0; 390 return 0;
386 391
387 if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap)) 392 if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
@@ -397,14 +402,26 @@ int mesh_add_ht_oper_ie(struct sk_buff *skb,
397 struct ieee80211_sub_if_data *sdata) 402 struct ieee80211_sub_if_data *sdata)
398{ 403{
399 struct ieee80211_local *local = sdata->local; 404 struct ieee80211_local *local = sdata->local;
400 struct ieee80211_channel *channel = local->oper_channel; 405 struct ieee80211_chanctx_conf *chanctx_conf;
406 struct ieee80211_channel *channel;
401 enum nl80211_channel_type channel_type = 407 enum nl80211_channel_type channel_type =
402 sdata->vif.bss_conf.channel_type; 408 cfg80211_get_chandef_type(&sdata->vif.bss_conf.chandef);
403 struct ieee80211_supported_band *sband = 409 struct ieee80211_supported_band *sband;
404 local->hw.wiphy->bands[channel->band]; 410 struct ieee80211_sta_ht_cap *ht_cap;
405 struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
406 u8 *pos; 411 u8 *pos;
407 412
413 rcu_read_lock();
414 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
415 if (WARN_ON(!chanctx_conf)) {
416 rcu_read_unlock();
417 return -EINVAL;
418 }
419 channel = chanctx_conf->def.chan;
420 rcu_read_unlock();
421
422 sband = local->hw.wiphy->bands[channel->band];
423 ht_cap = &sband->ht_cap;
424
408 if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT) 425 if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT)
409 return 0; 426 return 0;
410 427
@@ -412,7 +429,7 @@ int mesh_add_ht_oper_ie(struct sk_buff *skb,
412 return -ENOMEM; 429 return -ENOMEM;
413 430
414 pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation)); 431 pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
415 ieee80211_ie_build_ht_oper(pos, ht_cap, channel, channel_type, 432 ieee80211_ie_build_ht_oper(pos, ht_cap, &sdata->vif.bss_conf.chandef,
416 sdata->vif.bss_conf.ht_operation_mode); 433 sdata->vif.bss_conf.ht_operation_mode);
417 434
418 return 0; 435 return 0;
@@ -610,7 +627,7 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
610 sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; 627 sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
611 sdata->vif.bss_conf.basic_rates = 628 sdata->vif.bss_conf.basic_rates =
612 ieee80211_mandatory_rates(sdata->local, 629 ieee80211_mandatory_rates(sdata->local,
613 sdata->local->oper_channel->band); 630 ieee80211_get_sdata_band(sdata));
614 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | 631 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
615 BSS_CHANGED_BEACON_ENABLED | 632 BSS_CHANGED_BEACON_ENABLED |
616 BSS_CHANGED_HT | 633 BSS_CHANGED_HT |
@@ -680,8 +697,10 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
680 ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, 697 ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
681 &elems); 698 &elems);
682 699
683 /* ignore beacons from secure mesh peers if our security is off */ 700 /* ignore non-mesh or secure / unsecure mismatch */
684 if (elems.rsn_len && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) 701 if ((!elems.mesh_id || !elems.mesh_config) ||
702 (elems.rsn && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) ||
703 (!elems.rsn && sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE))
685 return; 704 return;
686 705
687 if (elems.ds_params && elems.ds_params_len == 1) 706 if (elems.ds_params && elems.ds_params_len == 1)
@@ -694,8 +713,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
694 if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) 713 if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
695 return; 714 return;
696 715
697 if (elems.mesh_id && elems.mesh_config && 716 if (mesh_matches_local(sdata, &elems))
698 mesh_matches_local(sdata, &elems))
699 mesh_neighbour_update(sdata, mgmt->sa, &elems); 717 mesh_neighbour_update(sdata, mgmt->sa, &elems);
700 718
701 if (ifmsh->sync_ops) 719 if (ifmsh->sync_ops)
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 25d0f17dec71..84c28c6101cd 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -19,20 +19,6 @@
19/* Data structures */ 19/* Data structures */
20 20
21/** 21/**
22 * enum mesh_config_capab_flags - mesh config IE capability flags
23 *
24 * @MESHCONF_CAPAB_ACCEPT_PLINKS: STA is willing to establish
25 * additional mesh peerings with other mesh STAs
26 * @MESHCONF_CAPAB_FORWARDING: the STA forwards MSDUs
27 * @MESHCONF_CAPAB_TBTT_ADJUSTING: TBTT adjustment procedure is ongoing
28 */
29enum mesh_config_capab_flags {
30 MESHCONF_CAPAB_ACCEPT_PLINKS = BIT(0),
31 MESHCONF_CAPAB_FORWARDING = BIT(3),
32 MESHCONF_CAPAB_TBTT_ADJUSTING = BIT(5),
33};
34
35/**
36 * enum mesh_path_flags - mac80211 mesh path flags 22 * enum mesh_path_flags - mac80211 mesh path flags
37 * 23 *
38 * 24 *
@@ -198,7 +184,7 @@ struct rmc_entry {
198}; 184};
199 185
200struct mesh_rmc { 186struct mesh_rmc {
201 struct rmc_entry bucket[RMC_BUCKETS]; 187 struct list_head bucket[RMC_BUCKETS];
202 u32 idx_mask; 188 u32 idx_mask;
203}; 189};
204 190
@@ -256,7 +242,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
256void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); 242void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
257void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); 243void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
258void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); 244void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);
259struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method); 245const struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method);
260 246
261/* Mesh paths */ 247/* Mesh paths */
262int mesh_nexthop_lookup(struct sk_buff *skb, 248int mesh_nexthop_lookup(struct sk_buff *skb,
@@ -324,7 +310,7 @@ extern int mesh_allocated;
324static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata) 310static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata)
325{ 311{
326 return sdata->u.mesh.mshcfg.dot11MeshMaxPeerLinks - 312 return sdata->u.mesh.mshcfg.dot11MeshMaxPeerLinks -
327 atomic_read(&sdata->u.mesh.mshstats.estab_plinks); 313 atomic_read(&sdata->u.mesh.estab_plinks);
328} 314}
329 315
330static inline bool mesh_plink_availables(struct ieee80211_sub_if_data *sdata) 316static inline bool mesh_plink_availables(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 3ab34d816897..4b274e9c91a5 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -19,12 +19,6 @@
19#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \ 19#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \
20 jiffies + HZ * t / 1000)) 20 jiffies + HZ * t / 1000))
21 21
22#define dot11MeshMaxRetries(s) (s->u.mesh.mshcfg.dot11MeshMaxRetries)
23#define dot11MeshRetryTimeout(s) (s->u.mesh.mshcfg.dot11MeshRetryTimeout)
24#define dot11MeshConfirmTimeout(s) (s->u.mesh.mshcfg.dot11MeshConfirmTimeout)
25#define dot11MeshHoldingTimeout(s) (s->u.mesh.mshcfg.dot11MeshHoldingTimeout)
26#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks)
27
28/* We only need a valid sta if user configured a minimum rssi_threshold. */ 22/* We only need a valid sta if user configured a minimum rssi_threshold. */
29#define rssi_threshold_check(sta, sdata) \ 23#define rssi_threshold_check(sta, sdata) \
30 (sdata->u.mesh.mshcfg.rssi_threshold == 0 ||\ 24 (sdata->u.mesh.mshcfg.rssi_threshold == 0 ||\
@@ -50,14 +44,14 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
50static inline 44static inline
51u32 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) 45u32 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
52{ 46{
53 atomic_inc(&sdata->u.mesh.mshstats.estab_plinks); 47 atomic_inc(&sdata->u.mesh.estab_plinks);
54 return mesh_accept_plinks_update(sdata); 48 return mesh_accept_plinks_update(sdata);
55} 49}
56 50
57static inline 51static inline
58u32 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) 52u32 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
59{ 53{
60 atomic_dec(&sdata->u.mesh.mshstats.estab_plinks); 54 atomic_dec(&sdata->u.mesh.estab_plinks);
61 return mesh_accept_plinks_update(sdata); 55 return mesh_accept_plinks_update(sdata);
62} 56}
63 57
@@ -117,7 +111,7 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
117 u16 ht_opmode; 111 u16 ht_opmode;
118 bool non_ht_sta = false, ht20_sta = false; 112 bool non_ht_sta = false, ht20_sta = false;
119 113
120 if (sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT) 114 if (sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
121 return 0; 115 return 0;
122 116
123 rcu_read_lock(); 117 rcu_read_lock();
@@ -126,14 +120,14 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
126 sta->plink_state != NL80211_PLINK_ESTAB) 120 sta->plink_state != NL80211_PLINK_ESTAB)
127 continue; 121 continue;
128 122
129 switch (sta->ch_type) { 123 switch (sta->ch_width) {
130 case NL80211_CHAN_NO_HT: 124 case NL80211_CHAN_WIDTH_20_NOHT:
131 mpl_dbg(sdata, 125 mpl_dbg(sdata,
132 "mesh_plink %pM: nonHT sta (%pM) is present\n", 126 "mesh_plink %pM: nonHT sta (%pM) is present\n",
133 sdata->vif.addr, sta->sta.addr); 127 sdata->vif.addr, sta->sta.addr);
134 non_ht_sta = true; 128 non_ht_sta = true;
135 goto out; 129 goto out;
136 case NL80211_CHAN_HT20: 130 case NL80211_CHAN_WIDTH_20:
137 mpl_dbg(sdata, 131 mpl_dbg(sdata,
138 "mesh_plink %pM: HT20 sta (%pM) is present\n", 132 "mesh_plink %pM: HT20 sta (%pM) is present\n",
139 sdata->vif.addr, sta->sta.addr); 133 sdata->vif.addr, sta->sta.addr);
@@ -148,7 +142,7 @@ out:
148 if (non_ht_sta) 142 if (non_ht_sta)
149 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED; 143 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;
150 else if (ht20_sta && 144 else if (ht20_sta &&
151 sdata->vif.bss_conf.channel_type > NL80211_CHAN_HT20) 145 sdata->vif.bss_conf.chandef.width > NL80211_CHAN_WIDTH_20)
152 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ; 146 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ;
153 else 147 else
154 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE; 148 ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
@@ -252,6 +246,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
252 mgmt->u.action.u.self_prot.action_code = action; 246 mgmt->u.action.u.self_prot.action_code = action;
253 247
254 if (action != WLAN_SP_MESH_PEERING_CLOSE) { 248 if (action != WLAN_SP_MESH_PEERING_CLOSE) {
249 enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
250
255 /* capability info */ 251 /* capability info */
256 pos = skb_put(skb, 2); 252 pos = skb_put(skb, 2);
257 memset(pos, 0, 2); 253 memset(pos, 0, 2);
@@ -260,10 +256,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
260 pos = skb_put(skb, 2); 256 pos = skb_put(skb, 2);
261 memcpy(pos + 2, &plid, 2); 257 memcpy(pos + 2, &plid, 2);
262 } 258 }
263 if (ieee80211_add_srates_ie(sdata, skb, true, 259 if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
264 local->oper_channel->band) || 260 ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
265 ieee80211_add_ext_srates_ie(sdata, skb, true,
266 local->oper_channel->band) ||
267 mesh_add_rsn_ie(skb, sdata) || 261 mesh_add_rsn_ie(skb, sdata) ||
268 mesh_add_meshid_ie(skb, sdata) || 262 mesh_add_meshid_ie(skb, sdata) ||
269 mesh_add_meshconf_ie(skb, sdata)) 263 mesh_add_meshconf_ie(skb, sdata))
@@ -343,7 +337,7 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
343 struct ieee802_11_elems *elems) 337 struct ieee802_11_elems *elems)
344{ 338{
345 struct ieee80211_local *local = sdata->local; 339 struct ieee80211_local *local = sdata->local;
346 enum ieee80211_band band = local->oper_channel->band; 340 enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
347 struct ieee80211_supported_band *sband; 341 struct ieee80211_supported_band *sband;
348 u32 rates, basic_rates = 0; 342 u32 rates, basic_rates = 0;
349 struct sta_info *sta; 343 struct sta_info *sta;
@@ -378,7 +372,7 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
378 372
379 sta->sta.supp_rates[band] = rates; 373 sta->sta.supp_rates[band] = rates;
380 if (elems->ht_cap_elem && 374 if (elems->ht_cap_elem &&
381 sdata->vif.bss_conf.channel_type != NL80211_CHAN_NO_HT) 375 sdata->vif.bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
382 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 376 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
383 elems->ht_cap_elem, 377 elems->ht_cap_elem,
384 &sta->sta.ht_cap); 378 &sta->sta.ht_cap);
@@ -386,15 +380,19 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
386 memset(&sta->sta.ht_cap, 0, sizeof(sta->sta.ht_cap)); 380 memset(&sta->sta.ht_cap, 0, sizeof(sta->sta.ht_cap));
387 381
388 if (elems->ht_operation) { 382 if (elems->ht_operation) {
383 struct cfg80211_chan_def chandef;
384
389 if (!(elems->ht_operation->ht_param & 385 if (!(elems->ht_operation->ht_param &
390 IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) 386 IEEE80211_HT_PARAM_CHAN_WIDTH_ANY))
391 sta->sta.ht_cap.cap &= 387 sta->sta.ht_cap.cap &=
392 ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; 388 ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
393 sta->ch_type = 389 ieee80211_ht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan,
394 ieee80211_ht_oper_to_channel_type(elems->ht_operation); 390 elems->ht_operation, &chandef);
391 sta->ch_width = chandef.width;
395 } 392 }
396 393
397 rate_control_rate_init(sta); 394 if (insert)
395 rate_control_rate_init(sta);
398 spin_unlock_bh(&sta->lock); 396 spin_unlock_bh(&sta->lock);
399 397
400 if (insert && sta_info_insert(sta)) 398 if (insert && sta_info_insert(sta))
@@ -430,6 +428,7 @@ static void mesh_plink_timer(unsigned long data)
430 struct sta_info *sta; 428 struct sta_info *sta;
431 __le16 llid, plid, reason; 429 __le16 llid, plid, reason;
432 struct ieee80211_sub_if_data *sdata; 430 struct ieee80211_sub_if_data *sdata;
431 struct mesh_config *mshcfg;
433 432
434 /* 433 /*
435 * This STA is valid because sta_info_destroy() will 434 * This STA is valid because sta_info_destroy() will
@@ -456,12 +455,13 @@ static void mesh_plink_timer(unsigned long data)
456 llid = sta->llid; 455 llid = sta->llid;
457 plid = sta->plid; 456 plid = sta->plid;
458 sdata = sta->sdata; 457 sdata = sta->sdata;
458 mshcfg = &sdata->u.mesh.mshcfg;
459 459
460 switch (sta->plink_state) { 460 switch (sta->plink_state) {
461 case NL80211_PLINK_OPN_RCVD: 461 case NL80211_PLINK_OPN_RCVD:
462 case NL80211_PLINK_OPN_SNT: 462 case NL80211_PLINK_OPN_SNT:
463 /* retry timer */ 463 /* retry timer */
464 if (sta->plink_retries < dot11MeshMaxRetries(sdata)) { 464 if (sta->plink_retries < mshcfg->dot11MeshMaxRetries) {
465 u32 rand; 465 u32 rand;
466 mpl_dbg(sta->sdata, 466 mpl_dbg(sta->sdata,
467 "Mesh plink for %pM (retry, timeout): %d %d\n", 467 "Mesh plink for %pM (retry, timeout): %d %d\n",
@@ -484,7 +484,7 @@ static void mesh_plink_timer(unsigned long data)
484 if (!reason) 484 if (!reason)
485 reason = cpu_to_le16(WLAN_REASON_MESH_CONFIRM_TIMEOUT); 485 reason = cpu_to_le16(WLAN_REASON_MESH_CONFIRM_TIMEOUT);
486 sta->plink_state = NL80211_PLINK_HOLDING; 486 sta->plink_state = NL80211_PLINK_HOLDING;
487 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 487 mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
488 spin_unlock_bh(&sta->lock); 488 spin_unlock_bh(&sta->lock);
489 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE, 489 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
490 sta->sta.addr, llid, plid, reason); 490 sta->sta.addr, llid, plid, reason);
@@ -543,7 +543,7 @@ int mesh_plink_open(struct sta_info *sta)
543 return -EBUSY; 543 return -EBUSY;
544 } 544 }
545 sta->plink_state = NL80211_PLINK_OPN_SNT; 545 sta->plink_state = NL80211_PLINK_OPN_SNT;
546 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); 546 mesh_plink_timer_set(sta, sdata->u.mesh.mshcfg.dot11MeshRetryTimeout);
547 spin_unlock_bh(&sta->lock); 547 spin_unlock_bh(&sta->lock);
548 mpl_dbg(sdata, 548 mpl_dbg(sdata,
549 "Mesh plink: starting establishment with %pM\n", 549 "Mesh plink: starting establishment with %pM\n",
@@ -570,6 +570,7 @@ void mesh_plink_block(struct sta_info *sta)
570void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, 570void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt,
571 size_t len, struct ieee80211_rx_status *rx_status) 571 size_t len, struct ieee80211_rx_status *rx_status)
572{ 572{
573 struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
573 struct ieee802_11_elems elems; 574 struct ieee802_11_elems elems;
574 struct sta_info *sta; 575 struct sta_info *sta;
575 enum plink_event event; 576 enum plink_event event;
@@ -777,7 +778,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
777 sta->plid = plid; 778 sta->plid = plid;
778 get_random_bytes(&llid, 2); 779 get_random_bytes(&llid, 2);
779 sta->llid = llid; 780 sta->llid = llid;
780 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); 781 mesh_plink_timer_set(sta,
782 mshcfg->dot11MeshRetryTimeout);
781 spin_unlock_bh(&sta->lock); 783 spin_unlock_bh(&sta->lock);
782 mesh_plink_frame_tx(sdata, 784 mesh_plink_frame_tx(sdata,
783 WLAN_SP_MESH_PEERING_OPEN, 785 WLAN_SP_MESH_PEERING_OPEN,
@@ -803,7 +805,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
803 sta->reason = reason; 805 sta->reason = reason;
804 sta->plink_state = NL80211_PLINK_HOLDING; 806 sta->plink_state = NL80211_PLINK_HOLDING;
805 if (!mod_plink_timer(sta, 807 if (!mod_plink_timer(sta,
806 dot11MeshHoldingTimeout(sdata))) 808 mshcfg->dot11MeshHoldingTimeout))
807 sta->ignore_plink_timer = true; 809 sta->ignore_plink_timer = true;
808 810
809 llid = sta->llid; 811 llid = sta->llid;
@@ -825,7 +827,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
825 case CNF_ACPT: 827 case CNF_ACPT:
826 sta->plink_state = NL80211_PLINK_CNF_RCVD; 828 sta->plink_state = NL80211_PLINK_CNF_RCVD;
827 if (!mod_plink_timer(sta, 829 if (!mod_plink_timer(sta,
828 dot11MeshConfirmTimeout(sdata))) 830 mshcfg->dot11MeshConfirmTimeout))
829 sta->ignore_plink_timer = true; 831 sta->ignore_plink_timer = true;
830 832
831 spin_unlock_bh(&sta->lock); 833 spin_unlock_bh(&sta->lock);
@@ -847,7 +849,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
847 sta->reason = reason; 849 sta->reason = reason;
848 sta->plink_state = NL80211_PLINK_HOLDING; 850 sta->plink_state = NL80211_PLINK_HOLDING;
849 if (!mod_plink_timer(sta, 851 if (!mod_plink_timer(sta,
850 dot11MeshHoldingTimeout(sdata))) 852 mshcfg->dot11MeshHoldingTimeout))
851 sta->ignore_plink_timer = true; 853 sta->ignore_plink_timer = true;
852 854
853 llid = sta->llid; 855 llid = sta->llid;
@@ -888,7 +890,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
888 sta->reason = reason; 890 sta->reason = reason;
889 sta->plink_state = NL80211_PLINK_HOLDING; 891 sta->plink_state = NL80211_PLINK_HOLDING;
890 if (!mod_plink_timer(sta, 892 if (!mod_plink_timer(sta,
891 dot11MeshHoldingTimeout(sdata))) 893 mshcfg->dot11MeshHoldingTimeout))
892 sta->ignore_plink_timer = true; 894 sta->ignore_plink_timer = true;
893 895
894 llid = sta->llid; 896 llid = sta->llid;
@@ -923,7 +925,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
923 changed |= __mesh_plink_deactivate(sta); 925 changed |= __mesh_plink_deactivate(sta);
924 sta->plink_state = NL80211_PLINK_HOLDING; 926 sta->plink_state = NL80211_PLINK_HOLDING;
925 llid = sta->llid; 927 llid = sta->llid;
926 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 928 mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
927 spin_unlock_bh(&sta->lock); 929 spin_unlock_bh(&sta->lock);
928 changed |= mesh_set_ht_prot_mode(sdata); 930 changed |= mesh_set_ht_prot_mode(sdata);
929 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE, 931 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
index a16b7b4b1e02..aa8d1e437385 100644
--- a/net/mac80211/mesh_sync.c
+++ b/net/mac80211/mesh_sync.c
@@ -43,7 +43,7 @@ struct sync_method {
43static bool mesh_peer_tbtt_adjusting(struct ieee802_11_elems *ie) 43static bool mesh_peer_tbtt_adjusting(struct ieee802_11_elems *ie)
44{ 44{
45 return (ie->mesh_config->meshconf_cap & 45 return (ie->mesh_config->meshconf_cap &
46 MESHCONF_CAPAB_TBTT_ADJUSTING) != 0; 46 IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING) != 0;
47} 47}
48 48
49void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata) 49void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
@@ -116,43 +116,13 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
116 goto no_sync; 116 goto no_sync;
117 } 117 }
118 118
119 if (rx_status->flag & RX_FLAG_MACTIME_MPDU && rx_status->mactime) { 119 if (ieee80211_have_rx_timestamp(rx_status))
120 /* 120 /* time when timestamp field was received */
121 * The mactime is defined as the time the first data symbol 121 t_r = ieee80211_calculate_rx_timestamp(local, rx_status,
122 * of the frame hits the PHY, and the timestamp of the beacon 122 24 + 12 +
123 * is defined as "the time that the data symbol containing the 123 elems->total_len +
124 * first bit of the timestamp is transmitted to the PHY plus 124 FCS_LEN,
125 * the transmitting STA's delays through its local PHY from the 125 24);
126 * MAC-PHY interface to its interface with the WM" (802.11
127 * 11.1.2)
128 *
129 * T_r, in 13.13.2.2.2, is just defined as "the frame reception
130 * time" but we unless we interpret that time to be the same
131 * time of the beacon timestamp, the offset calculation will be
132 * off. Below we adjust t_r to be "the time at which the first
133 * symbol of the timestamp element in the beacon is received".
134 * This correction depends on the rate.
135 *
136 * Based on similar code in ibss.c
137 */
138 int rate;
139
140 if (rx_status->flag & RX_FLAG_HT) {
141 /* TODO:
142 * In principle there could be HT-beacons (Dual Beacon
143 * HT Operation options), but for now ignore them and
144 * just use the primary (i.e. non-HT) beacons for
145 * synchronization.
146 * */
147 goto no_sync;
148 } else
149 rate = local->hw.wiphy->bands[rx_status->band]->
150 bitrates[rx_status->rate_idx].bitrate;
151
152 /* 24 bytes of header * 8 bits/byte *
153 * 10*(100 Kbps)/Mbps / rate (100 Kbps)*/
154 t_r = rx_status->mactime + (24 * 8 * 10 / rate);
155 }
156 126
157 /* Timing offset calculation (see 13.13.2.2.2) */ 127 /* Timing offset calculation (see 13.13.2.2.2) */
158 t_t = le64_to_cpu(mgmt->u.beacon.timestamp); 128 t_t = le64_to_cpu(mgmt->u.beacon.timestamp);
@@ -225,58 +195,20 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
225 ifmsh->sync_offset_clockdrift_max); 195 ifmsh->sync_offset_clockdrift_max);
226 set_bit(MESH_WORK_DRIFT_ADJUST, 196 set_bit(MESH_WORK_DRIFT_ADJUST,
227 &ifmsh->wrkq_flags); 197 &ifmsh->wrkq_flags);
198
199 ifmsh->adjusting_tbtt = true;
228 } else { 200 } else {
229 msync_dbg(sdata, 201 msync_dbg(sdata,
230 "TBTT : max clockdrift=%lld; too small to adjust\n", 202 "TBTT : max clockdrift=%lld; too small to adjust\n",
231 (long long)ifmsh->sync_offset_clockdrift_max); 203 (long long)ifmsh->sync_offset_clockdrift_max);
232 ifmsh->sync_offset_clockdrift_max = 0; 204 ifmsh->sync_offset_clockdrift_max = 0;
205
206 ifmsh->adjusting_tbtt = false;
233 } 207 }
234 spin_unlock_bh(&ifmsh->sync_offset_lock); 208 spin_unlock_bh(&ifmsh->sync_offset_lock);
235} 209}
236 210
237static const u8 *mesh_get_vendor_oui(struct ieee80211_sub_if_data *sdata) 211static const struct sync_method sync_methods[] = {
238{
239 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
240 u8 offset;
241
242 if (!ifmsh->ie || !ifmsh->ie_len)
243 return NULL;
244
245 offset = ieee80211_ie_split_vendor(ifmsh->ie,
246 ifmsh->ie_len, 0);
247
248 if (!offset)
249 return NULL;
250
251 return ifmsh->ie + offset + 2;
252}
253
254static void mesh_sync_vendor_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
255 u16 stype,
256 struct ieee80211_mgmt *mgmt,
257 struct ieee802_11_elems *elems,
258 struct ieee80211_rx_status *rx_status)
259{
260 const u8 *oui;
261
262 WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
263 msync_dbg(sdata, "called mesh_sync_vendor_rx_bcn_presp\n");
264 oui = mesh_get_vendor_oui(sdata);
265 /* here you would implement the vendor offset tracking for this oui */
266}
267
268static void mesh_sync_vendor_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
269{
270 const u8 *oui;
271
272 WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
273 msync_dbg(sdata, "called mesh_sync_vendor_adjust_tbtt\n");
274 oui = mesh_get_vendor_oui(sdata);
275 /* here you would implement the vendor tsf adjustment for this oui */
276}
277
278/* global variable */
279static struct sync_method sync_methods[] = {
280 { 212 {
281 .method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET, 213 .method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
282 .ops = { 214 .ops = {
@@ -284,18 +216,11 @@ static struct sync_method sync_methods[] = {
284 .adjust_tbtt = &mesh_sync_offset_adjust_tbtt, 216 .adjust_tbtt = &mesh_sync_offset_adjust_tbtt,
285 } 217 }
286 }, 218 },
287 {
288 .method = IEEE80211_SYNC_METHOD_VENDOR,
289 .ops = {
290 .rx_bcn_presp = &mesh_sync_vendor_rx_bcn_presp,
291 .adjust_tbtt = &mesh_sync_vendor_adjust_tbtt,
292 }
293 },
294}; 219};
295 220
296struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method) 221const struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method)
297{ 222{
298 struct ieee80211_mesh_sync_ops *ops = NULL; 223 const struct ieee80211_mesh_sync_ops *ops = NULL;
299 u8 i; 224 u8 i;
300 225
301 for (i = 0 ; i < ARRAY_SIZE(sync_methods); ++i) { 226 for (i = 0 ; i < ARRAY_SIZE(sync_methods); ++i) {
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 1b7eed252fe9..a3552929a21d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -178,20 +178,32 @@ static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata,
178{ 178{
179 struct ieee80211_local *local = sdata->local; 179 struct ieee80211_local *local = sdata->local;
180 struct ieee80211_supported_band *sband; 180 struct ieee80211_supported_band *sband;
181 struct ieee80211_chanctx_conf *chanctx_conf;
182 struct ieee80211_channel *chan;
181 struct sta_info *sta; 183 struct sta_info *sta;
182 u32 changed = 0; 184 u32 changed = 0;
183 u16 ht_opmode; 185 u16 ht_opmode;
184 bool disable_40 = false; 186 bool disable_40 = false;
185 187
186 sband = local->hw.wiphy->bands[local->oper_channel->band]; 188 rcu_read_lock();
189 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
190 if (WARN_ON(!chanctx_conf)) {
191 rcu_read_unlock();
192 return 0;
193 }
194 chan = chanctx_conf->def.chan;
195 rcu_read_unlock();
196 sband = local->hw.wiphy->bands[chan->band];
187 197
188 switch (sdata->vif.bss_conf.channel_type) { 198 switch (sdata->vif.bss_conf.chandef.width) {
189 case NL80211_CHAN_HT40PLUS: 199 case NL80211_CHAN_WIDTH_40:
190 if (local->oper_channel->flags & IEEE80211_CHAN_NO_HT40PLUS) 200 if (sdata->vif.bss_conf.chandef.chan->center_freq >
201 sdata->vif.bss_conf.chandef.center_freq1 &&
202 chan->flags & IEEE80211_CHAN_NO_HT40PLUS)
191 disable_40 = true; 203 disable_40 = true;
192 break; 204 if (sdata->vif.bss_conf.chandef.chan->center_freq <
193 case NL80211_CHAN_HT40MINUS: 205 sdata->vif.bss_conf.chandef.center_freq1 &&
194 if (local->oper_channel->flags & IEEE80211_CHAN_NO_HT40MINUS) 206 chan->flags & IEEE80211_CHAN_NO_HT40MINUS)
195 disable_40 = true; 207 disable_40 = true;
196 break; 208 break;
197 default: 209 default:
@@ -342,8 +354,18 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
342 /* determine capability flags */ 354 /* determine capability flags */
343 cap = vht_cap.cap; 355 cap = vht_cap.cap;
344 356
357 if (sdata->u.mgd.flags & IEEE80211_STA_DISABLE_80P80MHZ) {
358 cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
359 cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
360 }
361
362 if (sdata->u.mgd.flags & IEEE80211_STA_DISABLE_160MHZ) {
363 cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160;
364 cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
365 }
366
345 /* reserve and fill IE */ 367 /* reserve and fill IE */
346 pos = skb_put(skb, sizeof(struct ieee80211_vht_capabilities) + 2); 368 pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
347 ieee80211_ie_build_vht_cap(pos, &vht_cap, cap); 369 ieee80211_ie_build_vht_cap(pos, &vht_cap, cap);
348} 370}
349 371
@@ -359,11 +381,21 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
359 int i, count, rates_len, supp_rates_len; 381 int i, count, rates_len, supp_rates_len;
360 u16 capab; 382 u16 capab;
361 struct ieee80211_supported_band *sband; 383 struct ieee80211_supported_band *sband;
384 struct ieee80211_chanctx_conf *chanctx_conf;
385 struct ieee80211_channel *chan;
362 u32 rates = 0; 386 u32 rates = 0;
363 387
364 lockdep_assert_held(&ifmgd->mtx); 388 lockdep_assert_held(&ifmgd->mtx);
365 389
366 sband = local->hw.wiphy->bands[local->oper_channel->band]; 390 rcu_read_lock();
391 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
392 if (WARN_ON(!chanctx_conf)) {
393 rcu_read_unlock();
394 return;
395 }
396 chan = chanctx_conf->def.chan;
397 rcu_read_unlock();
398 sband = local->hw.wiphy->bands[chan->band];
367 399
368 if (assoc_data->supp_rates_len) { 400 if (assoc_data->supp_rates_len) {
369 /* 401 /*
@@ -392,7 +424,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
392 4 + /* power capability */ 424 4 + /* power capability */
393 2 + 2 * sband->n_channels + /* supported channels */ 425 2 + 2 * sband->n_channels + /* supported channels */
394 2 + sizeof(struct ieee80211_ht_cap) + /* HT */ 426 2 + sizeof(struct ieee80211_ht_cap) + /* HT */
395 2 + sizeof(struct ieee80211_vht_capabilities) + /* VHT */ 427 2 + sizeof(struct ieee80211_vht_cap) + /* VHT */
396 assoc_data->ie_len + /* extra IEs */ 428 assoc_data->ie_len + /* extra IEs */
397 9, /* WMM */ 429 9, /* WMM */
398 GFP_KERNEL); 430 GFP_KERNEL);
@@ -485,7 +517,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
485 *pos++ = WLAN_EID_PWR_CAPABILITY; 517 *pos++ = WLAN_EID_PWR_CAPABILITY;
486 *pos++ = 2; 518 *pos++ = 2;
487 *pos++ = 0; /* min tx power */ 519 *pos++ = 0; /* min tx power */
488 *pos++ = local->oper_channel->max_power; /* max tx power */ 520 *pos++ = chan->max_power; /* max tx power */
489 521
490 /* 2. supported channels */ 522 /* 2. supported channels */
491 /* TODO: get this in reg domain format */ 523 /* TODO: get this in reg domain format */
@@ -521,9 +553,13 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
521 offset = noffset; 553 offset = noffset;
522 } 554 }
523 555
524 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) 556 if (WARN_ON_ONCE((ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
557 !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)))
558 ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
559
560 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
525 ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param, 561 ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param,
526 sband, local->oper_channel, ifmgd->ap_smps); 562 sband, chan, sdata->smps_mode);
527 563
528 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) 564 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
529 ieee80211_add_vht_ie(sdata, skb, sband); 565 ieee80211_add_vht_ie(sdata, skb, sband);
@@ -657,18 +693,18 @@ static void ieee80211_chswitch_work(struct work_struct *work)
657 if (!ifmgd->associated) 693 if (!ifmgd->associated)
658 goto out; 694 goto out;
659 695
660 sdata->local->oper_channel = sdata->local->csa_channel; 696 sdata->local->_oper_channel = sdata->local->csa_channel;
661 if (!sdata->local->ops->channel_switch) { 697 if (!sdata->local->ops->channel_switch) {
662 /* call "hw_config" only if doing sw channel switch */ 698 /* call "hw_config" only if doing sw channel switch */
663 ieee80211_hw_config(sdata->local, 699 ieee80211_hw_config(sdata->local,
664 IEEE80211_CONF_CHANGE_CHANNEL); 700 IEEE80211_CONF_CHANGE_CHANNEL);
665 } else { 701 } else {
666 /* update the device channel directly */ 702 /* update the device channel directly */
667 sdata->local->hw.conf.channel = sdata->local->oper_channel; 703 sdata->local->hw.conf.channel = sdata->local->_oper_channel;
668 } 704 }
669 705
670 /* XXX: shouldn't really modify cfg80211-owned data! */ 706 /* XXX: shouldn't really modify cfg80211-owned data! */
671 ifmgd->associated->channel = sdata->local->oper_channel; 707 ifmgd->associated->channel = sdata->local->_oper_channel;
672 708
673 /* XXX: wait for a beacon first? */ 709 /* XXX: wait for a beacon first? */
674 ieee80211_wake_queues_by_reason(&sdata->local->hw, 710 ieee80211_wake_queues_by_reason(&sdata->local->hw,
@@ -680,11 +716,8 @@ static void ieee80211_chswitch_work(struct work_struct *work)
680 716
681void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success) 717void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success)
682{ 718{
683 struct ieee80211_sub_if_data *sdata; 719 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
684 struct ieee80211_if_managed *ifmgd; 720 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
685
686 sdata = vif_to_sdata(vif);
687 ifmgd = &sdata->u.mgd;
688 721
689 trace_api_chswitch_done(sdata, success); 722 trace_api_chswitch_done(sdata, success);
690 if (!success) { 723 if (!success) {
@@ -723,6 +756,7 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
723 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 756 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
724 int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num, 757 int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num,
725 cbss->channel->band); 758 cbss->channel->band);
759 struct ieee80211_chanctx *chanctx;
726 760
727 ASSERT_MGD_MTX(ifmgd); 761 ASSERT_MGD_MTX(ifmgd);
728 762
@@ -748,10 +782,35 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
748 return; 782 return;
749 } 783 }
750 784
751 sdata->local->csa_channel = new_ch;
752
753 ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED; 785 ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
754 786
787 if (sdata->local->use_chanctx) {
788 sdata_info(sdata,
789 "not handling channel switch with channel contexts\n");
790 ieee80211_queue_work(&sdata->local->hw,
791 &ifmgd->csa_connection_drop_work);
792 return;
793 }
794
795 mutex_lock(&sdata->local->chanctx_mtx);
796 if (WARN_ON(!rcu_access_pointer(sdata->vif.chanctx_conf))) {
797 mutex_unlock(&sdata->local->chanctx_mtx);
798 return;
799 }
800 chanctx = container_of(rcu_access_pointer(sdata->vif.chanctx_conf),
801 struct ieee80211_chanctx, conf);
802 if (chanctx->refcount > 1) {
803 sdata_info(sdata,
804 "channel switch with multiple interfaces on the same channel, disconnecting\n");
805 ieee80211_queue_work(&sdata->local->hw,
806 &ifmgd->csa_connection_drop_work);
807 mutex_unlock(&sdata->local->chanctx_mtx);
808 return;
809 }
810 mutex_unlock(&sdata->local->chanctx_mtx);
811
812 sdata->local->csa_channel = new_ch;
813
755 if (sw_elem->mode) 814 if (sw_elem->mode)
756 ieee80211_stop_queues_by_reason(&sdata->local->hw, 815 ieee80211_stop_queues_by_reason(&sdata->local->hw,
757 IEEE80211_QUEUE_STOP_REASON_CSA); 816 IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -778,10 +837,10 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
778 cbss->beacon_interval)); 837 cbss->beacon_interval));
779} 838}
780 839
781static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata, 840static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
782 struct ieee80211_channel *channel, 841 struct ieee80211_channel *channel,
783 const u8 *country_ie, u8 country_ie_len, 842 const u8 *country_ie, u8 country_ie_len,
784 const u8 *pwr_constr_elem) 843 const u8 *pwr_constr_elem)
785{ 844{
786 struct ieee80211_country_ie_triplet *triplet; 845 struct ieee80211_country_ie_triplet *triplet;
787 int chan = ieee80211_frequency_to_channel(channel->center_freq); 846 int chan = ieee80211_frequency_to_channel(channel->center_freq);
@@ -790,7 +849,7 @@ static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
790 849
791 /* Invalid IE */ 850 /* Invalid IE */
792 if (country_ie_len % 2 || country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN) 851 if (country_ie_len % 2 || country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN)
793 return; 852 return 0;
794 853
795 triplet = (void *)(country_ie + 3); 854 triplet = (void *)(country_ie + 3);
796 country_ie_len -= 3; 855 country_ie_len -= 3;
@@ -831,19 +890,21 @@ static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
831 } 890 }
832 891
833 if (!have_chan_pwr) 892 if (!have_chan_pwr)
834 return; 893 return 0;
835 894
836 new_ap_level = max_t(int, 0, chan_pwr - *pwr_constr_elem); 895 new_ap_level = max_t(int, 0, chan_pwr - *pwr_constr_elem);
837 896
838 if (sdata->local->ap_power_level == new_ap_level) 897 if (sdata->ap_power_level == new_ap_level)
839 return; 898 return 0;
840 899
841 sdata_info(sdata, 900 sdata_info(sdata,
842 "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n", 901 "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n",
843 new_ap_level, chan_pwr, *pwr_constr_elem, 902 new_ap_level, chan_pwr, *pwr_constr_elem,
844 sdata->u.mgd.bssid); 903 sdata->u.mgd.bssid);
845 sdata->local->ap_power_level = new_ap_level; 904 sdata->ap_power_level = new_ap_level;
846 ieee80211_hw_config(sdata->local, 0); 905 if (__ieee80211_recalc_txpower(sdata))
906 return BSS_CHANGED_TXPOWER;
907 return 0;
847} 908}
848 909
849void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif) 910void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif)
@@ -1013,12 +1074,8 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
1013 if (beaconint_us > latency) { 1074 if (beaconint_us > latency) {
1014 local->ps_sdata = NULL; 1075 local->ps_sdata = NULL;
1015 } else { 1076 } else {
1016 struct ieee80211_bss *bss;
1017 int maxslp = 1; 1077 int maxslp = 1;
1018 u8 dtimper; 1078 u8 dtimper = found->u.mgd.dtim_period;
1019
1020 bss = (void *)found->u.mgd.associated->priv;
1021 dtimper = bss->dtim_period;
1022 1079
1023 /* If the TIM IE is invalid, pretend the value is 1 */ 1080 /* If the TIM IE is invalid, pretend the value is 1 */
1024 if (!dtimper) 1081 if (!dtimper)
@@ -1280,7 +1337,7 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
1280 } 1337 }
1281 1338
1282 use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME); 1339 use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
1283 if (sdata->local->oper_channel->band == IEEE80211_BAND_5GHZ) 1340 if (ieee80211_get_sdata_band(sdata) == IEEE80211_BAND_5GHZ)
1284 use_short_slot = true; 1341 use_short_slot = true;
1285 1342
1286 if (use_protection != bss_conf->use_cts_prot) { 1343 if (use_protection != bss_conf->use_cts_prot) {
@@ -1321,15 +1378,45 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
1321 1378
1322 sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE; 1379 sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE;
1323 1380
1381 if (sdata->vif.p2p) {
1382 const struct cfg80211_bss_ies *ies;
1383
1384 rcu_read_lock();
1385 ies = rcu_dereference(cbss->ies);
1386 if (ies) {
1387 u8 noa[2];
1388 int ret;
1389
1390 ret = cfg80211_get_p2p_attr(
1391 ies->data, ies->len,
1392 IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
1393 noa, sizeof(noa));
1394 if (ret >= 2) {
1395 bss_conf->p2p_oppps = noa[1] & 0x80;
1396 bss_conf->p2p_ctwindow = noa[1] & 0x7f;
1397 bss_info_changed |= BSS_CHANGED_P2P_PS;
1398 sdata->u.mgd.p2p_noa_index = noa[0];
1399 }
1400 }
1401 rcu_read_unlock();
1402 }
1403
1324 /* just to be sure */ 1404 /* just to be sure */
1325 ieee80211_stop_poll(sdata); 1405 ieee80211_stop_poll(sdata);
1326 1406
1327 ieee80211_led_assoc(local, 1); 1407 ieee80211_led_assoc(local, 1);
1328 1408
1329 if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) 1409 if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) {
1330 bss_conf->dtim_period = bss->dtim_period; 1410 /*
1331 else 1411 * If the AP is buggy we may get here with no DTIM period
1412 * known, so assume it's 1 which is the only safe assumption
1413 * in that case, although if the TIM IE is broken powersave
1414 * probably just won't work at all.
1415 */
1416 bss_conf->dtim_period = sdata->u.mgd.dtim_period ?: 1;
1417 } else {
1332 bss_conf->dtim_period = 0; 1418 bss_conf->dtim_period = 0;
1419 }
1333 1420
1334 bss_conf->assoc = 1; 1421 bss_conf->assoc = 1;
1335 1422
@@ -1350,7 +1437,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
1350 ieee80211_recalc_ps(local, -1); 1437 ieee80211_recalc_ps(local, -1);
1351 mutex_unlock(&local->iflist_mtx); 1438 mutex_unlock(&local->iflist_mtx);
1352 1439
1353 ieee80211_recalc_smps(local); 1440 ieee80211_recalc_smps(sdata);
1354 ieee80211_recalc_ps_vif(sdata); 1441 ieee80211_recalc_ps_vif(sdata);
1355 1442
1356 netif_tx_start_all_queues(sdata->dev); 1443 netif_tx_start_all_queues(sdata->dev);
@@ -1443,11 +1530,14 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1443 changed |= BSS_CHANGED_ASSOC; 1530 changed |= BSS_CHANGED_ASSOC;
1444 sdata->vif.bss_conf.assoc = false; 1531 sdata->vif.bss_conf.assoc = false;
1445 1532
1533 sdata->vif.bss_conf.p2p_ctwindow = 0;
1534 sdata->vif.bss_conf.p2p_oppps = false;
1535
1446 /* on the next assoc, re-program HT parameters */ 1536 /* on the next assoc, re-program HT parameters */
1447 memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa)); 1537 memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa));
1448 memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask)); 1538 memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
1449 1539
1450 local->ap_power_level = 0; 1540 sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
1451 1541
1452 del_timer_sync(&local->dynamic_ps_timer); 1542 del_timer_sync(&local->dynamic_ps_timer);
1453 cancel_work_sync(&local->dynamic_ps_enable_work); 1543 cancel_work_sync(&local->dynamic_ps_enable_work);
@@ -1465,10 +1555,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1465 changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT; 1555 changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT;
1466 ieee80211_bss_info_change_notify(sdata, changed); 1556 ieee80211_bss_info_change_notify(sdata, changed);
1467 1557
1468 /* channel(_type) changes are handled by ieee80211_hw_config */
1469 WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT));
1470 ieee80211_hw_config(local, 0);
1471
1472 /* disassociated - set to defaults now */ 1558 /* disassociated - set to defaults now */
1473 ieee80211_set_wmm_default(sdata, false); 1559 ieee80211_set_wmm_default(sdata, false);
1474 1560
@@ -1478,6 +1564,11 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1478 del_timer_sync(&sdata->u.mgd.chswitch_timer); 1564 del_timer_sync(&sdata->u.mgd.chswitch_timer);
1479 1565
1480 sdata->u.mgd.timers_running = 0; 1566 sdata->u.mgd.timers_running = 0;
1567
1568 sdata->vif.bss_conf.dtim_period = 0;
1569
1570 ifmgd->flags = 0;
1571 ieee80211_vif_release_channel(sdata);
1481} 1572}
1482 1573
1483void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, 1574void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
@@ -1581,6 +1672,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
1581 } else { 1672 } else {
1582 int ssid_len; 1673 int ssid_len;
1583 1674
1675 rcu_read_lock();
1584 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID); 1676 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
1585 if (WARN_ON_ONCE(ssid == NULL)) 1677 if (WARN_ON_ONCE(ssid == NULL))
1586 ssid_len = 0; 1678 ssid_len = 0;
@@ -1589,7 +1681,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
1589 1681
1590 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL, 1682 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL,
1591 0, (u32) -1, true, false, 1683 0, (u32) -1, true, false,
1592 ifmgd->associated->channel); 1684 ifmgd->associated->channel, false);
1685 rcu_read_unlock();
1593 } 1686 }
1594 1687
1595 ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); 1688 ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
@@ -1685,6 +1778,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
1685 else 1778 else
1686 return NULL; 1779 return NULL;
1687 1780
1781 rcu_read_lock();
1688 ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID); 1782 ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID);
1689 if (WARN_ON_ONCE(ssid == NULL)) 1783 if (WARN_ON_ONCE(ssid == NULL))
1690 ssid_len = 0; 1784 ssid_len = 0;
@@ -1692,10 +1786,10 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
1692 ssid_len = ssid[1]; 1786 ssid_len = ssid[1];
1693 1787
1694 skb = ieee80211_build_probe_req(sdata, cbss->bssid, 1788 skb = ieee80211_build_probe_req(sdata, cbss->bssid,
1695 (u32) -1, 1789 (u32) -1, cbss->channel,
1696 sdata->local->oper_channel,
1697 ssid + 2, ssid_len, 1790 ssid + 2, ssid_len,
1698 NULL, 0, true); 1791 NULL, 0, true);
1792 rcu_read_unlock();
1699 1793
1700 return skb; 1794 return skb;
1701} 1795}
@@ -1804,6 +1898,8 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
1804 1898
1805 memset(sdata->u.mgd.bssid, 0, ETH_ALEN); 1899 memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
1806 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); 1900 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
1901 sdata->u.mgd.flags = 0;
1902 ieee80211_vif_release_channel(sdata);
1807 } 1903 }
1808 1904
1809 cfg80211_put_bss(auth_data->bss); 1905 cfg80211_put_bss(auth_data->bss);
@@ -1824,7 +1920,7 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
1824 return; 1920 return;
1825 auth_data->expected_transaction = 4; 1921 auth_data->expected_transaction = 4;
1826 drv_mgd_prepare_tx(sdata->local, sdata); 1922 drv_mgd_prepare_tx(sdata->local, sdata);
1827 ieee80211_send_auth(sdata, 3, auth_data->algorithm, 1923 ieee80211_send_auth(sdata, 3, auth_data->algorithm, 0,
1828 elems.challenge - 2, elems.challenge_len + 2, 1924 elems.challenge - 2, elems.challenge_len + 2,
1829 auth_data->bss->bssid, auth_data->bss->bssid, 1925 auth_data->bss->bssid, auth_data->bss->bssid,
1830 auth_data->key, auth_data->key_len, 1926 auth_data->key, auth_data->key_len,
@@ -1858,8 +1954,13 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1858 status_code = le16_to_cpu(mgmt->u.auth.status_code); 1954 status_code = le16_to_cpu(mgmt->u.auth.status_code);
1859 1955
1860 if (auth_alg != ifmgd->auth_data->algorithm || 1956 if (auth_alg != ifmgd->auth_data->algorithm ||
1861 auth_transaction != ifmgd->auth_data->expected_transaction) 1957 auth_transaction != ifmgd->auth_data->expected_transaction) {
1958 sdata_info(sdata, "%pM unexpected authentication state: alg %d (expected %d) transact %d (expected %d)\n",
1959 mgmt->sa, auth_alg, ifmgd->auth_data->algorithm,
1960 auth_transaction,
1961 ifmgd->auth_data->expected_transaction);
1862 return RX_MGMT_NONE; 1962 return RX_MGMT_NONE;
1963 }
1863 1964
1864 if (status_code != WLAN_STATUS_SUCCESS) { 1965 if (status_code != WLAN_STATUS_SUCCESS) {
1865 sdata_info(sdata, "%pM denied authentication (status %d)\n", 1966 sdata_info(sdata, "%pM denied authentication (status %d)\n",
@@ -1872,6 +1973,7 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1872 case WLAN_AUTH_OPEN: 1973 case WLAN_AUTH_OPEN:
1873 case WLAN_AUTH_LEAP: 1974 case WLAN_AUTH_LEAP:
1874 case WLAN_AUTH_FT: 1975 case WLAN_AUTH_FT:
1976 case WLAN_AUTH_SAE:
1875 break; 1977 break;
1876 case WLAN_AUTH_SHARED_KEY: 1978 case WLAN_AUTH_SHARED_KEY:
1877 if (ifmgd->auth_data->expected_transaction != 4) { 1979 if (ifmgd->auth_data->expected_transaction != 4) {
@@ -1891,6 +1993,15 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1891 ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC; 1993 ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
1892 run_again(ifmgd, ifmgd->auth_data->timeout); 1994 run_again(ifmgd, ifmgd->auth_data->timeout);
1893 1995
1996 if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE &&
1997 ifmgd->auth_data->expected_transaction != 2) {
1998 /*
1999 * Report auth frame to user space for processing since another
2000 * round of Authentication frames is still needed.
2001 */
2002 return RX_MGMT_CFG80211_RX_AUTH;
2003 }
2004
1894 /* move station state to auth */ 2005 /* move station state to auth */
1895 mutex_lock(&sdata->local->sta_mtx); 2006 mutex_lock(&sdata->local->sta_mtx);
1896 sta = sta_info_get(sdata, bssid); 2007 sta = sta_info_get(sdata, bssid);
@@ -2030,6 +2141,8 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
2030 2141
2031 memset(sdata->u.mgd.bssid, 0, ETH_ALEN); 2142 memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
2032 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); 2143 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
2144 sdata->u.mgd.flags = 0;
2145 ieee80211_vif_release_channel(sdata);
2033 } 2146 }
2034 2147
2035 kfree(assoc_data); 2148 kfree(assoc_data);
@@ -2091,15 +2204,20 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2091 return false; 2204 return false;
2092 } 2205 }
2093 2206
2094 sband = local->hw.wiphy->bands[local->oper_channel->band]; 2207 sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)];
2095 2208
2096 if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) 2209 if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
2097 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 2210 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
2098 elems.ht_cap_elem, &sta->sta.ht_cap); 2211 elems.ht_cap_elem, &sta->sta.ht_cap);
2099 2212
2100 sta->supports_40mhz = 2213 sta->supports_40mhz =
2101 sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40; 2214 sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
2102 2215
2216 if (elems.vht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
2217 ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
2218 elems.vht_cap_elem,
2219 &sta->sta.vht_cap);
2220
2103 rate_control_rate_init(sta); 2221 rate_control_rate_init(sta);
2104 2222
2105 if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) 2223 if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED)
@@ -2140,7 +2258,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2140 changed |= BSS_CHANGED_QOS; 2258 changed |= BSS_CHANGED_QOS;
2141 2259
2142 if (elems.ht_operation && elems.wmm_param && 2260 if (elems.ht_operation && elems.wmm_param &&
2143 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) 2261 !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
2144 changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation, 2262 changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
2145 cbss->bssid, false); 2263 cbss->bssid, false);
2146 2264
@@ -2247,9 +2365,9 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2247 2365
2248 return RX_MGMT_CFG80211_RX_ASSOC; 2366 return RX_MGMT_CFG80211_RX_ASSOC;
2249} 2367}
2368
2250static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, 2369static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
2251 struct ieee80211_mgmt *mgmt, 2370 struct ieee80211_mgmt *mgmt, size_t len,
2252 size_t len,
2253 struct ieee80211_rx_status *rx_status, 2371 struct ieee80211_rx_status *rx_status,
2254 struct ieee802_11_elems *elems, 2372 struct ieee802_11_elems *elems,
2255 bool beacon) 2373 bool beacon)
@@ -2260,11 +2378,18 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
2260 struct ieee80211_channel *channel; 2378 struct ieee80211_channel *channel;
2261 bool need_ps = false; 2379 bool need_ps = false;
2262 2380
2263 if (sdata->u.mgd.associated && 2381 if ((sdata->u.mgd.associated &&
2264 ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) { 2382 ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) ||
2265 bss = (void *)sdata->u.mgd.associated->priv; 2383 (sdata->u.mgd.assoc_data &&
2384 ether_addr_equal(mgmt->bssid,
2385 sdata->u.mgd.assoc_data->bss->bssid))) {
2266 /* not previously set so we may need to recalc */ 2386 /* not previously set so we may need to recalc */
2267 need_ps = !bss->dtim_period; 2387 need_ps = sdata->u.mgd.associated && !sdata->u.mgd.dtim_period;
2388
2389 if (elems->tim && !elems->parse_error) {
2390 struct ieee80211_tim_ie *tim_ie = elems->tim;
2391 sdata->u.mgd.dtim_period = tim_ie->dtim_period;
2392 }
2268 } 2393 }
2269 2394
2270 if (elems->ds_params && elems->ds_params_len == 1) 2395 if (elems->ds_params && elems->ds_params_len == 1)
@@ -2369,8 +2494,10 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2369 size_t baselen; 2494 size_t baselen;
2370 struct ieee802_11_elems elems; 2495 struct ieee802_11_elems elems;
2371 struct ieee80211_local *local = sdata->local; 2496 struct ieee80211_local *local = sdata->local;
2497 struct ieee80211_chanctx_conf *chanctx_conf;
2498 struct ieee80211_channel *chan;
2372 u32 changed = 0; 2499 u32 changed = 0;
2373 bool erp_valid, directed_tim = false; 2500 bool erp_valid;
2374 u8 erp_value = 0; 2501 u8 erp_value = 0;
2375 u32 ncrc; 2502 u32 ncrc;
2376 u8 *bssid; 2503 u8 *bssid;
@@ -2382,8 +2509,19 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2382 if (baselen > len) 2509 if (baselen > len)
2383 return; 2510 return;
2384 2511
2385 if (rx_status->freq != local->oper_channel->center_freq) 2512 rcu_read_lock();
2513 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
2514 if (!chanctx_conf) {
2515 rcu_read_unlock();
2386 return; 2516 return;
2517 }
2518
2519 if (rx_status->freq != chanctx_conf->def.chan->center_freq) {
2520 rcu_read_unlock();
2521 return;
2522 }
2523 chan = chanctx_conf->def.chan;
2524 rcu_read_unlock();
2387 2525
2388 if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon && 2526 if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon &&
2389 ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) { 2527 ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) {
@@ -2490,11 +2628,10 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2490 len - baselen, &elems, 2628 len - baselen, &elems,
2491 care_about_ies, ncrc); 2629 care_about_ies, ncrc);
2492 2630
2493 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
2494 directed_tim = ieee80211_check_tim(elems.tim, elems.tim_len,
2495 ifmgd->aid);
2496
2497 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) { 2631 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) {
2632 bool directed_tim = ieee80211_check_tim(elems.tim,
2633 elems.tim_len,
2634 ifmgd->aid);
2498 if (directed_tim) { 2635 if (directed_tim) {
2499 if (local->hw.conf.dynamic_ps_timeout > 0) { 2636 if (local->hw.conf.dynamic_ps_timeout > 0) {
2500 if (local->hw.conf.flags & IEEE80211_CONF_PS) { 2637 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
@@ -2519,6 +2656,27 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2519 } 2656 }
2520 } 2657 }
2521 2658
2659 if (sdata->vif.p2p) {
2660 u8 noa[2];
2661 int ret;
2662
2663 ret = cfg80211_get_p2p_attr(mgmt->u.beacon.variable,
2664 len - baselen,
2665 IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
2666 noa, sizeof(noa));
2667 if (ret >= 2 && sdata->u.mgd.p2p_noa_index != noa[0]) {
2668 bss_conf->p2p_oppps = noa[1] & 0x80;
2669 bss_conf->p2p_ctwindow = noa[1] & 0x7f;
2670 changed |= BSS_CHANGED_P2P_PS;
2671 sdata->u.mgd.p2p_noa_index = noa[0];
2672 /*
2673 * make sure we update all information, the CRC
2674 * mechanism doesn't look at P2P attributes.
2675 */
2676 ifmgd->beacon_crc_valid = false;
2677 }
2678 }
2679
2522 if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid) 2680 if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid)
2523 return; 2681 return;
2524 ifmgd->beacon_crc = ncrc; 2682 ifmgd->beacon_crc = ncrc;
@@ -2543,22 +2701,17 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2543 2701
2544 2702
2545 if (elems.ht_cap_elem && elems.ht_operation && elems.wmm_param && 2703 if (elems.ht_cap_elem && elems.ht_operation && elems.wmm_param &&
2546 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) { 2704 !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
2547 struct ieee80211_supported_band *sband;
2548
2549 sband = local->hw.wiphy->bands[local->oper_channel->band];
2550
2551 changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation, 2705 changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
2552 bssid, true); 2706 bssid, true);
2553 }
2554 2707
2555 if (elems.country_elem && elems.pwr_constr_elem && 2708 if (elems.country_elem && elems.pwr_constr_elem &&
2556 mgmt->u.probe_resp.capab_info & 2709 mgmt->u.probe_resp.capab_info &
2557 cpu_to_le16(WLAN_CAPABILITY_SPECTRUM_MGMT)) 2710 cpu_to_le16(WLAN_CAPABILITY_SPECTRUM_MGMT))
2558 ieee80211_handle_pwr_constr(sdata, local->oper_channel, 2711 changed |= ieee80211_handle_pwr_constr(sdata, chan,
2559 elems.country_elem, 2712 elems.country_elem,
2560 elems.country_elem_len, 2713 elems.country_elem_len,
2561 elems.pwr_constr_elem); 2714 elems.pwr_constr_elem);
2562 2715
2563 ieee80211_bss_info_change_notify(sdata, changed); 2716 ieee80211_bss_info_change_notify(sdata, changed);
2564} 2717}
@@ -2703,13 +2856,23 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
2703 drv_mgd_prepare_tx(local, sdata); 2856 drv_mgd_prepare_tx(local, sdata);
2704 2857
2705 if (auth_data->bss->proberesp_ies) { 2858 if (auth_data->bss->proberesp_ies) {
2859 u16 trans = 1;
2860 u16 status = 0;
2861
2706 sdata_info(sdata, "send auth to %pM (try %d/%d)\n", 2862 sdata_info(sdata, "send auth to %pM (try %d/%d)\n",
2707 auth_data->bss->bssid, auth_data->tries, 2863 auth_data->bss->bssid, auth_data->tries,
2708 IEEE80211_AUTH_MAX_TRIES); 2864 IEEE80211_AUTH_MAX_TRIES);
2709 2865
2710 auth_data->expected_transaction = 2; 2866 auth_data->expected_transaction = 2;
2711 ieee80211_send_auth(sdata, 1, auth_data->algorithm, 2867
2712 auth_data->ie, auth_data->ie_len, 2868 if (auth_data->algorithm == WLAN_AUTH_SAE) {
2869 trans = auth_data->sae_trans;
2870 status = auth_data->sae_status;
2871 auth_data->expected_transaction = trans;
2872 }
2873
2874 ieee80211_send_auth(sdata, trans, auth_data->algorithm, status,
2875 auth_data->data, auth_data->data_len,
2713 auth_data->bss->bssid, 2876 auth_data->bss->bssid,
2714 auth_data->bss->bssid, NULL, 0, 0); 2877 auth_data->bss->bssid, NULL, 0, 0);
2715 } else { 2878 } else {
@@ -2719,16 +2882,20 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
2719 auth_data->bss->bssid, auth_data->tries, 2882 auth_data->bss->bssid, auth_data->tries,
2720 IEEE80211_AUTH_MAX_TRIES); 2883 IEEE80211_AUTH_MAX_TRIES);
2721 2884
2885 rcu_read_lock();
2722 ssidie = ieee80211_bss_get_ie(auth_data->bss, WLAN_EID_SSID); 2886 ssidie = ieee80211_bss_get_ie(auth_data->bss, WLAN_EID_SSID);
2723 if (!ssidie) 2887 if (!ssidie) {
2888 rcu_read_unlock();
2724 return -EINVAL; 2889 return -EINVAL;
2890 }
2725 /* 2891 /*
2726 * Direct probe is sent to broadcast address as some APs 2892 * Direct probe is sent to broadcast address as some APs
2727 * will not answer to direct packet in unassociated state. 2893 * will not answer to direct packet in unassociated state.
2728 */ 2894 */
2729 ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1], 2895 ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1],
2730 NULL, 0, (u32) -1, true, false, 2896 NULL, 0, (u32) -1, true, false,
2731 auth_data->bss->channel); 2897 auth_data->bss->channel, false);
2898 rcu_read_unlock();
2732 } 2899 }
2733 2900
2734 auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; 2901 auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
@@ -3058,90 +3225,313 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
3058 return 0; 3225 return 0;
3059} 3226}
3060 3227
3228static u32 chandef_downgrade(struct cfg80211_chan_def *c)
3229{
3230 u32 ret;
3231 int tmp;
3232
3233 switch (c->width) {
3234 case NL80211_CHAN_WIDTH_20:
3235 c->width = NL80211_CHAN_WIDTH_20_NOHT;
3236 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
3237 break;
3238 case NL80211_CHAN_WIDTH_40:
3239 c->width = NL80211_CHAN_WIDTH_20;
3240 c->center_freq1 = c->chan->center_freq;
3241 ret = IEEE80211_STA_DISABLE_40MHZ |
3242 IEEE80211_STA_DISABLE_VHT;
3243 break;
3244 case NL80211_CHAN_WIDTH_80:
3245 tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
3246 /* n_P40 */
3247 tmp /= 2;
3248 /* freq_P40 */
3249 c->center_freq1 = c->center_freq1 - 20 + 40 * tmp;
3250 c->width = NL80211_CHAN_WIDTH_40;
3251 ret = IEEE80211_STA_DISABLE_VHT;
3252 break;
3253 case NL80211_CHAN_WIDTH_80P80:
3254 c->center_freq2 = 0;
3255 c->width = NL80211_CHAN_WIDTH_80;
3256 ret = IEEE80211_STA_DISABLE_80P80MHZ |
3257 IEEE80211_STA_DISABLE_160MHZ;
3258 break;
3259 case NL80211_CHAN_WIDTH_160:
3260 /* n_P20 */
3261 tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
3262 /* n_P80 */
3263 tmp /= 4;
3264 c->center_freq1 = c->center_freq1 - 40 + 80 * tmp;
3265 c->width = NL80211_CHAN_WIDTH_80;
3266 ret = IEEE80211_STA_DISABLE_80P80MHZ |
3267 IEEE80211_STA_DISABLE_160MHZ;
3268 break;
3269 default:
3270 case NL80211_CHAN_WIDTH_20_NOHT:
3271 WARN_ON_ONCE(1);
3272 c->width = NL80211_CHAN_WIDTH_20_NOHT;
3273 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
3274 break;
3275 }
3276
3277 WARN_ON_ONCE(!cfg80211_chandef_valid(c));
3278
3279 return ret;
3280}
3281
3282static u32
3283ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
3284 struct ieee80211_supported_band *sband,
3285 struct ieee80211_channel *channel,
3286 const struct ieee80211_ht_operation *ht_oper,
3287 const struct ieee80211_vht_operation *vht_oper,
3288 struct cfg80211_chan_def *chandef)
3289{
3290 struct cfg80211_chan_def vht_chandef;
3291 u32 ht_cfreq, ret;
3292
3293 chandef->chan = channel;
3294 chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
3295 chandef->center_freq1 = channel->center_freq;
3296 chandef->center_freq2 = 0;
3297
3298 if (!ht_oper || !sband->ht_cap.ht_supported) {
3299 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
3300 goto out;
3301 }
3302
3303 chandef->width = NL80211_CHAN_WIDTH_20;
3304
3305 ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
3306 channel->band);
3307 /* check that channel matches the right operating channel */
3308 if (channel->center_freq != ht_cfreq) {
3309 /*
3310 * It's possible that some APs are confused here;
3311 * Netgear WNDR3700 sometimes reports 4 higher than
3312 * the actual channel in association responses, but
3313 * since we look at probe response/beacon data here
3314 * it should be OK.
3315 */
3316 sdata_info(sdata,
3317 "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
3318 channel->center_freq, ht_cfreq,
3319 ht_oper->primary_chan, channel->band);
3320 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
3321 goto out;
3322 }
3323
3324 /* check 40 MHz support, if we have it */
3325 if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
3326 switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
3327 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
3328 chandef->width = NL80211_CHAN_WIDTH_40;
3329 chandef->center_freq1 += 10;
3330 break;
3331 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
3332 chandef->width = NL80211_CHAN_WIDTH_40;
3333 chandef->center_freq1 -= 10;
3334 break;
3335 }
3336 } else {
3337 /* 40 MHz (and 80 MHz) must be supported for VHT */
3338 ret = IEEE80211_STA_DISABLE_VHT;
3339 goto out;
3340 }
3341
3342 if (!vht_oper || !sband->vht_cap.vht_supported) {
3343 ret = IEEE80211_STA_DISABLE_VHT;
3344 goto out;
3345 }
3346
3347 vht_chandef.chan = channel;
3348 vht_chandef.center_freq1 =
3349 ieee80211_channel_to_frequency(vht_oper->center_freq_seg1_idx,
3350 channel->band);
3351 vht_chandef.center_freq2 = 0;
3352
3353 if (vht_oper->center_freq_seg2_idx)
3354 vht_chandef.center_freq2 =
3355 ieee80211_channel_to_frequency(
3356 vht_oper->center_freq_seg2_idx,
3357 channel->band);
3358
3359 switch (vht_oper->chan_width) {
3360 case IEEE80211_VHT_CHANWIDTH_USE_HT:
3361 vht_chandef.width = chandef->width;
3362 break;
3363 case IEEE80211_VHT_CHANWIDTH_80MHZ:
3364 vht_chandef.width = NL80211_CHAN_WIDTH_80;
3365 break;
3366 case IEEE80211_VHT_CHANWIDTH_160MHZ:
3367 vht_chandef.width = NL80211_CHAN_WIDTH_160;
3368 break;
3369 case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
3370 vht_chandef.width = NL80211_CHAN_WIDTH_80P80;
3371 break;
3372 default:
3373 sdata_info(sdata,
3374 "AP VHT operation IE has invalid channel width (%d), disable VHT\n",
3375 vht_oper->chan_width);
3376 ret = IEEE80211_STA_DISABLE_VHT;
3377 goto out;
3378 }
3379
3380 if (!cfg80211_chandef_valid(&vht_chandef)) {
3381 sdata_info(sdata,
3382 "AP VHT information is invalid, disable VHT\n");
3383 ret = IEEE80211_STA_DISABLE_VHT;
3384 goto out;
3385 }
3386
3387 if (cfg80211_chandef_identical(chandef, &vht_chandef)) {
3388 ret = 0;
3389 goto out;
3390 }
3391
3392 if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
3393 sdata_info(sdata,
3394 "AP VHT information doesn't match HT, disable VHT\n");
3395 ret = IEEE80211_STA_DISABLE_VHT;
3396 goto out;
3397 }
3398
3399 *chandef = vht_chandef;
3400
3401 ret = 0;
3402
3403 while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
3404 IEEE80211_CHAN_DISABLED)) {
3405 if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
3406 ret = IEEE80211_STA_DISABLE_HT |
3407 IEEE80211_STA_DISABLE_VHT;
3408 goto out;
3409 }
3410
3411 ret = chandef_downgrade(chandef);
3412 }
3413
3414 if (chandef->width != vht_chandef.width)
3415 sdata_info(sdata,
3416 "local regulatory prevented using AP HT/VHT configuration, downgraded\n");
3417
3418out:
3419 WARN_ON_ONCE(!cfg80211_chandef_valid(chandef));
3420 return ret;
3421}
3422
3423static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,
3424 struct cfg80211_bss *cbss)
3425{
3426 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3427 const u8 *ht_cap_ie, *vht_cap_ie;
3428 const struct ieee80211_ht_cap *ht_cap;
3429 const struct ieee80211_vht_cap *vht_cap;
3430 u8 chains = 1;
3431
3432 if (ifmgd->flags & IEEE80211_STA_DISABLE_HT)
3433 return chains;
3434
3435 ht_cap_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_CAPABILITY);
3436 if (ht_cap_ie && ht_cap_ie[1] >= sizeof(*ht_cap)) {
3437 ht_cap = (void *)(ht_cap_ie + 2);
3438 chains = ieee80211_mcs_to_chains(&ht_cap->mcs);
3439 /*
3440 * TODO: use "Tx Maximum Number Spatial Streams Supported" and
3441 * "Tx Unequal Modulation Supported" fields.
3442 */
3443 }
3444
3445 if (ifmgd->flags & IEEE80211_STA_DISABLE_VHT)
3446 return chains;
3447
3448 vht_cap_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_VHT_CAPABILITY);
3449 if (vht_cap_ie && vht_cap_ie[1] >= sizeof(*vht_cap)) {
3450 u8 nss;
3451 u16 tx_mcs_map;
3452
3453 vht_cap = (void *)(vht_cap_ie + 2);
3454 tx_mcs_map = le16_to_cpu(vht_cap->supp_mcs.tx_mcs_map);
3455 for (nss = 8; nss > 0; nss--) {
3456 if (((tx_mcs_map >> (2 * (nss - 1))) & 3) !=
3457 IEEE80211_VHT_MCS_NOT_SUPPORTED)
3458 break;
3459 }
3460 /* TODO: use "Tx Highest Supported Long GI Data Rate" field? */
3461 chains = max(chains, nss);
3462 }
3463
3464 return chains;
3465}
3466
3061static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, 3467static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
3062 struct cfg80211_bss *cbss) 3468 struct cfg80211_bss *cbss)
3063{ 3469{
3064 struct ieee80211_local *local = sdata->local; 3470 struct ieee80211_local *local = sdata->local;
3065 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3471 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3066 int ht_cfreq;
3067 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
3068 const u8 *ht_oper_ie;
3069 const struct ieee80211_ht_operation *ht_oper = NULL; 3472 const struct ieee80211_ht_operation *ht_oper = NULL;
3473 const struct ieee80211_vht_operation *vht_oper = NULL;
3070 struct ieee80211_supported_band *sband; 3474 struct ieee80211_supported_band *sband;
3475 struct cfg80211_chan_def chandef;
3476 int ret;
3071 3477
3072 sband = local->hw.wiphy->bands[cbss->channel->band]; 3478 sband = local->hw.wiphy->bands[cbss->channel->band];
3073 3479
3074 ifmgd->flags &= ~IEEE80211_STA_DISABLE_40MHZ; 3480 ifmgd->flags &= ~(IEEE80211_STA_DISABLE_40MHZ |
3481 IEEE80211_STA_DISABLE_80P80MHZ |
3482 IEEE80211_STA_DISABLE_160MHZ);
3075 3483
3076 if (sband->ht_cap.ht_supported) { 3484 rcu_read_lock();
3077 ht_oper_ie = cfg80211_find_ie(WLAN_EID_HT_OPERATION, 3485
3078 cbss->information_elements, 3486 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
3079 cbss->len_information_elements); 3487 sband->ht_cap.ht_supported) {
3488 const u8 *ht_oper_ie;
3489
3490 ht_oper_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_OPERATION);
3080 if (ht_oper_ie && ht_oper_ie[1] >= sizeof(*ht_oper)) 3491 if (ht_oper_ie && ht_oper_ie[1] >= sizeof(*ht_oper))
3081 ht_oper = (void *)(ht_oper_ie + 2); 3492 ht_oper = (void *)(ht_oper_ie + 2);
3082 } 3493 }
3083 3494
3084 if (ht_oper) { 3495 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
3085 ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan, 3496 sband->vht_cap.vht_supported) {
3086 cbss->channel->band); 3497 const u8 *vht_oper_ie;
3087 /* check that channel matches the right operating channel */ 3498
3088 if (cbss->channel->center_freq != ht_cfreq) { 3499 vht_oper_ie = ieee80211_bss_get_ie(cbss,
3089 /* 3500 WLAN_EID_VHT_OPERATION);
3090 * It's possible that some APs are confused here; 3501 if (vht_oper_ie && vht_oper_ie[1] >= sizeof(*vht_oper))
3091 * Netgear WNDR3700 sometimes reports 4 higher than 3502 vht_oper = (void *)(vht_oper_ie + 2);
3092 * the actual channel in association responses, but 3503 if (vht_oper && !ht_oper) {
3093 * since we look at probe response/beacon data here 3504 vht_oper = NULL;
3094 * it should be OK.
3095 */
3096 sdata_info(sdata, 3505 sdata_info(sdata,
3097 "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n", 3506 "AP advertised VHT without HT, disabling both\n");
3098 cbss->channel->center_freq, 3507 sdata->flags |= IEEE80211_STA_DISABLE_HT;
3099 ht_cfreq, ht_oper->primary_chan, 3508 sdata->flags |= IEEE80211_STA_DISABLE_VHT;
3100 cbss->channel->band);
3101 ht_oper = NULL;
3102 } else {
3103 channel_type = NL80211_CHAN_HT20;
3104 } 3509 }
3105 } 3510 }
3106 3511
3107 if (ht_oper && sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) { 3512 ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
3108 /* 3513 cbss->channel,
3109 * cfg80211 already verified that the channel itself can 3514 ht_oper, vht_oper,
3110 * be used, but it didn't check that we can do the right 3515 &chandef);
3111 * HT type, so do that here as well. If HT40 isn't allowed
3112 * on this channel, disable 40 MHz operation.
3113 */
3114 3516
3115 switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { 3517 sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss),
3116 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 3518 local->rx_chains);
3117 if (cbss->channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
3118 ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
3119 else
3120 channel_type = NL80211_CHAN_HT40PLUS;
3121 break;
3122 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
3123 if (cbss->channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
3124 ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
3125 else
3126 channel_type = NL80211_CHAN_HT40MINUS;
3127 break;
3128 }
3129 }
3130 3519
3131 if (!ieee80211_set_channel_type(local, sdata, channel_type)) { 3520 rcu_read_unlock();
3132 /* can only fail due to HT40+/- mismatch */
3133 channel_type = NL80211_CHAN_HT20;
3134 sdata_info(sdata,
3135 "disabling 40 MHz due to multi-vif mismatch\n");
3136 ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
3137 WARN_ON(!ieee80211_set_channel_type(local, sdata,
3138 channel_type));
3139 }
3140 3521
3141 local->oper_channel = cbss->channel; 3522 /* will change later if needed */
3142 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 3523 sdata->smps_mode = IEEE80211_SMPS_OFF;
3143 3524
3144 return 0; 3525 /*
3526 * If this fails (possibly due to channel context sharing
3527 * on incompatible channels, e.g. 80+80 and 160 sharing the
3528 * same control channel) try to use a smaller bandwidth.
3529 */
3530 ret = ieee80211_vif_use_channel(sdata, &chandef,
3531 IEEE80211_CHANCTX_SHARED);
3532 while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
3533 ifmgd->flags |= chandef_downgrade(&chandef);
3534 return ret;
3145} 3535}
3146 3536
3147static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, 3537static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
@@ -3211,7 +3601,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3211 sdata->vif.bss_conf.basic_rates = basic_rates; 3601 sdata->vif.bss_conf.basic_rates = basic_rates;
3212 3602
3213 /* cf. IEEE 802.11 9.2.12 */ 3603 /* cf. IEEE 802.11 9.2.12 */
3214 if (local->oper_channel->band == IEEE80211_BAND_2GHZ && 3604 if (cbss->channel->band == IEEE80211_BAND_2GHZ &&
3215 have_higher_than_11mbit) 3605 have_higher_than_11mbit)
3216 sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; 3606 sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
3217 else 3607 else
@@ -3273,19 +3663,33 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
3273 case NL80211_AUTHTYPE_NETWORK_EAP: 3663 case NL80211_AUTHTYPE_NETWORK_EAP:
3274 auth_alg = WLAN_AUTH_LEAP; 3664 auth_alg = WLAN_AUTH_LEAP;
3275 break; 3665 break;
3666 case NL80211_AUTHTYPE_SAE:
3667 auth_alg = WLAN_AUTH_SAE;
3668 break;
3276 default: 3669 default:
3277 return -EOPNOTSUPP; 3670 return -EOPNOTSUPP;
3278 } 3671 }
3279 3672
3280 auth_data = kzalloc(sizeof(*auth_data) + req->ie_len, GFP_KERNEL); 3673 auth_data = kzalloc(sizeof(*auth_data) + req->sae_data_len +
3674 req->ie_len, GFP_KERNEL);
3281 if (!auth_data) 3675 if (!auth_data)
3282 return -ENOMEM; 3676 return -ENOMEM;
3283 3677
3284 auth_data->bss = req->bss; 3678 auth_data->bss = req->bss;
3285 3679
3680 if (req->sae_data_len >= 4) {
3681 __le16 *pos = (__le16 *) req->sae_data;
3682 auth_data->sae_trans = le16_to_cpu(pos[0]);
3683 auth_data->sae_status = le16_to_cpu(pos[1]);
3684 memcpy(auth_data->data, req->sae_data + 4,
3685 req->sae_data_len - 4);
3686 auth_data->data_len += req->sae_data_len - 4;
3687 }
3688
3286 if (req->ie && req->ie_len) { 3689 if (req->ie && req->ie_len) {
3287 memcpy(auth_data->ie, req->ie, req->ie_len); 3690 memcpy(&auth_data->data[auth_data->data_len],
3288 auth_data->ie_len = req->ie_len; 3691 req->ie, req->ie_len);
3692 auth_data->data_len += req->ie_len;
3289 } 3693 }
3290 3694
3291 if (req->key && req->key_len) { 3695 if (req->key && req->key_len) {
@@ -3355,14 +3759,21 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3355 const u8 *ssidie, *ht_ie; 3759 const u8 *ssidie, *ht_ie;
3356 int i, err; 3760 int i, err;
3357 3761
3358 ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
3359 if (!ssidie)
3360 return -EINVAL;
3361
3362 assoc_data = kzalloc(sizeof(*assoc_data) + req->ie_len, GFP_KERNEL); 3762 assoc_data = kzalloc(sizeof(*assoc_data) + req->ie_len, GFP_KERNEL);
3363 if (!assoc_data) 3763 if (!assoc_data)
3364 return -ENOMEM; 3764 return -ENOMEM;
3365 3765
3766 rcu_read_lock();
3767 ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
3768 if (!ssidie) {
3769 rcu_read_unlock();
3770 kfree(assoc_data);
3771 return -EINVAL;
3772 }
3773 memcpy(assoc_data->ssid, ssidie + 2, ssidie[1]);
3774 assoc_data->ssid_len = ssidie[1];
3775 rcu_read_unlock();
3776
3366 mutex_lock(&ifmgd->mtx); 3777 mutex_lock(&ifmgd->mtx);
3367 3778
3368 if (ifmgd->associated) 3779 if (ifmgd->associated)
@@ -3388,13 +3799,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3388 3799
3389 /* prepare assoc data */ 3800 /* prepare assoc data */
3390 3801
3391 /*
3392 * keep only the 40 MHz disable bit set as it might have
3393 * been set during authentication already, all other bits
3394 * should be reset for a new connection
3395 */
3396 ifmgd->flags &= IEEE80211_STA_DISABLE_40MHZ;
3397
3398 ifmgd->beacon_crc_valid = false; 3802 ifmgd->beacon_crc_valid = false;
3399 3803
3400 /* 3804 /*
@@ -3408,7 +3812,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3408 if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 || 3812 if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
3409 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP || 3813 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
3410 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) { 3814 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
3411 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 3815 ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
3412 ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; 3816 ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
3413 netdev_info(sdata->dev, 3817 netdev_info(sdata->dev,
3414 "disabling HT/VHT due to WEP/TKIP use\n"); 3818 "disabling HT/VHT due to WEP/TKIP use\n");
@@ -3416,7 +3820,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3416 } 3820 }
3417 3821
3418 if (req->flags & ASSOC_REQ_DISABLE_HT) { 3822 if (req->flags & ASSOC_REQ_DISABLE_HT) {
3419 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 3823 ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
3420 ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; 3824 ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
3421 } 3825 }
3422 3826
@@ -3424,7 +3828,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3424 sband = local->hw.wiphy->bands[req->bss->channel->band]; 3828 sband = local->hw.wiphy->bands[req->bss->channel->band];
3425 if (!sband->ht_cap.ht_supported || 3829 if (!sband->ht_cap.ht_supported ||
3426 local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) { 3830 local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
3427 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 3831 ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
3428 if (!bss->wmm_used) 3832 if (!bss->wmm_used)
3429 netdev_info(sdata->dev, 3833 netdev_info(sdata->dev,
3430 "disabling HT as WMM/QoS is not supported by the AP\n"); 3834 "disabling HT as WMM/QoS is not supported by the AP\n");
@@ -3452,11 +3856,11 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3452 3856
3453 if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) { 3857 if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) {
3454 if (ifmgd->powersave) 3858 if (ifmgd->powersave)
3455 ifmgd->ap_smps = IEEE80211_SMPS_DYNAMIC; 3859 sdata->smps_mode = IEEE80211_SMPS_DYNAMIC;
3456 else 3860 else
3457 ifmgd->ap_smps = IEEE80211_SMPS_OFF; 3861 sdata->smps_mode = IEEE80211_SMPS_OFF;
3458 } else 3862 } else
3459 ifmgd->ap_smps = ifmgd->req_smps; 3863 sdata->smps_mode = ifmgd->req_smps;
3460 3864
3461 assoc_data->capability = req->bss->capability; 3865 assoc_data->capability = req->bss->capability;
3462 assoc_data->wmm = bss->wmm_used && 3866 assoc_data->wmm = bss->wmm_used &&
@@ -3464,12 +3868,14 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3464 assoc_data->supp_rates = bss->supp_rates; 3868 assoc_data->supp_rates = bss->supp_rates;
3465 assoc_data->supp_rates_len = bss->supp_rates_len; 3869 assoc_data->supp_rates_len = bss->supp_rates_len;
3466 3870
3871 rcu_read_lock();
3467 ht_ie = ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_OPERATION); 3872 ht_ie = ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_OPERATION);
3468 if (ht_ie && ht_ie[1] >= sizeof(struct ieee80211_ht_operation)) 3873 if (ht_ie && ht_ie[1] >= sizeof(struct ieee80211_ht_operation))
3469 assoc_data->ap_ht_param = 3874 assoc_data->ap_ht_param =
3470 ((struct ieee80211_ht_operation *)(ht_ie + 2))->ht_param; 3875 ((struct ieee80211_ht_operation *)(ht_ie + 2))->ht_param;
3471 else 3876 else
3472 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 3877 ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
3878 rcu_read_unlock();
3473 3879
3474 if (bss->wmm_used && bss->uapsd_supported && 3880 if (bss->wmm_used && bss->uapsd_supported &&
3475 (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) { 3881 (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
@@ -3480,9 +3886,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3480 ifmgd->flags &= ~IEEE80211_STA_UAPSD_ENABLED; 3886 ifmgd->flags &= ~IEEE80211_STA_UAPSD_ENABLED;
3481 } 3887 }
3482 3888
3483 memcpy(assoc_data->ssid, ssidie + 2, ssidie[1]);
3484 assoc_data->ssid_len = ssidie[1];
3485
3486 if (req->prev_bssid) 3889 if (req->prev_bssid)
3487 memcpy(assoc_data->prev_bssid, req->prev_bssid, ETH_ALEN); 3890 memcpy(assoc_data->prev_bssid, req->prev_bssid, ETH_ALEN);
3488 3891
@@ -3505,20 +3908,41 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3505 /* kick off associate process */ 3908 /* kick off associate process */
3506 3909
3507 ifmgd->assoc_data = assoc_data; 3910 ifmgd->assoc_data = assoc_data;
3911 ifmgd->dtim_period = 0;
3508 3912
3509 err = ieee80211_prep_connection(sdata, req->bss, true); 3913 err = ieee80211_prep_connection(sdata, req->bss, true);
3510 if (err) 3914 if (err)
3511 goto err_clear; 3915 goto err_clear;
3512 3916
3513 if (!bss->dtim_period && 3917 if (sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) {
3514 sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) { 3918 const struct cfg80211_bss_ies *beacon_ies;
3515 /* 3919
3516 * Wait up to one beacon interval ... 3920 rcu_read_lock();
3517 * should this be more if we miss one? 3921 beacon_ies = rcu_dereference(req->bss->beacon_ies);
3518 */ 3922 if (!beacon_ies) {
3519 sdata_info(sdata, "waiting for beacon from %pM\n", 3923 /*
3520 ifmgd->bssid); 3924 * Wait up to one beacon interval ...
3521 assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval); 3925 * should this be more if we miss one?
3926 */
3927 sdata_info(sdata, "waiting for beacon from %pM\n",
3928 ifmgd->bssid);
3929 assoc_data->timeout =
3930 TU_TO_EXP_TIME(req->bss->beacon_interval);
3931 } else {
3932 const u8 *tim_ie = cfg80211_find_ie(WLAN_EID_TIM,
3933 beacon_ies->data,
3934 beacon_ies->len);
3935 if (tim_ie && tim_ie[1] >=
3936 sizeof(struct ieee80211_tim_ie)) {
3937 const struct ieee80211_tim_ie *tim;
3938 tim = (void *)(tim_ie + 2);
3939 ifmgd->dtim_period = tim->dtim_period;
3940 }
3941 assoc_data->have_beacon = true;
3942 assoc_data->sent_assoc = false;
3943 assoc_data->timeout = jiffies;
3944 }
3945 rcu_read_unlock();
3522 } else { 3946 } else {
3523 assoc_data->have_beacon = true; 3947 assoc_data->have_beacon = true;
3524 assoc_data->sent_assoc = false; 3948 assoc_data->sent_assoc = false;
@@ -3560,40 +3984,44 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
3560 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3984 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3561 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; 3985 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
3562 bool tx = !req->local_state_change; 3986 bool tx = !req->local_state_change;
3987 bool sent_frame = false;
3563 3988
3564 mutex_lock(&ifmgd->mtx); 3989 mutex_lock(&ifmgd->mtx);
3565 3990
3566 if (ifmgd->auth_data) {
3567 ieee80211_destroy_auth_data(sdata, false);
3568 mutex_unlock(&ifmgd->mtx);
3569 return 0;
3570 }
3571
3572 sdata_info(sdata, 3991 sdata_info(sdata,
3573 "deauthenticating from %pM by local choice (reason=%d)\n", 3992 "deauthenticating from %pM by local choice (reason=%d)\n",
3574 req->bssid, req->reason_code); 3993 req->bssid, req->reason_code);
3575 3994
3576 if (ifmgd->associated && 3995 if (ifmgd->auth_data) {
3577 ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
3578 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
3579 req->reason_code, tx, frame_buf);
3580 } else {
3581 drv_mgd_prepare_tx(sdata->local, sdata); 3996 drv_mgd_prepare_tx(sdata->local, sdata);
3582 ieee80211_send_deauth_disassoc(sdata, req->bssid, 3997 ieee80211_send_deauth_disassoc(sdata, req->bssid,
3583 IEEE80211_STYPE_DEAUTH, 3998 IEEE80211_STYPE_DEAUTH,
3584 req->reason_code, tx, 3999 req->reason_code, tx,
3585 frame_buf); 4000 frame_buf);
4001 ieee80211_destroy_auth_data(sdata, false);
4002 mutex_unlock(&ifmgd->mtx);
4003
4004 sent_frame = tx;
4005 goto out;
3586 } 4006 }
3587 4007
4008 if (ifmgd->associated &&
4009 ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
4010 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
4011 req->reason_code, tx, frame_buf);
4012 sent_frame = tx;
4013 }
3588 mutex_unlock(&ifmgd->mtx); 4014 mutex_unlock(&ifmgd->mtx);
3589 4015
3590 __cfg80211_send_deauth(sdata->dev, frame_buf, 4016 out:
3591 IEEE80211_DEAUTH_FRAME_LEN);
3592
3593 mutex_lock(&sdata->local->mtx); 4017 mutex_lock(&sdata->local->mtx);
3594 ieee80211_recalc_idle(sdata->local); 4018 ieee80211_recalc_idle(sdata->local);
3595 mutex_unlock(&sdata->local->mtx); 4019 mutex_unlock(&sdata->local->mtx);
3596 4020
4021 if (sent_frame)
4022 __cfg80211_send_deauth(sdata->dev, frame_buf,
4023 IEEE80211_DEAUTH_FRAME_LEN);
4024
3597 return 0; 4025 return 0;
3598} 4026}
3599 4027
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 83608ac16780..a5379aea7d09 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -107,6 +107,9 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
107{ 107{
108 struct ieee80211_sub_if_data *sdata; 108 struct ieee80211_sub_if_data *sdata;
109 109
110 if (WARN_ON(local->use_chanctx))
111 return;
112
110 /* 113 /*
111 * notify the AP about us leaving the channel and stop all 114 * notify the AP about us leaving the channel and stop all
112 * STA interfaces. 115 * STA interfaces.
@@ -145,6 +148,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
145{ 148{
146 struct ieee80211_sub_if_data *sdata; 149 struct ieee80211_sub_if_data *sdata;
147 150
151 if (WARN_ON(local->use_chanctx))
152 return;
153
148 mutex_lock(&local->iflist_mtx); 154 mutex_lock(&local->iflist_mtx);
149 list_for_each_entry(sdata, &local->interfaces, list) { 155 list_for_each_entry(sdata, &local->interfaces, list) {
150 if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) 156 if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
@@ -193,13 +199,14 @@ void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc)
193 199
194 if (roc->mgmt_tx_cookie) { 200 if (roc->mgmt_tx_cookie) {
195 if (!WARN_ON(!roc->frame)) { 201 if (!WARN_ON(!roc->frame)) {
196 ieee80211_tx_skb(roc->sdata, roc->frame); 202 ieee80211_tx_skb_tid_band(roc->sdata, roc->frame, 7,
203 roc->chan->band);
197 roc->frame = NULL; 204 roc->frame = NULL;
198 } 205 }
199 } else { 206 } else {
200 cfg80211_ready_on_channel(&roc->sdata->wdev, (unsigned long)roc, 207 cfg80211_ready_on_channel(&roc->sdata->wdev, roc->cookie,
201 roc->chan, roc->chan_type, 208 roc->chan, roc->req_duration,
202 roc->req_duration, GFP_KERNEL); 209 GFP_KERNEL);
203 } 210 }
204 211
205 roc->notified = true; 212 roc->notified = true;
@@ -276,8 +283,7 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)
276 if (!duration) 283 if (!duration)
277 duration = 10; 284 duration = 10;
278 285
279 ret = drv_remain_on_channel(local, roc->chan, 286 ret = drv_remain_on_channel(local, roc->sdata, roc->chan,
280 roc->chan_type,
281 duration); 287 duration);
282 288
283 roc->started = true; 289 roc->started = true;
@@ -313,8 +319,7 @@ void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)
313 319
314 if (!roc->mgmt_tx_cookie) 320 if (!roc->mgmt_tx_cookie)
315 cfg80211_remain_on_channel_expired(&roc->sdata->wdev, 321 cfg80211_remain_on_channel_expired(&roc->sdata->wdev,
316 (unsigned long)roc, 322 roc->cookie, roc->chan,
317 roc->chan, roc->chan_type,
318 GFP_KERNEL); 323 GFP_KERNEL);
319 324
320 list_for_each_entry_safe(dep, tmp, &roc->dependents, list) 325 list_for_each_entry_safe(dep, tmp, &roc->dependents, list)
@@ -353,7 +358,6 @@ void ieee80211_sw_roc_work(struct work_struct *work)
353 ieee80211_recalc_idle(local); 358 ieee80211_recalc_idle(local);
354 359
355 local->tmp_channel = roc->chan; 360 local->tmp_channel = roc->chan;
356 local->tmp_channel_type = roc->chan_type;
357 ieee80211_hw_config(local, 0); 361 ieee80211_hw_config(local, 0);
358 362
359 /* tell userspace or send frame */ 363 /* tell userspace or send frame */
@@ -458,8 +462,6 @@ void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata)
458 list_move_tail(&roc->list, &tmp_list); 462 list_move_tail(&roc->list, &tmp_list);
459 roc->abort = true; 463 roc->abort = true;
460 } 464 }
461
462 ieee80211_start_next_roc(local);
463 mutex_unlock(&local->mtx); 465 mutex_unlock(&local->mtx);
464 466
465 list_for_each_entry_safe(roc, tmp, &tmp_list, list) { 467 list_for_each_entry_safe(roc, tmp, &tmp_list, list) {
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 5c572e7a1a71..79a48f37d409 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -33,6 +33,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
33 struct ieee80211_local *local = hw_to_local(hw); 33 struct ieee80211_local *local = hw_to_local(hw);
34 struct ieee80211_sub_if_data *sdata; 34 struct ieee80211_sub_if_data *sdata;
35 struct sta_info *sta; 35 struct sta_info *sta;
36 struct ieee80211_chanctx *ctx;
36 37
37 if (!local->open_count) 38 if (!local->open_count)
38 goto suspend; 39 goto suspend;
@@ -135,12 +136,55 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
135 ieee80211_bss_info_change_notify(sdata, 136 ieee80211_bss_info_change_notify(sdata,
136 BSS_CHANGED_BEACON_ENABLED); 137 BSS_CHANGED_BEACON_ENABLED);
137 138
139 if (sdata->vif.type == NL80211_IFTYPE_AP &&
140 rcu_access_pointer(sdata->u.ap.beacon))
141 drv_stop_ap(local, sdata);
142
143 if (local->use_chanctx) {
144 struct ieee80211_chanctx_conf *conf;
145
146 mutex_lock(&local->chanctx_mtx);
147 conf = rcu_dereference_protected(
148 sdata->vif.chanctx_conf,
149 lockdep_is_held(&local->chanctx_mtx));
150 if (conf) {
151 ctx = container_of(conf,
152 struct ieee80211_chanctx,
153 conf);
154 drv_unassign_vif_chanctx(local, sdata, ctx);
155 }
156
157 mutex_unlock(&local->chanctx_mtx);
158 }
138 drv_remove_interface(local, sdata); 159 drv_remove_interface(local, sdata);
139 } 160 }
140 161
141 sdata = rtnl_dereference(local->monitor_sdata); 162 sdata = rtnl_dereference(local->monitor_sdata);
142 if (sdata) 163 if (sdata) {
164 if (local->use_chanctx) {
165 struct ieee80211_chanctx_conf *conf;
166
167 mutex_lock(&local->chanctx_mtx);
168 conf = rcu_dereference_protected(
169 sdata->vif.chanctx_conf,
170 lockdep_is_held(&local->chanctx_mtx));
171 if (conf) {
172 ctx = container_of(conf,
173 struct ieee80211_chanctx,
174 conf);
175 drv_unassign_vif_chanctx(local, sdata, ctx);
176 }
177
178 mutex_unlock(&local->chanctx_mtx);
179 }
180
143 drv_remove_interface(local, sdata); 181 drv_remove_interface(local, sdata);
182 }
183
184 mutex_lock(&local->chanctx_mtx);
185 list_for_each_entry(ctx, &local->chanctx_list, list)
186 drv_remove_chanctx(local, ctx);
187 mutex_unlock(&local->chanctx_mtx);
144 188
145 /* stop hardware - this must stop RX */ 189 /* stop hardware - this must stop RX */
146 if (local->open_count) 190 if (local->open_count)
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 3313c117b322..dd88381c53b7 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -391,7 +391,7 @@ static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
391 return; 391 return;
392 392
393 /* if HT BSS, and we handle a data frame, also try HT rates */ 393 /* if HT BSS, and we handle a data frame, also try HT rates */
394 if (txrc->bss_conf->channel_type == NL80211_CHAN_NO_HT) 394 if (txrc->bss_conf->chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
395 return; 395 return;
396 396
397 fc = hdr->frame_control; 397 fc = hdr->frame_control;
@@ -408,8 +408,7 @@ static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
408 408
409 alt_rate.flags |= IEEE80211_TX_RC_MCS; 409 alt_rate.flags |= IEEE80211_TX_RC_MCS;
410 410
411 if ((txrc->bss_conf->channel_type == NL80211_CHAN_HT40MINUS) || 411 if (txrc->bss_conf->chandef.width == NL80211_CHAN_WIDTH_40)
412 (txrc->bss_conf->channel_type == NL80211_CHAN_HT40PLUS))
413 alt_rate.flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; 412 alt_rate.flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
414 413
415 if (rate_idx_match_mcs_mask(&alt_rate, mcs_mask)) { 414 if (rate_idx_match_mcs_mask(&alt_rate, mcs_mask)) {
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 10de668eb9f6..301386dabf88 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -52,11 +52,21 @@ static inline void rate_control_rate_init(struct sta_info *sta)
52 struct ieee80211_sta *ista = &sta->sta; 52 struct ieee80211_sta *ista = &sta->sta;
53 void *priv_sta = sta->rate_ctrl_priv; 53 void *priv_sta = sta->rate_ctrl_priv;
54 struct ieee80211_supported_band *sband; 54 struct ieee80211_supported_band *sband;
55 struct ieee80211_chanctx_conf *chanctx_conf;
55 56
56 if (!ref) 57 if (!ref)
57 return; 58 return;
58 59
59 sband = local->hw.wiphy->bands[local->oper_channel->band]; 60 rcu_read_lock();
61
62 chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
63 if (WARN_ON(!chanctx_conf)) {
64 rcu_read_unlock();
65 return;
66 }
67
68 sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
69 rcu_read_unlock();
60 70
61 ref->ops->rate_init(ref->priv, sband, ista, priv_sta); 71 ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
62 set_sta_flag(sta, WLAN_STA_RATE_CONTROL); 72 set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 79633ae06fd6..8c5acdc06226 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -154,6 +154,7 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
154 struct ieee80211_sta *sta, void *priv_sta, 154 struct ieee80211_sta *sta, void *priv_sta,
155 struct sk_buff *skb) 155 struct sk_buff *skb)
156{ 156{
157 struct minstrel_priv *mp = priv;
157 struct minstrel_sta_info *mi = priv_sta; 158 struct minstrel_sta_info *mi = priv_sta;
158 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 159 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
159 struct ieee80211_tx_rate *ar = info->status.rates; 160 struct ieee80211_tx_rate *ar = info->status.rates;
@@ -181,6 +182,10 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
181 182
182 if (mi->sample_deferred > 0) 183 if (mi->sample_deferred > 0)
183 mi->sample_deferred--; 184 mi->sample_deferred--;
185
186 if (time_after(jiffies, mi->stats_update +
187 (mp->update_interval * HZ) / 1000))
188 minstrel_update_stats(mp, mi);
184} 189}
185 190
186 191
@@ -235,10 +240,6 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
235 240
236 mrr = mp->has_mrr && !txrc->rts && !txrc->bss_conf->use_cts_prot; 241 mrr = mp->has_mrr && !txrc->rts && !txrc->bss_conf->use_cts_prot;
237 242
238 if (time_after(jiffies, mi->stats_update + (mp->update_interval *
239 HZ) / 1000))
240 minstrel_update_stats(mp, mi);
241
242 ndx = mi->max_tp_rate; 243 ndx = mi->max_tp_rate;
243 244
244 if (mrr) 245 if (mrr)
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index fb1d4aa65e8c..9f9c453bc45d 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -389,9 +389,9 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
389 struct ieee80211_tx_rate *ar = info->status.rates; 389 struct ieee80211_tx_rate *ar = info->status.rates;
390 struct minstrel_rate_stats *rate, *rate2; 390 struct minstrel_rate_stats *rate, *rate2;
391 struct minstrel_priv *mp = priv; 391 struct minstrel_priv *mp = priv;
392 bool last = false; 392 bool last;
393 int group; 393 int group;
394 int i = 0; 394 int i;
395 395
396 if (!msp->is_ht) 396 if (!msp->is_ht)
397 return mac80211_minstrel.tx_status(priv, sband, sta, &msp->legacy, skb); 397 return mac80211_minstrel.tx_status(priv, sband, sta, &msp->legacy, skb);
@@ -419,13 +419,11 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
419 if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) 419 if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
420 mi->sample_packets += info->status.ampdu_len; 420 mi->sample_packets += info->status.ampdu_len;
421 421
422 last = !minstrel_ht_txstat_valid(&ar[0]);
422 for (i = 0; !last; i++) { 423 for (i = 0; !last; i++) {
423 last = (i == IEEE80211_TX_MAX_RATES - 1) || 424 last = (i == IEEE80211_TX_MAX_RATES - 1) ||
424 !minstrel_ht_txstat_valid(&ar[i + 1]); 425 !minstrel_ht_txstat_valid(&ar[i + 1]);
425 426
426 if (!minstrel_ht_txstat_valid(&ar[i]))
427 break;
428
429 group = minstrel_ht_get_group_idx(&ar[i]); 427 group = minstrel_ht_get_group_idx(&ar[i]);
430 rate = &mi->groups[group].rates[ar[i].idx % 8]; 428 rate = &mi->groups[group].rates[ar[i].idx % 8];
431 429
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 00ade7feb2e3..580704eba8b8 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -40,6 +40,8 @@
40static struct sk_buff *remove_monitor_info(struct ieee80211_local *local, 40static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
41 struct sk_buff *skb) 41 struct sk_buff *skb)
42{ 42{
43 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
44
43 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) { 45 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
44 if (likely(skb->len > FCS_LEN)) 46 if (likely(skb->len > FCS_LEN))
45 __pskb_trim(skb, skb->len - FCS_LEN); 47 __pskb_trim(skb, skb->len - FCS_LEN);
@@ -47,24 +49,29 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
47 /* driver bug */ 49 /* driver bug */
48 WARN_ON(1); 50 WARN_ON(1);
49 dev_kfree_skb(skb); 51 dev_kfree_skb(skb);
50 skb = NULL; 52 return NULL;
51 } 53 }
52 } 54 }
53 55
56 if (status->vendor_radiotap_len)
57 __pskb_pull(skb, status->vendor_radiotap_len);
58
54 return skb; 59 return skb;
55} 60}
56 61
57static inline int should_drop_frame(struct sk_buff *skb, 62static inline int should_drop_frame(struct sk_buff *skb, int present_fcs_len)
58 int present_fcs_len)
59{ 63{
60 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 64 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
61 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 65 struct ieee80211_hdr *hdr;
66
67 hdr = (void *)(skb->data + status->vendor_radiotap_len);
62 68
63 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | 69 if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
64 RX_FLAG_FAILED_PLCP_CRC | 70 RX_FLAG_FAILED_PLCP_CRC |
65 RX_FLAG_AMPDU_IS_ZEROLEN)) 71 RX_FLAG_AMPDU_IS_ZEROLEN))
66 return 1; 72 return 1;
67 if (unlikely(skb->len < 16 + present_fcs_len)) 73 if (unlikely(skb->len < 16 + present_fcs_len +
74 status->vendor_radiotap_len))
68 return 1; 75 return 1;
69 if (ieee80211_is_ctl(hdr->frame_control) && 76 if (ieee80211_is_ctl(hdr->frame_control) &&
70 !ieee80211_is_pspoll(hdr->frame_control) && 77 !ieee80211_is_pspoll(hdr->frame_control) &&
@@ -74,32 +81,53 @@ static inline int should_drop_frame(struct sk_buff *skb,
74} 81}
75 82
76static int 83static int
77ieee80211_rx_radiotap_len(struct ieee80211_local *local, 84ieee80211_rx_radiotap_space(struct ieee80211_local *local,
78 struct ieee80211_rx_status *status) 85 struct ieee80211_rx_status *status)
79{ 86{
80 int len; 87 int len;
81 88
82 /* always present fields */ 89 /* always present fields */
83 len = sizeof(struct ieee80211_radiotap_header) + 9; 90 len = sizeof(struct ieee80211_radiotap_header) + 9;
84 91
85 if (status->flag & RX_FLAG_MACTIME_MPDU) 92 /* allocate extra bitmap */
93 if (status->vendor_radiotap_len)
94 len += 4;
95
96 if (ieee80211_have_rx_timestamp(status)) {
97 len = ALIGN(len, 8);
86 len += 8; 98 len += 8;
99 }
87 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) 100 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
88 len += 1; 101 len += 1;
89 102
90 if (len & 1) /* padding for RX_FLAGS if necessary */ 103 /* padding for RX_FLAGS if necessary */
91 len++; 104 len = ALIGN(len, 2);
92 105
93 if (status->flag & RX_FLAG_HT) /* HT info */ 106 if (status->flag & RX_FLAG_HT) /* HT info */
94 len += 3; 107 len += 3;
95 108
96 if (status->flag & RX_FLAG_AMPDU_DETAILS) { 109 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
97 /* padding */ 110 len = ALIGN(len, 4);
98 while (len & 3)
99 len++;
100 len += 8; 111 len += 8;
101 } 112 }
102 113
114 if (status->flag & RX_FLAG_VHT) {
115 len = ALIGN(len, 2);
116 len += 12;
117 }
118
119 if (status->vendor_radiotap_len) {
120 if (WARN_ON_ONCE(status->vendor_radiotap_align == 0))
121 status->vendor_radiotap_align = 1;
122 /* align standard part of vendor namespace */
123 len = ALIGN(len, 2);
124 /* allocate standard part of vendor namespace */
125 len += 6;
126 /* align vendor-defined part */
127 len = ALIGN(len, status->vendor_radiotap_align);
128 /* vendor-defined part is already in skb */
129 }
130
103 return len; 131 return len;
104} 132}
105 133
@@ -118,6 +146,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
118 struct ieee80211_radiotap_header *rthdr; 146 struct ieee80211_radiotap_header *rthdr;
119 unsigned char *pos; 147 unsigned char *pos;
120 u16 rx_flags = 0; 148 u16 rx_flags = 0;
149 int mpdulen;
150
151 mpdulen = skb->len;
152 if (!(has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)))
153 mpdulen += FCS_LEN;
121 154
122 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len); 155 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
123 memset(rthdr, 0, rtap_len); 156 memset(rthdr, 0, rtap_len);
@@ -128,17 +161,30 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
128 (1 << IEEE80211_RADIOTAP_CHANNEL) | 161 (1 << IEEE80211_RADIOTAP_CHANNEL) |
129 (1 << IEEE80211_RADIOTAP_ANTENNA) | 162 (1 << IEEE80211_RADIOTAP_ANTENNA) |
130 (1 << IEEE80211_RADIOTAP_RX_FLAGS)); 163 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
131 rthdr->it_len = cpu_to_le16(rtap_len); 164 rthdr->it_len = cpu_to_le16(rtap_len + status->vendor_radiotap_len);
132 165
133 pos = (unsigned char *)(rthdr+1); 166 pos = (unsigned char *)(rthdr + 1);
167
168 if (status->vendor_radiotap_len) {
169 rthdr->it_present |=
170 cpu_to_le32(BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE)) |
171 cpu_to_le32(BIT(IEEE80211_RADIOTAP_EXT));
172 put_unaligned_le32(status->vendor_radiotap_bitmap, pos);
173 pos += 4;
174 }
134 175
135 /* the order of the following fields is important */ 176 /* the order of the following fields is important */
136 177
137 /* IEEE80211_RADIOTAP_TSFT */ 178 /* IEEE80211_RADIOTAP_TSFT */
138 if (status->flag & RX_FLAG_MACTIME_MPDU) { 179 if (ieee80211_have_rx_timestamp(status)) {
139 put_unaligned_le64(status->mactime, pos); 180 /* padding */
140 rthdr->it_present |= 181 while ((pos - (u8 *)rthdr) & 7)
141 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); 182 *pos++ = 0;
183 put_unaligned_le64(
184 ieee80211_calculate_rx_timestamp(local, status,
185 mpdulen, 0),
186 pos);
187 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
142 pos += 8; 188 pos += 8;
143 } 189 }
144 190
@@ -152,7 +198,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
152 pos++; 198 pos++;
153 199
154 /* IEEE80211_RADIOTAP_RATE */ 200 /* IEEE80211_RADIOTAP_RATE */
155 if (!rate || status->flag & RX_FLAG_HT) { 201 if (!rate || status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) {
156 /* 202 /*
157 * Without rate information don't add it. If we have, 203 * Without rate information don't add it. If we have,
158 * MCS information is a separate field in radiotap, 204 * MCS information is a separate field in radiotap,
@@ -172,7 +218,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
172 if (status->band == IEEE80211_BAND_5GHZ) 218 if (status->band == IEEE80211_BAND_5GHZ)
173 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ, 219 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
174 pos); 220 pos);
175 else if (status->flag & RX_FLAG_HT) 221 else if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
176 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ, 222 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
177 pos); 223 pos);
178 else if (rate && rate->flags & IEEE80211_RATE_ERP_G) 224 else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
@@ -205,7 +251,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
205 /* IEEE80211_RADIOTAP_RX_FLAGS */ 251 /* IEEE80211_RADIOTAP_RX_FLAGS */
206 /* ensure 2 byte alignment for the 2 byte field as required */ 252 /* ensure 2 byte alignment for the 2 byte field as required */
207 if ((pos - (u8 *)rthdr) & 1) 253 if ((pos - (u8 *)rthdr) & 1)
208 pos++; 254 *pos++ = 0;
209 if (status->flag & RX_FLAG_FAILED_PLCP_CRC) 255 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
210 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP; 256 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
211 put_unaligned_le16(rx_flags, pos); 257 put_unaligned_le16(rx_flags, pos);
@@ -255,6 +301,56 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
255 *pos++ = 0; 301 *pos++ = 0;
256 *pos++ = 0; 302 *pos++ = 0;
257 } 303 }
304
305 if (status->flag & RX_FLAG_VHT) {
306 u16 known = local->hw.radiotap_vht_details;
307
308 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
309 /* known field - how to handle 80+80? */
310 if (status->flag & RX_FLAG_80P80MHZ)
311 known &= ~IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH;
312 put_unaligned_le16(known, pos);
313 pos += 2;
314 /* flags */
315 if (status->flag & RX_FLAG_SHORT_GI)
316 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
317 pos++;
318 /* bandwidth */
319 if (status->flag & RX_FLAG_80MHZ)
320 *pos++ = 4;
321 else if (status->flag & RX_FLAG_80P80MHZ)
322 *pos++ = 0; /* marked not known above */
323 else if (status->flag & RX_FLAG_160MHZ)
324 *pos++ = 11;
325 else if (status->flag & RX_FLAG_40MHZ)
326 *pos++ = 1;
327 else /* 20 MHz */
328 *pos++ = 0;
329 /* MCS/NSS */
330 *pos = (status->rate_idx << 4) | status->vht_nss;
331 pos += 4;
332 /* coding field */
333 pos++;
334 /* group ID */
335 pos++;
336 /* partial_aid */
337 pos += 2;
338 }
339
340 if (status->vendor_radiotap_len) {
341 /* ensure 2 byte alignment for the vendor field as required */
342 if ((pos - (u8 *)rthdr) & 1)
343 *pos++ = 0;
344 *pos++ = status->vendor_radiotap_oui[0];
345 *pos++ = status->vendor_radiotap_oui[1];
346 *pos++ = status->vendor_radiotap_oui[2];
347 *pos++ = status->vendor_radiotap_subns;
348 put_unaligned_le16(status->vendor_radiotap_len, pos);
349 pos += 2;
350 /* align the actual payload as requested */
351 while ((pos - (u8 *)rthdr) & (status->vendor_radiotap_align - 1))
352 *pos++ = 0;
353 }
258} 354}
259 355
260/* 356/*
@@ -282,14 +378,11 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
282 * the SKB because it has a bad FCS/PLCP checksum. 378 * the SKB because it has a bad FCS/PLCP checksum.
283 */ 379 */
284 380
285 /* room for the radiotap header based on driver features */
286 needed_headroom = ieee80211_rx_radiotap_len(local, status);
287
288 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 381 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
289 present_fcs_len = FCS_LEN; 382 present_fcs_len = FCS_LEN;
290 383
291 /* make sure hdr->frame_control is on the linear part */ 384 /* ensure hdr->frame_control and vendor radiotap data are in skb head */
292 if (!pskb_may_pull(origskb, 2)) { 385 if (!pskb_may_pull(origskb, 2 + status->vendor_radiotap_len)) {
293 dev_kfree_skb(origskb); 386 dev_kfree_skb(origskb);
294 return NULL; 387 return NULL;
295 } 388 }
@@ -303,6 +396,9 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
303 return remove_monitor_info(local, origskb); 396 return remove_monitor_info(local, origskb);
304 } 397 }
305 398
399 /* room for the radiotap header based on driver features */
400 needed_headroom = ieee80211_rx_radiotap_space(local, status);
401
306 if (should_drop_frame(origskb, present_fcs_len)) { 402 if (should_drop_frame(origskb, present_fcs_len)) {
307 /* only need to expand headroom if necessary */ 403 /* only need to expand headroom if necessary */
308 skb = origskb; 404 skb = origskb;
@@ -374,7 +470,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
374 return origskb; 470 return origskb;
375} 471}
376 472
377
378static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) 473static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
379{ 474{
380 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 475 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
@@ -403,10 +498,10 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
403 * 498 *
404 * We also use that counter for non-QoS STAs. 499 * We also use that counter for non-QoS STAs.
405 */ 500 */
406 seqno_idx = NUM_RX_DATA_QUEUES; 501 seqno_idx = IEEE80211_NUM_TIDS;
407 security_idx = 0; 502 security_idx = 0;
408 if (ieee80211_is_mgmt(hdr->frame_control)) 503 if (ieee80211_is_mgmt(hdr->frame_control))
409 security_idx = NUM_RX_DATA_QUEUES; 504 security_idx = IEEE80211_NUM_TIDS;
410 tid = 0; 505 tid = 0;
411 } 506 }
412 507
@@ -481,8 +576,7 @@ static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
481 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data; 576 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
482 struct ieee80211_mmie *mmie; 577 struct ieee80211_mmie *mmie;
483 578
484 if (skb->len < 24 + sizeof(*mmie) || 579 if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
485 !is_multicast_ether_addr(hdr->da))
486 return -1; 580 return -1;
487 581
488 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr)) 582 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
@@ -497,9 +591,7 @@ static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
497 return le16_to_cpu(mmie->key_id); 591 return le16_to_cpu(mmie->key_id);
498} 592}
499 593
500 594static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
501static ieee80211_rx_result
502ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
503{ 595{
504 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 596 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
505 char *dev_addr = rx->sdata->vif.addr; 597 char *dev_addr = rx->sdata->vif.addr;
@@ -507,7 +599,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
507 if (ieee80211_is_data(hdr->frame_control)) { 599 if (ieee80211_is_data(hdr->frame_control)) {
508 if (is_multicast_ether_addr(hdr->addr1)) { 600 if (is_multicast_ether_addr(hdr->addr1)) {
509 if (ieee80211_has_tods(hdr->frame_control) || 601 if (ieee80211_has_tods(hdr->frame_control) ||
510 !ieee80211_has_fromds(hdr->frame_control)) 602 !ieee80211_has_fromds(hdr->frame_control))
511 return RX_DROP_MONITOR; 603 return RX_DROP_MONITOR;
512 if (ether_addr_equal(hdr->addr3, dev_addr)) 604 if (ether_addr_equal(hdr->addr3, dev_addr))
513 return RX_DROP_MONITOR; 605 return RX_DROP_MONITOR;
@@ -539,7 +631,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
539 mgmt = (struct ieee80211_mgmt *)hdr; 631 mgmt = (struct ieee80211_mgmt *)hdr;
540 category = mgmt->u.action.category; 632 category = mgmt->u.action.category;
541 if (category != WLAN_CATEGORY_MESH_ACTION && 633 if (category != WLAN_CATEGORY_MESH_ACTION &&
542 category != WLAN_CATEGORY_SELF_PROTECTED) 634 category != WLAN_CATEGORY_SELF_PROTECTED)
543 return RX_DROP_MONITOR; 635 return RX_DROP_MONITOR;
544 return RX_CONTINUE; 636 return RX_CONTINUE;
545 } 637 }
@@ -551,7 +643,6 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
551 return RX_CONTINUE; 643 return RX_CONTINUE;
552 644
553 return RX_DROP_MONITOR; 645 return RX_DROP_MONITOR;
554
555 } 646 }
556 647
557 return RX_CONTINUE; 648 return RX_CONTINUE;
@@ -575,7 +666,6 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
575 return (sq1 - sq2) & SEQ_MASK; 666 return (sq1 - sq2) & SEQ_MASK;
576} 667}
577 668
578
579static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata, 669static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
580 struct tid_ampdu_rx *tid_agg_rx, 670 struct tid_ampdu_rx *tid_agg_rx,
581 int index) 671 int index)
@@ -1148,12 +1238,19 @@ ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1148 return RX_CONTINUE; 1238 return RX_CONTINUE;
1149} 1239}
1150 1240
1151static void ap_sta_ps_start(struct sta_info *sta) 1241static void sta_ps_start(struct sta_info *sta)
1152{ 1242{
1153 struct ieee80211_sub_if_data *sdata = sta->sdata; 1243 struct ieee80211_sub_if_data *sdata = sta->sdata;
1154 struct ieee80211_local *local = sdata->local; 1244 struct ieee80211_local *local = sdata->local;
1245 struct ps_data *ps;
1246
1247 if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
1248 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1249 ps = &sdata->bss->ps;
1250 else
1251 return;
1155 1252
1156 atomic_inc(&sdata->bss->num_sta_ps); 1253 atomic_inc(&ps->num_sta_ps);
1157 set_sta_flag(sta, WLAN_STA_PS_STA); 1254 set_sta_flag(sta, WLAN_STA_PS_STA);
1158 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS)) 1255 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
1159 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta); 1256 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
@@ -1161,7 +1258,7 @@ static void ap_sta_ps_start(struct sta_info *sta)
1161 sta->sta.addr, sta->sta.aid); 1258 sta->sta.addr, sta->sta.aid);
1162} 1259}
1163 1260
1164static void ap_sta_ps_end(struct sta_info *sta) 1261static void sta_ps_end(struct sta_info *sta)
1165{ 1262{
1166 ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n", 1263 ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
1167 sta->sta.addr, sta->sta.aid); 1264 sta->sta.addr, sta->sta.aid);
@@ -1188,9 +1285,9 @@ int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
1188 return -EINVAL; 1285 return -EINVAL;
1189 1286
1190 if (start) 1287 if (start)
1191 ap_sta_ps_start(sta_inf); 1288 sta_ps_start(sta_inf);
1192 else 1289 else
1193 ap_sta_ps_end(sta_inf); 1290 sta_ps_end(sta_inf);
1194 1291
1195 return 0; 1292 return 0;
1196} 1293}
@@ -1284,17 +1381,22 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1284 1381
1285 /* 1382 /*
1286 * Update last_rx only for IBSS packets which are for the current 1383 * Update last_rx only for IBSS packets which are for the current
1287 * BSSID to avoid keeping the current IBSS network alive in cases 1384 * BSSID and for station already AUTHORIZED to avoid keeping the
1288 * where other STAs start using different BSSID. 1385 * current IBSS network alive in cases where other STAs start
1386 * using different BSSID. This will also give the station another
1387 * chance to restart the authentication/authorization in case
1388 * something went wrong the first time.
1289 */ 1389 */
1290 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) { 1390 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1291 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, 1391 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1292 NL80211_IFTYPE_ADHOC); 1392 NL80211_IFTYPE_ADHOC);
1293 if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid)) { 1393 if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
1394 test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
1294 sta->last_rx = jiffies; 1395 sta->last_rx = jiffies;
1295 if (ieee80211_is_data(hdr->frame_control)) { 1396 if (ieee80211_is_data(hdr->frame_control)) {
1296 sta->last_rx_rate_idx = status->rate_idx; 1397 sta->last_rx_rate_idx = status->rate_idx;
1297 sta->last_rx_rate_flag = status->flag; 1398 sta->last_rx_rate_flag = status->flag;
1399 sta->last_rx_rate_vht_nss = status->vht_nss;
1298 } 1400 }
1299 } 1401 }
1300 } else if (!is_multicast_ether_addr(hdr->addr1)) { 1402 } else if (!is_multicast_ether_addr(hdr->addr1)) {
@@ -1306,6 +1408,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1306 if (ieee80211_is_data(hdr->frame_control)) { 1408 if (ieee80211_is_data(hdr->frame_control)) {
1307 sta->last_rx_rate_idx = status->rate_idx; 1409 sta->last_rx_rate_idx = status->rate_idx;
1308 sta->last_rx_rate_flag = status->flag; 1410 sta->last_rx_rate_flag = status->flag;
1411 sta->last_rx_rate_vht_nss = status->vht_nss;
1309 } 1412 }
1310 } 1413 }
1311 1414
@@ -1342,10 +1445,10 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1342 */ 1445 */
1343 if (ieee80211_is_data(hdr->frame_control) && 1446 if (ieee80211_is_data(hdr->frame_control) &&
1344 !ieee80211_has_pm(hdr->frame_control)) 1447 !ieee80211_has_pm(hdr->frame_control))
1345 ap_sta_ps_end(sta); 1448 sta_ps_end(sta);
1346 } else { 1449 } else {
1347 if (ieee80211_has_pm(hdr->frame_control)) 1450 if (ieee80211_has_pm(hdr->frame_control))
1348 ap_sta_ps_start(sta); 1451 sta_ps_start(sta);
1349 } 1452 }
1350 } 1453 }
1351 1454
@@ -1391,9 +1494,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1391 struct sk_buff **skb) 1494 struct sk_buff **skb)
1392{ 1495{
1393 struct ieee80211_fragment_entry *entry; 1496 struct ieee80211_fragment_entry *entry;
1394 int idx;
1395 1497
1396 idx = sdata->fragment_next;
1397 entry = &sdata->fragments[sdata->fragment_next++]; 1498 entry = &sdata->fragments[sdata->fragment_next++];
1398 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX) 1499 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
1399 sdata->fragment_next = 0; 1500 sdata->fragment_next = 0;
@@ -1580,18 +1681,15 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1580 return RX_CONTINUE; 1681 return RX_CONTINUE;
1581} 1682}
1582 1683
1583static int 1684static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1584ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1585{ 1685{
1586 if (unlikely(!rx->sta || 1686 if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
1587 !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
1588 return -EACCES; 1687 return -EACCES;
1589 1688
1590 return 0; 1689 return 0;
1591} 1690}
1592 1691
1593static int 1692static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1594ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1595{ 1693{
1596 struct sk_buff *skb = rx->skb; 1694 struct sk_buff *skb = rx->skb;
1597 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1695 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
@@ -1613,8 +1711,7 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1613 return 0; 1711 return 0;
1614} 1712}
1615 1713
1616static int 1714static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1617ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1618{ 1715{
1619 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1716 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1620 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1717 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
@@ -1998,7 +2095,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1998 } else { 2095 } else {
1999 /* unable to resolve next hop */ 2096 /* unable to resolve next hop */
2000 mesh_path_error_tx(ifmsh->mshcfg.element_ttl, fwd_hdr->addr3, 2097 mesh_path_error_tx(ifmsh->mshcfg.element_ttl, fwd_hdr->addr3,
2001 0, reason, fwd_hdr->addr2, sdata); 2098 0, reason, fwd_hdr->addr2, sdata);
2002 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); 2099 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
2003 kfree_skb(fwd_skb); 2100 kfree_skb(fwd_skb);
2004 return RX_DROP_MONITOR; 2101 return RX_DROP_MONITOR;
@@ -2207,7 +2304,7 @@ ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
2207 2304
2208 cfg80211_report_obss_beacon(rx->local->hw.wiphy, 2305 cfg80211_report_obss_beacon(rx->local->hw.wiphy,
2209 rx->skb->data, rx->skb->len, 2306 rx->skb->data, rx->skb->len,
2210 status->freq, sig, GFP_ATOMIC); 2307 status->freq, sig);
2211 rx->flags |= IEEE80211_RX_BEACON_REPORTED; 2308 rx->flags |= IEEE80211_RX_BEACON_REPORTED;
2212 } 2309 }
2213 2310
@@ -2236,7 +2333,8 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2236 if (len < IEEE80211_MIN_ACTION_SIZE) 2333 if (len < IEEE80211_MIN_ACTION_SIZE)
2237 return RX_DROP_UNUSABLE; 2334 return RX_DROP_UNUSABLE;
2238 2335
2239 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) 2336 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
2337 mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED)
2240 return RX_DROP_UNUSABLE; 2338 return RX_DROP_UNUSABLE;
2241 2339
2242 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 2340 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
@@ -2407,7 +2505,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2407 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2505 if (!ieee80211_vif_is_mesh(&sdata->vif))
2408 break; 2506 break;
2409 if (mesh_action_is_path_sel(mgmt) && 2507 if (mesh_action_is_path_sel(mgmt) &&
2410 (!mesh_path_sel_is_hwmp(sdata))) 2508 !mesh_path_sel_is_hwmp(sdata))
2411 break; 2509 break;
2412 goto queue; 2510 goto queue;
2413 } 2511 }
@@ -2463,7 +2561,6 @@ ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
2463 return RX_QUEUED; 2561 return RX_QUEUED;
2464 } 2562 }
2465 2563
2466
2467 return RX_CONTINUE; 2564 return RX_CONTINUE;
2468} 2565}
2469 2566
@@ -2593,7 +2690,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2593 goto out_free_skb; 2690 goto out_free_skb;
2594 2691
2595 /* room for the radiotap header based on driver features */ 2692 /* room for the radiotap header based on driver features */
2596 needed_headroom = ieee80211_rx_radiotap_len(local, status); 2693 needed_headroom = ieee80211_rx_radiotap_space(local, status);
2597 2694
2598 if (skb_headroom(skb) < needed_headroom && 2695 if (skb_headroom(skb) < needed_headroom &&
2599 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) 2696 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
@@ -2656,7 +2753,8 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
2656 status = IEEE80211_SKB_RXCB((rx->skb)); 2753 status = IEEE80211_SKB_RXCB((rx->skb));
2657 2754
2658 sband = rx->local->hw.wiphy->bands[status->band]; 2755 sband = rx->local->hw.wiphy->bands[status->band];
2659 if (!(status->flag & RX_FLAG_HT)) 2756 if (!(status->flag & RX_FLAG_HT) &&
2757 !(status->flag & RX_FLAG_VHT))
2660 rate = &sband->bitrates[status->rate_idx]; 2758 rate = &sband->bitrates[status->rate_idx];
2661 2759
2662 ieee80211_rx_cooked_monitor(rx, rate); 2760 ieee80211_rx_cooked_monitor(rx, rate);
@@ -2823,8 +2921,8 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2823 status->rx_flags &= ~IEEE80211_RX_RA_MATCH; 2921 status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2824 } else if (!rx->sta) { 2922 } else if (!rx->sta) {
2825 int rate_idx; 2923 int rate_idx;
2826 if (status->flag & RX_FLAG_HT) 2924 if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
2827 rate_idx = 0; /* TODO: HT rates */ 2925 rate_idx = 0; /* TODO: HT/VHT rates */
2828 else 2926 else
2829 rate_idx = status->rate_idx; 2927 rate_idx = status->rate_idx;
2830 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2, 2928 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
@@ -3048,8 +3146,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
3048 3146
3049 WARN_ON_ONCE(softirq_count() == 0); 3147 WARN_ON_ONCE(softirq_count() == 0);
3050 3148
3051 if (WARN_ON(status->band < 0 || 3149 if (WARN_ON(status->band >= IEEE80211_NUM_BANDS))
3052 status->band >= IEEE80211_NUM_BANDS))
3053 goto drop; 3150 goto drop;
3054 3151
3055 sband = local->hw.wiphy->bands[status->band]; 3152 sband = local->hw.wiphy->bands[status->band];
@@ -3094,17 +3191,22 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
3094 * hardware error. The driver should catch hardware 3191 * hardware error. The driver should catch hardware
3095 * errors. 3192 * errors.
3096 */ 3193 */
3097 if (WARN((status->rate_idx < 0 || 3194 if (WARN(status->rate_idx > 76,
3098 status->rate_idx > 76),
3099 "Rate marked as an HT rate but passed " 3195 "Rate marked as an HT rate but passed "
3100 "status->rate_idx is not " 3196 "status->rate_idx is not "
3101 "an MCS index [0-76]: %d (0x%02x)\n", 3197 "an MCS index [0-76]: %d (0x%02x)\n",
3102 status->rate_idx, 3198 status->rate_idx,
3103 status->rate_idx)) 3199 status->rate_idx))
3104 goto drop; 3200 goto drop;
3201 } else if (status->flag & RX_FLAG_VHT) {
3202 if (WARN_ONCE(status->rate_idx > 9 ||
3203 !status->vht_nss ||
3204 status->vht_nss > 8,
3205 "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
3206 status->rate_idx, status->vht_nss))
3207 goto drop;
3105 } else { 3208 } else {
3106 if (WARN_ON(status->rate_idx < 0 || 3209 if (WARN_ON(status->rate_idx >= sband->n_bitrates))
3107 status->rate_idx >= sband->n_bitrates))
3108 goto drop; 3210 goto drop;
3109 rate = &sband->bitrates[status->rate_idx]; 3211 rate = &sband->bitrates[status->rate_idx];
3110 } 3212 }
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index c4cdbde24fd3..d59fc6818b1c 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -113,18 +113,6 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
113 bss->valid_data |= IEEE80211_BSS_VALID_ERP; 113 bss->valid_data |= IEEE80211_BSS_VALID_ERP;
114 } 114 }
115 115
116 if (elems->tim && (!elems->parse_error ||
117 !(bss->valid_data & IEEE80211_BSS_VALID_DTIM))) {
118 struct ieee80211_tim_ie *tim_ie = elems->tim;
119 bss->dtim_period = tim_ie->dtim_period;
120 if (!elems->parse_error)
121 bss->valid_data |= IEEE80211_BSS_VALID_DTIM;
122 }
123
124 /* If the beacon had no TIM IE, or it was invalid, use 1 */
125 if (beacon && !bss->dtim_period)
126 bss->dtim_period = 1;
127
128 /* replace old supported rates if we get new values */ 116 /* replace old supported rates if we get new values */
129 if (!elems->parse_error || 117 if (!elems->parse_error ||
130 !(bss->valid_data & IEEE80211_BSS_VALID_RATES)) { 118 !(bss->valid_data & IEEE80211_BSS_VALID_RATES)) {
@@ -174,7 +162,6 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
174 u8 *elements; 162 u8 *elements;
175 struct ieee80211_channel *channel; 163 struct ieee80211_channel *channel;
176 size_t baselen; 164 size_t baselen;
177 int freq;
178 bool beacon; 165 bool beacon;
179 struct ieee802_11_elems elems; 166 struct ieee802_11_elems elems;
180 167
@@ -209,13 +196,7 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
209 196
210 ieee802_11_parse_elems(elements, skb->len - baselen, &elems); 197 ieee802_11_parse_elems(elements, skb->len - baselen, &elems);
211 198
212 if (elems.ds_params && elems.ds_params_len == 1) 199 channel = ieee80211_get_channel(local->hw.wiphy, rx_status->freq);
213 freq = ieee80211_channel_to_frequency(elems.ds_params[0],
214 rx_status->band);
215 else
216 freq = rx_status->freq;
217
218 channel = ieee80211_get_channel(local->hw.wiphy, freq);
219 200
220 if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) 201 if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
221 return; 202 return;
@@ -254,6 +235,7 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
254 local->hw_scan_req->n_channels = n_chans; 235 local->hw_scan_req->n_channels = n_chans;
255 236
256 ielen = ieee80211_build_preq_ies(local, (u8 *)local->hw_scan_req->ie, 237 ielen = ieee80211_build_preq_ies(local, (u8 *)local->hw_scan_req->ie,
238 local->hw_scan_ies_bufsize,
257 req->ie, req->ie_len, band, 239 req->ie, req->ie_len, band,
258 req->rates[band], 0); 240 req->rates[band], 0);
259 local->hw_scan_req->ie_len = ielen; 241 local->hw_scan_req->ie_len = ielen;
@@ -336,6 +318,10 @@ EXPORT_SYMBOL(ieee80211_scan_completed);
336 318
337static int ieee80211_start_sw_scan(struct ieee80211_local *local) 319static int ieee80211_start_sw_scan(struct ieee80211_local *local)
338{ 320{
321 /* Software scan is not supported in multi-channel cases */
322 if (local->use_chanctx)
323 return -EOPNOTSUPP;
324
339 /* 325 /*
340 * Hardware/driver doesn't support hw_scan, so use software 326 * Hardware/driver doesn't support hw_scan, so use software
341 * scanning instead. First send a nullfunc frame with power save 327 * scanning instead. First send a nullfunc frame with power save
@@ -417,7 +403,7 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
417 local->scan_req->ie, local->scan_req->ie_len, 403 local->scan_req->ie, local->scan_req->ie_len,
418 local->scan_req->rates[band], false, 404 local->scan_req->rates[band], false,
419 local->scan_req->no_cck, 405 local->scan_req->no_cck,
420 local->hw.conf.channel); 406 local->hw.conf.channel, true);
421 407
422 /* 408 /*
423 * After sending probe requests, wait for probe responses 409 * After sending probe requests, wait for probe responses
@@ -448,11 +434,13 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
448 if (local->ops->hw_scan) { 434 if (local->ops->hw_scan) {
449 u8 *ies; 435 u8 *ies;
450 436
437 local->hw_scan_ies_bufsize = 2 + IEEE80211_MAX_SSID_LEN +
438 local->scan_ies_len +
439 req->ie_len;
451 local->hw_scan_req = kmalloc( 440 local->hw_scan_req = kmalloc(
452 sizeof(*local->hw_scan_req) + 441 sizeof(*local->hw_scan_req) +
453 req->n_channels * sizeof(req->channels[0]) + 442 req->n_channels * sizeof(req->channels[0]) +
454 2 + IEEE80211_MAX_SSID_LEN + local->scan_ies_len + 443 local->hw_scan_ies_bufsize, GFP_KERNEL);
455 req->ie_len, GFP_KERNEL);
456 if (!local->hw_scan_req) 444 if (!local->hw_scan_req)
457 return -ENOMEM; 445 return -ENOMEM;
458 446
@@ -462,6 +450,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
462 sizeof(*local->hw_scan_req) + 450 sizeof(*local->hw_scan_req) +
463 req->n_channels * sizeof(req->channels[0]); 451 req->n_channels * sizeof(req->channels[0]);
464 local->hw_scan_req->ie = ies; 452 local->hw_scan_req->ie = ies;
453 local->hw_scan_req->flags = req->flags;
465 454
466 local->hw_scan_band = 0; 455 local->hw_scan_band = 0;
467 456
@@ -480,7 +469,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
480 if (local->ops->hw_scan) { 469 if (local->ops->hw_scan) {
481 __set_bit(SCAN_HW_SCANNING, &local->scanning); 470 __set_bit(SCAN_HW_SCANNING, &local->scanning);
482 } else if ((req->n_channels == 1) && 471 } else if ((req->n_channels == 1) &&
483 (req->channels[0] == local->oper_channel)) { 472 (req->channels[0] == local->_oper_channel)) {
484 /* 473 /*
485 * If we are scanning only on the operating channel 474 * If we are scanning only on the operating channel
486 * then we do not need to stop normal activities 475 * then we do not need to stop normal activities
@@ -562,6 +551,7 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
562 unsigned long min_beacon_int = 0; 551 unsigned long min_beacon_int = 0;
563 struct ieee80211_sub_if_data *sdata; 552 struct ieee80211_sub_if_data *sdata;
564 struct ieee80211_channel *next_chan; 553 struct ieee80211_channel *next_chan;
554 enum mac80211_scan_state next_scan_state;
565 555
566 /* 556 /*
567 * check if at least one STA interface is associated, 557 * check if at least one STA interface is associated,
@@ -620,10 +610,18 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
620 usecs_to_jiffies(min_beacon_int * 1024) * 610 usecs_to_jiffies(min_beacon_int * 1024) *
621 local->hw.conf.listen_interval); 611 local->hw.conf.listen_interval);
622 612
623 if (associated && (!tx_empty || bad_latency || listen_int_exceeded)) 613 if (associated && !tx_empty) {
624 local->next_scan_state = SCAN_SUSPEND; 614 if (local->scan_req->flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
625 else 615 next_scan_state = SCAN_ABORT;
626 local->next_scan_state = SCAN_SET_CHANNEL; 616 else
617 next_scan_state = SCAN_SUSPEND;
618 } else if (associated && (bad_latency || listen_int_exceeded)) {
619 next_scan_state = SCAN_SUSPEND;
620 } else {
621 next_scan_state = SCAN_SET_CHANNEL;
622 }
623
624 local->next_scan_state = next_scan_state;
627 625
628 *next_delay = 0; 626 *next_delay = 0;
629} 627}
@@ -794,6 +792,9 @@ void ieee80211_scan_work(struct work_struct *work)
794 case SCAN_RESUME: 792 case SCAN_RESUME:
795 ieee80211_scan_state_resume(local, &next_delay); 793 ieee80211_scan_state_resume(local, &next_delay);
796 break; 794 break;
795 case SCAN_ABORT:
796 aborted = true;
797 goto out_complete;
797 } 798 }
798 } while (next_delay == 0); 799 } while (next_delay == 0);
799 800
@@ -819,9 +820,9 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
819 return res; 820 return res;
820} 821}
821 822
822int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata, 823int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
823 const u8 *ssid, u8 ssid_len, 824 const u8 *ssid, u8 ssid_len,
824 struct ieee80211_channel *chan) 825 struct ieee80211_channel *chan)
825{ 826{
826 struct ieee80211_local *local = sdata->local; 827 struct ieee80211_local *local = sdata->local;
827 int ret = -EBUSY; 828 int ret = -EBUSY;
@@ -835,22 +836,36 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
835 836
836 /* fill internal scan request */ 837 /* fill internal scan request */
837 if (!chan) { 838 if (!chan) {
838 int i, nchan = 0; 839 int i, max_n;
840 int n_ch = 0;
839 841
840 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 842 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
841 if (!local->hw.wiphy->bands[band]) 843 if (!local->hw.wiphy->bands[band])
842 continue; 844 continue;
843 for (i = 0; 845
844 i < local->hw.wiphy->bands[band]->n_channels; 846 max_n = local->hw.wiphy->bands[band]->n_channels;
845 i++) { 847 for (i = 0; i < max_n; i++) {
846 local->int_scan_req->channels[nchan] = 848 struct ieee80211_channel *tmp_ch =
847 &local->hw.wiphy->bands[band]->channels[i]; 849 &local->hw.wiphy->bands[band]->channels[i];
848 nchan++; 850
851 if (tmp_ch->flags & (IEEE80211_CHAN_NO_IBSS |
852 IEEE80211_CHAN_DISABLED))
853 continue;
854
855 local->int_scan_req->channels[n_ch] = tmp_ch;
856 n_ch++;
849 } 857 }
850 } 858 }
851 859
852 local->int_scan_req->n_channels = nchan; 860 if (WARN_ON_ONCE(n_ch == 0))
861 goto unlock;
862
863 local->int_scan_req->n_channels = n_ch;
853 } else { 864 } else {
865 if (WARN_ON_ONCE(chan->flags & (IEEE80211_CHAN_NO_IBSS |
866 IEEE80211_CHAN_DISABLED)))
867 goto unlock;
868
854 local->int_scan_req->channels[0] = chan; 869 local->int_scan_req->channels[0] = chan;
855 local->int_scan_req->n_channels = 1; 870 local->int_scan_req->n_channels = 1;
856 } 871 }
@@ -917,8 +932,11 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
917 struct cfg80211_sched_scan_request *req) 932 struct cfg80211_sched_scan_request *req)
918{ 933{
919 struct ieee80211_local *local = sdata->local; 934 struct ieee80211_local *local = sdata->local;
920 struct ieee80211_sched_scan_ies sched_scan_ies; 935 struct ieee80211_sched_scan_ies sched_scan_ies = {};
921 int ret, i; 936 int ret, i, iebufsz;
937
938 iebufsz = 2 + IEEE80211_MAX_SSID_LEN +
939 local->scan_ies_len + req->ie_len;
922 940
923 mutex_lock(&local->mtx); 941 mutex_lock(&local->mtx);
924 942
@@ -936,10 +954,7 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
936 if (!local->hw.wiphy->bands[i]) 954 if (!local->hw.wiphy->bands[i])
937 continue; 955 continue;
938 956
939 sched_scan_ies.ie[i] = kzalloc(2 + IEEE80211_MAX_SSID_LEN + 957 sched_scan_ies.ie[i] = kzalloc(iebufsz, GFP_KERNEL);
940 local->scan_ies_len +
941 req->ie_len,
942 GFP_KERNEL);
943 if (!sched_scan_ies.ie[i]) { 958 if (!sched_scan_ies.ie[i]) {
944 ret = -ENOMEM; 959 ret = -ENOMEM;
945 goto out_free; 960 goto out_free;
@@ -947,8 +962,8 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
947 962
948 sched_scan_ies.len[i] = 963 sched_scan_ies.len[i] =
949 ieee80211_build_preq_ies(local, sched_scan_ies.ie[i], 964 ieee80211_build_preq_ies(local, sched_scan_ies.ie[i],
950 req->ie, req->ie_len, i, 965 iebufsz, req->ie, req->ie_len,
951 (u32) -1, 0); 966 i, (u32) -1, 0);
952 } 967 }
953 968
954 ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies); 969 ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 0a4e4c04db89..ca9fde198188 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -91,13 +91,13 @@ static int sta_info_hash_del(struct ieee80211_local *local,
91 return -ENOENT; 91 return -ENOENT;
92} 92}
93 93
94static void free_sta_work(struct work_struct *wk) 94static void cleanup_single_sta(struct sta_info *sta)
95{ 95{
96 struct sta_info *sta = container_of(wk, struct sta_info, free_sta_wk);
97 int ac, i; 96 int ac, i;
98 struct tid_ampdu_tx *tid_tx; 97 struct tid_ampdu_tx *tid_tx;
99 struct ieee80211_sub_if_data *sdata = sta->sdata; 98 struct ieee80211_sub_if_data *sdata = sta->sdata;
100 struct ieee80211_local *local = sdata->local; 99 struct ieee80211_local *local = sdata->local;
100 struct ps_data *ps;
101 101
102 /* 102 /*
103 * At this point, when being called as call_rcu callback, 103 * At this point, when being called as call_rcu callback,
@@ -107,18 +107,22 @@ static void free_sta_work(struct work_struct *wk)
107 */ 107 */
108 108
109 if (test_sta_flag(sta, WLAN_STA_PS_STA)) { 109 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
110 BUG_ON(!sdata->bss); 110 if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
111 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
112 ps = &sdata->bss->ps;
113 else
114 return;
111 115
112 clear_sta_flag(sta, WLAN_STA_PS_STA); 116 clear_sta_flag(sta, WLAN_STA_PS_STA);
113 117
114 atomic_dec(&sdata->bss->num_sta_ps); 118 atomic_dec(&ps->num_sta_ps);
115 sta_info_recalc_tim(sta); 119 sta_info_recalc_tim(sta);
116 } 120 }
117 121
118 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 122 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
119 local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]); 123 local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
120 __skb_queue_purge(&sta->ps_tx_buf[ac]); 124 ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]);
121 __skb_queue_purge(&sta->tx_filtered[ac]); 125 ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]);
122 } 126 }
123 127
124#ifdef CONFIG_MAC80211_MESH 128#ifdef CONFIG_MAC80211_MESH
@@ -137,22 +141,46 @@ static void free_sta_work(struct work_struct *wk)
137 * drivers have to handle aggregation stop being requested, followed 141 * drivers have to handle aggregation stop being requested, followed
138 * directly by station destruction. 142 * directly by station destruction.
139 */ 143 */
140 for (i = 0; i < STA_TID_NUM; i++) { 144 for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
141 tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]); 145 tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
142 if (!tid_tx) 146 if (!tid_tx)
143 continue; 147 continue;
144 __skb_queue_purge(&tid_tx->pending); 148 ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending);
145 kfree(tid_tx); 149 kfree(tid_tx);
146 } 150 }
147 151
148 sta_info_free(local, sta); 152 sta_info_free(local, sta);
149} 153}
150 154
155void ieee80211_cleanup_sdata_stas(struct ieee80211_sub_if_data *sdata)
156{
157 struct sta_info *sta;
158
159 spin_lock_bh(&sdata->cleanup_stations_lock);
160 while (!list_empty(&sdata->cleanup_stations)) {
161 sta = list_first_entry(&sdata->cleanup_stations,
162 struct sta_info, list);
163 list_del(&sta->list);
164 spin_unlock_bh(&sdata->cleanup_stations_lock);
165
166 cleanup_single_sta(sta);
167
168 spin_lock_bh(&sdata->cleanup_stations_lock);
169 }
170
171 spin_unlock_bh(&sdata->cleanup_stations_lock);
172}
173
151static void free_sta_rcu(struct rcu_head *h) 174static void free_sta_rcu(struct rcu_head *h)
152{ 175{
153 struct sta_info *sta = container_of(h, struct sta_info, rcu_head); 176 struct sta_info *sta = container_of(h, struct sta_info, rcu_head);
177 struct ieee80211_sub_if_data *sdata = sta->sdata;
154 178
155 ieee80211_queue_work(&sta->local->hw, &sta->free_sta_wk); 179 spin_lock(&sdata->cleanup_stations_lock);
180 list_add_tail(&sta->list, &sdata->cleanup_stations);
181 spin_unlock(&sdata->cleanup_stations_lock);
182
183 ieee80211_queue_work(&sdata->local->hw, &sdata->cleanup_stations_wk);
156} 184}
157 185
158/* protected by RCU */ 186/* protected by RCU */
@@ -305,7 +333,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
305 333
306 spin_lock_init(&sta->lock); 334 spin_lock_init(&sta->lock);
307 INIT_WORK(&sta->drv_unblock_wk, sta_unblock); 335 INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
308 INIT_WORK(&sta->free_sta_wk, free_sta_work);
309 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); 336 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
310 mutex_init(&sta->ampdu_mlme.mtx); 337 mutex_init(&sta->ampdu_mlme.mtx);
311 338
@@ -325,7 +352,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
325 return NULL; 352 return NULL;
326 } 353 }
327 354
328 for (i = 0; i < STA_TID_NUM; i++) { 355 for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
329 /* 356 /*
330 * timer_to_tid must be initialized with identity mapping 357 * timer_to_tid must be initialized with identity mapping
331 * to enable session_timer's data differentiation. See 358 * to enable session_timer's data differentiation. See
@@ -338,7 +365,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
338 skb_queue_head_init(&sta->tx_filtered[i]); 365 skb_queue_head_init(&sta->tx_filtered[i]);
339 } 366 }
340 367
341 for (i = 0; i < NUM_RX_DATA_QUEUES; i++) 368 for (i = 0; i < IEEE80211_NUM_TIDS; i++)
342 sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX); 369 sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
343 370
344 sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr); 371 sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
@@ -502,22 +529,22 @@ int sta_info_insert(struct sta_info *sta)
502 return err; 529 return err;
503} 530}
504 531
505static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid) 532static inline void __bss_tim_set(u8 *tim, u16 id)
506{ 533{
507 /* 534 /*
508 * This format has been mandated by the IEEE specifications, 535 * This format has been mandated by the IEEE specifications,
509 * so this line may not be changed to use the __set_bit() format. 536 * so this line may not be changed to use the __set_bit() format.
510 */ 537 */
511 bss->tim[aid / 8] |= (1 << (aid % 8)); 538 tim[id / 8] |= (1 << (id % 8));
512} 539}
513 540
514static inline void __bss_tim_clear(struct ieee80211_if_ap *bss, u16 aid) 541static inline void __bss_tim_clear(u8 *tim, u16 id)
515{ 542{
516 /* 543 /*
517 * This format has been mandated by the IEEE specifications, 544 * This format has been mandated by the IEEE specifications,
518 * so this line may not be changed to use the __clear_bit() format. 545 * so this line may not be changed to use the __clear_bit() format.
519 */ 546 */
520 bss->tim[aid / 8] &= ~(1 << (aid % 8)); 547 tim[id / 8] &= ~(1 << (id % 8));
521} 548}
522 549
523static unsigned long ieee80211_tids_for_ac(int ac) 550static unsigned long ieee80211_tids_for_ac(int ac)
@@ -541,14 +568,23 @@ static unsigned long ieee80211_tids_for_ac(int ac)
541void sta_info_recalc_tim(struct sta_info *sta) 568void sta_info_recalc_tim(struct sta_info *sta)
542{ 569{
543 struct ieee80211_local *local = sta->local; 570 struct ieee80211_local *local = sta->local;
544 struct ieee80211_if_ap *bss = sta->sdata->bss; 571 struct ps_data *ps;
545 unsigned long flags; 572 unsigned long flags;
546 bool indicate_tim = false; 573 bool indicate_tim = false;
547 u8 ignore_for_tim = sta->sta.uapsd_queues; 574 u8 ignore_for_tim = sta->sta.uapsd_queues;
548 int ac; 575 int ac;
576 u16 id;
577
578 if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
579 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
580 if (WARN_ON_ONCE(!sta->sdata->bss))
581 return;
549 582
550 if (WARN_ON_ONCE(!sta->sdata->bss)) 583 ps = &sta->sdata->bss->ps;
584 id = sta->sta.aid;
585 } else {
551 return; 586 return;
587 }
552 588
553 /* No need to do anything if the driver does all */ 589 /* No need to do anything if the driver does all */
554 if (local->hw.flags & IEEE80211_HW_AP_LINK_PS) 590 if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
@@ -587,9 +623,9 @@ void sta_info_recalc_tim(struct sta_info *sta)
587 spin_lock_irqsave(&local->tim_lock, flags); 623 spin_lock_irqsave(&local->tim_lock, flags);
588 624
589 if (indicate_tim) 625 if (indicate_tim)
590 __bss_tim_set(bss, sta->sta.aid); 626 __bss_tim_set(ps->tim, id);
591 else 627 else
592 __bss_tim_clear(bss, sta->sta.aid); 628 __bss_tim_clear(ps->tim, id);
593 629
594 if (local->ops->set_tim) { 630 if (local->ops->set_tim) {
595 local->tim_in_locked_section = true; 631 local->tim_in_locked_section = true;
@@ -848,7 +884,7 @@ void sta_info_init(struct ieee80211_local *local)
848 884
849void sta_info_stop(struct ieee80211_local *local) 885void sta_info_stop(struct ieee80211_local *local)
850{ 886{
851 del_timer(&local->sta_cleanup); 887 del_timer_sync(&local->sta_cleanup);
852 sta_info_flush(local, NULL); 888 sta_info_flush(local, NULL);
853} 889}
854 890
@@ -877,6 +913,20 @@ int sta_info_flush(struct ieee80211_local *local,
877 } 913 }
878 mutex_unlock(&local->sta_mtx); 914 mutex_unlock(&local->sta_mtx);
879 915
916 rcu_barrier();
917
918 if (sdata) {
919 ieee80211_cleanup_sdata_stas(sdata);
920 cancel_work_sync(&sdata->cleanup_stations_wk);
921 } else {
922 mutex_lock(&local->iflist_mtx);
923 list_for_each_entry(sdata, &local->interfaces, list) {
924 ieee80211_cleanup_sdata_stas(sdata);
925 cancel_work_sync(&sdata->cleanup_stations_wk);
926 }
927 mutex_unlock(&local->iflist_mtx);
928 }
929
880 return ret; 930 return ret;
881} 931}
882 932
@@ -893,8 +943,8 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
893 continue; 943 continue;
894 944
895 if (time_after(jiffies, sta->last_rx + exp_time)) { 945 if (time_after(jiffies, sta->last_rx + exp_time)) {
896 ibss_dbg(sdata, "expiring inactive STA %pM\n", 946 sta_dbg(sta->sdata, "expiring inactive STA %pM\n",
897 sta->sta.addr); 947 sta->sta.addr);
898 WARN_ON(__sta_info_destroy(sta)); 948 WARN_ON(__sta_info_destroy(sta));
899 } 949 }
900 } 950 }
@@ -948,10 +998,17 @@ static void clear_sta_ps_flags(void *_sta)
948{ 998{
949 struct sta_info *sta = _sta; 999 struct sta_info *sta = _sta;
950 struct ieee80211_sub_if_data *sdata = sta->sdata; 1000 struct ieee80211_sub_if_data *sdata = sta->sdata;
1001 struct ps_data *ps;
1002
1003 if (sdata->vif.type == NL80211_IFTYPE_AP ||
1004 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1005 ps = &sdata->bss->ps;
1006 else
1007 return;
951 1008
952 clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 1009 clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
953 if (test_and_clear_sta_flag(sta, WLAN_STA_PS_STA)) 1010 if (test_and_clear_sta_flag(sta, WLAN_STA_PS_STA))
954 atomic_dec(&sdata->bss->num_sta_ps); 1011 atomic_dec(&ps->num_sta_ps);
955} 1012}
956 1013
957/* powersave support code */ 1014/* powersave support code */
@@ -961,10 +1018,11 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
961 struct ieee80211_local *local = sdata->local; 1018 struct ieee80211_local *local = sdata->local;
962 struct sk_buff_head pending; 1019 struct sk_buff_head pending;
963 int filtered = 0, buffered = 0, ac; 1020 int filtered = 0, buffered = 0, ac;
1021 unsigned long flags;
964 1022
965 clear_sta_flag(sta, WLAN_STA_SP); 1023 clear_sta_flag(sta, WLAN_STA_SP);
966 1024
967 BUILD_BUG_ON(BITS_TO_LONGS(STA_TID_NUM) > 1); 1025 BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1);
968 sta->driver_buffered_tids = 0; 1026 sta->driver_buffered_tids = 0;
969 1027
970 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS)) 1028 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
@@ -976,12 +1034,16 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
976 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 1034 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
977 int count = skb_queue_len(&pending), tmp; 1035 int count = skb_queue_len(&pending), tmp;
978 1036
1037 spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
979 skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending); 1038 skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending);
1039 spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);
980 tmp = skb_queue_len(&pending); 1040 tmp = skb_queue_len(&pending);
981 filtered += tmp - count; 1041 filtered += tmp - count;
982 count = tmp; 1042 count = tmp;
983 1043
1044 spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
984 skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending); 1045 skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending);
1046 spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);
985 tmp = skb_queue_len(&pending); 1047 tmp = skb_queue_len(&pending);
986 buffered += tmp - count; 1048 buffered += tmp - count;
987 } 1049 }
@@ -1008,6 +1070,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
1008 __le16 fc; 1070 __le16 fc;
1009 bool qos = test_sta_flag(sta, WLAN_STA_WME); 1071 bool qos = test_sta_flag(sta, WLAN_STA_WME);
1010 struct ieee80211_tx_info *info; 1072 struct ieee80211_tx_info *info;
1073 struct ieee80211_chanctx_conf *chanctx_conf;
1011 1074
1012 if (qos) { 1075 if (qos) {
1013 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | 1076 fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
@@ -1057,7 +1120,16 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
1057 1120
1058 drv_allow_buffered_frames(local, sta, BIT(tid), 1, reason, false); 1121 drv_allow_buffered_frames(local, sta, BIT(tid), 1, reason, false);
1059 1122
1060 ieee80211_xmit(sdata, skb); 1123 rcu_read_lock();
1124 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
1125 if (WARN_ON(!chanctx_conf)) {
1126 rcu_read_unlock();
1127 kfree_skb(skb);
1128 return;
1129 }
1130
1131 ieee80211_xmit(sdata, skb, chanctx_conf->def.chan->band);
1132 rcu_read_unlock();
1061} 1133}
1062 1134
1063static void 1135static void
@@ -1338,7 +1410,7 @@ void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
1338{ 1410{
1339 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1411 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1340 1412
1341 if (WARN_ON(tid >= STA_TID_NUM)) 1413 if (WARN_ON(tid >= IEEE80211_NUM_TIDS))
1342 return; 1414 return;
1343 1415
1344 if (buffered) 1416 if (buffered)
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index c88f161f8118..37c1889afd3a 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -80,7 +80,6 @@ enum ieee80211_sta_info_flags {
80 WLAN_STA_TOFFSET_KNOWN, 80 WLAN_STA_TOFFSET_KNOWN,
81}; 81};
82 82
83#define STA_TID_NUM 16
84#define ADDBA_RESP_INTERVAL HZ 83#define ADDBA_RESP_INTERVAL HZ
85#define HT_AGG_MAX_RETRIES 15 84#define HT_AGG_MAX_RETRIES 15
86#define HT_AGG_BURST_RETRIES 3 85#define HT_AGG_BURST_RETRIES 3
@@ -197,15 +196,15 @@ struct tid_ampdu_rx {
197struct sta_ampdu_mlme { 196struct sta_ampdu_mlme {
198 struct mutex mtx; 197 struct mutex mtx;
199 /* rx */ 198 /* rx */
200 struct tid_ampdu_rx __rcu *tid_rx[STA_TID_NUM]; 199 struct tid_ampdu_rx __rcu *tid_rx[IEEE80211_NUM_TIDS];
201 unsigned long tid_rx_timer_expired[BITS_TO_LONGS(STA_TID_NUM)]; 200 unsigned long tid_rx_timer_expired[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
202 unsigned long tid_rx_stop_requested[BITS_TO_LONGS(STA_TID_NUM)]; 201 unsigned long tid_rx_stop_requested[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
203 /* tx */ 202 /* tx */
204 struct work_struct work; 203 struct work_struct work;
205 struct tid_ampdu_tx __rcu *tid_tx[STA_TID_NUM]; 204 struct tid_ampdu_tx __rcu *tid_tx[IEEE80211_NUM_TIDS];
206 struct tid_ampdu_tx *tid_start_tx[STA_TID_NUM]; 205 struct tid_ampdu_tx *tid_start_tx[IEEE80211_NUM_TIDS];
207 unsigned long last_addba_req_time[STA_TID_NUM]; 206 unsigned long last_addba_req_time[IEEE80211_NUM_TIDS];
208 u8 addba_req_num[STA_TID_NUM]; 207 u8 addba_req_num[IEEE80211_NUM_TIDS];
209 u8 dialog_token_allocator; 208 u8 dialog_token_allocator;
210}; 209};
211 210
@@ -228,6 +227,7 @@ struct sta_ampdu_mlme {
228 * "the" transmit rate 227 * "the" transmit rate
229 * @last_rx_rate_idx: rx status rate index of the last data packet 228 * @last_rx_rate_idx: rx status rate index of the last data packet
230 * @last_rx_rate_flag: rx status flag of the last data packet 229 * @last_rx_rate_flag: rx status flag of the last data packet
230 * @last_rx_rate_vht_nss: rx status nss of last data packet
231 * @lock: used for locking all fields that require locking, see comments 231 * @lock: used for locking all fields that require locking, see comments
232 * in the header file. 232 * in the header file.
233 * @drv_unblock_wk: used for driver PS unblocking 233 * @drv_unblock_wk: used for driver PS unblocking
@@ -250,6 +250,7 @@ struct sta_ampdu_mlme {
250 * @rx_dropped: number of dropped MPDUs from this STA 250 * @rx_dropped: number of dropped MPDUs from this STA
251 * @last_signal: signal of last received frame from this STA 251 * @last_signal: signal of last received frame from this STA
252 * @avg_signal: moving average of signal of received frames from this STA 252 * @avg_signal: moving average of signal of received frames from this STA
253 * @last_ack_signal: signal of last received Ack frame from this STA
253 * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue) 254 * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue)
254 * @tx_filtered_count: number of frames the hardware filtered for this STA 255 * @tx_filtered_count: number of frames the hardware filtered for this STA
255 * @tx_retry_failed: number of frames that failed retry 256 * @tx_retry_failed: number of frames that failed retry
@@ -273,7 +274,7 @@ struct sta_ampdu_mlme {
273 * @t_offset: timing offset relative to this host 274 * @t_offset: timing offset relative to this host
274 * @t_offset_setpoint: reference timing offset of this sta to be used when 275 * @t_offset_setpoint: reference timing offset of this sta to be used when
275 * calculating clockdrift 276 * calculating clockdrift
276 * @ch_type: peer's channel type 277 * @ch_width: peer's channel width
277 * @debugfs: debug filesystem info 278 * @debugfs: debug filesystem info
278 * @dead: set to true when sta is unlinked 279 * @dead: set to true when sta is unlinked
279 * @uploaded: set to true when sta is uploaded to the driver 280 * @uploaded: set to true when sta is uploaded to the driver
@@ -298,7 +299,6 @@ struct sta_info {
298 spinlock_t lock; 299 spinlock_t lock;
299 300
300 struct work_struct drv_unblock_wk; 301 struct work_struct drv_unblock_wk;
301 struct work_struct free_sta_wk;
302 302
303 u16 listen_interval; 303 u16 listen_interval;
304 304
@@ -329,8 +329,9 @@ struct sta_info {
329 unsigned long rx_dropped; 329 unsigned long rx_dropped;
330 int last_signal; 330 int last_signal;
331 struct ewma avg_signal; 331 struct ewma avg_signal;
332 int last_ack_signal;
332 /* Plus 1 for non-QoS frames */ 333 /* Plus 1 for non-QoS frames */
333 __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES + 1]; 334 __le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1];
334 335
335 /* Updated from TX status path only, no locking requirements */ 336 /* Updated from TX status path only, no locking requirements */
336 unsigned long tx_filtered_count; 337 unsigned long tx_filtered_count;
@@ -344,14 +345,15 @@ struct sta_info {
344 unsigned long tx_fragments; 345 unsigned long tx_fragments;
345 struct ieee80211_tx_rate last_tx_rate; 346 struct ieee80211_tx_rate last_tx_rate;
346 int last_rx_rate_idx; 347 int last_rx_rate_idx;
347 int last_rx_rate_flag; 348 u32 last_rx_rate_flag;
349 u8 last_rx_rate_vht_nss;
348 u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1]; 350 u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
349 351
350 /* 352 /*
351 * Aggregation information, locked with lock. 353 * Aggregation information, locked with lock.
352 */ 354 */
353 struct sta_ampdu_mlme ampdu_mlme; 355 struct sta_ampdu_mlme ampdu_mlme;
354 u8 timer_to_tid[STA_TID_NUM]; 356 u8 timer_to_tid[IEEE80211_NUM_TIDS];
355 357
356#ifdef CONFIG_MAC80211_MESH 358#ifdef CONFIG_MAC80211_MESH
357 /* 359 /*
@@ -369,7 +371,7 @@ struct sta_info {
369 struct timer_list plink_timer; 371 struct timer_list plink_timer;
370 s64 t_offset; 372 s64 t_offset;
371 s64 t_offset_setpoint; 373 s64 t_offset_setpoint;
372 enum nl80211_channel_type ch_type; 374 enum nl80211_chan_width ch_width;
373#endif 375#endif
374 376
375#ifdef CONFIG_MAC80211_DEBUGFS 377#ifdef CONFIG_MAC80211_DEBUGFS
@@ -551,6 +553,8 @@ int sta_info_flush(struct ieee80211_local *local,
551void sta_set_rate_info_tx(struct sta_info *sta, 553void sta_set_rate_info_tx(struct sta_info *sta,
552 const struct ieee80211_tx_rate *rate, 554 const struct ieee80211_tx_rate *rate,
553 struct rate_info *rinfo); 555 struct rate_info *rinfo);
556void sta_set_rate_info_rx(struct sta_info *sta,
557 struct rate_info *rinfo);
554void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, 558void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
555 unsigned long exp_time); 559 unsigned long exp_time);
556 560
@@ -558,4 +562,6 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta);
558void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta); 562void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta);
559void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta); 563void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta);
560 564
565void ieee80211_cleanup_sdata_stas(struct ieee80211_sub_if_data *sdata);
566
561#endif /* STA_INFO_H */ 567#endif /* STA_INFO_H */
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 3af0cc4130f1..07d99578a2b1 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -189,30 +189,31 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
189 } 189 }
190 190
191 if (ieee80211_is_action(mgmt->frame_control) && 191 if (ieee80211_is_action(mgmt->frame_control) &&
192 sdata->vif.type == NL80211_IFTYPE_STATION &&
193 mgmt->u.action.category == WLAN_CATEGORY_HT && 192 mgmt->u.action.category == WLAN_CATEGORY_HT &&
194 mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS) { 193 mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS &&
194 sdata->vif.type == NL80211_IFTYPE_STATION &&
195 ieee80211_sdata_running(sdata)) {
195 /* 196 /*
196 * This update looks racy, but isn't -- if we come 197 * This update looks racy, but isn't -- if we come
197 * here we've definitely got a station that we're 198 * here we've definitely got a station that we're
198 * talking to, and on a managed interface that can 199 * talking to, and on a managed interface that can
199 * only be the AP. And the only other place updating 200 * only be the AP. And the only other place updating
200 * this variable is before we're associated. 201 * this variable in managed mode is before association.
201 */ 202 */
202 switch (mgmt->u.action.u.ht_smps.smps_control) { 203 switch (mgmt->u.action.u.ht_smps.smps_control) {
203 case WLAN_HT_SMPS_CONTROL_DYNAMIC: 204 case WLAN_HT_SMPS_CONTROL_DYNAMIC:
204 sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_DYNAMIC; 205 sdata->smps_mode = IEEE80211_SMPS_DYNAMIC;
205 break; 206 break;
206 case WLAN_HT_SMPS_CONTROL_STATIC: 207 case WLAN_HT_SMPS_CONTROL_STATIC:
207 sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_STATIC; 208 sdata->smps_mode = IEEE80211_SMPS_STATIC;
208 break; 209 break;
209 case WLAN_HT_SMPS_CONTROL_DISABLED: 210 case WLAN_HT_SMPS_CONTROL_DISABLED:
210 default: /* shouldn't happen since we don't send that */ 211 default: /* shouldn't happen since we don't send that */
211 sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_OFF; 212 sdata->smps_mode = IEEE80211_SMPS_OFF;
212 break; 213 break;
213 } 214 }
214 215
215 ieee80211_queue_work(&local->hw, &local->recalc_smps); 216 ieee80211_queue_work(&local->hw, &sdata->recalc_smps);
216 } 217 }
217} 218}
218 219
@@ -324,6 +325,75 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
324 325
325} 326}
326 327
328static void ieee80211_report_used_skb(struct ieee80211_local *local,
329 struct sk_buff *skb, bool dropped)
330{
331 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
332 struct ieee80211_hdr *hdr = (void *)skb->data;
333 bool acked = info->flags & IEEE80211_TX_STAT_ACK;
334
335 if (dropped)
336 acked = false;
337
338 if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
339 struct ieee80211_sub_if_data *sdata = NULL;
340 struct ieee80211_sub_if_data *iter_sdata;
341 u64 cookie = (unsigned long)skb;
342
343 rcu_read_lock();
344
345 if (skb->dev) {
346 list_for_each_entry_rcu(iter_sdata, &local->interfaces,
347 list) {
348 if (!iter_sdata->dev)
349 continue;
350
351 if (skb->dev == iter_sdata->dev) {
352 sdata = iter_sdata;
353 break;
354 }
355 }
356 } else {
357 sdata = rcu_dereference(local->p2p_sdata);
358 }
359
360 if (!sdata)
361 skb->dev = NULL;
362 else if (ieee80211_is_nullfunc(hdr->frame_control) ||
363 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
364 cfg80211_probe_status(sdata->dev, hdr->addr1,
365 cookie, acked, GFP_ATOMIC);
366 } else {
367 cfg80211_mgmt_tx_status(&sdata->wdev, cookie, skb->data,
368 skb->len, acked, GFP_ATOMIC);
369 }
370
371 rcu_read_unlock();
372 }
373
374 if (unlikely(info->ack_frame_id)) {
375 struct sk_buff *ack_skb;
376 unsigned long flags;
377
378 spin_lock_irqsave(&local->ack_status_lock, flags);
379 ack_skb = idr_find(&local->ack_status_frames,
380 info->ack_frame_id);
381 if (ack_skb)
382 idr_remove(&local->ack_status_frames,
383 info->ack_frame_id);
384 spin_unlock_irqrestore(&local->ack_status_lock, flags);
385
386 if (ack_skb) {
387 if (!dropped) {
388 /* consumes ack_skb */
389 skb_complete_wifi_ack(ack_skb, acked);
390 } else {
391 dev_kfree_skb_any(ack_skb);
392 }
393 }
394 }
395}
396
327/* 397/*
328 * Use a static threshold for now, best value to be determined 398 * Use a static threshold for now, best value to be determined
329 * by testing ... 399 * by testing ...
@@ -432,7 +502,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
432 IEEE80211_BAR_CTRL_TID_INFO_MASK) >> 502 IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
433 IEEE80211_BAR_CTRL_TID_INFO_SHIFT; 503 IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
434 504
435 ieee80211_set_bar_pending(sta, tid, ssn); 505 if (local->hw.flags &
506 IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL)
507 ieee80211_stop_tx_ba_session(&sta->sta, tid);
508 else
509 ieee80211_set_bar_pending(sta, tid, ssn);
436 } 510 }
437 } 511 }
438 512
@@ -469,6 +543,9 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
469 sta->lost_packets = 0; 543 sta->lost_packets = 0;
470 } 544 }
471 } 545 }
546
547 if (acked)
548 sta->last_ack_signal = info->status.ack_signal;
472 } 549 }
473 550
474 rcu_read_unlock(); 551 rcu_read_unlock();
@@ -515,62 +592,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
515 msecs_to_jiffies(10)); 592 msecs_to_jiffies(10));
516 } 593 }
517 594
518 if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) { 595 ieee80211_report_used_skb(local, skb, false);
519 u64 cookie = (unsigned long)skb;
520 bool found = false;
521
522 acked = info->flags & IEEE80211_TX_STAT_ACK;
523
524 rcu_read_lock();
525
526 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
527 if (!sdata->dev)
528 continue;
529
530 if (skb->dev != sdata->dev)
531 continue;
532
533 found = true;
534 break;
535 }
536
537 if (!skb->dev) {
538 sdata = rcu_dereference(local->p2p_sdata);
539 if (sdata)
540 found = true;
541 }
542
543 if (!found)
544 skb->dev = NULL;
545 else if (ieee80211_is_nullfunc(hdr->frame_control) ||
546 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
547 cfg80211_probe_status(sdata->dev, hdr->addr1,
548 cookie, acked, GFP_ATOMIC);
549 } else {
550 cfg80211_mgmt_tx_status(&sdata->wdev, cookie, skb->data,
551 skb->len, acked, GFP_ATOMIC);
552 }
553
554 rcu_read_unlock();
555 }
556
557 if (unlikely(info->ack_frame_id)) {
558 struct sk_buff *ack_skb;
559 unsigned long flags;
560
561 spin_lock_irqsave(&local->ack_status_lock, flags);
562 ack_skb = idr_find(&local->ack_status_frames,
563 info->ack_frame_id);
564 if (ack_skb)
565 idr_remove(&local->ack_status_frames,
566 info->ack_frame_id);
567 spin_unlock_irqrestore(&local->ack_status_lock, flags);
568
569 /* consumes ack_skb */
570 if (ack_skb)
571 skb_complete_wifi_ack(ack_skb,
572 info->flags & IEEE80211_TX_STAT_ACK);
573 }
574 596
575 /* this was a transmitted frame, but now we want to reuse it */ 597 /* this was a transmitted frame, but now we want to reuse it */
576 skb_orphan(skb); 598 skb_orphan(skb);
@@ -646,25 +668,17 @@ EXPORT_SYMBOL(ieee80211_report_low_ack);
646void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb) 668void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb)
647{ 669{
648 struct ieee80211_local *local = hw_to_local(hw); 670 struct ieee80211_local *local = hw_to_local(hw);
649 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
650
651 if (unlikely(info->ack_frame_id)) {
652 struct sk_buff *ack_skb;
653 unsigned long flags;
654
655 spin_lock_irqsave(&local->ack_status_lock, flags);
656 ack_skb = idr_find(&local->ack_status_frames,
657 info->ack_frame_id);
658 if (ack_skb)
659 idr_remove(&local->ack_status_frames,
660 info->ack_frame_id);
661 spin_unlock_irqrestore(&local->ack_status_lock, flags);
662
663 /* consumes ack_skb */
664 if (ack_skb)
665 dev_kfree_skb_any(ack_skb);
666 }
667 671
672 ieee80211_report_used_skb(local, skb, true);
668 dev_kfree_skb_any(skb); 673 dev_kfree_skb_any(skb);
669} 674}
670EXPORT_SYMBOL(ieee80211_free_txskb); 675EXPORT_SYMBOL(ieee80211_free_txskb);
676
677void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
678 struct sk_buff_head *skbs)
679{
680 struct sk_buff *skb;
681
682 while ((skb = __skb_dequeue(skbs)))
683 ieee80211_free_txskb(hw, skb);
684}
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 18d9c8a52e9e..a8270b441a6f 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -28,6 +28,25 @@
28#define VIF_PR_FMT " vif:%s(%d%s)" 28#define VIF_PR_FMT " vif:%s(%d%s)"
29#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : "" 29#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
30 30
31#define CHANCTX_ENTRY __field(u32, control_freq) \
32 __field(u32, chan_width) \
33 __field(u32, center_freq1) \
34 __field(u32, center_freq2) \
35 __field(u8, rx_chains_static) \
36 __field(u8, rx_chains_dynamic)
37#define CHANCTX_ASSIGN __entry->control_freq = ctx->conf.def.chan->center_freq;\
38 __entry->chan_width = ctx->conf.def.width; \
39 __entry->center_freq1 = ctx->conf.def.center_freq1; \
40 __entry->center_freq2 = ctx->conf.def.center_freq2; \
41 __entry->rx_chains_static = ctx->conf.rx_chains_static; \
42 __entry->rx_chains_dynamic = ctx->conf.rx_chains_dynamic
43#define CHANCTX_PR_FMT " control:%d MHz width:%d center: %d/%d MHz chains:%d/%d"
44#define CHANCTX_PR_ARG __entry->control_freq, __entry->chan_width, \
45 __entry->center_freq1, __entry->center_freq2, \
46 __entry->rx_chains_static, __entry->rx_chains_dynamic
47
48
49
31/* 50/*
32 * Tracing for driver callbacks. 51 * Tracing for driver callbacks.
33 */ 52 */
@@ -301,20 +320,37 @@ TRACE_EVENT(drv_bss_info_changed,
301 TP_STRUCT__entry( 320 TP_STRUCT__entry(
302 LOCAL_ENTRY 321 LOCAL_ENTRY
303 VIF_ENTRY 322 VIF_ENTRY
323 __field(u32, changed)
304 __field(bool, assoc) 324 __field(bool, assoc)
325 __field(bool, ibss_joined)
326 __field(bool, ibss_creator)
305 __field(u16, aid) 327 __field(u16, aid)
306 __field(bool, cts) 328 __field(bool, cts)
307 __field(bool, shortpre) 329 __field(bool, shortpre)
308 __field(bool, shortslot) 330 __field(bool, shortslot)
331 __field(bool, enable_beacon)
309 __field(u8, dtimper) 332 __field(u8, dtimper)
310 __field(u16, bcnint) 333 __field(u16, bcnint)
311 __field(u16, assoc_cap) 334 __field(u16, assoc_cap)
312 __field(u64, sync_tsf) 335 __field(u64, sync_tsf)
313 __field(u32, sync_device_ts) 336 __field(u32, sync_device_ts)
314 __field(u32, basic_rates) 337 __field(u32, basic_rates)
315 __field(u32, changed) 338 __array(int, mcast_rate, IEEE80211_NUM_BANDS)
316 __field(bool, enable_beacon)
317 __field(u16, ht_operation_mode) 339 __field(u16, ht_operation_mode)
340 __field(s32, cqm_rssi_thold);
341 __field(s32, cqm_rssi_hyst);
342 __field(u32, channel_width);
343 __field(u32, channel_cfreq1);
344 __dynamic_array(u32, arp_addr_list, info->arp_addr_cnt);
345 __field(bool, arp_filter_enabled);
346 __field(bool, qos);
347 __field(bool, idle);
348 __field(bool, ps);
349 __dynamic_array(u8, ssid, info->ssid_len);
350 __field(bool, hidden_ssid);
351 __field(int, txpower)
352 __field(u8, p2p_ctwindow)
353 __field(bool, p2p_oppps)
318 ), 354 ),
319 355
320 TP_fast_assign( 356 TP_fast_assign(
@@ -323,17 +359,36 @@ TRACE_EVENT(drv_bss_info_changed,
323 __entry->changed = changed; 359 __entry->changed = changed;
324 __entry->aid = info->aid; 360 __entry->aid = info->aid;
325 __entry->assoc = info->assoc; 361 __entry->assoc = info->assoc;
362 __entry->ibss_joined = info->ibss_joined;
363 __entry->ibss_creator = info->ibss_creator;
326 __entry->shortpre = info->use_short_preamble; 364 __entry->shortpre = info->use_short_preamble;
327 __entry->cts = info->use_cts_prot; 365 __entry->cts = info->use_cts_prot;
328 __entry->shortslot = info->use_short_slot; 366 __entry->shortslot = info->use_short_slot;
367 __entry->enable_beacon = info->enable_beacon;
329 __entry->dtimper = info->dtim_period; 368 __entry->dtimper = info->dtim_period;
330 __entry->bcnint = info->beacon_int; 369 __entry->bcnint = info->beacon_int;
331 __entry->assoc_cap = info->assoc_capability; 370 __entry->assoc_cap = info->assoc_capability;
332 __entry->sync_tsf = info->sync_tsf; 371 __entry->sync_tsf = info->sync_tsf;
333 __entry->sync_device_ts = info->sync_device_ts; 372 __entry->sync_device_ts = info->sync_device_ts;
334 __entry->basic_rates = info->basic_rates; 373 __entry->basic_rates = info->basic_rates;
335 __entry->enable_beacon = info->enable_beacon; 374 memcpy(__entry->mcast_rate, info->mcast_rate,
375 sizeof(__entry->mcast_rate));
336 __entry->ht_operation_mode = info->ht_operation_mode; 376 __entry->ht_operation_mode = info->ht_operation_mode;
377 __entry->cqm_rssi_thold = info->cqm_rssi_thold;
378 __entry->cqm_rssi_hyst = info->cqm_rssi_hyst;
379 __entry->channel_width = info->chandef.width;
380 __entry->channel_cfreq1 = info->chandef.center_freq1;
381 memcpy(__get_dynamic_array(arp_addr_list), info->arp_addr_list,
382 sizeof(u32) * info->arp_addr_cnt);
383 __entry->arp_filter_enabled = info->arp_filter_enabled;
384 __entry->qos = info->qos;
385 __entry->idle = info->idle;
386 __entry->ps = info->ps;
387 memcpy(__get_dynamic_array(ssid), info->ssid, info->ssid_len);
388 __entry->hidden_ssid = info->hidden_ssid;
389 __entry->txpower = info->txpower;
390 __entry->p2p_ctwindow = info->p2p_ctwindow;
391 __entry->p2p_oppps = info->p2p_oppps;
337 ), 392 ),
338 393
339 TP_printk( 394 TP_printk(
@@ -971,28 +1026,31 @@ TRACE_EVENT(drv_get_antenna,
971); 1026);
972 1027
973TRACE_EVENT(drv_remain_on_channel, 1028TRACE_EVENT(drv_remain_on_channel,
974 TP_PROTO(struct ieee80211_local *local, struct ieee80211_channel *chan, 1029 TP_PROTO(struct ieee80211_local *local,
975 enum nl80211_channel_type chantype, unsigned int duration), 1030 struct ieee80211_sub_if_data *sdata,
1031 struct ieee80211_channel *chan,
1032 unsigned int duration),
976 1033
977 TP_ARGS(local, chan, chantype, duration), 1034 TP_ARGS(local, sdata, chan, duration),
978 1035
979 TP_STRUCT__entry( 1036 TP_STRUCT__entry(
980 LOCAL_ENTRY 1037 LOCAL_ENTRY
1038 VIF_ENTRY
981 __field(int, center_freq) 1039 __field(int, center_freq)
982 __field(int, channel_type)
983 __field(unsigned int, duration) 1040 __field(unsigned int, duration)
984 ), 1041 ),
985 1042
986 TP_fast_assign( 1043 TP_fast_assign(
987 LOCAL_ASSIGN; 1044 LOCAL_ASSIGN;
1045 VIF_ASSIGN;
988 __entry->center_freq = chan->center_freq; 1046 __entry->center_freq = chan->center_freq;
989 __entry->channel_type = chantype;
990 __entry->duration = duration; 1047 __entry->duration = duration;
991 ), 1048 ),
992 1049
993 TP_printk( 1050 TP_printk(
994 LOCAL_PR_FMT " freq:%dMHz duration:%dms", 1051 LOCAL_PR_FMT VIF_PR_FMT " freq:%dMHz duration:%dms",
995 LOCAL_PR_ARG, __entry->center_freq, __entry->duration 1052 LOCAL_PR_ARG, VIF_PR_ARG,
1053 __entry->center_freq, __entry->duration
996 ) 1054 )
997); 1055);
998 1056
@@ -1001,34 +1059,6 @@ DEFINE_EVENT(local_only_evt, drv_cancel_remain_on_channel,
1001 TP_ARGS(local) 1059 TP_ARGS(local)
1002); 1060);
1003 1061
1004TRACE_EVENT(drv_offchannel_tx,
1005 TP_PROTO(struct ieee80211_local *local, struct sk_buff *skb,
1006 struct ieee80211_channel *chan,
1007 enum nl80211_channel_type channel_type,
1008 unsigned int wait),
1009
1010 TP_ARGS(local, skb, chan, channel_type, wait),
1011
1012 TP_STRUCT__entry(
1013 LOCAL_ENTRY
1014 __field(int, center_freq)
1015 __field(int, channel_type)
1016 __field(unsigned int, wait)
1017 ),
1018
1019 TP_fast_assign(
1020 LOCAL_ASSIGN;
1021 __entry->center_freq = chan->center_freq;
1022 __entry->channel_type = channel_type;
1023 __entry->wait = wait;
1024 ),
1025
1026 TP_printk(
1027 LOCAL_PR_FMT " freq:%dMHz, wait:%dms",
1028 LOCAL_PR_ARG, __entry->center_freq, __entry->wait
1029 )
1030);
1031
1032TRACE_EVENT(drv_set_ringparam, 1062TRACE_EVENT(drv_set_ringparam,
1033 TP_PROTO(struct ieee80211_local *local, u32 tx, u32 rx), 1063 TP_PROTO(struct ieee80211_local *local, u32 tx, u32 rx),
1034 1064
@@ -1256,6 +1286,146 @@ DEFINE_EVENT(local_sdata_evt, drv_mgd_prepare_tx,
1256 TP_ARGS(local, sdata) 1286 TP_ARGS(local, sdata)
1257); 1287);
1258 1288
1289DECLARE_EVENT_CLASS(local_chanctx,
1290 TP_PROTO(struct ieee80211_local *local,
1291 struct ieee80211_chanctx *ctx),
1292
1293 TP_ARGS(local, ctx),
1294
1295 TP_STRUCT__entry(
1296 LOCAL_ENTRY
1297 CHANCTX_ENTRY
1298 ),
1299
1300 TP_fast_assign(
1301 LOCAL_ASSIGN;
1302 CHANCTX_ASSIGN;
1303 ),
1304
1305 TP_printk(
1306 LOCAL_PR_FMT CHANCTX_PR_FMT,
1307 LOCAL_PR_ARG, CHANCTX_PR_ARG
1308 )
1309);
1310
1311DEFINE_EVENT(local_chanctx, drv_add_chanctx,
1312 TP_PROTO(struct ieee80211_local *local,
1313 struct ieee80211_chanctx *ctx),
1314 TP_ARGS(local, ctx)
1315);
1316
1317DEFINE_EVENT(local_chanctx, drv_remove_chanctx,
1318 TP_PROTO(struct ieee80211_local *local,
1319 struct ieee80211_chanctx *ctx),
1320 TP_ARGS(local, ctx)
1321);
1322
1323TRACE_EVENT(drv_change_chanctx,
1324 TP_PROTO(struct ieee80211_local *local,
1325 struct ieee80211_chanctx *ctx,
1326 u32 changed),
1327
1328 TP_ARGS(local, ctx, changed),
1329
1330 TP_STRUCT__entry(
1331 LOCAL_ENTRY
1332 CHANCTX_ENTRY
1333 __field(u32, changed)
1334 ),
1335
1336 TP_fast_assign(
1337 LOCAL_ASSIGN;
1338 CHANCTX_ASSIGN;
1339 __entry->changed = changed;
1340 ),
1341
1342 TP_printk(
1343 LOCAL_PR_FMT CHANCTX_PR_FMT " changed:%#x",
1344 LOCAL_PR_ARG, CHANCTX_PR_ARG, __entry->changed
1345 )
1346);
1347
1348DECLARE_EVENT_CLASS(local_sdata_chanctx,
1349 TP_PROTO(struct ieee80211_local *local,
1350 struct ieee80211_sub_if_data *sdata,
1351 struct ieee80211_chanctx *ctx),
1352
1353 TP_ARGS(local, sdata, ctx),
1354
1355 TP_STRUCT__entry(
1356 LOCAL_ENTRY
1357 VIF_ENTRY
1358 CHANCTX_ENTRY
1359 ),
1360
1361 TP_fast_assign(
1362 LOCAL_ASSIGN;
1363 VIF_ASSIGN;
1364 CHANCTX_ASSIGN;
1365 ),
1366
1367 TP_printk(
1368 LOCAL_PR_FMT VIF_PR_FMT CHANCTX_PR_FMT,
1369 LOCAL_PR_ARG, VIF_PR_ARG, CHANCTX_PR_ARG
1370 )
1371);
1372
1373DEFINE_EVENT(local_sdata_chanctx, drv_assign_vif_chanctx,
1374 TP_PROTO(struct ieee80211_local *local,
1375 struct ieee80211_sub_if_data *sdata,
1376 struct ieee80211_chanctx *ctx),
1377 TP_ARGS(local, sdata, ctx)
1378);
1379
1380DEFINE_EVENT(local_sdata_chanctx, drv_unassign_vif_chanctx,
1381 TP_PROTO(struct ieee80211_local *local,
1382 struct ieee80211_sub_if_data *sdata,
1383 struct ieee80211_chanctx *ctx),
1384 TP_ARGS(local, sdata, ctx)
1385);
1386
1387TRACE_EVENT(drv_start_ap,
1388 TP_PROTO(struct ieee80211_local *local,
1389 struct ieee80211_sub_if_data *sdata,
1390 struct ieee80211_bss_conf *info),
1391
1392 TP_ARGS(local, sdata, info),
1393
1394 TP_STRUCT__entry(
1395 LOCAL_ENTRY
1396 VIF_ENTRY
1397 __field(u8, dtimper)
1398 __field(u16, bcnint)
1399 __dynamic_array(u8, ssid, info->ssid_len);
1400 __field(bool, hidden_ssid);
1401 ),
1402
1403 TP_fast_assign(
1404 LOCAL_ASSIGN;
1405 VIF_ASSIGN;
1406 __entry->dtimper = info->dtim_period;
1407 __entry->bcnint = info->beacon_int;
1408 memcpy(__get_dynamic_array(ssid), info->ssid, info->ssid_len);
1409 __entry->hidden_ssid = info->hidden_ssid;
1410 ),
1411
1412 TP_printk(
1413 LOCAL_PR_FMT VIF_PR_FMT,
1414 LOCAL_PR_ARG, VIF_PR_ARG
1415 )
1416);
1417
1418DEFINE_EVENT(local_sdata_evt, drv_stop_ap,
1419 TP_PROTO(struct ieee80211_local *local,
1420 struct ieee80211_sub_if_data *sdata),
1421 TP_ARGS(local, sdata)
1422);
1423
1424DEFINE_EVENT(local_only_evt, drv_restart_complete,
1425 TP_PROTO(struct ieee80211_local *local),
1426 TP_ARGS(local)
1427);
1428
1259/* 1429/*
1260 * Tracing for API calls that drivers call. 1430 * Tracing for API calls that drivers call.
1261 */ 1431 */
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index c9bf83f36657..e9eadc40c09c 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -324,22 +324,20 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
324 struct ieee80211_sub_if_data *sdata; 324 struct ieee80211_sub_if_data *sdata;
325 struct sta_info *sta; 325 struct sta_info *sta;
326 326
327 /*
328 * virtual interfaces are protected by RCU
329 */
330 rcu_read_lock();
331
332 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 327 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
333 struct ieee80211_if_ap *ap; 328 struct ps_data *ps;
334 if (sdata->vif.type != NL80211_IFTYPE_AP) 329
330 if (sdata->vif.type == NL80211_IFTYPE_AP)
331 ps = &sdata->u.ap.ps;
332 else
335 continue; 333 continue;
336 ap = &sdata->u.ap; 334
337 skb = skb_dequeue(&ap->ps_bc_buf); 335 skb = skb_dequeue(&ps->bc_buf);
338 if (skb) { 336 if (skb) {
339 purged++; 337 purged++;
340 dev_kfree_skb(skb); 338 dev_kfree_skb(skb);
341 } 339 }
342 total += skb_queue_len(&ap->ps_bc_buf); 340 total += skb_queue_len(&ps->bc_buf);
343 } 341 }
344 342
345 /* 343 /*
@@ -360,8 +358,6 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
360 } 358 }
361 } 359 }
362 360
363 rcu_read_unlock();
364
365 local->total_ps_buffered = total; 361 local->total_ps_buffered = total;
366 ps_dbg_hw(&local->hw, "PS buffers full - purged %d frames\n", purged); 362 ps_dbg_hw(&local->hw, "PS buffers full - purged %d frames\n", purged);
367} 363}
@@ -371,6 +367,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
371{ 367{
372 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 368 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
373 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 369 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
370 struct ps_data *ps;
374 371
375 /* 372 /*
376 * broadcast/multicast frame 373 * broadcast/multicast frame
@@ -380,16 +377,24 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
380 * This is done either by the hardware or us. 377 * This is done either by the hardware or us.
381 */ 378 */
382 379
383 /* powersaving STAs only in AP/VLAN mode */ 380 /* powersaving STAs currently only in AP/VLAN mode */
384 if (!tx->sdata->bss) 381 if (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
382 tx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
383 if (!tx->sdata->bss)
384 return TX_CONTINUE;
385
386 ps = &tx->sdata->bss->ps;
387 } else {
385 return TX_CONTINUE; 388 return TX_CONTINUE;
389 }
390
386 391
387 /* no buffering for ordered frames */ 392 /* no buffering for ordered frames */
388 if (ieee80211_has_order(hdr->frame_control)) 393 if (ieee80211_has_order(hdr->frame_control))
389 return TX_CONTINUE; 394 return TX_CONTINUE;
390 395
391 /* no stations in PS mode */ 396 /* no stations in PS mode */
392 if (!atomic_read(&tx->sdata->bss->num_sta_ps)) 397 if (!atomic_read(&ps->num_sta_ps))
393 return TX_CONTINUE; 398 return TX_CONTINUE;
394 399
395 info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; 400 info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
@@ -404,14 +409,14 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
404 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) 409 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
405 purge_old_ps_buffers(tx->local); 410 purge_old_ps_buffers(tx->local);
406 411
407 if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) { 412 if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) {
408 ps_dbg(tx->sdata, 413 ps_dbg(tx->sdata,
409 "BC TX buffer full - dropping the oldest frame\n"); 414 "BC TX buffer full - dropping the oldest frame\n");
410 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); 415 dev_kfree_skb(skb_dequeue(&ps->bc_buf));
411 } else 416 } else
412 tx->local->total_ps_buffered++; 417 tx->local->total_ps_buffered++;
413 418
414 skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb); 419 skb_queue_tail(&ps->bc_buf, tx->skb);
415 420
416 return TX_QUEUED; 421 return TX_QUEUED;
417} 422}
@@ -951,7 +956,6 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
951 fragnum = 0; 956 fragnum = 0;
952 957
953 skb_queue_walk(&tx->skbs, skb) { 958 skb_queue_walk(&tx->skbs, skb) {
954 int next_len;
955 const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); 959 const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
956 960
957 hdr = (void *)skb->data; 961 hdr = (void *)skb->data;
@@ -970,7 +974,6 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
970 info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE; 974 info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
971 } else { 975 } else {
972 hdr->frame_control &= ~morefrags; 976 hdr->frame_control &= ~morefrags;
973 next_len = 0;
974 } 977 }
975 hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG); 978 hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
976 fragnum++; 979 fragnum++;
@@ -1358,7 +1361,7 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1358 if (tx->skb) 1361 if (tx->skb)
1359 ieee80211_free_txskb(&tx->local->hw, tx->skb); 1362 ieee80211_free_txskb(&tx->local->hw, tx->skb);
1360 else 1363 else
1361 __skb_queue_purge(&tx->skbs); 1364 ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
1362 return -1; 1365 return -1;
1363 } else if (unlikely(res == TX_QUEUED)) { 1366 } else if (unlikely(res == TX_QUEUED)) {
1364 I802_DEBUG_INC(tx->local->tx_handlers_queued); 1367 I802_DEBUG_INC(tx->local->tx_handlers_queued);
@@ -1372,7 +1375,8 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1372 * Returns false if the frame couldn't be transmitted but was queued instead. 1375 * Returns false if the frame couldn't be transmitted but was queued instead.
1373 */ 1376 */
1374static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, 1377static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
1375 struct sk_buff *skb, bool txpending) 1378 struct sk_buff *skb, bool txpending,
1379 enum ieee80211_band band)
1376{ 1380{
1377 struct ieee80211_local *local = sdata->local; 1381 struct ieee80211_local *local = sdata->local;
1378 struct ieee80211_tx_data tx; 1382 struct ieee80211_tx_data tx;
@@ -1386,20 +1390,18 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
1386 return true; 1390 return true;
1387 } 1391 }
1388 1392
1389 rcu_read_lock();
1390
1391 /* initialises tx */ 1393 /* initialises tx */
1392 led_len = skb->len; 1394 led_len = skb->len;
1393 res_prepare = ieee80211_tx_prepare(sdata, &tx, skb); 1395 res_prepare = ieee80211_tx_prepare(sdata, &tx, skb);
1394 1396
1395 if (unlikely(res_prepare == TX_DROP)) { 1397 if (unlikely(res_prepare == TX_DROP)) {
1396 ieee80211_free_txskb(&local->hw, skb); 1398 ieee80211_free_txskb(&local->hw, skb);
1397 goto out; 1399 return true;
1398 } else if (unlikely(res_prepare == TX_QUEUED)) { 1400 } else if (unlikely(res_prepare == TX_QUEUED)) {
1399 goto out; 1401 return true;
1400 } 1402 }
1401 1403
1402 info->band = local->hw.conf.channel->band; 1404 info->band = band;
1403 1405
1404 /* set up hw_queue value early */ 1406 /* set up hw_queue value early */
1405 if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) || 1407 if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
@@ -1410,8 +1412,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
1410 if (!invoke_tx_handlers(&tx)) 1412 if (!invoke_tx_handlers(&tx))
1411 result = __ieee80211_tx(local, &tx.skbs, led_len, 1413 result = __ieee80211_tx(local, &tx.skbs, led_len,
1412 tx.sta, txpending); 1414 tx.sta, txpending);
1413 out: 1415
1414 rcu_read_unlock();
1415 return result; 1416 return result;
1416} 1417}
1417 1418
@@ -1446,7 +1447,8 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
1446 return 0; 1447 return 0;
1447} 1448}
1448 1449
1449void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) 1450void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
1451 enum ieee80211_band band)
1450{ 1452{
1451 struct ieee80211_local *local = sdata->local; 1453 struct ieee80211_local *local = sdata->local;
1452 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1454 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1454,8 +1456,6 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
1454 int headroom; 1456 int headroom;
1455 bool may_encrypt; 1457 bool may_encrypt;
1456 1458
1457 rcu_read_lock();
1458
1459 may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT); 1459 may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT);
1460 1460
1461 headroom = local->tx_headroom; 1461 headroom = local->tx_headroom;
@@ -1466,7 +1466,6 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
1466 1466
1467 if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) { 1467 if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) {
1468 ieee80211_free_txskb(&local->hw, skb); 1468 ieee80211_free_txskb(&local->hw, skb);
1469 rcu_read_unlock();
1470 return; 1469 return;
1471 } 1470 }
1472 1471
@@ -1478,13 +1477,11 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
1478 !is_multicast_ether_addr(hdr->addr1) && 1477 !is_multicast_ether_addr(hdr->addr1) &&
1479 mesh_nexthop_resolve(skb, sdata)) { 1478 mesh_nexthop_resolve(skb, sdata)) {
1480 /* skb queued: don't free */ 1479 /* skb queued: don't free */
1481 rcu_read_unlock();
1482 return; 1480 return;
1483 } 1481 }
1484 1482
1485 ieee80211_set_qos_hdr(sdata, skb); 1483 ieee80211_set_qos_hdr(sdata, skb);
1486 ieee80211_tx(sdata, skb, false); 1484 ieee80211_tx(sdata, skb, false, band);
1487 rcu_read_unlock();
1488} 1485}
1489 1486
1490static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb) 1487static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb)
@@ -1574,7 +1571,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1574 struct net_device *dev) 1571 struct net_device *dev)
1575{ 1572{
1576 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1573 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1577 struct ieee80211_channel *chan = local->hw.conf.channel; 1574 struct ieee80211_chanctx_conf *chanctx_conf;
1575 struct ieee80211_channel *chan;
1578 struct ieee80211_radiotap_header *prthdr = 1576 struct ieee80211_radiotap_header *prthdr =
1579 (struct ieee80211_radiotap_header *)skb->data; 1577 (struct ieee80211_radiotap_header *)skb->data;
1580 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1578 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1583,26 +1581,6 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1583 u16 len_rthdr; 1581 u16 len_rthdr;
1584 int hdrlen; 1582 int hdrlen;
1585 1583
1586 /*
1587 * Frame injection is not allowed if beaconing is not allowed
1588 * or if we need radar detection. Beaconing is usually not allowed when
1589 * the mode or operation (Adhoc, AP, Mesh) does not support DFS.
1590 * Passive scan is also used in world regulatory domains where
1591 * your country is not known and as such it should be treated as
1592 * NO TX unless the channel is explicitly allowed in which case
1593 * your current regulatory domain would not have the passive scan
1594 * flag.
1595 *
1596 * Since AP mode uses monitor interfaces to inject/TX management
1597 * frames we can make AP mode the exception to this rule once it
1598 * supports radar detection as its implementation can deal with
1599 * radar detection by itself. We can do that later by adding a
1600 * monitor flag interfaces used for AP support.
1601 */
1602 if ((chan->flags & (IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_RADAR |
1603 IEEE80211_CHAN_PASSIVE_SCAN)))
1604 goto fail;
1605
1606 /* check for not even having the fixed radiotap header part */ 1584 /* check for not even having the fixed radiotap header part */
1607 if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) 1585 if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
1608 goto fail; /* too short to be possibly valid */ 1586 goto fail; /* too short to be possibly valid */
@@ -1688,11 +1666,45 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
1688 } 1666 }
1689 } 1667 }
1690 1668
1691 ieee80211_xmit(sdata, skb); 1669 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
1670 if (!chanctx_conf) {
1671 tmp_sdata = rcu_dereference(local->monitor_sdata);
1672 if (tmp_sdata)
1673 chanctx_conf =
1674 rcu_dereference(tmp_sdata->vif.chanctx_conf);
1675 }
1676 if (!chanctx_conf)
1677 goto fail_rcu;
1678
1679 chan = chanctx_conf->def.chan;
1680
1681 /*
1682 * Frame injection is not allowed if beaconing is not allowed
1683 * or if we need radar detection. Beaconing is usually not allowed when
1684 * the mode or operation (Adhoc, AP, Mesh) does not support DFS.
1685 * Passive scan is also used in world regulatory domains where
1686 * your country is not known and as such it should be treated as
1687 * NO TX unless the channel is explicitly allowed in which case
1688 * your current regulatory domain would not have the passive scan
1689 * flag.
1690 *
1691 * Since AP mode uses monitor interfaces to inject/TX management
1692 * frames we can make AP mode the exception to this rule once it
1693 * supports radar detection as its implementation can deal with
1694 * radar detection by itself. We can do that later by adding a
1695 * monitor flag interfaces used for AP support.
1696 */
1697 if ((chan->flags & (IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_RADAR |
1698 IEEE80211_CHAN_PASSIVE_SCAN)))
1699 goto fail_rcu;
1700
1701 ieee80211_xmit(sdata, skb, chan->band);
1692 rcu_read_unlock(); 1702 rcu_read_unlock();
1693 1703
1694 return NETDEV_TX_OK; 1704 return NETDEV_TX_OK;
1695 1705
1706fail_rcu:
1707 rcu_read_unlock();
1696fail: 1708fail:
1697 dev_kfree_skb(skb); 1709 dev_kfree_skb(skb);
1698 return NETDEV_TX_OK; /* meaning, we dealt with the skb */ 1710 return NETDEV_TX_OK; /* meaning, we dealt with the skb */
@@ -1734,6 +1746,9 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1734 bool multicast; 1746 bool multicast;
1735 u32 info_flags = 0; 1747 u32 info_flags = 0;
1736 u16 info_id = 0; 1748 u16 info_id = 0;
1749 struct ieee80211_chanctx_conf *chanctx_conf;
1750 struct ieee80211_sub_if_data *ap_sdata;
1751 enum ieee80211_band band;
1737 1752
1738 if (unlikely(skb->len < ETH_HLEN)) 1753 if (unlikely(skb->len < ETH_HLEN))
1739 goto fail; 1754 goto fail;
@@ -1743,9 +1758,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1743 ethertype = (skb->data[12] << 8) | skb->data[13]; 1758 ethertype = (skb->data[12] << 8) | skb->data[13];
1744 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); 1759 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
1745 1760
1761 rcu_read_lock();
1762
1746 switch (sdata->vif.type) { 1763 switch (sdata->vif.type) {
1747 case NL80211_IFTYPE_AP_VLAN: 1764 case NL80211_IFTYPE_AP_VLAN:
1748 rcu_read_lock();
1749 sta = rcu_dereference(sdata->u.vlan.sta); 1765 sta = rcu_dereference(sdata->u.vlan.sta);
1750 if (sta) { 1766 if (sta) {
1751 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 1767 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
@@ -1758,7 +1774,12 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1758 authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); 1774 authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
1759 wme_sta = test_sta_flag(sta, WLAN_STA_WME); 1775 wme_sta = test_sta_flag(sta, WLAN_STA_WME);
1760 } 1776 }
1761 rcu_read_unlock(); 1777 ap_sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
1778 u.ap);
1779 chanctx_conf = rcu_dereference(ap_sdata->vif.chanctx_conf);
1780 if (!chanctx_conf)
1781 goto fail_rcu;
1782 band = chanctx_conf->def.chan->band;
1762 if (sta) 1783 if (sta)
1763 break; 1784 break;
1764 /* fall through */ 1785 /* fall through */
@@ -1769,6 +1790,11 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1769 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); 1790 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1770 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); 1791 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
1771 hdrlen = 24; 1792 hdrlen = 24;
1793 if (sdata->vif.type == NL80211_IFTYPE_AP)
1794 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
1795 if (!chanctx_conf)
1796 goto fail_rcu;
1797 band = chanctx_conf->def.chan->band;
1772 break; 1798 break;
1773 case NL80211_IFTYPE_WDS: 1799 case NL80211_IFTYPE_WDS:
1774 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 1800 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
@@ -1778,15 +1804,20 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1778 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1804 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1779 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); 1805 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1780 hdrlen = 30; 1806 hdrlen = 30;
1807 /*
1808 * This is the exception! WDS style interfaces are prohibited
1809 * when channel contexts are in used so this must be valid
1810 */
1811 band = local->hw.conf.channel->band;
1781 break; 1812 break;
1782#ifdef CONFIG_MAC80211_MESH 1813#ifdef CONFIG_MAC80211_MESH
1783 case NL80211_IFTYPE_MESH_POINT: 1814 case NL80211_IFTYPE_MESH_POINT:
1784 if (!sdata->u.mesh.mshcfg.dot11MeshTTL) { 1815 if (!sdata->u.mesh.mshcfg.dot11MeshTTL) {
1785 /* Do not send frames with mesh_ttl == 0 */ 1816 /* Do not send frames with mesh_ttl == 0 */
1786 sdata->u.mesh.mshstats.dropped_frames_ttl++; 1817 sdata->u.mesh.mshstats.dropped_frames_ttl++;
1787 goto fail; 1818 goto fail_rcu;
1788 } 1819 }
1789 rcu_read_lock(); 1820
1790 if (!is_multicast_ether_addr(skb->data)) { 1821 if (!is_multicast_ether_addr(skb->data)) {
1791 mpath = mesh_path_lookup(skb->data, sdata); 1822 mpath = mesh_path_lookup(skb->data, sdata);
1792 if (!mpath) 1823 if (!mpath)
@@ -1803,7 +1834,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1803 !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) { 1834 !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) {
1804 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, 1835 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1805 skb->data, skb->data + ETH_ALEN); 1836 skb->data, skb->data + ETH_ALEN);
1806 rcu_read_unlock();
1807 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, 1837 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
1808 sdata, NULL, NULL); 1838 sdata, NULL, NULL);
1809 } else { 1839 } else {
@@ -1819,7 +1849,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1819 mesh_da = mppath->mpp; 1849 mesh_da = mppath->mpp;
1820 else if (mpath) 1850 else if (mpath)
1821 mesh_da = mpath->dst; 1851 mesh_da = mpath->dst;
1822 rcu_read_unlock();
1823 1852
1824 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, 1853 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1825 mesh_da, sdata->vif.addr); 1854 mesh_da, sdata->vif.addr);
@@ -1839,13 +1868,16 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1839 skb->data + ETH_ALEN); 1868 skb->data + ETH_ALEN);
1840 1869
1841 } 1870 }
1871 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
1872 if (!chanctx_conf)
1873 goto fail_rcu;
1874 band = chanctx_conf->def.chan->band;
1842 break; 1875 break;
1843#endif 1876#endif
1844 case NL80211_IFTYPE_STATION: 1877 case NL80211_IFTYPE_STATION:
1845 if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) { 1878 if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
1846 bool tdls_peer = false; 1879 bool tdls_peer = false;
1847 1880
1848 rcu_read_lock();
1849 sta = sta_info_get(sdata, skb->data); 1881 sta = sta_info_get(sdata, skb->data);
1850 if (sta) { 1882 if (sta) {
1851 authorized = test_sta_flag(sta, 1883 authorized = test_sta_flag(sta,
@@ -1856,7 +1888,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1856 tdls_auth = test_sta_flag(sta, 1888 tdls_auth = test_sta_flag(sta,
1857 WLAN_STA_TDLS_PEER_AUTH); 1889 WLAN_STA_TDLS_PEER_AUTH);
1858 } 1890 }
1859 rcu_read_unlock();
1860 1891
1861 /* 1892 /*
1862 * If the TDLS link is enabled, send everything 1893 * If the TDLS link is enabled, send everything
@@ -1871,7 +1902,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1871 if (tdls_direct) { 1902 if (tdls_direct) {
1872 /* link during setup - throw out frames to peer */ 1903 /* link during setup - throw out frames to peer */
1873 if (!tdls_auth) 1904 if (!tdls_auth)
1874 goto fail; 1905 goto fail_rcu;
1875 1906
1876 /* DA SA BSSID */ 1907 /* DA SA BSSID */
1877 memcpy(hdr.addr1, skb->data, ETH_ALEN); 1908 memcpy(hdr.addr1, skb->data, ETH_ALEN);
@@ -1896,6 +1927,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1896 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1927 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1897 hdrlen = 24; 1928 hdrlen = 24;
1898 } 1929 }
1930 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
1931 if (!chanctx_conf)
1932 goto fail_rcu;
1933 band = chanctx_conf->def.chan->band;
1899 break; 1934 break;
1900 case NL80211_IFTYPE_ADHOC: 1935 case NL80211_IFTYPE_ADHOC:
1901 /* DA SA BSSID */ 1936 /* DA SA BSSID */
@@ -1903,9 +1938,13 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1903 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); 1938 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
1904 memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN); 1939 memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN);
1905 hdrlen = 24; 1940 hdrlen = 24;
1941 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
1942 if (!chanctx_conf)
1943 goto fail_rcu;
1944 band = chanctx_conf->def.chan->band;
1906 break; 1945 break;
1907 default: 1946 default:
1908 goto fail; 1947 goto fail_rcu;
1909 } 1948 }
1910 1949
1911 /* 1950 /*
@@ -1915,13 +1954,11 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1915 */ 1954 */
1916 multicast = is_multicast_ether_addr(hdr.addr1); 1955 multicast = is_multicast_ether_addr(hdr.addr1);
1917 if (!multicast) { 1956 if (!multicast) {
1918 rcu_read_lock();
1919 sta = sta_info_get(sdata, hdr.addr1); 1957 sta = sta_info_get(sdata, hdr.addr1);
1920 if (sta) { 1958 if (sta) {
1921 authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); 1959 authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
1922 wme_sta = test_sta_flag(sta, WLAN_STA_WME); 1960 wme_sta = test_sta_flag(sta, WLAN_STA_WME);
1923 } 1961 }
1924 rcu_read_unlock();
1925 } 1962 }
1926 1963
1927 /* For mesh, the use of the QoS header is mandatory */ 1964 /* For mesh, the use of the QoS header is mandatory */
@@ -1949,7 +1986,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1949 1986
1950 I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); 1987 I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
1951 1988
1952 goto fail; 1989 goto fail_rcu;
1953 } 1990 }
1954 1991
1955 if (unlikely(!multicast && skb->sk && 1992 if (unlikely(!multicast && skb->sk &&
@@ -2004,7 +2041,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
2004 kfree_skb(tmp_skb); 2041 kfree_skb(tmp_skb);
2005 2042
2006 if (!skb) 2043 if (!skb)
2007 goto fail; 2044 goto fail_rcu;
2008 } 2045 }
2009 2046
2010 hdr.frame_control = fc; 2047 hdr.frame_control = fc;
@@ -2052,7 +2089,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
2052 head_need = max_t(int, 0, head_need); 2089 head_need = max_t(int, 0, head_need);
2053 if (ieee80211_skb_resize(sdata, skb, head_need, true)) { 2090 if (ieee80211_skb_resize(sdata, skb, head_need, true)) {
2054 ieee80211_free_txskb(&local->hw, skb); 2091 ieee80211_free_txskb(&local->hw, skb);
2055 return NETDEV_TX_OK; 2092 skb = NULL;
2093 goto fail_rcu;
2056 } 2094 }
2057 } 2095 }
2058 2096
@@ -2104,10 +2142,13 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
2104 info->flags = info_flags; 2142 info->flags = info_flags;
2105 info->ack_frame_id = info_id; 2143 info->ack_frame_id = info_id;
2106 2144
2107 ieee80211_xmit(sdata, skb); 2145 ieee80211_xmit(sdata, skb, band);
2146 rcu_read_unlock();
2108 2147
2109 return NETDEV_TX_OK; 2148 return NETDEV_TX_OK;
2110 2149
2150 fail_rcu:
2151 rcu_read_unlock();
2111 fail: 2152 fail:
2112 dev_kfree_skb(skb); 2153 dev_kfree_skb(skb);
2113 return NETDEV_TX_OK; 2154 return NETDEV_TX_OK;
@@ -2120,10 +2161,13 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
2120 */ 2161 */
2121void ieee80211_clear_tx_pending(struct ieee80211_local *local) 2162void ieee80211_clear_tx_pending(struct ieee80211_local *local)
2122{ 2163{
2164 struct sk_buff *skb;
2123 int i; 2165 int i;
2124 2166
2125 for (i = 0; i < local->hw.queues; i++) 2167 for (i = 0; i < local->hw.queues; i++) {
2126 skb_queue_purge(&local->pending[i]); 2168 while ((skb = skb_dequeue(&local->pending[i])) != NULL)
2169 ieee80211_free_txskb(&local->hw, skb);
2170 }
2127} 2171}
2128 2172
2129/* 2173/*
@@ -2139,11 +2183,18 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
2139 struct sta_info *sta; 2183 struct sta_info *sta;
2140 struct ieee80211_hdr *hdr; 2184 struct ieee80211_hdr *hdr;
2141 bool result; 2185 bool result;
2186 struct ieee80211_chanctx_conf *chanctx_conf;
2142 2187
2143 sdata = vif_to_sdata(info->control.vif); 2188 sdata = vif_to_sdata(info->control.vif);
2144 2189
2145 if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) { 2190 if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
2146 result = ieee80211_tx(sdata, skb, true); 2191 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
2192 if (unlikely(!chanctx_conf)) {
2193 dev_kfree_skb(skb);
2194 return true;
2195 }
2196 result = ieee80211_tx(sdata, skb, true,
2197 chanctx_conf->def.chan->band);
2147 } else { 2198 } else {
2148 struct sk_buff_head skbs; 2199 struct sk_buff_head skbs;
2149 2200
@@ -2211,9 +2262,8 @@ void ieee80211_tx_pending(unsigned long data)
2211/* functions for drivers to get certain frames */ 2262/* functions for drivers to get certain frames */
2212 2263
2213static void ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata, 2264static void ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2214 struct ieee80211_if_ap *bss, 2265 struct ps_data *ps,
2215 struct sk_buff *skb, 2266 struct sk_buff *skb)
2216 struct beacon_data *beacon)
2217{ 2267{
2218 u8 *pos, *tim; 2268 u8 *pos, *tim;
2219 int aid0 = 0; 2269 int aid0 = 0;
@@ -2221,27 +2271,27 @@ static void ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2221 2271
2222 /* Generate bitmap for TIM only if there are any STAs in power save 2272 /* Generate bitmap for TIM only if there are any STAs in power save
2223 * mode. */ 2273 * mode. */
2224 if (atomic_read(&bss->num_sta_ps) > 0) 2274 if (atomic_read(&ps->num_sta_ps) > 0)
2225 /* in the hope that this is faster than 2275 /* in the hope that this is faster than
2226 * checking byte-for-byte */ 2276 * checking byte-for-byte */
2227 have_bits = !bitmap_empty((unsigned long*)bss->tim, 2277 have_bits = !bitmap_empty((unsigned long*)ps->tim,
2228 IEEE80211_MAX_AID+1); 2278 IEEE80211_MAX_AID+1);
2229 2279
2230 if (bss->dtim_count == 0) 2280 if (ps->dtim_count == 0)
2231 bss->dtim_count = sdata->vif.bss_conf.dtim_period - 1; 2281 ps->dtim_count = sdata->vif.bss_conf.dtim_period - 1;
2232 else 2282 else
2233 bss->dtim_count--; 2283 ps->dtim_count--;
2234 2284
2235 tim = pos = (u8 *) skb_put(skb, 6); 2285 tim = pos = (u8 *) skb_put(skb, 6);
2236 *pos++ = WLAN_EID_TIM; 2286 *pos++ = WLAN_EID_TIM;
2237 *pos++ = 4; 2287 *pos++ = 4;
2238 *pos++ = bss->dtim_count; 2288 *pos++ = ps->dtim_count;
2239 *pos++ = sdata->vif.bss_conf.dtim_period; 2289 *pos++ = sdata->vif.bss_conf.dtim_period;
2240 2290
2241 if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf)) 2291 if (ps->dtim_count == 0 && !skb_queue_empty(&ps->bc_buf))
2242 aid0 = 1; 2292 aid0 = 1;
2243 2293
2244 bss->dtim_bc_mc = aid0 == 1; 2294 ps->dtim_bc_mc = aid0 == 1;
2245 2295
2246 if (have_bits) { 2296 if (have_bits) {
2247 /* Find largest even number N1 so that bits numbered 1 through 2297 /* Find largest even number N1 so that bits numbered 1 through
@@ -2249,14 +2299,14 @@ static void ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2249 * (N2 + 1) x 8 through 2007 are 0. */ 2299 * (N2 + 1) x 8 through 2007 are 0. */
2250 n1 = 0; 2300 n1 = 0;
2251 for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) { 2301 for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) {
2252 if (bss->tim[i]) { 2302 if (ps->tim[i]) {
2253 n1 = i & 0xfe; 2303 n1 = i & 0xfe;
2254 break; 2304 break;
2255 } 2305 }
2256 } 2306 }
2257 n2 = n1; 2307 n2 = n1;
2258 for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) { 2308 for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) {
2259 if (bss->tim[i]) { 2309 if (ps->tim[i]) {
2260 n2 = i; 2310 n2 = i;
2261 break; 2311 break;
2262 } 2312 }
@@ -2266,7 +2316,7 @@ static void ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2266 *pos++ = n1 | aid0; 2316 *pos++ = n1 | aid0;
2267 /* Part Virt Bitmap */ 2317 /* Part Virt Bitmap */
2268 skb_put(skb, n2 - n1); 2318 skb_put(skb, n2 - n1);
2269 memcpy(pos, bss->tim + n1, n2 - n1 + 1); 2319 memcpy(pos, ps->tim + n1, n2 - n1 + 1);
2270 2320
2271 tim[1] = n2 - n1 + 4; 2321 tim[1] = n2 - n1 + 4;
2272 } else { 2322 } else {
@@ -2283,16 +2333,16 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2283 struct sk_buff *skb = NULL; 2333 struct sk_buff *skb = NULL;
2284 struct ieee80211_tx_info *info; 2334 struct ieee80211_tx_info *info;
2285 struct ieee80211_sub_if_data *sdata = NULL; 2335 struct ieee80211_sub_if_data *sdata = NULL;
2286 struct ieee80211_if_ap *ap = NULL; 2336 enum ieee80211_band band;
2287 struct beacon_data *beacon;
2288 enum ieee80211_band band = local->oper_channel->band;
2289 struct ieee80211_tx_rate_control txrc; 2337 struct ieee80211_tx_rate_control txrc;
2338 struct ieee80211_chanctx_conf *chanctx_conf;
2290 2339
2291 rcu_read_lock(); 2340 rcu_read_lock();
2292 2341
2293 sdata = vif_to_sdata(vif); 2342 sdata = vif_to_sdata(vif);
2343 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
2294 2344
2295 if (!ieee80211_sdata_running(sdata)) 2345 if (!ieee80211_sdata_running(sdata) || !chanctx_conf)
2296 goto out; 2346 goto out;
2297 2347
2298 if (tim_offset) 2348 if (tim_offset)
@@ -2301,8 +2351,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2301 *tim_length = 0; 2351 *tim_length = 0;
2302 2352
2303 if (sdata->vif.type == NL80211_IFTYPE_AP) { 2353 if (sdata->vif.type == NL80211_IFTYPE_AP) {
2304 ap = &sdata->u.ap; 2354 struct ieee80211_if_ap *ap = &sdata->u.ap;
2305 beacon = rcu_dereference(ap->beacon); 2355 struct beacon_data *beacon = rcu_dereference(ap->beacon);
2356
2306 if (beacon) { 2357 if (beacon) {
2307 /* 2358 /*
2308 * headroom, head length, 2359 * headroom, head length,
@@ -2326,14 +2377,12 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2326 * of the tim bitmap in mac80211 and the driver. 2377 * of the tim bitmap in mac80211 and the driver.
2327 */ 2378 */
2328 if (local->tim_in_locked_section) { 2379 if (local->tim_in_locked_section) {
2329 ieee80211_beacon_add_tim(sdata, ap, skb, 2380 ieee80211_beacon_add_tim(sdata, &ap->ps, skb);
2330 beacon);
2331 } else { 2381 } else {
2332 unsigned long flags; 2382 unsigned long flags;
2333 2383
2334 spin_lock_irqsave(&local->tim_lock, flags); 2384 spin_lock_irqsave(&local->tim_lock, flags);
2335 ieee80211_beacon_add_tim(sdata, ap, skb, 2385 ieee80211_beacon_add_tim(sdata, &ap->ps, skb);
2336 beacon);
2337 spin_unlock_irqrestore(&local->tim_lock, flags); 2386 spin_unlock_irqrestore(&local->tim_lock, flags);
2338 } 2387 }
2339 2388
@@ -2409,6 +2458,8 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2409 *pos++ = WLAN_EID_SSID; 2458 *pos++ = WLAN_EID_SSID;
2410 *pos++ = 0x0; 2459 *pos++ = 0x0;
2411 2460
2461 band = chanctx_conf->def.chan->band;
2462
2412 if (ieee80211_add_srates_ie(sdata, skb, true, band) || 2463 if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
2413 mesh_add_ds_params_ie(skb, sdata) || 2464 mesh_add_ds_params_ie(skb, sdata) ||
2414 ieee80211_add_ext_srates_ie(sdata, skb, true, band) || 2465 ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
@@ -2426,6 +2477,8 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2426 goto out; 2477 goto out;
2427 } 2478 }
2428 2479
2480 band = chanctx_conf->def.chan->band;
2481
2429 info = IEEE80211_SKB_CB(skb); 2482 info = IEEE80211_SKB_CB(skb);
2430 2483
2431 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 2484 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
@@ -2570,7 +2623,7 @@ EXPORT_SYMBOL(ieee80211_nullfunc_get);
2570struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw, 2623struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
2571 struct ieee80211_vif *vif, 2624 struct ieee80211_vif *vif,
2572 const u8 *ssid, size_t ssid_len, 2625 const u8 *ssid, size_t ssid_len,
2573 const u8 *ie, size_t ie_len) 2626 size_t tailroom)
2574{ 2627{
2575 struct ieee80211_sub_if_data *sdata; 2628 struct ieee80211_sub_if_data *sdata;
2576 struct ieee80211_local *local; 2629 struct ieee80211_local *local;
@@ -2584,7 +2637,7 @@ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
2584 ie_ssid_len = 2 + ssid_len; 2637 ie_ssid_len = 2 + ssid_len;
2585 2638
2586 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) + 2639 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
2587 ie_ssid_len + ie_len); 2640 ie_ssid_len + tailroom);
2588 if (!skb) 2641 if (!skb)
2589 return NULL; 2642 return NULL;
2590 2643
@@ -2605,11 +2658,6 @@ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
2605 memcpy(pos, ssid, ssid_len); 2658 memcpy(pos, ssid, ssid_len);
2606 pos += ssid_len; 2659 pos += ssid_len;
2607 2660
2608 if (ie) {
2609 pos = skb_put(skb, ie_len);
2610 memcpy(pos, ie, ie_len);
2611 }
2612
2613 return skb; 2661 return skb;
2614} 2662}
2615EXPORT_SYMBOL(ieee80211_probereq_get); 2663EXPORT_SYMBOL(ieee80211_probereq_get);
@@ -2653,29 +2701,40 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2653 struct sk_buff *skb = NULL; 2701 struct sk_buff *skb = NULL;
2654 struct ieee80211_tx_data tx; 2702 struct ieee80211_tx_data tx;
2655 struct ieee80211_sub_if_data *sdata; 2703 struct ieee80211_sub_if_data *sdata;
2656 struct ieee80211_if_ap *bss = NULL; 2704 struct ps_data *ps;
2657 struct beacon_data *beacon;
2658 struct ieee80211_tx_info *info; 2705 struct ieee80211_tx_info *info;
2706 struct ieee80211_chanctx_conf *chanctx_conf;
2659 2707
2660 sdata = vif_to_sdata(vif); 2708 sdata = vif_to_sdata(vif);
2661 bss = &sdata->u.ap;
2662 2709
2663 rcu_read_lock(); 2710 rcu_read_lock();
2664 beacon = rcu_dereference(bss->beacon); 2711 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
2712
2713 if (!chanctx_conf)
2714 goto out;
2665 2715
2666 if (sdata->vif.type != NL80211_IFTYPE_AP || !beacon || !beacon->head) 2716 if (sdata->vif.type == NL80211_IFTYPE_AP) {
2717 struct beacon_data *beacon =
2718 rcu_dereference(sdata->u.ap.beacon);
2719
2720 if (!beacon || !beacon->head)
2721 goto out;
2722
2723 ps = &sdata->u.ap.ps;
2724 } else {
2667 goto out; 2725 goto out;
2726 }
2668 2727
2669 if (bss->dtim_count != 0 || !bss->dtim_bc_mc) 2728 if (ps->dtim_count != 0 || !ps->dtim_bc_mc)
2670 goto out; /* send buffered bc/mc only after DTIM beacon */ 2729 goto out; /* send buffered bc/mc only after DTIM beacon */
2671 2730
2672 while (1) { 2731 while (1) {
2673 skb = skb_dequeue(&bss->ps_bc_buf); 2732 skb = skb_dequeue(&ps->bc_buf);
2674 if (!skb) 2733 if (!skb)
2675 goto out; 2734 goto out;
2676 local->total_ps_buffered--; 2735 local->total_ps_buffered--;
2677 2736
2678 if (!skb_queue_empty(&bss->ps_bc_buf) && skb->len >= 2) { 2737 if (!skb_queue_empty(&ps->bc_buf) && skb->len >= 2) {
2679 struct ieee80211_hdr *hdr = 2738 struct ieee80211_hdr *hdr =
2680 (struct ieee80211_hdr *) skb->data; 2739 (struct ieee80211_hdr *) skb->data;
2681 /* more buffered multicast/broadcast frames ==> set 2740 /* more buffered multicast/broadcast frames ==> set
@@ -2693,7 +2752,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2693 info = IEEE80211_SKB_CB(skb); 2752 info = IEEE80211_SKB_CB(skb);
2694 2753
2695 tx.flags |= IEEE80211_TX_PS_BUFFERED; 2754 tx.flags |= IEEE80211_TX_PS_BUFFERED;
2696 info->band = local->oper_channel->band; 2755 info->band = chanctx_conf->def.chan->band;
2697 2756
2698 if (invoke_tx_handlers(&tx)) 2757 if (invoke_tx_handlers(&tx))
2699 skb = NULL; 2758 skb = NULL;
@@ -2704,8 +2763,9 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2704} 2763}
2705EXPORT_SYMBOL(ieee80211_get_buffered_bc); 2764EXPORT_SYMBOL(ieee80211_get_buffered_bc);
2706 2765
2707void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, 2766void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
2708 struct sk_buff *skb, int tid) 2767 struct sk_buff *skb, int tid,
2768 enum ieee80211_band band)
2709{ 2769{
2710 int ac = ieee802_1d_to_ac[tid & 7]; 2770 int ac = ieee802_1d_to_ac[tid & 7];
2711 2771
@@ -2722,6 +2782,6 @@ void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
2722 * requirements are that we do not come into tx with bhs on. 2782 * requirements are that we do not come into tx with bhs on.
2723 */ 2783 */
2724 local_bh_disable(); 2784 local_bh_disable();
2725 ieee80211_xmit(sdata, skb); 2785 ieee80211_xmit(sdata, skb, band);
2726 local_bh_enable(); 2786 local_bh_enable();
2727} 2787}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 239391807ca9..f11e8c540db4 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -512,7 +512,7 @@ void ieee80211_wake_queues(struct ieee80211_hw *hw)
512EXPORT_SYMBOL(ieee80211_wake_queues); 512EXPORT_SYMBOL(ieee80211_wake_queues);
513 513
514void ieee80211_iterate_active_interfaces( 514void ieee80211_iterate_active_interfaces(
515 struct ieee80211_hw *hw, 515 struct ieee80211_hw *hw, u32 iter_flags,
516 void (*iterator)(void *data, u8 *mac, 516 void (*iterator)(void *data, u8 *mac,
517 struct ieee80211_vif *vif), 517 struct ieee80211_vif *vif),
518 void *data) 518 void *data)
@@ -530,6 +530,9 @@ void ieee80211_iterate_active_interfaces(
530 default: 530 default:
531 break; 531 break;
532 } 532 }
533 if (!(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL) &&
534 !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
535 continue;
533 if (ieee80211_sdata_running(sdata)) 536 if (ieee80211_sdata_running(sdata))
534 iterator(data, sdata->vif.addr, 537 iterator(data, sdata->vif.addr,
535 &sdata->vif); 538 &sdata->vif);
@@ -537,7 +540,9 @@ void ieee80211_iterate_active_interfaces(
537 540
538 sdata = rcu_dereference_protected(local->monitor_sdata, 541 sdata = rcu_dereference_protected(local->monitor_sdata,
539 lockdep_is_held(&local->iflist_mtx)); 542 lockdep_is_held(&local->iflist_mtx));
540 if (sdata) 543 if (sdata &&
544 (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL ||
545 sdata->flags & IEEE80211_SDATA_IN_DRIVER))
541 iterator(data, sdata->vif.addr, &sdata->vif); 546 iterator(data, sdata->vif.addr, &sdata->vif);
542 547
543 mutex_unlock(&local->iflist_mtx); 548 mutex_unlock(&local->iflist_mtx);
@@ -545,7 +550,7 @@ void ieee80211_iterate_active_interfaces(
545EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces); 550EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces);
546 551
547void ieee80211_iterate_active_interfaces_atomic( 552void ieee80211_iterate_active_interfaces_atomic(
548 struct ieee80211_hw *hw, 553 struct ieee80211_hw *hw, u32 iter_flags,
549 void (*iterator)(void *data, u8 *mac, 554 void (*iterator)(void *data, u8 *mac,
550 struct ieee80211_vif *vif), 555 struct ieee80211_vif *vif),
551 void *data) 556 void *data)
@@ -563,13 +568,18 @@ void ieee80211_iterate_active_interfaces_atomic(
563 default: 568 default:
564 break; 569 break;
565 } 570 }
571 if (!(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL) &&
572 !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
573 continue;
566 if (ieee80211_sdata_running(sdata)) 574 if (ieee80211_sdata_running(sdata))
567 iterator(data, sdata->vif.addr, 575 iterator(data, sdata->vif.addr,
568 &sdata->vif); 576 &sdata->vif);
569 } 577 }
570 578
571 sdata = rcu_dereference(local->monitor_sdata); 579 sdata = rcu_dereference(local->monitor_sdata);
572 if (sdata) 580 if (sdata &&
581 (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL ||
582 sdata->flags & IEEE80211_SDATA_IN_DRIVER))
573 iterator(data, sdata->vif.addr, &sdata->vif); 583 iterator(data, sdata->vif.addr, &sdata->vif);
574 584
575 rcu_read_unlock(); 585 rcu_read_unlock();
@@ -769,6 +779,18 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
769 else 779 else
770 elem_parse_failed = true; 780 elem_parse_failed = true;
771 break; 781 break;
782 case WLAN_EID_VHT_CAPABILITY:
783 if (elen >= sizeof(struct ieee80211_vht_cap))
784 elems->vht_cap_elem = (void *)pos;
785 else
786 elem_parse_failed = true;
787 break;
788 case WLAN_EID_VHT_OPERATION:
789 if (elen >= sizeof(struct ieee80211_vht_operation))
790 elems->vht_operation = (void *)pos;
791 else
792 elem_parse_failed = true;
793 break;
772 case WLAN_EID_MESH_ID: 794 case WLAN_EID_MESH_ID:
773 elems->mesh_id = pos; 795 elems->mesh_id = pos;
774 elems->mesh_id_len = elen; 796 elems->mesh_id_len = elen;
@@ -837,7 +859,7 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
837 if (elem_parse_failed) 859 if (elem_parse_failed)
838 elems->parse_error = true; 860 elems->parse_error = true;
839 else 861 else
840 set_bit(id, seen_elems); 862 __set_bit(id, seen_elems);
841 863
842 left -= elen; 864 left -= elen;
843 pos += elen; 865 pos += elen;
@@ -860,6 +882,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
860{ 882{
861 struct ieee80211_local *local = sdata->local; 883 struct ieee80211_local *local = sdata->local;
862 struct ieee80211_tx_queue_params qparam; 884 struct ieee80211_tx_queue_params qparam;
885 struct ieee80211_chanctx_conf *chanctx_conf;
863 int ac; 886 int ac;
864 bool use_11b, enable_qos; 887 bool use_11b, enable_qos;
865 int aCWmin, aCWmax; 888 int aCWmin, aCWmax;
@@ -872,8 +895,12 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
872 895
873 memset(&qparam, 0, sizeof(qparam)); 896 memset(&qparam, 0, sizeof(qparam));
874 897
875 use_11b = (local->oper_channel->band == IEEE80211_BAND_2GHZ) && 898 rcu_read_lock();
899 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
900 use_11b = (chanctx_conf &&
901 chanctx_conf->def.chan->band == IEEE80211_BAND_2GHZ) &&
876 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE); 902 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
903 rcu_read_unlock();
877 904
878 /* 905 /*
879 * By default disable QoS in STA mode for old access points, which do 906 * By default disable QoS in STA mode for old access points, which do
@@ -952,7 +979,7 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
952 const size_t supp_rates_len, 979 const size_t supp_rates_len,
953 const u8 *supp_rates) 980 const u8 *supp_rates)
954{ 981{
955 struct ieee80211_local *local = sdata->local; 982 struct ieee80211_chanctx_conf *chanctx_conf;
956 int i, have_higher_than_11mbit = 0; 983 int i, have_higher_than_11mbit = 0;
957 984
958 /* cf. IEEE 802.11 9.2.12 */ 985 /* cf. IEEE 802.11 9.2.12 */
@@ -960,11 +987,16 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
960 if ((supp_rates[i] & 0x7f) * 5 > 110) 987 if ((supp_rates[i] & 0x7f) * 5 > 110)
961 have_higher_than_11mbit = 1; 988 have_higher_than_11mbit = 1;
962 989
963 if (local->oper_channel->band == IEEE80211_BAND_2GHZ && 990 rcu_read_lock();
991 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
992
993 if (chanctx_conf &&
994 chanctx_conf->def.chan->band == IEEE80211_BAND_2GHZ &&
964 have_higher_than_11mbit) 995 have_higher_than_11mbit)
965 sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; 996 sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
966 else 997 else
967 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; 998 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
999 rcu_read_unlock();
968 1000
969 ieee80211_set_wmm_default(sdata, true); 1001 ieee80211_set_wmm_default(sdata, true);
970} 1002}
@@ -996,7 +1028,7 @@ u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
996} 1028}
997 1029
998void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, 1030void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
999 u16 transaction, u16 auth_alg, 1031 u16 transaction, u16 auth_alg, u16 status,
1000 u8 *extra, size_t extra_len, const u8 *da, 1032 u8 *extra, size_t extra_len, const u8 *da,
1001 const u8 *bssid, const u8 *key, u8 key_len, u8 key_idx) 1033 const u8 *bssid, const u8 *key, u8 key_len, u8 key_idx)
1002{ 1034{
@@ -1021,7 +1053,7 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1021 memcpy(mgmt->bssid, bssid, ETH_ALEN); 1053 memcpy(mgmt->bssid, bssid, ETH_ALEN);
1022 mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg); 1054 mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg);
1023 mgmt->u.auth.auth_transaction = cpu_to_le16(transaction); 1055 mgmt->u.auth.auth_transaction = cpu_to_le16(transaction);
1024 mgmt->u.auth.status_code = cpu_to_le16(0); 1056 mgmt->u.auth.status_code = cpu_to_le16(status);
1025 if (extra) 1057 if (extra)
1026 memcpy(skb_put(skb, extra_len), extra, extra_len); 1058 memcpy(skb_put(skb, extra_len), extra, extra_len);
1027 1059
@@ -1075,12 +1107,12 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
1075} 1107}
1076 1108
1077int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, 1109int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
1078 const u8 *ie, size_t ie_len, 1110 size_t buffer_len, const u8 *ie, size_t ie_len,
1079 enum ieee80211_band band, u32 rate_mask, 1111 enum ieee80211_band band, u32 rate_mask,
1080 u8 channel) 1112 u8 channel)
1081{ 1113{
1082 struct ieee80211_supported_band *sband; 1114 struct ieee80211_supported_band *sband;
1083 u8 *pos; 1115 u8 *pos = buffer, *end = buffer + buffer_len;
1084 size_t offset = 0, noffset; 1116 size_t offset = 0, noffset;
1085 int supp_rates_len, i; 1117 int supp_rates_len, i;
1086 u8 rates[32]; 1118 u8 rates[32];
@@ -1091,8 +1123,6 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
1091 if (WARN_ON_ONCE(!sband)) 1123 if (WARN_ON_ONCE(!sband))
1092 return 0; 1124 return 0;
1093 1125
1094 pos = buffer;
1095
1096 num_rates = 0; 1126 num_rates = 0;
1097 for (i = 0; i < sband->n_bitrates; i++) { 1127 for (i = 0; i < sband->n_bitrates; i++) {
1098 if ((BIT(i) & rate_mask) == 0) 1128 if ((BIT(i) & rate_mask) == 0)
@@ -1102,6 +1132,8 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
1102 1132
1103 supp_rates_len = min_t(int, num_rates, 8); 1133 supp_rates_len = min_t(int, num_rates, 8);
1104 1134
1135 if (end - pos < 2 + supp_rates_len)
1136 goto out_err;
1105 *pos++ = WLAN_EID_SUPP_RATES; 1137 *pos++ = WLAN_EID_SUPP_RATES;
1106 *pos++ = supp_rates_len; 1138 *pos++ = supp_rates_len;
1107 memcpy(pos, rates, supp_rates_len); 1139 memcpy(pos, rates, supp_rates_len);
@@ -1118,6 +1150,8 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
1118 before_extrates, 1150 before_extrates,
1119 ARRAY_SIZE(before_extrates), 1151 ARRAY_SIZE(before_extrates),
1120 offset); 1152 offset);
1153 if (end - pos < noffset - offset)
1154 goto out_err;
1121 memcpy(pos, ie + offset, noffset - offset); 1155 memcpy(pos, ie + offset, noffset - offset);
1122 pos += noffset - offset; 1156 pos += noffset - offset;
1123 offset = noffset; 1157 offset = noffset;
@@ -1125,6 +1159,8 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
1125 1159
1126 ext_rates_len = num_rates - supp_rates_len; 1160 ext_rates_len = num_rates - supp_rates_len;
1127 if (ext_rates_len > 0) { 1161 if (ext_rates_len > 0) {
1162 if (end - pos < 2 + ext_rates_len)
1163 goto out_err;
1128 *pos++ = WLAN_EID_EXT_SUPP_RATES; 1164 *pos++ = WLAN_EID_EXT_SUPP_RATES;
1129 *pos++ = ext_rates_len; 1165 *pos++ = ext_rates_len;
1130 memcpy(pos, rates + supp_rates_len, ext_rates_len); 1166 memcpy(pos, rates + supp_rates_len, ext_rates_len);
@@ -1132,6 +1168,8 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
1132 } 1168 }
1133 1169
1134 if (channel && sband->band == IEEE80211_BAND_2GHZ) { 1170 if (channel && sband->band == IEEE80211_BAND_2GHZ) {
1171 if (end - pos < 3)
1172 goto out_err;
1135 *pos++ = WLAN_EID_DS_PARAMS; 1173 *pos++ = WLAN_EID_DS_PARAMS;
1136 *pos++ = 1; 1174 *pos++ = 1;
1137 *pos++ = channel; 1175 *pos++ = channel;
@@ -1150,14 +1188,19 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
1150 noffset = ieee80211_ie_split(ie, ie_len, 1188 noffset = ieee80211_ie_split(ie, ie_len,
1151 before_ht, ARRAY_SIZE(before_ht), 1189 before_ht, ARRAY_SIZE(before_ht),
1152 offset); 1190 offset);
1191 if (end - pos < noffset - offset)
1192 goto out_err;
1153 memcpy(pos, ie + offset, noffset - offset); 1193 memcpy(pos, ie + offset, noffset - offset);
1154 pos += noffset - offset; 1194 pos += noffset - offset;
1155 offset = noffset; 1195 offset = noffset;
1156 } 1196 }
1157 1197
1158 if (sband->ht_cap.ht_supported) 1198 if (sband->ht_cap.ht_supported) {
1199 if (end - pos < 2 + sizeof(struct ieee80211_ht_cap))
1200 goto out_err;
1159 pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, 1201 pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
1160 sband->ht_cap.cap); 1202 sband->ht_cap.cap);
1203 }
1161 1204
1162 /* 1205 /*
1163 * If adding more here, adjust code in main.c 1206 * If adding more here, adjust code in main.c
@@ -1167,15 +1210,23 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
1167 /* add any remaining custom IEs */ 1210 /* add any remaining custom IEs */
1168 if (ie && ie_len) { 1211 if (ie && ie_len) {
1169 noffset = ie_len; 1212 noffset = ie_len;
1213 if (end - pos < noffset - offset)
1214 goto out_err;
1170 memcpy(pos, ie + offset, noffset - offset); 1215 memcpy(pos, ie + offset, noffset - offset);
1171 pos += noffset - offset; 1216 pos += noffset - offset;
1172 } 1217 }
1173 1218
1174 if (sband->vht_cap.vht_supported) 1219 if (sband->vht_cap.vht_supported) {
1220 if (end - pos < 2 + sizeof(struct ieee80211_vht_cap))
1221 goto out_err;
1175 pos = ieee80211_ie_build_vht_cap(pos, &sband->vht_cap, 1222 pos = ieee80211_ie_build_vht_cap(pos, &sband->vht_cap,
1176 sband->vht_cap.cap); 1223 sband->vht_cap.cap);
1224 }
1177 1225
1178 return pos - buffer; 1226 return pos - buffer;
1227 out_err:
1228 WARN_ONCE(1, "not enough space for preq IEs\n");
1229 return pos - buffer;
1179} 1230}
1180 1231
1181struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, 1232struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
@@ -1188,14 +1239,8 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1188 struct ieee80211_local *local = sdata->local; 1239 struct ieee80211_local *local = sdata->local;
1189 struct sk_buff *skb; 1240 struct sk_buff *skb;
1190 struct ieee80211_mgmt *mgmt; 1241 struct ieee80211_mgmt *mgmt;
1191 size_t buf_len;
1192 u8 *buf;
1193 u8 chan_no; 1242 u8 chan_no;
1194 1243 int ies_len;
1195 /* FIXME: come up with a proper value */
1196 buf = kmalloc(200 + ie_len, GFP_KERNEL);
1197 if (!buf)
1198 return NULL;
1199 1244
1200 /* 1245 /*
1201 * Do not send DS Channel parameter for directed probe requests 1246 * Do not send DS Channel parameter for directed probe requests
@@ -1207,14 +1252,16 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1207 else 1252 else
1208 chan_no = ieee80211_frequency_to_channel(chan->center_freq); 1253 chan_no = ieee80211_frequency_to_channel(chan->center_freq);
1209 1254
1210 buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len, chan->band,
1211 ratemask, chan_no);
1212
1213 skb = ieee80211_probereq_get(&local->hw, &sdata->vif, 1255 skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
1214 ssid, ssid_len, 1256 ssid, ssid_len, 100 + ie_len);
1215 buf, buf_len);
1216 if (!skb) 1257 if (!skb)
1217 goto out; 1258 return NULL;
1259
1260 ies_len = ieee80211_build_preq_ies(local, skb_tail_pointer(skb),
1261 skb_tailroom(skb),
1262 ie, ie_len, chan->band,
1263 ratemask, chan_no);
1264 skb_put(skb, ies_len);
1218 1265
1219 if (dst) { 1266 if (dst) {
1220 mgmt = (struct ieee80211_mgmt *) skb->data; 1267 mgmt = (struct ieee80211_mgmt *) skb->data;
@@ -1224,9 +1271,6 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1224 1271
1225 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 1272 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
1226 1273
1227 out:
1228 kfree(buf);
1229
1230 return skb; 1274 return skb;
1231} 1275}
1232 1276
@@ -1234,7 +1278,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1234 const u8 *ssid, size_t ssid_len, 1278 const u8 *ssid, size_t ssid_len,
1235 const u8 *ie, size_t ie_len, 1279 const u8 *ie, size_t ie_len,
1236 u32 ratemask, bool directed, bool no_cck, 1280 u32 ratemask, bool directed, bool no_cck,
1237 struct ieee80211_channel *channel) 1281 struct ieee80211_channel *channel, bool scan)
1238{ 1282{
1239 struct sk_buff *skb; 1283 struct sk_buff *skb;
1240 1284
@@ -1245,7 +1289,10 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1245 if (no_cck) 1289 if (no_cck)
1246 IEEE80211_SKB_CB(skb)->flags |= 1290 IEEE80211_SKB_CB(skb)->flags |=
1247 IEEE80211_TX_CTL_NO_CCK_RATE; 1291 IEEE80211_TX_CTL_NO_CCK_RATE;
1248 ieee80211_tx_skb(sdata, skb); 1292 if (scan)
1293 ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band);
1294 else
1295 ieee80211_tx_skb(sdata, skb);
1249 } 1296 }
1250} 1297}
1251 1298
@@ -1308,6 +1355,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1308{ 1355{
1309 struct ieee80211_hw *hw = &local->hw; 1356 struct ieee80211_hw *hw = &local->hw;
1310 struct ieee80211_sub_if_data *sdata; 1357 struct ieee80211_sub_if_data *sdata;
1358 struct ieee80211_chanctx *ctx;
1311 struct sta_info *sta; 1359 struct sta_info *sta;
1312 int res, i; 1360 int res, i;
1313 1361
@@ -1380,6 +1428,46 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1380 res = drv_add_interface(local, sdata); 1428 res = drv_add_interface(local, sdata);
1381 } 1429 }
1382 1430
1431 /* add channel contexts */
1432 if (local->use_chanctx) {
1433 mutex_lock(&local->chanctx_mtx);
1434 list_for_each_entry(ctx, &local->chanctx_list, list)
1435 WARN_ON(drv_add_chanctx(local, ctx));
1436 mutex_unlock(&local->chanctx_mtx);
1437 }
1438
1439 list_for_each_entry(sdata, &local->interfaces, list) {
1440 struct ieee80211_chanctx_conf *ctx_conf;
1441
1442 if (!ieee80211_sdata_running(sdata))
1443 continue;
1444
1445 mutex_lock(&local->chanctx_mtx);
1446 ctx_conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
1447 lockdep_is_held(&local->chanctx_mtx));
1448 if (ctx_conf) {
1449 ctx = container_of(ctx_conf, struct ieee80211_chanctx,
1450 conf);
1451 drv_assign_vif_chanctx(local, sdata, ctx);
1452 }
1453 mutex_unlock(&local->chanctx_mtx);
1454 }
1455
1456 sdata = rtnl_dereference(local->monitor_sdata);
1457 if (sdata && local->use_chanctx && ieee80211_sdata_running(sdata)) {
1458 struct ieee80211_chanctx_conf *ctx_conf;
1459
1460 mutex_lock(&local->chanctx_mtx);
1461 ctx_conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
1462 lockdep_is_held(&local->chanctx_mtx));
1463 if (ctx_conf) {
1464 ctx = container_of(ctx_conf, struct ieee80211_chanctx,
1465 conf);
1466 drv_assign_vif_chanctx(local, sdata, ctx);
1467 }
1468 mutex_unlock(&local->chanctx_mtx);
1469 }
1470
1383 /* add STAs back */ 1471 /* add STAs back */
1384 mutex_lock(&local->sta_mtx); 1472 mutex_lock(&local->sta_mtx);
1385 list_for_each_entry(sta, &local->sta_list, list) { 1473 list_for_each_entry(sta, &local->sta_list, list) {
@@ -1435,7 +1523,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1435 BSS_CHANGED_BSSID | 1523 BSS_CHANGED_BSSID |
1436 BSS_CHANGED_CQM | 1524 BSS_CHANGED_CQM |
1437 BSS_CHANGED_QOS | 1525 BSS_CHANGED_QOS |
1438 BSS_CHANGED_IDLE; 1526 BSS_CHANGED_IDLE |
1527 BSS_CHANGED_TXPOWER;
1439 1528
1440 switch (sdata->vif.type) { 1529 switch (sdata->vif.type) {
1441 case NL80211_IFTYPE_STATION: 1530 case NL80211_IFTYPE_STATION:
@@ -1450,11 +1539,15 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1450 changed |= BSS_CHANGED_IBSS; 1539 changed |= BSS_CHANGED_IBSS;
1451 /* fall through */ 1540 /* fall through */
1452 case NL80211_IFTYPE_AP: 1541 case NL80211_IFTYPE_AP:
1453 changed |= BSS_CHANGED_SSID; 1542 changed |= BSS_CHANGED_SSID | BSS_CHANGED_P2P_PS;
1454 1543
1455 if (sdata->vif.type == NL80211_IFTYPE_AP) 1544 if (sdata->vif.type == NL80211_IFTYPE_AP) {
1456 changed |= BSS_CHANGED_AP_PROBE_RESP; 1545 changed |= BSS_CHANGED_AP_PROBE_RESP;
1457 1546
1547 if (rcu_access_pointer(sdata->u.ap.beacon))
1548 drv_start_ap(local, sdata);
1549 }
1550
1458 /* fall through */ 1551 /* fall through */
1459 case NL80211_IFTYPE_MESH_POINT: 1552 case NL80211_IFTYPE_MESH_POINT:
1460 changed |= BSS_CHANGED_BEACON | 1553 changed |= BSS_CHANGED_BEACON |
@@ -1491,6 +1584,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1491 list_for_each_entry(sdata, &local->interfaces, list) { 1584 list_for_each_entry(sdata, &local->interfaces, list) {
1492 if (sdata->vif.type != NL80211_IFTYPE_STATION) 1585 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1493 continue; 1586 continue;
1587 if (!sdata->u.mgd.associated)
1588 continue;
1494 1589
1495 ieee80211_send_nullfunc(local, sdata, 0); 1590 ieee80211_send_nullfunc(local, sdata, 0);
1496 } 1591 }
@@ -1551,8 +1646,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1551 * If this is for hw restart things are still running. 1646 * If this is for hw restart things are still running.
1552 * We may want to change that later, however. 1647 * We may want to change that later, however.
1553 */ 1648 */
1554 if (!local->suspended) 1649 if (!local->suspended) {
1650 drv_restart_complete(local);
1555 return 0; 1651 return 0;
1652 }
1556 1653
1557#ifdef CONFIG_PM 1654#ifdef CONFIG_PM
1558 /* first set suspended false, then resuming */ 1655 /* first set suspended false, then resuming */
@@ -1615,68 +1712,24 @@ void ieee80211_resume_disconnect(struct ieee80211_vif *vif)
1615} 1712}
1616EXPORT_SYMBOL_GPL(ieee80211_resume_disconnect); 1713EXPORT_SYMBOL_GPL(ieee80211_resume_disconnect);
1617 1714
1618static int check_mgd_smps(struct ieee80211_if_managed *ifmgd, 1715void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata)
1619 enum ieee80211_smps_mode *smps_mode)
1620{ 1716{
1621 if (ifmgd->associated) { 1717 struct ieee80211_local *local = sdata->local;
1622 *smps_mode = ifmgd->ap_smps; 1718 struct ieee80211_chanctx_conf *chanctx_conf;
1623 1719 struct ieee80211_chanctx *chanctx;
1624 if (*smps_mode == IEEE80211_SMPS_AUTOMATIC) {
1625 if (ifmgd->powersave)
1626 *smps_mode = IEEE80211_SMPS_DYNAMIC;
1627 else
1628 *smps_mode = IEEE80211_SMPS_OFF;
1629 }
1630
1631 return 1;
1632 }
1633
1634 return 0;
1635}
1636
1637void ieee80211_recalc_smps(struct ieee80211_local *local)
1638{
1639 struct ieee80211_sub_if_data *sdata;
1640 enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_OFF;
1641 int count = 0;
1642
1643 mutex_lock(&local->iflist_mtx);
1644
1645 /*
1646 * This function could be improved to handle multiple
1647 * interfaces better, but right now it makes any
1648 * non-station interfaces force SM PS to be turned
1649 * off. If there are multiple station interfaces it
1650 * could also use the best possible mode, e.g. if
1651 * one is in static and the other in dynamic then
1652 * dynamic is ok.
1653 */
1654
1655 list_for_each_entry(sdata, &local->interfaces, list) {
1656 if (!ieee80211_sdata_running(sdata))
1657 continue;
1658 if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
1659 continue;
1660 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1661 goto set;
1662 1720
1663 count += check_mgd_smps(&sdata->u.mgd, &smps_mode); 1721 mutex_lock(&local->chanctx_mtx);
1664 1722
1665 if (count > 1) { 1723 chanctx_conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
1666 smps_mode = IEEE80211_SMPS_OFF; 1724 lockdep_is_held(&local->chanctx_mtx));
1667 break;
1668 }
1669 }
1670 1725
1671 if (smps_mode == local->smps_mode) 1726 if (WARN_ON_ONCE(!chanctx_conf))
1672 goto unlock; 1727 goto unlock;
1673 1728
1674 set: 1729 chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf);
1675 local->smps_mode = smps_mode; 1730 ieee80211_recalc_smps_chanctx(local, chanctx);
1676 /* changed flag is auto-detected for this */
1677 ieee80211_hw_config(local, 0);
1678 unlock: 1731 unlock:
1679 mutex_unlock(&local->iflist_mtx); 1732 mutex_unlock(&local->chanctx_mtx);
1680} 1733}
1681 1734
1682static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id) 1735static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id)
@@ -1816,8 +1869,8 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
1816 __le32 tmp; 1869 __le32 tmp;
1817 1870
1818 *pos++ = WLAN_EID_VHT_CAPABILITY; 1871 *pos++ = WLAN_EID_VHT_CAPABILITY;
1819 *pos++ = sizeof(struct ieee80211_vht_capabilities); 1872 *pos++ = sizeof(struct ieee80211_vht_cap);
1820 memset(pos, 0, sizeof(struct ieee80211_vht_capabilities)); 1873 memset(pos, 0, sizeof(struct ieee80211_vht_cap));
1821 1874
1822 /* capability flags */ 1875 /* capability flags */
1823 tmp = cpu_to_le32(cap); 1876 tmp = cpu_to_le32(cap);
@@ -1832,8 +1885,7 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
1832} 1885}
1833 1886
1834u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, 1887u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1835 struct ieee80211_channel *channel, 1888 const struct cfg80211_chan_def *chandef,
1836 enum nl80211_channel_type channel_type,
1837 u16 prot_mode) 1889 u16 prot_mode)
1838{ 1890{
1839 struct ieee80211_ht_operation *ht_oper; 1891 struct ieee80211_ht_operation *ht_oper;
@@ -1841,23 +1893,25 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1841 *pos++ = WLAN_EID_HT_OPERATION; 1893 *pos++ = WLAN_EID_HT_OPERATION;
1842 *pos++ = sizeof(struct ieee80211_ht_operation); 1894 *pos++ = sizeof(struct ieee80211_ht_operation);
1843 ht_oper = (struct ieee80211_ht_operation *)pos; 1895 ht_oper = (struct ieee80211_ht_operation *)pos;
1844 ht_oper->primary_chan = 1896 ht_oper->primary_chan = ieee80211_frequency_to_channel(
1845 ieee80211_frequency_to_channel(channel->center_freq); 1897 chandef->chan->center_freq);
1846 switch (channel_type) { 1898 switch (chandef->width) {
1847 case NL80211_CHAN_HT40MINUS: 1899 case NL80211_CHAN_WIDTH_160:
1848 ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW; 1900 case NL80211_CHAN_WIDTH_80P80:
1849 break; 1901 case NL80211_CHAN_WIDTH_80:
1850 case NL80211_CHAN_HT40PLUS: 1902 case NL80211_CHAN_WIDTH_40:
1851 ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; 1903 if (chandef->center_freq1 > chandef->chan->center_freq)
1904 ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
1905 else
1906 ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
1852 break; 1907 break;
1853 case NL80211_CHAN_HT20:
1854 default: 1908 default:
1855 ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE; 1909 ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE;
1856 break; 1910 break;
1857 } 1911 }
1858 if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 && 1912 if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
1859 channel_type != NL80211_CHAN_NO_HT && 1913 chandef->width != NL80211_CHAN_WIDTH_20_NOHT &&
1860 channel_type != NL80211_CHAN_HT20) 1914 chandef->width != NL80211_CHAN_WIDTH_20)
1861 ht_oper->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; 1915 ht_oper->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
1862 1916
1863 ht_oper->operation_mode = cpu_to_le16(prot_mode); 1917 ht_oper->operation_mode = cpu_to_le16(prot_mode);
@@ -1871,13 +1925,17 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1871 return pos + sizeof(struct ieee80211_ht_operation); 1925 return pos + sizeof(struct ieee80211_ht_operation);
1872} 1926}
1873 1927
1874enum nl80211_channel_type 1928void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
1875ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper) 1929 struct ieee80211_ht_operation *ht_oper,
1930 struct cfg80211_chan_def *chandef)
1876{ 1931{
1877 enum nl80211_channel_type channel_type; 1932 enum nl80211_channel_type channel_type;
1878 1933
1879 if (!ht_oper) 1934 if (!ht_oper) {
1880 return NL80211_CHAN_NO_HT; 1935 cfg80211_chandef_create(chandef, control_chan,
1936 NL80211_CHAN_NO_HT);
1937 return;
1938 }
1881 1939
1882 switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { 1940 switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
1883 case IEEE80211_HT_PARAM_CHA_SEC_NONE: 1941 case IEEE80211_HT_PARAM_CHA_SEC_NONE:
@@ -1893,7 +1951,7 @@ ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper)
1893 channel_type = NL80211_CHAN_NO_HT; 1951 channel_type = NL80211_CHAN_NO_HT;
1894 } 1952 }
1895 1953
1896 return channel_type; 1954 cfg80211_chandef_create(chandef, control_chan, channel_type);
1897} 1955}
1898 1956
1899int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata, 1957int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
@@ -1975,3 +2033,84 @@ int ieee80211_ave_rssi(struct ieee80211_vif *vif)
1975 return ifmgd->ave_beacon_signal; 2033 return ifmgd->ave_beacon_signal;
1976} 2034}
1977EXPORT_SYMBOL_GPL(ieee80211_ave_rssi); 2035EXPORT_SYMBOL_GPL(ieee80211_ave_rssi);
2036
2037u8 ieee80211_mcs_to_chains(const struct ieee80211_mcs_info *mcs)
2038{
2039 if (!mcs)
2040 return 1;
2041
2042 /* TODO: consider rx_highest */
2043
2044 if (mcs->rx_mask[3])
2045 return 4;
2046 if (mcs->rx_mask[2])
2047 return 3;
2048 if (mcs->rx_mask[1])
2049 return 2;
2050 return 1;
2051}
2052
2053/**
2054 * ieee80211_calculate_rx_timestamp - calculate timestamp in frame
2055 * @local: mac80211 hw info struct
2056 * @status: RX status
2057 * @mpdu_len: total MPDU length (including FCS)
2058 * @mpdu_offset: offset into MPDU to calculate timestamp at
2059 *
2060 * This function calculates the RX timestamp at the given MPDU offset, taking
2061 * into account what the RX timestamp was. An offset of 0 will just normalize
2062 * the timestamp to TSF at beginning of MPDU reception.
2063 */
2064u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
2065 struct ieee80211_rx_status *status,
2066 unsigned int mpdu_len,
2067 unsigned int mpdu_offset)
2068{
2069 u64 ts = status->mactime;
2070 struct rate_info ri;
2071 u16 rate;
2072
2073 if (WARN_ON(!ieee80211_have_rx_timestamp(status)))
2074 return 0;
2075
2076 memset(&ri, 0, sizeof(ri));
2077
2078 /* Fill cfg80211 rate info */
2079 if (status->flag & RX_FLAG_HT) {
2080 ri.mcs = status->rate_idx;
2081 ri.flags |= RATE_INFO_FLAGS_MCS;
2082 if (status->flag & RX_FLAG_40MHZ)
2083 ri.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
2084 if (status->flag & RX_FLAG_SHORT_GI)
2085 ri.flags |= RATE_INFO_FLAGS_SHORT_GI;
2086 } else if (status->flag & RX_FLAG_VHT) {
2087 ri.flags |= RATE_INFO_FLAGS_VHT_MCS;
2088 ri.mcs = status->rate_idx;
2089 ri.nss = status->vht_nss;
2090 if (status->flag & RX_FLAG_40MHZ)
2091 ri.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
2092 if (status->flag & RX_FLAG_80MHZ)
2093 ri.flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH;
2094 if (status->flag & RX_FLAG_80P80MHZ)
2095 ri.flags |= RATE_INFO_FLAGS_80P80_MHZ_WIDTH;
2096 if (status->flag & RX_FLAG_160MHZ)
2097 ri.flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH;
2098 if (status->flag & RX_FLAG_SHORT_GI)
2099 ri.flags |= RATE_INFO_FLAGS_SHORT_GI;
2100 } else {
2101 struct ieee80211_supported_band *sband;
2102
2103 sband = local->hw.wiphy->bands[status->band];
2104 ri.legacy = sband->bitrates[status->rate_idx].bitrate;
2105 }
2106
2107 rate = cfg80211_calculate_bitrate(&ri);
2108
2109 /* rewind from end of MPDU */
2110 if (status->flag & RX_FLAG_MACTIME_END)
2111 ts -= mpdu_len * 8 * 10 / rate;
2112
2113 ts += mpdu_offset * 8 * 10 / rate;
2114
2115 return ts;
2116}
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
new file mode 100644
index 000000000000..f311388aeedf
--- /dev/null
+++ b/net/mac80211/vht.c
@@ -0,0 +1,35 @@
1/*
2 * VHT handling
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/ieee80211.h>
10#include <linux/export.h>
11#include <net/mac80211.h>
12#include "ieee80211_i.h"
13
14
15void ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
16 struct ieee80211_supported_band *sband,
17 struct ieee80211_vht_cap *vht_cap_ie,
18 struct ieee80211_sta_vht_cap *vht_cap)
19{
20 if (WARN_ON_ONCE(!vht_cap))
21 return;
22
23 memset(vht_cap, 0, sizeof(*vht_cap));
24
25 if (!vht_cap_ie || !sband->vht_cap.vht_supported)
26 return;
27
28 vht_cap->vht_supported = true;
29
30 vht_cap->cap = le32_to_cpu(vht_cap_ie->vht_cap_info);
31
32 /* Copy peer MCS info, the driver might need them. */
33 memcpy(&vht_cap->vht_mcs, &vht_cap_ie->supp_mcs,
34 sizeof(struct ieee80211_vht_mcs_info));
35}
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index cea06e9f26f4..906f00cd6d2f 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -160,31 +160,37 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
160 return ieee80211_downgrade_queue(sdata, skb); 160 return ieee80211_downgrade_queue(sdata, skb);
161} 161}
162 162
163/**
164 * ieee80211_set_qos_hdr - Fill in the QoS header if there is one.
165 *
166 * @sdata: local subif
167 * @skb: packet to be updated
168 */
163void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, 169void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
164 struct sk_buff *skb) 170 struct sk_buff *skb)
165{ 171{
166 struct ieee80211_hdr *hdr = (void *)skb->data; 172 struct ieee80211_hdr *hdr = (void *)skb->data;
167 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 173 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
174 u8 *p;
175 u8 ack_policy, tid;
168 176
169 /* Fill in the QoS header if there is one. */ 177 if (!ieee80211_is_data_qos(hdr->frame_control))
170 if (ieee80211_is_data_qos(hdr->frame_control)) { 178 return;
171 u8 *p = ieee80211_get_qos_ctl(hdr);
172 u8 ack_policy, tid;
173
174 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
175 179
176 /* preserve EOSP bit */ 180 p = ieee80211_get_qos_ctl(hdr);
177 ack_policy = *p & IEEE80211_QOS_CTL_EOSP; 181 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
178 182
179 if (is_multicast_ether_addr(hdr->addr1) || 183 /* preserve EOSP bit */
180 sdata->noack_map & BIT(tid)) { 184 ack_policy = *p & IEEE80211_QOS_CTL_EOSP;
181 ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK;
182 info->flags |= IEEE80211_TX_CTL_NO_ACK;
183 }
184 185
185 /* qos header is 2 bytes */ 186 if (is_multicast_ether_addr(hdr->addr1) ||
186 *p++ = ack_policy | tid; 187 sdata->noack_map & BIT(tid)) {
187 *p = ieee80211_vif_is_mesh(&sdata->vif) ? 188 ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK;
188 (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8) : 0; 189 info->flags |= IEEE80211_TX_CTL_NO_ACK;
189 } 190 }
191
192 /* qos header is 2 bytes */
193 *p++ = ack_policy | tid;
194 *p = ieee80211_vif_is_mesh(&sdata->vif) ?
195 (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8) : 0;
190} 196}
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 8bd2f5c6a56e..c175ee866ff4 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -104,7 +104,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
104 */ 104 */
105 if (status->flag & (RX_FLAG_MMIC_STRIPPED | RX_FLAG_IV_STRIPPED)) { 105 if (status->flag & (RX_FLAG_MMIC_STRIPPED | RX_FLAG_IV_STRIPPED)) {
106 if (status->flag & RX_FLAG_MMIC_ERROR) 106 if (status->flag & RX_FLAG_MMIC_ERROR)
107 goto mic_fail; 107 goto mic_fail_no_key;
108 108
109 if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key && 109 if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key &&
110 rx->key->conf.cipher == WLAN_CIPHER_SUITE_TKIP) 110 rx->key->conf.cipher == WLAN_CIPHER_SUITE_TKIP)
@@ -161,6 +161,9 @@ update_iv:
161 return RX_CONTINUE; 161 return RX_CONTINUE;
162 162
163mic_fail: 163mic_fail:
164 rx->key->u.tkip.mic_failures++;
165
166mic_fail_no_key:
164 /* 167 /*
165 * In some cases the key can be unset - e.g. a multicast packet, in 168 * In some cases the key can be unset - e.g. a multicast packet, in
166 * a driver that supports HW encryption. Send up the key idx only if 169 * a driver that supports HW encryption. Send up the key idx only if
diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c
index e748aed290aa..b7c7f815deae 100644
--- a/net/mac802154/ieee802154_dev.c
+++ b/net/mac802154/ieee802154_dev.c
@@ -224,9 +224,9 @@ void ieee802154_free_device(struct ieee802154_dev *hw)
224 224
225 BUG_ON(!list_empty(&priv->slaves)); 225 BUG_ON(!list_empty(&priv->slaves));
226 226
227 wpan_phy_free(priv->phy);
228
229 mutex_destroy(&priv->slaves_mtx); 227 mutex_destroy(&priv->slaves_mtx);
228
229 wpan_phy_free(priv->phy);
230} 230}
231EXPORT_SYMBOL(ieee802154_free_device); 231EXPORT_SYMBOL(ieee802154_free_device);
232 232
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index 1a4df39c722e..4e09d070995a 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -85,6 +85,7 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
85 85
86 if (!(priv->phy->channels_supported[page] & (1 << chan))) { 86 if (!(priv->phy->channels_supported[page] & (1 << chan))) {
87 WARN_ON(1); 87 WARN_ON(1);
88 kfree_skb(skb);
88 return NETDEV_TX_OK; 89 return NETDEV_TX_OK;
89 } 90 }
90 91
@@ -98,13 +99,15 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
98 } 99 }
99 100
100 if (skb_cow_head(skb, priv->hw.extra_tx_headroom)) { 101 if (skb_cow_head(skb, priv->hw.extra_tx_headroom)) {
101 dev_kfree_skb(skb); 102 kfree_skb(skb);
102 return NETDEV_TX_OK; 103 return NETDEV_TX_OK;
103 } 104 }
104 105
105 work = kzalloc(sizeof(struct xmit_work), GFP_ATOMIC); 106 work = kzalloc(sizeof(struct xmit_work), GFP_ATOMIC);
106 if (!work) 107 if (!work) {
108 kfree_skb(skb);
107 return NETDEV_TX_BUSY; 109 return NETDEV_TX_BUSY;
110 }
108 111
109 INIT_WORK(&work->work, mac802154_xmit_worker); 112 INIT_WORK(&work->work, mac802154_xmit_worker);
110 work->skb = skb; 113 work->skb = skb;
diff --git a/net/mac802154/wpan.c b/net/mac802154/wpan.c
index f30f6d4beea1..199b92261e94 100644
--- a/net/mac802154/wpan.c
+++ b/net/mac802154/wpan.c
@@ -327,8 +327,10 @@ mac802154_wpan_xmit(struct sk_buff *skb, struct net_device *dev)
327 327
328 if (chan == MAC802154_CHAN_NONE || 328 if (chan == MAC802154_CHAN_NONE ||
329 page >= WPAN_NUM_PAGES || 329 page >= WPAN_NUM_PAGES ||
330 chan >= WPAN_NUM_CHANNELS) 330 chan >= WPAN_NUM_CHANNELS) {
331 kfree_skb(skb);
331 return NETDEV_TX_OK; 332 return NETDEV_TX_OK;
333 }
332 334
333 skb->skb_iif = dev->ifindex; 335 skb->skb_iif = dev->ifindex;
334 dev->stats.tx_packets++; 336 dev->stats.tx_packets++;
@@ -387,7 +389,7 @@ void mac802154_wpan_setup(struct net_device *dev)
387 389
388static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb) 390static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb)
389{ 391{
390 return netif_rx(skb); 392 return netif_rx_ni(skb);
391} 393}
392 394
393static int 395static int
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index fefa514b9917..49e96df5fbc4 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -680,6 +680,13 @@ config NETFILTER_XT_TARGET_NFQUEUE
680 680
681 To compile it as a module, choose M here. If unsure, say N. 681 To compile it as a module, choose M here. If unsure, say N.
682 682
683config NETFILTER_XT_TARGET_NOTRACK
684 tristate '"NOTRACK" target support (DEPRECATED)'
685 depends on NF_CONNTRACK
686 depends on IP_NF_RAW || IP6_NF_RAW
687 depends on NETFILTER_ADVANCED
688 select NETFILTER_XT_TARGET_CT
689
683config NETFILTER_XT_TARGET_RATEEST 690config NETFILTER_XT_TARGET_RATEEST
684 tristate '"RATEEST" target support' 691 tristate '"RATEEST" target support'
685 depends on NETFILTER_ADVANCED 692 depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 68912dadf13d..a9c488b6c50d 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -295,8 +295,6 @@ void __init netfilter_init(void)
295 panic("cannot create netfilter proc entry"); 295 panic("cannot create netfilter proc entry");
296#endif 296#endif
297 297
298 if (netfilter_queue_init() < 0)
299 panic("cannot initialize nf_queue");
300 if (netfilter_log_init() < 0) 298 if (netfilter_log_init() < 0)
301 panic("cannot initialize nf_log"); 299 panic("cannot initialize nf_log");
302} 300}
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 778465f217fa..6d6d8f2b033e 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -28,9 +28,10 @@ static LIST_HEAD(ip_set_type_list); /* all registered set types */
28static DEFINE_MUTEX(ip_set_type_mutex); /* protects ip_set_type_list */ 28static DEFINE_MUTEX(ip_set_type_mutex); /* protects ip_set_type_list */
29static DEFINE_RWLOCK(ip_set_ref_lock); /* protects the set refs */ 29static DEFINE_RWLOCK(ip_set_ref_lock); /* protects the set refs */
30 30
31static struct ip_set **ip_set_list; /* all individual sets */ 31static struct ip_set * __rcu *ip_set_list; /* all individual sets */
32static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */ 32static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */
33 33
34#define IP_SET_INC 64
34#define STREQ(a, b) (strncmp(a, b, IPSET_MAXNAMELEN) == 0) 35#define STREQ(a, b) (strncmp(a, b, IPSET_MAXNAMELEN) == 0)
35 36
36static unsigned int max_sets; 37static unsigned int max_sets;
@@ -42,6 +43,12 @@ MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
42MODULE_DESCRIPTION("core IP set support"); 43MODULE_DESCRIPTION("core IP set support");
43MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET); 44MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
44 45
46/* When the nfnl mutex is held: */
47#define nfnl_dereference(p) \
48 rcu_dereference_protected(p, 1)
49#define nfnl_set(id) \
50 nfnl_dereference(ip_set_list)[id]
51
45/* 52/*
46 * The set types are implemented in modules and registered set types 53 * The set types are implemented in modules and registered set types
47 * can be found in ip_set_type_list. Adding/deleting types is 54 * can be found in ip_set_type_list. Adding/deleting types is
@@ -321,19 +328,19 @@ EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
321 */ 328 */
322 329
323static inline void 330static inline void
324__ip_set_get(ip_set_id_t index) 331__ip_set_get(struct ip_set *set)
325{ 332{
326 write_lock_bh(&ip_set_ref_lock); 333 write_lock_bh(&ip_set_ref_lock);
327 ip_set_list[index]->ref++; 334 set->ref++;
328 write_unlock_bh(&ip_set_ref_lock); 335 write_unlock_bh(&ip_set_ref_lock);
329} 336}
330 337
331static inline void 338static inline void
332__ip_set_put(ip_set_id_t index) 339__ip_set_put(struct ip_set *set)
333{ 340{
334 write_lock_bh(&ip_set_ref_lock); 341 write_lock_bh(&ip_set_ref_lock);
335 BUG_ON(ip_set_list[index]->ref == 0); 342 BUG_ON(set->ref == 0);
336 ip_set_list[index]->ref--; 343 set->ref--;
337 write_unlock_bh(&ip_set_ref_lock); 344 write_unlock_bh(&ip_set_ref_lock);
338} 345}
339 346
@@ -344,12 +351,25 @@ __ip_set_put(ip_set_id_t index)
344 * so it can't be destroyed (or changed) under our foot. 351 * so it can't be destroyed (or changed) under our foot.
345 */ 352 */
346 353
354static inline struct ip_set *
355ip_set_rcu_get(ip_set_id_t index)
356{
357 struct ip_set *set;
358
359 rcu_read_lock();
360 /* ip_set_list itself needs to be protected */
361 set = rcu_dereference(ip_set_list)[index];
362 rcu_read_unlock();
363
364 return set;
365}
366
347int 367int
348ip_set_test(ip_set_id_t index, const struct sk_buff *skb, 368ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
349 const struct xt_action_param *par, 369 const struct xt_action_param *par,
350 const struct ip_set_adt_opt *opt) 370 const struct ip_set_adt_opt *opt)
351{ 371{
352 struct ip_set *set = ip_set_list[index]; 372 struct ip_set *set = ip_set_rcu_get(index);
353 int ret = 0; 373 int ret = 0;
354 374
355 BUG_ON(set == NULL); 375 BUG_ON(set == NULL);
@@ -388,7 +408,7 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
388 const struct xt_action_param *par, 408 const struct xt_action_param *par,
389 const struct ip_set_adt_opt *opt) 409 const struct ip_set_adt_opt *opt)
390{ 410{
391 struct ip_set *set = ip_set_list[index]; 411 struct ip_set *set = ip_set_rcu_get(index);
392 int ret; 412 int ret;
393 413
394 BUG_ON(set == NULL); 414 BUG_ON(set == NULL);
@@ -411,7 +431,7 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
411 const struct xt_action_param *par, 431 const struct xt_action_param *par,
412 const struct ip_set_adt_opt *opt) 432 const struct ip_set_adt_opt *opt)
413{ 433{
414 struct ip_set *set = ip_set_list[index]; 434 struct ip_set *set = ip_set_rcu_get(index);
415 int ret = 0; 435 int ret = 0;
416 436
417 BUG_ON(set == NULL); 437 BUG_ON(set == NULL);
@@ -440,14 +460,17 @@ ip_set_get_byname(const char *name, struct ip_set **set)
440 ip_set_id_t i, index = IPSET_INVALID_ID; 460 ip_set_id_t i, index = IPSET_INVALID_ID;
441 struct ip_set *s; 461 struct ip_set *s;
442 462
463 rcu_read_lock();
443 for (i = 0; i < ip_set_max; i++) { 464 for (i = 0; i < ip_set_max; i++) {
444 s = ip_set_list[i]; 465 s = rcu_dereference(ip_set_list)[i];
445 if (s != NULL && STREQ(s->name, name)) { 466 if (s != NULL && STREQ(s->name, name)) {
446 __ip_set_get(i); 467 __ip_set_get(s);
447 index = i; 468 index = i;
448 *set = s; 469 *set = s;
470 break;
449 } 471 }
450 } 472 }
473 rcu_read_unlock();
451 474
452 return index; 475 return index;
453} 476}
@@ -462,8 +485,13 @@ EXPORT_SYMBOL_GPL(ip_set_get_byname);
462void 485void
463ip_set_put_byindex(ip_set_id_t index) 486ip_set_put_byindex(ip_set_id_t index)
464{ 487{
465 if (ip_set_list[index] != NULL) 488 struct ip_set *set;
466 __ip_set_put(index); 489
490 rcu_read_lock();
491 set = rcu_dereference(ip_set_list)[index];
492 if (set != NULL)
493 __ip_set_put(set);
494 rcu_read_unlock();
467} 495}
468EXPORT_SYMBOL_GPL(ip_set_put_byindex); 496EXPORT_SYMBOL_GPL(ip_set_put_byindex);
469 497
@@ -477,7 +505,7 @@ EXPORT_SYMBOL_GPL(ip_set_put_byindex);
477const char * 505const char *
478ip_set_name_byindex(ip_set_id_t index) 506ip_set_name_byindex(ip_set_id_t index)
479{ 507{
480 const struct ip_set *set = ip_set_list[index]; 508 const struct ip_set *set = ip_set_rcu_get(index);
481 509
482 BUG_ON(set == NULL); 510 BUG_ON(set == NULL);
483 BUG_ON(set->ref == 0); 511 BUG_ON(set->ref == 0);
@@ -501,11 +529,18 @@ EXPORT_SYMBOL_GPL(ip_set_name_byindex);
501ip_set_id_t 529ip_set_id_t
502ip_set_nfnl_get(const char *name) 530ip_set_nfnl_get(const char *name)
503{ 531{
532 ip_set_id_t i, index = IPSET_INVALID_ID;
504 struct ip_set *s; 533 struct ip_set *s;
505 ip_set_id_t index;
506 534
507 nfnl_lock(); 535 nfnl_lock();
508 index = ip_set_get_byname(name, &s); 536 for (i = 0; i < ip_set_max; i++) {
537 s = nfnl_set(i);
538 if (s != NULL && STREQ(s->name, name)) {
539 __ip_set_get(s);
540 index = i;
541 break;
542 }
543 }
509 nfnl_unlock(); 544 nfnl_unlock();
510 545
511 return index; 546 return index;
@@ -521,12 +556,15 @@ EXPORT_SYMBOL_GPL(ip_set_nfnl_get);
521ip_set_id_t 556ip_set_id_t
522ip_set_nfnl_get_byindex(ip_set_id_t index) 557ip_set_nfnl_get_byindex(ip_set_id_t index)
523{ 558{
559 struct ip_set *set;
560
524 if (index > ip_set_max) 561 if (index > ip_set_max)
525 return IPSET_INVALID_ID; 562 return IPSET_INVALID_ID;
526 563
527 nfnl_lock(); 564 nfnl_lock();
528 if (ip_set_list[index]) 565 set = nfnl_set(index);
529 __ip_set_get(index); 566 if (set)
567 __ip_set_get(set);
530 else 568 else
531 index = IPSET_INVALID_ID; 569 index = IPSET_INVALID_ID;
532 nfnl_unlock(); 570 nfnl_unlock();
@@ -545,8 +583,11 @@ EXPORT_SYMBOL_GPL(ip_set_nfnl_get_byindex);
545void 583void
546ip_set_nfnl_put(ip_set_id_t index) 584ip_set_nfnl_put(ip_set_id_t index)
547{ 585{
586 struct ip_set *set;
548 nfnl_lock(); 587 nfnl_lock();
549 ip_set_put_byindex(index); 588 set = nfnl_set(index);
589 if (set != NULL)
590 __ip_set_put(set);
550 nfnl_unlock(); 591 nfnl_unlock();
551} 592}
552EXPORT_SYMBOL_GPL(ip_set_nfnl_put); 593EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
@@ -603,41 +644,46 @@ static const struct nla_policy ip_set_create_policy[IPSET_ATTR_CMD_MAX + 1] = {
603 [IPSET_ATTR_DATA] = { .type = NLA_NESTED }, 644 [IPSET_ATTR_DATA] = { .type = NLA_NESTED },
604}; 645};
605 646
606static ip_set_id_t 647static struct ip_set *
607find_set_id(const char *name) 648find_set_and_id(const char *name, ip_set_id_t *id)
608{ 649{
609 ip_set_id_t i, index = IPSET_INVALID_ID; 650 struct ip_set *set = NULL;
610 const struct ip_set *set; 651 ip_set_id_t i;
611 652
612 for (i = 0; index == IPSET_INVALID_ID && i < ip_set_max; i++) { 653 *id = IPSET_INVALID_ID;
613 set = ip_set_list[i]; 654 for (i = 0; i < ip_set_max; i++) {
614 if (set != NULL && STREQ(set->name, name)) 655 set = nfnl_set(i);
615 index = i; 656 if (set != NULL && STREQ(set->name, name)) {
657 *id = i;
658 break;
659 }
616 } 660 }
617 return index; 661 return (*id == IPSET_INVALID_ID ? NULL : set);
618} 662}
619 663
620static inline struct ip_set * 664static inline struct ip_set *
621find_set(const char *name) 665find_set(const char *name)
622{ 666{
623 ip_set_id_t index = find_set_id(name); 667 ip_set_id_t id;
624 668
625 return index == IPSET_INVALID_ID ? NULL : ip_set_list[index]; 669 return find_set_and_id(name, &id);
626} 670}
627 671
628static int 672static int
629find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set) 673find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set)
630{ 674{
675 struct ip_set *s;
631 ip_set_id_t i; 676 ip_set_id_t i;
632 677
633 *index = IPSET_INVALID_ID; 678 *index = IPSET_INVALID_ID;
634 for (i = 0; i < ip_set_max; i++) { 679 for (i = 0; i < ip_set_max; i++) {
635 if (ip_set_list[i] == NULL) { 680 s = nfnl_set(i);
681 if (s == NULL) {
636 if (*index == IPSET_INVALID_ID) 682 if (*index == IPSET_INVALID_ID)
637 *index = i; 683 *index = i;
638 } else if (STREQ(name, ip_set_list[i]->name)) { 684 } else if (STREQ(name, s->name)) {
639 /* Name clash */ 685 /* Name clash */
640 *set = ip_set_list[i]; 686 *set = s;
641 return -EEXIST; 687 return -EEXIST;
642 } 688 }
643 } 689 }
@@ -730,10 +776,9 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
730 * and check clashing. 776 * and check clashing.
731 */ 777 */
732 ret = find_free_id(set->name, &index, &clash); 778 ret = find_free_id(set->name, &index, &clash);
733 if (ret != 0) { 779 if (ret == -EEXIST) {
734 /* If this is the same set and requested, ignore error */ 780 /* If this is the same set and requested, ignore error */
735 if (ret == -EEXIST && 781 if ((flags & IPSET_FLAG_EXIST) &&
736 (flags & IPSET_FLAG_EXIST) &&
737 STREQ(set->type->name, clash->type->name) && 782 STREQ(set->type->name, clash->type->name) &&
738 set->type->family == clash->type->family && 783 set->type->family == clash->type->family &&
739 set->type->revision_min == clash->type->revision_min && 784 set->type->revision_min == clash->type->revision_min &&
@@ -741,13 +786,36 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
741 set->variant->same_set(set, clash)) 786 set->variant->same_set(set, clash))
742 ret = 0; 787 ret = 0;
743 goto cleanup; 788 goto cleanup;
744 } 789 } else if (ret == -IPSET_ERR_MAX_SETS) {
790 struct ip_set **list, **tmp;
791 ip_set_id_t i = ip_set_max + IP_SET_INC;
792
793 if (i < ip_set_max || i == IPSET_INVALID_ID)
794 /* Wraparound */
795 goto cleanup;
796
797 list = kzalloc(sizeof(struct ip_set *) * i, GFP_KERNEL);
798 if (!list)
799 goto cleanup;
800 /* nfnl mutex is held, both lists are valid */
801 tmp = nfnl_dereference(ip_set_list);
802 memcpy(list, tmp, sizeof(struct ip_set *) * ip_set_max);
803 rcu_assign_pointer(ip_set_list, list);
804 /* Make sure all current packets have passed through */
805 synchronize_net();
806 /* Use new list */
807 index = ip_set_max;
808 ip_set_max = i;
809 kfree(tmp);
810 ret = 0;
811 } else if (ret)
812 goto cleanup;
745 813
746 /* 814 /*
747 * Finally! Add our shiny new set to the list, and be done. 815 * Finally! Add our shiny new set to the list, and be done.
748 */ 816 */
749 pr_debug("create: '%s' created with index %u!\n", set->name, index); 817 pr_debug("create: '%s' created with index %u!\n", set->name, index);
750 ip_set_list[index] = set; 818 nfnl_set(index) = set;
751 819
752 return ret; 820 return ret;
753 821
@@ -772,10 +840,10 @@ ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
772static void 840static void
773ip_set_destroy_set(ip_set_id_t index) 841ip_set_destroy_set(ip_set_id_t index)
774{ 842{
775 struct ip_set *set = ip_set_list[index]; 843 struct ip_set *set = nfnl_set(index);
776 844
777 pr_debug("set: %s\n", set->name); 845 pr_debug("set: %s\n", set->name);
778 ip_set_list[index] = NULL; 846 nfnl_set(index) = NULL;
779 847
780 /* Must call it without holding any lock */ 848 /* Must call it without holding any lock */
781 set->variant->destroy(set); 849 set->variant->destroy(set);
@@ -788,6 +856,7 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
788 const struct nlmsghdr *nlh, 856 const struct nlmsghdr *nlh,
789 const struct nlattr * const attr[]) 857 const struct nlattr * const attr[])
790{ 858{
859 struct ip_set *s;
791 ip_set_id_t i; 860 ip_set_id_t i;
792 int ret = 0; 861 int ret = 0;
793 862
@@ -807,22 +876,24 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
807 read_lock_bh(&ip_set_ref_lock); 876 read_lock_bh(&ip_set_ref_lock);
808 if (!attr[IPSET_ATTR_SETNAME]) { 877 if (!attr[IPSET_ATTR_SETNAME]) {
809 for (i = 0; i < ip_set_max; i++) { 878 for (i = 0; i < ip_set_max; i++) {
810 if (ip_set_list[i] != NULL && ip_set_list[i]->ref) { 879 s = nfnl_set(i);
880 if (s != NULL && s->ref) {
811 ret = -IPSET_ERR_BUSY; 881 ret = -IPSET_ERR_BUSY;
812 goto out; 882 goto out;
813 } 883 }
814 } 884 }
815 read_unlock_bh(&ip_set_ref_lock); 885 read_unlock_bh(&ip_set_ref_lock);
816 for (i = 0; i < ip_set_max; i++) { 886 for (i = 0; i < ip_set_max; i++) {
817 if (ip_set_list[i] != NULL) 887 s = nfnl_set(i);
888 if (s != NULL)
818 ip_set_destroy_set(i); 889 ip_set_destroy_set(i);
819 } 890 }
820 } else { 891 } else {
821 i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME])); 892 s = find_set_and_id(nla_data(attr[IPSET_ATTR_SETNAME]), &i);
822 if (i == IPSET_INVALID_ID) { 893 if (s == NULL) {
823 ret = -ENOENT; 894 ret = -ENOENT;
824 goto out; 895 goto out;
825 } else if (ip_set_list[i]->ref) { 896 } else if (s->ref) {
826 ret = -IPSET_ERR_BUSY; 897 ret = -IPSET_ERR_BUSY;
827 goto out; 898 goto out;
828 } 899 }
@@ -853,21 +924,24 @@ ip_set_flush(struct sock *ctnl, struct sk_buff *skb,
853 const struct nlmsghdr *nlh, 924 const struct nlmsghdr *nlh,
854 const struct nlattr * const attr[]) 925 const struct nlattr * const attr[])
855{ 926{
927 struct ip_set *s;
856 ip_set_id_t i; 928 ip_set_id_t i;
857 929
858 if (unlikely(protocol_failed(attr))) 930 if (unlikely(protocol_failed(attr)))
859 return -IPSET_ERR_PROTOCOL; 931 return -IPSET_ERR_PROTOCOL;
860 932
861 if (!attr[IPSET_ATTR_SETNAME]) { 933 if (!attr[IPSET_ATTR_SETNAME]) {
862 for (i = 0; i < ip_set_max; i++) 934 for (i = 0; i < ip_set_max; i++) {
863 if (ip_set_list[i] != NULL) 935 s = nfnl_set(i);
864 ip_set_flush_set(ip_set_list[i]); 936 if (s != NULL)
937 ip_set_flush_set(s);
938 }
865 } else { 939 } else {
866 i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME])); 940 s = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
867 if (i == IPSET_INVALID_ID) 941 if (s == NULL)
868 return -ENOENT; 942 return -ENOENT;
869 943
870 ip_set_flush_set(ip_set_list[i]); 944 ip_set_flush_set(s);
871 } 945 }
872 946
873 return 0; 947 return 0;
@@ -889,7 +963,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
889 const struct nlmsghdr *nlh, 963 const struct nlmsghdr *nlh,
890 const struct nlattr * const attr[]) 964 const struct nlattr * const attr[])
891{ 965{
892 struct ip_set *set; 966 struct ip_set *set, *s;
893 const char *name2; 967 const char *name2;
894 ip_set_id_t i; 968 ip_set_id_t i;
895 int ret = 0; 969 int ret = 0;
@@ -911,8 +985,8 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
911 985
912 name2 = nla_data(attr[IPSET_ATTR_SETNAME2]); 986 name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
913 for (i = 0; i < ip_set_max; i++) { 987 for (i = 0; i < ip_set_max; i++) {
914 if (ip_set_list[i] != NULL && 988 s = nfnl_set(i);
915 STREQ(ip_set_list[i]->name, name2)) { 989 if (s != NULL && STREQ(s->name, name2)) {
916 ret = -IPSET_ERR_EXIST_SETNAME2; 990 ret = -IPSET_ERR_EXIST_SETNAME2;
917 goto out; 991 goto out;
918 } 992 }
@@ -947,17 +1021,14 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
947 attr[IPSET_ATTR_SETNAME2] == NULL)) 1021 attr[IPSET_ATTR_SETNAME2] == NULL))
948 return -IPSET_ERR_PROTOCOL; 1022 return -IPSET_ERR_PROTOCOL;
949 1023
950 from_id = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME])); 1024 from = find_set_and_id(nla_data(attr[IPSET_ATTR_SETNAME]), &from_id);
951 if (from_id == IPSET_INVALID_ID) 1025 if (from == NULL)
952 return -ENOENT; 1026 return -ENOENT;
953 1027
954 to_id = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME2])); 1028 to = find_set_and_id(nla_data(attr[IPSET_ATTR_SETNAME2]), &to_id);
955 if (to_id == IPSET_INVALID_ID) 1029 if (to == NULL)
956 return -IPSET_ERR_EXIST_SETNAME2; 1030 return -IPSET_ERR_EXIST_SETNAME2;
957 1031
958 from = ip_set_list[from_id];
959 to = ip_set_list[to_id];
960
961 /* Features must not change. 1032 /* Features must not change.
962 * Not an artificial restriction anymore, as we must prevent 1033 * Not an artificial restriction anymore, as we must prevent
963 * possible loops created by swapping in setlist type of sets. */ 1034 * possible loops created by swapping in setlist type of sets. */
@@ -971,8 +1042,8 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
971 1042
972 write_lock_bh(&ip_set_ref_lock); 1043 write_lock_bh(&ip_set_ref_lock);
973 swap(from->ref, to->ref); 1044 swap(from->ref, to->ref);
974 ip_set_list[from_id] = to; 1045 nfnl_set(from_id) = to;
975 ip_set_list[to_id] = from; 1046 nfnl_set(to_id) = from;
976 write_unlock_bh(&ip_set_ref_lock); 1047 write_unlock_bh(&ip_set_ref_lock);
977 1048
978 return 0; 1049 return 0;
@@ -992,7 +1063,7 @@ static int
992ip_set_dump_done(struct netlink_callback *cb) 1063ip_set_dump_done(struct netlink_callback *cb)
993{ 1064{
994 if (cb->args[2]) { 1065 if (cb->args[2]) {
995 pr_debug("release set %s\n", ip_set_list[cb->args[1]]->name); 1066 pr_debug("release set %s\n", nfnl_set(cb->args[1])->name);
996 ip_set_put_byindex((ip_set_id_t) cb->args[1]); 1067 ip_set_put_byindex((ip_set_id_t) cb->args[1]);
997 } 1068 }
998 return 0; 1069 return 0;
@@ -1030,8 +1101,11 @@ dump_init(struct netlink_callback *cb)
1030 */ 1101 */
1031 1102
1032 if (cda[IPSET_ATTR_SETNAME]) { 1103 if (cda[IPSET_ATTR_SETNAME]) {
1033 index = find_set_id(nla_data(cda[IPSET_ATTR_SETNAME])); 1104 struct ip_set *set;
1034 if (index == IPSET_INVALID_ID) 1105
1106 set = find_set_and_id(nla_data(cda[IPSET_ATTR_SETNAME]),
1107 &index);
1108 if (set == NULL)
1035 return -ENOENT; 1109 return -ENOENT;
1036 1110
1037 dump_type = DUMP_ONE; 1111 dump_type = DUMP_ONE;
@@ -1081,7 +1155,7 @@ dump_last:
1081 dump_type, dump_flags, cb->args[1]); 1155 dump_type, dump_flags, cb->args[1]);
1082 for (; cb->args[1] < max; cb->args[1]++) { 1156 for (; cb->args[1] < max; cb->args[1]++) {
1083 index = (ip_set_id_t) cb->args[1]; 1157 index = (ip_set_id_t) cb->args[1];
1084 set = ip_set_list[index]; 1158 set = nfnl_set(index);
1085 if (set == NULL) { 1159 if (set == NULL) {
1086 if (dump_type == DUMP_ONE) { 1160 if (dump_type == DUMP_ONE) {
1087 ret = -ENOENT; 1161 ret = -ENOENT;
@@ -1100,7 +1174,7 @@ dump_last:
1100 if (!cb->args[2]) { 1174 if (!cb->args[2]) {
1101 /* Start listing: make sure set won't be destroyed */ 1175 /* Start listing: make sure set won't be destroyed */
1102 pr_debug("reference set\n"); 1176 pr_debug("reference set\n");
1103 __ip_set_get(index); 1177 __ip_set_get(set);
1104 } 1178 }
1105 nlh = start_msg(skb, NETLINK_CB(cb->skb).portid, 1179 nlh = start_msg(skb, NETLINK_CB(cb->skb).portid,
1106 cb->nlh->nlmsg_seq, flags, 1180 cb->nlh->nlmsg_seq, flags,
@@ -1159,7 +1233,7 @@ next_set:
1159release_refcount: 1233release_refcount:
1160 /* If there was an error or set is done, release set */ 1234 /* If there was an error or set is done, release set */
1161 if (ret || !cb->args[2]) { 1235 if (ret || !cb->args[2]) {
1162 pr_debug("release set %s\n", ip_set_list[index]->name); 1236 pr_debug("release set %s\n", nfnl_set(index)->name);
1163 ip_set_put_byindex(index); 1237 ip_set_put_byindex(index);
1164 cb->args[2] = 0; 1238 cb->args[2] = 0;
1165 } 1239 }
@@ -1409,17 +1483,15 @@ ip_set_header(struct sock *ctnl, struct sk_buff *skb,
1409 const struct ip_set *set; 1483 const struct ip_set *set;
1410 struct sk_buff *skb2; 1484 struct sk_buff *skb2;
1411 struct nlmsghdr *nlh2; 1485 struct nlmsghdr *nlh2;
1412 ip_set_id_t index;
1413 int ret = 0; 1486 int ret = 0;
1414 1487
1415 if (unlikely(protocol_failed(attr) || 1488 if (unlikely(protocol_failed(attr) ||
1416 attr[IPSET_ATTR_SETNAME] == NULL)) 1489 attr[IPSET_ATTR_SETNAME] == NULL))
1417 return -IPSET_ERR_PROTOCOL; 1490 return -IPSET_ERR_PROTOCOL;
1418 1491
1419 index = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME])); 1492 set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
1420 if (index == IPSET_INVALID_ID) 1493 if (set == NULL)
1421 return -ENOENT; 1494 return -ENOENT;
1422 set = ip_set_list[index];
1423 1495
1424 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1496 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1425 if (skb2 == NULL) 1497 if (skb2 == NULL)
@@ -1643,7 +1715,7 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
1643 void *data; 1715 void *data;
1644 int copylen = *len, ret = 0; 1716 int copylen = *len, ret = 0;
1645 1717
1646 if (!capable(CAP_NET_ADMIN)) 1718 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1647 return -EPERM; 1719 return -EPERM;
1648 if (optval != SO_IP_SET) 1720 if (optval != SO_IP_SET)
1649 return -EBADF; 1721 return -EBADF;
@@ -1684,6 +1756,7 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
1684 } 1756 }
1685 case IP_SET_OP_GET_BYNAME: { 1757 case IP_SET_OP_GET_BYNAME: {
1686 struct ip_set_req_get_set *req_get = data; 1758 struct ip_set_req_get_set *req_get = data;
1759 ip_set_id_t id;
1687 1760
1688 if (*len != sizeof(struct ip_set_req_get_set)) { 1761 if (*len != sizeof(struct ip_set_req_get_set)) {
1689 ret = -EINVAL; 1762 ret = -EINVAL;
@@ -1691,12 +1764,14 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
1691 } 1764 }
1692 req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0'; 1765 req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
1693 nfnl_lock(); 1766 nfnl_lock();
1694 req_get->set.index = find_set_id(req_get->set.name); 1767 find_set_and_id(req_get->set.name, &id);
1768 req_get->set.index = id;
1695 nfnl_unlock(); 1769 nfnl_unlock();
1696 goto copy; 1770 goto copy;
1697 } 1771 }
1698 case IP_SET_OP_GET_BYINDEX: { 1772 case IP_SET_OP_GET_BYINDEX: {
1699 struct ip_set_req_get_set *req_get = data; 1773 struct ip_set_req_get_set *req_get = data;
1774 struct ip_set *set;
1700 1775
1701 if (*len != sizeof(struct ip_set_req_get_set) || 1776 if (*len != sizeof(struct ip_set_req_get_set) ||
1702 req_get->set.index >= ip_set_max) { 1777 req_get->set.index >= ip_set_max) {
@@ -1704,9 +1779,8 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
1704 goto done; 1779 goto done;
1705 } 1780 }
1706 nfnl_lock(); 1781 nfnl_lock();
1707 strncpy(req_get->set.name, 1782 set = nfnl_set(req_get->set.index);
1708 ip_set_list[req_get->set.index] 1783 strncpy(req_get->set.name, set ? set->name : "",
1709 ? ip_set_list[req_get->set.index]->name : "",
1710 IPSET_MAXNAMELEN); 1784 IPSET_MAXNAMELEN);
1711 nfnl_unlock(); 1785 nfnl_unlock();
1712 goto copy; 1786 goto copy;
@@ -1737,6 +1811,7 @@ static struct nf_sockopt_ops so_set __read_mostly = {
1737static int __init 1811static int __init
1738ip_set_init(void) 1812ip_set_init(void)
1739{ 1813{
1814 struct ip_set **list;
1740 int ret; 1815 int ret;
1741 1816
1742 if (max_sets) 1817 if (max_sets)
@@ -1744,22 +1819,22 @@ ip_set_init(void)
1744 if (ip_set_max >= IPSET_INVALID_ID) 1819 if (ip_set_max >= IPSET_INVALID_ID)
1745 ip_set_max = IPSET_INVALID_ID - 1; 1820 ip_set_max = IPSET_INVALID_ID - 1;
1746 1821
1747 ip_set_list = kzalloc(sizeof(struct ip_set *) * ip_set_max, 1822 list = kzalloc(sizeof(struct ip_set *) * ip_set_max, GFP_KERNEL);
1748 GFP_KERNEL); 1823 if (!list)
1749 if (!ip_set_list)
1750 return -ENOMEM; 1824 return -ENOMEM;
1751 1825
1826 rcu_assign_pointer(ip_set_list, list);
1752 ret = nfnetlink_subsys_register(&ip_set_netlink_subsys); 1827 ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
1753 if (ret != 0) { 1828 if (ret != 0) {
1754 pr_err("ip_set: cannot register with nfnetlink.\n"); 1829 pr_err("ip_set: cannot register with nfnetlink.\n");
1755 kfree(ip_set_list); 1830 kfree(list);
1756 return ret; 1831 return ret;
1757 } 1832 }
1758 ret = nf_register_sockopt(&so_set); 1833 ret = nf_register_sockopt(&so_set);
1759 if (ret != 0) { 1834 if (ret != 0) {
1760 pr_err("SO_SET registry failed: %d\n", ret); 1835 pr_err("SO_SET registry failed: %d\n", ret);
1761 nfnetlink_subsys_unregister(&ip_set_netlink_subsys); 1836 nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
1762 kfree(ip_set_list); 1837 kfree(list);
1763 return ret; 1838 return ret;
1764 } 1839 }
1765 1840
@@ -1770,10 +1845,12 @@ ip_set_init(void)
1770static void __exit 1845static void __exit
1771ip_set_fini(void) 1846ip_set_fini(void)
1772{ 1847{
1848 struct ip_set **list = rcu_dereference_protected(ip_set_list, 1);
1849
1773 /* There can't be any existing set */ 1850 /* There can't be any existing set */
1774 nf_unregister_sockopt(&so_set); 1851 nf_unregister_sockopt(&so_set);
1775 nfnetlink_subsys_unregister(&ip_set_netlink_subsys); 1852 nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
1776 kfree(ip_set_list); 1853 kfree(list);
1777 pr_debug("these are the famous last words\n"); 1854 pr_debug("these are the famous last words\n");
1778} 1855}
1779 1856
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index ec3dba5dcd62..5c0b78528e55 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -173,6 +173,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
173 return adtfn(set, &nip, timeout, flags); 173 return adtfn(set, &nip, timeout, flags);
174 } 174 }
175 175
176 ip_to = ip;
176 if (tb[IPSET_ATTR_IP_TO]) { 177 if (tb[IPSET_ATTR_IP_TO]) {
177 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); 178 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
178 if (ret) 179 if (ret)
@@ -185,8 +186,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
185 if (!cidr || cidr > 32) 186 if (!cidr || cidr > 32)
186 return -IPSET_ERR_INVALID_CIDR; 187 return -IPSET_ERR_INVALID_CIDR;
187 ip_set_mask_from_to(ip, ip_to, cidr); 188 ip_set_mask_from_to(ip, ip_to, cidr);
188 } else 189 }
189 ip_to = ip;
190 190
191 hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1); 191 hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
192 192
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 0171f7502fa5..6283351f4eeb 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -162,7 +162,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
162 const struct ip_set_hash *h = set->data; 162 const struct ip_set_hash *h = set->data;
163 ipset_adtfn adtfn = set->variant->adt[adt]; 163 ipset_adtfn adtfn = set->variant->adt[adt];
164 struct hash_ipport4_elem data = { }; 164 struct hash_ipport4_elem data = { };
165 u32 ip, ip_to = 0, p = 0, port, port_to; 165 u32 ip, ip_to, p = 0, port, port_to;
166 u32 timeout = h->timeout; 166 u32 timeout = h->timeout;
167 bool with_ports = false; 167 bool with_ports = false;
168 int ret; 168 int ret;
@@ -210,7 +210,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
210 return ip_set_eexist(ret, flags) ? 0 : ret; 210 return ip_set_eexist(ret, flags) ? 0 : ret;
211 } 211 }
212 212
213 ip = ntohl(data.ip); 213 ip_to = ip = ntohl(data.ip);
214 if (tb[IPSET_ATTR_IP_TO]) { 214 if (tb[IPSET_ATTR_IP_TO]) {
215 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); 215 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
216 if (ret) 216 if (ret)
@@ -223,8 +223,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
223 if (!cidr || cidr > 32) 223 if (!cidr || cidr > 32)
224 return -IPSET_ERR_INVALID_CIDR; 224 return -IPSET_ERR_INVALID_CIDR;
225 ip_set_mask_from_to(ip, ip_to, cidr); 225 ip_set_mask_from_to(ip, ip_to, cidr);
226 } else 226 }
227 ip_to = ip;
228 227
229 port_to = port = ntohs(data.port); 228 port_to = port = ntohs(data.port);
230 if (with_ports && tb[IPSET_ATTR_PORT_TO]) { 229 if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 6344ef551ec8..6a21271c8d5a 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -166,7 +166,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
166 const struct ip_set_hash *h = set->data; 166 const struct ip_set_hash *h = set->data;
167 ipset_adtfn adtfn = set->variant->adt[adt]; 167 ipset_adtfn adtfn = set->variant->adt[adt];
168 struct hash_ipportip4_elem data = { }; 168 struct hash_ipportip4_elem data = { };
169 u32 ip, ip_to = 0, p = 0, port, port_to; 169 u32 ip, ip_to, p = 0, port, port_to;
170 u32 timeout = h->timeout; 170 u32 timeout = h->timeout;
171 bool with_ports = false; 171 bool with_ports = false;
172 int ret; 172 int ret;
@@ -218,7 +218,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
218 return ip_set_eexist(ret, flags) ? 0 : ret; 218 return ip_set_eexist(ret, flags) ? 0 : ret;
219 } 219 }
220 220
221 ip = ntohl(data.ip); 221 ip_to = ip = ntohl(data.ip);
222 if (tb[IPSET_ATTR_IP_TO]) { 222 if (tb[IPSET_ATTR_IP_TO]) {
223 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); 223 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
224 if (ret) 224 if (ret)
@@ -231,8 +231,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
231 if (!cidr || cidr > 32) 231 if (!cidr || cidr > 32)
232 return -IPSET_ERR_INVALID_CIDR; 232 return -IPSET_ERR_INVALID_CIDR;
233 ip_set_mask_from_to(ip, ip_to, cidr); 233 ip_set_mask_from_to(ip, ip_to, cidr);
234 } else 234 }
235 ip_to = ip;
236 235
237 port_to = port = ntohs(data.port); 236 port_to = port = ntohs(data.port);
238 if (with_ports && tb[IPSET_ATTR_PORT_TO]) { 237 if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index cb71f9a774e7..2d5cd4ee30eb 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -215,8 +215,8 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
215 const struct ip_set_hash *h = set->data; 215 const struct ip_set_hash *h = set->data;
216 ipset_adtfn adtfn = set->variant->adt[adt]; 216 ipset_adtfn adtfn = set->variant->adt[adt];
217 struct hash_ipportnet4_elem data = { .cidr = HOST_MASK - 1 }; 217 struct hash_ipportnet4_elem data = { .cidr = HOST_MASK - 1 };
218 u32 ip, ip_to = 0, p = 0, port, port_to; 218 u32 ip, ip_to, p = 0, port, port_to;
219 u32 ip2_from = 0, ip2_to, ip2_last, ip2; 219 u32 ip2_from, ip2_to, ip2_last, ip2;
220 u32 timeout = h->timeout; 220 u32 timeout = h->timeout;
221 bool with_ports = false; 221 bool with_ports = false;
222 u8 cidr; 222 u8 cidr;
@@ -286,6 +286,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
286 return ip_set_eexist(ret, flags) ? 0 : ret; 286 return ip_set_eexist(ret, flags) ? 0 : ret;
287 } 287 }
288 288
289 ip_to = ip;
289 if (tb[IPSET_ATTR_IP_TO]) { 290 if (tb[IPSET_ATTR_IP_TO]) {
290 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); 291 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
291 if (ret) 292 if (ret)
@@ -306,6 +307,8 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
306 if (port > port_to) 307 if (port > port_to)
307 swap(port, port_to); 308 swap(port, port_to);
308 } 309 }
310
311 ip2_to = ip2_from;
309 if (tb[IPSET_ATTR_IP2_TO]) { 312 if (tb[IPSET_ATTR_IP2_TO]) {
310 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to); 313 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
311 if (ret) 314 if (ret)
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index b9a63381e349..45a101439bc5 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -793,7 +793,7 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
793 [IPSET_ATTR_IP] = { .type = NLA_NESTED }, 793 [IPSET_ATTR_IP] = { .type = NLA_NESTED },
794 [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, 794 [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
795 [IPSET_ATTR_IFACE] = { .type = NLA_NUL_STRING, 795 [IPSET_ATTR_IFACE] = { .type = NLA_NUL_STRING,
796 .len = IPSET_MAXNAMELEN - 1 }, 796 .len = IFNAMSIZ - 1 },
797 [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, 797 [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
798 [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, 798 [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
799 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, 799 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index 8b2cffdfdd99..0c3b1670b0d1 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -28,12 +28,11 @@ if IP_VS
28config IP_VS_IPV6 28config IP_VS_IPV6
29 bool "IPv6 support for IPVS" 29 bool "IPv6 support for IPVS"
30 depends on IPV6 = y || IP_VS = IPV6 30 depends on IPV6 = y || IP_VS = IPV6
31 select IP6_NF_IPTABLES
31 ---help--- 32 ---help---
32 Add IPv6 support to IPVS. This is incomplete and might be dangerous. 33 Add IPv6 support to IPVS.
33 34
34 See http://www.mindbasket.com/ipvs for more information. 35 Say Y if unsure.
35
36 Say N if unsure.
37 36
38config IP_VS_DEBUG 37config IP_VS_DEBUG
39 bool "IP virtual server debugging" 38 bool "IP virtual server debugging"
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 1548df9a7524..30e764ad021f 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -308,13 +308,12 @@ struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
308static int 308static int
309ip_vs_conn_fill_param_proto(int af, const struct sk_buff *skb, 309ip_vs_conn_fill_param_proto(int af, const struct sk_buff *skb,
310 const struct ip_vs_iphdr *iph, 310 const struct ip_vs_iphdr *iph,
311 unsigned int proto_off, int inverse, 311 int inverse, struct ip_vs_conn_param *p)
312 struct ip_vs_conn_param *p)
313{ 312{
314 __be16 _ports[2], *pptr; 313 __be16 _ports[2], *pptr;
315 struct net *net = skb_net(skb); 314 struct net *net = skb_net(skb);
316 315
317 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports); 316 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
318 if (pptr == NULL) 317 if (pptr == NULL)
319 return 1; 318 return 1;
320 319
@@ -329,12 +328,11 @@ ip_vs_conn_fill_param_proto(int af, const struct sk_buff *skb,
329 328
330struct ip_vs_conn * 329struct ip_vs_conn *
331ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb, 330ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
332 const struct ip_vs_iphdr *iph, 331 const struct ip_vs_iphdr *iph, int inverse)
333 unsigned int proto_off, int inverse)
334{ 332{
335 struct ip_vs_conn_param p; 333 struct ip_vs_conn_param p;
336 334
337 if (ip_vs_conn_fill_param_proto(af, skb, iph, proto_off, inverse, &p)) 335 if (ip_vs_conn_fill_param_proto(af, skb, iph, inverse, &p))
338 return NULL; 336 return NULL;
339 337
340 return ip_vs_conn_in_get(&p); 338 return ip_vs_conn_in_get(&p);
@@ -432,12 +430,11 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
432 430
433struct ip_vs_conn * 431struct ip_vs_conn *
434ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb, 432ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
435 const struct ip_vs_iphdr *iph, 433 const struct ip_vs_iphdr *iph, int inverse)
436 unsigned int proto_off, int inverse)
437{ 434{
438 struct ip_vs_conn_param p; 435 struct ip_vs_conn_param p;
439 436
440 if (ip_vs_conn_fill_param_proto(af, skb, iph, proto_off, inverse, &p)) 437 if (ip_vs_conn_fill_param_proto(af, skb, iph, inverse, &p))
441 return NULL; 438 return NULL;
442 439
443 return ip_vs_conn_out_get(&p); 440 return ip_vs_conn_out_get(&p);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 58918e20f9d5..47edf5a40a59 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -222,11 +222,10 @@ ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
222 */ 222 */
223static struct ip_vs_conn * 223static struct ip_vs_conn *
224ip_vs_sched_persist(struct ip_vs_service *svc, 224ip_vs_sched_persist(struct ip_vs_service *svc,
225 struct sk_buff *skb, 225 struct sk_buff *skb, __be16 src_port, __be16 dst_port,
226 __be16 src_port, __be16 dst_port, int *ignored) 226 int *ignored, struct ip_vs_iphdr *iph)
227{ 227{
228 struct ip_vs_conn *cp = NULL; 228 struct ip_vs_conn *cp = NULL;
229 struct ip_vs_iphdr iph;
230 struct ip_vs_dest *dest; 229 struct ip_vs_dest *dest;
231 struct ip_vs_conn *ct; 230 struct ip_vs_conn *ct;
232 __be16 dport = 0; /* destination port to forward */ 231 __be16 dport = 0; /* destination port to forward */
@@ -236,20 +235,18 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
236 union nf_inet_addr snet; /* source network of the client, 235 union nf_inet_addr snet; /* source network of the client,
237 after masking */ 236 after masking */
238 237
239 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
240
241 /* Mask saddr with the netmask to adjust template granularity */ 238 /* Mask saddr with the netmask to adjust template granularity */
242#ifdef CONFIG_IP_VS_IPV6 239#ifdef CONFIG_IP_VS_IPV6
243 if (svc->af == AF_INET6) 240 if (svc->af == AF_INET6)
244 ipv6_addr_prefix(&snet.in6, &iph.saddr.in6, svc->netmask); 241 ipv6_addr_prefix(&snet.in6, &iph->saddr.in6, svc->netmask);
245 else 242 else
246#endif 243#endif
247 snet.ip = iph.saddr.ip & svc->netmask; 244 snet.ip = iph->saddr.ip & svc->netmask;
248 245
249 IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u " 246 IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
250 "mnet %s\n", 247 "mnet %s\n",
251 IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(src_port), 248 IP_VS_DBG_ADDR(svc->af, &iph->saddr), ntohs(src_port),
252 IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(dst_port), 249 IP_VS_DBG_ADDR(svc->af, &iph->daddr), ntohs(dst_port),
253 IP_VS_DBG_ADDR(svc->af, &snet)); 250 IP_VS_DBG_ADDR(svc->af, &snet));
254 251
255 /* 252 /*
@@ -266,8 +263,8 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
266 * is created for other persistent services. 263 * is created for other persistent services.
267 */ 264 */
268 { 265 {
269 int protocol = iph.protocol; 266 int protocol = iph->protocol;
270 const union nf_inet_addr *vaddr = &iph.daddr; 267 const union nf_inet_addr *vaddr = &iph->daddr;
271 __be16 vport = 0; 268 __be16 vport = 0;
272 269
273 if (dst_port == svc->port) { 270 if (dst_port == svc->port) {
@@ -342,14 +339,14 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
342 dport = dest->port; 339 dport = dest->port;
343 340
344 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET 341 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
345 && iph.protocol == IPPROTO_UDP)? 342 && iph->protocol == IPPROTO_UDP) ?
346 IP_VS_CONN_F_ONE_PACKET : 0; 343 IP_VS_CONN_F_ONE_PACKET : 0;
347 344
348 /* 345 /*
349 * Create a new connection according to the template 346 * Create a new connection according to the template
350 */ 347 */
351 ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, &iph.saddr, 348 ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol, &iph->saddr,
352 src_port, &iph.daddr, dst_port, &param); 349 src_port, &iph->daddr, dst_port, &param);
353 350
354 cp = ip_vs_conn_new(&param, &dest->addr, dport, flags, dest, skb->mark); 351 cp = ip_vs_conn_new(&param, &dest->addr, dport, flags, dest, skb->mark);
355 if (cp == NULL) { 352 if (cp == NULL) {
@@ -392,18 +389,20 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
392 */ 389 */
393struct ip_vs_conn * 390struct ip_vs_conn *
394ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, 391ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
395 struct ip_vs_proto_data *pd, int *ignored) 392 struct ip_vs_proto_data *pd, int *ignored,
393 struct ip_vs_iphdr *iph)
396{ 394{
397 struct ip_vs_protocol *pp = pd->pp; 395 struct ip_vs_protocol *pp = pd->pp;
398 struct ip_vs_conn *cp = NULL; 396 struct ip_vs_conn *cp = NULL;
399 struct ip_vs_iphdr iph;
400 struct ip_vs_dest *dest; 397 struct ip_vs_dest *dest;
401 __be16 _ports[2], *pptr; 398 __be16 _ports[2], *pptr;
402 unsigned int flags; 399 unsigned int flags;
403 400
404 *ignored = 1; 401 *ignored = 1;
405 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); 402 /*
406 pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports); 403 * IPv6 frags, only the first hit here.
404 */
405 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
407 if (pptr == NULL) 406 if (pptr == NULL)
408 return NULL; 407 return NULL;
409 408
@@ -423,7 +422,7 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
423 * Do not schedule replies from local real server. 422 * Do not schedule replies from local real server.
424 */ 423 */
425 if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) && 424 if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
426 (cp = pp->conn_in_get(svc->af, skb, &iph, iph.len, 1))) { 425 (cp = pp->conn_in_get(svc->af, skb, iph, 1))) {
427 IP_VS_DBG_PKT(12, svc->af, pp, skb, 0, 426 IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
428 "Not scheduling reply for existing connection"); 427 "Not scheduling reply for existing connection");
429 __ip_vs_conn_put(cp); 428 __ip_vs_conn_put(cp);
@@ -434,7 +433,8 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
434 * Persistent service 433 * Persistent service
435 */ 434 */
436 if (svc->flags & IP_VS_SVC_F_PERSISTENT) 435 if (svc->flags & IP_VS_SVC_F_PERSISTENT)
437 return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored); 436 return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored,
437 iph);
438 438
439 *ignored = 0; 439 *ignored = 0;
440 440
@@ -456,7 +456,7 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
456 } 456 }
457 457
458 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET 458 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
459 && iph.protocol == IPPROTO_UDP)? 459 && iph->protocol == IPPROTO_UDP) ?
460 IP_VS_CONN_F_ONE_PACKET : 0; 460 IP_VS_CONN_F_ONE_PACKET : 0;
461 461
462 /* 462 /*
@@ -465,9 +465,9 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
465 { 465 {
466 struct ip_vs_conn_param p; 466 struct ip_vs_conn_param p;
467 467
468 ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, 468 ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol,
469 &iph.saddr, pptr[0], &iph.daddr, pptr[1], 469 &iph->saddr, pptr[0], &iph->daddr,
470 &p); 470 pptr[1], &p);
471 cp = ip_vs_conn_new(&p, &dest->addr, 471 cp = ip_vs_conn_new(&p, &dest->addr,
472 dest->port ? dest->port : pptr[1], 472 dest->port ? dest->port : pptr[1],
473 flags, dest, skb->mark); 473 flags, dest, skb->mark);
@@ -496,19 +496,16 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
496 * no destination is available for a new connection. 496 * no destination is available for a new connection.
497 */ 497 */
498int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, 498int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
499 struct ip_vs_proto_data *pd) 499 struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph)
500{ 500{
501 __be16 _ports[2], *pptr; 501 __be16 _ports[2], *pptr;
502 struct ip_vs_iphdr iph;
503#ifdef CONFIG_SYSCTL 502#ifdef CONFIG_SYSCTL
504 struct net *net; 503 struct net *net;
505 struct netns_ipvs *ipvs; 504 struct netns_ipvs *ipvs;
506 int unicast; 505 int unicast;
507#endif 506#endif
508 507
509 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); 508 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
510
511 pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
512 if (pptr == NULL) { 509 if (pptr == NULL) {
513 ip_vs_service_put(svc); 510 ip_vs_service_put(svc);
514 return NF_DROP; 511 return NF_DROP;
@@ -519,10 +516,10 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
519 516
520#ifdef CONFIG_IP_VS_IPV6 517#ifdef CONFIG_IP_VS_IPV6
521 if (svc->af == AF_INET6) 518 if (svc->af == AF_INET6)
522 unicast = ipv6_addr_type(&iph.daddr.in6) & IPV6_ADDR_UNICAST; 519 unicast = ipv6_addr_type(&iph->daddr.in6) & IPV6_ADDR_UNICAST;
523 else 520 else
524#endif 521#endif
525 unicast = (inet_addr_type(net, iph.daddr.ip) == RTN_UNICAST); 522 unicast = (inet_addr_type(net, iph->daddr.ip) == RTN_UNICAST);
526 523
527 /* if it is fwmark-based service, the cache_bypass sysctl is up 524 /* if it is fwmark-based service, the cache_bypass sysctl is up
528 and the destination is a non-local unicast, then create 525 and the destination is a non-local unicast, then create
@@ -532,7 +529,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
532 int ret; 529 int ret;
533 struct ip_vs_conn *cp; 530 struct ip_vs_conn *cp;
534 unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET && 531 unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
535 iph.protocol == IPPROTO_UDP)? 532 iph->protocol == IPPROTO_UDP) ?
536 IP_VS_CONN_F_ONE_PACKET : 0; 533 IP_VS_CONN_F_ONE_PACKET : 0;
537 union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } }; 534 union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } };
538 535
@@ -542,9 +539,9 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
542 IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__); 539 IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
543 { 540 {
544 struct ip_vs_conn_param p; 541 struct ip_vs_conn_param p;
545 ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, 542 ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol,
546 &iph.saddr, pptr[0], 543 &iph->saddr, pptr[0],
547 &iph.daddr, pptr[1], &p); 544 &iph->daddr, pptr[1], &p);
548 cp = ip_vs_conn_new(&p, &daddr, 0, 545 cp = ip_vs_conn_new(&p, &daddr, 0,
549 IP_VS_CONN_F_BYPASS | flags, 546 IP_VS_CONN_F_BYPASS | flags,
550 NULL, skb->mark); 547 NULL, skb->mark);
@@ -559,7 +556,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
559 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); 556 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
560 557
561 /* transmit the first SYN packet */ 558 /* transmit the first SYN packet */
562 ret = cp->packet_xmit(skb, cp, pd->pp); 559 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
563 /* do not touch skb anymore */ 560 /* do not touch skb anymore */
564 561
565 atomic_inc(&cp->in_pkts); 562 atomic_inc(&cp->in_pkts);
@@ -654,14 +651,6 @@ static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
654 return err; 651 return err;
655} 652}
656 653
657#ifdef CONFIG_IP_VS_IPV6
658static inline int ip_vs_gather_frags_v6(struct sk_buff *skb, u_int32_t user)
659{
660 /* TODO IPv6: Find out what to do here for IPv6 */
661 return 0;
662}
663#endif
664
665static int ip_vs_route_me_harder(int af, struct sk_buff *skb) 654static int ip_vs_route_me_harder(int af, struct sk_buff *skb)
666{ 655{
667#ifdef CONFIG_IP_VS_IPV6 656#ifdef CONFIG_IP_VS_IPV6
@@ -732,10 +721,19 @@ void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
732 struct ip_vs_conn *cp, int inout) 721 struct ip_vs_conn *cp, int inout)
733{ 722{
734 struct ipv6hdr *iph = ipv6_hdr(skb); 723 struct ipv6hdr *iph = ipv6_hdr(skb);
735 unsigned int icmp_offset = sizeof(struct ipv6hdr); 724 unsigned int icmp_offset = 0;
736 struct icmp6hdr *icmph = (struct icmp6hdr *)(skb_network_header(skb) + 725 unsigned int offs = 0; /* header offset*/
737 icmp_offset); 726 int protocol;
738 struct ipv6hdr *ciph = (struct ipv6hdr *)(icmph + 1); 727 struct icmp6hdr *icmph;
728 struct ipv6hdr *ciph;
729 unsigned short fragoffs;
730
731 ipv6_find_hdr(skb, &icmp_offset, IPPROTO_ICMPV6, &fragoffs, NULL);
732 icmph = (struct icmp6hdr *)(skb_network_header(skb) + icmp_offset);
733 offs = icmp_offset + sizeof(struct icmp6hdr);
734 ciph = (struct ipv6hdr *)(skb_network_header(skb) + offs);
735
736 protocol = ipv6_find_hdr(skb, &offs, -1, &fragoffs, NULL);
739 737
740 if (inout) { 738 if (inout) {
741 iph->saddr = cp->vaddr.in6; 739 iph->saddr = cp->vaddr.in6;
@@ -746,10 +744,13 @@ void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
746 } 744 }
747 745
748 /* the TCP/UDP/SCTP port */ 746 /* the TCP/UDP/SCTP port */
749 if (IPPROTO_TCP == ciph->nexthdr || IPPROTO_UDP == ciph->nexthdr || 747 if (!fragoffs && (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
750 IPPROTO_SCTP == ciph->nexthdr) { 748 IPPROTO_SCTP == protocol)) {
751 __be16 *ports = (void *)ciph + sizeof(struct ipv6hdr); 749 __be16 *ports = (void *)(skb_network_header(skb) + offs);
752 750
751 IP_VS_DBG(11, "%s() changed port %d to %d\n", __func__,
752 ntohs(inout ? ports[1] : ports[0]),
753 ntohs(inout ? cp->vport : cp->dport));
753 if (inout) 754 if (inout)
754 ports[1] = cp->vport; 755 ports[1] = cp->vport;
755 else 756 else
@@ -898,51 +899,35 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
898 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset, 899 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
899 "Checking outgoing ICMP for"); 900 "Checking outgoing ICMP for");
900 901
901 offset += cih->ihl * 4; 902 ip_vs_fill_ip4hdr(cih, &ciph);
902 903 ciph.len += offset;
903 ip_vs_fill_iphdr(AF_INET, cih, &ciph);
904 /* The embedded headers contain source and dest in reverse order */ 904 /* The embedded headers contain source and dest in reverse order */
905 cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1); 905 cp = pp->conn_out_get(AF_INET, skb, &ciph, 1);
906 if (!cp) 906 if (!cp)
907 return NF_ACCEPT; 907 return NF_ACCEPT;
908 908
909 snet.ip = iph->saddr; 909 snet.ip = iph->saddr;
910 return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp, 910 return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
911 pp, offset, ihl); 911 pp, ciph.len, ihl);
912} 912}
913 913
914#ifdef CONFIG_IP_VS_IPV6 914#ifdef CONFIG_IP_VS_IPV6
915static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related, 915static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
916 unsigned int hooknum) 916 unsigned int hooknum, struct ip_vs_iphdr *ipvsh)
917{ 917{
918 struct ipv6hdr *iph;
919 struct icmp6hdr _icmph, *ic; 918 struct icmp6hdr _icmph, *ic;
920 struct ipv6hdr _ciph, *cih; /* The ip header contained 919 struct ipv6hdr _ip6h, *ip6h; /* The ip header contained within ICMP */
921 within the ICMP */ 920 struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
922 struct ip_vs_iphdr ciph;
923 struct ip_vs_conn *cp; 921 struct ip_vs_conn *cp;
924 struct ip_vs_protocol *pp; 922 struct ip_vs_protocol *pp;
925 unsigned int offset;
926 union nf_inet_addr snet; 923 union nf_inet_addr snet;
924 unsigned int writable;
927 925
928 *related = 1; 926 *related = 1;
929 927 ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph, ipvsh);
930 /* reassemble IP fragments */
931 if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) {
932 if (ip_vs_gather_frags_v6(skb, ip_vs_defrag_user(hooknum)))
933 return NF_STOLEN;
934 }
935
936 iph = ipv6_hdr(skb);
937 offset = sizeof(struct ipv6hdr);
938 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
939 if (ic == NULL) 928 if (ic == NULL)
940 return NF_DROP; 929 return NF_DROP;
941 930
942 IP_VS_DBG(12, "Outgoing ICMPv6 (%d,%d) %pI6->%pI6\n",
943 ic->icmp6_type, ntohs(icmpv6_id(ic)),
944 &iph->saddr, &iph->daddr);
945
946 /* 931 /*
947 * Work through seeing if this is for us. 932 * Work through seeing if this is for us.
948 * These checks are supposed to be in an order that means easy 933 * These checks are supposed to be in an order that means easy
@@ -950,42 +935,45 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
950 * this means that some packets will manage to get a long way 935 * this means that some packets will manage to get a long way
951 * down this stack and then be rejected, but that's life. 936 * down this stack and then be rejected, but that's life.
952 */ 937 */
953 if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) && 938 if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
954 (ic->icmp6_type != ICMPV6_PKT_TOOBIG) &&
955 (ic->icmp6_type != ICMPV6_TIME_EXCEED)) {
956 *related = 0; 939 *related = 0;
957 return NF_ACCEPT; 940 return NF_ACCEPT;
958 } 941 }
942 /* Fragment header that is before ICMP header tells us that:
943 * it's not an error message since they can't be fragmented.
944 */
945 if (ipvsh->flags & IP6_FH_F_FRAG)
946 return NF_DROP;
947
948 IP_VS_DBG(8, "Outgoing ICMPv6 (%d,%d) %pI6c->%pI6c\n",
949 ic->icmp6_type, ntohs(icmpv6_id(ic)),
950 &ipvsh->saddr, &ipvsh->daddr);
959 951
960 /* Now find the contained IP header */ 952 /* Now find the contained IP header */
961 offset += sizeof(_icmph); 953 ciph.len = ipvsh->len + sizeof(_icmph);
962 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph); 954 ip6h = skb_header_pointer(skb, ciph.len, sizeof(_ip6h), &_ip6h);
963 if (cih == NULL) 955 if (ip6h == NULL)
964 return NF_ACCEPT; /* The packet looks wrong, ignore */ 956 return NF_ACCEPT; /* The packet looks wrong, ignore */
965 957 ciph.saddr.in6 = ip6h->saddr; /* conn_out_get() handles reverse order */
966 pp = ip_vs_proto_get(cih->nexthdr); 958 ciph.daddr.in6 = ip6h->daddr;
959 /* skip possible IPv6 exthdrs of contained IPv6 packet */
960 ciph.protocol = ipv6_find_hdr(skb, &ciph.len, -1, &ciph.fragoffs, NULL);
961 if (ciph.protocol < 0)
962 return NF_ACCEPT; /* Contained IPv6 hdr looks wrong, ignore */
963
964 pp = ip_vs_proto_get(ciph.protocol);
967 if (!pp) 965 if (!pp)
968 return NF_ACCEPT; 966 return NF_ACCEPT;
969 967
970 /* Is the embedded protocol header present? */
971 /* TODO: we don't support fragmentation at the moment anyways */
972 if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag))
973 return NF_ACCEPT;
974
975 IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offset,
976 "Checking outgoing ICMPv6 for");
977
978 offset += sizeof(struct ipv6hdr);
979
980 ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
981 /* The embedded headers contain source and dest in reverse order */ 968 /* The embedded headers contain source and dest in reverse order */
982 cp = pp->conn_out_get(AF_INET6, skb, &ciph, offset, 1); 969 cp = pp->conn_out_get(AF_INET6, skb, &ciph, 1);
983 if (!cp) 970 if (!cp)
984 return NF_ACCEPT; 971 return NF_ACCEPT;
985 972
986 snet.in6 = iph->saddr; 973 snet.in6 = ciph.saddr.in6;
987 return handle_response_icmp(AF_INET6, skb, &snet, cih->nexthdr, cp, 974 writable = ciph.len;
988 pp, offset, sizeof(struct ipv6hdr)); 975 return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
976 pp, writable, sizeof(struct ipv6hdr));
989} 977}
990#endif 978#endif
991 979
@@ -1018,17 +1006,17 @@ static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
1018 */ 1006 */
1019static unsigned int 1007static unsigned int
1020handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, 1008handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
1021 struct ip_vs_conn *cp, int ihl) 1009 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
1022{ 1010{
1023 struct ip_vs_protocol *pp = pd->pp; 1011 struct ip_vs_protocol *pp = pd->pp;
1024 1012
1025 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet"); 1013 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet");
1026 1014
1027 if (!skb_make_writable(skb, ihl)) 1015 if (!skb_make_writable(skb, iph->len))
1028 goto drop; 1016 goto drop;
1029 1017
1030 /* mangle the packet */ 1018 /* mangle the packet */
1031 if (pp->snat_handler && !pp->snat_handler(skb, pp, cp)) 1019 if (pp->snat_handler && !pp->snat_handler(skb, pp, cp, iph))
1032 goto drop; 1020 goto drop;
1033 1021
1034#ifdef CONFIG_IP_VS_IPV6 1022#ifdef CONFIG_IP_VS_IPV6
@@ -1115,17 +1103,22 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
1115 if (!net_ipvs(net)->enable) 1103 if (!net_ipvs(net)->enable)
1116 return NF_ACCEPT; 1104 return NF_ACCEPT;
1117 1105
1118 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); 1106 ip_vs_fill_iph_skb(af, skb, &iph);
1119#ifdef CONFIG_IP_VS_IPV6 1107#ifdef CONFIG_IP_VS_IPV6
1120 if (af == AF_INET6) { 1108 if (af == AF_INET6) {
1109 if (!iph.fragoffs && skb_nfct_reasm(skb)) {
1110 struct sk_buff *reasm = skb_nfct_reasm(skb);
1111 /* Save fw mark for coming frags */
1112 reasm->ipvs_property = 1;
1113 reasm->mark = skb->mark;
1114 }
1121 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) { 1115 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1122 int related; 1116 int related;
1123 int verdict = ip_vs_out_icmp_v6(skb, &related, 1117 int verdict = ip_vs_out_icmp_v6(skb, &related,
1124 hooknum); 1118 hooknum, &iph);
1125 1119
1126 if (related) 1120 if (related)
1127 return verdict; 1121 return verdict;
1128 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1129 } 1122 }
1130 } else 1123 } else
1131#endif 1124#endif
@@ -1135,7 +1128,6 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
1135 1128
1136 if (related) 1129 if (related)
1137 return verdict; 1130 return verdict;
1138 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1139 } 1131 }
1140 1132
1141 pd = ip_vs_proto_data_get(net, iph.protocol); 1133 pd = ip_vs_proto_data_get(net, iph.protocol);
@@ -1145,39 +1137,31 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
1145 1137
1146 /* reassemble IP fragments */ 1138 /* reassemble IP fragments */
1147#ifdef CONFIG_IP_VS_IPV6 1139#ifdef CONFIG_IP_VS_IPV6
1148 if (af == AF_INET6) { 1140 if (af == AF_INET)
1149 if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) {
1150 if (ip_vs_gather_frags_v6(skb,
1151 ip_vs_defrag_user(hooknum)))
1152 return NF_STOLEN;
1153 }
1154
1155 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1156 } else
1157#endif 1141#endif
1158 if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) { 1142 if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) {
1159 if (ip_vs_gather_frags(skb, 1143 if (ip_vs_gather_frags(skb,
1160 ip_vs_defrag_user(hooknum))) 1144 ip_vs_defrag_user(hooknum)))
1161 return NF_STOLEN; 1145 return NF_STOLEN;
1162 1146
1163 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); 1147 ip_vs_fill_ip4hdr(skb_network_header(skb), &iph);
1164 } 1148 }
1165 1149
1166 /* 1150 /*
1167 * Check if the packet belongs to an existing entry 1151 * Check if the packet belongs to an existing entry
1168 */ 1152 */
1169 cp = pp->conn_out_get(af, skb, &iph, iph.len, 0); 1153 cp = pp->conn_out_get(af, skb, &iph, 0);
1170 1154
1171 if (likely(cp)) 1155 if (likely(cp))
1172 return handle_response(af, skb, pd, cp, iph.len); 1156 return handle_response(af, skb, pd, cp, &iph);
1173 if (sysctl_nat_icmp_send(net) && 1157 if (sysctl_nat_icmp_send(net) &&
1174 (pp->protocol == IPPROTO_TCP || 1158 (pp->protocol == IPPROTO_TCP ||
1175 pp->protocol == IPPROTO_UDP || 1159 pp->protocol == IPPROTO_UDP ||
1176 pp->protocol == IPPROTO_SCTP)) { 1160 pp->protocol == IPPROTO_SCTP)) {
1177 __be16 _ports[2], *pptr; 1161 __be16 _ports[2], *pptr;
1178 1162
1179 pptr = skb_header_pointer(skb, iph.len, 1163 pptr = frag_safe_skb_hp(skb, iph.len,
1180 sizeof(_ports), _ports); 1164 sizeof(_ports), _ports, &iph);
1181 if (pptr == NULL) 1165 if (pptr == NULL)
1182 return NF_ACCEPT; /* Not for me */ 1166 return NF_ACCEPT; /* Not for me */
1183 if (ip_vs_lookup_real_service(net, af, iph.protocol, 1167 if (ip_vs_lookup_real_service(net, af, iph.protocol,
@@ -1375,13 +1359,13 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
1375 "Checking incoming ICMP for"); 1359 "Checking incoming ICMP for");
1376 1360
1377 offset2 = offset; 1361 offset2 = offset;
1378 offset += cih->ihl * 4; 1362 ip_vs_fill_ip4hdr(cih, &ciph);
1379 1363 ciph.len += offset;
1380 ip_vs_fill_iphdr(AF_INET, cih, &ciph); 1364 offset = ciph.len;
1381 /* The embedded headers contain source and dest in reverse order. 1365 /* The embedded headers contain source and dest in reverse order.
1382 * For IPIP this is error for request, not for reply. 1366 * For IPIP this is error for request, not for reply.
1383 */ 1367 */
1384 cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, ipip ? 0 : 1); 1368 cp = pp->conn_in_get(AF_INET, skb, &ciph, ipip ? 0 : 1);
1385 if (!cp) 1369 if (!cp)
1386 return NF_ACCEPT; 1370 return NF_ACCEPT;
1387 1371
@@ -1450,7 +1434,7 @@ ignore_ipip:
1450 ip_vs_in_stats(cp, skb); 1434 ip_vs_in_stats(cp, skb);
1451 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol) 1435 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol)
1452 offset += 2 * sizeof(__u16); 1436 offset += 2 * sizeof(__u16);
1453 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum); 1437 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph);
1454 1438
1455out: 1439out:
1456 __ip_vs_conn_put(cp); 1440 __ip_vs_conn_put(cp);
@@ -1459,38 +1443,24 @@ out:
1459} 1443}
1460 1444
1461#ifdef CONFIG_IP_VS_IPV6 1445#ifdef CONFIG_IP_VS_IPV6
1462static int 1446static int ip_vs_in_icmp_v6(struct sk_buff *skb, int *related,
1463ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum) 1447 unsigned int hooknum, struct ip_vs_iphdr *iph)
1464{ 1448{
1465 struct net *net = NULL; 1449 struct net *net = NULL;
1466 struct ipv6hdr *iph; 1450 struct ipv6hdr _ip6h, *ip6h;
1467 struct icmp6hdr _icmph, *ic; 1451 struct icmp6hdr _icmph, *ic;
1468 struct ipv6hdr _ciph, *cih; /* The ip header contained 1452 struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
1469 within the ICMP */
1470 struct ip_vs_iphdr ciph;
1471 struct ip_vs_conn *cp; 1453 struct ip_vs_conn *cp;
1472 struct ip_vs_protocol *pp; 1454 struct ip_vs_protocol *pp;
1473 struct ip_vs_proto_data *pd; 1455 struct ip_vs_proto_data *pd;
1474 unsigned int offset, verdict; 1456 unsigned int offs_ciph, writable, verdict;
1475 1457
1476 *related = 1; 1458 *related = 1;
1477 1459
1478 /* reassemble IP fragments */ 1460 ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph, iph);
1479 if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) {
1480 if (ip_vs_gather_frags_v6(skb, ip_vs_defrag_user(hooknum)))
1481 return NF_STOLEN;
1482 }
1483
1484 iph = ipv6_hdr(skb);
1485 offset = sizeof(struct ipv6hdr);
1486 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
1487 if (ic == NULL) 1461 if (ic == NULL)
1488 return NF_DROP; 1462 return NF_DROP;
1489 1463
1490 IP_VS_DBG(12, "Incoming ICMPv6 (%d,%d) %pI6->%pI6\n",
1491 ic->icmp6_type, ntohs(icmpv6_id(ic)),
1492 &iph->saddr, &iph->daddr);
1493
1494 /* 1464 /*
1495 * Work through seeing if this is for us. 1465 * Work through seeing if this is for us.
1496 * These checks are supposed to be in an order that means easy 1466 * These checks are supposed to be in an order that means easy
@@ -1498,47 +1468,71 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
1498 * this means that some packets will manage to get a long way 1468 * this means that some packets will manage to get a long way
1499 * down this stack and then be rejected, but that's life. 1469 * down this stack and then be rejected, but that's life.
1500 */ 1470 */
1501 if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) && 1471 if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
1502 (ic->icmp6_type != ICMPV6_PKT_TOOBIG) &&
1503 (ic->icmp6_type != ICMPV6_TIME_EXCEED)) {
1504 *related = 0; 1472 *related = 0;
1505 return NF_ACCEPT; 1473 return NF_ACCEPT;
1506 } 1474 }
1475 /* Fragment header that is before ICMP header tells us that:
1476 * it's not an error message since they can't be fragmented.
1477 */
1478 if (iph->flags & IP6_FH_F_FRAG)
1479 return NF_DROP;
1480
1481 IP_VS_DBG(8, "Incoming ICMPv6 (%d,%d) %pI6c->%pI6c\n",
1482 ic->icmp6_type, ntohs(icmpv6_id(ic)),
1483 &iph->saddr, &iph->daddr);
1507 1484
1508 /* Now find the contained IP header */ 1485 /* Now find the contained IP header */
1509 offset += sizeof(_icmph); 1486 ciph.len = iph->len + sizeof(_icmph);
1510 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph); 1487 offs_ciph = ciph.len; /* Save ip header offset */
1511 if (cih == NULL) 1488 ip6h = skb_header_pointer(skb, ciph.len, sizeof(_ip6h), &_ip6h);
1489 if (ip6h == NULL)
1512 return NF_ACCEPT; /* The packet looks wrong, ignore */ 1490 return NF_ACCEPT; /* The packet looks wrong, ignore */
1491 ciph.saddr.in6 = ip6h->saddr; /* conn_in_get() handles reverse order */
1492 ciph.daddr.in6 = ip6h->daddr;
1493 /* skip possible IPv6 exthdrs of contained IPv6 packet */
1494 ciph.protocol = ipv6_find_hdr(skb, &ciph.len, -1, &ciph.fragoffs, NULL);
1495 if (ciph.protocol < 0)
1496 return NF_ACCEPT; /* Contained IPv6 hdr looks wrong, ignore */
1513 1497
1514 net = skb_net(skb); 1498 net = skb_net(skb);
1515 pd = ip_vs_proto_data_get(net, cih->nexthdr); 1499 pd = ip_vs_proto_data_get(net, ciph.protocol);
1516 if (!pd) 1500 if (!pd)
1517 return NF_ACCEPT; 1501 return NF_ACCEPT;
1518 pp = pd->pp; 1502 pp = pd->pp;
1519 1503
1520 /* Is the embedded protocol header present? */ 1504 /* Cannot handle fragmented embedded protocol */
1521 /* TODO: we don't support fragmentation at the moment anyways */ 1505 if (ciph.fragoffs)
1522 if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag))
1523 return NF_ACCEPT; 1506 return NF_ACCEPT;
1524 1507
1525 IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offset, 1508 IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offs_ciph,
1526 "Checking incoming ICMPv6 for"); 1509 "Checking incoming ICMPv6 for");
1527 1510
1528 offset += sizeof(struct ipv6hdr); 1511 /* The embedded headers contain source and dest in reverse order
1512 * if not from localhost
1513 */
1514 cp = pp->conn_in_get(AF_INET6, skb, &ciph,
1515 (hooknum == NF_INET_LOCAL_OUT) ? 0 : 1);
1529 1516
1530 ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
1531 /* The embedded headers contain source and dest in reverse order */
1532 cp = pp->conn_in_get(AF_INET6, skb, &ciph, offset, 1);
1533 if (!cp) 1517 if (!cp)
1534 return NF_ACCEPT; 1518 return NF_ACCEPT;
1519 /* VS/TUN, VS/DR and LOCALNODE just let it go */
1520 if ((hooknum == NF_INET_LOCAL_OUT) &&
1521 (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)) {
1522 __ip_vs_conn_put(cp);
1523 return NF_ACCEPT;
1524 }
1535 1525
1536 /* do the statistics and put it back */ 1526 /* do the statistics and put it back */
1537 ip_vs_in_stats(cp, skb); 1527 ip_vs_in_stats(cp, skb);
1538 if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr || 1528
1539 IPPROTO_SCTP == cih->nexthdr) 1529 /* Need to mangle contained IPv6 header in ICMPv6 packet */
1540 offset += 2 * sizeof(__u16); 1530 writable = ciph.len;
1541 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset, hooknum); 1531 if (IPPROTO_TCP == ciph.protocol || IPPROTO_UDP == ciph.protocol ||
1532 IPPROTO_SCTP == ciph.protocol)
1533 writable += 2 * sizeof(__u16); /* Also mangle ports */
1534
1535 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, writable, hooknum, &ciph);
1542 1536
1543 __ip_vs_conn_put(cp); 1537 __ip_vs_conn_put(cp);
1544 1538
@@ -1574,7 +1568,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1574 if (unlikely((skb->pkt_type != PACKET_HOST && 1568 if (unlikely((skb->pkt_type != PACKET_HOST &&
1575 hooknum != NF_INET_LOCAL_OUT) || 1569 hooknum != NF_INET_LOCAL_OUT) ||
1576 !skb_dst(skb))) { 1570 !skb_dst(skb))) {
1577 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); 1571 ip_vs_fill_iph_skb(af, skb, &iph);
1578 IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s" 1572 IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s"
1579 " ignored in hook %u\n", 1573 " ignored in hook %u\n",
1580 skb->pkt_type, iph.protocol, 1574 skb->pkt_type, iph.protocol,
@@ -1586,7 +1580,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1586 if (!net_ipvs(net)->enable) 1580 if (!net_ipvs(net)->enable)
1587 return NF_ACCEPT; 1581 return NF_ACCEPT;
1588 1582
1589 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); 1583 ip_vs_fill_iph_skb(af, skb, &iph);
1590 1584
1591 /* Bad... Do not break raw sockets */ 1585 /* Bad... Do not break raw sockets */
1592 if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT && 1586 if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
@@ -1600,13 +1594,19 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1600 1594
1601#ifdef CONFIG_IP_VS_IPV6 1595#ifdef CONFIG_IP_VS_IPV6
1602 if (af == AF_INET6) { 1596 if (af == AF_INET6) {
1597 if (!iph.fragoffs && skb_nfct_reasm(skb)) {
1598 struct sk_buff *reasm = skb_nfct_reasm(skb);
1599 /* Save fw mark for coming frags. */
1600 reasm->ipvs_property = 1;
1601 reasm->mark = skb->mark;
1602 }
1603 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) { 1603 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1604 int related; 1604 int related;
1605 int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum); 1605 int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum,
1606 &iph);
1606 1607
1607 if (related) 1608 if (related)
1608 return verdict; 1609 return verdict;
1609 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1610 } 1610 }
1611 } else 1611 } else
1612#endif 1612#endif
@@ -1616,7 +1616,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1616 1616
1617 if (related) 1617 if (related)
1618 return verdict; 1618 return verdict;
1619 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1620 } 1619 }
1621 1620
1622 /* Protocol supported? */ 1621 /* Protocol supported? */
@@ -1627,12 +1626,15 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1627 /* 1626 /*
1628 * Check if the packet belongs to an existing connection entry 1627 * Check if the packet belongs to an existing connection entry
1629 */ 1628 */
1630 cp = pp->conn_in_get(af, skb, &iph, iph.len, 0); 1629 cp = pp->conn_in_get(af, skb, &iph, 0);
1631 1630 if (unlikely(!cp) && !iph.fragoffs) {
1632 if (unlikely(!cp)) { 1631 /* No (second) fragments need to enter here, as nf_defrag_ipv6
1632 * replayed fragment zero will already have created the cp
1633 */
1633 int v; 1634 int v;
1634 1635
1635 if (!pp->conn_schedule(af, skb, pd, &v, &cp)) 1636 /* Schedule and create new connection entry into &cp */
1637 if (!pp->conn_schedule(af, skb, pd, &v, &cp, &iph))
1636 return v; 1638 return v;
1637 } 1639 }
1638 1640
@@ -1640,6 +1642,14 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1640 /* sorry, all this trouble for a no-hit :) */ 1642 /* sorry, all this trouble for a no-hit :) */
1641 IP_VS_DBG_PKT(12, af, pp, skb, 0, 1643 IP_VS_DBG_PKT(12, af, pp, skb, 0,
1642 "ip_vs_in: packet continues traversal as normal"); 1644 "ip_vs_in: packet continues traversal as normal");
1645 if (iph.fragoffs && !skb_nfct_reasm(skb)) {
1646 /* Fragment that couldn't be mapped to a conn entry
1647 * and don't have any pointer to a reasm skb
1648 * is missing module nf_defrag_ipv6
1649 */
1650 IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
1651 IP_VS_DBG_PKT(7, af, pp, skb, 0, "unhandled fragment");
1652 }
1643 return NF_ACCEPT; 1653 return NF_ACCEPT;
1644 } 1654 }
1645 1655
@@ -1662,7 +1672,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1662 ip_vs_in_stats(cp, skb); 1672 ip_vs_in_stats(cp, skb);
1663 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); 1673 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
1664 if (cp->packet_xmit) 1674 if (cp->packet_xmit)
1665 ret = cp->packet_xmit(skb, cp, pp); 1675 ret = cp->packet_xmit(skb, cp, pp, &iph);
1666 /* do not touch skb anymore */ 1676 /* do not touch skb anymore */
1667 else { 1677 else {
1668 IP_VS_DBG_RL("warning: packet_xmit is null"); 1678 IP_VS_DBG_RL("warning: packet_xmit is null");
@@ -1724,6 +1734,38 @@ ip_vs_local_request4(unsigned int hooknum, struct sk_buff *skb,
1724#ifdef CONFIG_IP_VS_IPV6 1734#ifdef CONFIG_IP_VS_IPV6
1725 1735
1726/* 1736/*
1737 * AF_INET6 fragment handling
1738 * Copy info from first fragment, to the rest of them.
1739 */
1740static unsigned int
1741ip_vs_preroute_frag6(unsigned int hooknum, struct sk_buff *skb,
1742 const struct net_device *in,
1743 const struct net_device *out,
1744 int (*okfn)(struct sk_buff *))
1745{
1746 struct sk_buff *reasm = skb_nfct_reasm(skb);
1747 struct net *net;
1748
1749 /* Skip if not a "replay" from nf_ct_frag6_output or first fragment.
1750 * ipvs_property is set when checking first fragment
1751 * in ip_vs_in() and ip_vs_out().
1752 */
1753 if (reasm)
1754 IP_VS_DBG(2, "Fragment recv prop:%d\n", reasm->ipvs_property);
1755 if (!reasm || !reasm->ipvs_property)
1756 return NF_ACCEPT;
1757
1758 net = skb_net(skb);
1759 if (!net_ipvs(net)->enable)
1760 return NF_ACCEPT;
1761
1762 /* Copy stored fw mark, saved in ip_vs_{in,out} */
1763 skb->mark = reasm->mark;
1764
1765 return NF_ACCEPT;
1766}
1767
1768/*
1727 * AF_INET6 handler in NF_INET_LOCAL_IN chain 1769 * AF_INET6 handler in NF_INET_LOCAL_IN chain
1728 * Schedule and forward packets from remote clients 1770 * Schedule and forward packets from remote clients
1729 */ 1771 */
@@ -1793,8 +1835,10 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
1793{ 1835{
1794 int r; 1836 int r;
1795 struct net *net; 1837 struct net *net;
1838 struct ip_vs_iphdr iphdr;
1796 1839
1797 if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6) 1840 ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr);
1841 if (iphdr.protocol != IPPROTO_ICMPV6)
1798 return NF_ACCEPT; 1842 return NF_ACCEPT;
1799 1843
1800 /* ipvs enabled in this netns ? */ 1844 /* ipvs enabled in this netns ? */
@@ -1802,7 +1846,7 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
1802 if (!net_ipvs(net)->enable) 1846 if (!net_ipvs(net)->enable)
1803 return NF_ACCEPT; 1847 return NF_ACCEPT;
1804 1848
1805 return ip_vs_in_icmp_v6(skb, &r, hooknum); 1849 return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr);
1806} 1850}
1807#endif 1851#endif
1808 1852
@@ -1860,6 +1904,14 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1860 .priority = 100, 1904 .priority = 100,
1861 }, 1905 },
1862#ifdef CONFIG_IP_VS_IPV6 1906#ifdef CONFIG_IP_VS_IPV6
1907 /* After mangle & nat fetch 2:nd fragment and following */
1908 {
1909 .hook = ip_vs_preroute_frag6,
1910 .owner = THIS_MODULE,
1911 .pf = NFPROTO_IPV6,
1912 .hooknum = NF_INET_PRE_ROUTING,
1913 .priority = NF_IP6_PRI_NAT_DST + 1,
1914 },
1863 /* After packet filtering, change source only for VS/NAT */ 1915 /* After packet filtering, change source only for VS/NAT */
1864 { 1916 {
1865 .hook = ip_vs_reply6, 1917 .hook = ip_vs_reply6,
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c4ee43710aab..ec664cbb119f 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2339,7 +2339,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2339 struct ip_vs_dest_user_kern udest; 2339 struct ip_vs_dest_user_kern udest;
2340 struct netns_ipvs *ipvs = net_ipvs(net); 2340 struct netns_ipvs *ipvs = net_ipvs(net);
2341 2341
2342 if (!capable(CAP_NET_ADMIN)) 2342 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2343 return -EPERM; 2343 return -EPERM;
2344 2344
2345 if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_SET_MAX) 2345 if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_SET_MAX)
@@ -2632,7 +2632,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2632 struct netns_ipvs *ipvs = net_ipvs(net); 2632 struct netns_ipvs *ipvs = net_ipvs(net);
2633 2633
2634 BUG_ON(!net); 2634 BUG_ON(!net);
2635 if (!capable(CAP_NET_ADMIN)) 2635 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2636 return -EPERM; 2636 return -EPERM;
2637 2637
2638 if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX) 2638 if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX)
@@ -3699,6 +3699,10 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
3699 tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL); 3699 tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL);
3700 if (tbl == NULL) 3700 if (tbl == NULL)
3701 return -ENOMEM; 3701 return -ENOMEM;
3702
3703 /* Don't export sysctls to unprivileged users */
3704 if (net->user_ns != &init_user_ns)
3705 tbl[0].procname = NULL;
3702 } else 3706 } else
3703 tbl = vs_vars; 3707 tbl = vs_vars;
3704 /* Initialize sysctl defaults */ 3708 /* Initialize sysctl defaults */
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c
index 8b7dca9ea422..7f3b0cc00b7a 100644
--- a/net/netfilter/ipvs/ip_vs_dh.c
+++ b/net/netfilter/ipvs/ip_vs_dh.c
@@ -215,7 +215,7 @@ ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
215 struct ip_vs_dh_bucket *tbl; 215 struct ip_vs_dh_bucket *tbl;
216 struct ip_vs_iphdr iph; 216 struct ip_vs_iphdr iph;
217 217
218 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); 218 ip_vs_fill_iph_addr_only(svc->af, skb, &iph);
219 219
220 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); 220 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
221 221
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index df646ccf08a7..fdd89b9564ea 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -479,7 +479,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
479 struct ip_vs_dest *dest = NULL; 479 struct ip_vs_dest *dest = NULL;
480 struct ip_vs_lblc_entry *en; 480 struct ip_vs_lblc_entry *en;
481 481
482 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); 482 ip_vs_fill_iph_addr_only(svc->af, skb, &iph);
483 483
484 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); 484 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
485 485
@@ -560,6 +560,11 @@ static int __net_init __ip_vs_lblc_init(struct net *net)
560 GFP_KERNEL); 560 GFP_KERNEL);
561 if (ipvs->lblc_ctl_table == NULL) 561 if (ipvs->lblc_ctl_table == NULL)
562 return -ENOMEM; 562 return -ENOMEM;
563
564 /* Don't export sysctls to unprivileged users */
565 if (net->user_ns != &init_user_ns)
566 ipvs->lblc_ctl_table[0].procname = NULL;
567
563 } else 568 } else
564 ipvs->lblc_ctl_table = vs_vars_table; 569 ipvs->lblc_ctl_table = vs_vars_table;
565 ipvs->sysctl_lblc_expiration = DEFAULT_EXPIRATION; 570 ipvs->sysctl_lblc_expiration = DEFAULT_EXPIRATION;
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 570e31ea427a..c03b6a3ade2f 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -649,7 +649,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
649 struct ip_vs_dest *dest = NULL; 649 struct ip_vs_dest *dest = NULL;
650 struct ip_vs_lblcr_entry *en; 650 struct ip_vs_lblcr_entry *en;
651 651
652 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); 652 ip_vs_fill_iph_addr_only(svc->af, skb, &iph);
653 653
654 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); 654 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
655 655
@@ -754,6 +754,10 @@ static int __net_init __ip_vs_lblcr_init(struct net *net)
754 GFP_KERNEL); 754 GFP_KERNEL);
755 if (ipvs->lblcr_ctl_table == NULL) 755 if (ipvs->lblcr_ctl_table == NULL)
756 return -ENOMEM; 756 return -ENOMEM;
757
758 /* Don't export sysctls to unprivileged users */
759 if (net->user_ns != &init_user_ns)
760 ipvs->lblcr_ctl_table[0].procname = NULL;
757 } else 761 } else
758 ipvs->lblcr_ctl_table = vs_vars_table; 762 ipvs->lblcr_ctl_table = vs_vars_table;
759 ipvs->sysctl_lblcr_expiration = DEFAULT_EXPIRATION; 763 ipvs->sysctl_lblcr_expiration = DEFAULT_EXPIRATION;
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index 022e77e1e766..c8beafd401aa 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -82,7 +82,7 @@ void
82ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin) 82ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
83{ 83{
84 enum ip_conntrack_info ctinfo; 84 enum ip_conntrack_info ctinfo;
85 struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo); 85 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
86 struct nf_conntrack_tuple new_tuple; 86 struct nf_conntrack_tuple new_tuple;
87 87
88 if (ct == NULL || nf_ct_is_confirmed(ct) || nf_ct_is_untracked(ct) || 88 if (ct == NULL || nf_ct_is_confirmed(ct) || nf_ct_is_untracked(ct) ||
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index 1aa5cac748c4..12475ef88daf 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -68,23 +68,31 @@ static int get_callid(const char *dptr, unsigned int dataoff,
68static int 68static int
69ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb) 69ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
70{ 70{
71 struct sk_buff *reasm = skb_nfct_reasm(skb);
71 struct ip_vs_iphdr iph; 72 struct ip_vs_iphdr iph;
72 unsigned int dataoff, datalen, matchoff, matchlen; 73 unsigned int dataoff, datalen, matchoff, matchlen;
73 const char *dptr; 74 const char *dptr;
74 int retc; 75 int retc;
75 76
76 ip_vs_fill_iphdr(p->af, skb_network_header(skb), &iph); 77 ip_vs_fill_iph_skb(p->af, skb, &iph);
77 78
78 /* Only useful with UDP */ 79 /* Only useful with UDP */
79 if (iph.protocol != IPPROTO_UDP) 80 if (iph.protocol != IPPROTO_UDP)
80 return -EINVAL; 81 return -EINVAL;
82 /* todo: IPv6 fragments:
83 * I think this only should be done for the first fragment. /HS
84 */
85 if (reasm) {
86 skb = reasm;
87 dataoff = iph.thoff_reasm + sizeof(struct udphdr);
88 } else
89 dataoff = iph.len + sizeof(struct udphdr);
81 90
82 /* No Data ? */
83 dataoff = iph.len + sizeof(struct udphdr);
84 if (dataoff >= skb->len) 91 if (dataoff >= skb->len)
85 return -EINVAL; 92 return -EINVAL;
86 93 /* todo: Check if this will mess-up the reasm skb !!! /HS */
87 if ((retc=skb_linearize(skb)) < 0) 94 retc = skb_linearize(skb);
95 if (retc < 0)
88 return retc; 96 return retc;
89 dptr = skb->data + dataoff; 97 dptr = skb->data + dataoff;
90 datalen = skb->len - dataoff; 98 datalen = skb->len - dataoff;
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index 50d82186da87..939f7fbe9b46 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -280,17 +280,17 @@ ip_vs_tcpudp_debug_packet_v6(struct ip_vs_protocol *pp,
280 if (ih == NULL) 280 if (ih == NULL)
281 sprintf(buf, "TRUNCATED"); 281 sprintf(buf, "TRUNCATED");
282 else if (ih->nexthdr == IPPROTO_FRAGMENT) 282 else if (ih->nexthdr == IPPROTO_FRAGMENT)
283 sprintf(buf, "%pI6->%pI6 frag", &ih->saddr, &ih->daddr); 283 sprintf(buf, "%pI6c->%pI6c frag", &ih->saddr, &ih->daddr);
284 else { 284 else {
285 __be16 _ports[2], *pptr; 285 __be16 _ports[2], *pptr;
286 286
287 pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr), 287 pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr),
288 sizeof(_ports), _ports); 288 sizeof(_ports), _ports);
289 if (pptr == NULL) 289 if (pptr == NULL)
290 sprintf(buf, "TRUNCATED %pI6->%pI6", 290 sprintf(buf, "TRUNCATED %pI6c->%pI6c",
291 &ih->saddr, &ih->daddr); 291 &ih->saddr, &ih->daddr);
292 else 292 else
293 sprintf(buf, "%pI6:%u->%pI6:%u", 293 sprintf(buf, "%pI6c:%u->%pI6c:%u",
294 &ih->saddr, ntohs(pptr[0]), 294 &ih->saddr, ntohs(pptr[0]),
295 &ih->daddr, ntohs(pptr[1])); 295 &ih->daddr, ntohs(pptr[1]));
296 } 296 }
diff --git a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
index 5b8eb8b12c3e..5de3dd312c0f 100644
--- a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
@@ -57,7 +57,7 @@ ah_esp_conn_fill_param_proto(struct net *net, int af,
57 57
58static struct ip_vs_conn * 58static struct ip_vs_conn *
59ah_esp_conn_in_get(int af, const struct sk_buff *skb, 59ah_esp_conn_in_get(int af, const struct sk_buff *skb,
60 const struct ip_vs_iphdr *iph, unsigned int proto_off, 60 const struct ip_vs_iphdr *iph,
61 int inverse) 61 int inverse)
62{ 62{
63 struct ip_vs_conn *cp; 63 struct ip_vs_conn *cp;
@@ -85,9 +85,7 @@ ah_esp_conn_in_get(int af, const struct sk_buff *skb,
85 85
86static struct ip_vs_conn * 86static struct ip_vs_conn *
87ah_esp_conn_out_get(int af, const struct sk_buff *skb, 87ah_esp_conn_out_get(int af, const struct sk_buff *skb,
88 const struct ip_vs_iphdr *iph, 88 const struct ip_vs_iphdr *iph, int inverse)
89 unsigned int proto_off,
90 int inverse)
91{ 89{
92 struct ip_vs_conn *cp; 90 struct ip_vs_conn *cp;
93 struct ip_vs_conn_param p; 91 struct ip_vs_conn_param p;
@@ -110,7 +108,8 @@ ah_esp_conn_out_get(int af, const struct sk_buff *skb,
110 108
111static int 109static int
112ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, 110ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
113 int *verdict, struct ip_vs_conn **cpp) 111 int *verdict, struct ip_vs_conn **cpp,
112 struct ip_vs_iphdr *iph)
114{ 113{
115 /* 114 /*
116 * AH/ESP is only related traffic. Pass the packet to IP stack. 115 * AH/ESP is only related traffic. Pass the packet to IP stack.
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 9f3fb751c491..746048b13ef3 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -10,28 +10,26 @@
10 10
11static int 11static int
12sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, 12sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
13 int *verdict, struct ip_vs_conn **cpp) 13 int *verdict, struct ip_vs_conn **cpp,
14 struct ip_vs_iphdr *iph)
14{ 15{
15 struct net *net; 16 struct net *net;
16 struct ip_vs_service *svc; 17 struct ip_vs_service *svc;
17 sctp_chunkhdr_t _schunkh, *sch; 18 sctp_chunkhdr_t _schunkh, *sch;
18 sctp_sctphdr_t *sh, _sctph; 19 sctp_sctphdr_t *sh, _sctph;
19 struct ip_vs_iphdr iph;
20 20
21 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); 21 sh = skb_header_pointer(skb, iph->len, sizeof(_sctph), &_sctph);
22
23 sh = skb_header_pointer(skb, iph.len, sizeof(_sctph), &_sctph);
24 if (sh == NULL) 22 if (sh == NULL)
25 return 0; 23 return 0;
26 24
27 sch = skb_header_pointer(skb, iph.len + sizeof(sctp_sctphdr_t), 25 sch = skb_header_pointer(skb, iph->len + sizeof(sctp_sctphdr_t),
28 sizeof(_schunkh), &_schunkh); 26 sizeof(_schunkh), &_schunkh);
29 if (sch == NULL) 27 if (sch == NULL)
30 return 0; 28 return 0;
31 net = skb_net(skb); 29 net = skb_net(skb);
32 if ((sch->type == SCTP_CID_INIT) && 30 if ((sch->type == SCTP_CID_INIT) &&
33 (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol, 31 (svc = ip_vs_service_get(net, af, skb->mark, iph->protocol,
34 &iph.daddr, sh->dest))) { 32 &iph->daddr, sh->dest))) {
35 int ignored; 33 int ignored;
36 34
37 if (ip_vs_todrop(net_ipvs(net))) { 35 if (ip_vs_todrop(net_ipvs(net))) {
@@ -47,10 +45,10 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
47 * Let the virtual server select a real server for the 45 * Let the virtual server select a real server for the
48 * incoming connection, and create a connection entry. 46 * incoming connection, and create a connection entry.
49 */ 47 */
50 *cpp = ip_vs_schedule(svc, skb, pd, &ignored); 48 *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
51 if (!*cpp && ignored <= 0) { 49 if (!*cpp && ignored <= 0) {
52 if (!ignored) 50 if (!ignored)
53 *verdict = ip_vs_leave(svc, skb, pd); 51 *verdict = ip_vs_leave(svc, skb, pd, iph);
54 else { 52 else {
55 ip_vs_service_put(svc); 53 ip_vs_service_put(svc);
56 *verdict = NF_DROP; 54 *verdict = NF_DROP;
@@ -64,20 +62,18 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
64} 62}
65 63
66static int 64static int
67sctp_snat_handler(struct sk_buff *skb, 65sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
68 struct ip_vs_protocol *pp, struct ip_vs_conn *cp) 66 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
69{ 67{
70 sctp_sctphdr_t *sctph; 68 sctp_sctphdr_t *sctph;
71 unsigned int sctphoff; 69 unsigned int sctphoff = iph->len;
72 struct sk_buff *iter; 70 struct sk_buff *iter;
73 __be32 crc32; 71 __be32 crc32;
74 72
75#ifdef CONFIG_IP_VS_IPV6 73#ifdef CONFIG_IP_VS_IPV6
76 if (cp->af == AF_INET6) 74 if (cp->af == AF_INET6 && iph->fragoffs)
77 sctphoff = sizeof(struct ipv6hdr); 75 return 1;
78 else
79#endif 76#endif
80 sctphoff = ip_hdrlen(skb);
81 77
82 /* csum_check requires unshared skb */ 78 /* csum_check requires unshared skb */
83 if (!skb_make_writable(skb, sctphoff + sizeof(*sctph))) 79 if (!skb_make_writable(skb, sctphoff + sizeof(*sctph)))
@@ -108,20 +104,18 @@ sctp_snat_handler(struct sk_buff *skb,
108} 104}
109 105
110static int 106static int
111sctp_dnat_handler(struct sk_buff *skb, 107sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
112 struct ip_vs_protocol *pp, struct ip_vs_conn *cp) 108 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
113{ 109{
114 sctp_sctphdr_t *sctph; 110 sctp_sctphdr_t *sctph;
115 unsigned int sctphoff; 111 unsigned int sctphoff = iph->len;
116 struct sk_buff *iter; 112 struct sk_buff *iter;
117 __be32 crc32; 113 __be32 crc32;
118 114
119#ifdef CONFIG_IP_VS_IPV6 115#ifdef CONFIG_IP_VS_IPV6
120 if (cp->af == AF_INET6) 116 if (cp->af == AF_INET6 && iph->fragoffs)
121 sctphoff = sizeof(struct ipv6hdr); 117 return 1;
122 else
123#endif 118#endif
124 sctphoff = ip_hdrlen(skb);
125 119
126 /* csum_check requires unshared skb */ 120 /* csum_check requires unshared skb */
127 if (!skb_make_writable(skb, sctphoff + sizeof(*sctph))) 121 if (!skb_make_writable(skb, sctphoff + sizeof(*sctph)))
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index cd609cc62721..9af653a75825 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -33,16 +33,14 @@
33 33
34static int 34static int
35tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, 35tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
36 int *verdict, struct ip_vs_conn **cpp) 36 int *verdict, struct ip_vs_conn **cpp,
37 struct ip_vs_iphdr *iph)
37{ 38{
38 struct net *net; 39 struct net *net;
39 struct ip_vs_service *svc; 40 struct ip_vs_service *svc;
40 struct tcphdr _tcph, *th; 41 struct tcphdr _tcph, *th;
41 struct ip_vs_iphdr iph;
42 42
43 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); 43 th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
44
45 th = skb_header_pointer(skb, iph.len, sizeof(_tcph), &_tcph);
46 if (th == NULL) { 44 if (th == NULL) {
47 *verdict = NF_DROP; 45 *verdict = NF_DROP;
48 return 0; 46 return 0;
@@ -50,8 +48,8 @@ tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
50 net = skb_net(skb); 48 net = skb_net(skb);
51 /* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */ 49 /* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */
52 if (th->syn && 50 if (th->syn &&
53 (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol, 51 (svc = ip_vs_service_get(net, af, skb->mark, iph->protocol,
54 &iph.daddr, th->dest))) { 52 &iph->daddr, th->dest))) {
55 int ignored; 53 int ignored;
56 54
57 if (ip_vs_todrop(net_ipvs(net))) { 55 if (ip_vs_todrop(net_ipvs(net))) {
@@ -68,10 +66,10 @@ tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
68 * Let the virtual server select a real server for the 66 * Let the virtual server select a real server for the
69 * incoming connection, and create a connection entry. 67 * incoming connection, and create a connection entry.
70 */ 68 */
71 *cpp = ip_vs_schedule(svc, skb, pd, &ignored); 69 *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
72 if (!*cpp && ignored <= 0) { 70 if (!*cpp && ignored <= 0) {
73 if (!ignored) 71 if (!ignored)
74 *verdict = ip_vs_leave(svc, skb, pd); 72 *verdict = ip_vs_leave(svc, skb, pd, iph);
75 else { 73 else {
76 ip_vs_service_put(svc); 74 ip_vs_service_put(svc);
77 *verdict = NF_DROP; 75 *verdict = NF_DROP;
@@ -128,20 +126,18 @@ tcp_partial_csum_update(int af, struct tcphdr *tcph,
128 126
129 127
130static int 128static int
131tcp_snat_handler(struct sk_buff *skb, 129tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
132 struct ip_vs_protocol *pp, struct ip_vs_conn *cp) 130 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
133{ 131{
134 struct tcphdr *tcph; 132 struct tcphdr *tcph;
135 unsigned int tcphoff; 133 unsigned int tcphoff = iph->len;
136 int oldlen; 134 int oldlen;
137 int payload_csum = 0; 135 int payload_csum = 0;
138 136
139#ifdef CONFIG_IP_VS_IPV6 137#ifdef CONFIG_IP_VS_IPV6
140 if (cp->af == AF_INET6) 138 if (cp->af == AF_INET6 && iph->fragoffs)
141 tcphoff = sizeof(struct ipv6hdr); 139 return 1;
142 else
143#endif 140#endif
144 tcphoff = ip_hdrlen(skb);
145 oldlen = skb->len - tcphoff; 141 oldlen = skb->len - tcphoff;
146 142
147 /* csum_check requires unshared skb */ 143 /* csum_check requires unshared skb */
@@ -208,20 +204,18 @@ tcp_snat_handler(struct sk_buff *skb,
208 204
209 205
210static int 206static int
211tcp_dnat_handler(struct sk_buff *skb, 207tcp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
212 struct ip_vs_protocol *pp, struct ip_vs_conn *cp) 208 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
213{ 209{
214 struct tcphdr *tcph; 210 struct tcphdr *tcph;
215 unsigned int tcphoff; 211 unsigned int tcphoff = iph->len;
216 int oldlen; 212 int oldlen;
217 int payload_csum = 0; 213 int payload_csum = 0;
218 214
219#ifdef CONFIG_IP_VS_IPV6 215#ifdef CONFIG_IP_VS_IPV6
220 if (cp->af == AF_INET6) 216 if (cp->af == AF_INET6 && iph->fragoffs)
221 tcphoff = sizeof(struct ipv6hdr); 217 return 1;
222 else
223#endif 218#endif
224 tcphoff = ip_hdrlen(skb);
225 oldlen = skb->len - tcphoff; 219 oldlen = skb->len - tcphoff;
226 220
227 /* csum_check requires unshared skb */ 221 /* csum_check requires unshared skb */
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index 2fedb2dcb3d1..503a842c90d2 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -30,23 +30,22 @@
30 30
31static int 31static int
32udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, 32udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
33 int *verdict, struct ip_vs_conn **cpp) 33 int *verdict, struct ip_vs_conn **cpp,
34 struct ip_vs_iphdr *iph)
34{ 35{
35 struct net *net; 36 struct net *net;
36 struct ip_vs_service *svc; 37 struct ip_vs_service *svc;
37 struct udphdr _udph, *uh; 38 struct udphdr _udph, *uh;
38 struct ip_vs_iphdr iph;
39 39
40 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); 40 /* IPv6 fragments, only first fragment will hit this */
41 41 uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph);
42 uh = skb_header_pointer(skb, iph.len, sizeof(_udph), &_udph);
43 if (uh == NULL) { 42 if (uh == NULL) {
44 *verdict = NF_DROP; 43 *verdict = NF_DROP;
45 return 0; 44 return 0;
46 } 45 }
47 net = skb_net(skb); 46 net = skb_net(skb);
48 svc = ip_vs_service_get(net, af, skb->mark, iph.protocol, 47 svc = ip_vs_service_get(net, af, skb->mark, iph->protocol,
49 &iph.daddr, uh->dest); 48 &iph->daddr, uh->dest);
50 if (svc) { 49 if (svc) {
51 int ignored; 50 int ignored;
52 51
@@ -64,10 +63,10 @@ udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
64 * Let the virtual server select a real server for the 63 * Let the virtual server select a real server for the
65 * incoming connection, and create a connection entry. 64 * incoming connection, and create a connection entry.
66 */ 65 */
67 *cpp = ip_vs_schedule(svc, skb, pd, &ignored); 66 *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
68 if (!*cpp && ignored <= 0) { 67 if (!*cpp && ignored <= 0) {
69 if (!ignored) 68 if (!ignored)
70 *verdict = ip_vs_leave(svc, skb, pd); 69 *verdict = ip_vs_leave(svc, skb, pd, iph);
71 else { 70 else {
72 ip_vs_service_put(svc); 71 ip_vs_service_put(svc);
73 *verdict = NF_DROP; 72 *verdict = NF_DROP;
@@ -125,20 +124,18 @@ udp_partial_csum_update(int af, struct udphdr *uhdr,
125 124
126 125
127static int 126static int
128udp_snat_handler(struct sk_buff *skb, 127udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
129 struct ip_vs_protocol *pp, struct ip_vs_conn *cp) 128 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
130{ 129{
131 struct udphdr *udph; 130 struct udphdr *udph;
132 unsigned int udphoff; 131 unsigned int udphoff = iph->len;
133 int oldlen; 132 int oldlen;
134 int payload_csum = 0; 133 int payload_csum = 0;
135 134
136#ifdef CONFIG_IP_VS_IPV6 135#ifdef CONFIG_IP_VS_IPV6
137 if (cp->af == AF_INET6) 136 if (cp->af == AF_INET6 && iph->fragoffs)
138 udphoff = sizeof(struct ipv6hdr); 137 return 1;
139 else
140#endif 138#endif
141 udphoff = ip_hdrlen(skb);
142 oldlen = skb->len - udphoff; 139 oldlen = skb->len - udphoff;
143 140
144 /* csum_check requires unshared skb */ 141 /* csum_check requires unshared skb */
@@ -210,20 +207,18 @@ udp_snat_handler(struct sk_buff *skb,
210 207
211 208
212static int 209static int
213udp_dnat_handler(struct sk_buff *skb, 210udp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
214 struct ip_vs_protocol *pp, struct ip_vs_conn *cp) 211 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
215{ 212{
216 struct udphdr *udph; 213 struct udphdr *udph;
217 unsigned int udphoff; 214 unsigned int udphoff = iph->len;
218 int oldlen; 215 int oldlen;
219 int payload_csum = 0; 216 int payload_csum = 0;
220 217
221#ifdef CONFIG_IP_VS_IPV6 218#ifdef CONFIG_IP_VS_IPV6
222 if (cp->af == AF_INET6) 219 if (cp->af == AF_INET6 && iph->fragoffs)
223 udphoff = sizeof(struct ipv6hdr); 220 return 1;
224 else
225#endif 221#endif
226 udphoff = ip_hdrlen(skb);
227 oldlen = skb->len - udphoff; 222 oldlen = skb->len - udphoff;
228 223
229 /* csum_check requires unshared skb */ 224 /* csum_check requires unshared skb */
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index 08dbdd5bc18f..d6bf20d6cdbe 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -159,7 +159,7 @@ void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
159 svc->fwmark, msg); 159 svc->fwmark, msg);
160#ifdef CONFIG_IP_VS_IPV6 160#ifdef CONFIG_IP_VS_IPV6
161 } else if (svc->af == AF_INET6) { 161 } else if (svc->af == AF_INET6) {
162 IP_VS_ERR_RL("%s: %s [%pI6]:%d - %s\n", 162 IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n",
163 svc->scheduler->name, 163 svc->scheduler->name,
164 ip_vs_proto_name(svc->protocol), 164 ip_vs_proto_name(svc->protocol),
165 &svc->addr.in6, ntohs(svc->port), msg); 165 &svc->addr.in6, ntohs(svc->port), msg);
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index 05126521743e..e33126994628 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -228,7 +228,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
228 struct ip_vs_sh_bucket *tbl; 228 struct ip_vs_sh_bucket *tbl;
229 struct ip_vs_iphdr iph; 229 struct ip_vs_iphdr iph;
230 230
231 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); 231 ip_vs_fill_iph_addr_only(svc->af, skb, &iph);
232 232
233 IP_VS_DBG(6, "ip_vs_sh_schedule(): Scheduling...\n"); 233 IP_VS_DBG(6, "ip_vs_sh_schedule(): Scheduling...\n");
234 234
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index cc4c8095681a..ee6b7a9f1ec2 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -338,7 +338,7 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
338 local = __ip_vs_is_local_route6(rt); 338 local = __ip_vs_is_local_route6(rt);
339 if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) & 339 if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
340 rt_mode)) { 340 rt_mode)) {
341 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6\n", 341 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6c\n",
342 local ? "local":"non-local", daddr); 342 local ? "local":"non-local", daddr);
343 dst_release(&rt->dst); 343 dst_release(&rt->dst);
344 return NULL; 344 return NULL;
@@ -346,8 +346,8 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
346 if (local && !(rt_mode & IP_VS_RT_MODE_RDR) && 346 if (local && !(rt_mode & IP_VS_RT_MODE_RDR) &&
347 !((ort = (struct rt6_info *) skb_dst(skb)) && 347 !((ort = (struct rt6_info *) skb_dst(skb)) &&
348 __ip_vs_is_local_route6(ort))) { 348 __ip_vs_is_local_route6(ort))) {
349 IP_VS_DBG_RL("Redirect from non-local address %pI6 to local " 349 IP_VS_DBG_RL("Redirect from non-local address %pI6c to local "
350 "requires NAT method, dest: %pI6\n", 350 "requires NAT method, dest: %pI6c\n",
351 &ipv6_hdr(skb)->daddr, daddr); 351 &ipv6_hdr(skb)->daddr, daddr);
352 dst_release(&rt->dst); 352 dst_release(&rt->dst);
353 return NULL; 353 return NULL;
@@ -355,8 +355,8 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
355 if (unlikely(!local && (!skb->dev || skb->dev->flags & IFF_LOOPBACK) && 355 if (unlikely(!local && (!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
356 ipv6_addr_type(&ipv6_hdr(skb)->saddr) & 356 ipv6_addr_type(&ipv6_hdr(skb)->saddr) &
357 IPV6_ADDR_LOOPBACK)) { 357 IPV6_ADDR_LOOPBACK)) {
358 IP_VS_DBG_RL("Stopping traffic from loopback address %pI6 " 358 IP_VS_DBG_RL("Stopping traffic from loopback address %pI6c "
359 "to non-local address, dest: %pI6\n", 359 "to non-local address, dest: %pI6c\n",
360 &ipv6_hdr(skb)->saddr, daddr); 360 &ipv6_hdr(skb)->saddr, daddr);
361 dst_release(&rt->dst); 361 dst_release(&rt->dst);
362 return NULL; 362 return NULL;
@@ -427,7 +427,7 @@ do { \
427 */ 427 */
428int 428int
429ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 429ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
430 struct ip_vs_protocol *pp) 430 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
431{ 431{
432 /* we do not touch skb and do not need pskb ptr */ 432 /* we do not touch skb and do not need pskb ptr */
433 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1); 433 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
@@ -441,7 +441,7 @@ ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
441 */ 441 */
442int 442int
443ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 443ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
444 struct ip_vs_protocol *pp) 444 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
445{ 445{
446 struct rtable *rt; /* Route to the other host */ 446 struct rtable *rt; /* Route to the other host */
447 struct iphdr *iph = ip_hdr(skb); 447 struct iphdr *iph = ip_hdr(skb);
@@ -496,16 +496,16 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
496#ifdef CONFIG_IP_VS_IPV6 496#ifdef CONFIG_IP_VS_IPV6
497int 497int
498ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 498ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
499 struct ip_vs_protocol *pp) 499 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph)
500{ 500{
501 struct rt6_info *rt; /* Route to the other host */ 501 struct rt6_info *rt; /* Route to the other host */
502 struct ipv6hdr *iph = ipv6_hdr(skb);
503 int mtu; 502 int mtu;
504 503
505 EnterFunction(10); 504 EnterFunction(10);
506 505
507 if (!(rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr, NULL, 0, 506 rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr.in6, NULL, 0,
508 IP_VS_RT_MODE_NON_LOCAL))) 507 IP_VS_RT_MODE_NON_LOCAL);
508 if (!rt)
509 goto tx_error_icmp; 509 goto tx_error_icmp;
510 510
511 /* MTU checking */ 511 /* MTU checking */
@@ -516,7 +516,9 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
516 516
517 skb->dev = net->loopback_dev; 517 skb->dev = net->loopback_dev;
518 } 518 }
519 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 519 /* only send ICMP too big on first fragment */
520 if (!iph->fragoffs)
521 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
520 dst_release(&rt->dst); 522 dst_release(&rt->dst);
521 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 523 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
522 goto tx_error; 524 goto tx_error;
@@ -559,7 +561,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
559 */ 561 */
560int 562int
561ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 563ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
562 struct ip_vs_protocol *pp) 564 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
563{ 565{
564 struct rtable *rt; /* Route to the other host */ 566 struct rtable *rt; /* Route to the other host */
565 int mtu; 567 int mtu;
@@ -592,7 +594,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
592#if IS_ENABLED(CONFIG_NF_CONNTRACK) 594#if IS_ENABLED(CONFIG_NF_CONNTRACK)
593 if (cp->flags & IP_VS_CONN_F_SYNC && local) { 595 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
594 enum ip_conntrack_info ctinfo; 596 enum ip_conntrack_info ctinfo;
595 struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo); 597 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
596 598
597 if (ct && !nf_ct_is_untracked(ct)) { 599 if (ct && !nf_ct_is_untracked(ct)) {
598 IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, 0, 600 IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, 0,
@@ -629,7 +631,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
629 goto tx_error_put; 631 goto tx_error_put;
630 632
631 /* mangle the packet */ 633 /* mangle the packet */
632 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) 634 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh))
633 goto tx_error_put; 635 goto tx_error_put;
634 ip_hdr(skb)->daddr = cp->daddr.ip; 636 ip_hdr(skb)->daddr = cp->daddr.ip;
635 ip_send_check(ip_hdr(skb)); 637 ip_send_check(ip_hdr(skb));
@@ -677,7 +679,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
677#ifdef CONFIG_IP_VS_IPV6 679#ifdef CONFIG_IP_VS_IPV6
678int 680int
679ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 681ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
680 struct ip_vs_protocol *pp) 682 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph)
681{ 683{
682 struct rt6_info *rt; /* Route to the other host */ 684 struct rt6_info *rt; /* Route to the other host */
683 int mtu; 685 int mtu;
@@ -686,10 +688,9 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
686 EnterFunction(10); 688 EnterFunction(10);
687 689
688 /* check if it is a connection of no-client-port */ 690 /* check if it is a connection of no-client-port */
689 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) { 691 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT && !iph->fragoffs)) {
690 __be16 _pt, *p; 692 __be16 _pt, *p;
691 p = skb_header_pointer(skb, sizeof(struct ipv6hdr), 693 p = skb_header_pointer(skb, iph->len, sizeof(_pt), &_pt);
692 sizeof(_pt), &_pt);
693 if (p == NULL) 694 if (p == NULL)
694 goto tx_error; 695 goto tx_error;
695 ip_vs_conn_fill_cport(cp, *p); 696 ip_vs_conn_fill_cport(cp, *p);
@@ -709,7 +710,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
709#if IS_ENABLED(CONFIG_NF_CONNTRACK) 710#if IS_ENABLED(CONFIG_NF_CONNTRACK)
710 if (cp->flags & IP_VS_CONN_F_SYNC && local) { 711 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
711 enum ip_conntrack_info ctinfo; 712 enum ip_conntrack_info ctinfo;
712 struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo); 713 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
713 714
714 if (ct && !nf_ct_is_untracked(ct)) { 715 if (ct && !nf_ct_is_untracked(ct)) {
715 IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, 0, 716 IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, 0,
@@ -737,7 +738,9 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
737 738
738 skb->dev = net->loopback_dev; 739 skb->dev = net->loopback_dev;
739 } 740 }
740 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 741 /* only send ICMP too big on first fragment */
742 if (!iph->fragoffs)
743 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
741 IP_VS_DBG_RL_PKT(0, AF_INET6, pp, skb, 0, 744 IP_VS_DBG_RL_PKT(0, AF_INET6, pp, skb, 0,
742 "ip_vs_nat_xmit_v6(): frag needed for"); 745 "ip_vs_nat_xmit_v6(): frag needed for");
743 goto tx_error_put; 746 goto tx_error_put;
@@ -751,7 +754,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
751 goto tx_error_put; 754 goto tx_error_put;
752 755
753 /* mangle the packet */ 756 /* mangle the packet */
754 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) 757 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, iph))
755 goto tx_error; 758 goto tx_error;
756 ipv6_hdr(skb)->daddr = cp->daddr.in6; 759 ipv6_hdr(skb)->daddr = cp->daddr.in6;
757 760
@@ -812,7 +815,7 @@ tx_error_put:
812 */ 815 */
813int 816int
814ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 817ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
815 struct ip_vs_protocol *pp) 818 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
816{ 819{
817 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb)); 820 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
818 struct rtable *rt; /* Route to the other host */ 821 struct rtable *rt; /* Route to the other host */
@@ -932,7 +935,7 @@ tx_error_put:
932#ifdef CONFIG_IP_VS_IPV6 935#ifdef CONFIG_IP_VS_IPV6
933int 936int
934ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 937ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
935 struct ip_vs_protocol *pp) 938 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
936{ 939{
937 struct rt6_info *rt; /* Route to the other host */ 940 struct rt6_info *rt; /* Route to the other host */
938 struct in6_addr saddr; /* Source for tunnel */ 941 struct in6_addr saddr; /* Source for tunnel */
@@ -972,7 +975,9 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
972 975
973 skb->dev = net->loopback_dev; 976 skb->dev = net->loopback_dev;
974 } 977 }
975 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 978 /* only send ICMP too big on first fragment */
979 if (!ipvsh->fragoffs)
980 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
976 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 981 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
977 goto tx_error_put; 982 goto tx_error_put;
978 } 983 }
@@ -1053,7 +1058,7 @@ tx_error_put:
1053 */ 1058 */
1054int 1059int
1055ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 1060ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1056 struct ip_vs_protocol *pp) 1061 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
1057{ 1062{
1058 struct rtable *rt; /* Route to the other host */ 1063 struct rtable *rt; /* Route to the other host */
1059 struct iphdr *iph = ip_hdr(skb); 1064 struct iphdr *iph = ip_hdr(skb);
@@ -1115,7 +1120,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1115#ifdef CONFIG_IP_VS_IPV6 1120#ifdef CONFIG_IP_VS_IPV6
1116int 1121int
1117ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 1122ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1118 struct ip_vs_protocol *pp) 1123 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph)
1119{ 1124{
1120 struct rt6_info *rt; /* Route to the other host */ 1125 struct rt6_info *rt; /* Route to the other host */
1121 int mtu; 1126 int mtu;
@@ -1139,7 +1144,9 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1139 1144
1140 skb->dev = net->loopback_dev; 1145 skb->dev = net->loopback_dev;
1141 } 1146 }
1142 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 1147 /* only send ICMP too big on first fragment */
1148 if (!iph->fragoffs)
1149 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1143 dst_release(&rt->dst); 1150 dst_release(&rt->dst);
1144 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 1151 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1145 goto tx_error; 1152 goto tx_error;
@@ -1183,7 +1190,8 @@ tx_error:
1183 */ 1190 */
1184int 1191int
1185ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 1192ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1186 struct ip_vs_protocol *pp, int offset, unsigned int hooknum) 1193 struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
1194 struct ip_vs_iphdr *iph)
1187{ 1195{
1188 struct rtable *rt; /* Route to the other host */ 1196 struct rtable *rt; /* Route to the other host */
1189 int mtu; 1197 int mtu;
@@ -1198,7 +1206,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1198 translate address/port back */ 1206 translate address/port back */
1199 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) { 1207 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
1200 if (cp->packet_xmit) 1208 if (cp->packet_xmit)
1201 rc = cp->packet_xmit(skb, cp, pp); 1209 rc = cp->packet_xmit(skb, cp, pp, iph);
1202 else 1210 else
1203 rc = NF_ACCEPT; 1211 rc = NF_ACCEPT;
1204 /* do not touch skb anymore */ 1212 /* do not touch skb anymore */
@@ -1227,7 +1235,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1227#if IS_ENABLED(CONFIG_NF_CONNTRACK) 1235#if IS_ENABLED(CONFIG_NF_CONNTRACK)
1228 if (cp->flags & IP_VS_CONN_F_SYNC && local) { 1236 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
1229 enum ip_conntrack_info ctinfo; 1237 enum ip_conntrack_info ctinfo;
1230 struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo); 1238 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1231 1239
1232 if (ct && !nf_ct_is_untracked(ct)) { 1240 if (ct && !nf_ct_is_untracked(ct)) {
1233 IP_VS_DBG(10, "%s(): " 1241 IP_VS_DBG(10, "%s(): "
@@ -1304,7 +1312,8 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1304#ifdef CONFIG_IP_VS_IPV6 1312#ifdef CONFIG_IP_VS_IPV6
1305int 1313int
1306ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 1314ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1307 struct ip_vs_protocol *pp, int offset, unsigned int hooknum) 1315 struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
1316 struct ip_vs_iphdr *iph)
1308{ 1317{
1309 struct rt6_info *rt; /* Route to the other host */ 1318 struct rt6_info *rt; /* Route to the other host */
1310 int mtu; 1319 int mtu;
@@ -1319,7 +1328,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1319 translate address/port back */ 1328 translate address/port back */
1320 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) { 1329 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
1321 if (cp->packet_xmit) 1330 if (cp->packet_xmit)
1322 rc = cp->packet_xmit(skb, cp, pp); 1331 rc = cp->packet_xmit(skb, cp, pp, iph);
1323 else 1332 else
1324 rc = NF_ACCEPT; 1333 rc = NF_ACCEPT;
1325 /* do not touch skb anymore */ 1334 /* do not touch skb anymore */
@@ -1347,7 +1356,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1347#if IS_ENABLED(CONFIG_NF_CONNTRACK) 1356#if IS_ENABLED(CONFIG_NF_CONNTRACK)
1348 if (cp->flags & IP_VS_CONN_F_SYNC && local) { 1357 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
1349 enum ip_conntrack_info ctinfo; 1358 enum ip_conntrack_info ctinfo;
1350 struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo); 1359 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1351 1360
1352 if (ct && !nf_ct_is_untracked(ct)) { 1361 if (ct && !nf_ct_is_untracked(ct)) {
1353 IP_VS_DBG(10, "%s(): " 1362 IP_VS_DBG(10, "%s(): "
@@ -1375,7 +1384,9 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1375 1384
1376 skb->dev = net->loopback_dev; 1385 skb->dev = net->loopback_dev;
1377 } 1386 }
1378 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 1387 /* only send ICMP too big on first fragment */
1388 if (!iph->fragoffs)
1389 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1379 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 1390 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1380 goto tx_error_put; 1391 goto tx_error_put;
1381 } 1392 }
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index d61e0782a797..7df424e2d10c 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -69,6 +69,10 @@ static int nf_conntrack_acct_init_sysctl(struct net *net)
69 69
70 table[0].data = &net->ct.sysctl_acct; 70 table[0].data = &net->ct.sysctl_acct;
71 71
72 /* Don't export sysctls to unprivileged users */
73 if (net->user_ns != &init_user_ns)
74 table[0].procname = NULL;
75
72 net->ct.acct_sysctl_header = register_net_sysctl(net, "net/netfilter", 76 net->ct.acct_sysctl_header = register_net_sysctl(net, "net/netfilter",
73 table); 77 table);
74 if (!net->ct.acct_sysctl_header) { 78 if (!net->ct.acct_sysctl_header) {
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0f241be28f9e..016d95ead930 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -221,11 +221,9 @@ destroy_conntrack(struct nf_conntrack *nfct)
221 * too. */ 221 * too. */
222 nf_ct_remove_expectations(ct); 222 nf_ct_remove_expectations(ct);
223 223
224 /* We overload first tuple to link into unconfirmed list. */ 224 /* We overload first tuple to link into unconfirmed or dying list.*/
225 if (!nf_ct_is_confirmed(ct)) { 225 BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
226 BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode)); 226 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
227 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
228 }
229 227
230 NF_CT_STAT_INC(net, delete); 228 NF_CT_STAT_INC(net, delete);
231 spin_unlock_bh(&nf_conntrack_lock); 229 spin_unlock_bh(&nf_conntrack_lock);
@@ -247,6 +245,9 @@ void nf_ct_delete_from_lists(struct nf_conn *ct)
247 * Otherwise we can get spurious warnings. */ 245 * Otherwise we can get spurious warnings. */
248 NF_CT_STAT_INC(net, delete_list); 246 NF_CT_STAT_INC(net, delete_list);
249 clean_from_lists(ct); 247 clean_from_lists(ct);
248 /* add this conntrack to the dying list */
249 hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
250 &net->ct.dying);
250 spin_unlock_bh(&nf_conntrack_lock); 251 spin_unlock_bh(&nf_conntrack_lock);
251} 252}
252EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists); 253EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);
@@ -268,31 +269,23 @@ static void death_by_event(unsigned long ul_conntrack)
268 } 269 }
269 /* we've got the event delivered, now it's dying */ 270 /* we've got the event delivered, now it's dying */
270 set_bit(IPS_DYING_BIT, &ct->status); 271 set_bit(IPS_DYING_BIT, &ct->status);
271 spin_lock(&nf_conntrack_lock);
272 hlist_nulls_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
273 spin_unlock(&nf_conntrack_lock);
274 nf_ct_put(ct); 272 nf_ct_put(ct);
275} 273}
276 274
277void nf_ct_insert_dying_list(struct nf_conn *ct) 275void nf_ct_dying_timeout(struct nf_conn *ct)
278{ 276{
279 struct net *net = nf_ct_net(ct); 277 struct net *net = nf_ct_net(ct);
280 struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct); 278 struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
281 279
282 BUG_ON(ecache == NULL); 280 BUG_ON(ecache == NULL);
283 281
284 /* add this conntrack to the dying list */
285 spin_lock_bh(&nf_conntrack_lock);
286 hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
287 &net->ct.dying);
288 spin_unlock_bh(&nf_conntrack_lock);
289 /* set a new timer to retry event delivery */ 282 /* set a new timer to retry event delivery */
290 setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct); 283 setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
291 ecache->timeout.expires = jiffies + 284 ecache->timeout.expires = jiffies +
292 (random32() % net->ct.sysctl_events_retry_timeout); 285 (random32() % net->ct.sysctl_events_retry_timeout);
293 add_timer(&ecache->timeout); 286 add_timer(&ecache->timeout);
294} 287}
295EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list); 288EXPORT_SYMBOL_GPL(nf_ct_dying_timeout);
296 289
297static void death_by_timeout(unsigned long ul_conntrack) 290static void death_by_timeout(unsigned long ul_conntrack)
298{ 291{
@@ -307,7 +300,7 @@ static void death_by_timeout(unsigned long ul_conntrack)
307 unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) { 300 unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
308 /* destroy event was not delivered */ 301 /* destroy event was not delivered */
309 nf_ct_delete_from_lists(ct); 302 nf_ct_delete_from_lists(ct);
310 nf_ct_insert_dying_list(ct); 303 nf_ct_dying_timeout(ct);
311 return; 304 return;
312 } 305 }
313 set_bit(IPS_DYING_BIT, &ct->status); 306 set_bit(IPS_DYING_BIT, &ct->status);
@@ -1416,7 +1409,7 @@ EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
1416 1409
1417int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) 1410int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1418{ 1411{
1419 int i, bucket; 1412 int i, bucket, rc;
1420 unsigned int hashsize, old_size; 1413 unsigned int hashsize, old_size;
1421 struct hlist_nulls_head *hash, *old_hash; 1414 struct hlist_nulls_head *hash, *old_hash;
1422 struct nf_conntrack_tuple_hash *h; 1415 struct nf_conntrack_tuple_hash *h;
@@ -1429,7 +1422,9 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1429 if (!nf_conntrack_htable_size) 1422 if (!nf_conntrack_htable_size)
1430 return param_set_uint(val, kp); 1423 return param_set_uint(val, kp);
1431 1424
1432 hashsize = simple_strtoul(val, NULL, 0); 1425 rc = kstrtouint(val, 0, &hashsize);
1426 if (rc)
1427 return rc;
1433 if (!hashsize) 1428 if (!hashsize)
1434 return -EINVAL; 1429 return -EINVAL;
1435 1430
@@ -1531,6 +1526,7 @@ err_extend:
1531 */ 1526 */
1532#define UNCONFIRMED_NULLS_VAL ((1<<30)+0) 1527#define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
1533#define DYING_NULLS_VAL ((1<<30)+1) 1528#define DYING_NULLS_VAL ((1<<30)+1)
1529#define TEMPLATE_NULLS_VAL ((1<<30)+2)
1534 1530
1535static int nf_conntrack_init_net(struct net *net) 1531static int nf_conntrack_init_net(struct net *net)
1536{ 1532{
@@ -1539,6 +1535,7 @@ static int nf_conntrack_init_net(struct net *net)
1539 atomic_set(&net->ct.count, 0); 1535 atomic_set(&net->ct.count, 0);
1540 INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL); 1536 INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL);
1541 INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL); 1537 INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL);
1538 INIT_HLIST_NULLS_HEAD(&net->ct.tmpl, TEMPLATE_NULLS_VAL);
1542 net->ct.stat = alloc_percpu(struct ip_conntrack_stat); 1539 net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
1543 if (!net->ct.stat) { 1540 if (!net->ct.stat) {
1544 ret = -ENOMEM; 1541 ret = -ENOMEM;
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index de9781b6464f..faa978f1714b 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -196,6 +196,10 @@ static int nf_conntrack_event_init_sysctl(struct net *net)
196 table[0].data = &net->ct.sysctl_events; 196 table[0].data = &net->ct.sysctl_events;
197 table[1].data = &net->ct.sysctl_events_retry_timeout; 197 table[1].data = &net->ct.sysctl_events_retry_timeout;
198 198
199 /* Don't export sysctls to unprivileged users */
200 if (net->user_ns != &init_user_ns)
201 table[0].procname = NULL;
202
199 net->ct.event_sysctl_header = 203 net->ct.event_sysctl_header =
200 register_net_sysctl(net, "net/netfilter", table); 204 register_net_sysctl(net, "net/netfilter", table);
201 if (!net->ct.event_sysctl_header) { 205 if (!net->ct.event_sysctl_header) {
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index c4bc637feb76..884f2b39319a 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -64,6 +64,10 @@ static int nf_conntrack_helper_init_sysctl(struct net *net)
64 64
65 table[0].data = &net->ct.sysctl_auto_assign_helper; 65 table[0].data = &net->ct.sysctl_auto_assign_helper;
66 66
67 /* Don't export sysctls to unprivileged users */
68 if (net->user_ns != &init_user_ns)
69 table[0].procname = NULL;
70
67 net->ct.helper_sysctl_header = 71 net->ct.helper_sysctl_header =
68 register_net_sysctl(net, "net/netfilter", table); 72 register_net_sysctl(net, "net/netfilter", table);
69 73
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 7bbfb3deea30..627b0e50b238 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -898,7 +898,8 @@ ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone)
898} 898}
899 899
900static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = { 900static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
901 [CTA_HELP_NAME] = { .type = NLA_NUL_STRING }, 901 [CTA_HELP_NAME] = { .type = NLA_NUL_STRING,
902 .len = NF_CT_HELPER_NAME_LEN - 1 },
902}; 903};
903 904
904static inline int 905static inline int
@@ -932,6 +933,8 @@ static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
932 [CTA_ID] = { .type = NLA_U32 }, 933 [CTA_ID] = { .type = NLA_U32 },
933 [CTA_NAT_DST] = { .type = NLA_NESTED }, 934 [CTA_NAT_DST] = { .type = NLA_NESTED },
934 [CTA_TUPLE_MASTER] = { .type = NLA_NESTED }, 935 [CTA_TUPLE_MASTER] = { .type = NLA_NESTED },
936 [CTA_NAT_SEQ_ADJ_ORIG] = { .type = NLA_NESTED },
937 [CTA_NAT_SEQ_ADJ_REPLY] = { .type = NLA_NESTED },
935 [CTA_ZONE] = { .type = NLA_U16 }, 938 [CTA_ZONE] = { .type = NLA_U16 },
936 [CTA_MARK_MASK] = { .type = NLA_U32 }, 939 [CTA_MARK_MASK] = { .type = NLA_U32 },
937}; 940};
@@ -989,7 +992,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
989 nlmsg_report(nlh)) < 0) { 992 nlmsg_report(nlh)) < 0) {
990 nf_ct_delete_from_lists(ct); 993 nf_ct_delete_from_lists(ct);
991 /* we failed to report the event, try later */ 994 /* we failed to report the event, try later */
992 nf_ct_insert_dying_list(ct); 995 nf_ct_dying_timeout(ct);
993 nf_ct_put(ct); 996 nf_ct_put(ct);
994 return 0; 997 return 0;
995 } 998 }
@@ -1089,6 +1092,112 @@ out:
1089 return err == -EAGAIN ? -ENOBUFS : err; 1092 return err == -EAGAIN ? -ENOBUFS : err;
1090} 1093}
1091 1094
1095static int ctnetlink_done_list(struct netlink_callback *cb)
1096{
1097 if (cb->args[1])
1098 nf_ct_put((struct nf_conn *)cb->args[1]);
1099 return 0;
1100}
1101
1102static int
1103ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb,
1104 struct hlist_nulls_head *list)
1105{
1106 struct nf_conn *ct, *last;
1107 struct nf_conntrack_tuple_hash *h;
1108 struct hlist_nulls_node *n;
1109 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
1110 u_int8_t l3proto = nfmsg->nfgen_family;
1111 int res;
1112
1113 if (cb->args[2])
1114 return 0;
1115
1116 spin_lock_bh(&nf_conntrack_lock);
1117 last = (struct nf_conn *)cb->args[1];
1118restart:
1119 hlist_nulls_for_each_entry(h, n, list, hnnode) {
1120 ct = nf_ct_tuplehash_to_ctrack(h);
1121 if (l3proto && nf_ct_l3num(ct) != l3proto)
1122 continue;
1123 if (cb->args[1]) {
1124 if (ct != last)
1125 continue;
1126 cb->args[1] = 0;
1127 }
1128 rcu_read_lock();
1129 res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
1130 cb->nlh->nlmsg_seq,
1131 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
1132 ct);
1133 rcu_read_unlock();
1134 if (res < 0) {
1135 nf_conntrack_get(&ct->ct_general);
1136 cb->args[1] = (unsigned long)ct;
1137 goto out;
1138 }
1139 }
1140 if (cb->args[1]) {
1141 cb->args[1] = 0;
1142 goto restart;
1143 } else
1144 cb->args[2] = 1;
1145out:
1146 spin_unlock_bh(&nf_conntrack_lock);
1147 if (last)
1148 nf_ct_put(last);
1149
1150 return skb->len;
1151}
1152
1153static int
1154ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
1155{
1156 struct net *net = sock_net(skb->sk);
1157
1158 return ctnetlink_dump_list(skb, cb, &net->ct.dying);
1159}
1160
1161static int
1162ctnetlink_get_ct_dying(struct sock *ctnl, struct sk_buff *skb,
1163 const struct nlmsghdr *nlh,
1164 const struct nlattr * const cda[])
1165{
1166 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1167 struct netlink_dump_control c = {
1168 .dump = ctnetlink_dump_dying,
1169 .done = ctnetlink_done_list,
1170 };
1171 return netlink_dump_start(ctnl, skb, nlh, &c);
1172 }
1173
1174 return -EOPNOTSUPP;
1175}
1176
1177static int
1178ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
1179{
1180 struct net *net = sock_net(skb->sk);
1181
1182 return ctnetlink_dump_list(skb, cb, &net->ct.unconfirmed);
1183}
1184
1185static int
1186ctnetlink_get_ct_unconfirmed(struct sock *ctnl, struct sk_buff *skb,
1187 const struct nlmsghdr *nlh,
1188 const struct nlattr * const cda[])
1189{
1190 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1191 struct netlink_dump_control c = {
1192 .dump = ctnetlink_dump_unconfirmed,
1193 .done = ctnetlink_done_list,
1194 };
1195 return netlink_dump_start(ctnl, skb, nlh, &c);
1196 }
1197
1198 return -EOPNOTSUPP;
1199}
1200
1092#ifdef CONFIG_NF_NAT_NEEDED 1201#ifdef CONFIG_NF_NAT_NEEDED
1093static int 1202static int
1094ctnetlink_parse_nat_setup(struct nf_conn *ct, 1203ctnetlink_parse_nat_setup(struct nf_conn *ct,
@@ -2216,7 +2325,8 @@ static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
2216 [CTA_EXPECT_MASK] = { .type = NLA_NESTED }, 2325 [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
2217 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 }, 2326 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
2218 [CTA_EXPECT_ID] = { .type = NLA_U32 }, 2327 [CTA_EXPECT_ID] = { .type = NLA_U32 },
2219 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING }, 2328 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING,
2329 .len = NF_CT_HELPER_NAME_LEN - 1 },
2220 [CTA_EXPECT_ZONE] = { .type = NLA_U16 }, 2330 [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
2221 [CTA_EXPECT_FLAGS] = { .type = NLA_U32 }, 2331 [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
2222 [CTA_EXPECT_CLASS] = { .type = NLA_U32 }, 2332 [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
@@ -2514,7 +2624,7 @@ ctnetlink_create_expect(struct net *net, u16 zone,
2514 if (!help) { 2624 if (!help) {
2515 if (!cda[CTA_EXPECT_TIMEOUT]) { 2625 if (!cda[CTA_EXPECT_TIMEOUT]) {
2516 err = -EINVAL; 2626 err = -EINVAL;
2517 goto out; 2627 goto err_out;
2518 } 2628 }
2519 exp->timeout.expires = 2629 exp->timeout.expires =
2520 jiffies + ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ; 2630 jiffies + ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
@@ -2712,6 +2822,8 @@ static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
2712 .policy = ct_nla_policy }, 2822 .policy = ct_nla_policy },
2713 [IPCTNL_MSG_CT_GET_STATS_CPU] = { .call = ctnetlink_stat_ct_cpu }, 2823 [IPCTNL_MSG_CT_GET_STATS_CPU] = { .call = ctnetlink_stat_ct_cpu },
2714 [IPCTNL_MSG_CT_GET_STATS] = { .call = ctnetlink_stat_ct }, 2824 [IPCTNL_MSG_CT_GET_STATS] = { .call = ctnetlink_stat_ct },
2825 [IPCTNL_MSG_CT_GET_DYING] = { .call = ctnetlink_get_ct_dying },
2826 [IPCTNL_MSG_CT_GET_UNCONFIRMED] = { .call = ctnetlink_get_ct_unconfirmed },
2715}; 2827};
2716 2828
2717static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = { 2829static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 6535326cf07c..a8ae287bc7af 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -815,7 +815,7 @@ static struct ctl_table dccp_sysctl_table[] = {
815}; 815};
816#endif /* CONFIG_SYSCTL */ 816#endif /* CONFIG_SYSCTL */
817 817
818static int dccp_kmemdup_sysctl_table(struct nf_proto_net *pn, 818static int dccp_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *pn,
819 struct dccp_net *dn) 819 struct dccp_net *dn)
820{ 820{
821#ifdef CONFIG_SYSCTL 821#ifdef CONFIG_SYSCTL
@@ -836,6 +836,10 @@ static int dccp_kmemdup_sysctl_table(struct nf_proto_net *pn,
836 pn->ctl_table[5].data = &dn->dccp_timeout[CT_DCCP_CLOSING]; 836 pn->ctl_table[5].data = &dn->dccp_timeout[CT_DCCP_CLOSING];
837 pn->ctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT]; 837 pn->ctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT];
838 pn->ctl_table[7].data = &dn->dccp_loose; 838 pn->ctl_table[7].data = &dn->dccp_loose;
839
840 /* Don't export sysctls to unprivileged users */
841 if (net->user_ns != &init_user_ns)
842 pn->ctl_table[0].procname = NULL;
839#endif 843#endif
840 return 0; 844 return 0;
841} 845}
@@ -857,7 +861,7 @@ static int dccp_init_net(struct net *net, u_int16_t proto)
857 dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL; 861 dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL;
858 } 862 }
859 863
860 return dccp_kmemdup_sysctl_table(pn, dn); 864 return dccp_kmemdup_sysctl_table(net, pn, dn);
861} 865}
862 866
863static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = { 867static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 61f9285111d1..83876e9877f1 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -1353,6 +1353,8 @@ static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
1353 [CTA_TIMEOUT_TCP_TIME_WAIT] = { .type = NLA_U32 }, 1353 [CTA_TIMEOUT_TCP_TIME_WAIT] = { .type = NLA_U32 },
1354 [CTA_TIMEOUT_TCP_CLOSE] = { .type = NLA_U32 }, 1354 [CTA_TIMEOUT_TCP_CLOSE] = { .type = NLA_U32 },
1355 [CTA_TIMEOUT_TCP_SYN_SENT2] = { .type = NLA_U32 }, 1355 [CTA_TIMEOUT_TCP_SYN_SENT2] = { .type = NLA_U32 },
1356 [CTA_TIMEOUT_TCP_RETRANS] = { .type = NLA_U32 },
1357 [CTA_TIMEOUT_TCP_UNACK] = { .type = NLA_U32 },
1356}; 1358};
1357#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 1359#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
1358 1360
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 9b3943252a5e..363285d544a1 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -489,6 +489,10 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
489 table[3].data = &net->ct.sysctl_checksum; 489 table[3].data = &net->ct.sysctl_checksum;
490 table[4].data = &net->ct.sysctl_log_invalid; 490 table[4].data = &net->ct.sysctl_log_invalid;
491 491
492 /* Don't export sysctls to unprivileged users */
493 if (net->user_ns != &init_user_ns)
494 table[0].procname = NULL;
495
492 net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table); 496 net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table);
493 if (!net->ct.sysctl_header) 497 if (!net->ct.sysctl_header)
494 goto out_unregister_netfilter; 498 goto out_unregister_netfilter;
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
index dbb364f62d6f..7ea8026f07c9 100644
--- a/net/netfilter/nf_conntrack_timestamp.c
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -51,6 +51,10 @@ static int nf_conntrack_tstamp_init_sysctl(struct net *net)
51 51
52 table[0].data = &net->ct.sysctl_tstamp; 52 table[0].data = &net->ct.sysctl_tstamp;
53 53
54 /* Don't export sysctls to unprivileged users */
55 if (net->user_ns != &init_user_ns)
56 table[0].procname = NULL;
57
54 net->ct.tstamp_sysctl_header = register_net_sysctl(net, "net/netfilter", 58 net->ct.tstamp_sysctl_header = register_net_sysctl(net, "net/netfilter",
55 table); 59 table);
56 if (!net->ct.tstamp_sysctl_header) { 60 if (!net->ct.tstamp_sysctl_header) {
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 703fb26aa48d..9e312695c818 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -32,7 +32,7 @@ static struct nf_logger *__find_logger(int pf, const char *str_logger)
32 return NULL; 32 return NULL;
33} 33}
34 34
35/* return EEXIST if the same logger is registred, 0 on success. */ 35/* return EEXIST if the same logger is registered, 0 on success. */
36int nf_log_register(u_int8_t pf, struct nf_logger *logger) 36int nf_log_register(u_int8_t pf, struct nf_logger *logger)
37{ 37{
38 const struct nf_logger *llog; 38 const struct nf_logger *llog;
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 8d2cf9ec37a8..d812c1235b30 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -14,84 +14,32 @@
14#include "nf_internals.h" 14#include "nf_internals.h"
15 15
16/* 16/*
17 * A queue handler may be registered for each protocol. Each is protected by 17 * Hook for nfnetlink_queue to register its queue handler.
18 * long term mutex. The handler must provide an an outfn() to accept packets 18 * We do this so that most of the NFQUEUE code can be modular.
19 * for queueing and must reinject all packets it receives, no matter what. 19 *
20 * Once the queue is registered it must reinject all packets it
21 * receives, no matter what.
20 */ 22 */
21static const struct nf_queue_handler __rcu *queue_handler[NFPROTO_NUMPROTO] __read_mostly; 23static const struct nf_queue_handler __rcu *queue_handler __read_mostly;
22
23static DEFINE_MUTEX(queue_handler_mutex);
24 24
25/* return EBUSY when somebody else is registered, return EEXIST if the 25/* return EBUSY when somebody else is registered, return EEXIST if the
26 * same handler is registered, return 0 in case of success. */ 26 * same handler is registered, return 0 in case of success. */
27int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh) 27void nf_register_queue_handler(const struct nf_queue_handler *qh)
28{ 28{
29 int ret; 29 /* should never happen, we only have one queueing backend in kernel */
30 const struct nf_queue_handler *old; 30 WARN_ON(rcu_access_pointer(queue_handler));
31 31 rcu_assign_pointer(queue_handler, qh);
32 if (pf >= ARRAY_SIZE(queue_handler))
33 return -EINVAL;
34
35 mutex_lock(&queue_handler_mutex);
36 old = rcu_dereference_protected(queue_handler[pf],
37 lockdep_is_held(&queue_handler_mutex));
38 if (old == qh)
39 ret = -EEXIST;
40 else if (old)
41 ret = -EBUSY;
42 else {
43 rcu_assign_pointer(queue_handler[pf], qh);
44 ret = 0;
45 }
46 mutex_unlock(&queue_handler_mutex);
47
48 return ret;
49} 32}
50EXPORT_SYMBOL(nf_register_queue_handler); 33EXPORT_SYMBOL(nf_register_queue_handler);
51 34
52/* The caller must flush their queue before this */ 35/* The caller must flush their queue before this */
53int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh) 36void nf_unregister_queue_handler(void)
54{ 37{
55 const struct nf_queue_handler *old; 38 RCU_INIT_POINTER(queue_handler, NULL);
56
57 if (pf >= ARRAY_SIZE(queue_handler))
58 return -EINVAL;
59
60 mutex_lock(&queue_handler_mutex);
61 old = rcu_dereference_protected(queue_handler[pf],
62 lockdep_is_held(&queue_handler_mutex));
63 if (old && old != qh) {
64 mutex_unlock(&queue_handler_mutex);
65 return -EINVAL;
66 }
67
68 RCU_INIT_POINTER(queue_handler[pf], NULL);
69 mutex_unlock(&queue_handler_mutex);
70
71 synchronize_rcu(); 39 synchronize_rcu();
72
73 return 0;
74} 40}
75EXPORT_SYMBOL(nf_unregister_queue_handler); 41EXPORT_SYMBOL(nf_unregister_queue_handler);
76 42
77void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
78{
79 u_int8_t pf;
80
81 mutex_lock(&queue_handler_mutex);
82 for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++) {
83 if (rcu_dereference_protected(
84 queue_handler[pf],
85 lockdep_is_held(&queue_handler_mutex)
86 ) == qh)
87 RCU_INIT_POINTER(queue_handler[pf], NULL);
88 }
89 mutex_unlock(&queue_handler_mutex);
90
91 synchronize_rcu();
92}
93EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
94
95static void nf_queue_entry_release_refs(struct nf_queue_entry *entry) 43static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
96{ 44{
97 /* Release those devices we held, or Alexey will kill me. */ 45 /* Release those devices we held, or Alexey will kill me. */
@@ -137,7 +85,7 @@ static int __nf_queue(struct sk_buff *skb,
137 /* QUEUE == DROP if no one is waiting, to be safe. */ 85 /* QUEUE == DROP if no one is waiting, to be safe. */
138 rcu_read_lock(); 86 rcu_read_lock();
139 87
140 qh = rcu_dereference(queue_handler[pf]); 88 qh = rcu_dereference(queue_handler);
141 if (!qh) { 89 if (!qh) {
142 status = -ESRCH; 90 status = -ESRCH;
143 goto err_unlock; 91 goto err_unlock;
@@ -344,77 +292,3 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
344 kfree(entry); 292 kfree(entry);
345} 293}
346EXPORT_SYMBOL(nf_reinject); 294EXPORT_SYMBOL(nf_reinject);
347
348#ifdef CONFIG_PROC_FS
349static void *seq_start(struct seq_file *seq, loff_t *pos)
350{
351 if (*pos >= ARRAY_SIZE(queue_handler))
352 return NULL;
353
354 return pos;
355}
356
357static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
358{
359 (*pos)++;
360
361 if (*pos >= ARRAY_SIZE(queue_handler))
362 return NULL;
363
364 return pos;
365}
366
367static void seq_stop(struct seq_file *s, void *v)
368{
369
370}
371
372static int seq_show(struct seq_file *s, void *v)
373{
374 int ret;
375 loff_t *pos = v;
376 const struct nf_queue_handler *qh;
377
378 rcu_read_lock();
379 qh = rcu_dereference(queue_handler[*pos]);
380 if (!qh)
381 ret = seq_printf(s, "%2lld NONE\n", *pos);
382 else
383 ret = seq_printf(s, "%2lld %s\n", *pos, qh->name);
384 rcu_read_unlock();
385
386 return ret;
387}
388
389static const struct seq_operations nfqueue_seq_ops = {
390 .start = seq_start,
391 .next = seq_next,
392 .stop = seq_stop,
393 .show = seq_show,
394};
395
396static int nfqueue_open(struct inode *inode, struct file *file)
397{
398 return seq_open(file, &nfqueue_seq_ops);
399}
400
401static const struct file_operations nfqueue_file_ops = {
402 .owner = THIS_MODULE,
403 .open = nfqueue_open,
404 .read = seq_read,
405 .llseek = seq_lseek,
406 .release = seq_release,
407};
408#endif /* PROC_FS */
409
410
411int __init netfilter_queue_init(void)
412{
413#ifdef CONFIG_PROC_FS
414 if (!proc_create("nf_queue", S_IRUGO,
415 proc_net_netfilter, &nfqueue_file_ops))
416 return -1;
417#endif
418 return 0;
419}
420
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index ffb92c03a358..58a09b7c3f6d 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -138,7 +138,7 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
138 const struct nfnetlink_subsystem *ss; 138 const struct nfnetlink_subsystem *ss;
139 int type, err; 139 int type, err;
140 140
141 if (!capable(CAP_NET_ADMIN)) 141 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
142 return -EPERM; 142 return -EPERM;
143 143
144 /* All the messages must at least contain nfgenmsg */ 144 /* All the messages must at least contain nfgenmsg */
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 8847b4d8be06..701c88a20fea 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -41,7 +41,8 @@ MODULE_DESCRIPTION("cttimeout: Extended Netfilter Connection Tracking timeout tu
41static LIST_HEAD(cttimeout_list); 41static LIST_HEAD(cttimeout_list);
42 42
43static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = { 43static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = {
44 [CTA_TIMEOUT_NAME] = { .type = NLA_NUL_STRING }, 44 [CTA_TIMEOUT_NAME] = { .type = NLA_NUL_STRING,
45 .len = CTNL_TIMEOUT_NAME_MAX - 1},
45 [CTA_TIMEOUT_L3PROTO] = { .type = NLA_U16 }, 46 [CTA_TIMEOUT_L3PROTO] = { .type = NLA_U16 },
46 [CTA_TIMEOUT_L4PROTO] = { .type = NLA_U8 }, 47 [CTA_TIMEOUT_L4PROTO] = { .type = NLA_U8 },
47 [CTA_TIMEOUT_DATA] = { .type = NLA_NESTED }, 48 [CTA_TIMEOUT_DATA] = { .type = NLA_NESTED },
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 9f199f2e31fa..92fd8eca0d31 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -13,6 +13,7 @@
13 */ 13 */
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/skbuff.h> 15#include <linux/skbuff.h>
16#include <linux/if_arp.h>
16#include <linux/init.h> 17#include <linux/init.h>
17#include <linux/ip.h> 18#include <linux/ip.h>
18#include <linux/ipv6.h> 19#include <linux/ipv6.h>
@@ -384,6 +385,7 @@ __build_packet_message(struct nfulnl_instance *inst,
384 struct nfgenmsg *nfmsg; 385 struct nfgenmsg *nfmsg;
385 sk_buff_data_t old_tail = inst->skb->tail; 386 sk_buff_data_t old_tail = inst->skb->tail;
386 struct sock *sk; 387 struct sock *sk;
388 const unsigned char *hwhdrp;
387 389
388 nlh = nlmsg_put(inst->skb, 0, 0, 390 nlh = nlmsg_put(inst->skb, 0, 0,
389 NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET, 391 NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
@@ -485,9 +487,17 @@ __build_packet_message(struct nfulnl_instance *inst,
485 if (indev && skb_mac_header_was_set(skb)) { 487 if (indev && skb_mac_header_was_set(skb)) {
486 if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) || 488 if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
487 nla_put_be16(inst->skb, NFULA_HWLEN, 489 nla_put_be16(inst->skb, NFULA_HWLEN,
488 htons(skb->dev->hard_header_len)) || 490 htons(skb->dev->hard_header_len)))
489 nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len, 491 goto nla_put_failure;
490 skb_mac_header(skb))) 492
493 hwhdrp = skb_mac_header(skb);
494
495 if (skb->dev->type == ARPHRD_SIT)
496 hwhdrp -= ETH_HLEN;
497
498 if (hwhdrp >= skb->head &&
499 nla_put(inst->skb, NFULA_HWHEADER,
500 skb->dev->hard_header_len, hwhdrp))
491 goto nla_put_failure; 501 goto nla_put_failure;
492 } 502 }
493 503
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index e12d44e75b21..3158d87b56a8 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -809,7 +809,6 @@ static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
809}; 809};
810 810
811static const struct nf_queue_handler nfqh = { 811static const struct nf_queue_handler nfqh = {
812 .name = "nf_queue",
813 .outfn = &nfqnl_enqueue_packet, 812 .outfn = &nfqnl_enqueue_packet,
814}; 813};
815 814
@@ -827,14 +826,10 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
827 if (nfqa[NFQA_CFG_CMD]) { 826 if (nfqa[NFQA_CFG_CMD]) {
828 cmd = nla_data(nfqa[NFQA_CFG_CMD]); 827 cmd = nla_data(nfqa[NFQA_CFG_CMD]);
829 828
830 /* Commands without queue context - might sleep */ 829 /* Obsolete commands without queue context */
831 switch (cmd->command) { 830 switch (cmd->command) {
832 case NFQNL_CFG_CMD_PF_BIND: 831 case NFQNL_CFG_CMD_PF_BIND: return 0;
833 return nf_register_queue_handler(ntohs(cmd->pf), 832 case NFQNL_CFG_CMD_PF_UNBIND: return 0;
834 &nfqh);
835 case NFQNL_CFG_CMD_PF_UNBIND:
836 return nf_unregister_queue_handler(ntohs(cmd->pf),
837 &nfqh);
838 } 833 }
839 } 834 }
840 835
@@ -1074,6 +1069,7 @@ static int __init nfnetlink_queue_init(void)
1074#endif 1069#endif
1075 1070
1076 register_netdevice_notifier(&nfqnl_dev_notifier); 1071 register_netdevice_notifier(&nfqnl_dev_notifier);
1072 nf_register_queue_handler(&nfqh);
1077 return status; 1073 return status;
1078 1074
1079#ifdef CONFIG_PROC_FS 1075#ifdef CONFIG_PROC_FS
@@ -1087,7 +1083,7 @@ cleanup_netlink_notifier:
1087 1083
1088static void __exit nfnetlink_queue_fini(void) 1084static void __exit nfnetlink_queue_fini(void)
1089{ 1085{
1090 nf_unregister_queue_handlers(&nfqh); 1086 nf_unregister_queue_handler();
1091 unregister_netdevice_notifier(&nfqnl_dev_notifier); 1087 unregister_netdevice_notifier(&nfqnl_dev_notifier);
1092#ifdef CONFIG_PROC_FS 1088#ifdef CONFIG_PROC_FS
1093 remove_proc_entry("nfnetlink_queue", proc_net_netfilter); 1089 remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index ae7f5daeee43..2a0843081840 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -149,6 +149,10 @@ static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
149 149
150 __set_bit(IPS_TEMPLATE_BIT, &ct->status); 150 __set_bit(IPS_TEMPLATE_BIT, &ct->status);
151 __set_bit(IPS_CONFIRMED_BIT, &ct->status); 151 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
152
153 /* Overload tuple linked list to put us in template list. */
154 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
155 &par->net->ct.tmpl);
152out: 156out:
153 info->ct = ct; 157 info->ct = ct;
154 return 0; 158 return 0;
@@ -289,6 +293,10 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
289 293
290 __set_bit(IPS_TEMPLATE_BIT, &ct->status); 294 __set_bit(IPS_TEMPLATE_BIT, &ct->status);
291 __set_bit(IPS_CONFIRMED_BIT, &ct->status); 295 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
296
297 /* Overload tuple linked list to put us in template list. */
298 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
299 &par->net->ct.tmpl);
292out: 300out:
293 info->ct = ct; 301 info->ct = ct;
294 return 0; 302 return 0;
@@ -377,14 +385,60 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = {
377 }, 385 },
378}; 386};
379 387
388static unsigned int
389notrack_tg(struct sk_buff *skb, const struct xt_action_param *par)
390{
391 /* Previously seen (loopback)? Ignore. */
392 if (skb->nfct != NULL)
393 return XT_CONTINUE;
394
395 skb->nfct = &nf_ct_untracked_get()->ct_general;
396 skb->nfctinfo = IP_CT_NEW;
397 nf_conntrack_get(skb->nfct);
398
399 return XT_CONTINUE;
400}
401
402static int notrack_chk(const struct xt_tgchk_param *par)
403{
404 if (!par->net->xt.notrack_deprecated_warning) {
405 pr_info("netfilter: NOTRACK target is deprecated, "
406 "use CT instead or upgrade iptables\n");
407 par->net->xt.notrack_deprecated_warning = true;
408 }
409 return 0;
410}
411
412static struct xt_target notrack_tg_reg __read_mostly = {
413 .name = "NOTRACK",
414 .revision = 0,
415 .family = NFPROTO_UNSPEC,
416 .checkentry = notrack_chk,
417 .target = notrack_tg,
418 .table = "raw",
419 .me = THIS_MODULE,
420};
421
380static int __init xt_ct_tg_init(void) 422static int __init xt_ct_tg_init(void)
381{ 423{
382 return xt_register_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg)); 424 int ret;
425
426 ret = xt_register_target(&notrack_tg_reg);
427 if (ret < 0)
428 return ret;
429
430 ret = xt_register_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
431 if (ret < 0) {
432 xt_unregister_target(&notrack_tg_reg);
433 return ret;
434 }
435 return 0;
383} 436}
384 437
385static void __exit xt_ct_tg_exit(void) 438static void __exit xt_ct_tg_exit(void)
386{ 439{
387 xt_unregister_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg)); 440 xt_unregister_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
441 xt_unregister_target(&notrack_tg_reg);
388} 442}
389 443
390module_init(xt_ct_tg_init); 444module_init(xt_ct_tg_init);
@@ -394,3 +448,5 @@ MODULE_LICENSE("GPL");
394MODULE_DESCRIPTION("Xtables: connection tracking target"); 448MODULE_DESCRIPTION("Xtables: connection tracking target");
395MODULE_ALIAS("ipt_CT"); 449MODULE_ALIAS("ipt_CT");
396MODULE_ALIAS("ip6t_CT"); 450MODULE_ALIAS("ip6t_CT");
451MODULE_ALIAS("ipt_NOTRACK");
452MODULE_ALIAS("ip6t_NOTRACK");
diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c
index 1686ca1b53a1..73b73f687c58 100644
--- a/net/netfilter/xt_HMARK.c
+++ b/net/netfilter/xt_HMARK.c
@@ -167,7 +167,7 @@ hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t,
167 const struct xt_hmark_info *info) 167 const struct xt_hmark_info *info)
168{ 168{
169 struct ipv6hdr *ip6, _ip6; 169 struct ipv6hdr *ip6, _ip6;
170 int flag = IP6T_FH_F_AUTH; 170 int flag = IP6_FH_F_AUTH;
171 unsigned int nhoff = 0; 171 unsigned int nhoff = 0;
172 u16 fragoff = 0; 172 u16 fragoff = 0;
173 int nexthdr; 173 int nexthdr;
@@ -177,7 +177,7 @@ hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t,
177 if (nexthdr < 0) 177 if (nexthdr < 0)
178 return 0; 178 return 0;
179 /* No need to check for icmp errors on fragments */ 179 /* No need to check for icmp errors on fragments */
180 if ((flag & IP6T_FH_F_FRAG) || (nexthdr != IPPROTO_ICMPV6)) 180 if ((flag & IP6_FH_F_FRAG) || (nexthdr != IPPROTO_ICMPV6))
181 goto noicmp; 181 goto noicmp;
182 /* Use inner header in case of ICMP errors */ 182 /* Use inner header in case of ICMP errors */
183 if (get_inner6_hdr(skb, &nhoff)) { 183 if (get_inner6_hdr(skb, &nhoff)) {
@@ -185,7 +185,7 @@ hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t,
185 if (ip6 == NULL) 185 if (ip6 == NULL)
186 return -1; 186 return -1;
187 /* If AH present, use SPI like in ESP. */ 187 /* If AH present, use SPI like in ESP. */
188 flag = IP6T_FH_F_AUTH; 188 flag = IP6_FH_F_AUTH;
189 nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag); 189 nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag);
190 if (nexthdr < 0) 190 if (nexthdr < 0)
191 return -1; 191 return -1;
@@ -201,7 +201,7 @@ noicmp:
201 if (t->proto == IPPROTO_ICMPV6) 201 if (t->proto == IPPROTO_ICMPV6)
202 return 0; 202 return 0;
203 203
204 if (flag & IP6T_FH_F_FRAG) 204 if (flag & IP6_FH_F_FRAG)
205 return 0; 205 return 0;
206 206
207 hmark_set_tuple_ports(skb, nhoff, t, info); 207 hmark_set_tuple_ports(skb, nhoff, t, info);
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 26a668a84aa2..a9d7af953ceb 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -157,11 +157,22 @@ dsthash_find(const struct xt_hashlimit_htable *ht,
157/* allocate dsthash_ent, initialize dst, put in htable and lock it */ 157/* allocate dsthash_ent, initialize dst, put in htable and lock it */
158static struct dsthash_ent * 158static struct dsthash_ent *
159dsthash_alloc_init(struct xt_hashlimit_htable *ht, 159dsthash_alloc_init(struct xt_hashlimit_htable *ht,
160 const struct dsthash_dst *dst) 160 const struct dsthash_dst *dst, bool *race)
161{ 161{
162 struct dsthash_ent *ent; 162 struct dsthash_ent *ent;
163 163
164 spin_lock(&ht->lock); 164 spin_lock(&ht->lock);
165
166 /* Two or more packets may race to create the same entry in the
167 * hashtable, double check if this packet lost race.
168 */
169 ent = dsthash_find(ht, dst);
170 if (ent != NULL) {
171 spin_unlock(&ht->lock);
172 *race = true;
173 return ent;
174 }
175
165 /* initialize hash with random val at the time we allocate 176 /* initialize hash with random val at the time we allocate
166 * the first hashtable entry */ 177 * the first hashtable entry */
167 if (unlikely(!ht->rnd_initialized)) { 178 if (unlikely(!ht->rnd_initialized)) {
@@ -318,7 +329,10 @@ static void htable_destroy(struct xt_hashlimit_htable *hinfo)
318 parent = hashlimit_net->ipt_hashlimit; 329 parent = hashlimit_net->ipt_hashlimit;
319 else 330 else
320 parent = hashlimit_net->ip6t_hashlimit; 331 parent = hashlimit_net->ip6t_hashlimit;
321 remove_proc_entry(hinfo->pde->name, parent); 332
333 if(parent != NULL)
334 remove_proc_entry(hinfo->pde->name, parent);
335
322 htable_selective_cleanup(hinfo, select_all); 336 htable_selective_cleanup(hinfo, select_all);
323 vfree(hinfo); 337 vfree(hinfo);
324} 338}
@@ -585,6 +599,7 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
585 unsigned long now = jiffies; 599 unsigned long now = jiffies;
586 struct dsthash_ent *dh; 600 struct dsthash_ent *dh;
587 struct dsthash_dst dst; 601 struct dsthash_dst dst;
602 bool race = false;
588 u32 cost; 603 u32 cost;
589 604
590 if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0) 605 if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
@@ -593,13 +608,18 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
593 rcu_read_lock_bh(); 608 rcu_read_lock_bh();
594 dh = dsthash_find(hinfo, &dst); 609 dh = dsthash_find(hinfo, &dst);
595 if (dh == NULL) { 610 if (dh == NULL) {
596 dh = dsthash_alloc_init(hinfo, &dst); 611 dh = dsthash_alloc_init(hinfo, &dst, &race);
597 if (dh == NULL) { 612 if (dh == NULL) {
598 rcu_read_unlock_bh(); 613 rcu_read_unlock_bh();
599 goto hotdrop; 614 goto hotdrop;
615 } else if (race) {
616 /* Already got an entry, update expiration timeout */
617 dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
618 rateinfo_recalc(dh, now, hinfo->cfg.mode);
619 } else {
620 dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
621 rateinfo_init(dh, hinfo);
600 } 622 }
601 dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
602 rateinfo_init(dh, hinfo);
603 } else { 623 } else {
604 /* update expiration timeout */ 624 /* update expiration timeout */
605 dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire); 625 dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
@@ -856,6 +876,27 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
856 876
857static void __net_exit hashlimit_proc_net_exit(struct net *net) 877static void __net_exit hashlimit_proc_net_exit(struct net *net)
858{ 878{
879 struct xt_hashlimit_htable *hinfo;
880 struct hlist_node *pos;
881 struct proc_dir_entry *pde;
882 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
883
884 /* recent_net_exit() is called before recent_mt_destroy(). Make sure
885 * that the parent xt_recent proc entry is is empty before trying to
886 * remove it.
887 */
888 mutex_lock(&hashlimit_mutex);
889 pde = hashlimit_net->ipt_hashlimit;
890 if (pde == NULL)
891 pde = hashlimit_net->ip6t_hashlimit;
892
893 hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node)
894 remove_proc_entry(hinfo->pde->name, pde);
895
896 hashlimit_net->ipt_hashlimit = NULL;
897 hashlimit_net->ip6t_hashlimit = NULL;
898 mutex_unlock(&hashlimit_mutex);
899
859 proc_net_remove(net, "ipt_hashlimit"); 900 proc_net_remove(net, "ipt_hashlimit");
860#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) 901#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
861 proc_net_remove(net, "ip6t_hashlimit"); 902 proc_net_remove(net, "ip6t_hashlimit");
@@ -872,9 +913,6 @@ static int __net_init hashlimit_net_init(struct net *net)
872 913
873static void __net_exit hashlimit_net_exit(struct net *net) 914static void __net_exit hashlimit_net_exit(struct net *net)
874{ 915{
875 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
876
877 BUG_ON(!hlist_empty(&hashlimit_net->htables));
878 hashlimit_proc_net_exit(net); 916 hashlimit_proc_net_exit(net);
879} 917}
880 918
diff --git a/net/netfilter/xt_ipvs.c b/net/netfilter/xt_ipvs.c
index bb10b0717f1b..8d47c3780fda 100644
--- a/net/netfilter/xt_ipvs.c
+++ b/net/netfilter/xt_ipvs.c
@@ -67,7 +67,7 @@ ipvs_mt(const struct sk_buff *skb, struct xt_action_param *par)
67 goto out; 67 goto out;
68 } 68 }
69 69
70 ip_vs_fill_iphdr(family, skb_network_header(skb), &iph); 70 ip_vs_fill_iph_skb(family, skb, &iph);
71 71
72 if (data->bitmask & XT_IPVS_PROTO) 72 if (data->bitmask & XT_IPVS_PROTO)
73 if ((iph.protocol == data->l4proto) ^ 73 if ((iph.protocol == data->l4proto) ^
@@ -85,7 +85,7 @@ ipvs_mt(const struct sk_buff *skb, struct xt_action_param *par)
85 /* 85 /*
86 * Check if the packet belongs to an existing entry 86 * Check if the packet belongs to an existing entry
87 */ 87 */
88 cp = pp->conn_out_get(family, skb, &iph, iph.len, 1 /* inverse */); 88 cp = pp->conn_out_get(family, skb, &iph, 1 /* inverse */);
89 if (unlikely(cp == NULL)) { 89 if (unlikely(cp == NULL)) {
90 match = false; 90 match = false;
91 goto out; 91 goto out;
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 4635c9b00459..978efc9b555a 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -29,6 +29,7 @@
29#include <linux/skbuff.h> 29#include <linux/skbuff.h>
30#include <linux/inet.h> 30#include <linux/inet.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/vmalloc.h>
32#include <net/net_namespace.h> 33#include <net/net_namespace.h>
33#include <net/netns/generic.h> 34#include <net/netns/generic.h>
34 35
@@ -310,6 +311,14 @@ out:
310 return ret; 311 return ret;
311} 312}
312 313
314static void recent_table_free(void *addr)
315{
316 if (is_vmalloc_addr(addr))
317 vfree(addr);
318 else
319 kfree(addr);
320}
321
313static int recent_mt_check(const struct xt_mtchk_param *par, 322static int recent_mt_check(const struct xt_mtchk_param *par,
314 const struct xt_recent_mtinfo_v1 *info) 323 const struct xt_recent_mtinfo_v1 *info)
315{ 324{
@@ -322,6 +331,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
322#endif 331#endif
323 unsigned int i; 332 unsigned int i;
324 int ret = -EINVAL; 333 int ret = -EINVAL;
334 size_t sz;
325 335
326 if (unlikely(!hash_rnd_inited)) { 336 if (unlikely(!hash_rnd_inited)) {
327 get_random_bytes(&hash_rnd, sizeof(hash_rnd)); 337 get_random_bytes(&hash_rnd, sizeof(hash_rnd));
@@ -360,8 +370,11 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
360 goto out; 370 goto out;
361 } 371 }
362 372
363 t = kzalloc(sizeof(*t) + sizeof(t->iphash[0]) * ip_list_hash_size, 373 sz = sizeof(*t) + sizeof(t->iphash[0]) * ip_list_hash_size;
364 GFP_KERNEL); 374 if (sz <= PAGE_SIZE)
375 t = kzalloc(sz, GFP_KERNEL);
376 else
377 t = vzalloc(sz);
365 if (t == NULL) { 378 if (t == NULL) {
366 ret = -ENOMEM; 379 ret = -ENOMEM;
367 goto out; 380 goto out;
@@ -377,14 +390,14 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
377 uid = make_kuid(&init_user_ns, ip_list_uid); 390 uid = make_kuid(&init_user_ns, ip_list_uid);
378 gid = make_kgid(&init_user_ns, ip_list_gid); 391 gid = make_kgid(&init_user_ns, ip_list_gid);
379 if (!uid_valid(uid) || !gid_valid(gid)) { 392 if (!uid_valid(uid) || !gid_valid(gid)) {
380 kfree(t); 393 recent_table_free(t);
381 ret = -EINVAL; 394 ret = -EINVAL;
382 goto out; 395 goto out;
383 } 396 }
384 pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent, 397 pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent,
385 &recent_mt_fops, t); 398 &recent_mt_fops, t);
386 if (pde == NULL) { 399 if (pde == NULL) {
387 kfree(t); 400 recent_table_free(t);
388 ret = -ENOMEM; 401 ret = -ENOMEM;
389 goto out; 402 goto out;
390 } 403 }
@@ -431,10 +444,11 @@ static void recent_mt_destroy(const struct xt_mtdtor_param *par)
431 list_del(&t->list); 444 list_del(&t->list);
432 spin_unlock_bh(&recent_lock); 445 spin_unlock_bh(&recent_lock);
433#ifdef CONFIG_PROC_FS 446#ifdef CONFIG_PROC_FS
434 remove_proc_entry(t->name, recent_net->xt_recent); 447 if (recent_net->xt_recent != NULL)
448 remove_proc_entry(t->name, recent_net->xt_recent);
435#endif 449#endif
436 recent_table_flush(t); 450 recent_table_flush(t);
437 kfree(t); 451 recent_table_free(t);
438 } 452 }
439 mutex_unlock(&recent_mutex); 453 mutex_unlock(&recent_mutex);
440} 454}
@@ -615,6 +629,20 @@ static int __net_init recent_proc_net_init(struct net *net)
615 629
616static void __net_exit recent_proc_net_exit(struct net *net) 630static void __net_exit recent_proc_net_exit(struct net *net)
617{ 631{
632 struct recent_net *recent_net = recent_pernet(net);
633 struct recent_table *t;
634
635 /* recent_net_exit() is called before recent_mt_destroy(). Make sure
636 * that the parent xt_recent proc entry is is empty before trying to
637 * remove it.
638 */
639 spin_lock_bh(&recent_lock);
640 list_for_each_entry(t, &recent_net->tables, list)
641 remove_proc_entry(t->name, recent_net->xt_recent);
642
643 recent_net->xt_recent = NULL;
644 spin_unlock_bh(&recent_lock);
645
618 proc_net_remove(net, "xt_recent"); 646 proc_net_remove(net, "xt_recent");
619} 647}
620#else 648#else
@@ -638,9 +666,6 @@ static int __net_init recent_net_init(struct net *net)
638 666
639static void __net_exit recent_net_exit(struct net *net) 667static void __net_exit recent_net_exit(struct net *net)
640{ 668{
641 struct recent_net *recent_net = recent_pernet(net);
642
643 BUG_ON(!list_empty(&recent_net->tables));
644 recent_proc_net_exit(net); 669 recent_proc_net_exit(net);
645} 670}
646 671
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 4da797fa5ec5..c0353d55d56f 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -612,7 +612,7 @@ retry:
612static inline int netlink_capable(const struct socket *sock, unsigned int flag) 612static inline int netlink_capable(const struct socket *sock, unsigned int flag)
613{ 613{
614 return (nl_table[sock->sk->sk_protocol].flags & flag) || 614 return (nl_table[sock->sk->sk_protocol].flags & flag) ||
615 capable(CAP_NET_ADMIN); 615 ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
616} 616}
617 617
618static void 618static void
@@ -669,6 +669,9 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
669 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; 669 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
670 int err; 670 int err;
671 671
672 if (addr_len < sizeof(struct sockaddr_nl))
673 return -EINVAL;
674
672 if (nladdr->nl_family != AF_NETLINK) 675 if (nladdr->nl_family != AF_NETLINK)
673 return -EINVAL; 676 return -EINVAL;
674 677
@@ -2059,7 +2062,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
2059 struct sock *s = v; 2062 struct sock *s = v;
2060 struct netlink_sock *nlk = nlk_sk(s); 2063 struct netlink_sock *nlk = nlk_sk(s);
2061 2064
2062 seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n", 2065 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
2063 s, 2066 s,
2064 s->sk_protocol, 2067 s->sk_protocol,
2065 nlk->portid, 2068 nlk->portid,
diff --git a/net/nfc/Kconfig b/net/nfc/Kconfig
index 8d8d9bc4b6ff..60c3bbb63e8e 100644
--- a/net/nfc/Kconfig
+++ b/net/nfc/Kconfig
@@ -3,8 +3,8 @@
3# 3#
4 4
5menuconfig NFC 5menuconfig NFC
6 depends on NET && EXPERIMENTAL 6 depends on NET
7 tristate "NFC subsystem support (EXPERIMENTAL)" 7 tristate "NFC subsystem support"
8 default n 8 default n
9 help 9 help
10 Say Y here if you want to build support for NFC (Near field 10 Say Y here if you want to build support for NFC (Near field
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 479bee36dc3e..aa64ea441676 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -40,6 +40,9 @@
40int nfc_devlist_generation; 40int nfc_devlist_generation;
41DEFINE_MUTEX(nfc_devlist_mutex); 41DEFINE_MUTEX(nfc_devlist_mutex);
42 42
43/* NFC device ID bitmap */
44static DEFINE_IDA(nfc_index_ida);
45
43/** 46/**
44 * nfc_dev_up - turn on the NFC device 47 * nfc_dev_up - turn on the NFC device
45 * 48 *
@@ -181,6 +184,7 @@ int nfc_stop_poll(struct nfc_dev *dev)
181 184
182 dev->ops->stop_poll(dev); 185 dev->ops->stop_poll(dev);
183 dev->polling = false; 186 dev->polling = false;
187 dev->rf_mode = NFC_RF_NONE;
184 188
185error: 189error:
186 device_unlock(&dev->dev); 190 device_unlock(&dev->dev);
@@ -194,7 +198,7 @@ static struct nfc_target *nfc_find_target(struct nfc_dev *dev, u32 target_idx)
194 if (dev->n_targets == 0) 198 if (dev->n_targets == 0)
195 return NULL; 199 return NULL;
196 200
197 for (i = 0; i < dev->n_targets ; i++) { 201 for (i = 0; i < dev->n_targets; i++) {
198 if (dev->targets[i].idx == target_idx) 202 if (dev->targets[i].idx == target_idx)
199 return &dev->targets[i]; 203 return &dev->targets[i];
200 } 204 }
@@ -274,12 +278,14 @@ int nfc_dep_link_down(struct nfc_dev *dev)
274 if (!rc) { 278 if (!rc) {
275 dev->dep_link_up = false; 279 dev->dep_link_up = false;
276 dev->active_target = NULL; 280 dev->active_target = NULL;
281 dev->rf_mode = NFC_RF_NONE;
277 nfc_llcp_mac_is_down(dev); 282 nfc_llcp_mac_is_down(dev);
278 nfc_genl_dep_link_down_event(dev); 283 nfc_genl_dep_link_down_event(dev);
279 } 284 }
280 285
281error: 286error:
282 device_unlock(&dev->dev); 287 device_unlock(&dev->dev);
288
283 return rc; 289 return rc;
284} 290}
285 291
@@ -503,6 +509,7 @@ EXPORT_SYMBOL(nfc_tm_activated);
503int nfc_tm_deactivated(struct nfc_dev *dev) 509int nfc_tm_deactivated(struct nfc_dev *dev)
504{ 510{
505 dev->dep_link_up = false; 511 dev->dep_link_up = false;
512 dev->rf_mode = NFC_RF_NONE;
506 513
507 return nfc_genl_tm_deactivated(dev); 514 return nfc_genl_tm_deactivated(dev);
508} 515}
@@ -697,6 +704,8 @@ static void nfc_check_pres_work(struct work_struct *work)
697 704
698 if (dev->active_target && timer_pending(&dev->check_pres_timer) == 0) { 705 if (dev->active_target && timer_pending(&dev->check_pres_timer) == 0) {
699 rc = dev->ops->check_presence(dev, dev->active_target); 706 rc = dev->ops->check_presence(dev, dev->active_target);
707 if (rc == -EOPNOTSUPP)
708 goto exit;
700 if (!rc) { 709 if (!rc) {
701 mod_timer(&dev->check_pres_timer, jiffies + 710 mod_timer(&dev->check_pres_timer, jiffies +
702 msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS)); 711 msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
@@ -708,6 +717,7 @@ static void nfc_check_pres_work(struct work_struct *work)
708 } 717 }
709 } 718 }
710 719
720exit:
711 device_unlock(&dev->dev); 721 device_unlock(&dev->dev);
712} 722}
713 723
@@ -753,7 +763,6 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
753 u32 supported_protocols, 763 u32 supported_protocols,
754 int tx_headroom, int tx_tailroom) 764 int tx_headroom, int tx_tailroom)
755{ 765{
756 static atomic_t dev_no = ATOMIC_INIT(0);
757 struct nfc_dev *dev; 766 struct nfc_dev *dev;
758 767
759 if (!ops->start_poll || !ops->stop_poll || !ops->activate_target || 768 if (!ops->start_poll || !ops->stop_poll || !ops->activate_target ||
@@ -767,11 +776,6 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
767 if (!dev) 776 if (!dev)
768 return NULL; 777 return NULL;
769 778
770 dev->dev.class = &nfc_class;
771 dev->idx = atomic_inc_return(&dev_no) - 1;
772 dev_set_name(&dev->dev, "nfc%d", dev->idx);
773 device_initialize(&dev->dev);
774
775 dev->ops = ops; 779 dev->ops = ops;
776 dev->supported_protocols = supported_protocols; 780 dev->supported_protocols = supported_protocols;
777 dev->tx_headroom = tx_headroom; 781 dev->tx_headroom = tx_headroom;
@@ -779,6 +783,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
779 783
780 nfc_genl_data_init(&dev->genl_data); 784 nfc_genl_data_init(&dev->genl_data);
781 785
786 dev->rf_mode = NFC_RF_NONE;
782 787
783 /* first generation must not be 0 */ 788 /* first generation must not be 0 */
784 dev->targets_generation = 1; 789 dev->targets_generation = 1;
@@ -806,6 +811,14 @@ int nfc_register_device(struct nfc_dev *dev)
806 811
807 pr_debug("dev_name=%s\n", dev_name(&dev->dev)); 812 pr_debug("dev_name=%s\n", dev_name(&dev->dev));
808 813
814 dev->idx = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL);
815 if (dev->idx < 0)
816 return dev->idx;
817
818 dev->dev.class = &nfc_class;
819 dev_set_name(&dev->dev, "nfc%d", dev->idx);
820 device_initialize(&dev->dev);
821
809 mutex_lock(&nfc_devlist_mutex); 822 mutex_lock(&nfc_devlist_mutex);
810 nfc_devlist_generation++; 823 nfc_devlist_generation++;
811 rc = device_add(&dev->dev); 824 rc = device_add(&dev->dev);
@@ -834,10 +847,12 @@ EXPORT_SYMBOL(nfc_register_device);
834 */ 847 */
835void nfc_unregister_device(struct nfc_dev *dev) 848void nfc_unregister_device(struct nfc_dev *dev)
836{ 849{
837 int rc; 850 int rc, id;
838 851
839 pr_debug("dev_name=%s\n", dev_name(&dev->dev)); 852 pr_debug("dev_name=%s\n", dev_name(&dev->dev));
840 853
854 id = dev->idx;
855
841 mutex_lock(&nfc_devlist_mutex); 856 mutex_lock(&nfc_devlist_mutex);
842 nfc_devlist_generation++; 857 nfc_devlist_generation++;
843 858
@@ -856,6 +871,8 @@ void nfc_unregister_device(struct nfc_dev *dev)
856 pr_debug("The userspace won't be notified that the device %s was removed\n", 871 pr_debug("The userspace won't be notified that the device %s was removed\n",
857 dev_name(&dev->dev)); 872 dev_name(&dev->dev));
858 873
874 ida_simple_remove(&nfc_index_ida, id);
875
859} 876}
860EXPORT_SYMBOL(nfc_unregister_device); 877EXPORT_SYMBOL(nfc_unregister_device);
861 878
diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c
index 71c6a7086b8f..7d99410e6c1a 100644
--- a/net/nfc/hci/command.c
+++ b/net/nfc/hci/command.c
@@ -257,16 +257,16 @@ static u8 nfc_hci_create_pipe(struct nfc_hci_dev *hdev, u8 dest_host,
257 *result = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE, 257 *result = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
258 NFC_HCI_ADM_CREATE_PIPE, 258 NFC_HCI_ADM_CREATE_PIPE,
259 (u8 *) &params, sizeof(params), &skb); 259 (u8 *) &params, sizeof(params), &skb);
260 if (*result == 0) { 260 if (*result < 0)
261 resp = (struct hci_create_pipe_resp *)skb->data; 261 return NFC_HCI_INVALID_PIPE;
262 pipe = resp->pipe;
263 kfree_skb(skb);
264 262
265 pr_debug("pipe created=%d\n", pipe); 263 resp = (struct hci_create_pipe_resp *)skb->data;
264 pipe = resp->pipe;
265 kfree_skb(skb);
266 266
267 return pipe; 267 pr_debug("pipe created=%d\n", pipe);
268 } else 268
269 return NFC_HCI_INVALID_PIPE; 269 return pipe;
270} 270}
271 271
272static int nfc_hci_delete_pipe(struct nfc_hci_dev *hdev, u8 pipe) 272static int nfc_hci_delete_pipe(struct nfc_hci_dev *hdev, u8 pipe)
@@ -279,8 +279,6 @@ static int nfc_hci_delete_pipe(struct nfc_hci_dev *hdev, u8 pipe)
279 279
280static int nfc_hci_clear_all_pipes(struct nfc_hci_dev *hdev) 280static int nfc_hci_clear_all_pipes(struct nfc_hci_dev *hdev)
281{ 281{
282 int r;
283
284 u8 param[2]; 282 u8 param[2];
285 283
286 /* TODO: Find out what the identity reference data is 284 /* TODO: Find out what the identity reference data is
@@ -288,10 +286,8 @@ static int nfc_hci_clear_all_pipes(struct nfc_hci_dev *hdev)
288 286
289 pr_debug("\n"); 287 pr_debug("\n");
290 288
291 r = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE, 289 return nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
292 NFC_HCI_ADM_CLEAR_ALL_PIPE, param, 2, NULL); 290 NFC_HCI_ADM_CLEAR_ALL_PIPE, param, 2, NULL);
293
294 return 0;
295} 291}
296 292
297int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate) 293int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate)
@@ -348,7 +344,7 @@ int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate,
348 return -EADDRINUSE; 344 return -EADDRINUSE;
349 345
350 if (pipe != NFC_HCI_INVALID_PIPE) 346 if (pipe != NFC_HCI_INVALID_PIPE)
351 goto pipe_is_open; 347 goto open_pipe;
352 348
353 switch (dest_gate) { 349 switch (dest_gate) {
354 case NFC_HCI_LINK_MGMT_GATE: 350 case NFC_HCI_LINK_MGMT_GATE:
@@ -365,6 +361,7 @@ int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate,
365 break; 361 break;
366 } 362 }
367 363
364open_pipe:
368 r = nfc_hci_open_pipe(hdev, pipe); 365 r = nfc_hci_open_pipe(hdev, pipe);
369 if (r < 0) { 366 if (r < 0) {
370 if (pipe_created) 367 if (pipe_created)
@@ -375,7 +372,6 @@ int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate,
375 return r; 372 return r;
376 } 373 }
377 374
378pipe_is_open:
379 hdev->gate2pipe[dest_gate] = pipe; 375 hdev->gate2pipe[dest_gate] = pipe;
380 376
381 return 0; 377 return 0;
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index 5fbb6e40793e..7bea574d5934 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -33,17 +33,20 @@
33/* Largest headroom needed for outgoing HCI commands */ 33/* Largest headroom needed for outgoing HCI commands */
34#define HCI_CMDS_HEADROOM 1 34#define HCI_CMDS_HEADROOM 1
35 35
36static int nfc_hci_result_to_errno(u8 result) 36int nfc_hci_result_to_errno(u8 result)
37{ 37{
38 switch (result) { 38 switch (result) {
39 case NFC_HCI_ANY_OK: 39 case NFC_HCI_ANY_OK:
40 return 0; 40 return 0;
41 case NFC_HCI_ANY_E_REG_PAR_UNKNOWN:
42 return -EOPNOTSUPP;
41 case NFC_HCI_ANY_E_TIMEOUT: 43 case NFC_HCI_ANY_E_TIMEOUT:
42 return -ETIME; 44 return -ETIME;
43 default: 45 default:
44 return -1; 46 return -1;
45 } 47 }
46} 48}
49EXPORT_SYMBOL(nfc_hci_result_to_errno);
47 50
48static void nfc_hci_msg_tx_work(struct work_struct *work) 51static void nfc_hci_msg_tx_work(struct work_struct *work)
49{ 52{
@@ -65,8 +68,9 @@ static void nfc_hci_msg_tx_work(struct work_struct *work)
65 -ETIME); 68 -ETIME);
66 kfree(hdev->cmd_pending_msg); 69 kfree(hdev->cmd_pending_msg);
67 hdev->cmd_pending_msg = NULL; 70 hdev->cmd_pending_msg = NULL;
68 } else 71 } else {
69 goto exit; 72 goto exit;
73 }
70 } 74 }
71 75
72next_msg: 76next_msg:
@@ -166,7 +170,7 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
166 kfree_skb(skb); 170 kfree_skb(skb);
167} 171}
168 172
169static u32 nfc_hci_sak_to_protocol(u8 sak) 173u32 nfc_hci_sak_to_protocol(u8 sak)
170{ 174{
171 switch (NFC_HCI_TYPE_A_SEL_PROT(sak)) { 175 switch (NFC_HCI_TYPE_A_SEL_PROT(sak)) {
172 case NFC_HCI_TYPE_A_SEL_PROT_MIFARE: 176 case NFC_HCI_TYPE_A_SEL_PROT_MIFARE:
@@ -181,8 +185,9 @@ static u32 nfc_hci_sak_to_protocol(u8 sak)
181 return 0xffffffff; 185 return 0xffffffff;
182 } 186 }
183} 187}
188EXPORT_SYMBOL(nfc_hci_sak_to_protocol);
184 189
185static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate) 190int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
186{ 191{
187 struct nfc_target *targets; 192 struct nfc_target *targets;
188 struct sk_buff *atqa_skb = NULL; 193 struct sk_buff *atqa_skb = NULL;
@@ -263,7 +268,9 @@ static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
263 break; 268 break;
264 } 269 }
265 270
266 targets->hci_reader_gate = gate; 271 /* if driver set the new gate, we will skip the old one */
272 if (targets->hci_reader_gate == 0x00)
273 targets->hci_reader_gate = gate;
267 274
268 r = nfc_targets_found(hdev->ndev, targets, 1); 275 r = nfc_targets_found(hdev->ndev, targets, 1);
269 276
@@ -275,11 +282,18 @@ exit:
275 282
276 return r; 283 return r;
277} 284}
285EXPORT_SYMBOL(nfc_hci_target_discovered);
278 286
279void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event, 287void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
280 struct sk_buff *skb) 288 struct sk_buff *skb)
281{ 289{
282 int r = 0; 290 int r = 0;
291 u8 gate = nfc_hci_pipe2gate(hdev, pipe);
292
293 if (gate == 0xff) {
294 pr_err("Discarded event %x to unopened pipe %x\n", event, pipe);
295 goto exit;
296 }
283 297
284 switch (event) { 298 switch (event) {
285 case NFC_HCI_EVT_TARGET_DISCOVERED: 299 case NFC_HCI_EVT_TARGET_DISCOVERED:
@@ -303,12 +317,14 @@ void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
303 goto exit; 317 goto exit;
304 } 318 }
305 319
306 r = nfc_hci_target_discovered(hdev, 320 r = nfc_hci_target_discovered(hdev, gate);
307 nfc_hci_pipe2gate(hdev, pipe));
308 break; 321 break;
309 default: 322 default:
310 /* TODO: Unknown events are hardware specific 323 if (hdev->ops->event_received) {
311 * pass them to the driver (needs a new hci_ops) */ 324 hdev->ops->event_received(hdev, gate, event, skb);
325 return;
326 }
327
312 break; 328 break;
313 } 329 }
314 330
@@ -410,6 +426,10 @@ static int hci_dev_version(struct nfc_hci_dev *hdev)
410 426
411 r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE, 427 r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE,
412 NFC_HCI_ID_MGMT_VERSION_SW, &skb); 428 NFC_HCI_ID_MGMT_VERSION_SW, &skb);
429 if (r == -EOPNOTSUPP) {
430 pr_info("Software/Hardware info not available\n");
431 return 0;
432 }
413 if (r < 0) 433 if (r < 0)
414 return r; 434 return r;
415 435
@@ -527,7 +547,8 @@ static int hci_start_poll(struct nfc_dev *nfc_dev,
527 return hdev->ops->start_poll(hdev, im_protocols, tm_protocols); 547 return hdev->ops->start_poll(hdev, im_protocols, tm_protocols);
528 else 548 else
529 return nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, 549 return nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
530 NFC_HCI_EVT_READER_REQUESTED, NULL, 0); 550 NFC_HCI_EVT_READER_REQUESTED,
551 NULL, 0);
531} 552}
532 553
533static void hci_stop_poll(struct nfc_dev *nfc_dev) 554static void hci_stop_poll(struct nfc_dev *nfc_dev)
@@ -538,6 +559,28 @@ static void hci_stop_poll(struct nfc_dev *nfc_dev)
538 NFC_HCI_EVT_END_OPERATION, NULL, 0); 559 NFC_HCI_EVT_END_OPERATION, NULL, 0);
539} 560}
540 561
562static int hci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
563 __u8 comm_mode, __u8 *gb, size_t gb_len)
564{
565 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
566
567 if (hdev->ops->dep_link_up)
568 return hdev->ops->dep_link_up(hdev, target, comm_mode,
569 gb, gb_len);
570
571 return 0;
572}
573
574static int hci_dep_link_down(struct nfc_dev *nfc_dev)
575{
576 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
577
578 if (hdev->ops->dep_link_down)
579 return hdev->ops->dep_link_down(hdev);
580
581 return 0;
582}
583
541static int hci_activate_target(struct nfc_dev *nfc_dev, 584static int hci_activate_target(struct nfc_dev *nfc_dev,
542 struct nfc_target *target, u32 protocol) 585 struct nfc_target *target, u32 protocol)
543{ 586{
@@ -586,8 +629,8 @@ static int hci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
586 switch (target->hci_reader_gate) { 629 switch (target->hci_reader_gate) {
587 case NFC_HCI_RF_READER_A_GATE: 630 case NFC_HCI_RF_READER_A_GATE:
588 case NFC_HCI_RF_READER_B_GATE: 631 case NFC_HCI_RF_READER_B_GATE:
589 if (hdev->ops->data_exchange) { 632 if (hdev->ops->im_transceive) {
590 r = hdev->ops->data_exchange(hdev, target, skb, cb, 633 r = hdev->ops->im_transceive(hdev, target, skb, cb,
591 cb_context); 634 cb_context);
592 if (r <= 0) /* handled */ 635 if (r <= 0) /* handled */
593 break; 636 break;
@@ -604,14 +647,14 @@ static int hci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
604 skb->len, hci_transceive_cb, hdev); 647 skb->len, hci_transceive_cb, hdev);
605 break; 648 break;
606 default: 649 default:
607 if (hdev->ops->data_exchange) { 650 if (hdev->ops->im_transceive) {
608 r = hdev->ops->data_exchange(hdev, target, skb, cb, 651 r = hdev->ops->im_transceive(hdev, target, skb, cb,
609 cb_context); 652 cb_context);
610 if (r == 1) 653 if (r == 1)
611 r = -ENOTSUPP; 654 r = -ENOTSUPP;
612 } 655 } else {
613 else
614 r = -ENOTSUPP; 656 r = -ENOTSUPP;
657 }
615 break; 658 break;
616 } 659 }
617 660
@@ -620,6 +663,16 @@ static int hci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
620 return r; 663 return r;
621} 664}
622 665
666static int hci_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
667{
668 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
669
670 if (hdev->ops->tm_send)
671 return hdev->ops->tm_send(hdev, skb);
672 else
673 return -ENOTSUPP;
674}
675
623static int hci_check_presence(struct nfc_dev *nfc_dev, 676static int hci_check_presence(struct nfc_dev *nfc_dev,
624 struct nfc_target *target) 677 struct nfc_target *target)
625{ 678{
@@ -723,9 +776,12 @@ static struct nfc_ops hci_nfc_ops = {
723 .dev_down = hci_dev_down, 776 .dev_down = hci_dev_down,
724 .start_poll = hci_start_poll, 777 .start_poll = hci_start_poll,
725 .stop_poll = hci_stop_poll, 778 .stop_poll = hci_stop_poll,
779 .dep_link_up = hci_dep_link_up,
780 .dep_link_down = hci_dep_link_down,
726 .activate_target = hci_activate_target, 781 .activate_target = hci_activate_target,
727 .deactivate_target = hci_deactivate_target, 782 .deactivate_target = hci_deactivate_target,
728 .im_transceive = hci_transceive, 783 .im_transceive = hci_transceive,
784 .tm_send = hci_tm_send,
729 .check_presence = hci_check_presence, 785 .check_presence = hci_check_presence,
730}; 786};
731 787
@@ -848,7 +904,7 @@ void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err)
848} 904}
849EXPORT_SYMBOL(nfc_hci_driver_failure); 905EXPORT_SYMBOL(nfc_hci_driver_failure);
850 906
851void inline nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb) 907void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
852{ 908{
853 nfc_llc_rcv_from_drv(hdev->llc, skb); 909 nfc_llc_rcv_from_drv(hdev->llc, skb);
854} 910}
diff --git a/net/nfc/hci/llc.c b/net/nfc/hci/llc.c
index ae1205ded87f..fe5e966e5b88 100644
--- a/net/nfc/hci/llc.c
+++ b/net/nfc/hci/llc.c
@@ -72,7 +72,7 @@ int nfc_llc_register(const char *name, struct nfc_llc_ops *ops)
72 llc_engine->ops = ops; 72 llc_engine->ops = ops;
73 73
74 INIT_LIST_HEAD(&llc_engine->entry); 74 INIT_LIST_HEAD(&llc_engine->entry);
75 list_add_tail (&llc_engine->entry, &llc_engines); 75 list_add_tail(&llc_engine->entry, &llc_engines);
76 76
77 return 0; 77 return 0;
78} 78}
diff --git a/net/nfc/hci/llc_shdlc.c b/net/nfc/hci/llc_shdlc.c
index 01cbc72943cd..27b313befc35 100644
--- a/net/nfc/hci/llc_shdlc.c
+++ b/net/nfc/hci/llc_shdlc.c
@@ -634,9 +634,9 @@ static void llc_shdlc_sm_work(struct work_struct *work)
634 r = llc_shdlc_connect_initiate(shdlc); 634 r = llc_shdlc_connect_initiate(shdlc);
635 else 635 else
636 r = -ETIME; 636 r = -ETIME;
637 if (r < 0) 637 if (r < 0) {
638 llc_shdlc_connect_complete(shdlc, r); 638 llc_shdlc_connect_complete(shdlc, r);
639 else { 639 } else {
640 mod_timer(&shdlc->connect_timer, jiffies + 640 mod_timer(&shdlc->connect_timer, jiffies +
641 msecs_to_jiffies(SHDLC_CONNECT_VALUE_MS)); 641 msecs_to_jiffies(SHDLC_CONNECT_VALUE_MS));
642 642
@@ -682,9 +682,8 @@ static void llc_shdlc_sm_work(struct work_struct *work)
682 llc_shdlc_handle_send_queue(shdlc); 682 llc_shdlc_handle_send_queue(shdlc);
683 } 683 }
684 684
685 if (shdlc->hard_fault) { 685 if (shdlc->hard_fault)
686 shdlc->llc_failure(shdlc->hdev, shdlc->hard_fault); 686 shdlc->llc_failure(shdlc->hdev, shdlc->hard_fault);
687 }
688 break; 687 break;
689 default: 688 default:
690 break; 689 break;
diff --git a/net/nfc/llcp/Kconfig b/net/nfc/llcp/Kconfig
index fbf5e8150908..a1a41cd68255 100644
--- a/net/nfc/llcp/Kconfig
+++ b/net/nfc/llcp/Kconfig
@@ -1,6 +1,6 @@
1config NFC_LLCP 1config NFC_LLCP
2 depends on NFC && EXPERIMENTAL 2 depends on NFC
3 bool "NFC LLCP support (EXPERIMENTAL)" 3 bool "NFC LLCP support"
4 default n 4 default n
5 help 5 help
6 Say Y here if you want to build support for a kernel NFC LLCP 6 Say Y here if you want to build support for a kernel NFC LLCP
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index c45ccd6c094c..df24be48d4da 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -261,7 +261,6 @@ int nfc_llcp_disconnect(struct nfc_llcp_sock *sock)
261 struct sk_buff *skb; 261 struct sk_buff *skb;
262 struct nfc_dev *dev; 262 struct nfc_dev *dev;
263 struct nfc_llcp_local *local; 263 struct nfc_llcp_local *local;
264 u16 size = 0;
265 264
266 pr_debug("Sending DISC\n"); 265 pr_debug("Sending DISC\n");
267 266
@@ -273,17 +272,10 @@ int nfc_llcp_disconnect(struct nfc_llcp_sock *sock)
273 if (dev == NULL) 272 if (dev == NULL)
274 return -ENODEV; 273 return -ENODEV;
275 274
276 size += LLCP_HEADER_SIZE; 275 skb = llcp_allocate_pdu(sock, LLCP_PDU_DISC, 0);
277 size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
278
279 skb = alloc_skb(size, GFP_ATOMIC);
280 if (skb == NULL) 276 if (skb == NULL)
281 return -ENOMEM; 277 return -ENOMEM;
282 278
283 skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
284
285 skb = llcp_add_header(skb, sock->dsap, sock->ssap, LLCP_PDU_DISC);
286
287 skb_queue_tail(&local->tx_queue, skb); 279 skb_queue_tail(&local->tx_queue, skb);
288 280
289 return 0; 281 return 0;
@@ -324,8 +316,7 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
324 struct sk_buff *skb; 316 struct sk_buff *skb;
325 u8 *service_name_tlv = NULL, service_name_tlv_length; 317 u8 *service_name_tlv = NULL, service_name_tlv_length;
326 u8 *miux_tlv = NULL, miux_tlv_length; 318 u8 *miux_tlv = NULL, miux_tlv_length;
327 u8 *rw_tlv = NULL, rw_tlv_length, rw; 319 u8 *rw_tlv = NULL, rw_tlv_length;
328 __be16 miux;
329 int err; 320 int err;
330 u16 size = 0; 321 u16 size = 0;
331 322
@@ -343,13 +334,11 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
343 size += service_name_tlv_length; 334 size += service_name_tlv_length;
344 } 335 }
345 336
346 miux = cpu_to_be16(LLCP_MAX_MIUX); 337 miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0,
347 miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
348 &miux_tlv_length); 338 &miux_tlv_length);
349 size += miux_tlv_length; 339 size += miux_tlv_length;
350 340
351 rw = LLCP_MAX_RW; 341 rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &local->rw, 0, &rw_tlv_length);
352 rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
353 size += rw_tlv_length; 342 size += rw_tlv_length;
354 343
355 pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len); 344 pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
@@ -386,8 +375,7 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
386 struct nfc_llcp_local *local; 375 struct nfc_llcp_local *local;
387 struct sk_buff *skb; 376 struct sk_buff *skb;
388 u8 *miux_tlv = NULL, miux_tlv_length; 377 u8 *miux_tlv = NULL, miux_tlv_length;
389 u8 *rw_tlv = NULL, rw_tlv_length, rw; 378 u8 *rw_tlv = NULL, rw_tlv_length;
390 __be16 miux;
391 int err; 379 int err;
392 u16 size = 0; 380 u16 size = 0;
393 381
@@ -397,13 +385,11 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
397 if (local == NULL) 385 if (local == NULL)
398 return -ENODEV; 386 return -ENODEV;
399 387
400 miux = cpu_to_be16(LLCP_MAX_MIUX); 388 miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0,
401 miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
402 &miux_tlv_length); 389 &miux_tlv_length);
403 size += miux_tlv_length; 390 size += miux_tlv_length;
404 391
405 rw = LLCP_MAX_RW; 392 rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &local->rw, 0, &rw_tlv_length);
406 rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
407 size += rw_tlv_length; 393 size += rw_tlv_length;
408 394
409 skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size); 395 skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size);
@@ -428,6 +414,52 @@ error_tlv:
428 return err; 414 return err;
429} 415}
430 416
417int nfc_llcp_send_snl(struct nfc_llcp_local *local, u8 tid, u8 sap)
418{
419 struct sk_buff *skb;
420 struct nfc_dev *dev;
421 u8 *sdres_tlv = NULL, sdres_tlv_length, sdres[2];
422 u16 size = 0;
423
424 pr_debug("Sending SNL tid 0x%x sap 0x%x\n", tid, sap);
425
426 if (local == NULL)
427 return -ENODEV;
428
429 dev = local->dev;
430 if (dev == NULL)
431 return -ENODEV;
432
433 sdres[0] = tid;
434 sdres[1] = sap;
435 sdres_tlv = nfc_llcp_build_tlv(LLCP_TLV_SDRES, sdres, 0,
436 &sdres_tlv_length);
437 if (sdres_tlv == NULL)
438 return -ENOMEM;
439
440 size += LLCP_HEADER_SIZE;
441 size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
442 size += sdres_tlv_length;
443
444 skb = alloc_skb(size, GFP_KERNEL);
445 if (skb == NULL) {
446 kfree(sdres_tlv);
447 return -ENOMEM;
448 }
449
450 skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
451
452 skb = llcp_add_header(skb, LLCP_SAP_SDP, LLCP_SAP_SDP, LLCP_PDU_SNL);
453
454 memcpy(skb_put(skb, sdres_tlv_length), sdres_tlv, sdres_tlv_length);
455
456 skb_queue_tail(&local->tx_queue, skb);
457
458 kfree(sdres_tlv);
459
460 return 0;
461}
462
431int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason) 463int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason)
432{ 464{
433 struct sk_buff *skb; 465 struct sk_buff *skb;
@@ -496,6 +528,23 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
496 if (local == NULL) 528 if (local == NULL)
497 return -ENODEV; 529 return -ENODEV;
498 530
531 /* Remote is ready but has not acknowledged our frames */
532 if((sock->remote_ready &&
533 skb_queue_len(&sock->tx_pending_queue) >= sock->rw &&
534 skb_queue_len(&sock->tx_queue) >= 2 * sock->rw)) {
535 pr_err("Pending queue is full %d frames\n",
536 skb_queue_len(&sock->tx_pending_queue));
537 return -ENOBUFS;
538 }
539
540 /* Remote is not ready and we've been queueing enough frames */
541 if ((!sock->remote_ready &&
542 skb_queue_len(&sock->tx_queue) >= 2 * sock->rw)) {
543 pr_err("Tx queue is full %d frames\n",
544 skb_queue_len(&sock->tx_queue));
545 return -ENOBUFS;
546 }
547
499 msg_data = kzalloc(len, GFP_KERNEL); 548 msg_data = kzalloc(len, GFP_KERNEL);
500 if (msg_data == NULL) 549 if (msg_data == NULL)
501 return -ENOMEM; 550 return -ENOMEM;
@@ -541,6 +590,63 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
541 return len; 590 return len;
542} 591}
543 592
593int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
594 struct msghdr *msg, size_t len)
595{
596 struct sk_buff *pdu;
597 struct nfc_llcp_local *local;
598 size_t frag_len = 0, remaining_len;
599 u8 *msg_ptr, *msg_data;
600 int err;
601
602 pr_debug("Send UI frame len %zd\n", len);
603
604 local = sock->local;
605 if (local == NULL)
606 return -ENODEV;
607
608 msg_data = kzalloc(len, GFP_KERNEL);
609 if (msg_data == NULL)
610 return -ENOMEM;
611
612 if (memcpy_fromiovec(msg_data, msg->msg_iov, len)) {
613 kfree(msg_data);
614 return -EFAULT;
615 }
616
617 remaining_len = len;
618 msg_ptr = msg_data;
619
620 while (remaining_len > 0) {
621
622 frag_len = min_t(size_t, sock->miu, remaining_len);
623
624 pr_debug("Fragment %zd bytes remaining %zd",
625 frag_len, remaining_len);
626
627 pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
628 frag_len + LLCP_HEADER_SIZE, &err);
629 if (pdu == NULL) {
630 pr_err("Could not allocate PDU\n");
631 continue;
632 }
633
634 pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI);
635
636 memcpy(skb_put(pdu, frag_len), msg_ptr, frag_len);
637
638 /* No need to check for the peer RW for UI frames */
639 skb_queue_tail(&local->tx_queue, pdu);
640
641 remaining_len -= frag_len;
642 msg_ptr += frag_len;
643 }
644
645 kfree(msg_data);
646
647 return len;
648}
649
544int nfc_llcp_send_rr(struct nfc_llcp_sock *sock) 650int nfc_llcp_send_rr(struct nfc_llcp_sock *sock)
545{ 651{
546 struct sk_buff *skb; 652 struct sk_buff *skb;
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index cc10d073c338..ec43914c92a9 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -45,12 +45,38 @@ void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *sk)
45 write_unlock(&l->lock); 45 write_unlock(&l->lock);
46} 46}
47 47
48static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock)
49{
50 struct nfc_llcp_local *local = sock->local;
51 struct sk_buff *s, *tmp;
52
53 pr_debug("%p\n", &sock->sk);
54
55 skb_queue_purge(&sock->tx_queue);
56 skb_queue_purge(&sock->tx_pending_queue);
57 skb_queue_purge(&sock->tx_backlog_queue);
58
59 if (local == NULL)
60 return;
61
62 /* Search for local pending SKBs that are related to this socket */
63 skb_queue_walk_safe(&local->tx_queue, s, tmp) {
64 if (s->sk != &sock->sk)
65 continue;
66
67 skb_unlink(s, &local->tx_queue);
68 kfree_skb(s);
69 }
70}
71
48static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) 72static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
49{ 73{
50 struct sock *sk; 74 struct sock *sk;
51 struct hlist_node *node, *tmp; 75 struct hlist_node *node, *tmp;
52 struct nfc_llcp_sock *llcp_sock; 76 struct nfc_llcp_sock *llcp_sock;
53 77
78 skb_queue_purge(&local->tx_queue);
79
54 write_lock(&local->sockets.lock); 80 write_lock(&local->sockets.lock);
55 81
56 sk_for_each_safe(sk, node, tmp, &local->sockets.head) { 82 sk_for_each_safe(sk, node, tmp, &local->sockets.head) {
@@ -58,6 +84,8 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
58 84
59 bh_lock_sock(sk); 85 bh_lock_sock(sk);
60 86
87 nfc_llcp_socket_purge(llcp_sock);
88
61 if (sk->sk_state == LLCP_CONNECTED) 89 if (sk->sk_state == LLCP_CONNECTED)
62 nfc_put_device(llcp_sock->dev); 90 nfc_put_device(llcp_sock->dev);
63 91
@@ -65,7 +93,8 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
65 struct nfc_llcp_sock *lsk, *n; 93 struct nfc_llcp_sock *lsk, *n;
66 struct sock *accept_sk; 94 struct sock *accept_sk;
67 95
68 list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue, 96 list_for_each_entry_safe(lsk, n,
97 &llcp_sock->accept_queue,
69 accept_queue) { 98 accept_queue) {
70 accept_sk = &lsk->sk; 99 accept_sk = &lsk->sk;
71 bh_lock_sock(accept_sk); 100 bh_lock_sock(accept_sk);
@@ -85,6 +114,16 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
85 } 114 }
86 } 115 }
87 116
117 /*
118 * If we have a connection less socket bound, we keep it alive
119 * if the device is still present.
120 */
121 if (sk->sk_state == LLCP_BOUND && sk->sk_type == SOCK_DGRAM &&
122 listen == true) {
123 bh_unlock_sock(sk);
124 continue;
125 }
126
88 sk->sk_state = LLCP_CLOSED; 127 sk->sk_state = LLCP_CLOSED;
89 128
90 bh_unlock_sock(sk); 129 bh_unlock_sock(sk);
@@ -134,7 +173,7 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
134{ 173{
135 struct sock *sk; 174 struct sock *sk;
136 struct hlist_node *node; 175 struct hlist_node *node;
137 struct nfc_llcp_sock *llcp_sock; 176 struct nfc_llcp_sock *llcp_sock, *tmp_sock;
138 177
139 pr_debug("ssap dsap %d %d\n", ssap, dsap); 178 pr_debug("ssap dsap %d %d\n", ssap, dsap);
140 179
@@ -146,10 +185,12 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
146 llcp_sock = NULL; 185 llcp_sock = NULL;
147 186
148 sk_for_each(sk, node, &local->sockets.head) { 187 sk_for_each(sk, node, &local->sockets.head) {
149 llcp_sock = nfc_llcp_sock(sk); 188 tmp_sock = nfc_llcp_sock(sk);
150 189
151 if (llcp_sock->ssap == ssap && llcp_sock->dsap == dsap) 190 if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) {
191 llcp_sock = tmp_sock;
152 break; 192 break;
193 }
153 } 194 }
154 195
155 read_unlock(&local->sockets.lock); 196 read_unlock(&local->sockets.lock);
@@ -249,7 +290,12 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
249 290
250 pr_debug("llcp sock %p\n", tmp_sock); 291 pr_debug("llcp sock %p\n", tmp_sock);
251 292
252 if (tmp_sock->sk.sk_state != LLCP_LISTEN) 293 if (tmp_sock->sk.sk_type == SOCK_STREAM &&
294 tmp_sock->sk.sk_state != LLCP_LISTEN)
295 continue;
296
297 if (tmp_sock->sk.sk_type == SOCK_DGRAM &&
298 tmp_sock->sk.sk_state != LLCP_BOUND)
253 continue; 299 continue;
254 300
255 if (tmp_sock->service_name == NULL || 301 if (tmp_sock->service_name == NULL ||
@@ -421,10 +467,9 @@ static u8 nfc_llcp_reserve_sdp_ssap(struct nfc_llcp_local *local)
421static int nfc_llcp_build_gb(struct nfc_llcp_local *local) 467static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
422{ 468{
423 u8 *gb_cur, *version_tlv, version, version_length; 469 u8 *gb_cur, *version_tlv, version, version_length;
424 u8 *lto_tlv, lto, lto_length; 470 u8 *lto_tlv, lto_length;
425 u8 *wks_tlv, wks_length; 471 u8 *wks_tlv, wks_length;
426 u8 *miux_tlv, miux_length; 472 u8 *miux_tlv, miux_length;
427 __be16 miux;
428 u8 gb_len = 0; 473 u8 gb_len = 0;
429 int ret = 0; 474 int ret = 0;
430 475
@@ -433,9 +478,7 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
433 1, &version_length); 478 1, &version_length);
434 gb_len += version_length; 479 gb_len += version_length;
435 480
436 /* 1500 ms */ 481 lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &local->lto, 1, &lto_length);
437 lto = 150;
438 lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &lto, 1, &lto_length);
439 gb_len += lto_length; 482 gb_len += lto_length;
440 483
441 pr_debug("Local wks 0x%lx\n", local->local_wks); 484 pr_debug("Local wks 0x%lx\n", local->local_wks);
@@ -443,8 +486,7 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
443 &wks_length); 486 &wks_length);
444 gb_len += wks_length; 487 gb_len += wks_length;
445 488
446 miux = cpu_to_be16(LLCP_MAX_MIUX); 489 miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0,
447 miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
448 &miux_length); 490 &miux_length);
449 gb_len += miux_length; 491 gb_len += miux_length;
450 492
@@ -610,7 +652,12 @@ static void nfc_llcp_tx_work(struct work_struct *work)
610 if (skb != NULL) { 652 if (skb != NULL) {
611 sk = skb->sk; 653 sk = skb->sk;
612 llcp_sock = nfc_llcp_sock(sk); 654 llcp_sock = nfc_llcp_sock(sk);
613 if (llcp_sock != NULL) { 655
656 if (llcp_sock == NULL && nfc_llcp_ptype(skb) == LLCP_PDU_I) {
657 nfc_llcp_send_symm(local->dev);
658 } else {
659 struct sk_buff *copy_skb = NULL;
660 u8 ptype = nfc_llcp_ptype(skb);
614 int ret; 661 int ret;
615 662
616 pr_debug("Sending pending skb\n"); 663 pr_debug("Sending pending skb\n");
@@ -618,24 +665,29 @@ static void nfc_llcp_tx_work(struct work_struct *work)
618 DUMP_PREFIX_OFFSET, 16, 1, 665 DUMP_PREFIX_OFFSET, 16, 1,
619 skb->data, skb->len, true); 666 skb->data, skb->len, true);
620 667
668 if (ptype == LLCP_PDU_I)
669 copy_skb = skb_copy(skb, GFP_ATOMIC);
670
621 nfc_llcp_send_to_raw_sock(local, skb, 671 nfc_llcp_send_to_raw_sock(local, skb,
622 NFC_LLCP_DIRECTION_TX); 672 NFC_LLCP_DIRECTION_TX);
623 673
624 ret = nfc_data_exchange(local->dev, local->target_idx, 674 ret = nfc_data_exchange(local->dev, local->target_idx,
625 skb, nfc_llcp_recv, local); 675 skb, nfc_llcp_recv, local);
626 676
627 if (!ret && nfc_llcp_ptype(skb) == LLCP_PDU_I) { 677 if (ret) {
628 skb = skb_get(skb); 678 kfree_skb(copy_skb);
629 skb_queue_tail(&llcp_sock->tx_pending_queue, 679 goto out;
630 skb);
631 } 680 }
632 } else { 681
633 nfc_llcp_send_symm(local->dev); 682 if (ptype == LLCP_PDU_I && copy_skb)
683 skb_queue_tail(&llcp_sock->tx_pending_queue,
684 copy_skb);
634 } 685 }
635 } else { 686 } else {
636 nfc_llcp_send_symm(local->dev); 687 nfc_llcp_send_symm(local->dev);
637 } 688 }
638 689
690out:
639 mod_timer(&local->link_timer, 691 mod_timer(&local->link_timer,
640 jiffies + msecs_to_jiffies(2 * local->remote_lto)); 692 jiffies + msecs_to_jiffies(2 * local->remote_lto));
641} 693}
@@ -704,6 +756,39 @@ static u8 *nfc_llcp_connect_sn(struct sk_buff *skb, size_t *sn_len)
704 return NULL; 756 return NULL;
705} 757}
706 758
759static void nfc_llcp_recv_ui(struct nfc_llcp_local *local,
760 struct sk_buff *skb)
761{
762 struct nfc_llcp_sock *llcp_sock;
763 struct nfc_llcp_ui_cb *ui_cb;
764 u8 dsap, ssap;
765
766 dsap = nfc_llcp_dsap(skb);
767 ssap = nfc_llcp_ssap(skb);
768
769 ui_cb = nfc_llcp_ui_skb_cb(skb);
770 ui_cb->dsap = dsap;
771 ui_cb->ssap = ssap;
772
773 printk("%s %d %d\n", __func__, dsap, ssap);
774
775 pr_debug("%d %d\n", dsap, ssap);
776
777 /* We're looking for a bound socket, not a client one */
778 llcp_sock = nfc_llcp_sock_get(local, dsap, LLCP_SAP_SDP);
779 if (llcp_sock == NULL || llcp_sock->sk.sk_type != SOCK_DGRAM)
780 return;
781
782 /* There is no sequence with UI frames */
783 skb_pull(skb, LLCP_HEADER_SIZE);
784 if (sock_queue_rcv_skb(&llcp_sock->sk, skb)) {
785 pr_err("receive queue is full\n");
786 skb_queue_head(&llcp_sock->tx_backlog_queue, skb);
787 }
788
789 nfc_llcp_sock_put(llcp_sock);
790}
791
707static void nfc_llcp_recv_connect(struct nfc_llcp_local *local, 792static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
708 struct sk_buff *skb) 793 struct sk_buff *skb)
709{ 794{
@@ -823,9 +908,6 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
823fail: 908fail:
824 /* Send DM */ 909 /* Send DM */
825 nfc_llcp_send_dm(local, dsap, ssap, reason); 910 nfc_llcp_send_dm(local, dsap, ssap, reason);
826
827 return;
828
829} 911}
830 912
831int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock) 913int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock)
@@ -903,15 +985,18 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
903 /* Remove skbs from the pending queue */ 985 /* Remove skbs from the pending queue */
904 if (llcp_sock->send_ack_n != nr) { 986 if (llcp_sock->send_ack_n != nr) {
905 struct sk_buff *s, *tmp; 987 struct sk_buff *s, *tmp;
988 u8 n;
906 989
907 llcp_sock->send_ack_n = nr; 990 llcp_sock->send_ack_n = nr;
908 991
909 /* Remove and free all skbs until ns == nr */ 992 /* Remove and free all skbs until ns == nr */
910 skb_queue_walk_safe(&llcp_sock->tx_pending_queue, s, tmp) { 993 skb_queue_walk_safe(&llcp_sock->tx_pending_queue, s, tmp) {
994 n = nfc_llcp_ns(s);
995
911 skb_unlink(s, &llcp_sock->tx_pending_queue); 996 skb_unlink(s, &llcp_sock->tx_pending_queue);
912 kfree_skb(s); 997 kfree_skb(s);
913 998
914 if (nfc_llcp_ns(s) == nr) 999 if (n == nr)
915 break; 1000 break;
916 } 1001 }
917 1002
@@ -953,6 +1038,9 @@ static void nfc_llcp_recv_disc(struct nfc_llcp_local *local,
953 1038
954 sk = &llcp_sock->sk; 1039 sk = &llcp_sock->sk;
955 lock_sock(sk); 1040 lock_sock(sk);
1041
1042 nfc_llcp_socket_purge(llcp_sock);
1043
956 if (sk->sk_state == LLCP_CLOSED) { 1044 if (sk->sk_state == LLCP_CLOSED) {
957 release_sock(sk); 1045 release_sock(sk);
958 nfc_llcp_sock_put(llcp_sock); 1046 nfc_llcp_sock_put(llcp_sock);
@@ -1027,7 +1115,7 @@ static void nfc_llcp_recv_dm(struct nfc_llcp_local *local, struct sk_buff *skb)
1027 } 1115 }
1028 1116
1029 if (llcp_sock == NULL) { 1117 if (llcp_sock == NULL) {
1030 pr_err("Invalid DM\n"); 1118 pr_debug("Already closed\n");
1031 return; 1119 return;
1032 } 1120 }
1033 1121
@@ -1038,8 +1126,100 @@ static void nfc_llcp_recv_dm(struct nfc_llcp_local *local, struct sk_buff *skb)
1038 sk->sk_state_change(sk); 1126 sk->sk_state_change(sk);
1039 1127
1040 nfc_llcp_sock_put(llcp_sock); 1128 nfc_llcp_sock_put(llcp_sock);
1129}
1041 1130
1042 return; 1131static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
1132 struct sk_buff *skb)
1133{
1134 struct nfc_llcp_sock *llcp_sock;
1135 u8 dsap, ssap, *tlv, type, length, tid, sap;
1136 u16 tlv_len, offset;
1137 char *service_name;
1138 size_t service_name_len;
1139
1140 dsap = nfc_llcp_dsap(skb);
1141 ssap = nfc_llcp_ssap(skb);
1142
1143 pr_debug("%d %d\n", dsap, ssap);
1144
1145 if (dsap != LLCP_SAP_SDP || ssap != LLCP_SAP_SDP) {
1146 pr_err("Wrong SNL SAP\n");
1147 return;
1148 }
1149
1150 tlv = &skb->data[LLCP_HEADER_SIZE];
1151 tlv_len = skb->len - LLCP_HEADER_SIZE;
1152 offset = 0;
1153
1154 while (offset < tlv_len) {
1155 type = tlv[0];
1156 length = tlv[1];
1157
1158 switch (type) {
1159 case LLCP_TLV_SDREQ:
1160 tid = tlv[2];
1161 service_name = (char *) &tlv[3];
1162 service_name_len = length - 1;
1163
1164 pr_debug("Looking for %.16s\n", service_name);
1165
1166 if (service_name_len == strlen("urn:nfc:sn:sdp") &&
1167 !strncmp(service_name, "urn:nfc:sn:sdp",
1168 service_name_len)) {
1169 sap = 1;
1170 goto send_snl;
1171 }
1172
1173 llcp_sock = nfc_llcp_sock_from_sn(local, service_name,
1174 service_name_len);
1175 if (!llcp_sock) {
1176 sap = 0;
1177 goto send_snl;
1178 }
1179
1180 /*
1181 * We found a socket but its ssap has not been reserved
1182 * yet. We need to assign it for good and send a reply.
1183 * The ssap will be freed when the socket is closed.
1184 */
1185 if (llcp_sock->ssap == LLCP_SDP_UNBOUND) {
1186 atomic_t *client_count;
1187
1188 sap = nfc_llcp_reserve_sdp_ssap(local);
1189
1190 pr_debug("Reserving %d\n", sap);
1191
1192 if (sap == LLCP_SAP_MAX) {
1193 sap = 0;
1194 goto send_snl;
1195 }
1196
1197 client_count =
1198 &local->local_sdp_cnt[sap -
1199 LLCP_WKS_NUM_SAP];
1200
1201 atomic_inc(client_count);
1202
1203 llcp_sock->ssap = sap;
1204 llcp_sock->reserved_ssap = sap;
1205 } else {
1206 sap = llcp_sock->ssap;
1207 }
1208
1209 pr_debug("%p %d\n", llcp_sock, sap);
1210
1211send_snl:
1212 nfc_llcp_send_snl(local, tid, sap);
1213 break;
1214
1215 default:
1216 pr_err("Invalid SNL tlv value 0x%x\n", type);
1217 break;
1218 }
1219
1220 offset += length + 2;
1221 tlv += length + 2;
1222 }
1043} 1223}
1044 1224
1045static void nfc_llcp_rx_work(struct work_struct *work) 1225static void nfc_llcp_rx_work(struct work_struct *work)
@@ -1072,6 +1252,11 @@ static void nfc_llcp_rx_work(struct work_struct *work)
1072 pr_debug("SYMM\n"); 1252 pr_debug("SYMM\n");
1073 break; 1253 break;
1074 1254
1255 case LLCP_PDU_UI:
1256 pr_debug("UI\n");
1257 nfc_llcp_recv_ui(local, skb);
1258 break;
1259
1075 case LLCP_PDU_CONNECT: 1260 case LLCP_PDU_CONNECT:
1076 pr_debug("CONNECT\n"); 1261 pr_debug("CONNECT\n");
1077 nfc_llcp_recv_connect(local, skb); 1262 nfc_llcp_recv_connect(local, skb);
@@ -1092,6 +1277,11 @@ static void nfc_llcp_rx_work(struct work_struct *work)
1092 nfc_llcp_recv_dm(local, skb); 1277 nfc_llcp_recv_dm(local, skb);
1093 break; 1278 break;
1094 1279
1280 case LLCP_PDU_SNL:
1281 pr_debug("SNL\n");
1282 nfc_llcp_recv_snl(local, skb);
1283 break;
1284
1095 case LLCP_PDU_I: 1285 case LLCP_PDU_I:
1096 case LLCP_PDU_RR: 1286 case LLCP_PDU_RR:
1097 case LLCP_PDU_RNR: 1287 case LLCP_PDU_RNR:
@@ -1104,8 +1294,6 @@ static void nfc_llcp_rx_work(struct work_struct *work)
1104 schedule_work(&local->tx_work); 1294 schedule_work(&local->tx_work);
1105 kfree_skb(local->rx_pending); 1295 kfree_skb(local->rx_pending);
1106 local->rx_pending = NULL; 1296 local->rx_pending = NULL;
1107
1108 return;
1109} 1297}
1110 1298
1111void nfc_llcp_recv(void *data, struct sk_buff *skb, int err) 1299void nfc_llcp_recv(void *data, struct sk_buff *skb, int err)
@@ -1121,8 +1309,6 @@ void nfc_llcp_recv(void *data, struct sk_buff *skb, int err)
1121 local->rx_pending = skb_get(skb); 1309 local->rx_pending = skb_get(skb);
1122 del_timer(&local->link_timer); 1310 del_timer(&local->link_timer);
1123 schedule_work(&local->rx_work); 1311 schedule_work(&local->rx_work);
1124
1125 return;
1126} 1312}
1127 1313
1128int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb) 1314int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb)
@@ -1205,12 +1391,16 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
1205 rwlock_init(&local->connecting_sockets.lock); 1391 rwlock_init(&local->connecting_sockets.lock);
1206 rwlock_init(&local->raw_sockets.lock); 1392 rwlock_init(&local->raw_sockets.lock);
1207 1393
1394 local->lto = 150; /* 1500 ms */
1395 local->rw = LLCP_MAX_RW;
1396 local->miux = cpu_to_be16(LLCP_MAX_MIUX);
1397
1208 nfc_llcp_build_gb(local); 1398 nfc_llcp_build_gb(local);
1209 1399
1210 local->remote_miu = LLCP_DEFAULT_MIU; 1400 local->remote_miu = LLCP_DEFAULT_MIU;
1211 local->remote_lto = LLCP_DEFAULT_LTO; 1401 local->remote_lto = LLCP_DEFAULT_LTO;
1212 1402
1213 list_add(&llcp_devices, &local->list); 1403 list_add(&local->list, &llcp_devices);
1214 1404
1215 return 0; 1405 return 0;
1216} 1406}
diff --git a/net/nfc/llcp/llcp.h b/net/nfc/llcp/llcp.h
index fdb2d24e60bd..0d62366f8cc3 100644
--- a/net/nfc/llcp/llcp.h
+++ b/net/nfc/llcp/llcp.h
@@ -64,6 +64,9 @@ struct nfc_llcp_local {
64 u32 target_idx; 64 u32 target_idx;
65 u8 rf_mode; 65 u8 rf_mode;
66 u8 comm_mode; 66 u8 comm_mode;
67 u8 lto;
68 u8 rw;
69 __be16 miux;
67 unsigned long local_wks; /* Well known services */ 70 unsigned long local_wks; /* Well known services */
68 unsigned long local_sdp; /* Local services */ 71 unsigned long local_sdp; /* Local services */
69 unsigned long local_sap; /* Local SAPs, not available for discovery */ 72 unsigned long local_sap; /* Local SAPs, not available for discovery */
@@ -124,6 +127,13 @@ struct nfc_llcp_sock {
124 struct sock *parent; 127 struct sock *parent;
125}; 128};
126 129
130struct nfc_llcp_ui_cb {
131 __u8 dsap;
132 __u8 ssap;
133};
134
135#define nfc_llcp_ui_skb_cb(__skb) ((struct nfc_llcp_ui_cb *)&((__skb)->cb[0]))
136
127#define nfc_llcp_sock(sk) ((struct nfc_llcp_sock *) (sk)) 137#define nfc_llcp_sock(sk) ((struct nfc_llcp_sock *) (sk))
128#define nfc_llcp_dev(sk) (nfc_llcp_sock((sk))->dev) 138#define nfc_llcp_dev(sk) (nfc_llcp_sock((sk))->dev)
129 139
@@ -209,10 +219,13 @@ int nfc_llcp_disconnect(struct nfc_llcp_sock *sock);
209int nfc_llcp_send_symm(struct nfc_dev *dev); 219int nfc_llcp_send_symm(struct nfc_dev *dev);
210int nfc_llcp_send_connect(struct nfc_llcp_sock *sock); 220int nfc_llcp_send_connect(struct nfc_llcp_sock *sock);
211int nfc_llcp_send_cc(struct nfc_llcp_sock *sock); 221int nfc_llcp_send_cc(struct nfc_llcp_sock *sock);
222int nfc_llcp_send_snl(struct nfc_llcp_local *local, u8 tid, u8 sap);
212int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason); 223int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason);
213int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock); 224int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock);
214int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock, 225int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
215 struct msghdr *msg, size_t len); 226 struct msghdr *msg, size_t len);
227int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
228 struct msghdr *msg, size_t len);
216int nfc_llcp_send_rr(struct nfc_llcp_sock *sock); 229int nfc_llcp_send_rr(struct nfc_llcp_sock *sock);
217 230
218/* Socket API */ 231/* Socket API */
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 63e4cdc92376..fea22eb41b82 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -205,8 +205,8 @@ static int llcp_sock_listen(struct socket *sock, int backlog)
205 205
206 lock_sock(sk); 206 lock_sock(sk);
207 207
208 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM) 208 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM) ||
209 || sk->sk_state != LLCP_BOUND) { 209 sk->sk_state != LLCP_BOUND) {
210 ret = -EBADFD; 210 ret = -EBADFD;
211 goto error; 211 goto error;
212 } 212 }
@@ -608,6 +608,21 @@ static int llcp_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
608 608
609 lock_sock(sk); 609 lock_sock(sk);
610 610
611 if (sk->sk_type == SOCK_DGRAM) {
612 struct sockaddr_nfc_llcp *addr =
613 (struct sockaddr_nfc_llcp *)msg->msg_name;
614
615 if (msg->msg_namelen < sizeof(*addr)) {
616 release_sock(sk);
617 return -EINVAL;
618 }
619
620 release_sock(sk);
621
622 return nfc_llcp_send_ui_frame(llcp_sock, addr->dsap, addr->ssap,
623 msg, len);
624 }
625
611 if (sk->sk_state != LLCP_CONNECTED) { 626 if (sk->sk_state != LLCP_CONNECTED) {
612 release_sock(sk); 627 release_sock(sk);
613 return -ENOTCONN; 628 return -ENOTCONN;
@@ -663,11 +678,28 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
663 return -EFAULT; 678 return -EFAULT;
664 } 679 }
665 680
681 if (sk->sk_type == SOCK_DGRAM && msg->msg_name) {
682 struct nfc_llcp_ui_cb *ui_cb = nfc_llcp_ui_skb_cb(skb);
683 struct sockaddr_nfc_llcp sockaddr;
684
685 pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap);
686
687 sockaddr.sa_family = AF_NFC;
688 sockaddr.nfc_protocol = NFC_PROTO_NFC_DEP;
689 sockaddr.dsap = ui_cb->dsap;
690 sockaddr.ssap = ui_cb->ssap;
691
692 memcpy(msg->msg_name, &sockaddr, sizeof(sockaddr));
693 msg->msg_namelen = sizeof(sockaddr);
694 }
695
666 /* Mark read part of skb as used */ 696 /* Mark read part of skb as used */
667 if (!(flags & MSG_PEEK)) { 697 if (!(flags & MSG_PEEK)) {
668 698
669 /* SOCK_STREAM: re-queue skb if it contains unreceived data */ 699 /* SOCK_STREAM: re-queue skb if it contains unreceived data */
670 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_RAW) { 700 if (sk->sk_type == SOCK_STREAM ||
701 sk->sk_type == SOCK_DGRAM ||
702 sk->sk_type == SOCK_RAW) {
671 skb_pull(skb, copied); 703 skb_pull(skb, copied);
672 if (skb->len) { 704 if (skb->len) {
673 skb_queue_head(&sk->sk_receive_queue, skb); 705 skb_queue_head(&sk->sk_receive_queue, skb);
diff --git a/net/nfc/nci/Kconfig b/net/nfc/nci/Kconfig
index decdc49b26d8..6d69b5f0f19b 100644
--- a/net/nfc/nci/Kconfig
+++ b/net/nfc/nci/Kconfig
@@ -1,6 +1,6 @@
1config NFC_NCI 1config NFC_NCI
2 depends on NFC && EXPERIMENTAL 2 depends on NFC
3 tristate "NCI protocol support (EXPERIMENTAL)" 3 tristate "NCI protocol support"
4 default n 4 default n
5 help 5 help
6 NCI (NFC Controller Interface) is a communication protocol between 6 NCI (NFC Controller Interface) is a communication protocol between
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index acf9abb7d99b..5f98dc1bf039 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -205,10 +205,10 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
205 cmd.num_disc_configs = 0; 205 cmd.num_disc_configs = 0;
206 206
207 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && 207 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
208 (protocols & NFC_PROTO_JEWEL_MASK 208 (protocols & NFC_PROTO_JEWEL_MASK ||
209 || protocols & NFC_PROTO_MIFARE_MASK 209 protocols & NFC_PROTO_MIFARE_MASK ||
210 || protocols & NFC_PROTO_ISO14443_MASK 210 protocols & NFC_PROTO_ISO14443_MASK ||
211 || protocols & NFC_PROTO_NFC_DEP_MASK)) { 211 protocols & NFC_PROTO_NFC_DEP_MASK)) {
212 cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = 212 cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
213 NCI_NFC_A_PASSIVE_POLL_MODE; 213 NCI_NFC_A_PASSIVE_POLL_MODE;
214 cmd.disc_configs[cmd.num_disc_configs].frequency = 1; 214 cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
@@ -224,8 +224,8 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
224 } 224 }
225 225
226 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && 226 if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
227 (protocols & NFC_PROTO_FELICA_MASK 227 (protocols & NFC_PROTO_FELICA_MASK ||
228 || protocols & NFC_PROTO_NFC_DEP_MASK)) { 228 protocols & NFC_PROTO_NFC_DEP_MASK)) {
229 cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = 229 cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
230 NCI_NFC_F_PASSIVE_POLL_MODE; 230 NCI_NFC_F_PASSIVE_POLL_MODE;
231 cmd.disc_configs[cmd.num_disc_configs].frequency = 1; 231 cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
@@ -414,13 +414,13 @@ static int nci_set_local_general_bytes(struct nfc_dev *nfc_dev)
414 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 414 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
415 struct nci_set_config_param param; 415 struct nci_set_config_param param;
416 __u8 local_gb[NFC_MAX_GT_LEN]; 416 __u8 local_gb[NFC_MAX_GT_LEN];
417 int i, rc = 0; 417 int i;
418 418
419 param.val = nfc_get_local_general_bytes(nfc_dev, &param.len); 419 param.val = nfc_get_local_general_bytes(nfc_dev, &param.len);
420 if ((param.val == NULL) || (param.len == 0)) 420 if ((param.val == NULL) || (param.len == 0))
421 return rc; 421 return 0;
422 422
423 if (param.len > NCI_MAX_PARAM_LEN) 423 if (param.len > NFC_MAX_GT_LEN)
424 return -EINVAL; 424 return -EINVAL;
425 425
426 for (i = 0; i < param.len; i++) 426 for (i = 0; i < param.len; i++)
@@ -429,10 +429,8 @@ static int nci_set_local_general_bytes(struct nfc_dev *nfc_dev)
429 param.id = NCI_PN_ATR_REQ_GEN_BYTES; 429 param.id = NCI_PN_ATR_REQ_GEN_BYTES;
430 param.val = local_gb; 430 param.val = local_gb;
431 431
432 rc = nci_request(ndev, nci_set_config_req, (unsigned long)&param, 432 return nci_request(ndev, nci_set_config_req, (unsigned long)&param,
433 msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT)); 433 msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT));
434
435 return rc;
436} 434}
437 435
438static int nci_start_poll(struct nfc_dev *nfc_dev, 436static int nci_start_poll(struct nfc_dev *nfc_dev,
@@ -579,7 +577,6 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev,
579 } 577 }
580} 578}
581 579
582
583static int nci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, 580static int nci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
584 __u8 comm_mode, __u8 *gb, size_t gb_len) 581 __u8 comm_mode, __u8 *gb, size_t gb_len)
585{ 582{
@@ -806,8 +803,8 @@ int nci_recv_frame(struct sk_buff *skb)
806 803
807 pr_debug("len %d\n", skb->len); 804 pr_debug("len %d\n", skb->len);
808 805
809 if (!ndev || (!test_bit(NCI_UP, &ndev->flags) 806 if (!ndev || (!test_bit(NCI_UP, &ndev->flags) &&
810 && !test_bit(NCI_INIT, &ndev->flags))) { 807 !test_bit(NCI_INIT, &ndev->flags))) {
811 kfree_skb(skb); 808 kfree_skb(skb);
812 return -ENXIO; 809 return -ENXIO;
813 } 810 }
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index c1b5285cbde7..3568ae16786d 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -29,6 +29,8 @@
29 29
30#include "nfc.h" 30#include "nfc.h"
31 31
32#include "llcp/llcp.h"
33
32static struct genl_multicast_group nfc_genl_event_mcgrp = { 34static struct genl_multicast_group nfc_genl_event_mcgrp = {
33 .name = NFC_GENL_MCAST_EVENT_NAME, 35 .name = NFC_GENL_MCAST_EVENT_NAME,
34}; 36};
@@ -364,7 +366,8 @@ static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
364 if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || 366 if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) ||
365 nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || 367 nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
366 nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) || 368 nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) ||
367 nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up)) 369 nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up) ||
370 nla_put_u8(msg, NFC_ATTR_RF_MODE, dev->rf_mode))
368 goto nla_put_failure; 371 goto nla_put_failure;
369 372
370 return genlmsg_end(msg, hdr); 373 return genlmsg_end(msg, hdr);
@@ -590,7 +593,7 @@ static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
590 if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || 593 if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
591 ((!info->attrs[NFC_ATTR_IM_PROTOCOLS] && 594 ((!info->attrs[NFC_ATTR_IM_PROTOCOLS] &&
592 !info->attrs[NFC_ATTR_PROTOCOLS]) && 595 !info->attrs[NFC_ATTR_PROTOCOLS]) &&
593 !info->attrs[NFC_ATTR_TM_PROTOCOLS])) 596 !info->attrs[NFC_ATTR_TM_PROTOCOLS]))
594 return -EINVAL; 597 return -EINVAL;
595 598
596 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); 599 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
@@ -715,6 +718,146 @@ static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info)
715 return rc; 718 return rc;
716} 719}
717 720
721static int nfc_genl_send_params(struct sk_buff *msg,
722 struct nfc_llcp_local *local,
723 u32 portid, u32 seq)
724{
725 void *hdr;
726
727 hdr = genlmsg_put(msg, portid, seq, &nfc_genl_family, 0,
728 NFC_CMD_LLC_GET_PARAMS);
729 if (!hdr)
730 return -EMSGSIZE;
731
732 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, local->dev->idx) ||
733 nla_put_u8(msg, NFC_ATTR_LLC_PARAM_LTO, local->lto) ||
734 nla_put_u8(msg, NFC_ATTR_LLC_PARAM_RW, local->rw) ||
735 nla_put_u16(msg, NFC_ATTR_LLC_PARAM_MIUX, be16_to_cpu(local->miux)))
736 goto nla_put_failure;
737
738 return genlmsg_end(msg, hdr);
739
740nla_put_failure:
741
742 genlmsg_cancel(msg, hdr);
743 return -EMSGSIZE;
744}
745
746static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info)
747{
748 struct nfc_dev *dev;
749 struct nfc_llcp_local *local;
750 int rc = 0;
751 struct sk_buff *msg = NULL;
752 u32 idx;
753
754 if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
755 return -EINVAL;
756
757 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
758
759 dev = nfc_get_device(idx);
760 if (!dev)
761 return -ENODEV;
762
763 device_lock(&dev->dev);
764
765 local = nfc_llcp_find_local(dev);
766 if (!local) {
767 rc = -ENODEV;
768 goto exit;
769 }
770
771 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
772 if (!msg) {
773 rc = -ENOMEM;
774 goto exit;
775 }
776
777 rc = nfc_genl_send_params(msg, local, info->snd_portid, info->snd_seq);
778
779exit:
780 device_unlock(&dev->dev);
781
782 nfc_put_device(dev);
783
784 if (rc < 0) {
785 if (msg)
786 nlmsg_free(msg);
787
788 return rc;
789 }
790
791 return genlmsg_reply(msg, info);
792}
793
794static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info)
795{
796 struct nfc_dev *dev;
797 struct nfc_llcp_local *local;
798 u8 rw = 0;
799 u16 miux = 0;
800 u32 idx;
801 int rc = 0;
802
803 if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
804 (!info->attrs[NFC_ATTR_LLC_PARAM_LTO] &&
805 !info->attrs[NFC_ATTR_LLC_PARAM_RW] &&
806 !info->attrs[NFC_ATTR_LLC_PARAM_MIUX]))
807 return -EINVAL;
808
809 if (info->attrs[NFC_ATTR_LLC_PARAM_RW]) {
810 rw = nla_get_u8(info->attrs[NFC_ATTR_LLC_PARAM_RW]);
811
812 if (rw > LLCP_MAX_RW)
813 return -EINVAL;
814 }
815
816 if (info->attrs[NFC_ATTR_LLC_PARAM_MIUX]) {
817 miux = nla_get_u16(info->attrs[NFC_ATTR_LLC_PARAM_MIUX]);
818
819 if (miux > LLCP_MAX_MIUX)
820 return -EINVAL;
821 }
822
823 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
824
825 dev = nfc_get_device(idx);
826 if (!dev)
827 return -ENODEV;
828
829 device_lock(&dev->dev);
830
831 local = nfc_llcp_find_local(dev);
832 if (!local) {
833 nfc_put_device(dev);
834 rc = -ENODEV;
835 goto exit;
836 }
837
838 if (info->attrs[NFC_ATTR_LLC_PARAM_LTO]) {
839 if (dev->dep_link_up) {
840 rc = -EINPROGRESS;
841 goto exit;
842 }
843
844 local->lto = nla_get_u8(info->attrs[NFC_ATTR_LLC_PARAM_LTO]);
845 }
846
847 if (info->attrs[NFC_ATTR_LLC_PARAM_RW])
848 local->rw = rw;
849
850 if (info->attrs[NFC_ATTR_LLC_PARAM_MIUX])
851 local->miux = cpu_to_be16(miux);
852
853exit:
854 device_unlock(&dev->dev);
855
856 nfc_put_device(dev);
857
858 return rc;
859}
860
718static struct genl_ops nfc_genl_ops[] = { 861static struct genl_ops nfc_genl_ops[] = {
719 { 862 {
720 .cmd = NFC_CMD_GET_DEVICE, 863 .cmd = NFC_CMD_GET_DEVICE,
@@ -759,6 +902,16 @@ static struct genl_ops nfc_genl_ops[] = {
759 .done = nfc_genl_dump_targets_done, 902 .done = nfc_genl_dump_targets_done,
760 .policy = nfc_genl_policy, 903 .policy = nfc_genl_policy,
761 }, 904 },
905 {
906 .cmd = NFC_CMD_LLC_GET_PARAMS,
907 .doit = nfc_genl_llc_get_params,
908 .policy = nfc_genl_policy,
909 },
910 {
911 .cmd = NFC_CMD_LLC_SET_PARAMS,
912 .doit = nfc_genl_llc_set_params,
913 .policy = nfc_genl_policy,
914 },
762}; 915};
763 916
764 917
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index c5e42b79a418..87d914d2876a 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -56,6 +56,7 @@ void nfc_llcp_unregister_device(struct nfc_dev *dev);
56int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len); 56int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len);
57u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len); 57u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len);
58int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb); 58int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb);
59struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
59int __init nfc_llcp_init(void); 60int __init nfc_llcp_init(void);
60void nfc_llcp_exit(void); 61void nfc_llcp_exit(void);
61 62
@@ -97,6 +98,11 @@ static inline int nfc_llcp_data_received(struct nfc_dev *dev,
97 return 0; 98 return 0;
98} 99}
99 100
101static inline struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev)
102{
103 return NULL;
104}
105
100static inline int nfc_llcp_init(void) 106static inline int nfc_llcp_init(void)
101{ 107{
102 return 0; 108 return 0;
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 8b8a6a2b2bad..313bf1bc848a 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -256,7 +256,6 @@ static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
256 return rc ? : copied; 256 return rc ? : copied;
257} 257}
258 258
259
260static const struct proto_ops rawsock_ops = { 259static const struct proto_ops rawsock_ops = {
261 .family = PF_NFC, 260 .family = PF_NFC,
262 .owner = THIS_MODULE, 261 .owner = THIS_MODULE,
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 08114478cb85..ac2defeeba83 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -28,6 +28,7 @@
28#include <linux/if_arp.h> 28#include <linux/if_arp.h>
29#include <linux/if_vlan.h> 29#include <linux/if_vlan.h>
30#include <net/ip.h> 30#include <net/ip.h>
31#include <net/ipv6.h>
31#include <net/checksum.h> 32#include <net/checksum.h>
32#include <net/dsfield.h> 33#include <net/dsfield.h>
33 34
@@ -162,6 +163,53 @@ static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
162 *addr = new_addr; 163 *addr = new_addr;
163} 164}
164 165
166static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
167 __be32 addr[4], const __be32 new_addr[4])
168{
169 int transport_len = skb->len - skb_transport_offset(skb);
170
171 if (l4_proto == IPPROTO_TCP) {
172 if (likely(transport_len >= sizeof(struct tcphdr)))
173 inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
174 addr, new_addr, 1);
175 } else if (l4_proto == IPPROTO_UDP) {
176 if (likely(transport_len >= sizeof(struct udphdr))) {
177 struct udphdr *uh = udp_hdr(skb);
178
179 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
180 inet_proto_csum_replace16(&uh->check, skb,
181 addr, new_addr, 1);
182 if (!uh->check)
183 uh->check = CSUM_MANGLED_0;
184 }
185 }
186 }
187}
188
189static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
190 __be32 addr[4], const __be32 new_addr[4],
191 bool recalculate_csum)
192{
193 if (recalculate_csum)
194 update_ipv6_checksum(skb, l4_proto, addr, new_addr);
195
196 skb->rxhash = 0;
197 memcpy(addr, new_addr, sizeof(__be32[4]));
198}
199
200static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc)
201{
202 nh->priority = tc >> 4;
203 nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4);
204}
205
206static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl)
207{
208 nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | (fl & 0x000F0000) >> 16;
209 nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8;
210 nh->flow_lbl[2] = fl & 0x000000FF;
211}
212
165static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl) 213static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
166{ 214{
167 csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8)); 215 csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
@@ -195,6 +243,47 @@ static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
195 return 0; 243 return 0;
196} 244}
197 245
246static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key)
247{
248 struct ipv6hdr *nh;
249 int err;
250 __be32 *saddr;
251 __be32 *daddr;
252
253 err = make_writable(skb, skb_network_offset(skb) +
254 sizeof(struct ipv6hdr));
255 if (unlikely(err))
256 return err;
257
258 nh = ipv6_hdr(skb);
259 saddr = (__be32 *)&nh->saddr;
260 daddr = (__be32 *)&nh->daddr;
261
262 if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src)))
263 set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
264 ipv6_key->ipv6_src, true);
265
266 if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
267 unsigned int offset = 0;
268 int flags = IP6_FH_F_SKIP_RH;
269 bool recalc_csum = true;
270
271 if (ipv6_ext_hdr(nh->nexthdr))
272 recalc_csum = ipv6_find_hdr(skb, &offset,
273 NEXTHDR_ROUTING, NULL,
274 &flags) != NEXTHDR_ROUTING;
275
276 set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
277 ipv6_key->ipv6_dst, recalc_csum);
278 }
279
280 set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
281 set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
282 nh->hop_limit = ipv6_key->ipv6_hlimit;
283
284 return 0;
285}
286
198/* Must follow make_writable() since that can move the skb data. */ 287/* Must follow make_writable() since that can move the skb data. */
199static void set_tp_port(struct sk_buff *skb, __be16 *port, 288static void set_tp_port(struct sk_buff *skb, __be16 *port,
200 __be16 new_port, __sum16 *check) 289 __be16 new_port, __sum16 *check)
@@ -339,6 +428,10 @@ static int execute_set_action(struct sk_buff *skb,
339 skb->priority = nla_get_u32(nested_attr); 428 skb->priority = nla_get_u32(nested_attr);
340 break; 429 break;
341 430
431 case OVS_KEY_ATTR_SKB_MARK:
432 skb->mark = nla_get_u32(nested_attr);
433 break;
434
342 case OVS_KEY_ATTR_ETHERNET: 435 case OVS_KEY_ATTR_ETHERNET:
343 err = set_eth_addr(skb, nla_data(nested_attr)); 436 err = set_eth_addr(skb, nla_data(nested_attr));
344 break; 437 break;
@@ -347,6 +440,10 @@ static int execute_set_action(struct sk_buff *skb,
347 err = set_ipv4(skb, nla_data(nested_attr)); 440 err = set_ipv4(skb, nla_data(nested_attr));
348 break; 441 break;
349 442
443 case OVS_KEY_ATTR_IPV6:
444 err = set_ipv6(skb, nla_data(nested_attr));
445 break;
446
350 case OVS_KEY_ATTR_TCP: 447 case OVS_KEY_ATTR_TCP:
351 err = set_tcp(skb, nla_data(nested_attr)); 448 err = set_tcp(skb, nla_data(nested_attr));
352 break; 449 break;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 4c4b62ccc7d7..f996db343247 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -208,7 +208,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
208 int error; 208 int error;
209 int key_len; 209 int key_len;
210 210
211 stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id()); 211 stats = this_cpu_ptr(dp->stats_percpu);
212 212
213 /* Extract flow from 'skb' into 'key'. */ 213 /* Extract flow from 'skb' into 'key'. */
214 error = ovs_flow_extract(skb, p->port_no, &key, &key_len); 214 error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
@@ -282,7 +282,7 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
282 return 0; 282 return 0;
283 283
284err: 284err:
285 stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id()); 285 stats = this_cpu_ptr(dp->stats_percpu);
286 286
287 u64_stats_update_begin(&stats->sync); 287 u64_stats_update_begin(&stats->sync);
288 stats->n_lost++; 288 stats->n_lost++;
@@ -479,8 +479,10 @@ static int validate_set(const struct nlattr *a,
479 479
480 switch (key_type) { 480 switch (key_type) {
481 const struct ovs_key_ipv4 *ipv4_key; 481 const struct ovs_key_ipv4 *ipv4_key;
482 const struct ovs_key_ipv6 *ipv6_key;
482 483
483 case OVS_KEY_ATTR_PRIORITY: 484 case OVS_KEY_ATTR_PRIORITY:
485 case OVS_KEY_ATTR_SKB_MARK:
484 case OVS_KEY_ATTR_ETHERNET: 486 case OVS_KEY_ATTR_ETHERNET:
485 break; 487 break;
486 488
@@ -500,6 +502,25 @@ static int validate_set(const struct nlattr *a,
500 502
501 break; 503 break;
502 504
505 case OVS_KEY_ATTR_IPV6:
506 if (flow_key->eth.type != htons(ETH_P_IPV6))
507 return -EINVAL;
508
509 if (!flow_key->ip.proto)
510 return -EINVAL;
511
512 ipv6_key = nla_data(ovs_key);
513 if (ipv6_key->ipv6_proto != flow_key->ip.proto)
514 return -EINVAL;
515
516 if (ipv6_key->ipv6_frag != flow_key->ip.frag)
517 return -EINVAL;
518
519 if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
520 return -EINVAL;
521
522 break;
523
503 case OVS_KEY_ATTR_TCP: 524 case OVS_KEY_ATTR_TCP:
504 if (flow_key->ip.proto != IPPROTO_TCP) 525 if (flow_key->ip.proto != IPPROTO_TCP)
505 return -EINVAL; 526 return -EINVAL;
@@ -675,6 +696,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
675 goto err_flow_free; 696 goto err_flow_free;
676 697
677 err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority, 698 err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority,
699 &flow->key.phy.skb_mark,
678 &flow->key.phy.in_port, 700 &flow->key.phy.in_port,
679 a[OVS_PACKET_ATTR_KEY]); 701 a[OVS_PACKET_ATTR_KEY]);
680 if (err) 702 if (err)
@@ -694,6 +716,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
694 716
695 OVS_CB(packet)->flow = flow; 717 OVS_CB(packet)->flow = flow;
696 packet->priority = flow->key.phy.priority; 718 packet->priority = flow->key.phy.priority;
719 packet->mark = flow->key.phy.skb_mark;
697 720
698 rcu_read_lock(); 721 rcu_read_lock();
699 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); 722 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 98c70630ad06..c3294cebc4f2 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -604,6 +604,7 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
604 604
605 key->phy.priority = skb->priority; 605 key->phy.priority = skb->priority;
606 key->phy.in_port = in_port; 606 key->phy.in_port = in_port;
607 key->phy.skb_mark = skb->mark;
607 608
608 skb_reset_mac_header(skb); 609 skb_reset_mac_header(skb);
609 610
@@ -689,7 +690,8 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
689 } 690 }
690 } 691 }
691 692
692 } else if (key->eth.type == htons(ETH_P_ARP) && arphdr_ok(skb)) { 693 } else if ((key->eth.type == htons(ETH_P_ARP) ||
694 key->eth.type == htons(ETH_P_RARP)) && arphdr_ok(skb)) {
693 struct arp_eth_header *arp; 695 struct arp_eth_header *arp;
694 696
695 arp = (struct arp_eth_header *)skb_network_header(skb); 697 arp = (struct arp_eth_header *)skb_network_header(skb);
@@ -702,15 +704,11 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
702 /* We only match on the lower 8 bits of the opcode. */ 704 /* We only match on the lower 8 bits of the opcode. */
703 if (ntohs(arp->ar_op) <= 0xff) 705 if (ntohs(arp->ar_op) <= 0xff)
704 key->ip.proto = ntohs(arp->ar_op); 706 key->ip.proto = ntohs(arp->ar_op);
705 707 memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
706 if (key->ip.proto == ARPOP_REQUEST 708 memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
707 || key->ip.proto == ARPOP_REPLY) { 709 memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
708 memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src)); 710 memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
709 memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst)); 711 key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
710 memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
711 memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
712 key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
713 }
714 } 712 }
715 } else if (key->eth.type == htons(ETH_P_IPV6)) { 713 } else if (key->eth.type == htons(ETH_P_IPV6)) {
716 int nh_len; /* IPv6 Header + Extensions */ 714 int nh_len; /* IPv6 Header + Extensions */
@@ -806,6 +804,7 @@ const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
806 [OVS_KEY_ATTR_ENCAP] = -1, 804 [OVS_KEY_ATTR_ENCAP] = -1,
807 [OVS_KEY_ATTR_PRIORITY] = sizeof(u32), 805 [OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
808 [OVS_KEY_ATTR_IN_PORT] = sizeof(u32), 806 [OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
807 [OVS_KEY_ATTR_SKB_MARK] = sizeof(u32),
809 [OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet), 808 [OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
810 [OVS_KEY_ATTR_VLAN] = sizeof(__be16), 809 [OVS_KEY_ATTR_VLAN] = sizeof(__be16),
811 [OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16), 810 [OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
@@ -991,6 +990,10 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
991 } else { 990 } else {
992 swkey->phy.in_port = DP_MAX_PORTS; 991 swkey->phy.in_port = DP_MAX_PORTS;
993 } 992 }
993 if (attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
994 swkey->phy.skb_mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
995 attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
996 }
994 997
995 /* Data attributes. */ 998 /* Data attributes. */
996 if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET))) 999 if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET)))
@@ -1090,7 +1093,8 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
1090 if (err) 1093 if (err)
1091 return err; 1094 return err;
1092 } 1095 }
1093 } else if (swkey->eth.type == htons(ETH_P_ARP)) { 1096 } else if (swkey->eth.type == htons(ETH_P_ARP) ||
1097 swkey->eth.type == htons(ETH_P_RARP)) {
1094 const struct ovs_key_arp *arp_key; 1098 const struct ovs_key_arp *arp_key;
1095 1099
1096 if (!(attrs & (1 << OVS_KEY_ATTR_ARP))) 1100 if (!(attrs & (1 << OVS_KEY_ATTR_ARP)))
@@ -1117,6 +1121,8 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
1117 1121
1118/** 1122/**
1119 * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key. 1123 * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
1124 * @priority: receives the skb priority
1125 * @mark: receives the skb mark
1120 * @in_port: receives the extracted input port. 1126 * @in_port: receives the extracted input port.
1121 * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute 1127 * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
1122 * sequence. 1128 * sequence.
@@ -1126,7 +1132,7 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
1126 * get the metadata, that is, the parts of the flow key that cannot be 1132 * get the metadata, that is, the parts of the flow key that cannot be
1127 * extracted from the packet itself. 1133 * extracted from the packet itself.
1128 */ 1134 */
1129int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, 1135int ovs_flow_metadata_from_nlattrs(u32 *priority, u32 *mark, u16 *in_port,
1130 const struct nlattr *attr) 1136 const struct nlattr *attr)
1131{ 1137{
1132 const struct nlattr *nla; 1138 const struct nlattr *nla;
@@ -1134,6 +1140,7 @@ int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
1134 1140
1135 *in_port = DP_MAX_PORTS; 1141 *in_port = DP_MAX_PORTS;
1136 *priority = 0; 1142 *priority = 0;
1143 *mark = 0;
1137 1144
1138 nla_for_each_nested(nla, attr, rem) { 1145 nla_for_each_nested(nla, attr, rem) {
1139 int type = nla_type(nla); 1146 int type = nla_type(nla);
@@ -1152,6 +1159,10 @@ int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
1152 return -EINVAL; 1159 return -EINVAL;
1153 *in_port = nla_get_u32(nla); 1160 *in_port = nla_get_u32(nla);
1154 break; 1161 break;
1162
1163 case OVS_KEY_ATTR_SKB_MARK:
1164 *mark = nla_get_u32(nla);
1165 break;
1155 } 1166 }
1156 } 1167 }
1157 } 1168 }
@@ -1173,6 +1184,10 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
1173 nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port)) 1184 nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
1174 goto nla_put_failure; 1185 goto nla_put_failure;
1175 1186
1187 if (swkey->phy.skb_mark &&
1188 nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, swkey->phy.skb_mark))
1189 goto nla_put_failure;
1190
1176 nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key)); 1191 nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
1177 if (!nla) 1192 if (!nla)
1178 goto nla_put_failure; 1193 goto nla_put_failure;
@@ -1226,7 +1241,8 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
1226 ipv6_key->ipv6_tclass = swkey->ip.tos; 1241 ipv6_key->ipv6_tclass = swkey->ip.tos;
1227 ipv6_key->ipv6_hlimit = swkey->ip.ttl; 1242 ipv6_key->ipv6_hlimit = swkey->ip.ttl;
1228 ipv6_key->ipv6_frag = swkey->ip.frag; 1243 ipv6_key->ipv6_frag = swkey->ip.frag;
1229 } else if (swkey->eth.type == htons(ETH_P_ARP)) { 1244 } else if (swkey->eth.type == htons(ETH_P_ARP) ||
1245 swkey->eth.type == htons(ETH_P_RARP)) {
1230 struct ovs_key_arp *arp_key; 1246 struct ovs_key_arp *arp_key;
1231 1247
1232 nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key)); 1248 nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 14a324eb017b..a7bb60ff3b5b 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -43,6 +43,7 @@ struct sw_flow_actions {
43struct sw_flow_key { 43struct sw_flow_key {
44 struct { 44 struct {
45 u32 priority; /* Packet QoS priority. */ 45 u32 priority; /* Packet QoS priority. */
46 u32 skb_mark; /* SKB mark. */
46 u16 in_port; /* Input switch port (or DP_MAX_PORTS). */ 47 u16 in_port; /* Input switch port (or DP_MAX_PORTS). */
47 } phy; 48 } phy;
48 struct { 49 struct {
@@ -144,6 +145,7 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies);
144 * ------ --- ------ ----- 145 * ------ --- ------ -----
145 * OVS_KEY_ATTR_PRIORITY 4 -- 4 8 146 * OVS_KEY_ATTR_PRIORITY 4 -- 4 8
146 * OVS_KEY_ATTR_IN_PORT 4 -- 4 8 147 * OVS_KEY_ATTR_IN_PORT 4 -- 4 8
148 * OVS_KEY_ATTR_SKB_MARK 4 -- 4 8
147 * OVS_KEY_ATTR_ETHERNET 12 -- 4 16 149 * OVS_KEY_ATTR_ETHERNET 12 -- 4 16
148 * OVS_KEY_ATTR_ETHERTYPE 2 2 4 8 (outer VLAN ethertype) 150 * OVS_KEY_ATTR_ETHERTYPE 2 2 4 8 (outer VLAN ethertype)
149 * OVS_KEY_ATTR_8021Q 4 -- 4 8 151 * OVS_KEY_ATTR_8021Q 4 -- 4 8
@@ -153,14 +155,14 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies);
153 * OVS_KEY_ATTR_ICMPV6 2 2 4 8 155 * OVS_KEY_ATTR_ICMPV6 2 2 4 8
154 * OVS_KEY_ATTR_ND 28 -- 4 32 156 * OVS_KEY_ATTR_ND 28 -- 4 32
155 * ------------------------------------------------- 157 * -------------------------------------------------
156 * total 144 158 * total 152
157 */ 159 */
158#define FLOW_BUFSIZE 144 160#define FLOW_BUFSIZE 152
159 161
160int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *); 162int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *);
161int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, 163int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
162 const struct nlattr *); 164 const struct nlattr *);
163int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, 165int ovs_flow_metadata_from_nlattrs(u32 *priority, u32 *mark, u16 *in_port,
164 const struct nlattr *); 166 const struct nlattr *);
165 167
166#define MAX_ACTIONS_BUFSIZE (16 * 1024) 168#define MAX_ACTIONS_BUFSIZE (16 * 1024)
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 3c1e58ba714b..a9327e2e48ce 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -114,6 +114,15 @@ error:
114 return ERR_PTR(err); 114 return ERR_PTR(err);
115} 115}
116 116
117static void free_port_rcu(struct rcu_head *rcu)
118{
119 struct netdev_vport *netdev_vport = container_of(rcu,
120 struct netdev_vport, rcu);
121
122 dev_put(netdev_vport->dev);
123 ovs_vport_free(vport_from_priv(netdev_vport));
124}
125
117static void netdev_destroy(struct vport *vport) 126static void netdev_destroy(struct vport *vport)
118{ 127{
119 struct netdev_vport *netdev_vport = netdev_vport_priv(vport); 128 struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
@@ -122,10 +131,7 @@ static void netdev_destroy(struct vport *vport)
122 netdev_rx_handler_unregister(netdev_vport->dev); 131 netdev_rx_handler_unregister(netdev_vport->dev);
123 dev_set_promiscuity(netdev_vport->dev, -1); 132 dev_set_promiscuity(netdev_vport->dev, -1);
124 133
125 synchronize_rcu(); 134 call_rcu(&netdev_vport->rcu, free_port_rcu);
126
127 dev_put(netdev_vport->dev);
128 ovs_vport_free(vport);
129} 135}
130 136
131const char *ovs_netdev_get_name(const struct vport *vport) 137const char *ovs_netdev_get_name(const struct vport *vport)
@@ -158,7 +164,7 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb)
158 164
159 if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) { 165 if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
160 net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n", 166 net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
161 ovs_dp_name(vport->dp), 167 netdev_vport->dev->name,
162 packet_length(skb), mtu); 168 packet_length(skb), mtu);
163 goto error; 169 goto error;
164 } 170 }
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
index f7072a25c604..6478079b3417 100644
--- a/net/openvswitch/vport-netdev.h
+++ b/net/openvswitch/vport-netdev.h
@@ -20,12 +20,15 @@
20#define VPORT_NETDEV_H 1 20#define VPORT_NETDEV_H 1
21 21
22#include <linux/netdevice.h> 22#include <linux/netdevice.h>
23#include <linux/rcupdate.h>
23 24
24#include "vport.h" 25#include "vport.h"
25 26
26struct vport *ovs_netdev_get_vport(struct net_device *dev); 27struct vport *ovs_netdev_get_vport(struct net_device *dev);
27 28
28struct netdev_vport { 29struct netdev_vport {
30 struct rcu_head rcu;
31
29 struct net_device *dev; 32 struct net_device *dev;
30}; 33};
31 34
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 03779e8a2622..70af0bedbac4 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -333,8 +333,7 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
333{ 333{
334 struct vport_percpu_stats *stats; 334 struct vport_percpu_stats *stats;
335 335
336 stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id()); 336 stats = this_cpu_ptr(vport->percpu_stats);
337
338 u64_stats_update_begin(&stats->sync); 337 u64_stats_update_begin(&stats->sync);
339 stats->rx_packets++; 338 stats->rx_packets++;
340 stats->rx_bytes += skb->len; 339 stats->rx_bytes += skb->len;
@@ -359,7 +358,7 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
359 if (likely(sent)) { 358 if (likely(sent)) {
360 struct vport_percpu_stats *stats; 359 struct vport_percpu_stats *stats;
361 360
362 stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id()); 361 stats = this_cpu_ptr(vport->percpu_stats);
363 362
364 u64_stats_update_begin(&stats->sync); 363 u64_stats_update_begin(&stats->sync);
365 stats->tx_packets++; 364 stats->tx_packets++;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 94060edbbd70..e639645e8fec 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1881,7 +1881,35 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
1881 skb_reserve(skb, hlen); 1881 skb_reserve(skb, hlen);
1882 skb_reset_network_header(skb); 1882 skb_reset_network_header(skb);
1883 1883
1884 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll); 1884 if (po->tp_tx_has_off) {
1885 int off_min, off_max, off;
1886 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
1887 off_max = po->tx_ring.frame_size - tp_len;
1888 if (sock->type == SOCK_DGRAM) {
1889 switch (po->tp_version) {
1890 case TPACKET_V2:
1891 off = ph.h2->tp_net;
1892 break;
1893 default:
1894 off = ph.h1->tp_net;
1895 break;
1896 }
1897 } else {
1898 switch (po->tp_version) {
1899 case TPACKET_V2:
1900 off = ph.h2->tp_mac;
1901 break;
1902 default:
1903 off = ph.h1->tp_mac;
1904 break;
1905 }
1906 }
1907 if (unlikely((off < off_min) || (off_max < off)))
1908 return -EINVAL;
1909 data = ph.raw + off;
1910 } else {
1911 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
1912 }
1885 to_write = tp_len; 1913 to_write = tp_len;
1886 1914
1887 if (sock->type == SOCK_DGRAM) { 1915 if (sock->type == SOCK_DGRAM) {
@@ -1907,7 +1935,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
1907 to_write -= dev->hard_header_len; 1935 to_write -= dev->hard_header_len;
1908 } 1936 }
1909 1937
1910 err = -EFAULT;
1911 offset = offset_in_page(data); 1938 offset = offset_in_page(data);
1912 len_max = PAGE_SIZE - offset; 1939 len_max = PAGE_SIZE - offset;
1913 len = ((to_write > len_max) ? len_max : to_write); 1940 len = ((to_write > len_max) ? len_max : to_write);
@@ -1957,7 +1984,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
1957 1984
1958 mutex_lock(&po->pg_vec_lock); 1985 mutex_lock(&po->pg_vec_lock);
1959 1986
1960 err = -EBUSY;
1961 if (saddr == NULL) { 1987 if (saddr == NULL) {
1962 dev = po->prot_hook.dev; 1988 dev = po->prot_hook.dev;
1963 proto = po->num; 1989 proto = po->num;
@@ -2478,7 +2504,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
2478 __be16 proto = (__force __be16)protocol; /* weird, but documented */ 2504 __be16 proto = (__force __be16)protocol; /* weird, but documented */
2479 int err; 2505 int err;
2480 2506
2481 if (!capable(CAP_NET_RAW)) 2507 if (!ns_capable(net->user_ns, CAP_NET_RAW))
2482 return -EPERM; 2508 return -EPERM;
2483 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && 2509 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
2484 sock->type != SOCK_PACKET) 2510 sock->type != SOCK_PACKET)
@@ -3111,6 +3137,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3111 3137
3112 return fanout_add(sk, val & 0xffff, val >> 16); 3138 return fanout_add(sk, val & 0xffff, val >> 16);
3113 } 3139 }
3140 case PACKET_TX_HAS_OFF:
3141 {
3142 unsigned int val;
3143
3144 if (optlen != sizeof(val))
3145 return -EINVAL;
3146 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3147 return -EBUSY;
3148 if (copy_from_user(&val, optval, sizeof(val)))
3149 return -EFAULT;
3150 po->tp_tx_has_off = !!val;
3151 return 0;
3152 }
3114 default: 3153 default:
3115 return -ENOPROTOOPT; 3154 return -ENOPROTOOPT;
3116 } 3155 }
@@ -3202,6 +3241,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3202 ((u32)po->fanout->type << 16)) : 3241 ((u32)po->fanout->type << 16)) :
3203 0); 3242 0);
3204 break; 3243 break;
3244 case PACKET_TX_HAS_OFF:
3245 val = po->tp_tx_has_off;
3246 break;
3205 default: 3247 default:
3206 return -ENOPROTOOPT; 3248 return -ENOPROTOOPT;
3207 } 3249 }
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 44945f6b7252..e84cab8cb7a9 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -109,6 +109,7 @@ struct packet_sock {
109 unsigned int tp_hdrlen; 109 unsigned int tp_hdrlen;
110 unsigned int tp_reserve; 110 unsigned int tp_reserve;
111 unsigned int tp_loss:1; 111 unsigned int tp_loss:1;
112 unsigned int tp_tx_has_off:1;
112 unsigned int tp_tstamp; 113 unsigned int tp_tstamp;
113 struct packet_type prot_hook ____cacheline_aligned_in_smp; 114 struct packet_type prot_hook ____cacheline_aligned_in_smp;
114}; 115};
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index 83a8389619aa..0193630d3061 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -70,6 +70,9 @@ static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr)
70 int err; 70 int err;
71 u8 pnaddr; 71 u8 pnaddr;
72 72
73 if (!capable(CAP_NET_ADMIN))
74 return -EPERM;
75
73 if (!capable(CAP_SYS_ADMIN)) 76 if (!capable(CAP_SYS_ADMIN))
74 return -EPERM; 77 return -EPERM;
75 78
@@ -230,6 +233,9 @@ static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr)
230 int err; 233 int err;
231 u8 dst; 234 u8 dst;
232 235
236 if (!capable(CAP_NET_ADMIN))
237 return -EPERM;
238
233 if (!capable(CAP_SYS_ADMIN)) 239 if (!capable(CAP_SYS_ADMIN))
234 return -EPERM; 240 return -EPERM;
235 241
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 8d2b3d5a7c21..7280ab8810c2 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -50,7 +50,7 @@ struct rds_ib_cache_head {
50}; 50};
51 51
52struct rds_ib_refill_cache { 52struct rds_ib_refill_cache {
53 struct rds_ib_cache_head *percpu; 53 struct rds_ib_cache_head __percpu *percpu;
54 struct list_head *xfer; 54 struct list_head *xfer;
55 struct list_head *ready; 55 struct list_head *ready;
56}; 56};
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index a1e116277477..31b74f5e61ad 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -434,12 +434,11 @@ static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
434 version = RDS_PROTOCOL_3_0; 434 version = RDS_PROTOCOL_3_0;
435 while ((common >>= 1) != 0) 435 while ((common >>= 1) != 0)
436 version++; 436 version++;
437 } 437 } else
438 printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using " 438 printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n",
439 "incompatible protocol version %u.%u\n", 439 &dp->dp_saddr,
440 &dp->dp_saddr, 440 dp->dp_protocol_major,
441 dp->dp_protocol_major, 441 dp->dp_protocol_minor);
442 dp->dp_protocol_minor);
443 return version; 442 return version;
444} 443}
445 444
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 8d194912c695..8eb9501e3d60 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -339,8 +339,8 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
339 sge->length = sizeof(struct rds_header); 339 sge->length = sizeof(struct rds_header);
340 340
341 sge = &recv->r_sge[1]; 341 sge = &recv->r_sge[1];
342 sge->addr = sg_dma_address(&recv->r_frag->f_sg); 342 sge->addr = ib_sg_dma_address(ic->i_cm_id->device, &recv->r_frag->f_sg);
343 sge->length = sg_dma_len(&recv->r_frag->f_sg); 343 sge->length = ib_sg_dma_len(ic->i_cm_id->device, &recv->r_frag->f_sg);
344 344
345 ret = 0; 345 ret = 0;
346out: 346out:
@@ -381,7 +381,10 @@ void rds_ib_recv_refill(struct rds_connection *conn, int prefill)
381 ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr); 381 ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
382 rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv, 382 rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
383 recv->r_ibinc, sg_page(&recv->r_frag->f_sg), 383 recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
384 (long) sg_dma_address(&recv->r_frag->f_sg), ret); 384 (long) ib_sg_dma_address(
385 ic->i_cm_id->device,
386 &recv->r_frag->f_sg),
387 ret);
385 if (ret) { 388 if (ret) {
386 rds_ib_conn_error(conn, "recv post on " 389 rds_ib_conn_error(conn, "recv post on "
387 "%pI4 returned %d, disconnecting and " 390 "%pI4 returned %d, disconnecting and "
@@ -418,20 +421,21 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
418 struct rds_ib_refill_cache *cache) 421 struct rds_ib_refill_cache *cache)
419{ 422{
420 unsigned long flags; 423 unsigned long flags;
421 struct rds_ib_cache_head *chp;
422 struct list_head *old; 424 struct list_head *old;
425 struct list_head __percpu *chpfirst;
423 426
424 local_irq_save(flags); 427 local_irq_save(flags);
425 428
426 chp = per_cpu_ptr(cache->percpu, smp_processor_id()); 429 chpfirst = __this_cpu_read(cache->percpu->first);
427 if (!chp->first) 430 if (!chpfirst)
428 INIT_LIST_HEAD(new_item); 431 INIT_LIST_HEAD(new_item);
429 else /* put on front */ 432 else /* put on front */
430 list_add_tail(new_item, chp->first); 433 list_add_tail(new_item, chpfirst);
431 chp->first = new_item;
432 chp->count++;
433 434
434 if (chp->count < RDS_IB_RECYCLE_BATCH_COUNT) 435 __this_cpu_write(chpfirst, new_item);
436 __this_cpu_inc(cache->percpu->count);
437
438 if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
435 goto end; 439 goto end;
436 440
437 /* 441 /*
@@ -443,12 +447,13 @@ static void rds_ib_recv_cache_put(struct list_head *new_item,
443 do { 447 do {
444 old = xchg(&cache->xfer, NULL); 448 old = xchg(&cache->xfer, NULL);
445 if (old) 449 if (old)
446 list_splice_entire_tail(old, chp->first); 450 list_splice_entire_tail(old, chpfirst);
447 old = cmpxchg(&cache->xfer, NULL, chp->first); 451 old = cmpxchg(&cache->xfer, NULL, chpfirst);
448 } while (old); 452 } while (old);
449 453
450 chp->first = NULL; 454
451 chp->count = 0; 455 __this_cpu_write(chpfirst, NULL);
456 __this_cpu_write(cache->percpu->count, 0);
452end: 457end:
453 local_irq_restore(flags); 458 local_irq_restore(flags);
454} 459}
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index a5c952741279..9b9be5279f5d 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -676,7 +676,7 @@ static ssize_t rfkill_soft_store(struct device *dev,
676 rfkill_set_block(rfkill, state); 676 rfkill_set_block(rfkill, state);
677 mutex_unlock(&rfkill_global_mutex); 677 mutex_unlock(&rfkill_global_mutex);
678 678
679 return err ?: count; 679 return count;
680} 680}
681 681
682static u8 user_state_from_blocked(unsigned long state) 682static u8 user_state_from_blocked(unsigned long state)
@@ -721,7 +721,7 @@ static ssize_t rfkill_state_store(struct device *dev,
721 rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED); 721 rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
722 mutex_unlock(&rfkill_global_mutex); 722 mutex_unlock(&rfkill_global_mutex);
723 723
724 return err ?: count; 724 return count;
725} 725}
726 726
727static ssize_t rfkill_claim_show(struct device *dev, 727static ssize_t rfkill_claim_show(struct device *dev,
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 865adb61685a..78fc0937948d 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -213,7 +213,7 @@ static int rfkill_gpio_remove(struct platform_device *pdev)
213 213
214static struct platform_driver rfkill_gpio_driver = { 214static struct platform_driver rfkill_gpio_driver = {
215 .probe = rfkill_gpio_probe, 215 .probe = rfkill_gpio_probe,
216 .remove = __devexit_p(rfkill_gpio_remove), 216 .remove = rfkill_gpio_remove,
217 .driver = { 217 .driver = {
218 .name = "rfkill_gpio", 218 .name = "rfkill_gpio",
219 .owner = THIS_MODULE, 219 .owner = THIS_MODULE,
diff --git a/net/rfkill/rfkill-regulator.c b/net/rfkill/rfkill-regulator.c
index 11da3018a853..4b5ab21ecb24 100644
--- a/net/rfkill/rfkill-regulator.c
+++ b/net/rfkill/rfkill-regulator.c
@@ -55,7 +55,7 @@ struct rfkill_ops rfkill_regulator_ops = {
55 .set_block = rfkill_regulator_set_block, 55 .set_block = rfkill_regulator_set_block,
56}; 56};
57 57
58static int __devinit rfkill_regulator_probe(struct platform_device *pdev) 58static int rfkill_regulator_probe(struct platform_device *pdev)
59{ 59{
60 struct rfkill_regulator_platform_data *pdata = pdev->dev.platform_data; 60 struct rfkill_regulator_platform_data *pdata = pdev->dev.platform_data;
61 struct rfkill_regulator_data *rfkill_data; 61 struct rfkill_regulator_data *rfkill_data;
@@ -122,7 +122,7 @@ out:
122 return ret; 122 return ret;
123} 123}
124 124
125static int __devexit rfkill_regulator_remove(struct platform_device *pdev) 125static int rfkill_regulator_remove(struct platform_device *pdev)
126{ 126{
127 struct rfkill_regulator_data *rfkill_data = platform_get_drvdata(pdev); 127 struct rfkill_regulator_data *rfkill_data = platform_get_drvdata(pdev);
128 struct rfkill *rf_kill = rfkill_data->rf_kill; 128 struct rfkill *rf_kill = rfkill_data->rf_kill;
@@ -137,7 +137,7 @@ static int __devexit rfkill_regulator_remove(struct platform_device *pdev)
137 137
138static struct platform_driver rfkill_regulator_driver = { 138static struct platform_driver rfkill_regulator_driver = {
139 .probe = rfkill_regulator_probe, 139 .probe = rfkill_regulator_probe,
140 .remove = __devexit_p(rfkill_regulator_remove), 140 .remove = rfkill_regulator_remove,
141 .driver = { 141 .driver = {
142 .name = "rfkill-regulator", 142 .name = "rfkill-regulator",
143 .owner = THIS_MODULE, 143 .owner = THIS_MODULE,
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 62fb51face8a..235e01acac51 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -509,7 +509,7 @@ config NET_EMATCH_TEXT
509 509
510config NET_EMATCH_CANID 510config NET_EMATCH_CANID
511 tristate "CAN Identifier" 511 tristate "CAN Identifier"
512 depends on NET_EMATCH && CAN 512 depends on NET_EMATCH && (CAN=y || CAN=m)
513 ---help--- 513 ---help---
514 Say Y here if you want to be able to classify CAN frames based 514 Say Y here if you want to be able to classify CAN frames based
515 on CAN Identifier. 515 on CAN Identifier.
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 102761d294cb..65d240cbf74b 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -987,6 +987,9 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
987 u32 portid = skb ? NETLINK_CB(skb).portid : 0; 987 u32 portid = skb ? NETLINK_CB(skb).portid : 0;
988 int ret = 0, ovr = 0; 988 int ret = 0, ovr = 0;
989 989
990 if ((n->nlmsg_type != RTM_GETACTION) && !capable(CAP_NET_ADMIN))
991 return -EPERM;
992
990 ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL); 993 ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
991 if (ret < 0) 994 if (ret < 0)
992 return ret; 995 return ret;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 7ae02892437c..ff55ed6c49b2 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -139,6 +139,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
139 int err; 139 int err;
140 int tp_created = 0; 140 int tp_created = 0;
141 141
142 if ((n->nlmsg_type != RTM_GETTFILTER) && !capable(CAP_NET_ADMIN))
143 return -EPERM;
142replay: 144replay:
143 t = nlmsg_data(n); 145 t = nlmsg_data(n);
144 protocol = TC_H_MIN(t->tcm_info); 146 protocol = TC_H_MIN(t->tcm_info);
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 2ecde225ae60..6db7855b9029 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -17,6 +17,7 @@
17#include <linux/skbuff.h> 17#include <linux/skbuff.h>
18#include <linux/cgroup.h> 18#include <linux/cgroup.h>
19#include <linux/rcupdate.h> 19#include <linux/rcupdate.h>
20#include <linux/fdtable.h>
20#include <net/rtnetlink.h> 21#include <net/rtnetlink.h>
21#include <net/pkt_cls.h> 22#include <net/pkt_cls.h>
22#include <net/sock.h> 23#include <net/sock.h>
@@ -34,25 +35,51 @@ static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
34 struct cgroup_cls_state, css); 35 struct cgroup_cls_state, css);
35} 36}
36 37
37static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp) 38static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp)
38{ 39{
39 struct cgroup_cls_state *cs; 40 struct cgroup_cls_state *cs;
40 41
41 cs = kzalloc(sizeof(*cs), GFP_KERNEL); 42 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
42 if (!cs) 43 if (!cs)
43 return ERR_PTR(-ENOMEM); 44 return ERR_PTR(-ENOMEM);
45 return &cs->css;
46}
44 47
48static int cgrp_css_online(struct cgroup *cgrp)
49{
45 if (cgrp->parent) 50 if (cgrp->parent)
46 cs->classid = cgrp_cls_state(cgrp->parent)->classid; 51 cgrp_cls_state(cgrp)->classid =
47 52 cgrp_cls_state(cgrp->parent)->classid;
48 return &cs->css; 53 return 0;
49} 54}
50 55
51static void cgrp_destroy(struct cgroup *cgrp) 56static void cgrp_css_free(struct cgroup *cgrp)
52{ 57{
53 kfree(cgrp_cls_state(cgrp)); 58 kfree(cgrp_cls_state(cgrp));
54} 59}
55 60
61static int update_classid(const void *v, struct file *file, unsigned n)
62{
63 int err;
64 struct socket *sock = sock_from_file(file, &err);
65 if (sock)
66 sock->sk->sk_classid = (u32)(unsigned long)v;
67 return 0;
68}
69
70static void cgrp_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
71{
72 struct task_struct *p;
73 void *v;
74
75 cgroup_taskset_for_each(p, cgrp, tset) {
76 task_lock(p);
77 v = (void *)(unsigned long)task_cls_classid(p);
78 iterate_fd(p->files, 0, update_classid, v);
79 task_unlock(p);
80 }
81}
82
56static u64 read_classid(struct cgroup *cgrp, struct cftype *cft) 83static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
57{ 84{
58 return cgrp_cls_state(cgrp)->classid; 85 return cgrp_cls_state(cgrp)->classid;
@@ -75,20 +102,13 @@ static struct cftype ss_files[] = {
75 102
76struct cgroup_subsys net_cls_subsys = { 103struct cgroup_subsys net_cls_subsys = {
77 .name = "net_cls", 104 .name = "net_cls",
78 .create = cgrp_create, 105 .css_alloc = cgrp_css_alloc,
79 .destroy = cgrp_destroy, 106 .css_online = cgrp_css_online,
107 .css_free = cgrp_css_free,
108 .attach = cgrp_attach,
80 .subsys_id = net_cls_subsys_id, 109 .subsys_id = net_cls_subsys_id,
81 .base_cftypes = ss_files, 110 .base_cftypes = ss_files,
82 .module = THIS_MODULE, 111 .module = THIS_MODULE,
83
84 /*
85 * While net_cls cgroup has the rudimentary hierarchy support of
86 * inheriting the parent's classid on cgroup creation, it doesn't
87 * properly propagates config changes in ancestors to their
88 * descendents. A child should follow the parent's configuration
89 * but be allowed to override it. Fix it and remove the following.
90 */
91 .broken_hierarchy = true,
92}; 112};
93 113
94struct cls_cgroup_head { 114struct cls_cgroup_head {
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index a18d975db59c..d84f7e734cd7 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -495,16 +495,15 @@ EXPORT_SYMBOL(qdisc_watchdog_init);
495 495
496void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires) 496void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
497{ 497{
498 ktime_t time;
499
500 if (test_bit(__QDISC_STATE_DEACTIVATED, 498 if (test_bit(__QDISC_STATE_DEACTIVATED,
501 &qdisc_root_sleeping(wd->qdisc)->state)) 499 &qdisc_root_sleeping(wd->qdisc)->state))
502 return; 500 return;
503 501
504 qdisc_throttled(wd->qdisc); 502 qdisc_throttled(wd->qdisc);
505 time = ktime_set(0, 0); 503
506 time = ktime_add_ns(time, PSCHED_TICKS2NS(expires)); 504 hrtimer_start(&wd->timer,
507 hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS); 505 ns_to_ktime(PSCHED_TICKS2NS(expires)),
506 HRTIMER_MODE_ABS);
508} 507}
509EXPORT_SYMBOL(qdisc_watchdog_schedule); 508EXPORT_SYMBOL(qdisc_watchdog_schedule);
510 509
@@ -834,6 +833,8 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
834 goto err_out3; 833 goto err_out3;
835 } 834 }
836 lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock); 835 lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
836 if (!netif_is_multiqueue(dev))
837 sch->flags |= TCQ_F_ONETXQUEUE;
837 } 838 }
838 839
839 sch->handle = handle; 840 sch->handle = handle;
@@ -981,6 +982,9 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
981 struct Qdisc *p = NULL; 982 struct Qdisc *p = NULL;
982 int err; 983 int err;
983 984
985 if ((n->nlmsg_type != RTM_GETQDISC) && !capable(CAP_NET_ADMIN))
986 return -EPERM;
987
984 dev = __dev_get_by_index(net, tcm->tcm_ifindex); 988 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
985 if (!dev) 989 if (!dev)
986 return -ENODEV; 990 return -ENODEV;
@@ -1044,6 +1048,9 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1044 struct Qdisc *q, *p; 1048 struct Qdisc *q, *p;
1045 int err; 1049 int err;
1046 1050
1051 if (!capable(CAP_NET_ADMIN))
1052 return -EPERM;
1053
1047replay: 1054replay:
1048 /* Reinit, just in case something touches this. */ 1055 /* Reinit, just in case something touches this. */
1049 tcm = nlmsg_data(n); 1056 tcm = nlmsg_data(n);
@@ -1380,6 +1387,9 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1380 u32 qid = TC_H_MAJ(clid); 1387 u32 qid = TC_H_MAJ(clid);
1381 int err; 1388 int err;
1382 1389
1390 if ((n->nlmsg_type != RTM_GETTCLASS) && !capable(CAP_NET_ADMIN))
1391 return -EPERM;
1392
1383 dev = __dev_get_by_index(net, tcm->tcm_ifindex); 1393 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1384 if (!dev) 1394 if (!dev)
1385 return -ENODEV; 1395 return -ENODEV;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 564b9fc8efd3..0e19948470b8 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -509,8 +509,7 @@ static void cbq_ovl_delay(struct cbq_class *cl)
509 cl->cpriority = TC_CBQ_MAXPRIO; 509 cl->cpriority = TC_CBQ_MAXPRIO;
510 q->pmask |= (1<<TC_CBQ_MAXPRIO); 510 q->pmask |= (1<<TC_CBQ_MAXPRIO);
511 511
512 expires = ktime_set(0, 0); 512 expires = ns_to_ktime(PSCHED_TICKS2NS(sched));
513 expires = ktime_add_ns(expires, PSCHED_TICKS2NS(sched));
514 if (hrtimer_try_to_cancel(&q->delay_timer) && 513 if (hrtimer_try_to_cancel(&q->delay_timer) &&
515 ktime_to_ns(ktime_sub( 514 ktime_to_ns(ktime_sub(
516 hrtimer_get_expires(&q->delay_timer), 515 hrtimer_get_expires(&q->delay_timer),
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index aefc1504dc88..5d81a4478514 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -53,20 +53,19 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
53static inline struct sk_buff *dequeue_skb(struct Qdisc *q) 53static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
54{ 54{
55 struct sk_buff *skb = q->gso_skb; 55 struct sk_buff *skb = q->gso_skb;
56 const struct netdev_queue *txq = q->dev_queue;
56 57
57 if (unlikely(skb)) { 58 if (unlikely(skb)) {
58 struct net_device *dev = qdisc_dev(q);
59 struct netdev_queue *txq;
60
61 /* check the reason of requeuing without tx lock first */ 59 /* check the reason of requeuing without tx lock first */
62 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 60 txq = netdev_get_tx_queue(txq->dev, skb_get_queue_mapping(skb));
63 if (!netif_xmit_frozen_or_stopped(txq)) { 61 if (!netif_xmit_frozen_or_stopped(txq)) {
64 q->gso_skb = NULL; 62 q->gso_skb = NULL;
65 q->q.qlen--; 63 q->q.qlen--;
66 } else 64 } else
67 skb = NULL; 65 skb = NULL;
68 } else { 66 } else {
69 skb = q->dequeue(q); 67 if (!(q->flags & TCQ_F_ONETXQUEUE) || !netif_xmit_frozen_or_stopped(txq))
68 skb = q->dequeue(q);
70 } 69 }
71 70
72 return skb; 71 return skb;
@@ -686,6 +685,8 @@ static void attach_one_default_qdisc(struct net_device *dev,
686 netdev_info(dev, "activation failed\n"); 685 netdev_info(dev, "activation failed\n");
687 return; 686 return;
688 } 687 }
688 if (!netif_is_multiqueue(dev))
689 qdisc->flags |= TCQ_F_ONETXQUEUE;
689 } 690 }
690 dev_queue->qdisc_sleeping = qdisc; 691 dev_queue->qdisc_sleeping = qdisc;
691} 692}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 9d75b7761313..51561eafcb72 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -71,6 +71,12 @@ enum htb_cmode {
71 HTB_CAN_SEND /* class can send */ 71 HTB_CAN_SEND /* class can send */
72}; 72};
73 73
74struct htb_rate_cfg {
75 u64 rate_bps;
76 u32 mult;
77 u32 shift;
78};
79
74/* interior & leaf nodes; props specific to leaves are marked L: */ 80/* interior & leaf nodes; props specific to leaves are marked L: */
75struct htb_class { 81struct htb_class {
76 struct Qdisc_class_common common; 82 struct Qdisc_class_common common;
@@ -118,11 +124,11 @@ struct htb_class {
118 int filter_cnt; 124 int filter_cnt;
119 125
120 /* token bucket parameters */ 126 /* token bucket parameters */
121 struct qdisc_rate_table *rate; /* rate table of the class itself */ 127 struct htb_rate_cfg rate;
122 struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */ 128 struct htb_rate_cfg ceil;
123 long buffer, cbuffer; /* token bucket depth/rate */ 129 s64 buffer, cbuffer; /* token bucket depth/rate */
124 psched_tdiff_t mbuffer; /* max wait time */ 130 psched_tdiff_t mbuffer; /* max wait time */
125 long tokens, ctokens; /* current number of tokens */ 131 s64 tokens, ctokens; /* current number of tokens */
126 psched_time_t t_c; /* checkpoint time */ 132 psched_time_t t_c; /* checkpoint time */
127}; 133};
128 134
@@ -162,6 +168,45 @@ struct htb_sched {
162 struct work_struct work; 168 struct work_struct work;
163}; 169};
164 170
171static u64 l2t_ns(struct htb_rate_cfg *r, unsigned int len)
172{
173 return ((u64)len * r->mult) >> r->shift;
174}
175
176static void htb_precompute_ratedata(struct htb_rate_cfg *r)
177{
178 u64 factor;
179 u64 mult;
180 int shift;
181
182 r->shift = 0;
183 r->mult = 1;
184 /*
185 * Calibrate mult, shift so that token counting is accurate
186 * for smallest packet size (64 bytes). Token (time in ns) is
187 * computed as (bytes * 8) * NSEC_PER_SEC / rate_bps. It will
188 * work as long as the smallest packet transfer time can be
189 * accurately represented in nanosec.
190 */
191 if (r->rate_bps > 0) {
192 /*
193 * Higher shift gives better accuracy. Find the largest
194 * shift such that mult fits in 32 bits.
195 */
196 for (shift = 0; shift < 16; shift++) {
197 r->shift = shift;
198 factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
199 mult = div64_u64(factor, r->rate_bps);
200 if (mult > UINT_MAX)
201 break;
202 }
203
204 r->shift = shift - 1;
205 factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
206 r->mult = div64_u64(factor, r->rate_bps);
207 }
208}
209
165/* find class in global hash table using given handle */ 210/* find class in global hash table using given handle */
166static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch) 211static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
167{ 212{
@@ -273,7 +318,7 @@ static void htb_add_to_id_tree(struct rb_root *root,
273 * already in the queue. 318 * already in the queue.
274 */ 319 */
275static void htb_add_to_wait_tree(struct htb_sched *q, 320static void htb_add_to_wait_tree(struct htb_sched *q,
276 struct htb_class *cl, long delay) 321 struct htb_class *cl, s64 delay)
277{ 322{
278 struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL; 323 struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
279 324
@@ -441,14 +486,14 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
441 htb_remove_class_from_row(q, cl, mask); 486 htb_remove_class_from_row(q, cl, mask);
442} 487}
443 488
444static inline long htb_lowater(const struct htb_class *cl) 489static inline s64 htb_lowater(const struct htb_class *cl)
445{ 490{
446 if (htb_hysteresis) 491 if (htb_hysteresis)
447 return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0; 492 return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
448 else 493 else
449 return 0; 494 return 0;
450} 495}
451static inline long htb_hiwater(const struct htb_class *cl) 496static inline s64 htb_hiwater(const struct htb_class *cl)
452{ 497{
453 if (htb_hysteresis) 498 if (htb_hysteresis)
454 return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0; 499 return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
@@ -469,9 +514,9 @@ static inline long htb_hiwater(const struct htb_class *cl)
469 * mode transitions per time unit. The speed gain is about 1/6. 514 * mode transitions per time unit. The speed gain is about 1/6.
470 */ 515 */
471static inline enum htb_cmode 516static inline enum htb_cmode
472htb_class_mode(struct htb_class *cl, long *diff) 517htb_class_mode(struct htb_class *cl, s64 *diff)
473{ 518{
474 long toks; 519 s64 toks;
475 520
476 if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) { 521 if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
477 *diff = -toks; 522 *diff = -toks;
@@ -495,7 +540,7 @@ htb_class_mode(struct htb_class *cl, long *diff)
495 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree). 540 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
496 */ 541 */
497static void 542static void
498htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff) 543htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
499{ 544{
500 enum htb_cmode new_mode = htb_class_mode(cl, diff); 545 enum htb_cmode new_mode = htb_class_mode(cl, diff);
501 546
@@ -581,26 +626,26 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
581 return NET_XMIT_SUCCESS; 626 return NET_XMIT_SUCCESS;
582} 627}
583 628
584static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, long diff) 629static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
585{ 630{
586 long toks = diff + cl->tokens; 631 s64 toks = diff + cl->tokens;
587 632
588 if (toks > cl->buffer) 633 if (toks > cl->buffer)
589 toks = cl->buffer; 634 toks = cl->buffer;
590 toks -= (long) qdisc_l2t(cl->rate, bytes); 635 toks -= (s64) l2t_ns(&cl->rate, bytes);
591 if (toks <= -cl->mbuffer) 636 if (toks <= -cl->mbuffer)
592 toks = 1 - cl->mbuffer; 637 toks = 1 - cl->mbuffer;
593 638
594 cl->tokens = toks; 639 cl->tokens = toks;
595} 640}
596 641
597static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, long diff) 642static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
598{ 643{
599 long toks = diff + cl->ctokens; 644 s64 toks = diff + cl->ctokens;
600 645
601 if (toks > cl->cbuffer) 646 if (toks > cl->cbuffer)
602 toks = cl->cbuffer; 647 toks = cl->cbuffer;
603 toks -= (long) qdisc_l2t(cl->ceil, bytes); 648 toks -= (s64) l2t_ns(&cl->ceil, bytes);
604 if (toks <= -cl->mbuffer) 649 if (toks <= -cl->mbuffer)
605 toks = 1 - cl->mbuffer; 650 toks = 1 - cl->mbuffer;
606 651
@@ -623,10 +668,10 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
623{ 668{
624 int bytes = qdisc_pkt_len(skb); 669 int bytes = qdisc_pkt_len(skb);
625 enum htb_cmode old_mode; 670 enum htb_cmode old_mode;
626 long diff; 671 s64 diff;
627 672
628 while (cl) { 673 while (cl) {
629 diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer); 674 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
630 if (cl->level >= level) { 675 if (cl->level >= level) {
631 if (cl->level == level) 676 if (cl->level == level)
632 cl->xstats.lends++; 677 cl->xstats.lends++;
@@ -673,7 +718,7 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
673 unsigned long stop_at = start + 2; 718 unsigned long stop_at = start + 2;
674 while (time_before(jiffies, stop_at)) { 719 while (time_before(jiffies, stop_at)) {
675 struct htb_class *cl; 720 struct htb_class *cl;
676 long diff; 721 s64 diff;
677 struct rb_node *p = rb_first(&q->wait_pq[level]); 722 struct rb_node *p = rb_first(&q->wait_pq[level]);
678 723
679 if (!p) 724 if (!p)
@@ -684,7 +729,7 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
684 return cl->pq_key; 729 return cl->pq_key;
685 730
686 htb_safe_rb_erase(p, q->wait_pq + level); 731 htb_safe_rb_erase(p, q->wait_pq + level);
687 diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer); 732 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
688 htb_change_class_mode(q, cl, &diff); 733 htb_change_class_mode(q, cl, &diff);
689 if (cl->cmode != HTB_CAN_SEND) 734 if (cl->cmode != HTB_CAN_SEND)
690 htb_add_to_wait_tree(q, cl, diff); 735 htb_add_to_wait_tree(q, cl, diff);
@@ -871,10 +916,10 @@ ok:
871 916
872 if (!sch->q.qlen) 917 if (!sch->q.qlen)
873 goto fin; 918 goto fin;
874 q->now = psched_get_time(); 919 q->now = ktime_to_ns(ktime_get());
875 start_at = jiffies; 920 start_at = jiffies;
876 921
877 next_event = q->now + 5 * PSCHED_TICKS_PER_SEC; 922 next_event = q->now + 5LLU * NSEC_PER_SEC;
878 923
879 for (level = 0; level < TC_HTB_MAXDEPTH; level++) { 924 for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
880 /* common case optimization - skip event handler quickly */ 925 /* common case optimization - skip event handler quickly */
@@ -884,7 +929,7 @@ ok:
884 if (q->now >= q->near_ev_cache[level]) { 929 if (q->now >= q->near_ev_cache[level]) {
885 event = htb_do_events(q, level, start_at); 930 event = htb_do_events(q, level, start_at);
886 if (!event) 931 if (!event)
887 event = q->now + PSCHED_TICKS_PER_SEC; 932 event = q->now + NSEC_PER_SEC;
888 q->near_ev_cache[level] = event; 933 q->near_ev_cache[level] = event;
889 } else 934 } else
890 event = q->near_ev_cache[level]; 935 event = q->near_ev_cache[level];
@@ -903,10 +948,17 @@ ok:
903 } 948 }
904 } 949 }
905 sch->qstats.overlimits++; 950 sch->qstats.overlimits++;
906 if (likely(next_event > q->now)) 951 if (likely(next_event > q->now)) {
907 qdisc_watchdog_schedule(&q->watchdog, next_event); 952 if (!test_bit(__QDISC_STATE_DEACTIVATED,
908 else 953 &qdisc_root_sleeping(q->watchdog.qdisc)->state)) {
954 ktime_t time = ns_to_ktime(next_event);
955 qdisc_throttled(q->watchdog.qdisc);
956 hrtimer_start(&q->watchdog.timer, time,
957 HRTIMER_MODE_ABS);
958 }
959 } else {
909 schedule_work(&q->work); 960 schedule_work(&q->work);
961 }
910fin: 962fin:
911 return skb; 963 return skb;
912} 964}
@@ -1082,9 +1134,9 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1082 1134
1083 memset(&opt, 0, sizeof(opt)); 1135 memset(&opt, 0, sizeof(opt));
1084 1136
1085 opt.rate = cl->rate->rate; 1137 opt.rate.rate = cl->rate.rate_bps >> 3;
1086 opt.buffer = cl->buffer; 1138 opt.buffer = cl->buffer;
1087 opt.ceil = cl->ceil->rate; 1139 opt.ceil.rate = cl->ceil.rate_bps >> 3;
1088 opt.cbuffer = cl->cbuffer; 1140 opt.cbuffer = cl->cbuffer;
1089 opt.quantum = cl->quantum; 1141 opt.quantum = cl->quantum;
1090 opt.prio = cl->prio; 1142 opt.prio = cl->prio;
@@ -1203,9 +1255,6 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1203 qdisc_destroy(cl->un.leaf.q); 1255 qdisc_destroy(cl->un.leaf.q);
1204 } 1256 }
1205 gen_kill_estimator(&cl->bstats, &cl->rate_est); 1257 gen_kill_estimator(&cl->bstats, &cl->rate_est);
1206 qdisc_put_rtab(cl->rate);
1207 qdisc_put_rtab(cl->ceil);
1208
1209 tcf_destroy_chain(&cl->filter_list); 1258 tcf_destroy_chain(&cl->filter_list);
1210 kfree(cl); 1259 kfree(cl);
1211} 1260}
@@ -1307,7 +1356,6 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1307 struct htb_sched *q = qdisc_priv(sch); 1356 struct htb_sched *q = qdisc_priv(sch);
1308 struct htb_class *cl = (struct htb_class *)*arg, *parent; 1357 struct htb_class *cl = (struct htb_class *)*arg, *parent;
1309 struct nlattr *opt = tca[TCA_OPTIONS]; 1358 struct nlattr *opt = tca[TCA_OPTIONS];
1310 struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
1311 struct nlattr *tb[__TCA_HTB_MAX]; 1359 struct nlattr *tb[__TCA_HTB_MAX];
1312 struct tc_htb_opt *hopt; 1360 struct tc_htb_opt *hopt;
1313 1361
@@ -1326,10 +1374,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1326 parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch); 1374 parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
1327 1375
1328 hopt = nla_data(tb[TCA_HTB_PARMS]); 1376 hopt = nla_data(tb[TCA_HTB_PARMS]);
1329 1377 if (!hopt->rate.rate || !hopt->ceil.rate)
1330 rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
1331 ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
1332 if (!rtab || !ctab)
1333 goto failure; 1378 goto failure;
1334 1379
1335 if (!cl) { /* new class */ 1380 if (!cl) { /* new class */
@@ -1439,7 +1484,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1439 * is really leaf before changing cl->un.leaf ! 1484 * is really leaf before changing cl->un.leaf !
1440 */ 1485 */
1441 if (!cl->level) { 1486 if (!cl->level) {
1442 cl->quantum = rtab->rate.rate / q->rate2quantum; 1487 cl->quantum = hopt->rate.rate / q->rate2quantum;
1443 if (!hopt->quantum && cl->quantum < 1000) { 1488 if (!hopt->quantum && cl->quantum < 1000) {
1444 pr_warning( 1489 pr_warning(
1445 "HTB: quantum of class %X is small. Consider r2q change.\n", 1490 "HTB: quantum of class %X is small. Consider r2q change.\n",
@@ -1460,12 +1505,16 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1460 1505
1461 cl->buffer = hopt->buffer; 1506 cl->buffer = hopt->buffer;
1462 cl->cbuffer = hopt->cbuffer; 1507 cl->cbuffer = hopt->cbuffer;
1463 if (cl->rate) 1508
1464 qdisc_put_rtab(cl->rate); 1509 cl->rate.rate_bps = (u64)hopt->rate.rate << 3;
1465 cl->rate = rtab; 1510 cl->ceil.rate_bps = (u64)hopt->ceil.rate << 3;
1466 if (cl->ceil) 1511
1467 qdisc_put_rtab(cl->ceil); 1512 htb_precompute_ratedata(&cl->rate);
1468 cl->ceil = ctab; 1513 htb_precompute_ratedata(&cl->ceil);
1514
1515 cl->buffer = hopt->buffer << PSCHED_SHIFT;
1516 cl->cbuffer = hopt->buffer << PSCHED_SHIFT;
1517
1469 sch_tree_unlock(sch); 1518 sch_tree_unlock(sch);
1470 1519
1471 qdisc_class_hash_grow(sch, &q->clhash); 1520 qdisc_class_hash_grow(sch, &q->clhash);
@@ -1474,10 +1523,6 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1474 return 0; 1523 return 0;
1475 1524
1476failure: 1525failure:
1477 if (rtab)
1478 qdisc_put_rtab(rtab);
1479 if (ctab)
1480 qdisc_put_rtab(ctab);
1481 return err; 1526 return err;
1482} 1527}
1483 1528
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 0a4b2f9a0094..5da78a19ac9a 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -63,6 +63,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
63 if (qdisc == NULL) 63 if (qdisc == NULL)
64 goto err; 64 goto err;
65 priv->qdiscs[ntx] = qdisc; 65 priv->qdiscs[ntx] = qdisc;
66 qdisc->flags |= TCQ_F_ONETXQUEUE;
66 } 67 }
67 68
68 sch->flags |= TCQ_F_MQROOT; 69 sch->flags |= TCQ_F_MQROOT;
@@ -150,7 +151,8 @@ static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
150 dev_deactivate(dev); 151 dev_deactivate(dev);
151 152
152 *old = dev_graft_qdisc(dev_queue, new); 153 *old = dev_graft_qdisc(dev_queue, new);
153 154 if (new)
155 new->flags |= TCQ_F_ONETXQUEUE;
154 if (dev->flags & IFF_UP) 156 if (dev->flags & IFF_UP)
155 dev_activate(dev); 157 dev_activate(dev);
156 return 0; 158 return 0;
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index d1831ca966d4..accec33c454c 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -132,6 +132,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
132 goto err; 132 goto err;
133 } 133 }
134 priv->qdiscs[i] = qdisc; 134 priv->qdiscs[i] = qdisc;
135 qdisc->flags |= TCQ_F_ONETXQUEUE;
135 } 136 }
136 137
137 /* If the mqprio options indicate that hardware should own 138 /* If the mqprio options indicate that hardware should own
@@ -205,6 +206,9 @@ static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
205 206
206 *old = dev_graft_qdisc(dev_queue, new); 207 *old = dev_graft_qdisc(dev_queue, new);
207 208
209 if (new)
210 new->flags |= TCQ_F_ONETXQUEUE;
211
208 if (dev->flags & IFF_UP) 212 if (dev->flags & IFF_UP)
209 dev_activate(dev); 213 dev_activate(dev);
210 214
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 9687fa1c2275..6ed37652a4c3 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * net/sched/sch_qfq.c Quick Fair Queueing Scheduler. 2 * net/sched/sch_qfq.c Quick Fair Queueing Plus Scheduler.
3 * 3 *
4 * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente. 4 * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
5 * Copyright (c) 2012 Paolo Valente.
5 * 6 *
6 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -19,12 +20,18 @@
19#include <net/pkt_cls.h> 20#include <net/pkt_cls.h>
20 21
21 22
22/* Quick Fair Queueing 23/* Quick Fair Queueing Plus
23 =================== 24 ========================
24 25
25 Sources: 26 Sources:
26 27
27 Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient 28 [1] Paolo Valente,
29 "Reducing the Execution Time of Fair-Queueing Schedulers."
30 http://algo.ing.unimo.it/people/paolo/agg-sched/agg-sched.pdf
31
32 Sources for QFQ:
33
34 [2] Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
28 Packet Scheduling with Tight Bandwidth Distribution Guarantees." 35 Packet Scheduling with Tight Bandwidth Distribution Guarantees."
29 36
30 See also: 37 See also:
@@ -33,6 +40,20 @@
33 40
34/* 41/*
35 42
43 QFQ+ divides classes into aggregates of at most MAX_AGG_CLASSES
44 classes. Each aggregate is timestamped with a virtual start time S
45 and a virtual finish time F, and scheduled according to its
46 timestamps. S and F are computed as a function of a system virtual
47 time function V. The classes within each aggregate are instead
48 scheduled with DRR.
49
50 To speed up operations, QFQ+ divides also aggregates into a limited
51 number of groups. Which group a class belongs to depends on the
52 ratio between the maximum packet length for the class and the weight
53 of the class. Groups have their own S and F. In the end, QFQ+
54 schedules groups, then aggregates within groups, then classes within
55 aggregates. See [1] and [2] for a full description.
56
36 Virtual time computations. 57 Virtual time computations.
37 58
38 S, F and V are all computed in fixed point arithmetic with 59 S, F and V are all computed in fixed point arithmetic with
@@ -76,27 +97,28 @@
76#define QFQ_MAX_SLOTS 32 97#define QFQ_MAX_SLOTS 32
77 98
78/* 99/*
79 * Shifts used for class<->group mapping. We allow class weights that are 100 * Shifts used for aggregate<->group mapping. We allow class weights that are
80 * in the range [1, 2^MAX_WSHIFT], and we try to map each class i to the 101 * in the range [1, 2^MAX_WSHIFT], and we try to map each aggregate i to the
81 * group with the smallest index that can support the L_i / r_i configured 102 * group with the smallest index that can support the L_i / r_i configured
82 * for the class. 103 * for the classes in the aggregate.
83 * 104 *
84 * grp->index is the index of the group; and grp->slot_shift 105 * grp->index is the index of the group; and grp->slot_shift
85 * is the shift for the corresponding (scaled) sigma_i. 106 * is the shift for the corresponding (scaled) sigma_i.
86 */ 107 */
87#define QFQ_MAX_INDEX 24 108#define QFQ_MAX_INDEX 24
88#define QFQ_MAX_WSHIFT 12 109#define QFQ_MAX_WSHIFT 10
89 110
90#define QFQ_MAX_WEIGHT (1<<QFQ_MAX_WSHIFT) 111#define QFQ_MAX_WEIGHT (1<<QFQ_MAX_WSHIFT) /* see qfq_slot_insert */
91#define QFQ_MAX_WSUM (16*QFQ_MAX_WEIGHT) 112#define QFQ_MAX_WSUM (64*QFQ_MAX_WEIGHT)
92 113
93#define FRAC_BITS 30 /* fixed point arithmetic */ 114#define FRAC_BITS 30 /* fixed point arithmetic */
94#define ONE_FP (1UL << FRAC_BITS) 115#define ONE_FP (1UL << FRAC_BITS)
95#define IWSUM (ONE_FP/QFQ_MAX_WSUM) 116#define IWSUM (ONE_FP/QFQ_MAX_WSUM)
96 117
97#define QFQ_MTU_SHIFT 16 /* to support TSO/GSO */ 118#define QFQ_MTU_SHIFT 16 /* to support TSO/GSO */
98#define QFQ_MIN_SLOT_SHIFT (FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX) 119#define QFQ_MIN_LMAX 512 /* see qfq_slot_insert */
99#define QFQ_MIN_LMAX 256 /* min possible lmax for a class */ 120
121#define QFQ_MAX_AGG_CLASSES 8 /* max num classes per aggregate allowed */
100 122
101/* 123/*
102 * Possible group states. These values are used as indexes for the bitmaps 124 * Possible group states. These values are used as indexes for the bitmaps
@@ -106,6 +128,8 @@ enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };
106 128
107struct qfq_group; 129struct qfq_group;
108 130
131struct qfq_aggregate;
132
109struct qfq_class { 133struct qfq_class {
110 struct Qdisc_class_common common; 134 struct Qdisc_class_common common;
111 135
@@ -116,7 +140,12 @@ struct qfq_class {
116 struct gnet_stats_queue qstats; 140 struct gnet_stats_queue qstats;
117 struct gnet_stats_rate_est rate_est; 141 struct gnet_stats_rate_est rate_est;
118 struct Qdisc *qdisc; 142 struct Qdisc *qdisc;
143 struct list_head alist; /* Link for active-classes list. */
144 struct qfq_aggregate *agg; /* Parent aggregate. */
145 int deficit; /* DRR deficit counter. */
146};
119 147
148struct qfq_aggregate {
120 struct hlist_node next; /* Link for the slot list. */ 149 struct hlist_node next; /* Link for the slot list. */
121 u64 S, F; /* flow timestamps (exact) */ 150 u64 S, F; /* flow timestamps (exact) */
122 151
@@ -127,8 +156,18 @@ struct qfq_class {
127 struct qfq_group *grp; 156 struct qfq_group *grp;
128 157
129 /* these are copied from the flowset. */ 158 /* these are copied from the flowset. */
130 u32 inv_w; /* ONE_FP/weight */ 159 u32 class_weight; /* Weight of each class in this aggregate. */
131 u32 lmax; /* Max packet size for this flow. */ 160 /* Max pkt size for the classes in this aggregate, DRR quantum. */
161 int lmax;
162
163 u32 inv_w; /* ONE_FP/(sum of weights of classes in aggr.). */
164 u32 budgetmax; /* Max budget for this aggregate. */
165 u32 initial_budget, budget; /* Initial and current budget. */
166
167 int num_classes; /* Number of classes in this aggr. */
168 struct list_head active; /* DRR queue of active classes. */
169
170 struct hlist_node nonfull_next; /* See nonfull_aggs in qfq_sched. */
132}; 171};
133 172
134struct qfq_group { 173struct qfq_group {
@@ -138,7 +177,7 @@ struct qfq_group {
138 unsigned int front; /* Index of the front slot. */ 177 unsigned int front; /* Index of the front slot. */
139 unsigned long full_slots; /* non-empty slots */ 178 unsigned long full_slots; /* non-empty slots */
140 179
141 /* Array of RR lists of active classes. */ 180 /* Array of RR lists of active aggregates. */
142 struct hlist_head slots[QFQ_MAX_SLOTS]; 181 struct hlist_head slots[QFQ_MAX_SLOTS];
143}; 182};
144 183
@@ -146,13 +185,28 @@ struct qfq_sched {
146 struct tcf_proto *filter_list; 185 struct tcf_proto *filter_list;
147 struct Qdisc_class_hash clhash; 186 struct Qdisc_class_hash clhash;
148 187
149 u64 V; /* Precise virtual time. */ 188 u64 oldV, V; /* Precise virtual times. */
150 u32 wsum; /* weight sum */ 189 struct qfq_aggregate *in_serv_agg; /* Aggregate being served. */
190 u32 num_active_agg; /* Num. of active aggregates */
191 u32 wsum; /* weight sum */
151 192
152 unsigned long bitmaps[QFQ_MAX_STATE]; /* Group bitmaps. */ 193 unsigned long bitmaps[QFQ_MAX_STATE]; /* Group bitmaps. */
153 struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */ 194 struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
195 u32 min_slot_shift; /* Index of the group-0 bit in the bitmaps. */
196
197 u32 max_agg_classes; /* Max number of classes per aggr. */
198 struct hlist_head nonfull_aggs; /* Aggs with room for more classes. */
154}; 199};
155 200
201/*
202 * Possible reasons why the timestamps of an aggregate are updated
203 * enqueue: the aggregate switches from idle to active and must scheduled
204 * for service
205 * requeue: the aggregate finishes its budget, so it stops being served and
206 * must be rescheduled for service
207 */
208enum update_reason {enqueue, requeue};
209
156static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid) 210static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
157{ 211{
158 struct qfq_sched *q = qdisc_priv(sch); 212 struct qfq_sched *q = qdisc_priv(sch);
@@ -182,18 +236,18 @@ static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
182 * index = log_2(maxlen/weight) but we need to apply the scaling. 236 * index = log_2(maxlen/weight) but we need to apply the scaling.
183 * This is used only once at flow creation. 237 * This is used only once at flow creation.
184 */ 238 */
185static int qfq_calc_index(u32 inv_w, unsigned int maxlen) 239static int qfq_calc_index(u32 inv_w, unsigned int maxlen, u32 min_slot_shift)
186{ 240{
187 u64 slot_size = (u64)maxlen * inv_w; 241 u64 slot_size = (u64)maxlen * inv_w;
188 unsigned long size_map; 242 unsigned long size_map;
189 int index = 0; 243 int index = 0;
190 244
191 size_map = slot_size >> QFQ_MIN_SLOT_SHIFT; 245 size_map = slot_size >> min_slot_shift;
192 if (!size_map) 246 if (!size_map)
193 goto out; 247 goto out;
194 248
195 index = __fls(size_map) + 1; /* basically a log_2 */ 249 index = __fls(size_map) + 1; /* basically a log_2 */
196 index -= !(slot_size - (1ULL << (index + QFQ_MIN_SLOT_SHIFT - 1))); 250 index -= !(slot_size - (1ULL << (index + min_slot_shift - 1)));
197 251
198 if (index < 0) 252 if (index < 0)
199 index = 0; 253 index = 0;
@@ -204,66 +258,150 @@ out:
204 return index; 258 return index;
205} 259}
206 260
207/* Length of the next packet (0 if the queue is empty). */ 261static void qfq_deactivate_agg(struct qfq_sched *, struct qfq_aggregate *);
208static unsigned int qdisc_peek_len(struct Qdisc *sch) 262static void qfq_activate_agg(struct qfq_sched *, struct qfq_aggregate *,
263 enum update_reason);
264
265static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
266 u32 lmax, u32 weight)
209{ 267{
210 struct sk_buff *skb; 268 INIT_LIST_HEAD(&agg->active);
269 hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);
270
271 agg->lmax = lmax;
272 agg->class_weight = weight;
273}
274
275static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
276 u32 lmax, u32 weight)
277{
278 struct qfq_aggregate *agg;
279 struct hlist_node *n;
280
281 hlist_for_each_entry(agg, n, &q->nonfull_aggs, nonfull_next)
282 if (agg->lmax == lmax && agg->class_weight == weight)
283 return agg;
284
285 return NULL;
286}
287
211 288
212 skb = sch->ops->peek(sch); 289/* Update aggregate as a function of the new number of classes. */
213 return skb ? qdisc_pkt_len(skb) : 0; 290static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
291 int new_num_classes)
292{
293 u32 new_agg_weight;
294
295 if (new_num_classes == q->max_agg_classes)
296 hlist_del_init(&agg->nonfull_next);
297
298 if (agg->num_classes > new_num_classes &&
299 new_num_classes == q->max_agg_classes - 1) /* agg no more full */
300 hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);
301
302 agg->budgetmax = new_num_classes * agg->lmax;
303 new_agg_weight = agg->class_weight * new_num_classes;
304 agg->inv_w = ONE_FP/new_agg_weight;
305
306 if (agg->grp == NULL) {
307 int i = qfq_calc_index(agg->inv_w, agg->budgetmax,
308 q->min_slot_shift);
309 agg->grp = &q->groups[i];
310 }
311
312 q->wsum +=
313 (int) agg->class_weight * (new_num_classes - agg->num_classes);
314
315 agg->num_classes = new_num_classes;
316}
317
318/* Add class to aggregate. */
319static void qfq_add_to_agg(struct qfq_sched *q,
320 struct qfq_aggregate *agg,
321 struct qfq_class *cl)
322{
323 cl->agg = agg;
324
325 qfq_update_agg(q, agg, agg->num_classes+1);
326 if (cl->qdisc->q.qlen > 0) { /* adding an active class */
327 list_add_tail(&cl->alist, &agg->active);
328 if (list_first_entry(&agg->active, struct qfq_class, alist) ==
329 cl && q->in_serv_agg != agg) /* agg was inactive */
330 qfq_activate_agg(q, agg, enqueue); /* schedule agg */
331 }
214} 332}
215 333
216static void qfq_deactivate_class(struct qfq_sched *, struct qfq_class *); 334static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *);
217static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
218 unsigned int len);
219 335
220static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl, 336static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
221 u32 lmax, u32 inv_w, int delta_w)
222{ 337{
223 int i; 338 if (!hlist_unhashed(&agg->nonfull_next))
339 hlist_del_init(&agg->nonfull_next);
340 if (q->in_serv_agg == agg)
341 q->in_serv_agg = qfq_choose_next_agg(q);
342 kfree(agg);
343}
224 344
225 /* update qfq-specific data */ 345/* Deschedule class from within its parent aggregate. */
226 cl->lmax = lmax; 346static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
227 cl->inv_w = inv_w; 347{
228 i = qfq_calc_index(cl->inv_w, cl->lmax); 348 struct qfq_aggregate *agg = cl->agg;
229 349
230 cl->grp = &q->groups[i];
231 350
232 q->wsum += delta_w; 351 list_del(&cl->alist); /* remove from RR queue of the aggregate */
352 if (list_empty(&agg->active)) /* agg is now inactive */
353 qfq_deactivate_agg(q, agg);
233} 354}
234 355
235static void qfq_update_reactivate_class(struct qfq_sched *q, 356/* Remove class from its parent aggregate. */
236 struct qfq_class *cl, 357static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
237 u32 inv_w, u32 lmax, int delta_w)
238{ 358{
239 bool need_reactivation = false; 359 struct qfq_aggregate *agg = cl->agg;
240 int i = qfq_calc_index(inv_w, lmax);
241 360
242 if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) { 361 cl->agg = NULL;
243 /* 362 if (agg->num_classes == 1) { /* agg being emptied, destroy it */
244 * shift cl->F back, to not charge the 363 qfq_destroy_agg(q, agg);
245 * class for the not-yet-served head 364 return;
246 * packet
247 */
248 cl->F = cl->S;
249 /* remove class from its slot in the old group */
250 qfq_deactivate_class(q, cl);
251 need_reactivation = true;
252 } 365 }
366 qfq_update_agg(q, agg, agg->num_classes-1);
367}
253 368
254 qfq_update_class_params(q, cl, lmax, inv_w, delta_w); 369/* Deschedule class and remove it from its parent aggregate. */
370static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
371{
372 if (cl->qdisc->q.qlen > 0) /* class is active */
373 qfq_deactivate_class(q, cl);
255 374
256 if (need_reactivation) /* activate in new group */ 375 qfq_rm_from_agg(q, cl);
257 qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
258} 376}
259 377
378/* Move class to a new aggregate, matching the new class weight and/or lmax */
379static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
380 u32 lmax)
381{
382 struct qfq_sched *q = qdisc_priv(sch);
383 struct qfq_aggregate *new_agg = qfq_find_agg(q, lmax, weight);
384
385 if (new_agg == NULL) { /* create new aggregate */
386 new_agg = kzalloc(sizeof(*new_agg), GFP_ATOMIC);
387 if (new_agg == NULL)
388 return -ENOBUFS;
389 qfq_init_agg(q, new_agg, lmax, weight);
390 }
391 qfq_deact_rm_from_agg(q, cl);
392 qfq_add_to_agg(q, new_agg, cl);
393
394 return 0;
395}
260 396
261static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, 397static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
262 struct nlattr **tca, unsigned long *arg) 398 struct nlattr **tca, unsigned long *arg)
263{ 399{
264 struct qfq_sched *q = qdisc_priv(sch); 400 struct qfq_sched *q = qdisc_priv(sch);
265 struct qfq_class *cl = (struct qfq_class *)*arg; 401 struct qfq_class *cl = (struct qfq_class *)*arg;
402 bool existing = false;
266 struct nlattr *tb[TCA_QFQ_MAX + 1]; 403 struct nlattr *tb[TCA_QFQ_MAX + 1];
404 struct qfq_aggregate *new_agg = NULL;
267 u32 weight, lmax, inv_w; 405 u32 weight, lmax, inv_w;
268 int err; 406 int err;
269 int delta_w; 407 int delta_w;
@@ -286,15 +424,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
286 } else 424 } else
287 weight = 1; 425 weight = 1;
288 426
289 inv_w = ONE_FP / weight;
290 weight = ONE_FP / inv_w;
291 delta_w = weight - (cl ? ONE_FP / cl->inv_w : 0);
292 if (q->wsum + delta_w > QFQ_MAX_WSUM) {
293 pr_notice("qfq: total weight out of range (%u + %u)\n",
294 delta_w, q->wsum);
295 return -EINVAL;
296 }
297
298 if (tb[TCA_QFQ_LMAX]) { 427 if (tb[TCA_QFQ_LMAX]) {
299 lmax = nla_get_u32(tb[TCA_QFQ_LMAX]); 428 lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
300 if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) { 429 if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
@@ -304,7 +433,23 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
304 } else 433 } else
305 lmax = psched_mtu(qdisc_dev(sch)); 434 lmax = psched_mtu(qdisc_dev(sch));
306 435
307 if (cl != NULL) { 436 inv_w = ONE_FP / weight;
437 weight = ONE_FP / inv_w;
438
439 if (cl != NULL &&
440 lmax == cl->agg->lmax &&
441 weight == cl->agg->class_weight)
442 return 0; /* nothing to change */
443
444 delta_w = weight - (cl ? cl->agg->class_weight : 0);
445
446 if (q->wsum + delta_w > QFQ_MAX_WSUM) {
447 pr_notice("qfq: total weight out of range (%d + %u)\n",
448 delta_w, q->wsum);
449 return -EINVAL;
450 }
451
452 if (cl != NULL) { /* modify existing class */
308 if (tca[TCA_RATE]) { 453 if (tca[TCA_RATE]) {
309 err = gen_replace_estimator(&cl->bstats, &cl->rate_est, 454 err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
310 qdisc_root_sleeping_lock(sch), 455 qdisc_root_sleeping_lock(sch),
@@ -312,25 +457,18 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
312 if (err) 457 if (err)
313 return err; 458 return err;
314 } 459 }
315 460 existing = true;
316 if (lmax == cl->lmax && inv_w == cl->inv_w) 461 goto set_change_agg;
317 return 0; /* nothing to update */
318
319 sch_tree_lock(sch);
320 qfq_update_reactivate_class(q, cl, inv_w, lmax, delta_w);
321 sch_tree_unlock(sch);
322
323 return 0;
324 } 462 }
325 463
464 /* create and init new class */
326 cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL); 465 cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
327 if (cl == NULL) 466 if (cl == NULL)
328 return -ENOBUFS; 467 return -ENOBUFS;
329 468
330 cl->refcnt = 1; 469 cl->refcnt = 1;
331 cl->common.classid = classid; 470 cl->common.classid = classid;
332 471 cl->deficit = lmax;
333 qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
334 472
335 cl->qdisc = qdisc_create_dflt(sch->dev_queue, 473 cl->qdisc = qdisc_create_dflt(sch->dev_queue,
336 &pfifo_qdisc_ops, classid); 474 &pfifo_qdisc_ops, classid);
@@ -341,11 +479,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
341 err = gen_new_estimator(&cl->bstats, &cl->rate_est, 479 err = gen_new_estimator(&cl->bstats, &cl->rate_est,
342 qdisc_root_sleeping_lock(sch), 480 qdisc_root_sleeping_lock(sch),
343 tca[TCA_RATE]); 481 tca[TCA_RATE]);
344 if (err) { 482 if (err)
345 qdisc_destroy(cl->qdisc); 483 goto destroy_class;
346 kfree(cl);
347 return err;
348 }
349 } 484 }
350 485
351 sch_tree_lock(sch); 486 sch_tree_lock(sch);
@@ -354,19 +489,39 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
354 489
355 qdisc_class_hash_grow(sch, &q->clhash); 490 qdisc_class_hash_grow(sch, &q->clhash);
356 491
492set_change_agg:
493 sch_tree_lock(sch);
494 new_agg = qfq_find_agg(q, lmax, weight);
495 if (new_agg == NULL) { /* create new aggregate */
496 sch_tree_unlock(sch);
497 new_agg = kzalloc(sizeof(*new_agg), GFP_KERNEL);
498 if (new_agg == NULL) {
499 err = -ENOBUFS;
500 gen_kill_estimator(&cl->bstats, &cl->rate_est);
501 goto destroy_class;
502 }
503 sch_tree_lock(sch);
504 qfq_init_agg(q, new_agg, lmax, weight);
505 }
506 if (existing)
507 qfq_deact_rm_from_agg(q, cl);
508 qfq_add_to_agg(q, new_agg, cl);
509 sch_tree_unlock(sch);
510
357 *arg = (unsigned long)cl; 511 *arg = (unsigned long)cl;
358 return 0; 512 return 0;
513
514destroy_class:
515 qdisc_destroy(cl->qdisc);
516 kfree(cl);
517 return err;
359} 518}
360 519
361static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl) 520static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
362{ 521{
363 struct qfq_sched *q = qdisc_priv(sch); 522 struct qfq_sched *q = qdisc_priv(sch);
364 523
365 if (cl->inv_w) { 524 qfq_rm_from_agg(q, cl);
366 q->wsum -= ONE_FP / cl->inv_w;
367 cl->inv_w = 0;
368 }
369
370 gen_kill_estimator(&cl->bstats, &cl->rate_est); 525 gen_kill_estimator(&cl->bstats, &cl->rate_est);
371 qdisc_destroy(cl->qdisc); 526 qdisc_destroy(cl->qdisc);
372 kfree(cl); 527 kfree(cl);
@@ -481,8 +636,8 @@ static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
481 nest = nla_nest_start(skb, TCA_OPTIONS); 636 nest = nla_nest_start(skb, TCA_OPTIONS);
482 if (nest == NULL) 637 if (nest == NULL)
483 goto nla_put_failure; 638 goto nla_put_failure;
484 if (nla_put_u32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w) || 639 if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) ||
485 nla_put_u32(skb, TCA_QFQ_LMAX, cl->lmax)) 640 nla_put_u32(skb, TCA_QFQ_LMAX, cl->agg->lmax))
486 goto nla_put_failure; 641 goto nla_put_failure;
487 return nla_nest_end(skb, nest); 642 return nla_nest_end(skb, nest);
488 643
@@ -500,8 +655,8 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
500 memset(&xstats, 0, sizeof(xstats)); 655 memset(&xstats, 0, sizeof(xstats));
501 cl->qdisc->qstats.qlen = cl->qdisc->q.qlen; 656 cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;
502 657
503 xstats.weight = ONE_FP/cl->inv_w; 658 xstats.weight = cl->agg->class_weight;
504 xstats.lmax = cl->lmax; 659 xstats.lmax = cl->agg->lmax;
505 660
506 if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || 661 if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
507 gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || 662 gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
@@ -652,16 +807,16 @@ static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
652 * perhaps 807 * perhaps
653 * 808 *
654 old_V ^= q->V; 809 old_V ^= q->V;
655 old_V >>= QFQ_MIN_SLOT_SHIFT; 810 old_V >>= q->min_slot_shift;
656 if (old_V) { 811 if (old_V) {
657 ... 812 ...
658 } 813 }
659 * 814 *
660 */ 815 */
661static void qfq_make_eligible(struct qfq_sched *q, u64 old_V) 816static void qfq_make_eligible(struct qfq_sched *q)
662{ 817{
663 unsigned long vslot = q->V >> QFQ_MIN_SLOT_SHIFT; 818 unsigned long vslot = q->V >> q->min_slot_shift;
664 unsigned long old_vslot = old_V >> QFQ_MIN_SLOT_SHIFT; 819 unsigned long old_vslot = q->oldV >> q->min_slot_shift;
665 820
666 if (vslot != old_vslot) { 821 if (vslot != old_vslot) {
667 unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1; 822 unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1;
@@ -672,34 +827,38 @@ static void qfq_make_eligible(struct qfq_sched *q, u64 old_V)
672 827
673 828
674/* 829/*
675 * If the weight and lmax (max_pkt_size) of the classes do not change, 830 * The index of the slot in which the aggregate is to be inserted must
676 * then QFQ guarantees that the slot index is never higher than 831 * not be higher than QFQ_MAX_SLOTS-2. There is a '-2' and not a '-1'
677 * 2 + ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM). 832 * because the start time of the group may be moved backward by one
833 * slot after the aggregate has been inserted, and this would cause
834 * non-empty slots to be right-shifted by one position.
678 * 835 *
679 * With the current values of the above constants, the index is 836 * If the weight and lmax (max_pkt_size) of the classes do not change,
680 * then guaranteed to never be higher than 2 + 256 * (1 / 16) = 18. 837 * then QFQ+ does meet the above contraint according to the current
838 * values of its parameters. In fact, if the weight and lmax of the
839 * classes do not change, then, from the theory, QFQ+ guarantees that
840 * the slot index is never higher than
841 * 2 + QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
842 * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM) = 2 + 8 * 128 * (1 / 64) = 18
681 * 843 *
682 * When the weight of a class is increased or the lmax of the class is 844 * When the weight of a class is increased or the lmax of the class is
683 * decreased, a new class with smaller slot size may happen to be 845 * decreased, a new aggregate with smaller slot size than the original
684 * activated. The activation of this class should be properly delayed 846 * parent aggregate of the class may happen to be activated. The
685 * to when the service of the class has finished in the ideal system 847 * activation of this aggregate should be properly delayed to when the
686 * tracked by QFQ. If the activation of the class is not delayed to 848 * service of the class has finished in the ideal system tracked by
687 * this reference time instant, then this class may be unjustly served 849 * QFQ+. If the activation of the aggregate is not delayed to this
688 * before other classes waiting for service. This may cause 850 * reference time instant, then this aggregate may be unjustly served
689 * (unfrequently) the above bound to the slot index to be violated for 851 * before other aggregates waiting for service. This may cause the
690 * some of these unlucky classes. 852 * above bound to the slot index to be violated for some of these
853 * unlucky aggregates.
691 * 854 *
692 * Instead of delaying the activation of the new class, which is quite 855 * Instead of delaying the activation of the new aggregate, which is
693 * complex, the following inaccurate but simple solution is used: if 856 * quite complex, the following inaccurate but simple solution is used:
694 * the slot index is higher than QFQ_MAX_SLOTS-2, then the timestamps 857 * if the slot index is higher than QFQ_MAX_SLOTS-2, then the
695 * of the class are shifted backward so as to let the slot index 858 * timestamps of the aggregate are shifted backward so as to let the
696 * become equal to QFQ_MAX_SLOTS-2. This threshold is used because, if 859 * slot index become equal to QFQ_MAX_SLOTS-2.
697 * the slot index is above it, then the data structure implementing
698 * the bucket list either gets immediately corrupted or may get
699 * corrupted on a possible next packet arrival that causes the start
700 * time of the group to be shifted backward.
701 */ 860 */
702static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl, 861static void qfq_slot_insert(struct qfq_group *grp, struct qfq_aggregate *agg,
703 u64 roundedS) 862 u64 roundedS)
704{ 863{
705 u64 slot = (roundedS - grp->S) >> grp->slot_shift; 864 u64 slot = (roundedS - grp->S) >> grp->slot_shift;
@@ -708,22 +867,22 @@ static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl,
708 if (unlikely(slot > QFQ_MAX_SLOTS - 2)) { 867 if (unlikely(slot > QFQ_MAX_SLOTS - 2)) {
709 u64 deltaS = roundedS - grp->S - 868 u64 deltaS = roundedS - grp->S -
710 ((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift); 869 ((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift);
711 cl->S -= deltaS; 870 agg->S -= deltaS;
712 cl->F -= deltaS; 871 agg->F -= deltaS;
713 slot = QFQ_MAX_SLOTS - 2; 872 slot = QFQ_MAX_SLOTS - 2;
714 } 873 }
715 874
716 i = (grp->front + slot) % QFQ_MAX_SLOTS; 875 i = (grp->front + slot) % QFQ_MAX_SLOTS;
717 876
718 hlist_add_head(&cl->next, &grp->slots[i]); 877 hlist_add_head(&agg->next, &grp->slots[i]);
719 __set_bit(slot, &grp->full_slots); 878 __set_bit(slot, &grp->full_slots);
720} 879}
721 880
722/* Maybe introduce hlist_first_entry?? */ 881/* Maybe introduce hlist_first_entry?? */
723static struct qfq_class *qfq_slot_head(struct qfq_group *grp) 882static struct qfq_aggregate *qfq_slot_head(struct qfq_group *grp)
724{ 883{
725 return hlist_entry(grp->slots[grp->front].first, 884 return hlist_entry(grp->slots[grp->front].first,
726 struct qfq_class, next); 885 struct qfq_aggregate, next);
727} 886}
728 887
729/* 888/*
@@ -731,20 +890,20 @@ static struct qfq_class *qfq_slot_head(struct qfq_group *grp)
731 */ 890 */
732static void qfq_front_slot_remove(struct qfq_group *grp) 891static void qfq_front_slot_remove(struct qfq_group *grp)
733{ 892{
734 struct qfq_class *cl = qfq_slot_head(grp); 893 struct qfq_aggregate *agg = qfq_slot_head(grp);
735 894
736 BUG_ON(!cl); 895 BUG_ON(!agg);
737 hlist_del(&cl->next); 896 hlist_del(&agg->next);
738 if (hlist_empty(&grp->slots[grp->front])) 897 if (hlist_empty(&grp->slots[grp->front]))
739 __clear_bit(0, &grp->full_slots); 898 __clear_bit(0, &grp->full_slots);
740} 899}
741 900
742/* 901/*
743 * Returns the first full queue in a group. As a side effect, 902 * Returns the first aggregate in the first non-empty bucket of the
744 * adjust the bucket list so the first non-empty bucket is at 903 * group. As a side effect, adjusts the bucket list so the first
745 * position 0 in full_slots. 904 * non-empty bucket is at position 0 in full_slots.
746 */ 905 */
747static struct qfq_class *qfq_slot_scan(struct qfq_group *grp) 906static struct qfq_aggregate *qfq_slot_scan(struct qfq_group *grp)
748{ 907{
749 unsigned int i; 908 unsigned int i;
750 909
@@ -780,7 +939,7 @@ static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS)
780 grp->front = (grp->front - i) % QFQ_MAX_SLOTS; 939 grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
781} 940}
782 941
783static void qfq_update_eligible(struct qfq_sched *q, u64 old_V) 942static void qfq_update_eligible(struct qfq_sched *q)
784{ 943{
785 struct qfq_group *grp; 944 struct qfq_group *grp;
786 unsigned long ineligible; 945 unsigned long ineligible;
@@ -792,137 +951,226 @@ static void qfq_update_eligible(struct qfq_sched *q, u64 old_V)
792 if (qfq_gt(grp->S, q->V)) 951 if (qfq_gt(grp->S, q->V))
793 q->V = grp->S; 952 q->V = grp->S;
794 } 953 }
795 qfq_make_eligible(q, old_V); 954 qfq_make_eligible(q);
796 } 955 }
797} 956}
798 957
799/* 958/* Dequeue head packet of the head class in the DRR queue of the aggregate. */
800 * Updates the class, returns true if also the group needs to be updated. 959static void agg_dequeue(struct qfq_aggregate *agg,
801 */ 960 struct qfq_class *cl, unsigned int len)
802static bool qfq_update_class(struct qfq_group *grp, struct qfq_class *cl)
803{ 961{
804 unsigned int len = qdisc_peek_len(cl->qdisc); 962 qdisc_dequeue_peeked(cl->qdisc);
805 963
806 cl->S = cl->F; 964 cl->deficit -= (int) len;
807 if (!len)
808 qfq_front_slot_remove(grp); /* queue is empty */
809 else {
810 u64 roundedS;
811 965
812 cl->F = cl->S + (u64)len * cl->inv_w; 966 if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
813 roundedS = qfq_round_down(cl->S, grp->slot_shift); 967 list_del(&cl->alist);
814 if (roundedS == grp->S) 968 else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
815 return false; 969 cl->deficit += agg->lmax;
816 970 list_move_tail(&cl->alist, &agg->active);
817 qfq_front_slot_remove(grp);
818 qfq_slot_insert(grp, cl, roundedS);
819 } 971 }
972}
973
974static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
975 struct qfq_class **cl,
976 unsigned int *len)
977{
978 struct sk_buff *skb;
820 979
821 return true; 980 *cl = list_first_entry(&agg->active, struct qfq_class, alist);
981 skb = (*cl)->qdisc->ops->peek((*cl)->qdisc);
982 if (skb == NULL)
983 WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
984 else
985 *len = qdisc_pkt_len(skb);
986
987 return skb;
988}
989
990/* Update F according to the actual service received by the aggregate. */
991static inline void charge_actual_service(struct qfq_aggregate *agg)
992{
993 /* compute the service received by the aggregate */
994 u32 service_received = agg->initial_budget - agg->budget;
995
996 agg->F = agg->S + (u64)service_received * agg->inv_w;
822} 997}
823 998
824static struct sk_buff *qfq_dequeue(struct Qdisc *sch) 999static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
825{ 1000{
826 struct qfq_sched *q = qdisc_priv(sch); 1001 struct qfq_sched *q = qdisc_priv(sch);
827 struct qfq_group *grp; 1002 struct qfq_aggregate *in_serv_agg = q->in_serv_agg;
828 struct qfq_class *cl; 1003 struct qfq_class *cl;
829 struct sk_buff *skb; 1004 struct sk_buff *skb = NULL;
830 unsigned int len; 1005 /* next-packet len, 0 means no more active classes in in-service agg */
831 u64 old_V; 1006 unsigned int len = 0;
832 1007
833 if (!q->bitmaps[ER]) 1008 if (in_serv_agg == NULL)
834 return NULL; 1009 return NULL;
835 1010
836 grp = qfq_ffs(q, q->bitmaps[ER]); 1011 if (!list_empty(&in_serv_agg->active))
1012 skb = qfq_peek_skb(in_serv_agg, &cl, &len);
837 1013
838 cl = qfq_slot_head(grp); 1014 /*
839 skb = qdisc_dequeue_peeked(cl->qdisc); 1015 * If there are no active classes in the in-service aggregate,
840 if (!skb) { 1016 * or if the aggregate has not enough budget to serve its next
841 WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n"); 1017 * class, then choose the next aggregate to serve.
842 return NULL; 1018 */
1019 if (len == 0 || in_serv_agg->budget < len) {
1020 charge_actual_service(in_serv_agg);
1021
1022 /* recharge the budget of the aggregate */
1023 in_serv_agg->initial_budget = in_serv_agg->budget =
1024 in_serv_agg->budgetmax;
1025
1026 if (!list_empty(&in_serv_agg->active))
1027 /*
1028 * Still active: reschedule for
1029 * service. Possible optimization: if no other
1030 * aggregate is active, then there is no point
1031 * in rescheduling this aggregate, and we can
1032 * just keep it as the in-service one. This
1033 * should be however a corner case, and to
1034 * handle it, we would need to maintain an
1035 * extra num_active_aggs field.
1036 */
1037 qfq_activate_agg(q, in_serv_agg, requeue);
1038 else if (sch->q.qlen == 0) { /* no aggregate to serve */
1039 q->in_serv_agg = NULL;
1040 return NULL;
1041 }
1042
1043 /*
1044 * If we get here, there are other aggregates queued:
1045 * choose the new aggregate to serve.
1046 */
1047 in_serv_agg = q->in_serv_agg = qfq_choose_next_agg(q);
1048 skb = qfq_peek_skb(in_serv_agg, &cl, &len);
843 } 1049 }
1050 if (!skb)
1051 return NULL;
844 1052
845 sch->q.qlen--; 1053 sch->q.qlen--;
846 qdisc_bstats_update(sch, skb); 1054 qdisc_bstats_update(sch, skb);
847 1055
848 old_V = q->V; 1056 agg_dequeue(in_serv_agg, cl, len);
849 len = qdisc_pkt_len(skb); 1057 in_serv_agg->budget -= len;
850 q->V += (u64)len * IWSUM; 1058 q->V += (u64)len * IWSUM;
851 pr_debug("qfq dequeue: len %u F %lld now %lld\n", 1059 pr_debug("qfq dequeue: len %u F %lld now %lld\n",
852 len, (unsigned long long) cl->F, (unsigned long long) q->V); 1060 len, (unsigned long long) in_serv_agg->F,
1061 (unsigned long long) q->V);
853 1062
854 if (qfq_update_class(grp, cl)) { 1063 return skb;
855 u64 old_F = grp->F; 1064}
856 1065
857 cl = qfq_slot_scan(grp); 1066static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
858 if (!cl) 1067{
859 __clear_bit(grp->index, &q->bitmaps[ER]); 1068 struct qfq_group *grp;
860 else { 1069 struct qfq_aggregate *agg, *new_front_agg;
861 u64 roundedS = qfq_round_down(cl->S, grp->slot_shift); 1070 u64 old_F;
862 unsigned int s;
863 1071
864 if (grp->S == roundedS) 1072 qfq_update_eligible(q);
865 goto skip_unblock; 1073 q->oldV = q->V;
866 grp->S = roundedS; 1074
867 grp->F = roundedS + (2ULL << grp->slot_shift); 1075 if (!q->bitmaps[ER])
868 __clear_bit(grp->index, &q->bitmaps[ER]); 1076 return NULL;
869 s = qfq_calc_state(q, grp); 1077
870 __set_bit(grp->index, &q->bitmaps[s]); 1078 grp = qfq_ffs(q, q->bitmaps[ER]);
871 } 1079 old_F = grp->F;
1080
1081 agg = qfq_slot_head(grp);
872 1082
873 qfq_unblock_groups(q, grp->index, old_F); 1083 /* agg starts to be served, remove it from schedule */
1084 qfq_front_slot_remove(grp);
1085
1086 new_front_agg = qfq_slot_scan(grp);
1087
1088 if (new_front_agg == NULL) /* group is now inactive, remove from ER */
1089 __clear_bit(grp->index, &q->bitmaps[ER]);
1090 else {
1091 u64 roundedS = qfq_round_down(new_front_agg->S,
1092 grp->slot_shift);
1093 unsigned int s;
1094
1095 if (grp->S == roundedS)
1096 return agg;
1097 grp->S = roundedS;
1098 grp->F = roundedS + (2ULL << grp->slot_shift);
1099 __clear_bit(grp->index, &q->bitmaps[ER]);
1100 s = qfq_calc_state(q, grp);
1101 __set_bit(grp->index, &q->bitmaps[s]);
874 } 1102 }
875 1103
876skip_unblock: 1104 qfq_unblock_groups(q, grp->index, old_F);
877 qfq_update_eligible(q, old_V);
878 1105
879 return skb; 1106 return agg;
880} 1107}
881 1108
882/* 1109/*
883 * Assign a reasonable start time for a new flow k in group i. 1110 * Assign a reasonable start time for a new aggregate in group i.
884 * Admissible values for \hat(F) are multiples of \sigma_i 1111 * Admissible values for \hat(F) are multiples of \sigma_i
885 * no greater than V+\sigma_i . Larger values mean that 1112 * no greater than V+\sigma_i . Larger values mean that
886 * we had a wraparound so we consider the timestamp to be stale. 1113 * we had a wraparound so we consider the timestamp to be stale.
887 * 1114 *
888 * If F is not stale and F >= V then we set S = F. 1115 * If F is not stale and F >= V then we set S = F.
889 * Otherwise we should assign S = V, but this may violate 1116 * Otherwise we should assign S = V, but this may violate
890 * the ordering in ER. So, if we have groups in ER, set S to 1117 * the ordering in EB (see [2]). So, if we have groups in ER,
891 * the F_j of the first group j which would be blocking us. 1118 * set S to the F_j of the first group j which would be blocking us.
892 * We are guaranteed not to move S backward because 1119 * We are guaranteed not to move S backward because
893 * otherwise our group i would still be blocked. 1120 * otherwise our group i would still be blocked.
894 */ 1121 */
895static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl) 1122static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
896{ 1123{
897 unsigned long mask; 1124 unsigned long mask;
898 u64 limit, roundedF; 1125 u64 limit, roundedF;
899 int slot_shift = cl->grp->slot_shift; 1126 int slot_shift = agg->grp->slot_shift;
900 1127
901 roundedF = qfq_round_down(cl->F, slot_shift); 1128 roundedF = qfq_round_down(agg->F, slot_shift);
902 limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift); 1129 limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);
903 1130
904 if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) { 1131 if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) {
905 /* timestamp was stale */ 1132 /* timestamp was stale */
906 mask = mask_from(q->bitmaps[ER], cl->grp->index); 1133 mask = mask_from(q->bitmaps[ER], agg->grp->index);
907 if (mask) { 1134 if (mask) {
908 struct qfq_group *next = qfq_ffs(q, mask); 1135 struct qfq_group *next = qfq_ffs(q, mask);
909 if (qfq_gt(roundedF, next->F)) { 1136 if (qfq_gt(roundedF, next->F)) {
910 if (qfq_gt(limit, next->F)) 1137 if (qfq_gt(limit, next->F))
911 cl->S = next->F; 1138 agg->S = next->F;
912 else /* preserve timestamp correctness */ 1139 else /* preserve timestamp correctness */
913 cl->S = limit; 1140 agg->S = limit;
914 return; 1141 return;
915 } 1142 }
916 } 1143 }
917 cl->S = q->V; 1144 agg->S = q->V;
918 } else /* timestamp is not stale */ 1145 } else /* timestamp is not stale */
919 cl->S = cl->F; 1146 agg->S = agg->F;
920} 1147}
921 1148
1149/*
1150 * Update the timestamps of agg before scheduling/rescheduling it for
1151 * service. In particular, assign to agg->F its maximum possible
1152 * value, i.e., the virtual finish time with which the aggregate
1153 * should be labeled if it used all its budget once in service.
1154 */
1155static inline void
1156qfq_update_agg_ts(struct qfq_sched *q,
1157 struct qfq_aggregate *agg, enum update_reason reason)
1158{
1159 if (reason != requeue)
1160 qfq_update_start(q, agg);
1161 else /* just charge agg for the service received */
1162 agg->S = agg->F;
1163
1164 agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w;
1165}
1166
1167static void qfq_schedule_agg(struct qfq_sched *, struct qfq_aggregate *);
1168
922static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) 1169static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
923{ 1170{
924 struct qfq_sched *q = qdisc_priv(sch); 1171 struct qfq_sched *q = qdisc_priv(sch);
925 struct qfq_class *cl; 1172 struct qfq_class *cl;
1173 struct qfq_aggregate *agg;
926 int err = 0; 1174 int err = 0;
927 1175
928 cl = qfq_classify(skb, sch, &err); 1176 cl = qfq_classify(skb, sch, &err);
@@ -934,11 +1182,13 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
934 } 1182 }
935 pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid); 1183 pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
936 1184
937 if (unlikely(cl->lmax < qdisc_pkt_len(skb))) { 1185 if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) {
938 pr_debug("qfq: increasing maxpkt from %u to %u for class %u", 1186 pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
939 cl->lmax, qdisc_pkt_len(skb), cl->common.classid); 1187 cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
940 qfq_update_reactivate_class(q, cl, cl->inv_w, 1188 err = qfq_change_agg(sch, cl, cl->agg->class_weight,
941 qdisc_pkt_len(skb), 0); 1189 qdisc_pkt_len(skb));
1190 if (err)
1191 return err;
942 } 1192 }
943 1193
944 err = qdisc_enqueue(skb, cl->qdisc); 1194 err = qdisc_enqueue(skb, cl->qdisc);
@@ -954,35 +1204,50 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
954 bstats_update(&cl->bstats, skb); 1204 bstats_update(&cl->bstats, skb);
955 ++sch->q.qlen; 1205 ++sch->q.qlen;
956 1206
957 /* If the new skb is not the head of queue, then done here. */ 1207 agg = cl->agg;
958 if (cl->qdisc->q.qlen != 1) 1208 /* if the queue was not empty, then done here */
1209 if (cl->qdisc->q.qlen != 1) {
1210 if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
1211 list_first_entry(&agg->active, struct qfq_class, alist)
1212 == cl && cl->deficit < qdisc_pkt_len(skb))
1213 list_move_tail(&cl->alist, &agg->active);
1214
959 return err; 1215 return err;
1216 }
1217
1218 /* schedule class for service within the aggregate */
1219 cl->deficit = agg->lmax;
1220 list_add_tail(&cl->alist, &agg->active);
960 1221
961 /* If reach this point, queue q was idle */ 1222 if (list_first_entry(&agg->active, struct qfq_class, alist) != cl)
962 qfq_activate_class(q, cl, qdisc_pkt_len(skb)); 1223 return err; /* aggregate was not empty, nothing else to do */
1224
1225 /* recharge budget */
1226 agg->initial_budget = agg->budget = agg->budgetmax;
1227
1228 qfq_update_agg_ts(q, agg, enqueue);
1229 if (q->in_serv_agg == NULL)
1230 q->in_serv_agg = agg;
1231 else if (agg != q->in_serv_agg)
1232 qfq_schedule_agg(q, agg);
963 1233
964 return err; 1234 return err;
965} 1235}
966 1236
967/* 1237/*
968 * Handle class switch from idle to backlogged. 1238 * Schedule aggregate according to its timestamps.
969 */ 1239 */
970static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl, 1240static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
971 unsigned int pkt_len)
972{ 1241{
973 struct qfq_group *grp = cl->grp; 1242 struct qfq_group *grp = agg->grp;
974 u64 roundedS; 1243 u64 roundedS;
975 int s; 1244 int s;
976 1245
977 qfq_update_start(q, cl); 1246 roundedS = qfq_round_down(agg->S, grp->slot_shift);
978
979 /* compute new finish time and rounded start. */
980 cl->F = cl->S + (u64)pkt_len * cl->inv_w;
981 roundedS = qfq_round_down(cl->S, grp->slot_shift);
982 1247
983 /* 1248 /*
984 * insert cl in the correct bucket. 1249 * Insert agg in the correct bucket.
985 * If cl->S >= grp->S we don't need to adjust the 1250 * If agg->S >= grp->S we don't need to adjust the
986 * bucket list and simply go to the insertion phase. 1251 * bucket list and simply go to the insertion phase.
987 * Otherwise grp->S is decreasing, we must make room 1252 * Otherwise grp->S is decreasing, we must make room
988 * in the bucket list, and also recompute the group state. 1253 * in the bucket list, and also recompute the group state.
@@ -990,10 +1255,10 @@ static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
990 * was in ER make sure to adjust V. 1255 * was in ER make sure to adjust V.
991 */ 1256 */
992 if (grp->full_slots) { 1257 if (grp->full_slots) {
993 if (!qfq_gt(grp->S, cl->S)) 1258 if (!qfq_gt(grp->S, agg->S))
994 goto skip_update; 1259 goto skip_update;
995 1260
996 /* create a slot for this cl->S */ 1261 /* create a slot for this agg->S */
997 qfq_slot_rotate(grp, roundedS); 1262 qfq_slot_rotate(grp, roundedS);
998 /* group was surely ineligible, remove */ 1263 /* group was surely ineligible, remove */
999 __clear_bit(grp->index, &q->bitmaps[IR]); 1264 __clear_bit(grp->index, &q->bitmaps[IR]);
@@ -1008,46 +1273,61 @@ static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
1008 1273
1009 pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n", 1274 pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n",
1010 s, q->bitmaps[s], 1275 s, q->bitmaps[s],
1011 (unsigned long long) cl->S, 1276 (unsigned long long) agg->S,
1012 (unsigned long long) cl->F, 1277 (unsigned long long) agg->F,
1013 (unsigned long long) q->V); 1278 (unsigned long long) q->V);
1014 1279
1015skip_update: 1280skip_update:
1016 qfq_slot_insert(grp, cl, roundedS); 1281 qfq_slot_insert(grp, agg, roundedS);
1017} 1282}
1018 1283
1019 1284
1285/* Update agg ts and schedule agg for service */
1286static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
1287 enum update_reason reason)
1288{
1289 qfq_update_agg_ts(q, agg, reason);
1290 qfq_schedule_agg(q, agg);
1291}
1292
1020static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp, 1293static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
1021 struct qfq_class *cl) 1294 struct qfq_aggregate *agg)
1022{ 1295{
1023 unsigned int i, offset; 1296 unsigned int i, offset;
1024 u64 roundedS; 1297 u64 roundedS;
1025 1298
1026 roundedS = qfq_round_down(cl->S, grp->slot_shift); 1299 roundedS = qfq_round_down(agg->S, grp->slot_shift);
1027 offset = (roundedS - grp->S) >> grp->slot_shift; 1300 offset = (roundedS - grp->S) >> grp->slot_shift;
1301
1028 i = (grp->front + offset) % QFQ_MAX_SLOTS; 1302 i = (grp->front + offset) % QFQ_MAX_SLOTS;
1029 1303
1030 hlist_del(&cl->next); 1304 hlist_del(&agg->next);
1031 if (hlist_empty(&grp->slots[i])) 1305 if (hlist_empty(&grp->slots[i]))
1032 __clear_bit(offset, &grp->full_slots); 1306 __clear_bit(offset, &grp->full_slots);
1033} 1307}
1034 1308
1035/* 1309/*
1036 * called to forcibly destroy a queue. 1310 * Called to forcibly deschedule an aggregate. If the aggregate is
1037 * If the queue is not in the front bucket, or if it has 1311 * not in the front bucket, or if the latter has other aggregates in
1038 * other queues in the front bucket, we can simply remove 1312 * the front bucket, we can simply remove the aggregate with no other
1039 * the queue with no other side effects. 1313 * side effects.
1040 * Otherwise we must propagate the event up. 1314 * Otherwise we must propagate the event up.
1041 */ 1315 */
1042static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl) 1316static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
1043{ 1317{
1044 struct qfq_group *grp = cl->grp; 1318 struct qfq_group *grp = agg->grp;
1045 unsigned long mask; 1319 unsigned long mask;
1046 u64 roundedS; 1320 u64 roundedS;
1047 int s; 1321 int s;
1048 1322
1049 cl->F = cl->S; 1323 if (agg == q->in_serv_agg) {
1050 qfq_slot_remove(q, grp, cl); 1324 charge_actual_service(agg);
1325 q->in_serv_agg = qfq_choose_next_agg(q);
1326 return;
1327 }
1328
1329 agg->F = agg->S;
1330 qfq_slot_remove(q, grp, agg);
1051 1331
1052 if (!grp->full_slots) { 1332 if (!grp->full_slots) {
1053 __clear_bit(grp->index, &q->bitmaps[IR]); 1333 __clear_bit(grp->index, &q->bitmaps[IR]);
@@ -1066,8 +1346,8 @@ static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
1066 } 1346 }
1067 __clear_bit(grp->index, &q->bitmaps[ER]); 1347 __clear_bit(grp->index, &q->bitmaps[ER]);
1068 } else if (hlist_empty(&grp->slots[grp->front])) { 1348 } else if (hlist_empty(&grp->slots[grp->front])) {
1069 cl = qfq_slot_scan(grp); 1349 agg = qfq_slot_scan(grp);
1070 roundedS = qfq_round_down(cl->S, grp->slot_shift); 1350 roundedS = qfq_round_down(agg->S, grp->slot_shift);
1071 if (grp->S != roundedS) { 1351 if (grp->S != roundedS) {
1072 __clear_bit(grp->index, &q->bitmaps[ER]); 1352 __clear_bit(grp->index, &q->bitmaps[ER]);
1073 __clear_bit(grp->index, &q->bitmaps[IR]); 1353 __clear_bit(grp->index, &q->bitmaps[IR]);
@@ -1080,7 +1360,7 @@ static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
1080 } 1360 }
1081 } 1361 }
1082 1362
1083 qfq_update_eligible(q, q->V); 1363 qfq_update_eligible(q);
1084} 1364}
1085 1365
1086static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg) 1366static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
@@ -1092,6 +1372,32 @@ static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
1092 qfq_deactivate_class(q, cl); 1372 qfq_deactivate_class(q, cl);
1093} 1373}
1094 1374
1375static unsigned int qfq_drop_from_slot(struct qfq_sched *q,
1376 struct hlist_head *slot)
1377{
1378 struct qfq_aggregate *agg;
1379 struct hlist_node *n;
1380 struct qfq_class *cl;
1381 unsigned int len;
1382
1383 hlist_for_each_entry(agg, n, slot, next) {
1384 list_for_each_entry(cl, &agg->active, alist) {
1385
1386 if (!cl->qdisc->ops->drop)
1387 continue;
1388
1389 len = cl->qdisc->ops->drop(cl->qdisc);
1390 if (len > 0) {
1391 if (cl->qdisc->q.qlen == 0)
1392 qfq_deactivate_class(q, cl);
1393
1394 return len;
1395 }
1396 }
1397 }
1398 return 0;
1399}
1400
1095static unsigned int qfq_drop(struct Qdisc *sch) 1401static unsigned int qfq_drop(struct Qdisc *sch)
1096{ 1402{
1097 struct qfq_sched *q = qdisc_priv(sch); 1403 struct qfq_sched *q = qdisc_priv(sch);
@@ -1101,24 +1407,13 @@ static unsigned int qfq_drop(struct Qdisc *sch)
1101 for (i = 0; i <= QFQ_MAX_INDEX; i++) { 1407 for (i = 0; i <= QFQ_MAX_INDEX; i++) {
1102 grp = &q->groups[i]; 1408 grp = &q->groups[i];
1103 for (j = 0; j < QFQ_MAX_SLOTS; j++) { 1409 for (j = 0; j < QFQ_MAX_SLOTS; j++) {
1104 struct qfq_class *cl; 1410 len = qfq_drop_from_slot(q, &grp->slots[j]);
1105 struct hlist_node *n; 1411 if (len > 0) {
1106 1412 sch->q.qlen--;
1107 hlist_for_each_entry(cl, n, &grp->slots[j], next) { 1413 return len;
1108
1109 if (!cl->qdisc->ops->drop)
1110 continue;
1111
1112 len = cl->qdisc->ops->drop(cl->qdisc);
1113 if (len > 0) {
1114 sch->q.qlen--;
1115 if (!cl->qdisc->q.qlen)
1116 qfq_deactivate_class(q, cl);
1117
1118 return len;
1119 }
1120 } 1414 }
1121 } 1415 }
1416
1122 } 1417 }
1123 1418
1124 return 0; 1419 return 0;
@@ -1129,44 +1424,51 @@ static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
1129 struct qfq_sched *q = qdisc_priv(sch); 1424 struct qfq_sched *q = qdisc_priv(sch);
1130 struct qfq_group *grp; 1425 struct qfq_group *grp;
1131 int i, j, err; 1426 int i, j, err;
1427 u32 max_cl_shift, maxbudg_shift, max_classes;
1132 1428
1133 err = qdisc_class_hash_init(&q->clhash); 1429 err = qdisc_class_hash_init(&q->clhash);
1134 if (err < 0) 1430 if (err < 0)
1135 return err; 1431 return err;
1136 1432
1433 if (qdisc_dev(sch)->tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES)
1434 max_classes = QFQ_MAX_AGG_CLASSES;
1435 else
1436 max_classes = qdisc_dev(sch)->tx_queue_len + 1;
1437 /* max_cl_shift = floor(log_2(max_classes)) */
1438 max_cl_shift = __fls(max_classes);
1439 q->max_agg_classes = 1<<max_cl_shift;
1440
1441 /* maxbudg_shift = log2(max_len * max_classes_per_agg) */
1442 maxbudg_shift = QFQ_MTU_SHIFT + max_cl_shift;
1443 q->min_slot_shift = FRAC_BITS + maxbudg_shift - QFQ_MAX_INDEX;
1444
1137 for (i = 0; i <= QFQ_MAX_INDEX; i++) { 1445 for (i = 0; i <= QFQ_MAX_INDEX; i++) {
1138 grp = &q->groups[i]; 1446 grp = &q->groups[i];
1139 grp->index = i; 1447 grp->index = i;
1140 grp->slot_shift = QFQ_MTU_SHIFT + FRAC_BITS 1448 grp->slot_shift = q->min_slot_shift + i;
1141 - (QFQ_MAX_INDEX - i);
1142 for (j = 0; j < QFQ_MAX_SLOTS; j++) 1449 for (j = 0; j < QFQ_MAX_SLOTS; j++)
1143 INIT_HLIST_HEAD(&grp->slots[j]); 1450 INIT_HLIST_HEAD(&grp->slots[j]);
1144 } 1451 }
1145 1452
1453 INIT_HLIST_HEAD(&q->nonfull_aggs);
1454
1146 return 0; 1455 return 0;
1147} 1456}
1148 1457
1149static void qfq_reset_qdisc(struct Qdisc *sch) 1458static void qfq_reset_qdisc(struct Qdisc *sch)
1150{ 1459{
1151 struct qfq_sched *q = qdisc_priv(sch); 1460 struct qfq_sched *q = qdisc_priv(sch);
1152 struct qfq_group *grp;
1153 struct qfq_class *cl; 1461 struct qfq_class *cl;
1154 struct hlist_node *n, *tmp; 1462 struct hlist_node *n;
1155 unsigned int i, j; 1463 unsigned int i;
1156 1464
1157 for (i = 0; i <= QFQ_MAX_INDEX; i++) { 1465 for (i = 0; i < q->clhash.hashsize; i++) {
1158 grp = &q->groups[i]; 1466 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
1159 for (j = 0; j < QFQ_MAX_SLOTS; j++) { 1467 if (cl->qdisc->q.qlen > 0)
1160 hlist_for_each_entry_safe(cl, n, tmp,
1161 &grp->slots[j], next) {
1162 qfq_deactivate_class(q, cl); 1468 qfq_deactivate_class(q, cl);
1163 }
1164 }
1165 }
1166 1469
1167 for (i = 0; i < q->clhash.hashsize; i++) {
1168 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
1169 qdisc_reset(cl->qdisc); 1470 qdisc_reset(cl->qdisc);
1471 }
1170 } 1472 }
1171 sch->q.qlen = 0; 1473 sch->q.qlen = 0;
1172} 1474}
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
index 126b014eb79b..7521d944c0fb 100644
--- a/net/sctp/Kconfig
+++ b/net/sctp/Kconfig
@@ -9,7 +9,6 @@ menuconfig IP_SCTP
9 select CRYPTO 9 select CRYPTO
10 select CRYPTO_HMAC 10 select CRYPTO_HMAC
11 select CRYPTO_SHA1 11 select CRYPTO_SHA1
12 select CRYPTO_MD5 if SCTP_HMAC_MD5
13 select LIBCRC32C 12 select LIBCRC32C
14 ---help--- 13 ---help---
15 Stream Control Transmission Protocol 14 Stream Control Transmission Protocol
@@ -67,34 +66,45 @@ config SCTP_DBG_OBJCNT
67 'cat /proc/net/sctp/sctp_dbg_objcnt' 66 'cat /proc/net/sctp/sctp_dbg_objcnt'
68 67
69 If unsure, say N 68 If unsure, say N
70
71choice 69choice
72 prompt "SCTP: Cookie HMAC Algorithm" 70 prompt "Default SCTP cookie HMAC encoding"
73 default SCTP_HMAC_MD5 71 default SCTP_DEFAULT_COOKIE_HMAC_MD5
72 help
73 This option sets the default sctp cookie hmac algorithm
74 when in doubt select 'md5'
75
76config SCTP_DEFAULT_COOKIE_HMAC_MD5
77 bool "Enable optional MD5 hmac cookie generation"
74 help 78 help
75 HMAC algorithm to be used during association initialization. It 79 Enable optional MD5 hmac based SCTP cookie generation
76 is strongly recommended to use HMAC-SHA1 or HMAC-MD5. See 80 select SCTP_COOKIE_HMAC_MD5
77 configuration for Cryptographic API and enable those algorithms 81
78 to make usable by SCTP. 82config SCTP_DEFAULT_COOKIE_HMAC_SHA1
79 83 bool "Enable optional SHA1 hmac cookie generation"
80config SCTP_HMAC_NONE 84 help
81 bool "None" 85 Enable optional SHA1 hmac based SCTP cookie generation
82 help 86 select SCTP_COOKIE_HMAC_SHA1
83 Choosing this disables the use of an HMAC during association 87
84 establishment. It is advised to use either HMAC-MD5 or HMAC-SHA1. 88config SCTP_DEFAULT_COOKIE_HMAC_NONE
85 89 bool "Use no hmac alg in SCTP cookie generation"
86config SCTP_HMAC_SHA1
87 bool "HMAC-SHA1"
88 help
89 Enable the use of HMAC-SHA1 during association establishment. It
90 is advised to use either HMAC-MD5 or HMAC-SHA1.
91
92config SCTP_HMAC_MD5
93 bool "HMAC-MD5"
94 help 90 help
95 Enable the use of HMAC-MD5 during association establishment. It is 91 Use no hmac algorithm in SCTP cookie generation
96 advised to use either HMAC-MD5 or HMAC-SHA1.
97 92
98endchoice 93endchoice
99 94
95config SCTP_COOKIE_HMAC_MD5
96 bool "Enable optional MD5 hmac cookie generation"
97 help
98 Enable optional MD5 hmac based SCTP cookie generation
99 select CRYPTO_HMAC if SCTP_COOKIE_HMAC_MD5
100 select CRYPTO_MD5 if SCTP_COOKIE_HMAC_MD5
101
102config SCTP_COOKIE_HMAC_SHA1
103 bool "Enable optional SHA1 hmac cookie generation"
104 help
105 Enable optional SHA1 hmac based SCTP cookie generation
106 select CRYPTO_HMAC if SCTP_COOKIE_HMAC_SHA1
107 select CRYPTO_SHA1 if SCTP_COOKIE_HMAC_SHA1
108
109
100endif # IP_SCTP 110endif # IP_SCTP
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index b1ef3bc301a5..b45ed1f96921 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -321,6 +321,9 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
321 asoc->default_timetolive = sp->default_timetolive; 321 asoc->default_timetolive = sp->default_timetolive;
322 asoc->default_rcv_context = sp->default_rcv_context; 322 asoc->default_rcv_context = sp->default_rcv_context;
323 323
324 /* SCTP_GET_ASSOC_STATS COUNTERS */
325 memset(&asoc->stats, 0, sizeof(struct sctp_priv_assoc_stats));
326
324 /* AUTH related initializations */ 327 /* AUTH related initializations */
325 INIT_LIST_HEAD(&asoc->endpoint_shared_keys); 328 INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
326 err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp); 329 err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
@@ -445,7 +448,7 @@ void sctp_association_free(struct sctp_association *asoc)
445 /* Release the transport structures. */ 448 /* Release the transport structures. */
446 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { 449 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
447 transport = list_entry(pos, struct sctp_transport, transports); 450 transport = list_entry(pos, struct sctp_transport, transports);
448 list_del(pos); 451 list_del_rcu(pos);
449 sctp_transport_free(transport); 452 sctp_transport_free(transport);
450 } 453 }
451 454
@@ -565,7 +568,7 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
565 sctp_assoc_update_retran_path(asoc); 568 sctp_assoc_update_retran_path(asoc);
566 569
567 /* Remove this peer from the list. */ 570 /* Remove this peer from the list. */
568 list_del(&peer->transports); 571 list_del_rcu(&peer->transports);
569 572
570 /* Get the first transport of asoc. */ 573 /* Get the first transport of asoc. */
571 pos = asoc->peer.transport_addr_list.next; 574 pos = asoc->peer.transport_addr_list.next;
@@ -760,12 +763,13 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
760 763
761 /* Set the transport's RTO.initial value */ 764 /* Set the transport's RTO.initial value */
762 peer->rto = asoc->rto_initial; 765 peer->rto = asoc->rto_initial;
766 sctp_max_rto(asoc, peer);
763 767
764 /* Set the peer's active state. */ 768 /* Set the peer's active state. */
765 peer->state = peer_state; 769 peer->state = peer_state;
766 770
767 /* Attach the remote transport to our asoc. */ 771 /* Attach the remote transport to our asoc. */
768 list_add_tail(&peer->transports, &asoc->peer.transport_addr_list); 772 list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
769 asoc->peer.transport_count++; 773 asoc->peer.transport_count++;
770 774
771 /* If we do not yet have a primary path, set one. */ 775 /* If we do not yet have a primary path, set one. */
@@ -1152,8 +1156,12 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
1152 */ 1156 */
1153 if (sctp_chunk_is_data(chunk)) 1157 if (sctp_chunk_is_data(chunk))
1154 asoc->peer.last_data_from = chunk->transport; 1158 asoc->peer.last_data_from = chunk->transport;
1155 else 1159 else {
1156 SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS); 1160 SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
1161 asoc->stats.ictrlchunks++;
1162 if (chunk->chunk_hdr->type == SCTP_CID_SACK)
1163 asoc->stats.isacks++;
1164 }
1157 1165
1158 if (chunk->transport) 1166 if (chunk->transport)
1159 chunk->transport->last_time_heard = jiffies; 1167 chunk->transport->last_time_heard = jiffies;
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 7c2df9c33df3..69ce21e3716f 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -183,7 +183,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
183 183
184 msg = sctp_datamsg_new(GFP_KERNEL); 184 msg = sctp_datamsg_new(GFP_KERNEL);
185 if (!msg) 185 if (!msg)
186 return NULL; 186 return ERR_PTR(-ENOMEM);
187 187
188 /* Note: Calculate this outside of the loop, so that all fragments 188 /* Note: Calculate this outside of the loop, so that all fragments
189 * have the same expiration. 189 * have the same expiration.
@@ -280,11 +280,14 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
280 280
281 chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0); 281 chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0);
282 282
283 if (!chunk) 283 if (!chunk) {
284 err = -ENOMEM;
284 goto errout; 285 goto errout;
286 }
287
285 err = sctp_user_addto_chunk(chunk, offset, len, msgh->msg_iov); 288 err = sctp_user_addto_chunk(chunk, offset, len, msgh->msg_iov);
286 if (err < 0) 289 if (err < 0)
287 goto errout; 290 goto errout_chunk_free;
288 291
289 offset += len; 292 offset += len;
290 293
@@ -315,8 +318,10 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
315 318
316 chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0); 319 chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0);
317 320
318 if (!chunk) 321 if (!chunk) {
322 err = -ENOMEM;
319 goto errout; 323 goto errout;
324 }
320 325
321 err = sctp_user_addto_chunk(chunk, offset, over,msgh->msg_iov); 326 err = sctp_user_addto_chunk(chunk, offset, over,msgh->msg_iov);
322 327
@@ -324,7 +329,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
324 __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr 329 __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
325 - (__u8 *)chunk->skb->data); 330 - (__u8 *)chunk->skb->data);
326 if (err < 0) 331 if (err < 0)
327 goto errout; 332 goto errout_chunk_free;
328 333
329 sctp_datamsg_assign(msg, chunk); 334 sctp_datamsg_assign(msg, chunk);
330 list_add_tail(&chunk->frag_list, &msg->chunks); 335 list_add_tail(&chunk->frag_list, &msg->chunks);
@@ -332,6 +337,9 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
332 337
333 return msg; 338 return msg;
334 339
340errout_chunk_free:
341 sctp_chunk_free(chunk);
342
335errout: 343errout:
336 list_for_each_safe(pos, temp, &msg->chunks) { 344 list_for_each_safe(pos, temp, &msg->chunks) {
337 list_del_init(pos); 345 list_del_init(pos);
@@ -339,7 +347,7 @@ errout:
339 sctp_chunk_free(chunk); 347 sctp_chunk_free(chunk);
340 } 348 }
341 sctp_datamsg_put(msg); 349 sctp_datamsg_put(msg);
342 return NULL; 350 return ERR_PTR(err);
343} 351}
344 352
345/* Check whether this message has expired. */ 353/* Check whether this message has expired. */
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 1859e2bc83d1..17a001bac2cc 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -163,7 +163,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
163 163
164 list_add(&null_key->key_list, &ep->endpoint_shared_keys); 164 list_add(&null_key->key_list, &ep->endpoint_shared_keys);
165 165
166 /* Allocate and initialize transorms arrays for suported HMACs. */ 166 /* Allocate and initialize transorms arrays for supported HMACs. */
167 err = sctp_auth_init_hmacs(ep, gfp); 167 err = sctp_auth_init_hmacs(ep, gfp);
168 if (err) 168 if (err)
169 goto nomem_hmacs; 169 goto nomem_hmacs;
@@ -480,8 +480,11 @@ normal:
480 */ 480 */
481 if (asoc && sctp_chunk_is_data(chunk)) 481 if (asoc && sctp_chunk_is_data(chunk))
482 asoc->peer.last_data_from = chunk->transport; 482 asoc->peer.last_data_from = chunk->transport;
483 else 483 else {
484 SCTP_INC_STATS(sock_net(ep->base.sk), SCTP_MIB_INCTRLCHUNKS); 484 SCTP_INC_STATS(sock_net(ep->base.sk), SCTP_MIB_INCTRLCHUNKS);
485 if (asoc)
486 asoc->stats.ictrlchunks++;
487 }
485 488
486 if (chunk->transport) 489 if (chunk->transport)
487 chunk->transport->last_time_heard = jiffies; 490 chunk->transport->last_time_heard = jiffies;
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 397296fb156f..2d5ad280de38 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -104,6 +104,8 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
104 * on the BH related data structures. 104 * on the BH related data structures.
105 */ 105 */
106 list_add_tail(&chunk->list, &q->in_chunk_list); 106 list_add_tail(&chunk->list, &q->in_chunk_list);
107 if (chunk->asoc)
108 chunk->asoc->stats.ipackets++;
107 q->immediate.func(&q->immediate); 109 q->immediate.func(&q->immediate);
108} 110}
109 111
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index ea14cb445295..f3f0f4dc31dd 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -345,7 +345,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
345 } 345 }
346 346
347out: 347out:
348 if (!IS_ERR(dst)) { 348 if (!IS_ERR_OR_NULL(dst)) {
349 struct rt6_info *rt; 349 struct rt6_info *rt;
350 rt = (struct rt6_info *)dst; 350 rt = (struct rt6_info *)dst;
351 t->dst = dst; 351 t->dst = dst;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 4e90188bf489..f5200a2ad852 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -311,6 +311,8 @@ static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
311 311
312 case SCTP_CID_SACK: 312 case SCTP_CID_SACK:
313 packet->has_sack = 1; 313 packet->has_sack = 1;
314 if (chunk->asoc)
315 chunk->asoc->stats.osacks++;
314 break; 316 break;
315 317
316 case SCTP_CID_AUTH: 318 case SCTP_CID_AUTH:
@@ -584,11 +586,13 @@ int sctp_packet_transmit(struct sctp_packet *packet)
584 */ 586 */
585 587
586 /* Dump that on IP! */ 588 /* Dump that on IP! */
587 if (asoc && asoc->peer.last_sent_to != tp) { 589 if (asoc) {
588 /* Considering the multiple CPU scenario, this is a 590 asoc->stats.opackets++;
589 * "correcter" place for last_sent_to. --xguo 591 if (asoc->peer.last_sent_to != tp)
590 */ 592 /* Considering the multiple CPU scenario, this is a
591 asoc->peer.last_sent_to = tp; 593 * "correcter" place for last_sent_to. --xguo
594 */
595 asoc->peer.last_sent_to = tp;
592 } 596 }
593 597
594 if (has_data) { 598 if (has_data) {
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 1b4a7f8ec3fd..379c81dee9d1 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -667,6 +667,7 @@ redo:
667 chunk->fast_retransmit = SCTP_DONT_FRTX; 667 chunk->fast_retransmit = SCTP_DONT_FRTX;
668 668
669 q->empty = 0; 669 q->empty = 0;
670 q->asoc->stats.rtxchunks++;
670 break; 671 break;
671 } 672 }
672 673
@@ -876,12 +877,14 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
876 if (status != SCTP_XMIT_OK) { 877 if (status != SCTP_XMIT_OK) {
877 /* put the chunk back */ 878 /* put the chunk back */
878 list_add(&chunk->list, &q->control_chunk_list); 879 list_add(&chunk->list, &q->control_chunk_list);
879 } else if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) { 880 } else {
881 asoc->stats.octrlchunks++;
880 /* PR-SCTP C5) If a FORWARD TSN is sent, the 882 /* PR-SCTP C5) If a FORWARD TSN is sent, the
881 * sender MUST assure that at least one T3-rtx 883 * sender MUST assure that at least one T3-rtx
882 * timer is running. 884 * timer is running.
883 */ 885 */
884 sctp_transport_reset_timers(transport); 886 if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN)
887 sctp_transport_reset_timers(transport);
885 } 888 }
886 break; 889 break;
887 890
@@ -1055,6 +1058,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
1055 */ 1058 */
1056 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) 1059 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
1057 chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM; 1060 chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
1061 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
1062 asoc->stats.ouodchunks++;
1063 else
1064 asoc->stats.oodchunks++;
1058 1065
1059 break; 1066 break;
1060 1067
@@ -1162,6 +1169,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
1162 1169
1163 sack_ctsn = ntohl(sack->cum_tsn_ack); 1170 sack_ctsn = ntohl(sack->cum_tsn_ack);
1164 gap_ack_blocks = ntohs(sack->num_gap_ack_blocks); 1171 gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
1172 asoc->stats.gapcnt += gap_ack_blocks;
1165 /* 1173 /*
1166 * SFR-CACC algorithm: 1174 * SFR-CACC algorithm:
1167 * On receipt of a SACK the sender SHOULD execute the 1175 * On receipt of a SACK the sender SHOULD execute the
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
index bc6cd75cc1dc..5f7518de2fd1 100644
--- a/net/sctp/probe.c
+++ b/net/sctp/probe.c
@@ -122,7 +122,8 @@ static const struct file_operations sctpprobe_fops = {
122 .llseek = noop_llseek, 122 .llseek = noop_llseek,
123}; 123};
124 124
125sctp_disposition_t jsctp_sf_eat_sack(const struct sctp_endpoint *ep, 125sctp_disposition_t jsctp_sf_eat_sack(struct net *net,
126 const struct sctp_endpoint *ep,
126 const struct sctp_association *asoc, 127 const struct sctp_association *asoc,
127 const sctp_subtype_t type, 128 const sctp_subtype_t type,
128 void *arg, 129 void *arg,
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index c3bea269faf4..8c19e97262ca 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -102,7 +102,7 @@ static const struct file_operations sctp_snmp_seq_fops = {
102 .open = sctp_snmp_seq_open, 102 .open = sctp_snmp_seq_open,
103 .read = seq_read, 103 .read = seq_read,
104 .llseek = seq_lseek, 104 .llseek = seq_lseek,
105 .release = single_release, 105 .release = single_release_net,
106}; 106};
107 107
108/* Set up the proc fs entry for 'snmp' object. */ 108/* Set up the proc fs entry for 'snmp' object. */
@@ -139,7 +139,11 @@ static void sctp_seq_dump_local_addrs(struct seq_file *seq, struct sctp_ep_commo
139 primary = &peer->saddr; 139 primary = &peer->saddr;
140 } 140 }
141 141
142 list_for_each_entry(laddr, &epb->bind_addr.address_list, list) { 142 rcu_read_lock();
143 list_for_each_entry_rcu(laddr, &epb->bind_addr.address_list, list) {
144 if (!laddr->valid)
145 continue;
146
143 addr = &laddr->a; 147 addr = &laddr->a;
144 af = sctp_get_af_specific(addr->sa.sa_family); 148 af = sctp_get_af_specific(addr->sa.sa_family);
145 if (primary && af->cmp_addr(addr, primary)) { 149 if (primary && af->cmp_addr(addr, primary)) {
@@ -147,6 +151,7 @@ static void sctp_seq_dump_local_addrs(struct seq_file *seq, struct sctp_ep_commo
147 } 151 }
148 af->seq_dump_addr(seq, addr); 152 af->seq_dump_addr(seq, addr);
149 } 153 }
154 rcu_read_unlock();
150} 155}
151 156
152/* Dump remote addresses of an association. */ 157/* Dump remote addresses of an association. */
@@ -157,15 +162,20 @@ static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_associa
157 struct sctp_af *af; 162 struct sctp_af *af;
158 163
159 primary = &assoc->peer.primary_addr; 164 primary = &assoc->peer.primary_addr;
160 list_for_each_entry(transport, &assoc->peer.transport_addr_list, 165 rcu_read_lock();
166 list_for_each_entry_rcu(transport, &assoc->peer.transport_addr_list,
161 transports) { 167 transports) {
162 addr = &transport->ipaddr; 168 addr = &transport->ipaddr;
169 if (transport->dead)
170 continue;
171
163 af = sctp_get_af_specific(addr->sa.sa_family); 172 af = sctp_get_af_specific(addr->sa.sa_family);
164 if (af->cmp_addr(addr, primary)) { 173 if (af->cmp_addr(addr, primary)) {
165 seq_printf(seq, "*"); 174 seq_printf(seq, "*");
166 } 175 }
167 af->seq_dump_addr(seq, addr); 176 af->seq_dump_addr(seq, addr);
168 } 177 }
178 rcu_read_unlock();
169} 179}
170 180
171static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos) 181static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos)
@@ -251,7 +261,7 @@ static const struct file_operations sctp_eps_seq_fops = {
251 .open = sctp_eps_seq_open, 261 .open = sctp_eps_seq_open,
252 .read = seq_read, 262 .read = seq_read,
253 .llseek = seq_lseek, 263 .llseek = seq_lseek,
254 .release = seq_release, 264 .release = seq_release_net,
255}; 265};
256 266
257/* Set up the proc fs entry for 'eps' object. */ 267/* Set up the proc fs entry for 'eps' object. */
@@ -372,7 +382,7 @@ static const struct file_operations sctp_assocs_seq_fops = {
372 .open = sctp_assocs_seq_open, 382 .open = sctp_assocs_seq_open,
373 .read = seq_read, 383 .read = seq_read,
374 .llseek = seq_lseek, 384 .llseek = seq_lseek,
375 .release = seq_release, 385 .release = seq_release_net,
376}; 386};
377 387
378/* Set up the proc fs entry for 'assocs' object. */ 388/* Set up the proc fs entry for 'assocs' object. */
@@ -436,12 +446,16 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
436 head = &sctp_assoc_hashtable[hash]; 446 head = &sctp_assoc_hashtable[hash];
437 sctp_local_bh_disable(); 447 sctp_local_bh_disable();
438 read_lock(&head->lock); 448 read_lock(&head->lock);
449 rcu_read_lock();
439 sctp_for_each_hentry(epb, node, &head->chain) { 450 sctp_for_each_hentry(epb, node, &head->chain) {
440 if (!net_eq(sock_net(epb->sk), seq_file_net(seq))) 451 if (!net_eq(sock_net(epb->sk), seq_file_net(seq)))
441 continue; 452 continue;
442 assoc = sctp_assoc(epb); 453 assoc = sctp_assoc(epb);
443 list_for_each_entry(tsp, &assoc->peer.transport_addr_list, 454 list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
444 transports) { 455 transports) {
456 if (tsp->dead)
457 continue;
458
445 /* 459 /*
446 * The remote address (ADDR) 460 * The remote address (ADDR)
447 */ 461 */
@@ -487,6 +501,7 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
487 } 501 }
488 } 502 }
489 503
504 rcu_read_unlock();
490 read_unlock(&head->lock); 505 read_unlock(&head->lock);
491 sctp_local_bh_enable(); 506 sctp_local_bh_enable();
492 507
@@ -517,7 +532,7 @@ static const struct file_operations sctp_remaddr_seq_fops = {
517 .open = sctp_remaddr_seq_open, 532 .open = sctp_remaddr_seq_open,
518 .read = seq_read, 533 .read = seq_read,
519 .llseek = seq_lseek, 534 .llseek = seq_lseek,
520 .release = seq_release, 535 .release = seq_release_net,
521}; 536};
522 537
523int __net_init sctp_remaddr_proc_init(struct net *net) 538int __net_init sctp_remaddr_proc_init(struct net *net)
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 2d518425d598..f898b1c58bd2 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -86,7 +86,7 @@ int sysctl_sctp_rmem[3];
86int sysctl_sctp_wmem[3]; 86int sysctl_sctp_wmem[3];
87 87
88/* Set up the proc fs entry for the SCTP protocol. */ 88/* Set up the proc fs entry for the SCTP protocol. */
89static __net_init int sctp_proc_init(struct net *net) 89static int __net_init sctp_proc_init(struct net *net)
90{ 90{
91#ifdef CONFIG_PROC_FS 91#ifdef CONFIG_PROC_FS
92 net->sctp.proc_net_sctp = proc_net_mkdir(net, "sctp", net->proc_net); 92 net->sctp.proc_net_sctp = proc_net_mkdir(net, "sctp", net->proc_net);
@@ -1165,7 +1165,7 @@ static void sctp_v4_del_protocol(void)
1165 unregister_inetaddr_notifier(&sctp_inetaddr_notifier); 1165 unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
1166} 1166}
1167 1167
1168static int sctp_net_init(struct net *net) 1168static int __net_init sctp_net_init(struct net *net)
1169{ 1169{
1170 int status; 1170 int status;
1171 1171
@@ -1190,6 +1190,15 @@ static int sctp_net_init(struct net *net)
1190 /* Whether Cookie Preservative is enabled(1) or not(0) */ 1190 /* Whether Cookie Preservative is enabled(1) or not(0) */
1191 net->sctp.cookie_preserve_enable = 1; 1191 net->sctp.cookie_preserve_enable = 1;
1192 1192
1193 /* Default sctp sockets to use md5 as their hmac alg */
1194#if defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5)
1195 net->sctp.sctp_hmac_alg = "md5";
1196#elif defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1)
1197 net->sctp.sctp_hmac_alg = "sha1";
1198#else
1199 net->sctp.sctp_hmac_alg = NULL;
1200#endif
1201
1193 /* Max.Burst - 4 */ 1202 /* Max.Burst - 4 */
1194 net->sctp.max_burst = SCTP_DEFAULT_MAX_BURST; 1203 net->sctp.max_burst = SCTP_DEFAULT_MAX_BURST;
1195 1204
@@ -1281,7 +1290,7 @@ err_sysctl_register:
1281 return status; 1290 return status;
1282} 1291}
1283 1292
1284static void sctp_net_exit(struct net *net) 1293static void __net_exit sctp_net_exit(struct net *net)
1285{ 1294{
1286 /* Free the local address list */ 1295 /* Free the local address list */
1287 sctp_free_addr_wq(net); 1296 sctp_free_addr_wq(net);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index fbe1636309a7..e1c5fc2be6b8 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -804,10 +804,11 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
804 gabs); 804 gabs);
805 805
806 /* Add the duplicate TSN information. */ 806 /* Add the duplicate TSN information. */
807 if (num_dup_tsns) 807 if (num_dup_tsns) {
808 aptr->stats.idupchunks += num_dup_tsns;
808 sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns, 809 sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns,
809 sctp_tsnmap_get_dups(map)); 810 sctp_tsnmap_get_dups(map));
810 811 }
811 /* Once we have a sack generated, check to see what our sack 812 /* Once we have a sack generated, check to see what our sack
812 * generation is, if its 0, reset the transports to 0, and reset 813 * generation is, if its 0, reset the transports to 0, and reset
813 * the association generation to 1 814 * the association generation to 1
@@ -1090,6 +1091,25 @@ nodata:
1090 return retval; 1091 return retval;
1091} 1092}
1092 1093
1094struct sctp_chunk *sctp_make_violation_max_retrans(
1095 const struct sctp_association *asoc,
1096 const struct sctp_chunk *chunk)
1097{
1098 struct sctp_chunk *retval;
1099 static const char error[] = "Association exceeded its max_retans count";
1100 size_t payload_len = sizeof(error) + sizeof(sctp_errhdr_t);
1101
1102 retval = sctp_make_abort(asoc, chunk, payload_len);
1103 if (!retval)
1104 goto nodata;
1105
1106 sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, sizeof(error));
1107 sctp_addto_chunk(retval, sizeof(error), error);
1108
1109nodata:
1110 return retval;
1111}
1112
1093/* Make a HEARTBEAT chunk. */ 1113/* Make a HEARTBEAT chunk. */
1094struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, 1114struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
1095 const struct sctp_transport *transport) 1115 const struct sctp_transport *transport)
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 6773d7803627..c9577754a708 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -542,6 +542,7 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
542 */ 542 */
543 if (!is_hb || transport->hb_sent) { 543 if (!is_hb || transport->hb_sent) {
544 transport->rto = min((transport->rto * 2), transport->asoc->rto_max); 544 transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
545 sctp_max_rto(asoc, transport);
545 } 546 }
546} 547}
547 548
@@ -577,7 +578,7 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
577 unsigned int error) 578 unsigned int error)
578{ 579{
579 struct sctp_ulpevent *event; 580 struct sctp_ulpevent *event;
580 581 struct sctp_chunk *abort;
581 /* Cancel any partial delivery in progress. */ 582 /* Cancel any partial delivery in progress. */
582 sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); 583 sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
583 584
@@ -593,6 +594,13 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
593 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, 594 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
594 SCTP_ULPEVENT(event)); 595 SCTP_ULPEVENT(event));
595 596
597 if (asoc->overall_error_count >= asoc->max_retrans) {
598 abort = sctp_make_violation_max_retrans(asoc, chunk);
599 if (abort)
600 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
601 SCTP_CHUNK(abort));
602 }
603
596 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 604 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
597 SCTP_STATE(SCTP_STATE_CLOSED)); 605 SCTP_STATE(SCTP_STATE_CLOSED));
598 606
@@ -1268,14 +1276,14 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1268 sctp_outq_uncork(&asoc->outqueue); 1276 sctp_outq_uncork(&asoc->outqueue);
1269 local_cork = 0; 1277 local_cork = 0;
1270 } 1278 }
1271 asoc = cmd->obj.ptr; 1279 asoc = cmd->obj.asoc;
1272 /* Register with the endpoint. */ 1280 /* Register with the endpoint. */
1273 sctp_endpoint_add_asoc(ep, asoc); 1281 sctp_endpoint_add_asoc(ep, asoc);
1274 sctp_hash_established(asoc); 1282 sctp_hash_established(asoc);
1275 break; 1283 break;
1276 1284
1277 case SCTP_CMD_UPDATE_ASSOC: 1285 case SCTP_CMD_UPDATE_ASSOC:
1278 sctp_assoc_update(asoc, cmd->obj.ptr); 1286 sctp_assoc_update(asoc, cmd->obj.asoc);
1279 break; 1287 break;
1280 1288
1281 case SCTP_CMD_PURGE_OUTQUEUE: 1289 case SCTP_CMD_PURGE_OUTQUEUE:
@@ -1315,7 +1323,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1315 break; 1323 break;
1316 1324
1317 case SCTP_CMD_PROCESS_FWDTSN: 1325 case SCTP_CMD_PROCESS_FWDTSN:
1318 sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.ptr); 1326 sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.chunk);
1319 break; 1327 break;
1320 1328
1321 case SCTP_CMD_GEN_SACK: 1329 case SCTP_CMD_GEN_SACK:
@@ -1331,7 +1339,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1331 case SCTP_CMD_PROCESS_SACK: 1339 case SCTP_CMD_PROCESS_SACK:
1332 /* Process an inbound SACK. */ 1340 /* Process an inbound SACK. */
1333 error = sctp_cmd_process_sack(commands, asoc, 1341 error = sctp_cmd_process_sack(commands, asoc,
1334 cmd->obj.ptr); 1342 cmd->obj.chunk);
1335 break; 1343 break;
1336 1344
1337 case SCTP_CMD_GEN_INIT_ACK: 1345 case SCTP_CMD_GEN_INIT_ACK:
@@ -1352,15 +1360,15 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1352 * layer which will bail. 1360 * layer which will bail.
1353 */ 1361 */
1354 error = sctp_cmd_process_init(commands, asoc, chunk, 1362 error = sctp_cmd_process_init(commands, asoc, chunk,
1355 cmd->obj.ptr, gfp); 1363 cmd->obj.init, gfp);
1356 break; 1364 break;
1357 1365
1358 case SCTP_CMD_GEN_COOKIE_ECHO: 1366 case SCTP_CMD_GEN_COOKIE_ECHO:
1359 /* Generate a COOKIE ECHO chunk. */ 1367 /* Generate a COOKIE ECHO chunk. */
1360 new_obj = sctp_make_cookie_echo(asoc, chunk); 1368 new_obj = sctp_make_cookie_echo(asoc, chunk);
1361 if (!new_obj) { 1369 if (!new_obj) {
1362 if (cmd->obj.ptr) 1370 if (cmd->obj.chunk)
1363 sctp_chunk_free(cmd->obj.ptr); 1371 sctp_chunk_free(cmd->obj.chunk);
1364 goto nomem; 1372 goto nomem;
1365 } 1373 }
1366 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, 1374 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
@@ -1369,9 +1377,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1369 /* If there is an ERROR chunk to be sent along with 1377 /* If there is an ERROR chunk to be sent along with
1370 * the COOKIE_ECHO, send it, too. 1378 * the COOKIE_ECHO, send it, too.
1371 */ 1379 */
1372 if (cmd->obj.ptr) 1380 if (cmd->obj.chunk)
1373 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, 1381 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1374 SCTP_CHUNK(cmd->obj.ptr)); 1382 SCTP_CHUNK(cmd->obj.chunk));
1375 1383
1376 if (new_obj->transport) { 1384 if (new_obj->transport) {
1377 new_obj->transport->init_sent_count++; 1385 new_obj->transport->init_sent_count++;
@@ -1417,18 +1425,18 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1417 case SCTP_CMD_CHUNK_ULP: 1425 case SCTP_CMD_CHUNK_ULP:
1418 /* Send a chunk to the sockets layer. */ 1426 /* Send a chunk to the sockets layer. */
1419 SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n", 1427 SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
1420 "chunk_up:", cmd->obj.ptr, 1428 "chunk_up:", cmd->obj.chunk,
1421 "ulpq:", &asoc->ulpq); 1429 "ulpq:", &asoc->ulpq);
1422 sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.ptr, 1430 sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.chunk,
1423 GFP_ATOMIC); 1431 GFP_ATOMIC);
1424 break; 1432 break;
1425 1433
1426 case SCTP_CMD_EVENT_ULP: 1434 case SCTP_CMD_EVENT_ULP:
1427 /* Send a notification to the sockets layer. */ 1435 /* Send a notification to the sockets layer. */
1428 SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n", 1436 SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
1429 "event_up:",cmd->obj.ptr, 1437 "event_up:",cmd->obj.ulpevent,
1430 "ulpq:",&asoc->ulpq); 1438 "ulpq:",&asoc->ulpq);
1431 sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ptr); 1439 sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ulpevent);
1432 break; 1440 break;
1433 1441
1434 case SCTP_CMD_REPLY: 1442 case SCTP_CMD_REPLY:
@@ -1438,12 +1446,12 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1438 local_cork = 1; 1446 local_cork = 1;
1439 } 1447 }
1440 /* Send a chunk to our peer. */ 1448 /* Send a chunk to our peer. */
1441 error = sctp_outq_tail(&asoc->outqueue, cmd->obj.ptr); 1449 error = sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk);
1442 break; 1450 break;
1443 1451
1444 case SCTP_CMD_SEND_PKT: 1452 case SCTP_CMD_SEND_PKT:
1445 /* Send a full packet to our peer. */ 1453 /* Send a full packet to our peer. */
1446 packet = cmd->obj.ptr; 1454 packet = cmd->obj.packet;
1447 sctp_packet_transmit(packet); 1455 sctp_packet_transmit(packet);
1448 sctp_ootb_pkt_free(packet); 1456 sctp_ootb_pkt_free(packet);
1449 break; 1457 break;
@@ -1480,7 +1488,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1480 break; 1488 break;
1481 1489
1482 case SCTP_CMD_SETUP_T2: 1490 case SCTP_CMD_SETUP_T2:
1483 sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr); 1491 sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk);
1484 break; 1492 break;
1485 1493
1486 case SCTP_CMD_TIMER_START_ONCE: 1494 case SCTP_CMD_TIMER_START_ONCE:
@@ -1514,7 +1522,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1514 break; 1522 break;
1515 1523
1516 case SCTP_CMD_INIT_CHOOSE_TRANSPORT: 1524 case SCTP_CMD_INIT_CHOOSE_TRANSPORT:
1517 chunk = cmd->obj.ptr; 1525 chunk = cmd->obj.chunk;
1518 t = sctp_assoc_choose_alter_transport(asoc, 1526 t = sctp_assoc_choose_alter_transport(asoc,
1519 asoc->init_last_sent_to); 1527 asoc->init_last_sent_to);
1520 asoc->init_last_sent_to = t; 1528 asoc->init_last_sent_to = t;
@@ -1665,17 +1673,16 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1665 break; 1673 break;
1666 1674
1667 case SCTP_CMD_PART_DELIVER: 1675 case SCTP_CMD_PART_DELIVER:
1668 sctp_ulpq_partial_delivery(&asoc->ulpq, cmd->obj.ptr, 1676 sctp_ulpq_partial_delivery(&asoc->ulpq, GFP_ATOMIC);
1669 GFP_ATOMIC);
1670 break; 1677 break;
1671 1678
1672 case SCTP_CMD_RENEGE: 1679 case SCTP_CMD_RENEGE:
1673 sctp_ulpq_renege(&asoc->ulpq, cmd->obj.ptr, 1680 sctp_ulpq_renege(&asoc->ulpq, cmd->obj.chunk,
1674 GFP_ATOMIC); 1681 GFP_ATOMIC);
1675 break; 1682 break;
1676 1683
1677 case SCTP_CMD_SETUP_T4: 1684 case SCTP_CMD_SETUP_T4:
1678 sctp_cmd_setup_t4(commands, asoc, cmd->obj.ptr); 1685 sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk);
1679 break; 1686 break;
1680 1687
1681 case SCTP_CMD_PROCESS_OPERR: 1688 case SCTP_CMD_PROCESS_OPERR:
@@ -1734,8 +1741,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1734 break; 1741 break;
1735 1742
1736 default: 1743 default:
1737 pr_warn("Impossible command: %u, %p\n", 1744 pr_warn("Impossible command: %u\n",
1738 cmd->verb, cmd->obj.ptr); 1745 cmd->verb);
1739 break; 1746 break;
1740 } 1747 }
1741 1748
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index b6adef8a1e93..618ec7e216ca 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1055,6 +1055,7 @@ sctp_disposition_t sctp_sf_beat_8_3(struct net *net,
1055 void *arg, 1055 void *arg,
1056 sctp_cmd_seq_t *commands) 1056 sctp_cmd_seq_t *commands)
1057{ 1057{
1058 sctp_paramhdr_t *param_hdr;
1058 struct sctp_chunk *chunk = arg; 1059 struct sctp_chunk *chunk = arg;
1059 struct sctp_chunk *reply; 1060 struct sctp_chunk *reply;
1060 size_t paylen = 0; 1061 size_t paylen = 0;
@@ -1072,12 +1073,17 @@ sctp_disposition_t sctp_sf_beat_8_3(struct net *net,
1072 * Information field copied from the received HEARTBEAT chunk. 1073 * Information field copied from the received HEARTBEAT chunk.
1073 */ 1074 */
1074 chunk->subh.hb_hdr = (sctp_heartbeathdr_t *) chunk->skb->data; 1075 chunk->subh.hb_hdr = (sctp_heartbeathdr_t *) chunk->skb->data;
1076 param_hdr = (sctp_paramhdr_t *) chunk->subh.hb_hdr;
1075 paylen = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); 1077 paylen = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
1078
1079 if (ntohs(param_hdr->length) > paylen)
1080 return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
1081 param_hdr, commands);
1082
1076 if (!pskb_pull(chunk->skb, paylen)) 1083 if (!pskb_pull(chunk->skb, paylen))
1077 goto nomem; 1084 goto nomem;
1078 1085
1079 reply = sctp_make_heartbeat_ack(asoc, chunk, 1086 reply = sctp_make_heartbeat_ack(asoc, chunk, param_hdr, paylen);
1080 chunk->subh.hb_hdr, paylen);
1081 if (!reply) 1087 if (!reply)
1082 goto nomem; 1088 goto nomem;
1083 1089
@@ -3994,7 +4000,7 @@ static sctp_ierror_t sctp_sf_authenticate(struct net *net,
3994 chunk->subh.auth_hdr = auth_hdr; 4000 chunk->subh.auth_hdr = auth_hdr;
3995 skb_pull(chunk->skb, sizeof(struct sctp_authhdr)); 4001 skb_pull(chunk->skb, sizeof(struct sctp_authhdr));
3996 4002
3997 /* Make sure that we suport the HMAC algorithm from the auth 4003 /* Make sure that we support the HMAC algorithm from the auth
3998 * chunk. 4004 * chunk.
3999 */ 4005 */
4000 if (!sctp_auth_asoc_verify_hmac_id(asoc, auth_hdr->hmac_id)) 4006 if (!sctp_auth_asoc_verify_hmac_id(asoc, auth_hdr->hmac_id))
@@ -6127,6 +6133,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
6127 /* The TSN is too high--silently discard the chunk and 6133 /* The TSN is too high--silently discard the chunk and
6128 * count on it getting retransmitted later. 6134 * count on it getting retransmitted later.
6129 */ 6135 */
6136 if (chunk->asoc)
6137 chunk->asoc->stats.outofseqtsns++;
6130 return SCTP_IERROR_HIGH_TSN; 6138 return SCTP_IERROR_HIGH_TSN;
6131 } else if (tmp > 0) { 6139 } else if (tmp > 0) {
6132 /* This is a duplicate. Record it. */ 6140 /* This is a duplicate. Record it. */
@@ -6226,10 +6234,14 @@ static int sctp_eat_data(const struct sctp_association *asoc,
6226 /* Note: Some chunks may get overcounted (if we drop) or overcounted 6234 /* Note: Some chunks may get overcounted (if we drop) or overcounted
6227 * if we renege and the chunk arrives again. 6235 * if we renege and the chunk arrives again.
6228 */ 6236 */
6229 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) 6237 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
6230 SCTP_INC_STATS(net, SCTP_MIB_INUNORDERCHUNKS); 6238 SCTP_INC_STATS(net, SCTP_MIB_INUNORDERCHUNKS);
6231 else { 6239 if (chunk->asoc)
6240 chunk->asoc->stats.iuodchunks++;
6241 } else {
6232 SCTP_INC_STATS(net, SCTP_MIB_INORDERCHUNKS); 6242 SCTP_INC_STATS(net, SCTP_MIB_INORDERCHUNKS);
6243 if (chunk->asoc)
6244 chunk->asoc->stats.iodchunks++;
6233 ordered = 1; 6245 ordered = 1;
6234 } 6246 }
6235 6247
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index a60d1f8b41c5..9e65758cb038 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -110,7 +110,6 @@ static int sctp_do_bind(struct sock *, union sctp_addr *, int);
110static int sctp_autobind(struct sock *sk); 110static int sctp_autobind(struct sock *sk);
111static void sctp_sock_migrate(struct sock *, struct sock *, 111static void sctp_sock_migrate(struct sock *, struct sock *,
112 struct sctp_association *, sctp_socket_type_t); 112 struct sctp_association *, sctp_socket_type_t);
113static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
114 113
115extern struct kmem_cache *sctp_bucket_cachep; 114extern struct kmem_cache *sctp_bucket_cachep;
116extern long sysctl_sctp_mem[3]; 115extern long sysctl_sctp_mem[3];
@@ -336,6 +335,7 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
336/* Bind a local address either to an endpoint or to an association. */ 335/* Bind a local address either to an endpoint or to an association. */
337SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) 336SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
338{ 337{
338 struct net *net = sock_net(sk);
339 struct sctp_sock *sp = sctp_sk(sk); 339 struct sctp_sock *sp = sctp_sk(sk);
340 struct sctp_endpoint *ep = sp->ep; 340 struct sctp_endpoint *ep = sp->ep;
341 struct sctp_bind_addr *bp = &ep->base.bind_addr; 341 struct sctp_bind_addr *bp = &ep->base.bind_addr;
@@ -379,7 +379,8 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
379 } 379 }
380 } 380 }
381 381
382 if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) 382 if (snum && snum < PROT_SOCK &&
383 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
383 return -EACCES; 384 return -EACCES;
384 385
385 /* See if the address matches any of the addresses we may have 386 /* See if the address matches any of the addresses we may have
@@ -610,6 +611,7 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
610 2*asoc->pathmtu, 4380)); 611 2*asoc->pathmtu, 4380));
611 trans->ssthresh = asoc->peer.i.a_rwnd; 612 trans->ssthresh = asoc->peer.i.a_rwnd;
612 trans->rto = asoc->rto_initial; 613 trans->rto = asoc->rto_initial;
614 sctp_max_rto(asoc, trans);
613 trans->rtt = trans->srtt = trans->rttvar = 0; 615 trans->rtt = trans->srtt = trans->rttvar = 0;
614 sctp_transport_route(trans, NULL, 616 sctp_transport_route(trans, NULL,
615 sctp_sk(asoc->base.sk)); 617 sctp_sk(asoc->base.sk));
@@ -1162,7 +1164,7 @@ static int __sctp_connect(struct sock* sk,
1162 * be permitted to open new associations. 1164 * be permitted to open new associations.
1163 */ 1165 */
1164 if (ep->base.bind_addr.port < PROT_SOCK && 1166 if (ep->base.bind_addr.port < PROT_SOCK &&
1165 !capable(CAP_NET_BIND_SERVICE)) { 1167 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
1166 err = -EACCES; 1168 err = -EACCES;
1167 goto out_free; 1169 goto out_free;
1168 } 1170 }
@@ -1791,7 +1793,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1791 * associations. 1793 * associations.
1792 */ 1794 */
1793 if (ep->base.bind_addr.port < PROT_SOCK && 1795 if (ep->base.bind_addr.port < PROT_SOCK &&
1794 !capable(CAP_NET_BIND_SERVICE)) { 1796 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
1795 err = -EACCES; 1797 err = -EACCES;
1796 goto out_unlock; 1798 goto out_unlock;
1797 } 1799 }
@@ -1915,8 +1917,8 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1915 1917
1916 /* Break the message into multiple chunks of maximum size. */ 1918 /* Break the message into multiple chunks of maximum size. */
1917 datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len); 1919 datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len);
1918 if (!datamsg) { 1920 if (IS_ERR(datamsg)) {
1919 err = -ENOMEM; 1921 err = PTR_ERR(datamsg);
1920 goto out_free; 1922 goto out_free;
1921 } 1923 }
1922 1924
@@ -3890,6 +3892,8 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
3890 sp->default_rcv_context = 0; 3892 sp->default_rcv_context = 0;
3891 sp->max_burst = net->sctp.max_burst; 3893 sp->max_burst = net->sctp.max_burst;
3892 3894
3895 sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg;
3896
3893 /* Initialize default setup parameters. These parameters 3897 /* Initialize default setup parameters. These parameters
3894 * can be modified with the SCTP_INITMSG socket option or 3898 * can be modified with the SCTP_INITMSG socket option or
3895 * overridden by the SCTP_INIT CMSG. 3899 * overridden by the SCTP_INIT CMSG.
@@ -5632,6 +5636,71 @@ static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
5632 return 0; 5636 return 0;
5633} 5637}
5634 5638
5639/*
5640 * SCTP_GET_ASSOC_STATS
5641 *
5642 * This option retrieves local per endpoint statistics. It is modeled
5643 * after OpenSolaris' implementation
5644 */
5645static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
5646 char __user *optval,
5647 int __user *optlen)
5648{
5649 struct sctp_assoc_stats sas;
5650 struct sctp_association *asoc = NULL;
5651
5652 /* User must provide at least the assoc id */
5653 if (len < sizeof(sctp_assoc_t))
5654 return -EINVAL;
5655
5656 if (copy_from_user(&sas, optval, len))
5657 return -EFAULT;
5658
5659 asoc = sctp_id2assoc(sk, sas.sas_assoc_id);
5660 if (!asoc)
5661 return -EINVAL;
5662
5663 sas.sas_rtxchunks = asoc->stats.rtxchunks;
5664 sas.sas_gapcnt = asoc->stats.gapcnt;
5665 sas.sas_outofseqtsns = asoc->stats.outofseqtsns;
5666 sas.sas_osacks = asoc->stats.osacks;
5667 sas.sas_isacks = asoc->stats.isacks;
5668 sas.sas_octrlchunks = asoc->stats.octrlchunks;
5669 sas.sas_ictrlchunks = asoc->stats.ictrlchunks;
5670 sas.sas_oodchunks = asoc->stats.oodchunks;
5671 sas.sas_iodchunks = asoc->stats.iodchunks;
5672 sas.sas_ouodchunks = asoc->stats.ouodchunks;
5673 sas.sas_iuodchunks = asoc->stats.iuodchunks;
5674 sas.sas_idupchunks = asoc->stats.idupchunks;
5675 sas.sas_opackets = asoc->stats.opackets;
5676 sas.sas_ipackets = asoc->stats.ipackets;
5677
5678 /* New high max rto observed, will return 0 if not a single
5679 * RTO update took place. obs_rto_ipaddr will be bogus
5680 * in such a case
5681 */
5682 sas.sas_maxrto = asoc->stats.max_obs_rto;
5683 memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr,
5684 sizeof(struct sockaddr_storage));
5685
5686 /* Mark beginning of a new observation period */
5687 asoc->stats.max_obs_rto = asoc->rto_min;
5688
5689 /* Allow the struct to grow and fill in as much as possible */
5690 len = min_t(size_t, len, sizeof(sas));
5691
5692 if (put_user(len, optlen))
5693 return -EFAULT;
5694
5695 SCTP_DEBUG_PRINTK("sctp_getsockopt_assoc_stat(%d): %d\n",
5696 len, sas.sas_assoc_id);
5697
5698 if (copy_to_user(optval, &sas, len))
5699 return -EFAULT;
5700
5701 return 0;
5702}
5703
5635SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, 5704SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
5636 char __user *optval, int __user *optlen) 5705 char __user *optval, int __user *optlen)
5637{ 5706{
@@ -5773,6 +5842,9 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
5773 case SCTP_PEER_ADDR_THLDS: 5842 case SCTP_PEER_ADDR_THLDS:
5774 retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen); 5843 retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen);
5775 break; 5844 break;
5845 case SCTP_GET_ASSOC_STATS:
5846 retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen);
5847 break;
5776 default: 5848 default:
5777 retval = -ENOPROTOOPT; 5849 retval = -ENOPROTOOPT;
5778 break; 5850 break;
@@ -5981,13 +6053,15 @@ SCTP_STATIC int sctp_listen_start(struct sock *sk, int backlog)
5981 struct sctp_sock *sp = sctp_sk(sk); 6053 struct sctp_sock *sp = sctp_sk(sk);
5982 struct sctp_endpoint *ep = sp->ep; 6054 struct sctp_endpoint *ep = sp->ep;
5983 struct crypto_hash *tfm = NULL; 6055 struct crypto_hash *tfm = NULL;
6056 char alg[32];
5984 6057
5985 /* Allocate HMAC for generating cookie. */ 6058 /* Allocate HMAC for generating cookie. */
5986 if (!sctp_sk(sk)->hmac && sctp_hmac_alg) { 6059 if (!sp->hmac && sp->sctp_hmac_alg) {
5987 tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC); 6060 sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg);
6061 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
5988 if (IS_ERR(tfm)) { 6062 if (IS_ERR(tfm)) {
5989 net_info_ratelimited("failed to load transform for %s: %ld\n", 6063 net_info_ratelimited("failed to load transform for %s: %ld\n",
5990 sctp_hmac_alg, PTR_ERR(tfm)); 6064 sp->sctp_hmac_alg, PTR_ERR(tfm));
5991 return -ENOSYS; 6065 return -ENOSYS;
5992 } 6066 }
5993 sctp_sk(sk)->hmac = tfm; 6067 sctp_sk(sk)->hmac = tfm;
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 70e3ba5cb50b..043889ac86c0 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -62,6 +62,11 @@ extern long sysctl_sctp_mem[3];
62extern int sysctl_sctp_rmem[3]; 62extern int sysctl_sctp_rmem[3];
63extern int sysctl_sctp_wmem[3]; 63extern int sysctl_sctp_wmem[3];
64 64
65static int proc_sctp_do_hmac_alg(ctl_table *ctl,
66 int write,
67 void __user *buffer, size_t *lenp,
68
69 loff_t *ppos);
65static ctl_table sctp_table[] = { 70static ctl_table sctp_table[] = {
66 { 71 {
67 .procname = "sctp_mem", 72 .procname = "sctp_mem",
@@ -147,6 +152,12 @@ static ctl_table sctp_net_table[] = {
147 .proc_handler = proc_dointvec, 152 .proc_handler = proc_dointvec,
148 }, 153 },
149 { 154 {
155 .procname = "cookie_hmac_alg",
156 .maxlen = 8,
157 .mode = 0644,
158 .proc_handler = proc_sctp_do_hmac_alg,
159 },
160 {
150 .procname = "valid_cookie_life", 161 .procname = "valid_cookie_life",
151 .data = &init_net.sctp.valid_cookie_life, 162 .data = &init_net.sctp.valid_cookie_life,
152 .maxlen = sizeof(unsigned int), 163 .maxlen = sizeof(unsigned int),
@@ -289,6 +300,54 @@ static ctl_table sctp_net_table[] = {
289 { /* sentinel */ } 300 { /* sentinel */ }
290}; 301};
291 302
303static int proc_sctp_do_hmac_alg(ctl_table *ctl,
304 int write,
305 void __user *buffer, size_t *lenp,
306 loff_t *ppos)
307{
308 struct net *net = current->nsproxy->net_ns;
309 char tmp[8];
310 ctl_table tbl;
311 int ret;
312 int changed = 0;
313 char *none = "none";
314
315 memset(&tbl, 0, sizeof(struct ctl_table));
316
317 if (write) {
318 tbl.data = tmp;
319 tbl.maxlen = 8;
320 } else {
321 tbl.data = net->sctp.sctp_hmac_alg ? : none;
322 tbl.maxlen = strlen(tbl.data);
323 }
324 ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
325
326 if (write) {
327#ifdef CONFIG_CRYPTO_MD5
328 if (!strncmp(tmp, "md5", 3)) {
329 net->sctp.sctp_hmac_alg = "md5";
330 changed = 1;
331 }
332#endif
333#ifdef CONFIG_CRYPTO_SHA1
334 if (!strncmp(tmp, "sha1", 4)) {
335 net->sctp.sctp_hmac_alg = "sha1";
336 changed = 1;
337 }
338#endif
339 if (!strncmp(tmp, "none", 4)) {
340 net->sctp.sctp_hmac_alg = NULL;
341 changed = 1;
342 }
343
344 if (!changed)
345 ret = -EINVAL;
346 }
347
348 return ret;
349}
350
292int sctp_sysctl_net_register(struct net *net) 351int sctp_sysctl_net_register(struct net *net)
293{ 352{
294 struct ctl_table *table; 353 struct ctl_table *table;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 953c21e4af97..4e45bb68aef0 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -163,13 +163,11 @@ void sctp_transport_free(struct sctp_transport *transport)
163 sctp_transport_put(transport); 163 sctp_transport_put(transport);
164} 164}
165 165
166/* Destroy the transport data structure. 166static void sctp_transport_destroy_rcu(struct rcu_head *head)
167 * Assumes there are no more users of this structure.
168 */
169static void sctp_transport_destroy(struct sctp_transport *transport)
170{ 167{
171 SCTP_ASSERT(transport->dead, "Transport is not dead", return); 168 struct sctp_transport *transport;
172 169
170 transport = container_of(head, struct sctp_transport, rcu);
173 if (transport->asoc) 171 if (transport->asoc)
174 sctp_association_put(transport->asoc); 172 sctp_association_put(transport->asoc);
175 173
@@ -180,6 +178,16 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
180 SCTP_DBG_OBJCNT_DEC(transport); 178 SCTP_DBG_OBJCNT_DEC(transport);
181} 179}
182 180
181/* Destroy the transport data structure.
182 * Assumes there are no more users of this structure.
183 */
184static void sctp_transport_destroy(struct sctp_transport *transport)
185{
186 SCTP_ASSERT(transport->dead, "Transport is not dead", return);
187
188 call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
189}
190
183/* Start T3_rtx timer if it is not already running and update the heartbeat 191/* Start T3_rtx timer if it is not already running and update the heartbeat
184 * timer. This routine is called every time a DATA chunk is sent. 192 * timer. This routine is called every time a DATA chunk is sent.
185 */ 193 */
@@ -331,7 +339,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
331 * 1/8, rto_alpha would be expressed as 3. 339 * 1/8, rto_alpha would be expressed as 3.
332 */ 340 */
333 tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta) 341 tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
334 + ((abs(tp->srtt - rtt)) >> net->sctp.rto_beta); 342 + (((__u32)abs64((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta);
335 tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha) 343 tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
336 + (rtt >> net->sctp.rto_alpha); 344 + (rtt >> net->sctp.rto_alpha);
337 } else { 345 } else {
@@ -363,6 +371,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
363 if (tp->rto > tp->asoc->rto_max) 371 if (tp->rto > tp->asoc->rto_max)
364 tp->rto = tp->asoc->rto_max; 372 tp->rto = tp->asoc->rto_max;
365 373
374 sctp_max_rto(tp->asoc, tp);
366 tp->rtt = rtt; 375 tp->rtt = rtt;
367 376
368 /* Reset rto_pending so that a new RTT measurement is started when a 377 /* Reset rto_pending so that a new RTT measurement is started when a
@@ -620,6 +629,7 @@ void sctp_transport_reset(struct sctp_transport *t)
620 t->burst_limited = 0; 629 t->burst_limited = 0;
621 t->ssthresh = asoc->peer.i.a_rwnd; 630 t->ssthresh = asoc->peer.i.a_rwnd;
622 t->rto = asoc->rto_initial; 631 t->rto = asoc->rto_initial;
632 sctp_max_rto(asoc, t);
623 t->rtt = 0; 633 t->rtt = 0;
624 t->srtt = 0; 634 t->srtt = 0;
625 t->rttvar = 0; 635 t->rttvar = 0;
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index b5fb7c409023..5f25e0c92c31 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -272,7 +272,7 @@ __u16 sctp_tsnmap_pending(struct sctp_tsnmap *map)
272 __u32 max_tsn = map->max_tsn_seen; 272 __u32 max_tsn = map->max_tsn_seen;
273 __u32 base_tsn = map->base_tsn; 273 __u32 base_tsn = map->base_tsn;
274 __u16 pending_data; 274 __u16 pending_data;
275 u32 gap, i; 275 u32 gap;
276 276
277 pending_data = max_tsn - cum_tsn; 277 pending_data = max_tsn - cum_tsn;
278 gap = max_tsn - base_tsn; 278 gap = max_tsn - base_tsn;
@@ -280,11 +280,7 @@ __u16 sctp_tsnmap_pending(struct sctp_tsnmap *map)
280 if (gap == 0 || gap >= map->len) 280 if (gap == 0 || gap >= map->len)
281 goto out; 281 goto out;
282 282
283 for (i = 0; i < gap+1; i++) { 283 pending_data -= bitmap_weight(map->tsn_map, gap + 1);
284 if (test_bit(i, map->tsn_map))
285 pending_data--;
286 }
287
288out: 284out:
289 return pending_data; 285 return pending_data;
290} 286}
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 360d8697b95c..ada17464b65b 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -997,7 +997,6 @@ static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
997 997
998/* Partial deliver the first message as there is pressure on rwnd. */ 998/* Partial deliver the first message as there is pressure on rwnd. */
999void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, 999void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
1000 struct sctp_chunk *chunk,
1001 gfp_t gfp) 1000 gfp_t gfp)
1002{ 1001{
1003 struct sctp_ulpevent *event; 1002 struct sctp_ulpevent *event;
@@ -1060,7 +1059,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
1060 sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport); 1059 sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
1061 sctp_ulpq_tail_data(ulpq, chunk, gfp); 1060 sctp_ulpq_tail_data(ulpq, chunk, gfp);
1062 1061
1063 sctp_ulpq_partial_delivery(ulpq, chunk, gfp); 1062 sctp_ulpq_partial_delivery(ulpq, gfp);
1064 } 1063 }
1065 1064
1066 sk_mem_reclaim(asoc->base.sk); 1065 sk_mem_reclaim(asoc->base.sk);
diff --git a/net/socket.c b/net/socket.c
index d92c490e66fa..2ca51c719ef9 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -620,8 +620,6 @@ static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock,
620{ 620{
621 struct sock_iocb *si = kiocb_to_siocb(iocb); 621 struct sock_iocb *si = kiocb_to_siocb(iocb);
622 622
623 sock_update_classid(sock->sk);
624
625 si->sock = sock; 623 si->sock = sock;
626 si->scm = NULL; 624 si->scm = NULL;
627 si->msg = msg; 625 si->msg = msg;
@@ -784,8 +782,6 @@ static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock,
784{ 782{
785 struct sock_iocb *si = kiocb_to_siocb(iocb); 783 struct sock_iocb *si = kiocb_to_siocb(iocb);
786 784
787 sock_update_classid(sock->sk);
788
789 si->sock = sock; 785 si->sock = sock;
790 si->scm = NULL; 786 si->scm = NULL;
791 si->msg = msg; 787 si->msg = msg;
@@ -896,8 +892,6 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
896 if (unlikely(!sock->ops->splice_read)) 892 if (unlikely(!sock->ops->splice_read))
897 return -EINVAL; 893 return -EINVAL;
898 894
899 sock_update_classid(sock->sk);
900
901 return sock->ops->splice_read(sock, ppos, pipe, len, flags); 895 return sock->ops->splice_read(sock, ppos, pipe, len, flags);
902} 896}
903 897
@@ -3437,8 +3431,6 @@ EXPORT_SYMBOL(kernel_setsockopt);
3437int kernel_sendpage(struct socket *sock, struct page *page, int offset, 3431int kernel_sendpage(struct socket *sock, struct page *page, int offset,
3438 size_t size, int flags) 3432 size_t size, int flags)
3439{ 3433{
3440 sock_update_classid(sock->sk);
3441
3442 if (sock->ops->sendpage) 3434 if (sock->ops->sendpage)
3443 return sock->ops->sendpage(sock, page, offset, size, flags); 3435 return sock->ops->sendpage(sock, page, offset, size, flags);
3444 3436
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 909dc0c31aab..6e5c824b040b 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -192,17 +192,23 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct
192 const void *q; 192 const void *q;
193 unsigned int seclen; 193 unsigned int seclen;
194 unsigned int timeout; 194 unsigned int timeout;
195 unsigned long now = jiffies;
195 u32 window_size; 196 u32 window_size;
196 int ret; 197 int ret;
197 198
198 /* First unsigned int gives the lifetime (in seconds) of the cred */ 199 /* First unsigned int gives the remaining lifetime in seconds of the
200 * credential - e.g. the remaining TGT lifetime for Kerberos or
201 * the -t value passed to GSSD.
202 */
199 p = simple_get_bytes(p, end, &timeout, sizeof(timeout)); 203 p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
200 if (IS_ERR(p)) 204 if (IS_ERR(p))
201 goto err; 205 goto err;
202 if (timeout == 0) 206 if (timeout == 0)
203 timeout = GSSD_MIN_TIMEOUT; 207 timeout = GSSD_MIN_TIMEOUT;
204 ctx->gc_expiry = jiffies + (unsigned long)timeout * HZ * 3 / 4; 208 ctx->gc_expiry = now + ((unsigned long)timeout * HZ);
205 /* Sequence number window. Determines the maximum number of simultaneous requests */ 209 /* Sequence number window. Determines the maximum number of
210 * simultaneous requests
211 */
206 p = simple_get_bytes(p, end, &window_size, sizeof(window_size)); 212 p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
207 if (IS_ERR(p)) 213 if (IS_ERR(p))
208 goto err; 214 goto err;
@@ -237,9 +243,12 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct
237 p = ERR_PTR(ret); 243 p = ERR_PTR(ret);
238 goto err; 244 goto err;
239 } 245 }
246 dprintk("RPC: %s Success. gc_expiry %lu now %lu timeout %u\n",
247 __func__, ctx->gc_expiry, now, timeout);
240 return q; 248 return q;
241err: 249err:
242 dprintk("RPC: %s returning %ld\n", __func__, -PTR_ERR(p)); 250 dprintk("RPC: %s returns %ld gc_expiry %lu now %lu timeout %u\n",
251 __func__, -PTR_ERR(p), ctx->gc_expiry, now, timeout);
243 return p; 252 return p;
244} 253}
245 254
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index a9c0bbccad6b..890a29912d5a 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -59,7 +59,7 @@ static void xprt_free_allocation(struct rpc_rqst *req)
59 struct xdr_buf *xbufp; 59 struct xdr_buf *xbufp;
60 60
61 dprintk("RPC: free allocations for req= %p\n", req); 61 dprintk("RPC: free allocations for req= %p\n", req);
62 BUG_ON(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state)); 62 WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
63 xbufp = &req->rq_private_buf; 63 xbufp = &req->rq_private_buf;
64 free_page((unsigned long)xbufp->head[0].iov_base); 64 free_page((unsigned long)xbufp->head[0].iov_base);
65 xbufp = &req->rq_snd_buf; 65 xbufp = &req->rq_snd_buf;
@@ -191,7 +191,9 @@ void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
191 191
192 dprintk("RPC: destroy backchannel transport\n"); 192 dprintk("RPC: destroy backchannel transport\n");
193 193
194 BUG_ON(max_reqs == 0); 194 if (max_reqs == 0)
195 goto out;
196
195 spin_lock_bh(&xprt->bc_pa_lock); 197 spin_lock_bh(&xprt->bc_pa_lock);
196 xprt_dec_alloc_count(xprt, max_reqs); 198 xprt_dec_alloc_count(xprt, max_reqs);
197 list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) { 199 list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
@@ -202,6 +204,7 @@ void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
202 } 204 }
203 spin_unlock_bh(&xprt->bc_pa_lock); 205 spin_unlock_bh(&xprt->bc_pa_lock);
204 206
207out:
205 dprintk("RPC: backchannel list empty= %s\n", 208 dprintk("RPC: backchannel list empty= %s\n",
206 list_empty(&xprt->bc_pa_list) ? "true" : "false"); 209 list_empty(&xprt->bc_pa_list) ? "true" : "false");
207} 210}
@@ -255,7 +258,7 @@ void xprt_free_bc_request(struct rpc_rqst *req)
255 dprintk("RPC: free backchannel req=%p\n", req); 258 dprintk("RPC: free backchannel req=%p\n", req);
256 259
257 smp_mb__before_clear_bit(); 260 smp_mb__before_clear_bit();
258 BUG_ON(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state)); 261 WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
259 clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); 262 clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
260 smp_mb__after_clear_bit(); 263 smp_mb__after_clear_bit();
261 264
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
index 0b2eb388cbda..15c7a8a1c24f 100644
--- a/net/sunrpc/bc_svc.c
+++ b/net/sunrpc/bc_svc.c
@@ -53,7 +53,7 @@ int bc_send(struct rpc_rqst *req)
53 if (IS_ERR(task)) 53 if (IS_ERR(task))
54 ret = PTR_ERR(task); 54 ret = PTR_ERR(task);
55 else { 55 else {
56 BUG_ON(atomic_read(&task->tk_count) != 1); 56 WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
57 ret = task->tk_status; 57 ret = task->tk_status;
58 rpc_put_task(task); 58 rpc_put_task(task);
59 } 59 }
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index fc2f7aa4dca7..9afa4393c217 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -775,11 +775,11 @@ static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
775 if (rp->q.list.next == &cd->queue) { 775 if (rp->q.list.next == &cd->queue) {
776 spin_unlock(&queue_lock); 776 spin_unlock(&queue_lock);
777 mutex_unlock(&inode->i_mutex); 777 mutex_unlock(&inode->i_mutex);
778 BUG_ON(rp->offset); 778 WARN_ON_ONCE(rp->offset);
779 return 0; 779 return 0;
780 } 780 }
781 rq = container_of(rp->q.list.next, struct cache_request, q.list); 781 rq = container_of(rp->q.list.next, struct cache_request, q.list);
782 BUG_ON(rq->q.reader); 782 WARN_ON_ONCE(rq->q.reader);
783 if (rp->offset == 0) 783 if (rp->offset == 0)
784 rq->readers++; 784 rq->readers++;
785 spin_unlock(&queue_lock); 785 spin_unlock(&queue_lock);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index cdc7564b4512..507b5e84fbdb 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -132,8 +132,10 @@ static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
132 int error; 132 int error;
133 133
134 dir = rpc_d_lookup_sb(sb, dir_name); 134 dir = rpc_d_lookup_sb(sb, dir_name);
135 if (dir == NULL) 135 if (dir == NULL) {
136 pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name);
136 return dir; 137 return dir;
138 }
137 for (;;) { 139 for (;;) {
138 q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++); 140 q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
139 name[sizeof(name) - 1] = '\0'; 141 name[sizeof(name) - 1] = '\0';
@@ -192,7 +194,8 @@ static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
192 case RPC_PIPEFS_MOUNT: 194 case RPC_PIPEFS_MOUNT:
193 dentry = rpc_setup_pipedir_sb(sb, clnt, 195 dentry = rpc_setup_pipedir_sb(sb, clnt,
194 clnt->cl_program->pipe_dir_name); 196 clnt->cl_program->pipe_dir_name);
195 BUG_ON(dentry == NULL); 197 if (!dentry)
198 return -ENOENT;
196 if (IS_ERR(dentry)) 199 if (IS_ERR(dentry))
197 return PTR_ERR(dentry); 200 return PTR_ERR(dentry);
198 clnt->cl_dentry = dentry; 201 clnt->cl_dentry = dentry;
@@ -234,7 +237,7 @@ static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
234 spin_lock(&sn->rpc_client_lock); 237 spin_lock(&sn->rpc_client_lock);
235 list_for_each_entry(clnt, &sn->all_clients, cl_clients) { 238 list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
236 if (clnt->cl_program->pipe_dir_name == NULL) 239 if (clnt->cl_program->pipe_dir_name == NULL)
237 break; 240 continue;
238 if (rpc_clnt_skip_event(clnt, event)) 241 if (rpc_clnt_skip_event(clnt, event))
239 continue; 242 continue;
240 if (atomic_inc_not_zero(&clnt->cl_count) == 0) 243 if (atomic_inc_not_zero(&clnt->cl_count) == 0)
@@ -552,7 +555,7 @@ EXPORT_SYMBOL_GPL(rpc_clone_client);
552 * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth 555 * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
553 * 556 *
554 * @clnt: RPC client whose parameters are copied 557 * @clnt: RPC client whose parameters are copied
555 * @auth: security flavor for new client 558 * @flavor: security flavor for new client
556 * 559 *
557 * Returns a fresh RPC client or an ERR_PTR. 560 * Returns a fresh RPC client or an ERR_PTR.
558 */ 561 */
@@ -607,6 +610,8 @@ EXPORT_SYMBOL_GPL(rpc_killall_tasks);
607 */ 610 */
608void rpc_shutdown_client(struct rpc_clnt *clnt) 611void rpc_shutdown_client(struct rpc_clnt *clnt)
609{ 612{
613 might_sleep();
614
610 dprintk_rcu("RPC: shutting down %s client for %s\n", 615 dprintk_rcu("RPC: shutting down %s client for %s\n",
611 clnt->cl_protname, 616 clnt->cl_protname,
612 rcu_dereference(clnt->cl_xprt)->servername); 617 rcu_dereference(clnt->cl_xprt)->servername);
@@ -693,21 +698,19 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
693 const struct rpc_program *program, 698 const struct rpc_program *program,
694 u32 vers) 699 u32 vers)
695{ 700{
701 struct rpc_create_args args = {
702 .program = program,
703 .prognumber = program->number,
704 .version = vers,
705 .authflavor = old->cl_auth->au_flavor,
706 .client_name = old->cl_principal,
707 };
696 struct rpc_clnt *clnt; 708 struct rpc_clnt *clnt;
697 const struct rpc_version *version;
698 int err; 709 int err;
699 710
700 BUG_ON(vers >= program->nrvers || !program->version[vers]); 711 clnt = __rpc_clone_client(&args, old);
701 version = program->version[vers];
702 clnt = rpc_clone_client(old);
703 if (IS_ERR(clnt)) 712 if (IS_ERR(clnt))
704 goto out; 713 goto out;
705 clnt->cl_procinfo = version->procs;
706 clnt->cl_maxproc = version->nrprocs;
707 clnt->cl_protname = program->name;
708 clnt->cl_prog = program->number;
709 clnt->cl_vers = version->number;
710 clnt->cl_stats = program->stats;
711 err = rpc_ping(clnt); 714 err = rpc_ping(clnt);
712 if (err != 0) { 715 if (err != 0) {
713 rpc_shutdown_client(clnt); 716 rpc_shutdown_client(clnt);
@@ -832,7 +835,12 @@ int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flag
832 }; 835 };
833 int status; 836 int status;
834 837
835 BUG_ON(flags & RPC_TASK_ASYNC); 838 WARN_ON_ONCE(flags & RPC_TASK_ASYNC);
839 if (flags & RPC_TASK_ASYNC) {
840 rpc_release_calldata(task_setup_data.callback_ops,
841 task_setup_data.callback_data);
842 return -EINVAL;
843 }
836 844
837 task = rpc_run_task(&task_setup_data); 845 task = rpc_run_task(&task_setup_data);
838 if (IS_ERR(task)) 846 if (IS_ERR(task))
@@ -908,7 +916,7 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
908 916
909 task->tk_action = call_bc_transmit; 917 task->tk_action = call_bc_transmit;
910 atomic_inc(&task->tk_count); 918 atomic_inc(&task->tk_count);
911 BUG_ON(atomic_read(&task->tk_count) != 2); 919 WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
912 rpc_execute(task); 920 rpc_execute(task);
913 921
914out: 922out:
@@ -1368,6 +1376,7 @@ call_refreshresult(struct rpc_task *task)
1368 return; 1376 return;
1369 case -ETIMEDOUT: 1377 case -ETIMEDOUT:
1370 rpc_delay(task, 3*HZ); 1378 rpc_delay(task, 3*HZ);
1379 case -EKEYEXPIRED:
1371 case -EAGAIN: 1380 case -EAGAIN:
1372 status = -EACCES; 1381 status = -EACCES;
1373 if (!task->tk_cred_retry) 1382 if (!task->tk_cred_retry)
@@ -1654,7 +1663,6 @@ call_transmit(struct rpc_task *task)
1654 task->tk_action = call_transmit_status; 1663 task->tk_action = call_transmit_status;
1655 /* Encode here so that rpcsec_gss can use correct sequence number. */ 1664 /* Encode here so that rpcsec_gss can use correct sequence number. */
1656 if (rpc_task_need_encode(task)) { 1665 if (rpc_task_need_encode(task)) {
1657 BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
1658 rpc_xdr_encode(task); 1666 rpc_xdr_encode(task);
1659 /* Did the encode result in an error condition? */ 1667 /* Did the encode result in an error condition? */
1660 if (task->tk_status != 0) { 1668 if (task->tk_status != 0) {
@@ -1738,7 +1746,6 @@ call_bc_transmit(struct rpc_task *task)
1738{ 1746{
1739 struct rpc_rqst *req = task->tk_rqstp; 1747 struct rpc_rqst *req = task->tk_rqstp;
1740 1748
1741 BUG_ON(task->tk_status != 0);
1742 task->tk_status = xprt_prepare_transmit(task); 1749 task->tk_status = xprt_prepare_transmit(task);
1743 if (task->tk_status == -EAGAIN) { 1750 if (task->tk_status == -EAGAIN) {
1744 /* 1751 /*
@@ -1785,7 +1792,7 @@ call_bc_transmit(struct rpc_task *task)
1785 * We were unable to reply and will have to drop the 1792 * We were unable to reply and will have to drop the
1786 * request. The server should reconnect and retransmit. 1793 * request. The server should reconnect and retransmit.
1787 */ 1794 */
1788 BUG_ON(task->tk_status == -EAGAIN); 1795 WARN_ON_ONCE(task->tk_status == -EAGAIN);
1789 printk(KERN_NOTICE "RPC: Could not send backchannel reply " 1796 printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1790 "error: %d\n", task->tk_status); 1797 "error: %d\n", task->tk_status);
1791 break; 1798 break;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 80f5dd23417d..fd10981ea792 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1093,7 +1093,7 @@ void rpc_put_sb_net(const struct net *net)
1093{ 1093{
1094 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 1094 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1095 1095
1096 BUG_ON(sn->pipefs_sb == NULL); 1096 WARN_ON(sn->pipefs_sb == NULL);
1097 mutex_unlock(&sn->pipefs_sb_lock); 1097 mutex_unlock(&sn->pipefs_sb_lock);
1098} 1098}
1099EXPORT_SYMBOL_GPL(rpc_put_sb_net); 1099EXPORT_SYMBOL_GPL(rpc_put_sb_net);
@@ -1152,14 +1152,19 @@ static void rpc_kill_sb(struct super_block *sb)
1152 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 1152 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1153 1153
1154 mutex_lock(&sn->pipefs_sb_lock); 1154 mutex_lock(&sn->pipefs_sb_lock);
1155 if (sn->pipefs_sb != sb) {
1156 mutex_unlock(&sn->pipefs_sb_lock);
1157 goto out;
1158 }
1155 sn->pipefs_sb = NULL; 1159 sn->pipefs_sb = NULL;
1156 mutex_unlock(&sn->pipefs_sb_lock); 1160 mutex_unlock(&sn->pipefs_sb_lock);
1157 put_net(net);
1158 dprintk("RPC: sending pipefs UMOUNT notification for net %p%s\n", 1161 dprintk("RPC: sending pipefs UMOUNT notification for net %p%s\n",
1159 net, NET_NAME(net)); 1162 net, NET_NAME(net));
1160 blocking_notifier_call_chain(&rpc_pipefs_notifier_list, 1163 blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
1161 RPC_PIPEFS_UMOUNT, 1164 RPC_PIPEFS_UMOUNT,
1162 sb); 1165 sb);
1166 put_net(net);
1167out:
1163 kill_litter_super(sb); 1168 kill_litter_super(sb);
1164} 1169}
1165 1170
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index a70acae496e4..795a0f4e920b 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -23,7 +23,6 @@
23#include <linux/errno.h> 23#include <linux/errno.h>
24#include <linux/mutex.h> 24#include <linux/mutex.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/nsproxy.h>
27#include <net/ipv6.h> 26#include <net/ipv6.h>
28 27
29#include <linux/sunrpc/clnt.h> 28#include <linux/sunrpc/clnt.h>
@@ -884,7 +883,10 @@ static void encode_rpcb_string(struct xdr_stream *xdr, const char *string,
884 u32 len; 883 u32 len;
885 884
886 len = strlen(string); 885 len = strlen(string);
887 BUG_ON(len > maxstrlen); 886 WARN_ON_ONCE(len > maxstrlen);
887 if (len > maxstrlen)
888 /* truncate and hope for the best */
889 len = maxstrlen;
888 p = xdr_reserve_space(xdr, 4 + len); 890 p = xdr_reserve_space(xdr, 4 + len);
889 xdr_encode_opaque(p, string, len); 891 xdr_encode_opaque(p, string, len);
890} 892}
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 6357fcb00c7e..bfa31714581f 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -98,6 +98,23 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
98 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); 98 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
99} 99}
100 100
101static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
102{
103 queue->priority = priority;
104}
105
106static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
107{
108 queue->owner = pid;
109 queue->nr = RPC_BATCH_COUNT;
110}
111
112static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
113{
114 rpc_set_waitqueue_priority(queue, queue->maxpriority);
115 rpc_set_waitqueue_owner(queue, 0);
116}
117
101/* 118/*
102 * Add new request to a priority queue. 119 * Add new request to a priority queue.
103 */ 120 */
@@ -109,9 +126,11 @@ static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
109 struct rpc_task *t; 126 struct rpc_task *t;
110 127
111 INIT_LIST_HEAD(&task->u.tk_wait.links); 128 INIT_LIST_HEAD(&task->u.tk_wait.links);
112 q = &queue->tasks[queue_priority];
113 if (unlikely(queue_priority > queue->maxpriority)) 129 if (unlikely(queue_priority > queue->maxpriority))
114 q = &queue->tasks[queue->maxpriority]; 130 queue_priority = queue->maxpriority;
131 if (queue_priority > queue->priority)
132 rpc_set_waitqueue_priority(queue, queue_priority);
133 q = &queue->tasks[queue_priority];
115 list_for_each_entry(t, q, u.tk_wait.list) { 134 list_for_each_entry(t, q, u.tk_wait.list) {
116 if (t->tk_owner == task->tk_owner) { 135 if (t->tk_owner == task->tk_owner) {
117 list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links); 136 list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
@@ -133,7 +152,9 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
133 struct rpc_task *task, 152 struct rpc_task *task,
134 unsigned char queue_priority) 153 unsigned char queue_priority)
135{ 154{
136 BUG_ON (RPC_IS_QUEUED(task)); 155 WARN_ON_ONCE(RPC_IS_QUEUED(task));
156 if (RPC_IS_QUEUED(task))
157 return;
137 158
138 if (RPC_IS_PRIORITY(queue)) 159 if (RPC_IS_PRIORITY(queue))
139 __rpc_add_wait_queue_priority(queue, task, queue_priority); 160 __rpc_add_wait_queue_priority(queue, task, queue_priority);
@@ -178,24 +199,6 @@ static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_tas
178 task->tk_pid, queue, rpc_qname(queue)); 199 task->tk_pid, queue, rpc_qname(queue));
179} 200}
180 201
181static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
182{
183 queue->priority = priority;
184 queue->count = 1 << (priority * 2);
185}
186
187static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
188{
189 queue->owner = pid;
190 queue->nr = RPC_BATCH_COUNT;
191}
192
193static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
194{
195 rpc_set_waitqueue_priority(queue, queue->maxpriority);
196 rpc_set_waitqueue_owner(queue, 0);
197}
198
199static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues) 202static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
200{ 203{
201 int i; 204 int i;
@@ -334,7 +337,7 @@ static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
334 337
335 __rpc_add_wait_queue(q, task, queue_priority); 338 __rpc_add_wait_queue(q, task, queue_priority);
336 339
337 BUG_ON(task->tk_callback != NULL); 340 WARN_ON_ONCE(task->tk_callback != NULL);
338 task->tk_callback = action; 341 task->tk_callback = action;
339 __rpc_add_timer(q, task); 342 __rpc_add_timer(q, task);
340} 343}
@@ -343,7 +346,12 @@ void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
343 rpc_action action) 346 rpc_action action)
344{ 347{
345 /* We shouldn't ever put an inactive task to sleep */ 348 /* We shouldn't ever put an inactive task to sleep */
346 BUG_ON(!RPC_IS_ACTIVATED(task)); 349 WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
350 if (!RPC_IS_ACTIVATED(task)) {
351 task->tk_status = -EIO;
352 rpc_put_task_async(task);
353 return;
354 }
347 355
348 /* 356 /*
349 * Protect the queue operations. 357 * Protect the queue operations.
@@ -358,7 +366,12 @@ void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
358 rpc_action action, int priority) 366 rpc_action action, int priority)
359{ 367{
360 /* We shouldn't ever put an inactive task to sleep */ 368 /* We shouldn't ever put an inactive task to sleep */
361 BUG_ON(!RPC_IS_ACTIVATED(task)); 369 WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
370 if (!RPC_IS_ACTIVATED(task)) {
371 task->tk_status = -EIO;
372 rpc_put_task_async(task);
373 return;
374 }
362 375
363 /* 376 /*
364 * Protect the queue operations. 377 * Protect the queue operations.
@@ -367,6 +380,7 @@ void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
367 __rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW); 380 __rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
368 spin_unlock_bh(&q->lock); 381 spin_unlock_bh(&q->lock);
369} 382}
383EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
370 384
371/** 385/**
372 * __rpc_do_wake_up_task - wake up a single rpc_task 386 * __rpc_do_wake_up_task - wake up a single rpc_task
@@ -451,8 +465,7 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q
451 /* 465 /*
452 * Check if we need to switch queues. 466 * Check if we need to switch queues.
453 */ 467 */
454 if (--queue->count) 468 goto new_owner;
455 goto new_owner;
456 } 469 }
457 470
458 /* 471 /*
@@ -697,7 +710,9 @@ static void __rpc_execute(struct rpc_task *task)
697 dprintk("RPC: %5u __rpc_execute flags=0x%x\n", 710 dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
698 task->tk_pid, task->tk_flags); 711 task->tk_pid, task->tk_flags);
699 712
700 BUG_ON(RPC_IS_QUEUED(task)); 713 WARN_ON_ONCE(RPC_IS_QUEUED(task));
714 if (RPC_IS_QUEUED(task))
715 return;
701 716
702 for (;;) { 717 for (;;) {
703 void (*do_action)(struct rpc_task *); 718 void (*do_action)(struct rpc_task *);
@@ -919,16 +934,35 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
919 return task; 934 return task;
920} 935}
921 936
937/*
938 * rpc_free_task - release rpc task and perform cleanups
939 *
940 * Note that we free up the rpc_task _after_ rpc_release_calldata()
941 * in order to work around a workqueue dependency issue.
942 *
943 * Tejun Heo states:
944 * "Workqueue currently considers two work items to be the same if they're
945 * on the same address and won't execute them concurrently - ie. it
946 * makes a work item which is queued again while being executed wait
947 * for the previous execution to complete.
948 *
949 * If a work function frees the work item, and then waits for an event
950 * which should be performed by another work item and *that* work item
951 * recycles the freed work item, it can create a false dependency loop.
952 * There really is no reliable way to detect this short of verifying
953 * every memory free."
954 *
955 */
922static void rpc_free_task(struct rpc_task *task) 956static void rpc_free_task(struct rpc_task *task)
923{ 957{
924 const struct rpc_call_ops *tk_ops = task->tk_ops; 958 unsigned short tk_flags = task->tk_flags;
925 void *calldata = task->tk_calldata; 959
960 rpc_release_calldata(task->tk_ops, task->tk_calldata);
926 961
927 if (task->tk_flags & RPC_TASK_DYNAMIC) { 962 if (tk_flags & RPC_TASK_DYNAMIC) {
928 dprintk("RPC: %5u freeing task\n", task->tk_pid); 963 dprintk("RPC: %5u freeing task\n", task->tk_pid);
929 mempool_free(task, rpc_task_mempool); 964 mempool_free(task, rpc_task_mempool);
930 } 965 }
931 rpc_release_calldata(tk_ops, calldata);
932} 966}
933 967
934static void rpc_async_release(struct work_struct *work) 968static void rpc_async_release(struct work_struct *work)
@@ -938,8 +972,7 @@ static void rpc_async_release(struct work_struct *work)
938 972
939static void rpc_release_resources_task(struct rpc_task *task) 973static void rpc_release_resources_task(struct rpc_task *task)
940{ 974{
941 if (task->tk_rqstp) 975 xprt_release(task);
942 xprt_release(task);
943 if (task->tk_msg.rpc_cred) { 976 if (task->tk_msg.rpc_cred) {
944 put_rpccred(task->tk_msg.rpc_cred); 977 put_rpccred(task->tk_msg.rpc_cred);
945 task->tk_msg.rpc_cred = NULL; 978 task->tk_msg.rpc_cred = NULL;
@@ -981,7 +1014,7 @@ static void rpc_release_task(struct rpc_task *task)
981{ 1014{
982 dprintk("RPC: %5u release task\n", task->tk_pid); 1015 dprintk("RPC: %5u release task\n", task->tk_pid);
983 1016
984 BUG_ON (RPC_IS_QUEUED(task)); 1017 WARN_ON_ONCE(RPC_IS_QUEUED(task));
985 1018
986 rpc_release_resources_task(task); 1019 rpc_release_resources_task(task);
987 1020
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 3ee7461926d8..dbf12ac5ecb7 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -20,7 +20,6 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/kthread.h> 21#include <linux/kthread.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/nsproxy.h>
24 23
25#include <linux/sunrpc/types.h> 24#include <linux/sunrpc/types.h>
26#include <linux/sunrpc/xdr.h> 25#include <linux/sunrpc/xdr.h>
@@ -324,7 +323,9 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
324 * The caller checks for sv_nrpools > 1, which 323 * The caller checks for sv_nrpools > 1, which
325 * implies that we've been initialized. 324 * implies that we've been initialized.
326 */ 325 */
327 BUG_ON(m->count == 0); 326 WARN_ON_ONCE(m->count == 0);
327 if (m->count == 0)
328 return;
328 329
329 switch (m->mode) { 330 switch (m->mode) {
330 case SVC_POOL_PERCPU: 331 case SVC_POOL_PERCPU:
@@ -585,7 +586,9 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
585 * We assume one is at most one page 586 * We assume one is at most one page
586 */ 587 */
587 arghi = 0; 588 arghi = 0;
588 BUG_ON(pages > RPCSVC_MAXPAGES); 589 WARN_ON_ONCE(pages > RPCSVC_MAXPAGES);
590 if (pages > RPCSVC_MAXPAGES)
591 pages = RPCSVC_MAXPAGES;
589 while (pages) { 592 while (pages) {
590 struct page *p = alloc_pages_node(node, GFP_KERNEL, 0); 593 struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
591 if (!p) 594 if (!p)
@@ -946,7 +949,9 @@ int svc_register(const struct svc_serv *serv, struct net *net,
946 unsigned int i; 949 unsigned int i;
947 int error = 0; 950 int error = 0;
948 951
949 BUG_ON(proto == 0 && port == 0); 952 WARN_ON_ONCE(proto == 0 && port == 0);
953 if (proto == 0 && port == 0)
954 return -EINVAL;
950 955
951 for (progp = serv->sv_program; progp; progp = progp->pg_next) { 956 for (progp = serv->sv_program; progp; progp = progp->pg_next) {
952 for (i = 0; i < progp->pg_nvers; i++) { 957 for (i = 0; i < progp->pg_nvers; i++) {
@@ -1035,7 +1040,7 @@ static void svc_unregister(const struct svc_serv *serv, struct net *net)
1035} 1040}
1036 1041
1037/* 1042/*
1038 * Printk the given error with the address of the client that caused it. 1043 * dprintk the given error with the address of the client that caused it.
1039 */ 1044 */
1040static __printf(2, 3) 1045static __printf(2, 3)
1041void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) 1046void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
@@ -1049,8 +1054,7 @@ void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
1049 vaf.fmt = fmt; 1054 vaf.fmt = fmt;
1050 vaf.va = &args; 1055 vaf.va = &args;
1051 1056
1052 net_warn_ratelimited("svc: %s: %pV", 1057 dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);
1053 svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);
1054 1058
1055 va_end(args); 1059 va_end(args);
1056} 1060}
@@ -1299,7 +1303,7 @@ svc_process(struct svc_rqst *rqstp)
1299 * Setup response xdr_buf. 1303 * Setup response xdr_buf.
1300 * Initially it has just one page 1304 * Initially it has just one page
1301 */ 1305 */
1302 rqstp->rq_resused = 1; 1306 rqstp->rq_next_page = &rqstp->rq_respages[1];
1303 resv->iov_base = page_address(rqstp->rq_respages[0]); 1307 resv->iov_base = page_address(rqstp->rq_respages[0]);
1304 resv->iov_len = 0; 1308 resv->iov_len = 0;
1305 rqstp->rq_res.pages = rqstp->rq_respages + 1; 1309 rqstp->rq_res.pages = rqstp->rq_respages + 1;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 194d865fae72..b8e47fac7315 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -218,7 +218,9 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
218 */ 218 */
219static void svc_xprt_received(struct svc_xprt *xprt) 219static void svc_xprt_received(struct svc_xprt *xprt)
220{ 220{
221 BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags)); 221 WARN_ON_ONCE(!test_bit(XPT_BUSY, &xprt->xpt_flags));
222 if (!test_bit(XPT_BUSY, &xprt->xpt_flags))
223 return;
222 /* As soon as we clear busy, the xprt could be closed and 224 /* As soon as we clear busy, the xprt could be closed and
223 * 'put', so we need a reference to call svc_xprt_enqueue with: 225 * 'put', so we need a reference to call svc_xprt_enqueue with:
224 */ 226 */
@@ -577,7 +579,10 @@ int svc_alloc_arg(struct svc_rqst *rqstp)
577 579
578 /* now allocate needed pages. If we get a failure, sleep briefly */ 580 /* now allocate needed pages. If we get a failure, sleep briefly */
579 pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE; 581 pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
580 BUG_ON(pages >= RPCSVC_MAXPAGES); 582 WARN_ON_ONCE(pages >= RPCSVC_MAXPAGES);
583 if (pages >= RPCSVC_MAXPAGES)
584 /* use as many pages as possible */
585 pages = RPCSVC_MAXPAGES - 1;
581 for (i = 0; i < pages ; i++) 586 for (i = 0; i < pages ; i++)
582 while (rqstp->rq_pages[i] == NULL) { 587 while (rqstp->rq_pages[i] == NULL) {
583 struct page *p = alloc_page(GFP_KERNEL); 588 struct page *p = alloc_page(GFP_KERNEL);
@@ -926,7 +931,7 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
926 spin_lock_bh(&serv->sv_lock); 931 spin_lock_bh(&serv->sv_lock);
927 if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags)) 932 if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
928 list_del_init(&xprt->xpt_list); 933 list_del_init(&xprt->xpt_list);
929 BUG_ON(!list_empty(&xprt->xpt_ready)); 934 WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
930 if (test_bit(XPT_TEMP, &xprt->xpt_flags)) 935 if (test_bit(XPT_TEMP, &xprt->xpt_flags))
931 serv->sv_tmpcnt--; 936 serv->sv_tmpcnt--;
932 spin_unlock_bh(&serv->sv_lock); 937 spin_unlock_bh(&serv->sv_lock);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 03827cef1fa7..0a148c9d2a5c 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -84,7 +84,11 @@ static struct lock_class_key svc_slock_key[2];
84static void svc_reclassify_socket(struct socket *sock) 84static void svc_reclassify_socket(struct socket *sock)
85{ 85{
86 struct sock *sk = sock->sk; 86 struct sock *sk = sock->sk;
87 BUG_ON(sock_owned_by_user(sk)); 87
88 WARN_ON_ONCE(sock_owned_by_user(sk));
89 if (sock_owned_by_user(sk))
90 return;
91
88 switch (sk->sk_family) { 92 switch (sk->sk_family) {
89 case AF_INET: 93 case AF_INET:
90 sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD", 94 sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
@@ -601,6 +605,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
601 rqstp->rq_respages = rqstp->rq_pages + 1 + 605 rqstp->rq_respages = rqstp->rq_pages + 1 +
602 DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE); 606 DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE);
603 } 607 }
608 rqstp->rq_next_page = rqstp->rq_respages+1;
604 609
605 if (serv->sv_stats) 610 if (serv->sv_stats)
606 serv->sv_stats->netudpcnt++; 611 serv->sv_stats->netudpcnt++;
@@ -874,9 +879,9 @@ static unsigned int svc_tcp_restore_pages(struct svc_sock *svsk, struct svc_rqst
874{ 879{
875 unsigned int i, len, npages; 880 unsigned int i, len, npages;
876 881
877 if (svsk->sk_tcplen <= sizeof(rpc_fraghdr)) 882 if (svsk->sk_datalen == 0)
878 return 0; 883 return 0;
879 len = svsk->sk_tcplen - sizeof(rpc_fraghdr); 884 len = svsk->sk_datalen;
880 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; 885 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
881 for (i = 0; i < npages; i++) { 886 for (i = 0; i < npages; i++) {
882 if (rqstp->rq_pages[i] != NULL) 887 if (rqstp->rq_pages[i] != NULL)
@@ -893,9 +898,9 @@ static void svc_tcp_save_pages(struct svc_sock *svsk, struct svc_rqst *rqstp)
893{ 898{
894 unsigned int i, len, npages; 899 unsigned int i, len, npages;
895 900
896 if (svsk->sk_tcplen <= sizeof(rpc_fraghdr)) 901 if (svsk->sk_datalen == 0)
897 return; 902 return;
898 len = svsk->sk_tcplen - sizeof(rpc_fraghdr); 903 len = svsk->sk_datalen;
899 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; 904 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
900 for (i = 0; i < npages; i++) { 905 for (i = 0; i < npages; i++) {
901 svsk->sk_pages[i] = rqstp->rq_pages[i]; 906 svsk->sk_pages[i] = rqstp->rq_pages[i];
@@ -907,9 +912,9 @@ static void svc_tcp_clear_pages(struct svc_sock *svsk)
907{ 912{
908 unsigned int i, len, npages; 913 unsigned int i, len, npages;
909 914
910 if (svsk->sk_tcplen <= sizeof(rpc_fraghdr)) 915 if (svsk->sk_datalen == 0)
911 goto out; 916 goto out;
912 len = svsk->sk_tcplen - sizeof(rpc_fraghdr); 917 len = svsk->sk_datalen;
913 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; 918 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
914 for (i = 0; i < npages; i++) { 919 for (i = 0; i < npages; i++) {
915 BUG_ON(svsk->sk_pages[i] == NULL); 920 BUG_ON(svsk->sk_pages[i] == NULL);
@@ -918,13 +923,12 @@ static void svc_tcp_clear_pages(struct svc_sock *svsk)
918 } 923 }
919out: 924out:
920 svsk->sk_tcplen = 0; 925 svsk->sk_tcplen = 0;
926 svsk->sk_datalen = 0;
921} 927}
922 928
923/* 929/*
924 * Receive data. 930 * Receive fragment record header.
925 * If we haven't gotten the record length yet, get the next four bytes. 931 * If we haven't gotten the record length yet, get the next four bytes.
926 * Otherwise try to gobble up as much as possible up to the complete
927 * record length.
928 */ 932 */
929static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) 933static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
930{ 934{
@@ -950,32 +954,16 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
950 return -EAGAIN; 954 return -EAGAIN;
951 } 955 }
952 956
953 svsk->sk_reclen = ntohl(svsk->sk_reclen); 957 dprintk("svc: TCP record, %d bytes\n", svc_sock_reclen(svsk));
954 if (!(svsk->sk_reclen & RPC_LAST_STREAM_FRAGMENT)) { 958 if (svc_sock_reclen(svsk) + svsk->sk_datalen >
955 /* FIXME: technically, a record can be fragmented, 959 serv->sv_max_mesg) {
956 * and non-terminal fragments will not have the top 960 net_notice_ratelimited("RPC: fragment too large: %d\n",
957 * bit set in the fragment length header. 961 svc_sock_reclen(svsk));
958 * But apparently no known nfs clients send fragmented
959 * records. */
960 net_notice_ratelimited("RPC: multiple fragments per record not supported\n");
961 goto err_delete;
962 }
963
964 svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK;
965 dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
966 if (svsk->sk_reclen > serv->sv_max_mesg) {
967 net_notice_ratelimited("RPC: fragment too large: 0x%08lx\n",
968 (unsigned long)svsk->sk_reclen);
969 goto err_delete; 962 goto err_delete;
970 } 963 }
971 } 964 }
972 965
973 if (svsk->sk_reclen < 8) 966 return svc_sock_reclen(svsk);
974 goto err_delete; /* client is nuts. */
975
976 len = svsk->sk_reclen;
977
978 return len;
979error: 967error:
980 dprintk("RPC: TCP recv_record got %d\n", len); 968 dprintk("RPC: TCP recv_record got %d\n", len);
981 return len; 969 return len;
@@ -1019,7 +1007,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
1019 if (dst->iov_len < src->iov_len) 1007 if (dst->iov_len < src->iov_len)
1020 return -EAGAIN; /* whatever; just giving up. */ 1008 return -EAGAIN; /* whatever; just giving up. */
1021 memcpy(dst->iov_base, src->iov_base, src->iov_len); 1009 memcpy(dst->iov_base, src->iov_base, src->iov_len);
1022 xprt_complete_rqst(req->rq_task, svsk->sk_reclen); 1010 xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len);
1023 rqstp->rq_arg.len = 0; 1011 rqstp->rq_arg.len = 0;
1024 return 0; 1012 return 0;
1025} 1013}
@@ -1038,6 +1026,17 @@ static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len)
1038 return i; 1026 return i;
1039} 1027}
1040 1028
1029static void svc_tcp_fragment_received(struct svc_sock *svsk)
1030{
1031 /* If we have more data, signal svc_xprt_enqueue() to try again */
1032 if (svc_recv_available(svsk) > sizeof(rpc_fraghdr))
1033 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
1034 dprintk("svc: TCP %s record (%d bytes)\n",
1035 svc_sock_final_rec(svsk) ? "final" : "nonfinal",
1036 svc_sock_reclen(svsk));
1037 svsk->sk_tcplen = 0;
1038 svsk->sk_reclen = 0;
1039}
1041 1040
1042/* 1041/*
1043 * Receive data from a TCP socket. 1042 * Receive data from a TCP socket.
@@ -1064,29 +1063,39 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
1064 goto error; 1063 goto error;
1065 1064
1066 base = svc_tcp_restore_pages(svsk, rqstp); 1065 base = svc_tcp_restore_pages(svsk, rqstp);
1067 want = svsk->sk_reclen - base; 1066 want = svc_sock_reclen(svsk) - (svsk->sk_tcplen - sizeof(rpc_fraghdr));
1068 1067
1069 vec = rqstp->rq_vec; 1068 vec = rqstp->rq_vec;
1070 1069
1071 pnum = copy_pages_to_kvecs(&vec[0], &rqstp->rq_pages[0], 1070 pnum = copy_pages_to_kvecs(&vec[0], &rqstp->rq_pages[0],
1072 svsk->sk_reclen); 1071 svsk->sk_datalen + want);
1073 1072
1074 rqstp->rq_respages = &rqstp->rq_pages[pnum]; 1073 rqstp->rq_respages = &rqstp->rq_pages[pnum];
1074 rqstp->rq_next_page = rqstp->rq_respages + 1;
1075 1075
1076 /* Now receive data */ 1076 /* Now receive data */
1077 len = svc_partial_recvfrom(rqstp, vec, pnum, want, base); 1077 len = svc_partial_recvfrom(rqstp, vec, pnum, want, base);
1078 if (len >= 0) 1078 if (len >= 0) {
1079 svsk->sk_tcplen += len; 1079 svsk->sk_tcplen += len;
1080 if (len != want) { 1080 svsk->sk_datalen += len;
1081 }
1082 if (len != want || !svc_sock_final_rec(svsk)) {
1081 svc_tcp_save_pages(svsk, rqstp); 1083 svc_tcp_save_pages(svsk, rqstp);
1082 if (len < 0 && len != -EAGAIN) 1084 if (len < 0 && len != -EAGAIN)
1083 goto err_other; 1085 goto err_delete;
1084 dprintk("svc: incomplete TCP record (%d of %d)\n", 1086 if (len == want)
1085 svsk->sk_tcplen, svsk->sk_reclen); 1087 svc_tcp_fragment_received(svsk);
1088 else
1089 dprintk("svc: incomplete TCP record (%d of %d)\n",
1090 (int)(svsk->sk_tcplen - sizeof(rpc_fraghdr)),
1091 svc_sock_reclen(svsk));
1086 goto err_noclose; 1092 goto err_noclose;
1087 } 1093 }
1088 1094
1089 rqstp->rq_arg.len = svsk->sk_reclen; 1095 if (svc_sock_reclen(svsk) < 8)
1096 goto err_delete; /* client is nuts. */
1097
1098 rqstp->rq_arg.len = svsk->sk_datalen;
1090 rqstp->rq_arg.page_base = 0; 1099 rqstp->rq_arg.page_base = 0;
1091 if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) { 1100 if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
1092 rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len; 1101 rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
@@ -1103,11 +1112,8 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
1103 len = receive_cb_reply(svsk, rqstp); 1112 len = receive_cb_reply(svsk, rqstp);
1104 1113
1105 /* Reset TCP read info */ 1114 /* Reset TCP read info */
1106 svsk->sk_reclen = 0; 1115 svsk->sk_datalen = 0;
1107 svsk->sk_tcplen = 0; 1116 svc_tcp_fragment_received(svsk);
1108 /* If we have more data, signal svc_xprt_enqueue() to try again */
1109 if (svc_recv_available(svsk) > sizeof(rpc_fraghdr))
1110 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
1111 1117
1112 if (len < 0) 1118 if (len < 0)
1113 goto error; 1119 goto error;
@@ -1116,15 +1122,14 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
1116 if (serv->sv_stats) 1122 if (serv->sv_stats)
1117 serv->sv_stats->nettcpcnt++; 1123 serv->sv_stats->nettcpcnt++;
1118 1124
1119 dprintk("svc: TCP complete record (%d bytes)\n", rqstp->rq_arg.len);
1120 return rqstp->rq_arg.len; 1125 return rqstp->rq_arg.len;
1121 1126
1122error: 1127error:
1123 if (len != -EAGAIN) 1128 if (len != -EAGAIN)
1124 goto err_other; 1129 goto err_delete;
1125 dprintk("RPC: TCP recvfrom got EAGAIN\n"); 1130 dprintk("RPC: TCP recvfrom got EAGAIN\n");
1126 return 0; 1131 return 0;
1127err_other: 1132err_delete:
1128 printk(KERN_NOTICE "%s: recvfrom returned errno %d\n", 1133 printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
1129 svsk->sk_xprt.xpt_server->sv_name, -len); 1134 svsk->sk_xprt.xpt_server->sv_name, -len);
1130 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); 1135 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
@@ -1301,6 +1306,7 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
1301 1306
1302 svsk->sk_reclen = 0; 1307 svsk->sk_reclen = 0;
1303 svsk->sk_tcplen = 0; 1308 svsk->sk_tcplen = 0;
1309 svsk->sk_datalen = 0;
1304 memset(&svsk->sk_pages[0], 0, sizeof(svsk->sk_pages)); 1310 memset(&svsk->sk_pages[0], 0, sizeof(svsk->sk_pages));
1305 1311
1306 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; 1312 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 08f50afd5f2a..56055632f151 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -318,7 +318,10 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
318 318
319 tail = buf->tail; 319 tail = buf->tail;
320 head = buf->head; 320 head = buf->head;
321 BUG_ON (len > head->iov_len); 321
322 WARN_ON_ONCE(len > head->iov_len);
323 if (len > head->iov_len)
324 len = head->iov_len;
322 325
323 /* Shift the tail first */ 326 /* Shift the tail first */
324 if (tail->iov_len != 0) { 327 if (tail->iov_len != 0) {
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index bd462a532acf..33811db8788a 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1136,10 +1136,18 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
1136void xprt_release(struct rpc_task *task) 1136void xprt_release(struct rpc_task *task)
1137{ 1137{
1138 struct rpc_xprt *xprt; 1138 struct rpc_xprt *xprt;
1139 struct rpc_rqst *req; 1139 struct rpc_rqst *req = task->tk_rqstp;
1140 1140
1141 if (!(req = task->tk_rqstp)) 1141 if (req == NULL) {
1142 if (task->tk_client) {
1143 rcu_read_lock();
1144 xprt = rcu_dereference(task->tk_client->cl_xprt);
1145 if (xprt->snd_task == task)
1146 xprt_release_write(xprt, task);
1147 rcu_read_unlock();
1148 }
1142 return; 1149 return;
1150 }
1143 1151
1144 xprt = req->rq_xprt; 1152 xprt = req->rq_xprt;
1145 if (task->tk_ops->rpc_count_stats != NULL) 1153 if (task->tk_ops->rpc_count_stats != NULL)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 41cb63b623df..0ce75524ed21 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -521,11 +521,11 @@ next_sge:
521 rqstp->rq_pages[ch_no] = NULL; 521 rqstp->rq_pages[ch_no] = NULL;
522 522
523 /* 523 /*
524 * Detach res pages. svc_release must see a resused count of 524 * Detach res pages. If svc_release sees any it will attempt to
525 * zero or it will attempt to put them. 525 * put them.
526 */ 526 */
527 while (rqstp->rq_resused) 527 while (rqstp->rq_next_page != rqstp->rq_respages)
528 rqstp->rq_respages[--rqstp->rq_resused] = NULL; 528 *(--rqstp->rq_next_page) = NULL;
529 529
530 return err; 530 return err;
531} 531}
@@ -550,7 +550,7 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
550 550
551 /* rq_respages starts after the last arg page */ 551 /* rq_respages starts after the last arg page */
552 rqstp->rq_respages = &rqstp->rq_arg.pages[page_no]; 552 rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
553 rqstp->rq_resused = 0; 553 rqstp->rq_next_page = &rqstp->rq_arg.pages[page_no];
554 554
555 /* Rebuild rq_arg head and tail. */ 555 /* Rebuild rq_arg head and tail. */
556 rqstp->rq_arg.head[0] = head->arg.head[0]; 556 rqstp->rq_arg.head[0] = head->arg.head[0];
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 42eb7ba0b903..c1d124dc772b 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -548,6 +548,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
548 int sge_no; 548 int sge_no;
549 int sge_bytes; 549 int sge_bytes;
550 int page_no; 550 int page_no;
551 int pages;
551 int ret; 552 int ret;
552 553
553 /* Post a recv buffer to handle another request. */ 554 /* Post a recv buffer to handle another request. */
@@ -611,7 +612,8 @@ static int send_reply(struct svcxprt_rdma *rdma,
611 * respages array. They are our pages until the I/O 612 * respages array. They are our pages until the I/O
612 * completes. 613 * completes.
613 */ 614 */
614 for (page_no = 0; page_no < rqstp->rq_resused; page_no++) { 615 pages = rqstp->rq_next_page - rqstp->rq_respages;
616 for (page_no = 0; page_no < pages; page_no++) {
615 ctxt->pages[page_no+1] = rqstp->rq_respages[page_no]; 617 ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
616 ctxt->count++; 618 ctxt->count++;
617 rqstp->rq_respages[page_no] = NULL; 619 rqstp->rq_respages[page_no] = NULL;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 75853cabf4c9..68b0a81c31d5 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1746,7 +1746,6 @@ static inline void xs_reclassify_socketu(struct socket *sock)
1746{ 1746{
1747 struct sock *sk = sock->sk; 1747 struct sock *sk = sock->sk;
1748 1748
1749 BUG_ON(sock_owned_by_user(sk));
1750 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC", 1749 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
1751 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]); 1750 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
1752} 1751}
@@ -1755,7 +1754,6 @@ static inline void xs_reclassify_socket4(struct socket *sock)
1755{ 1754{
1756 struct sock *sk = sock->sk; 1755 struct sock *sk = sock->sk;
1757 1756
1758 BUG_ON(sock_owned_by_user(sk));
1759 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC", 1757 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
1760 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]); 1758 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
1761} 1759}
@@ -1764,13 +1762,16 @@ static inline void xs_reclassify_socket6(struct socket *sock)
1764{ 1762{
1765 struct sock *sk = sock->sk; 1763 struct sock *sk = sock->sk;
1766 1764
1767 BUG_ON(sock_owned_by_user(sk));
1768 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC", 1765 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
1769 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]); 1766 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
1770} 1767}
1771 1768
1772static inline void xs_reclassify_socket(int family, struct socket *sock) 1769static inline void xs_reclassify_socket(int family, struct socket *sock)
1773{ 1770{
1771 WARN_ON_ONCE(sock_owned_by_user(sock->sk));
1772 if (sock_owned_by_user(sock->sk))
1773 return;
1774
1774 switch (family) { 1775 switch (family) {
1775 case AF_LOCAL: 1776 case AF_LOCAL:
1776 xs_reclassify_socketu(sock); 1777 xs_reclassify_socketu(sock);
@@ -1901,6 +1902,10 @@ static void xs_local_setup_socket(struct work_struct *work)
1901 dprintk("RPC: xprt %p: socket %s does not exist\n", 1902 dprintk("RPC: xprt %p: socket %s does not exist\n",
1902 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); 1903 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1903 break; 1904 break;
1905 case -ECONNREFUSED:
1906 dprintk("RPC: xprt %p: connection refused for %s\n",
1907 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1908 break;
1904 default: 1909 default:
1905 printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n", 1910 printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
1906 __func__, -status, 1911 __func__, -status,
@@ -2329,9 +2334,11 @@ static void *bc_malloc(struct rpc_task *task, size_t size)
2329 struct page *page; 2334 struct page *page;
2330 struct rpc_buffer *buf; 2335 struct rpc_buffer *buf;
2331 2336
2332 BUG_ON(size > PAGE_SIZE - sizeof(struct rpc_buffer)); 2337 WARN_ON_ONCE(size > PAGE_SIZE - sizeof(struct rpc_buffer));
2333 page = alloc_page(GFP_KERNEL); 2338 if (size > PAGE_SIZE - sizeof(struct rpc_buffer))
2339 return NULL;
2334 2340
2341 page = alloc_page(GFP_KERNEL);
2335 if (!page) 2342 if (!page)
2336 return NULL; 2343 return NULL;
2337 2344
@@ -2393,7 +2400,6 @@ static int bc_send_request(struct rpc_task *task)
2393{ 2400{
2394 struct rpc_rqst *req = task->tk_rqstp; 2401 struct rpc_rqst *req = task->tk_rqstp;
2395 struct svc_xprt *xprt; 2402 struct svc_xprt *xprt;
2396 struct svc_sock *svsk;
2397 u32 len; 2403 u32 len;
2398 2404
2399 dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid)); 2405 dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
@@ -2401,7 +2407,6 @@ static int bc_send_request(struct rpc_task *task)
2401 * Get the server socket associated with this callback xprt 2407 * Get the server socket associated with this callback xprt
2402 */ 2408 */
2403 xprt = req->rq_xprt->bc_xprt; 2409 xprt = req->rq_xprt->bc_xprt;
2404 svsk = container_of(xprt, struct svc_sock, sk_xprt);
2405 2410
2406 /* 2411 /*
2407 * Grab the mutex to serialize data as the connection is shared 2412 * Grab the mutex to serialize data as the connection is shared
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index e3a6e37cd1c5..9bc6db04be3e 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -38,15 +38,24 @@ static int is_seen(struct ctl_table_set *set)
38} 38}
39 39
40/* Return standard mode bits for table entry. */ 40/* Return standard mode bits for table entry. */
41static int net_ctl_permissions(struct ctl_table_root *root, 41static int net_ctl_permissions(struct ctl_table_header *head,
42 struct nsproxy *nsproxy,
43 struct ctl_table *table) 42 struct ctl_table *table)
44{ 43{
44 struct net *net = container_of(head->set, struct net, sysctls);
45 kuid_t root_uid = make_kuid(net->user_ns, 0);
46 kgid_t root_gid = make_kgid(net->user_ns, 0);
47
45 /* Allow network administrator to have same access as root. */ 48 /* Allow network administrator to have same access as root. */
46 if (capable(CAP_NET_ADMIN)) { 49 if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
50 uid_eq(root_uid, current_uid())) {
47 int mode = (table->mode >> 6) & 7; 51 int mode = (table->mode >> 6) & 7;
48 return (mode << 6) | (mode << 3) | mode; 52 return (mode << 6) | (mode << 3) | mode;
49 } 53 }
54 /* Allow netns root group to have the same access as the root group */
55 if (gid_eq(root_gid, current_gid())) {
56 int mode = (table->mode >> 3) & 7;
57 return (mode << 3) | mode;
58 }
50 return table->mode; 59 return table->mode;
51} 60}
52 61
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index 585460180ffb..bc41bd31eadc 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -20,18 +20,9 @@ menuconfig TIPC
20 20
21 If in doubt, say N. 21 If in doubt, say N.
22 22
23if TIPC
24
25config TIPC_ADVANCED
26 bool "Advanced TIPC configuration"
27 default n
28 help
29 Saying Y here will open some advanced configuration for TIPC.
30 Most users do not need to bother; if unsure, just say N.
31
32config TIPC_PORTS 23config TIPC_PORTS
33 int "Maximum number of ports in a node" 24 int "Maximum number of ports in a node"
34 depends on TIPC_ADVANCED 25 depends on TIPC
35 range 127 65535 26 range 127 65535
36 default "8191" 27 default "8191"
37 help 28 help
@@ -40,5 +31,3 @@ config TIPC_PORTS
40 31
41 Setting this to a smaller value saves some memory, 32 Setting this to a smaller value saves some memory,
42 setting it to higher allows for more ports. 33 setting it to higher allows for more ports.
43
44endif # TIPC
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index e4e6d8cd47e6..54f89f90ac33 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -347,7 +347,7 @@ static void bclink_peek_nack(struct tipc_msg *msg)
347 347
348 tipc_node_lock(n_ptr); 348 tipc_node_lock(n_ptr);
349 349
350 if (n_ptr->bclink.supported && 350 if (n_ptr->bclink.recv_permitted &&
351 (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) && 351 (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
352 (n_ptr->bclink.last_in == msg_bcgap_after(msg))) 352 (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
353 n_ptr->bclink.oos_state = 2; 353 n_ptr->bclink.oos_state = 2;
@@ -429,7 +429,7 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
429 goto exit; 429 goto exit;
430 430
431 tipc_node_lock(node); 431 tipc_node_lock(node);
432 if (unlikely(!node->bclink.supported)) 432 if (unlikely(!node->bclink.recv_permitted))
433 goto unlock; 433 goto unlock;
434 434
435 /* Handle broadcast protocol message */ 435 /* Handle broadcast protocol message */
@@ -564,7 +564,7 @@ exit:
564 564
565u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr) 565u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
566{ 566{
567 return (n_ptr->bclink.supported && 567 return (n_ptr->bclink.recv_permitted &&
568 (tipc_bclink_get_last_sent() != n_ptr->bclink.acked)); 568 (tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
569} 569}
570 570
@@ -619,16 +619,14 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
619 if (bcbearer->remains_new.count == bcbearer->remains.count) 619 if (bcbearer->remains_new.count == bcbearer->remains.count)
620 continue; /* bearer pair doesn't add anything */ 620 continue; /* bearer pair doesn't add anything */
621 621
622 if (p->blocked || 622 if (!tipc_bearer_blocked(p))
623 p->media->send_msg(buf, p, &p->media->bcast_addr)) { 623 tipc_bearer_send(p, buf, &p->media->bcast_addr);
624 else if (s && !tipc_bearer_blocked(s))
624 /* unable to send on primary bearer */ 625 /* unable to send on primary bearer */
625 if (!s || s->blocked || 626 tipc_bearer_send(s, buf, &s->media->bcast_addr);
626 s->media->send_msg(buf, s, 627 else
627 &s->media->bcast_addr)) { 628 /* unable to send on either bearer */
628 /* unable to send on either bearer */ 629 continue;
629 continue;
630 }
631 }
632 630
633 if (s) { 631 if (s) {
634 bcbearer->bpairs[bp_index].primary = s; 632 bcbearer->bpairs[bp_index].primary = s;
@@ -731,8 +729,8 @@ int tipc_bclink_stats(char *buf, const u32 buf_size)
731 " TX naks:%u acks:%u dups:%u\n", 729 " TX naks:%u acks:%u dups:%u\n",
732 s->sent_nacks, s->sent_acks, s->retransmitted); 730 s->sent_nacks, s->sent_acks, s->retransmitted);
733 ret += tipc_snprintf(buf + ret, buf_size - ret, 731 ret += tipc_snprintf(buf + ret, buf_size - ret,
734 " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n", 732 " Congestion link:%u Send queue max:%u avg:%u\n",
735 s->bearer_congs, s->link_congs, s->max_queue_sz, 733 s->link_congs, s->max_queue_sz,
736 s->queue_sz_counts ? 734 s->queue_sz_counts ?
737 (s->accu_queue_sz / s->queue_sz_counts) : 0); 735 (s->accu_queue_sz / s->queue_sz_counts) : 0);
738 736
@@ -766,7 +764,6 @@ int tipc_bclink_set_queue_limits(u32 limit)
766 764
767void tipc_bclink_init(void) 765void tipc_bclink_init(void)
768{ 766{
769 INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
770 bcbearer->bearer.media = &bcbearer->media; 767 bcbearer->bearer.media = &bcbearer->media;
771 bcbearer->media.send_msg = tipc_bcbearer_send; 768 bcbearer->media.send_msg = tipc_bcbearer_send;
772 sprintf(bcbearer->media.name, "tipc-broadcast"); 769 sprintf(bcbearer->media.name, "tipc-broadcast");
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 4ec5c80e8a7c..aa62f93a9127 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -279,116 +279,31 @@ void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest)
279} 279}
280 280
281/* 281/*
282 * bearer_push(): Resolve bearer congestion. Force the waiting 282 * Interrupt enabling new requests after bearer blocking:
283 * links to push out their unsent packets, one packet per link
284 * per iteration, until all packets are gone or congestion reoccurs.
285 * 'tipc_net_lock' is read_locked when this function is called
286 * bearer.lock must be taken before calling
287 * Returns binary true(1) ore false(0)
288 */
289static int bearer_push(struct tipc_bearer *b_ptr)
290{
291 u32 res = 0;
292 struct tipc_link *ln, *tln;
293
294 if (b_ptr->blocked)
295 return 0;
296
297 while (!list_empty(&b_ptr->cong_links) && (res != PUSH_FAILED)) {
298 list_for_each_entry_safe(ln, tln, &b_ptr->cong_links, link_list) {
299 res = tipc_link_push_packet(ln);
300 if (res == PUSH_FAILED)
301 break;
302 if (res == PUSH_FINISHED)
303 list_move_tail(&ln->link_list, &b_ptr->links);
304 }
305 }
306 return list_empty(&b_ptr->cong_links);
307}
308
309void tipc_bearer_lock_push(struct tipc_bearer *b_ptr)
310{
311 spin_lock_bh(&b_ptr->lock);
312 bearer_push(b_ptr);
313 spin_unlock_bh(&b_ptr->lock);
314}
315
316
317/*
318 * Interrupt enabling new requests after bearer congestion or blocking:
319 * See bearer_send(). 283 * See bearer_send().
320 */ 284 */
321void tipc_continue(struct tipc_bearer *b_ptr) 285void tipc_continue(struct tipc_bearer *b)
322{ 286{
323 spin_lock_bh(&b_ptr->lock); 287 spin_lock_bh(&b->lock);
324 if (!list_empty(&b_ptr->cong_links)) 288 b->blocked = 0;
325 tipc_k_signal((Handler)tipc_bearer_lock_push, (unsigned long)b_ptr); 289 spin_unlock_bh(&b->lock);
326 b_ptr->blocked = 0;
327 spin_unlock_bh(&b_ptr->lock);
328} 290}
329 291
330/* 292/*
331 * Schedule link for sending of messages after the bearer 293 * tipc_bearer_blocked - determines if bearer is currently blocked
332 * has been deblocked by 'continue()'. This method is called
333 * when somebody tries to send a message via this link while
334 * the bearer is congested. 'tipc_net_lock' is in read_lock here
335 * bearer.lock is busy
336 */ 294 */
337static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr, 295int tipc_bearer_blocked(struct tipc_bearer *b)
338 struct tipc_link *l_ptr)
339{ 296{
340 list_move_tail(&l_ptr->link_list, &b_ptr->cong_links); 297 int res;
341}
342
343/*
344 * Schedule link for sending of messages after the bearer
345 * has been deblocked by 'continue()'. This method is called
346 * when somebody tries to send a message via this link while
347 * the bearer is congested. 'tipc_net_lock' is in read_lock here,
348 * bearer.lock is free
349 */
350void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
351{
352 spin_lock_bh(&b_ptr->lock);
353 tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
354 spin_unlock_bh(&b_ptr->lock);
355}
356
357 298
358/* 299 spin_lock_bh(&b->lock);
359 * tipc_bearer_resolve_congestion(): Check if there is bearer congestion, 300 res = b->blocked;
360 * and if there is, try to resolve it before returning. 301 spin_unlock_bh(&b->lock);
361 * 'tipc_net_lock' is read_locked when this function is called
362 */
363int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr,
364 struct tipc_link *l_ptr)
365{
366 int res = 1;
367 302
368 if (list_empty(&b_ptr->cong_links))
369 return 1;
370 spin_lock_bh(&b_ptr->lock);
371 if (!bearer_push(b_ptr)) {
372 tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
373 res = 0;
374 }
375 spin_unlock_bh(&b_ptr->lock);
376 return res; 303 return res;
377} 304}
378 305
379/** 306/**
380 * tipc_bearer_congested - determines if bearer is currently congested
381 */
382int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
383{
384 if (unlikely(b_ptr->blocked))
385 return 1;
386 if (likely(list_empty(&b_ptr->cong_links)))
387 return 0;
388 return !tipc_bearer_resolve_congestion(b_ptr, l_ptr);
389}
390
391/**
392 * tipc_enable_bearer - enable bearer with the given name 307 * tipc_enable_bearer - enable bearer with the given name
393 */ 308 */
394int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority) 309int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
@@ -489,7 +404,6 @@ restart:
489 b_ptr->net_plane = bearer_id + 'A'; 404 b_ptr->net_plane = bearer_id + 'A';
490 b_ptr->active = 1; 405 b_ptr->active = 1;
491 b_ptr->priority = priority; 406 b_ptr->priority = priority;
492 INIT_LIST_HEAD(&b_ptr->cong_links);
493 INIT_LIST_HEAD(&b_ptr->links); 407 INIT_LIST_HEAD(&b_ptr->links);
494 spin_lock_init(&b_ptr->lock); 408 spin_lock_init(&b_ptr->lock);
495 409
@@ -528,7 +442,6 @@ int tipc_block_bearer(const char *name)
528 pr_info("Blocking bearer <%s>\n", name); 442 pr_info("Blocking bearer <%s>\n", name);
529 spin_lock_bh(&b_ptr->lock); 443 spin_lock_bh(&b_ptr->lock);
530 b_ptr->blocked = 1; 444 b_ptr->blocked = 1;
531 list_splice_init(&b_ptr->cong_links, &b_ptr->links);
532 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { 445 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
533 struct tipc_node *n_ptr = l_ptr->owner; 446 struct tipc_node *n_ptr = l_ptr->owner;
534 447
@@ -555,7 +468,6 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
555 spin_lock_bh(&b_ptr->lock); 468 spin_lock_bh(&b_ptr->lock);
556 b_ptr->blocked = 1; 469 b_ptr->blocked = 1;
557 b_ptr->media->disable_bearer(b_ptr); 470 b_ptr->media->disable_bearer(b_ptr);
558 list_splice_init(&b_ptr->cong_links, &b_ptr->links);
559 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { 471 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
560 tipc_link_delete(l_ptr); 472 tipc_link_delete(l_ptr);
561 } 473 }
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index dd4c2abf08e7..39f1192d04bf 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -120,7 +120,6 @@ struct tipc_media {
120 * @identity: array index of this bearer within TIPC bearer array 120 * @identity: array index of this bearer within TIPC bearer array
121 * @link_req: ptr to (optional) structure making periodic link setup requests 121 * @link_req: ptr to (optional) structure making periodic link setup requests
122 * @links: list of non-congested links associated with bearer 122 * @links: list of non-congested links associated with bearer
123 * @cong_links: list of congested links associated with bearer
124 * @active: non-zero if bearer structure is represents a bearer 123 * @active: non-zero if bearer structure is represents a bearer
125 * @net_plane: network plane ('A' through 'H') currently associated with bearer 124 * @net_plane: network plane ('A' through 'H') currently associated with bearer
126 * @nodes: indicates which nodes in cluster can be reached through bearer 125 * @nodes: indicates which nodes in cluster can be reached through bearer
@@ -143,7 +142,6 @@ struct tipc_bearer {
143 u32 identity; 142 u32 identity;
144 struct tipc_link_req *link_req; 143 struct tipc_link_req *link_req;
145 struct list_head links; 144 struct list_head links;
146 struct list_head cong_links;
147 int active; 145 int active;
148 char net_plane; 146 char net_plane;
149 struct tipc_node_map nodes; 147 struct tipc_node_map nodes;
@@ -185,39 +183,23 @@ struct sk_buff *tipc_media_get_names(void);
185struct sk_buff *tipc_bearer_get_names(void); 183struct sk_buff *tipc_bearer_get_names(void);
186void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest); 184void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest);
187void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest); 185void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest);
188void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr);
189struct tipc_bearer *tipc_bearer_find(const char *name); 186struct tipc_bearer *tipc_bearer_find(const char *name);
190struct tipc_bearer *tipc_bearer_find_interface(const char *if_name); 187struct tipc_bearer *tipc_bearer_find_interface(const char *if_name);
191struct tipc_media *tipc_media_find(const char *name); 188struct tipc_media *tipc_media_find(const char *name);
192int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, 189int tipc_bearer_blocked(struct tipc_bearer *b_ptr);
193 struct tipc_link *l_ptr);
194int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr);
195void tipc_bearer_stop(void); 190void tipc_bearer_stop(void);
196void tipc_bearer_lock_push(struct tipc_bearer *b_ptr);
197
198 191
199/** 192/**
200 * tipc_bearer_send- sends buffer to destination over bearer 193 * tipc_bearer_send- sends buffer to destination over bearer
201 * 194 *
202 * Returns true (1) if successful, or false (0) if unable to send
203 *
204 * IMPORTANT: 195 * IMPORTANT:
205 * The media send routine must not alter the buffer being passed in 196 * The media send routine must not alter the buffer being passed in
206 * as it may be needed for later retransmission! 197 * as it may be needed for later retransmission!
207 *
208 * If the media send routine returns a non-zero value (indicating that
209 * it was unable to send the buffer), it must:
210 * 1) mark the bearer as blocked,
211 * 2) call tipc_continue() once the bearer is able to send again.
212 * Media types that are unable to meet these two critera must ensure their
213 * send routine always returns success -- even if the buffer was not sent --
214 * and let TIPC's link code deal with the undelivered message.
215 */ 198 */
216static inline int tipc_bearer_send(struct tipc_bearer *b_ptr, 199static inline void tipc_bearer_send(struct tipc_bearer *b, struct sk_buff *buf,
217 struct sk_buff *buf,
218 struct tipc_media_addr *dest) 200 struct tipc_media_addr *dest)
219{ 201{
220 return !b_ptr->media->send_msg(buf, b_ptr, dest); 202 b->media->send_msg(buf, b, dest);
221} 203}
222 204
223#endif /* _TIPC_BEARER_H */ 205#endif /* _TIPC_BEARER_H */
diff --git a/net/tipc/core.c b/net/tipc/core.c
index bfe8af88469a..fc05cecd7481 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -42,11 +42,6 @@
42 42
43#include <linux/module.h> 43#include <linux/module.h>
44 44
45#ifndef CONFIG_TIPC_PORTS
46#define CONFIG_TIPC_PORTS 8191
47#endif
48
49
50/* global variables used by multiple sub-systems within TIPC */ 45/* global variables used by multiple sub-systems within TIPC */
51int tipc_random __read_mostly; 46int tipc_random __read_mostly;
52 47
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 50eaa403eb6e..1074b9587e81 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -243,7 +243,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
243 if ((type == DSC_REQ_MSG) && !link_fully_up && !b_ptr->blocked) { 243 if ((type == DSC_REQ_MSG) && !link_fully_up && !b_ptr->blocked) {
244 rbuf = tipc_disc_init_msg(DSC_RESP_MSG, orig, b_ptr); 244 rbuf = tipc_disc_init_msg(DSC_RESP_MSG, orig, b_ptr);
245 if (rbuf) { 245 if (rbuf) {
246 b_ptr->media->send_msg(rbuf, b_ptr, &media_addr); 246 tipc_bearer_send(b_ptr, rbuf, &media_addr);
247 kfree_skb(rbuf); 247 kfree_skb(rbuf);
248 } 248 }
249 } 249 }
diff --git a/net/tipc/link.c b/net/tipc/link.c
index a79c755cb417..daa6080a2a0c 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/link.c: TIPC link code 2 * net/tipc/link.c: TIPC link code
3 * 3 *
4 * Copyright (c) 1996-2007, Ericsson AB 4 * Copyright (c) 1996-2007, 2012, Ericsson AB
5 * Copyright (c) 2004-2007, 2010-2011, Wind River Systems 5 * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -97,12 +97,13 @@ static int link_send_sections_long(struct tipc_port *sender,
97 struct iovec const *msg_sect, 97 struct iovec const *msg_sect,
98 u32 num_sect, unsigned int total_len, 98 u32 num_sect, unsigned int total_len,
99 u32 destnode); 99 u32 destnode);
100static void link_check_defragm_bufs(struct tipc_link *l_ptr);
101static void link_state_event(struct tipc_link *l_ptr, u32 event); 100static void link_state_event(struct tipc_link *l_ptr, u32 event);
102static void link_reset_statistics(struct tipc_link *l_ptr); 101static void link_reset_statistics(struct tipc_link *l_ptr);
103static void link_print(struct tipc_link *l_ptr, const char *str); 102static void link_print(struct tipc_link *l_ptr, const char *str);
104static void link_start(struct tipc_link *l_ptr); 103static void link_start(struct tipc_link *l_ptr);
105static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf); 104static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
105static void tipc_link_send_sync(struct tipc_link *l);
106static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf);
106 107
107/* 108/*
108 * Simple link routines 109 * Simple link routines
@@ -269,7 +270,6 @@ static void link_timeout(struct tipc_link *l_ptr)
269 } 270 }
270 271
271 /* do all other link processing performed on a periodic basis */ 272 /* do all other link processing performed on a periodic basis */
272 link_check_defragm_bufs(l_ptr);
273 273
274 link_state_event(l_ptr, TIMEOUT_EVT); 274 link_state_event(l_ptr, TIMEOUT_EVT);
275 275
@@ -712,6 +712,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
712 link_activate(l_ptr); 712 link_activate(l_ptr);
713 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); 713 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
714 l_ptr->fsm_msg_cnt++; 714 l_ptr->fsm_msg_cnt++;
715 if (l_ptr->owner->working_links == 1)
716 tipc_link_send_sync(l_ptr);
715 link_set_timer(l_ptr, cont_intv); 717 link_set_timer(l_ptr, cont_intv);
716 break; 718 break;
717 case RESET_MSG: 719 case RESET_MSG:
@@ -745,6 +747,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
745 link_activate(l_ptr); 747 link_activate(l_ptr);
746 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); 748 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
747 l_ptr->fsm_msg_cnt++; 749 l_ptr->fsm_msg_cnt++;
750 if (l_ptr->owner->working_links == 1)
751 tipc_link_send_sync(l_ptr);
748 link_set_timer(l_ptr, cont_intv); 752 link_set_timer(l_ptr, cont_intv);
749 break; 753 break;
750 case RESET_MSG: 754 case RESET_MSG:
@@ -872,17 +876,12 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
872 return link_send_long_buf(l_ptr, buf); 876 return link_send_long_buf(l_ptr, buf);
873 877
874 /* Packet can be queued or sent. */ 878 /* Packet can be queued or sent. */
875 if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) && 879 if (likely(!tipc_bearer_blocked(l_ptr->b_ptr) &&
876 !link_congested(l_ptr))) { 880 !link_congested(l_ptr))) {
877 link_add_to_outqueue(l_ptr, buf, msg); 881 link_add_to_outqueue(l_ptr, buf, msg);
878 882
879 if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) { 883 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
880 l_ptr->unacked_window = 0; 884 l_ptr->unacked_window = 0;
881 } else {
882 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
883 l_ptr->stats.bearer_congs++;
884 l_ptr->next_out = buf;
885 }
886 return dsz; 885 return dsz;
887 } 886 }
888 /* Congestion: can message be bundled ? */ 887 /* Congestion: can message be bundled ? */
@@ -891,10 +890,8 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
891 890
892 /* Try adding message to an existing bundle */ 891 /* Try adding message to an existing bundle */
893 if (l_ptr->next_out && 892 if (l_ptr->next_out &&
894 link_bundle_buf(l_ptr, l_ptr->last_out, buf)) { 893 link_bundle_buf(l_ptr, l_ptr->last_out, buf))
895 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
896 return dsz; 894 return dsz;
897 }
898 895
899 /* Try creating a new bundle */ 896 /* Try creating a new bundle */
900 if (size <= max_packet * 2 / 3) { 897 if (size <= max_packet * 2 / 3) {
@@ -917,7 +914,6 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
917 if (!l_ptr->next_out) 914 if (!l_ptr->next_out)
918 l_ptr->next_out = buf; 915 l_ptr->next_out = buf;
919 link_add_to_outqueue(l_ptr, buf, msg); 916 link_add_to_outqueue(l_ptr, buf, msg);
920 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
921 return dsz; 917 return dsz;
922} 918}
923 919
@@ -949,7 +945,48 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
949 return res; 945 return res;
950} 946}
951 947
952/** 948/*
949 * tipc_link_send_sync - synchronize broadcast link endpoints.
950 *
951 * Give a newly added peer node the sequence number where it should
952 * start receiving and acking broadcast packets.
953 *
954 * Called with node locked
955 */
956static void tipc_link_send_sync(struct tipc_link *l)
957{
958 struct sk_buff *buf;
959 struct tipc_msg *msg;
960
961 buf = tipc_buf_acquire(INT_H_SIZE);
962 if (!buf)
963 return;
964
965 msg = buf_msg(buf);
966 tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, l->addr);
967 msg_set_last_bcast(msg, l->owner->bclink.acked);
968 link_add_chain_to_outqueue(l, buf, 0);
969 tipc_link_push_queue(l);
970}
971
972/*
973 * tipc_link_recv_sync - synchronize broadcast link endpoints.
974 * Receive the sequence number where we should start receiving and
975 * acking broadcast packets from a newly added peer node, and open
976 * up for reception of such packets.
977 *
978 * Called with node locked
979 */
980static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf)
981{
982 struct tipc_msg *msg = buf_msg(buf);
983
984 n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
985 n->bclink.recv_permitted = true;
986 kfree_skb(buf);
987}
988
989/*
953 * tipc_link_send_names - send name table entries to new neighbor 990 * tipc_link_send_names - send name table entries to new neighbor
954 * 991 *
955 * Send routine for bulk delivery of name table messages when contact 992 * Send routine for bulk delivery of name table messages when contact
@@ -1006,16 +1043,11 @@ static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
1006 1043
1007 if (likely(!link_congested(l_ptr))) { 1044 if (likely(!link_congested(l_ptr))) {
1008 if (likely(msg_size(msg) <= l_ptr->max_pkt)) { 1045 if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
1009 if (likely(list_empty(&l_ptr->b_ptr->cong_links))) { 1046 if (likely(!tipc_bearer_blocked(l_ptr->b_ptr))) {
1010 link_add_to_outqueue(l_ptr, buf, msg); 1047 link_add_to_outqueue(l_ptr, buf, msg);
1011 if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, 1048 tipc_bearer_send(l_ptr->b_ptr, buf,
1012 &l_ptr->media_addr))) { 1049 &l_ptr->media_addr);
1013 l_ptr->unacked_window = 0; 1050 l_ptr->unacked_window = 0;
1014 return res;
1015 }
1016 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1017 l_ptr->stats.bearer_congs++;
1018 l_ptr->next_out = buf;
1019 return res; 1051 return res;
1020 } 1052 }
1021 } else 1053 } else
@@ -1106,7 +1138,7 @@ exit:
1106 1138
1107 /* Exit if link (or bearer) is congested */ 1139 /* Exit if link (or bearer) is congested */
1108 if (link_congested(l_ptr) || 1140 if (link_congested(l_ptr) ||
1109 !list_empty(&l_ptr->b_ptr->cong_links)) { 1141 tipc_bearer_blocked(l_ptr->b_ptr)) {
1110 res = link_schedule_port(l_ptr, 1142 res = link_schedule_port(l_ptr,
1111 sender->ref, res); 1143 sender->ref, res);
1112 goto exit; 1144 goto exit;
@@ -1329,15 +1361,11 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1329 if (r_q_size && buf) { 1361 if (r_q_size && buf) {
1330 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1362 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1331 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 1363 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1332 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1364 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
1333 l_ptr->retransm_queue_head = mod(++r_q_head); 1365 l_ptr->retransm_queue_head = mod(++r_q_head);
1334 l_ptr->retransm_queue_size = --r_q_size; 1366 l_ptr->retransm_queue_size = --r_q_size;
1335 l_ptr->stats.retransmitted++; 1367 l_ptr->stats.retransmitted++;
1336 return 0; 1368 return 0;
1337 } else {
1338 l_ptr->stats.bearer_congs++;
1339 return PUSH_FAILED;
1340 }
1341 } 1369 }
1342 1370
1343 /* Send deferred protocol message, if any: */ 1371 /* Send deferred protocol message, if any: */
@@ -1345,15 +1373,11 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1345 if (buf) { 1373 if (buf) {
1346 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1374 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1347 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 1375 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1348 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1376 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
1349 l_ptr->unacked_window = 0; 1377 l_ptr->unacked_window = 0;
1350 kfree_skb(buf); 1378 kfree_skb(buf);
1351 l_ptr->proto_msg_queue = NULL; 1379 l_ptr->proto_msg_queue = NULL;
1352 return 0; 1380 return 0;
1353 } else {
1354 l_ptr->stats.bearer_congs++;
1355 return PUSH_FAILED;
1356 }
1357 } 1381 }
1358 1382
1359 /* Send one deferred data message, if send window not full: */ 1383 /* Send one deferred data message, if send window not full: */
@@ -1366,18 +1390,14 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1366 if (mod(next - first) < l_ptr->queue_limit[0]) { 1390 if (mod(next - first) < l_ptr->queue_limit[0]) {
1367 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1391 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1368 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1392 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1369 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1393 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
1370 if (msg_user(msg) == MSG_BUNDLER) 1394 if (msg_user(msg) == MSG_BUNDLER)
1371 msg_set_type(msg, CLOSED_MSG); 1395 msg_set_type(msg, CLOSED_MSG);
1372 l_ptr->next_out = buf->next; 1396 l_ptr->next_out = buf->next;
1373 return 0; 1397 return 0;
1374 } else {
1375 l_ptr->stats.bearer_congs++;
1376 return PUSH_FAILED;
1377 }
1378 } 1398 }
1379 } 1399 }
1380 return PUSH_FINISHED; 1400 return 1;
1381} 1401}
1382 1402
1383/* 1403/*
@@ -1388,15 +1408,12 @@ void tipc_link_push_queue(struct tipc_link *l_ptr)
1388{ 1408{
1389 u32 res; 1409 u32 res;
1390 1410
1391 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) 1411 if (tipc_bearer_blocked(l_ptr->b_ptr))
1392 return; 1412 return;
1393 1413
1394 do { 1414 do {
1395 res = tipc_link_push_packet(l_ptr); 1415 res = tipc_link_push_packet(l_ptr);
1396 } while (!res); 1416 } while (!res);
1397
1398 if (res == PUSH_FAILED)
1399 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1400} 1417}
1401 1418
1402static void link_reset_all(unsigned long addr) 1419static void link_reset_all(unsigned long addr)
@@ -1454,9 +1471,8 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
1454 1471
1455 tipc_addr_string_fill(addr_string, n_ptr->addr); 1472 tipc_addr_string_fill(addr_string, n_ptr->addr);
1456 pr_info("Broadcast link info for %s\n", addr_string); 1473 pr_info("Broadcast link info for %s\n", addr_string);
1457 pr_info("Supportable: %d, Supported: %d, Acked: %u\n", 1474 pr_info("Reception permitted: %d, Acked: %u\n",
1458 n_ptr->bclink.supportable, 1475 n_ptr->bclink.recv_permitted,
1459 n_ptr->bclink.supported,
1460 n_ptr->bclink.acked); 1476 n_ptr->bclink.acked);
1461 pr_info("Last in: %u, Oos state: %u, Last sent: %u\n", 1477 pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
1462 n_ptr->bclink.last_in, 1478 n_ptr->bclink.last_in,
@@ -1481,7 +1497,7 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
1481 1497
1482 msg = buf_msg(buf); 1498 msg = buf_msg(buf);
1483 1499
1484 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) { 1500 if (tipc_bearer_blocked(l_ptr->b_ptr)) {
1485 if (l_ptr->retransm_queue_size == 0) { 1501 if (l_ptr->retransm_queue_size == 0) {
1486 l_ptr->retransm_queue_head = msg_seqno(msg); 1502 l_ptr->retransm_queue_head = msg_seqno(msg);
1487 l_ptr->retransm_queue_size = retransmits; 1503 l_ptr->retransm_queue_size = retransmits;
@@ -1491,7 +1507,7 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
1491 } 1507 }
1492 return; 1508 return;
1493 } else { 1509 } else {
1494 /* Detect repeated retransmit failures on uncongested bearer */ 1510 /* Detect repeated retransmit failures on unblocked bearer */
1495 if (l_ptr->last_retransmitted == msg_seqno(msg)) { 1511 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1496 if (++l_ptr->stale_count > 100) { 1512 if (++l_ptr->stale_count > 100) {
1497 link_retransmit_failure(l_ptr, buf); 1513 link_retransmit_failure(l_ptr, buf);
@@ -1507,17 +1523,10 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
1507 msg = buf_msg(buf); 1523 msg = buf_msg(buf);
1508 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1524 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1509 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1525 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1510 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1526 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
1511 buf = buf->next; 1527 buf = buf->next;
1512 retransmits--; 1528 retransmits--;
1513 l_ptr->stats.retransmitted++; 1529 l_ptr->stats.retransmitted++;
1514 } else {
1515 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1516 l_ptr->stats.bearer_congs++;
1517 l_ptr->retransm_queue_head = buf_seqno(buf);
1518 l_ptr->retransm_queue_size = retransmits;
1519 return;
1520 }
1521 } 1530 }
1522 1531
1523 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0; 1532 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
@@ -1676,7 +1685,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1676 ackd = msg_ack(msg); 1685 ackd = msg_ack(msg);
1677 1686
1678 /* Release acked messages */ 1687 /* Release acked messages */
1679 if (n_ptr->bclink.supported) 1688 if (n_ptr->bclink.recv_permitted)
1680 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); 1689 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1681 1690
1682 crs = l_ptr->first_out; 1691 crs = l_ptr->first_out;
@@ -1727,9 +1736,14 @@ deliver:
1727 tipc_link_recv_bundle(buf); 1736 tipc_link_recv_bundle(buf);
1728 continue; 1737 continue;
1729 case NAME_DISTRIBUTOR: 1738 case NAME_DISTRIBUTOR:
1739 n_ptr->bclink.recv_permitted = true;
1730 tipc_node_unlock(n_ptr); 1740 tipc_node_unlock(n_ptr);
1731 tipc_named_recv(buf); 1741 tipc_named_recv(buf);
1732 continue; 1742 continue;
1743 case BCAST_PROTOCOL:
1744 tipc_link_recv_sync(n_ptr, buf);
1745 tipc_node_unlock(n_ptr);
1746 continue;
1733 case CONN_MANAGER: 1747 case CONN_MANAGER:
1734 tipc_node_unlock(n_ptr); 1748 tipc_node_unlock(n_ptr);
1735 tipc_port_recv_proto_msg(buf); 1749 tipc_port_recv_proto_msg(buf);
@@ -1772,16 +1786,19 @@ deliver:
1772 continue; 1786 continue;
1773 } 1787 }
1774 1788
1789 /* Link is not in state WORKING_WORKING */
1775 if (msg_user(msg) == LINK_PROTOCOL) { 1790 if (msg_user(msg) == LINK_PROTOCOL) {
1776 link_recv_proto_msg(l_ptr, buf); 1791 link_recv_proto_msg(l_ptr, buf);
1777 head = link_insert_deferred_queue(l_ptr, head); 1792 head = link_insert_deferred_queue(l_ptr, head);
1778 tipc_node_unlock(n_ptr); 1793 tipc_node_unlock(n_ptr);
1779 continue; 1794 continue;
1780 } 1795 }
1796
1797 /* Traffic message. Conditionally activate link */
1781 link_state_event(l_ptr, TRAFFIC_MSG_EVT); 1798 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1782 1799
1783 if (link_working_working(l_ptr)) { 1800 if (link_working_working(l_ptr)) {
1784 /* Re-insert in front of queue */ 1801 /* Re-insert buffer in front of queue */
1785 buf->next = head; 1802 buf->next = head;
1786 head = buf; 1803 head = buf;
1787 tipc_node_unlock(n_ptr); 1804 tipc_node_unlock(n_ptr);
@@ -1972,21 +1989,13 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
1972 1989
1973 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); 1990 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
1974 1991
1975 /* Defer message if bearer is already congested */ 1992 /* Defer message if bearer is already blocked */
1976 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) { 1993 if (tipc_bearer_blocked(l_ptr->b_ptr)) {
1977 l_ptr->proto_msg_queue = buf;
1978 return;
1979 }
1980
1981 /* Defer message if attempting to send results in bearer congestion */
1982 if (!tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1983 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1984 l_ptr->proto_msg_queue = buf; 1994 l_ptr->proto_msg_queue = buf;
1985 l_ptr->stats.bearer_congs++;
1986 return; 1995 return;
1987 } 1996 }
1988 1997
1989 /* Discard message if it was sent successfully */ 1998 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
1990 l_ptr->unacked_window = 0; 1999 l_ptr->unacked_window = 0;
1991 kfree_skb(buf); 2000 kfree_skb(buf);
1992} 2001}
@@ -2057,7 +2066,6 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
2057 } else { 2066 } else {
2058 l_ptr->max_pkt = l_ptr->max_pkt_target; 2067 l_ptr->max_pkt = l_ptr->max_pkt_target;
2059 } 2068 }
2060 l_ptr->owner->bclink.supportable = (max_pkt_info != 0);
2061 2069
2062 /* Synchronize broadcast link info, if not done previously */ 2070 /* Synchronize broadcast link info, if not done previously */
2063 if (!tipc_node_is_up(l_ptr->owner)) { 2071 if (!tipc_node_is_up(l_ptr->owner)) {
@@ -2112,7 +2120,7 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
2112 } 2120 }
2113 2121
2114 /* Protocol message before retransmits, reduce loss risk */ 2122 /* Protocol message before retransmits, reduce loss risk */
2115 if (l_ptr->owner->bclink.supported) 2123 if (l_ptr->owner->bclink.recv_permitted)
2116 tipc_bclink_update_link_state(l_ptr->owner, 2124 tipc_bclink_update_link_state(l_ptr->owner,
2117 msg_last_bcast(msg)); 2125 msg_last_bcast(msg));
2118 2126
@@ -2487,16 +2495,6 @@ static void set_expected_frags(struct sk_buff *buf, u32 exp)
2487 msg_set_bcast_ack(buf_msg(buf), exp); 2495 msg_set_bcast_ack(buf_msg(buf), exp);
2488} 2496}
2489 2497
2490static u32 get_timer_cnt(struct sk_buff *buf)
2491{
2492 return msg_reroute_cnt(buf_msg(buf));
2493}
2494
2495static void incr_timer_cnt(struct sk_buff *buf)
2496{
2497 msg_incr_reroute_cnt(buf_msg(buf));
2498}
2499
2500/* 2498/*
2501 * tipc_link_recv_fragment(): Called with node lock on. Returns 2499 * tipc_link_recv_fragment(): Called with node lock on. Returns
2502 * the reassembled buffer if message is complete. 2500 * the reassembled buffer if message is complete.
@@ -2575,38 +2573,6 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2575 return 0; 2573 return 0;
2576} 2574}
2577 2575
2578/**
2579 * link_check_defragm_bufs - flush stale incoming message fragments
2580 * @l_ptr: pointer to link
2581 */
2582static void link_check_defragm_bufs(struct tipc_link *l_ptr)
2583{
2584 struct sk_buff *prev = NULL;
2585 struct sk_buff *next = NULL;
2586 struct sk_buff *buf = l_ptr->defragm_buf;
2587
2588 if (!buf)
2589 return;
2590 if (!link_working_working(l_ptr))
2591 return;
2592 while (buf) {
2593 u32 cnt = get_timer_cnt(buf);
2594
2595 next = buf->next;
2596 if (cnt < 4) {
2597 incr_timer_cnt(buf);
2598 prev = buf;
2599 } else {
2600 if (prev)
2601 prev->next = buf->next;
2602 else
2603 l_ptr->defragm_buf = buf->next;
2604 kfree_skb(buf);
2605 }
2606 buf = next;
2607 }
2608}
2609
2610static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance) 2576static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
2611{ 2577{
2612 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL)) 2578 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
@@ -2937,8 +2903,8 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2937 s->sent_nacks, s->sent_acks, s->retransmitted); 2903 s->sent_nacks, s->sent_acks, s->retransmitted);
2938 2904
2939 ret += tipc_snprintf(buf + ret, buf_size - ret, 2905 ret += tipc_snprintf(buf + ret, buf_size - ret,
2940 " Congestion bearer:%u link:%u Send queue" 2906 " Congestion link:%u Send queue"
2941 " max:%u avg:%u\n", s->bearer_congs, s->link_congs, 2907 " max:%u avg:%u\n", s->link_congs,
2942 s->max_queue_sz, s->queue_sz_counts ? 2908 s->max_queue_sz, s->queue_sz_counts ?
2943 (s->accu_queue_sz / s->queue_sz_counts) : 0); 2909 (s->accu_queue_sz / s->queue_sz_counts) : 0);
2944 2910
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 6e921121be06..c048ed1cbd76 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -40,9 +40,6 @@
40#include "msg.h" 40#include "msg.h"
41#include "node.h" 41#include "node.h"
42 42
43#define PUSH_FAILED 1
44#define PUSH_FINISHED 2
45
46/* 43/*
47 * Out-of-range value for link sequence numbers 44 * Out-of-range value for link sequence numbers
48 */ 45 */
@@ -82,7 +79,6 @@ struct tipc_stats {
82 u32 recv_fragmented; 79 u32 recv_fragmented;
83 u32 recv_fragments; 80 u32 recv_fragments;
84 u32 link_congs; /* # port sends blocked by congestion */ 81 u32 link_congs; /* # port sends blocked by congestion */
85 u32 bearer_congs;
86 u32 deferred_recv; 82 u32 deferred_recv;
87 u32 duplicates; 83 u32 duplicates;
88 u32 max_queue_sz; /* send queue size high water mark */ 84 u32 max_queue_sz; /* send queue size high water mark */
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 55d3928dfd67..e0d08055754e 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -262,7 +262,7 @@ void tipc_named_node_up(unsigned long nodearg)
262 named_distribute(&message_list, node, &publ_zone, max_item_buf); 262 named_distribute(&message_list, node, &publ_zone, max_item_buf);
263 read_unlock_bh(&tipc_nametbl_lock); 263 read_unlock_bh(&tipc_nametbl_lock);
264 264
265 tipc_link_send_names(&message_list, (u32)node); 265 tipc_link_send_names(&message_list, node);
266} 266}
267 267
268/** 268/**
diff --git a/net/tipc/node.c b/net/tipc/node.c
index d21db204e25a..48f39dd3eae8 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/node.c: TIPC node management routines 2 * net/tipc/node.c: TIPC node management routines
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, 2012 Ericsson AB
5 * Copyright (c) 2005-2006, 2010-2011, Wind River Systems 5 * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -263,12 +263,9 @@ void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
263static void node_established_contact(struct tipc_node *n_ptr) 263static void node_established_contact(struct tipc_node *n_ptr)
264{ 264{
265 tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr); 265 tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
266 266 n_ptr->bclink.oos_state = 0;
267 if (n_ptr->bclink.supportable) { 267 n_ptr->bclink.acked = tipc_bclink_get_last_sent();
268 n_ptr->bclink.acked = tipc_bclink_get_last_sent(); 268 tipc_bclink_add_node(n_ptr->addr);
269 tipc_bclink_add_node(n_ptr->addr);
270 n_ptr->bclink.supported = 1;
271 }
272} 269}
273 270
274static void node_name_purge_complete(unsigned long node_addr) 271static void node_name_purge_complete(unsigned long node_addr)
@@ -294,7 +291,7 @@ static void node_lost_contact(struct tipc_node *n_ptr)
294 tipc_addr_string_fill(addr_string, n_ptr->addr)); 291 tipc_addr_string_fill(addr_string, n_ptr->addr));
295 292
296 /* Flush broadcast link info associated with lost node */ 293 /* Flush broadcast link info associated with lost node */
297 if (n_ptr->bclink.supported) { 294 if (n_ptr->bclink.recv_permitted) {
298 while (n_ptr->bclink.deferred_head) { 295 while (n_ptr->bclink.deferred_head) {
299 struct sk_buff *buf = n_ptr->bclink.deferred_head; 296 struct sk_buff *buf = n_ptr->bclink.deferred_head;
300 n_ptr->bclink.deferred_head = buf->next; 297 n_ptr->bclink.deferred_head = buf->next;
@@ -310,7 +307,7 @@ static void node_lost_contact(struct tipc_node *n_ptr)
310 tipc_bclink_remove_node(n_ptr->addr); 307 tipc_bclink_remove_node(n_ptr->addr);
311 tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ); 308 tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);
312 309
313 n_ptr->bclink.supported = 0; 310 n_ptr->bclink.recv_permitted = false;
314 } 311 }
315 312
316 /* Abort link changeover */ 313 /* Abort link changeover */
diff --git a/net/tipc/node.h b/net/tipc/node.h
index cfcaf4d6e480..3c189b35b102 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -67,8 +67,6 @@
67 * @permit_changeover: non-zero if node has redundant links to this system 67 * @permit_changeover: non-zero if node has redundant links to this system
68 * @signature: node instance identifier 68 * @signature: node instance identifier
69 * @bclink: broadcast-related info 69 * @bclink: broadcast-related info
70 * @supportable: non-zero if node supports TIPC b'cast link capability
71 * @supported: non-zero if node supports TIPC b'cast capability
72 * @acked: sequence # of last outbound b'cast message acknowledged by node 70 * @acked: sequence # of last outbound b'cast message acknowledged by node
73 * @last_in: sequence # of last in-sequence b'cast message received from node 71 * @last_in: sequence # of last in-sequence b'cast message received from node
74 * @last_sent: sequence # of last b'cast message sent by node 72 * @last_sent: sequence # of last b'cast message sent by node
@@ -77,6 +75,7 @@
77 * @deferred_head: oldest OOS b'cast message received from node 75 * @deferred_head: oldest OOS b'cast message received from node
78 * @deferred_tail: newest OOS b'cast message received from node 76 * @deferred_tail: newest OOS b'cast message received from node
79 * @defragm: list of partially reassembled b'cast message fragments from node 77 * @defragm: list of partially reassembled b'cast message fragments from node
78 * @recv_permitted: true if node is allowed to receive b'cast messages
80 */ 79 */
81struct tipc_node { 80struct tipc_node {
82 u32 addr; 81 u32 addr;
@@ -92,8 +91,6 @@ struct tipc_node {
92 int permit_changeover; 91 int permit_changeover;
93 u32 signature; 92 u32 signature;
94 struct { 93 struct {
95 u8 supportable;
96 u8 supported;
97 u32 acked; 94 u32 acked;
98 u32 last_in; 95 u32 last_in;
99 u32 last_sent; 96 u32 last_sent;
@@ -102,6 +99,7 @@ struct tipc_node {
102 struct sk_buff *deferred_head; 99 struct sk_buff *deferred_head;
103 struct sk_buff *deferred_tail; 100 struct sk_buff *deferred_tail;
104 struct sk_buff *defragm; 101 struct sk_buff *defragm;
102 bool recv_permitted;
105 } bclink; 103 } bclink;
106}; 104};
107 105
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 07c42fba672b..18098cac62f2 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -726,7 +726,7 @@ static void port_dispatcher_sigh(void *dummy)
726 if (unlikely(!cb)) 726 if (unlikely(!cb))
727 goto reject; 727 goto reject;
728 if (unlikely(!connected)) { 728 if (unlikely(!connected)) {
729 if (tipc_connect2port(dref, &orig)) 729 if (tipc_connect(dref, &orig))
730 goto reject; 730 goto reject;
731 } else if (peer_invalid) 731 } else if (peer_invalid)
732 goto reject; 732 goto reject;
@@ -1036,15 +1036,30 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1036 return res; 1036 return res;
1037} 1037}
1038 1038
1039int tipc_connect2port(u32 ref, struct tipc_portid const *peer) 1039int tipc_connect(u32 ref, struct tipc_portid const *peer)
1040{ 1040{
1041 struct tipc_port *p_ptr; 1041 struct tipc_port *p_ptr;
1042 struct tipc_msg *msg; 1042 int res;
1043 int res = -EINVAL;
1044 1043
1045 p_ptr = tipc_port_lock(ref); 1044 p_ptr = tipc_port_lock(ref);
1046 if (!p_ptr) 1045 if (!p_ptr)
1047 return -EINVAL; 1046 return -EINVAL;
1047 res = __tipc_connect(ref, p_ptr, peer);
1048 tipc_port_unlock(p_ptr);
1049 return res;
1050}
1051
1052/*
1053 * __tipc_connect - connect to a remote peer
1054 *
1055 * Port must be locked.
1056 */
1057int __tipc_connect(u32 ref, struct tipc_port *p_ptr,
1058 struct tipc_portid const *peer)
1059{
1060 struct tipc_msg *msg;
1061 int res = -EINVAL;
1062
1048 if (p_ptr->published || p_ptr->connected) 1063 if (p_ptr->published || p_ptr->connected)
1049 goto exit; 1064 goto exit;
1050 if (!peer->ref) 1065 if (!peer->ref)
@@ -1067,17 +1082,16 @@ int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
1067 (net_ev_handler)port_handle_node_down); 1082 (net_ev_handler)port_handle_node_down);
1068 res = 0; 1083 res = 0;
1069exit: 1084exit:
1070 tipc_port_unlock(p_ptr);
1071 p_ptr->max_pkt = tipc_link_get_max_pkt(peer->node, ref); 1085 p_ptr->max_pkt = tipc_link_get_max_pkt(peer->node, ref);
1072 return res; 1086 return res;
1073} 1087}
1074 1088
1075/** 1089/*
1076 * tipc_disconnect_port - disconnect port from peer 1090 * __tipc_disconnect - disconnect port from peer
1077 * 1091 *
1078 * Port must be locked. 1092 * Port must be locked.
1079 */ 1093 */
1080int tipc_disconnect_port(struct tipc_port *tp_ptr) 1094int __tipc_disconnect(struct tipc_port *tp_ptr)
1081{ 1095{
1082 int res; 1096 int res;
1083 1097
@@ -1104,7 +1118,7 @@ int tipc_disconnect(u32 ref)
1104 p_ptr = tipc_port_lock(ref); 1118 p_ptr = tipc_port_lock(ref);
1105 if (!p_ptr) 1119 if (!p_ptr)
1106 return -EINVAL; 1120 return -EINVAL;
1107 res = tipc_disconnect_port(p_ptr); 1121 res = __tipc_disconnect(p_ptr);
1108 tipc_port_unlock(p_ptr); 1122 tipc_port_unlock(p_ptr);
1109 return res; 1123 return res;
1110} 1124}
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 4660e3065790..fb66e2e5f4d1 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -190,7 +190,7 @@ int tipc_publish(u32 portref, unsigned int scope,
190int tipc_withdraw(u32 portref, unsigned int scope, 190int tipc_withdraw(u32 portref, unsigned int scope,
191 struct tipc_name_seq const *name_seq); 191 struct tipc_name_seq const *name_seq);
192 192
193int tipc_connect2port(u32 portref, struct tipc_portid const *port); 193int tipc_connect(u32 portref, struct tipc_portid const *port);
194 194
195int tipc_disconnect(u32 portref); 195int tipc_disconnect(u32 portref);
196 196
@@ -200,7 +200,9 @@ int tipc_shutdown(u32 ref);
200/* 200/*
201 * The following routines require that the port be locked on entry 201 * The following routines require that the port be locked on entry
202 */ 202 */
203int tipc_disconnect_port(struct tipc_port *tp_ptr); 203int __tipc_disconnect(struct tipc_port *tp_ptr);
204int __tipc_connect(u32 ref, struct tipc_port *p_ptr,
205 struct tipc_portid const *peer);
204int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg); 206int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
205 207
206/* 208/*
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index fd5f042dbff4..9b4e4833a484 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * net/tipc/socket.c: TIPC socket API 2 * net/tipc/socket.c: TIPC socket API
3 * 3 *
4 * Copyright (c) 2001-2007, Ericsson AB 4 * Copyright (c) 2001-2007, 2012 Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2011, Wind River Systems 5 * Copyright (c) 2004-2008, 2010-2012, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,7 @@
43#define SS_LISTENING -1 /* socket is listening */ 43#define SS_LISTENING -1 /* socket is listening */
44#define SS_READY -2 /* socket is connectionless */ 44#define SS_READY -2 /* socket is connectionless */
45 45
46#define OVERLOAD_LIMIT_BASE 5000 46#define OVERLOAD_LIMIT_BASE 10000
47#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ 47#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
48 48
49struct tipc_sock { 49struct tipc_sock {
@@ -62,6 +62,8 @@ struct tipc_sock {
62static int backlog_rcv(struct sock *sk, struct sk_buff *skb); 62static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
63static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf); 63static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
64static void wakeupdispatch(struct tipc_port *tport); 64static void wakeupdispatch(struct tipc_port *tport);
65static void tipc_data_ready(struct sock *sk, int len);
66static void tipc_write_space(struct sock *sk);
65 67
66static const struct proto_ops packet_ops; 68static const struct proto_ops packet_ops;
67static const struct proto_ops stream_ops; 69static const struct proto_ops stream_ops;
@@ -71,8 +73,6 @@ static struct proto tipc_proto;
71 73
72static int sockets_enabled; 74static int sockets_enabled;
73 75
74static atomic_t tipc_queue_size = ATOMIC_INIT(0);
75
76/* 76/*
77 * Revised TIPC socket locking policy: 77 * Revised TIPC socket locking policy:
78 * 78 *
@@ -126,7 +126,6 @@ static atomic_t tipc_queue_size = ATOMIC_INIT(0);
126static void advance_rx_queue(struct sock *sk) 126static void advance_rx_queue(struct sock *sk)
127{ 127{
128 kfree_skb(__skb_dequeue(&sk->sk_receive_queue)); 128 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
129 atomic_dec(&tipc_queue_size);
130} 129}
131 130
132/** 131/**
@@ -138,10 +137,8 @@ static void discard_rx_queue(struct sock *sk)
138{ 137{
139 struct sk_buff *buf; 138 struct sk_buff *buf;
140 139
141 while ((buf = __skb_dequeue(&sk->sk_receive_queue))) { 140 while ((buf = __skb_dequeue(&sk->sk_receive_queue)))
142 atomic_dec(&tipc_queue_size);
143 kfree_skb(buf); 141 kfree_skb(buf);
144 }
145} 142}
146 143
147/** 144/**
@@ -153,10 +150,8 @@ static void reject_rx_queue(struct sock *sk)
153{ 150{
154 struct sk_buff *buf; 151 struct sk_buff *buf;
155 152
156 while ((buf = __skb_dequeue(&sk->sk_receive_queue))) { 153 while ((buf = __skb_dequeue(&sk->sk_receive_queue)))
157 tipc_reject_msg(buf, TIPC_ERR_NO_PORT); 154 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
158 atomic_dec(&tipc_queue_size);
159 }
160} 155}
161 156
162/** 157/**
@@ -221,6 +216,8 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
221 sock_init_data(sock, sk); 216 sock_init_data(sock, sk);
222 sk->sk_backlog_rcv = backlog_rcv; 217 sk->sk_backlog_rcv = backlog_rcv;
223 sk->sk_rcvbuf = TIPC_FLOW_CONTROL_WIN * 2 * TIPC_MAX_USER_MSG_SIZE * 2; 218 sk->sk_rcvbuf = TIPC_FLOW_CONTROL_WIN * 2 * TIPC_MAX_USER_MSG_SIZE * 2;
219 sk->sk_data_ready = tipc_data_ready;
220 sk->sk_write_space = tipc_write_space;
224 tipc_sk(sk)->p = tp_ptr; 221 tipc_sk(sk)->p = tp_ptr;
225 tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT; 222 tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT;
226 223
@@ -276,7 +273,6 @@ static int release(struct socket *sock)
276 buf = __skb_dequeue(&sk->sk_receive_queue); 273 buf = __skb_dequeue(&sk->sk_receive_queue);
277 if (buf == NULL) 274 if (buf == NULL)
278 break; 275 break;
279 atomic_dec(&tipc_queue_size);
280 if (TIPC_SKB_CB(buf)->handle != 0) 276 if (TIPC_SKB_CB(buf)->handle != 0)
281 kfree_skb(buf); 277 kfree_skb(buf);
282 else { 278 else {
@@ -408,7 +404,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
408 * socket state flags set 404 * socket state flags set
409 * ------------ --------- 405 * ------------ ---------
410 * unconnected no read flags 406 * unconnected no read flags
411 * no write flags 407 * POLLOUT if port is not congested
412 * 408 *
413 * connecting POLLIN/POLLRDNORM if ACK/NACK in rx queue 409 * connecting POLLIN/POLLRDNORM if ACK/NACK in rx queue
414 * no write flags 410 * no write flags
@@ -435,9 +431,13 @@ static unsigned int poll(struct file *file, struct socket *sock,
435 struct sock *sk = sock->sk; 431 struct sock *sk = sock->sk;
436 u32 mask = 0; 432 u32 mask = 0;
437 433
438 poll_wait(file, sk_sleep(sk), wait); 434 sock_poll_wait(file, sk_sleep(sk), wait);
439 435
440 switch ((int)sock->state) { 436 switch ((int)sock->state) {
437 case SS_UNCONNECTED:
438 if (!tipc_sk_port(sk)->congested)
439 mask |= POLLOUT;
440 break;
441 case SS_READY: 441 case SS_READY:
442 case SS_CONNECTED: 442 case SS_CONNECTED:
443 if (!tipc_sk_port(sk)->congested) 443 if (!tipc_sk_port(sk)->congested)
@@ -775,16 +775,19 @@ exit:
775static int auto_connect(struct socket *sock, struct tipc_msg *msg) 775static int auto_connect(struct socket *sock, struct tipc_msg *msg)
776{ 776{
777 struct tipc_sock *tsock = tipc_sk(sock->sk); 777 struct tipc_sock *tsock = tipc_sk(sock->sk);
778 778 struct tipc_port *p_ptr;
779 if (msg_errcode(msg)) {
780 sock->state = SS_DISCONNECTING;
781 return -ECONNREFUSED;
782 }
783 779
784 tsock->peer_name.ref = msg_origport(msg); 780 tsock->peer_name.ref = msg_origport(msg);
785 tsock->peer_name.node = msg_orignode(msg); 781 tsock->peer_name.node = msg_orignode(msg);
786 tipc_connect2port(tsock->p->ref, &tsock->peer_name); 782 p_ptr = tipc_port_deref(tsock->p->ref);
787 tipc_set_portimportance(tsock->p->ref, msg_importance(msg)); 783 if (!p_ptr)
784 return -EINVAL;
785
786 __tipc_connect(tsock->p->ref, p_ptr, &tsock->peer_name);
787
788 if (msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)
789 return -EINVAL;
790 msg_set_importance(&p_ptr->phdr, (u32)msg_importance(msg));
788 sock->state = SS_CONNECTED; 791 sock->state = SS_CONNECTED;
789 return 0; 792 return 0;
790} 793}
@@ -943,13 +946,6 @@ restart:
943 sz = msg_data_sz(msg); 946 sz = msg_data_sz(msg);
944 err = msg_errcode(msg); 947 err = msg_errcode(msg);
945 948
946 /* Complete connection setup for an implied connect */
947 if (unlikely(sock->state == SS_CONNECTING)) {
948 res = auto_connect(sock, msg);
949 if (res)
950 goto exit;
951 }
952
953 /* Discard an empty non-errored message & try again */ 949 /* Discard an empty non-errored message & try again */
954 if ((!sz) && (!err)) { 950 if ((!sz) && (!err)) {
955 advance_rx_queue(sk); 951 advance_rx_queue(sk);
@@ -1126,6 +1122,39 @@ exit:
1126} 1122}
1127 1123
1128/** 1124/**
1125 * tipc_write_space - wake up thread if port congestion is released
1126 * @sk: socket
1127 */
1128static void tipc_write_space(struct sock *sk)
1129{
1130 struct socket_wq *wq;
1131
1132 rcu_read_lock();
1133 wq = rcu_dereference(sk->sk_wq);
1134 if (wq_has_sleeper(wq))
1135 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
1136 POLLWRNORM | POLLWRBAND);
1137 rcu_read_unlock();
1138}
1139
1140/**
1141 * tipc_data_ready - wake up threads to indicate messages have been received
1142 * @sk: socket
1143 * @len: the length of messages
1144 */
1145static void tipc_data_ready(struct sock *sk, int len)
1146{
1147 struct socket_wq *wq;
1148
1149 rcu_read_lock();
1150 wq = rcu_dereference(sk->sk_wq);
1151 if (wq_has_sleeper(wq))
1152 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
1153 POLLRDNORM | POLLRDBAND);
1154 rcu_read_unlock();
1155}
1156
1157/**
1129 * rx_queue_full - determine if receive queue can accept another message 1158 * rx_queue_full - determine if receive queue can accept another message
1130 * @msg: message to be added to queue 1159 * @msg: message to be added to queue
1131 * @queue_size: current size of queue 1160 * @queue_size: current size of queue
@@ -1154,6 +1183,83 @@ static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
1154} 1183}
1155 1184
1156/** 1185/**
1186 * filter_connect - Handle all incoming messages for a connection-based socket
1187 * @tsock: TIPC socket
1188 * @msg: message
1189 *
1190 * Returns TIPC error status code and socket error status code
1191 * once it encounters some errors
1192 */
1193static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
1194{
1195 struct socket *sock = tsock->sk.sk_socket;
1196 struct tipc_msg *msg = buf_msg(*buf);
1197 struct sock *sk = &tsock->sk;
1198 u32 retval = TIPC_ERR_NO_PORT;
1199 int res;
1200
1201 if (msg_mcast(msg))
1202 return retval;
1203
1204 switch ((int)sock->state) {
1205 case SS_CONNECTED:
1206 /* Accept only connection-based messages sent by peer */
1207 if (msg_connected(msg) && tipc_port_peer_msg(tsock->p, msg)) {
1208 if (unlikely(msg_errcode(msg))) {
1209 sock->state = SS_DISCONNECTING;
1210 __tipc_disconnect(tsock->p);
1211 }
1212 retval = TIPC_OK;
1213 }
1214 break;
1215 case SS_CONNECTING:
1216 /* Accept only ACK or NACK message */
1217 if (unlikely(msg_errcode(msg))) {
1218 sock->state = SS_DISCONNECTING;
1219 sk->sk_err = -ECONNREFUSED;
1220 retval = TIPC_OK;
1221 break;
1222 }
1223
1224 if (unlikely(!msg_connected(msg)))
1225 break;
1226
1227 res = auto_connect(sock, msg);
1228 if (res) {
1229 sock->state = SS_DISCONNECTING;
1230 sk->sk_err = res;
1231 retval = TIPC_OK;
1232 break;
1233 }
1234
1235 /* If an incoming message is an 'ACK-', it should be
1236 * discarded here because it doesn't contain useful
1237 * data. In addition, we should try to wake up
1238 * connect() routine if sleeping.
1239 */
1240 if (msg_data_sz(msg) == 0) {
1241 kfree_skb(*buf);
1242 *buf = NULL;
1243 if (waitqueue_active(sk_sleep(sk)))
1244 wake_up_interruptible(sk_sleep(sk));
1245 }
1246 retval = TIPC_OK;
1247 break;
1248 case SS_LISTENING:
1249 case SS_UNCONNECTED:
1250 /* Accept only SYN message */
1251 if (!msg_connected(msg) && !(msg_errcode(msg)))
1252 retval = TIPC_OK;
1253 break;
1254 case SS_DISCONNECTING:
1255 break;
1256 default:
1257 pr_err("Unknown socket state %u\n", sock->state);
1258 }
1259 return retval;
1260}
1261
1262/**
1157 * filter_rcv - validate incoming message 1263 * filter_rcv - validate incoming message
1158 * @sk: socket 1264 * @sk: socket
1159 * @buf: message 1265 * @buf: message
@@ -1170,6 +1276,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1170 struct socket *sock = sk->sk_socket; 1276 struct socket *sock = sk->sk_socket;
1171 struct tipc_msg *msg = buf_msg(buf); 1277 struct tipc_msg *msg = buf_msg(buf);
1172 u32 recv_q_len; 1278 u32 recv_q_len;
1279 u32 res = TIPC_OK;
1173 1280
1174 /* Reject message if it is wrong sort of message for socket */ 1281 /* Reject message if it is wrong sort of message for socket */
1175 if (msg_type(msg) > TIPC_DIRECT_MSG) 1282 if (msg_type(msg) > TIPC_DIRECT_MSG)
@@ -1179,32 +1286,12 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1179 if (msg_connected(msg)) 1286 if (msg_connected(msg))
1180 return TIPC_ERR_NO_PORT; 1287 return TIPC_ERR_NO_PORT;
1181 } else { 1288 } else {
1182 if (msg_mcast(msg)) 1289 res = filter_connect(tipc_sk(sk), &buf);
1183 return TIPC_ERR_NO_PORT; 1290 if (res != TIPC_OK || buf == NULL)
1184 if (sock->state == SS_CONNECTED) { 1291 return res;
1185 if (!msg_connected(msg) ||
1186 !tipc_port_peer_msg(tipc_sk_port(sk), msg))
1187 return TIPC_ERR_NO_PORT;
1188 } else if (sock->state == SS_CONNECTING) {
1189 if (!msg_connected(msg) && (msg_errcode(msg) == 0))
1190 return TIPC_ERR_NO_PORT;
1191 } else if (sock->state == SS_LISTENING) {
1192 if (msg_connected(msg) || msg_errcode(msg))
1193 return TIPC_ERR_NO_PORT;
1194 } else if (sock->state == SS_DISCONNECTING) {
1195 return TIPC_ERR_NO_PORT;
1196 } else /* (sock->state == SS_UNCONNECTED) */ {
1197 if (msg_connected(msg) || msg_errcode(msg))
1198 return TIPC_ERR_NO_PORT;
1199 }
1200 } 1292 }
1201 1293
1202 /* Reject message if there isn't room to queue it */ 1294 /* Reject message if there isn't room to queue it */
1203 recv_q_len = (u32)atomic_read(&tipc_queue_size);
1204 if (unlikely(recv_q_len >= OVERLOAD_LIMIT_BASE)) {
1205 if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE))
1206 return TIPC_ERR_OVERLOAD;
1207 }
1208 recv_q_len = skb_queue_len(&sk->sk_receive_queue); 1295 recv_q_len = skb_queue_len(&sk->sk_receive_queue);
1209 if (unlikely(recv_q_len >= (OVERLOAD_LIMIT_BASE / 2))) { 1296 if (unlikely(recv_q_len >= (OVERLOAD_LIMIT_BASE / 2))) {
1210 if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE / 2)) 1297 if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE / 2))
@@ -1213,17 +1300,9 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1213 1300
1214 /* Enqueue message (finally!) */ 1301 /* Enqueue message (finally!) */
1215 TIPC_SKB_CB(buf)->handle = 0; 1302 TIPC_SKB_CB(buf)->handle = 0;
1216 atomic_inc(&tipc_queue_size);
1217 __skb_queue_tail(&sk->sk_receive_queue, buf); 1303 __skb_queue_tail(&sk->sk_receive_queue, buf);
1218 1304
1219 /* Initiate connection termination for an incoming 'FIN' */ 1305 sk->sk_data_ready(sk, 0);
1220 if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
1221 sock->state = SS_DISCONNECTING;
1222 tipc_disconnect_port(tipc_sk_port(sk));
1223 }
1224
1225 if (waitqueue_active(sk_sleep(sk)))
1226 wake_up_interruptible(sk_sleep(sk));
1227 return TIPC_OK; 1306 return TIPC_OK;
1228} 1307}
1229 1308
@@ -1290,8 +1369,7 @@ static void wakeupdispatch(struct tipc_port *tport)
1290{ 1369{
1291 struct sock *sk = (struct sock *)tport->usr_handle; 1370 struct sock *sk = (struct sock *)tport->usr_handle;
1292 1371
1293 if (waitqueue_active(sk_sleep(sk))) 1372 sk->sk_write_space(sk);
1294 wake_up_interruptible(sk_sleep(sk));
1295} 1373}
1296 1374
1297/** 1375/**
@@ -1309,8 +1387,6 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1309 struct sock *sk = sock->sk; 1387 struct sock *sk = sock->sk;
1310 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest; 1388 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
1311 struct msghdr m = {NULL,}; 1389 struct msghdr m = {NULL,};
1312 struct sk_buff *buf;
1313 struct tipc_msg *msg;
1314 unsigned int timeout; 1390 unsigned int timeout;
1315 int res; 1391 int res;
1316 1392
@@ -1322,26 +1398,6 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1322 goto exit; 1398 goto exit;
1323 } 1399 }
1324 1400
1325 /* For now, TIPC does not support the non-blocking form of connect() */
1326 if (flags & O_NONBLOCK) {
1327 res = -EOPNOTSUPP;
1328 goto exit;
1329 }
1330
1331 /* Issue Posix-compliant error code if socket is in the wrong state */
1332 if (sock->state == SS_LISTENING) {
1333 res = -EOPNOTSUPP;
1334 goto exit;
1335 }
1336 if (sock->state == SS_CONNECTING) {
1337 res = -EALREADY;
1338 goto exit;
1339 }
1340 if (sock->state != SS_UNCONNECTED) {
1341 res = -EISCONN;
1342 goto exit;
1343 }
1344
1345 /* 1401 /*
1346 * Reject connection attempt using multicast address 1402 * Reject connection attempt using multicast address
1347 * 1403 *
@@ -1353,49 +1409,66 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1353 goto exit; 1409 goto exit;
1354 } 1410 }
1355 1411
1356 /* Reject any messages already in receive queue (very unlikely) */ 1412 timeout = (flags & O_NONBLOCK) ? 0 : tipc_sk(sk)->conn_timeout;
1357 reject_rx_queue(sk);
1358 1413
1359 /* Send a 'SYN-' to destination */ 1414 switch (sock->state) {
1360 m.msg_name = dest; 1415 case SS_UNCONNECTED:
1361 m.msg_namelen = destlen; 1416 /* Send a 'SYN-' to destination */
1362 res = send_msg(NULL, sock, &m, 0); 1417 m.msg_name = dest;
1363 if (res < 0) 1418 m.msg_namelen = destlen;
1419
1420 /* If connect is in non-blocking case, set MSG_DONTWAIT to
1421 * indicate send_msg() is never blocked.
1422 */
1423 if (!timeout)
1424 m.msg_flags = MSG_DONTWAIT;
1425
1426 res = send_msg(NULL, sock, &m, 0);
1427 if ((res < 0) && (res != -EWOULDBLOCK))
1428 goto exit;
1429
1430 /* Just entered SS_CONNECTING state; the only
1431 * difference is that return value in non-blocking
1432 * case is EINPROGRESS, rather than EALREADY.
1433 */
1434 res = -EINPROGRESS;
1435 break;
1436 case SS_CONNECTING:
1437 res = -EALREADY;
1438 break;
1439 case SS_CONNECTED:
1440 res = -EISCONN;
1441 break;
1442 default:
1443 res = -EINVAL;
1364 goto exit; 1444 goto exit;
1445 }
1365 1446
1366 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ 1447 if (sock->state == SS_CONNECTING) {
1367 timeout = tipc_sk(sk)->conn_timeout; 1448 if (!timeout)
1368 release_sock(sk); 1449 goto exit;
1369 res = wait_event_interruptible_timeout(*sk_sleep(sk),
1370 (!skb_queue_empty(&sk->sk_receive_queue) ||
1371 (sock->state != SS_CONNECTING)),
1372 timeout ? (long)msecs_to_jiffies(timeout)
1373 : MAX_SCHEDULE_TIMEOUT);
1374 lock_sock(sk);
1375 1450
1376 if (res > 0) { 1451 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
1377 buf = skb_peek(&sk->sk_receive_queue); 1452 release_sock(sk);
1378 if (buf != NULL) { 1453 res = wait_event_interruptible_timeout(*sk_sleep(sk),
1379 msg = buf_msg(buf); 1454 sock->state != SS_CONNECTING,
1380 res = auto_connect(sock, msg); 1455 timeout ? (long)msecs_to_jiffies(timeout)
1381 if (!res) { 1456 : MAX_SCHEDULE_TIMEOUT);
1382 if (!msg_data_sz(msg)) 1457 lock_sock(sk);
1383 advance_rx_queue(sk); 1458 if (res <= 0) {
1384 } 1459 if (res == 0)
1385 } else { 1460 res = -ETIMEDOUT;
1386 if (sock->state == SS_CONNECTED)
1387 res = -EISCONN;
1388 else 1461 else
1389 res = -ECONNREFUSED; 1462 ; /* leave "res" unchanged */
1463 goto exit;
1390 } 1464 }
1391 } else {
1392 if (res == 0)
1393 res = -ETIMEDOUT;
1394 else
1395 ; /* leave "res" unchanged */
1396 sock->state = SS_DISCONNECTING;
1397 } 1465 }
1398 1466
1467 if (unlikely(sock->state == SS_DISCONNECTING))
1468 res = sock_error(sk);
1469 else
1470 res = 0;
1471
1399exit: 1472exit:
1400 release_sock(sk); 1473 release_sock(sk);
1401 return res; 1474 return res;
@@ -1436,8 +1509,13 @@ static int listen(struct socket *sock, int len)
1436 */ 1509 */
1437static int accept(struct socket *sock, struct socket *new_sock, int flags) 1510static int accept(struct socket *sock, struct socket *new_sock, int flags)
1438{ 1511{
1439 struct sock *sk = sock->sk; 1512 struct sock *new_sk, *sk = sock->sk;
1440 struct sk_buff *buf; 1513 struct sk_buff *buf;
1514 struct tipc_sock *new_tsock;
1515 struct tipc_port *new_tport;
1516 struct tipc_msg *msg;
1517 u32 new_ref;
1518
1441 int res; 1519 int res;
1442 1520
1443 lock_sock(sk); 1521 lock_sock(sk);
@@ -1463,48 +1541,51 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
1463 buf = skb_peek(&sk->sk_receive_queue); 1541 buf = skb_peek(&sk->sk_receive_queue);
1464 1542
1465 res = tipc_create(sock_net(sock->sk), new_sock, 0, 0); 1543 res = tipc_create(sock_net(sock->sk), new_sock, 0, 0);
1466 if (!res) { 1544 if (res)
1467 struct sock *new_sk = new_sock->sk; 1545 goto exit;
1468 struct tipc_sock *new_tsock = tipc_sk(new_sk);
1469 struct tipc_port *new_tport = new_tsock->p;
1470 u32 new_ref = new_tport->ref;
1471 struct tipc_msg *msg = buf_msg(buf);
1472
1473 lock_sock(new_sk);
1474
1475 /*
1476 * Reject any stray messages received by new socket
1477 * before the socket lock was taken (very, very unlikely)
1478 */
1479 reject_rx_queue(new_sk);
1480
1481 /* Connect new socket to it's peer */
1482 new_tsock->peer_name.ref = msg_origport(msg);
1483 new_tsock->peer_name.node = msg_orignode(msg);
1484 tipc_connect2port(new_ref, &new_tsock->peer_name);
1485 new_sock->state = SS_CONNECTED;
1486
1487 tipc_set_portimportance(new_ref, msg_importance(msg));
1488 if (msg_named(msg)) {
1489 new_tport->conn_type = msg_nametype(msg);
1490 new_tport->conn_instance = msg_nameinst(msg);
1491 }
1492 1546
1493 /* 1547 new_sk = new_sock->sk;
1494 * Respond to 'SYN-' by discarding it & returning 'ACK'-. 1548 new_tsock = tipc_sk(new_sk);
1495 * Respond to 'SYN+' by queuing it on new socket. 1549 new_tport = new_tsock->p;
1496 */ 1550 new_ref = new_tport->ref;
1497 if (!msg_data_sz(msg)) { 1551 msg = buf_msg(buf);
1498 struct msghdr m = {NULL,};
1499 1552
1500 advance_rx_queue(sk); 1553 /* we lock on new_sk; but lockdep sees the lock on sk */
1501 send_packet(NULL, new_sock, &m, 0); 1554 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
1502 } else { 1555
1503 __skb_dequeue(&sk->sk_receive_queue); 1556 /*
1504 __skb_queue_head(&new_sk->sk_receive_queue, buf); 1557 * Reject any stray messages received by new socket
1505 } 1558 * before the socket lock was taken (very, very unlikely)
1506 release_sock(new_sk); 1559 */
1560 reject_rx_queue(new_sk);
1561
1562 /* Connect new socket to it's peer */
1563 new_tsock->peer_name.ref = msg_origport(msg);
1564 new_tsock->peer_name.node = msg_orignode(msg);
1565 tipc_connect(new_ref, &new_tsock->peer_name);
1566 new_sock->state = SS_CONNECTED;
1567
1568 tipc_set_portimportance(new_ref, msg_importance(msg));
1569 if (msg_named(msg)) {
1570 new_tport->conn_type = msg_nametype(msg);
1571 new_tport->conn_instance = msg_nameinst(msg);
1507 } 1572 }
1573
1574 /*
1575 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
1576 * Respond to 'SYN+' by queuing it on new socket.
1577 */
1578 if (!msg_data_sz(msg)) {
1579 struct msghdr m = {NULL,};
1580
1581 advance_rx_queue(sk);
1582 send_packet(NULL, new_sock, &m, 0);
1583 } else {
1584 __skb_dequeue(&sk->sk_receive_queue);
1585 __skb_queue_head(&new_sk->sk_receive_queue, buf);
1586 }
1587 release_sock(new_sk);
1588
1508exit: 1589exit:
1509 release_sock(sk); 1590 release_sock(sk);
1510 return res; 1591 return res;
@@ -1539,7 +1620,6 @@ restart:
1539 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */ 1620 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
1540 buf = __skb_dequeue(&sk->sk_receive_queue); 1621 buf = __skb_dequeue(&sk->sk_receive_queue);
1541 if (buf) { 1622 if (buf) {
1542 atomic_dec(&tipc_queue_size);
1543 if (TIPC_SKB_CB(buf)->handle != 0) { 1623 if (TIPC_SKB_CB(buf)->handle != 0) {
1544 kfree_skb(buf); 1624 kfree_skb(buf);
1545 goto restart; 1625 goto restart;
@@ -1556,10 +1636,11 @@ restart:
1556 1636
1557 case SS_DISCONNECTING: 1637 case SS_DISCONNECTING:
1558 1638
1559 /* Discard any unreceived messages; wake up sleeping tasks */ 1639 /* Discard any unreceived messages */
1560 discard_rx_queue(sk); 1640 discard_rx_queue(sk);
1561 if (waitqueue_active(sk_sleep(sk))) 1641
1562 wake_up_interruptible(sk_sleep(sk)); 1642 /* Wake up anyone sleeping in poll */
1643 sk->sk_state_change(sk);
1563 res = 0; 1644 res = 0;
1564 break; 1645 break;
1565 1646
@@ -1677,7 +1758,7 @@ static int getsockopt(struct socket *sock,
1677 /* no need to set "res", since already 0 at this point */ 1758 /* no need to set "res", since already 0 at this point */
1678 break; 1759 break;
1679 case TIPC_NODE_RECVQ_DEPTH: 1760 case TIPC_NODE_RECVQ_DEPTH:
1680 value = (u32)atomic_read(&tipc_queue_size); 1761 value = 0; /* was tipc_queue_size, now obsolete */
1681 break; 1762 break;
1682 case TIPC_SOCK_RECVQ_DEPTH: 1763 case TIPC_SOCK_RECVQ_DEPTH:
1683 value = skb_queue_len(&sk->sk_receive_queue); 1764 value = skb_queue_len(&sk->sk_receive_queue);
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 0f7d0d007e22..6b42d47029af 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -462,7 +462,7 @@ static void subscr_named_msg_event(void *usr_handle,
462 kfree(subscriber); 462 kfree(subscriber);
463 return; 463 return;
464 } 464 }
465 tipc_connect2port(subscriber->port_ref, orig); 465 tipc_connect(subscriber->port_ref, orig);
466 466
467 /* Lock server port (& save lock address for future use) */ 467 /* Lock server port (& save lock address for future use) */
468 subscriber->lock = tipc_port_lock(subscriber->port_ref)->lock; 468 subscriber->lock = tipc_port_lock(subscriber->port_ref)->lock;
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 06748f108a57..5ac19dc1d5e4 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -151,6 +151,9 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
151 sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO)) 151 sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
152 goto out_nlmsg_trim; 152 goto out_nlmsg_trim;
153 153
154 if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
155 goto out_nlmsg_trim;
156
154 return nlmsg_end(skb, nlh); 157 return nlmsg_end(skb, nlh);
155 158
156out_nlmsg_trim: 159out_nlmsg_trim:
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index b34b5b9792f0..8800604c93f4 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -34,6 +34,10 @@ int __net_init unix_sysctl_register(struct net *net)
34 if (table == NULL) 34 if (table == NULL)
35 goto err_alloc; 35 goto err_alloc;
36 36
37 /* Don't export sysctls to unprivileged users */
38 if (net->user_ns != &init_user_ns)
39 table[0].procname = NULL;
40
37 table[0].data = &net->unx.sysctl_max_dgram_qlen; 41 table[0].data = &net->unx.sysctl_max_dgram_qlen;
38 net->unx.ctl = register_net_sysctl(net, "net/unix", table); 42 net->unx.ctl = register_net_sysctl(net, "net/unix", table);
39 if (net->unx.ctl == NULL) 43 if (net->unx.ctl == NULL)
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index fe4adb12b3ef..16d08b399210 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -140,14 +140,13 @@ config CFG80211_WEXT
140 extensions with cfg80211-based drivers. 140 extensions with cfg80211-based drivers.
141 141
142config LIB80211 142config LIB80211
143 tristate "Common routines for IEEE802.11 drivers" 143 tristate
144 default n 144 default n
145 help 145 help
146 This options enables a library of common routines used 146 This options enables a library of common routines used
147 by IEEE802.11 wireless LAN drivers. 147 by IEEE802.11 wireless LAN drivers.
148 148
149 Drivers should select this themselves if needed. Say Y if 149 Drivers should select this themselves if needed.
150 you want this built into your kernel.
151 150
152config LIB80211_CRYPT_WEP 151config LIB80211_CRYPT_WEP
153 tristate 152 tristate
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 0f7e0d621ab0..a761670af31d 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -10,11 +10,13 @@ obj-$(CONFIG_WEXT_SPY) += wext-spy.o
10obj-$(CONFIG_WEXT_PRIV) += wext-priv.o 10obj-$(CONFIG_WEXT_PRIV) += wext-priv.o
11 11
12cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o 12cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o
13cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o ap.o 13cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o ap.o trace.o
14cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o 14cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
15cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o 15cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o
16cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o 16cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o
17 17
18CFLAGS_trace.o := -I$(src)
19
18ccflags-y += -D__CHECK_ENDIAN__ 20ccflags-y += -D__CHECK_ENDIAN__
19 21
20$(obj)/regdb.c: $(src)/db.txt $(src)/genregdb.awk 22$(obj)/regdb.c: $(src)/db.txt $(src)/genregdb.awk
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
index fcc60d8dbefa..324e8d851dc4 100644
--- a/net/wireless/ap.c
+++ b/net/wireless/ap.c
@@ -3,6 +3,7 @@
3#include <net/cfg80211.h> 3#include <net/cfg80211.h>
4#include "nl80211.h" 4#include "nl80211.h"
5#include "core.h" 5#include "core.h"
6#include "rdev-ops.h"
6 7
7 8
8static int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev, 9static int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
@@ -23,10 +24,11 @@ static int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
23 if (!wdev->beacon_interval) 24 if (!wdev->beacon_interval)
24 return -ENOENT; 25 return -ENOENT;
25 26
26 err = rdev->ops->stop_ap(&rdev->wiphy, dev); 27 err = rdev_stop_ap(rdev, dev);
27 if (!err) { 28 if (!err) {
28 wdev->beacon_interval = 0; 29 wdev->beacon_interval = 0;
29 wdev->channel = NULL; 30 wdev->channel = NULL;
31 wdev->ssid_len = 0;
30 } 32 }
31 33
32 return err; 34 return err;
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 2f876b9ee344..a7990bb16529 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -9,90 +9,303 @@
9#include <linux/export.h> 9#include <linux/export.h>
10#include <net/cfg80211.h> 10#include <net/cfg80211.h>
11#include "core.h" 11#include "core.h"
12#include "rdev-ops.h"
12 13
13struct ieee80211_channel * 14void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
14rdev_freq_to_chan(struct cfg80211_registered_device *rdev, 15 struct ieee80211_channel *chan,
15 int freq, enum nl80211_channel_type channel_type) 16 enum nl80211_channel_type chan_type)
16{ 17{
17 struct ieee80211_channel *chan; 18 if (WARN_ON(!chan))
18 struct ieee80211_sta_ht_cap *ht_cap; 19 return;
20
21 chandef->chan = chan;
22 chandef->center_freq2 = 0;
23
24 switch (chan_type) {
25 case NL80211_CHAN_NO_HT:
26 chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
27 chandef->center_freq1 = chan->center_freq;
28 break;
29 case NL80211_CHAN_HT20:
30 chandef->width = NL80211_CHAN_WIDTH_20;
31 chandef->center_freq1 = chan->center_freq;
32 break;
33 case NL80211_CHAN_HT40PLUS:
34 chandef->width = NL80211_CHAN_WIDTH_40;
35 chandef->center_freq1 = chan->center_freq + 10;
36 break;
37 case NL80211_CHAN_HT40MINUS:
38 chandef->width = NL80211_CHAN_WIDTH_40;
39 chandef->center_freq1 = chan->center_freq - 10;
40 break;
41 default:
42 WARN_ON(1);
43 }
44}
45EXPORT_SYMBOL(cfg80211_chandef_create);
46
47bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
48{
49 u32 control_freq;
50
51 if (!chandef->chan)
52 return false;
19 53
20 chan = ieee80211_get_channel(&rdev->wiphy, freq); 54 control_freq = chandef->chan->center_freq;
55
56 switch (chandef->width) {
57 case NL80211_CHAN_WIDTH_20:
58 case NL80211_CHAN_WIDTH_20_NOHT:
59 if (chandef->center_freq1 != control_freq)
60 return false;
61 if (chandef->center_freq2)
62 return false;
63 break;
64 case NL80211_CHAN_WIDTH_40:
65 if (chandef->center_freq1 != control_freq + 10 &&
66 chandef->center_freq1 != control_freq - 10)
67 return false;
68 if (chandef->center_freq2)
69 return false;
70 break;
71 case NL80211_CHAN_WIDTH_80P80:
72 if (chandef->center_freq1 != control_freq + 30 &&
73 chandef->center_freq1 != control_freq + 10 &&
74 chandef->center_freq1 != control_freq - 10 &&
75 chandef->center_freq1 != control_freq - 30)
76 return false;
77 if (!chandef->center_freq2)
78 return false;
79 break;
80 case NL80211_CHAN_WIDTH_80:
81 if (chandef->center_freq1 != control_freq + 30 &&
82 chandef->center_freq1 != control_freq + 10 &&
83 chandef->center_freq1 != control_freq - 10 &&
84 chandef->center_freq1 != control_freq - 30)
85 return false;
86 if (chandef->center_freq2)
87 return false;
88 break;
89 case NL80211_CHAN_WIDTH_160:
90 if (chandef->center_freq1 != control_freq + 70 &&
91 chandef->center_freq1 != control_freq + 50 &&
92 chandef->center_freq1 != control_freq + 30 &&
93 chandef->center_freq1 != control_freq + 10 &&
94 chandef->center_freq1 != control_freq - 10 &&
95 chandef->center_freq1 != control_freq - 30 &&
96 chandef->center_freq1 != control_freq - 50 &&
97 chandef->center_freq1 != control_freq - 70)
98 return false;
99 if (chandef->center_freq2)
100 return false;
101 break;
102 default:
103 return false;
104 }
105
106 return true;
107}
108EXPORT_SYMBOL(cfg80211_chandef_valid);
109
110static void chandef_primary_freqs(const struct cfg80211_chan_def *c,
111 int *pri40, int *pri80)
112{
113 int tmp;
114
115 switch (c->width) {
116 case NL80211_CHAN_WIDTH_40:
117 *pri40 = c->center_freq1;
118 *pri80 = 0;
119 break;
120 case NL80211_CHAN_WIDTH_80:
121 case NL80211_CHAN_WIDTH_80P80:
122 *pri80 = c->center_freq1;
123 /* n_P20 */
124 tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
125 /* n_P40 */
126 tmp /= 2;
127 /* freq_P40 */
128 *pri40 = c->center_freq1 - 20 + 40 * tmp;
129 break;
130 case NL80211_CHAN_WIDTH_160:
131 /* n_P20 */
132 tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
133 /* n_P40 */
134 tmp /= 2;
135 /* freq_P40 */
136 *pri40 = c->center_freq1 - 60 + 40 * tmp;
137 /* n_P80 */
138 tmp /= 2;
139 *pri80 = c->center_freq1 - 40 + 80 * tmp;
140 break;
141 default:
142 WARN_ON_ONCE(1);
143 }
144}
21 145
22 /* Primary channel not allowed */ 146const struct cfg80211_chan_def *
23 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) 147cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
148 const struct cfg80211_chan_def *c2)
149{
150 u32 c1_pri40, c1_pri80, c2_pri40, c2_pri80;
151
152 /* If they are identical, return */
153 if (cfg80211_chandef_identical(c1, c2))
154 return c1;
155
156 /* otherwise, must have same control channel */
157 if (c1->chan != c2->chan)
24 return NULL; 158 return NULL;
25 159
26 if (channel_type == NL80211_CHAN_HT40MINUS && 160 /*
27 chan->flags & IEEE80211_CHAN_NO_HT40MINUS) 161 * If they have the same width, but aren't identical,
162 * then they can't be compatible.
163 */
164 if (c1->width == c2->width)
28 return NULL; 165 return NULL;
29 else if (channel_type == NL80211_CHAN_HT40PLUS && 166
30 chan->flags & IEEE80211_CHAN_NO_HT40PLUS) 167 if (c1->width == NL80211_CHAN_WIDTH_20_NOHT ||
168 c1->width == NL80211_CHAN_WIDTH_20)
169 return c2;
170
171 if (c2->width == NL80211_CHAN_WIDTH_20_NOHT ||
172 c2->width == NL80211_CHAN_WIDTH_20)
173 return c1;
174
175 chandef_primary_freqs(c1, &c1_pri40, &c1_pri80);
176 chandef_primary_freqs(c2, &c2_pri40, &c2_pri80);
177
178 if (c1_pri40 != c2_pri40)
31 return NULL; 179 return NULL;
32 180
33 ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap; 181 WARN_ON(!c1_pri80 && !c2_pri80);
182 if (c1_pri80 && c2_pri80 && c1_pri80 != c2_pri80)
183 return NULL;
34 184
35 if (channel_type != NL80211_CHAN_NO_HT) { 185 if (c1->width > c2->width)
36 if (!ht_cap->ht_supported) 186 return c1;
37 return NULL; 187 return c2;
188}
189EXPORT_SYMBOL(cfg80211_chandef_compatible);
38 190
39 if (channel_type != NL80211_CHAN_HT20 && 191static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
40 (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) || 192 u32 center_freq, u32 bandwidth,
41 ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)) 193 u32 prohibited_flags)
42 return NULL; 194{
195 struct ieee80211_channel *c;
196 u32 freq;
197
198 for (freq = center_freq - bandwidth/2 + 10;
199 freq <= center_freq + bandwidth/2 - 10;
200 freq += 20) {
201 c = ieee80211_get_channel(wiphy, freq);
202 if (!c || c->flags & prohibited_flags)
203 return false;
43 } 204 }
44 205
45 return chan; 206 return true;
46} 207}
47 208
48bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy, 209bool cfg80211_chandef_usable(struct wiphy *wiphy,
49 struct ieee80211_channel *chan, 210 const struct cfg80211_chan_def *chandef,
50 enum nl80211_channel_type channel_type) 211 u32 prohibited_flags)
51{ 212{
52 struct ieee80211_channel *sec_chan; 213 struct ieee80211_sta_ht_cap *ht_cap;
53 int diff; 214 struct ieee80211_sta_vht_cap *vht_cap;
215 u32 width, control_freq;
54 216
55 switch (channel_type) { 217 if (WARN_ON(!cfg80211_chandef_valid(chandef)))
56 case NL80211_CHAN_HT40PLUS: 218 return false;
57 diff = 20; 219
220 ht_cap = &wiphy->bands[chandef->chan->band]->ht_cap;
221 vht_cap = &wiphy->bands[chandef->chan->band]->vht_cap;
222
223 control_freq = chandef->chan->center_freq;
224
225 switch (chandef->width) {
226 case NL80211_CHAN_WIDTH_20:
227 if (!ht_cap->ht_supported)
228 return false;
229 case NL80211_CHAN_WIDTH_20_NOHT:
230 width = 20;
58 break; 231 break;
59 case NL80211_CHAN_HT40MINUS: 232 case NL80211_CHAN_WIDTH_40:
60 diff = -20; 233 width = 40;
234 if (!ht_cap->ht_supported)
235 return false;
236 if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
237 ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)
238 return false;
239 if (chandef->center_freq1 < control_freq &&
240 chandef->chan->flags & IEEE80211_CHAN_NO_HT40MINUS)
241 return false;
242 if (chandef->center_freq1 > control_freq &&
243 chandef->chan->flags & IEEE80211_CHAN_NO_HT40PLUS)
244 return false;
245 break;
246 case NL80211_CHAN_WIDTH_80P80:
247 if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))
248 return false;
249 case NL80211_CHAN_WIDTH_80:
250 if (!vht_cap->vht_supported)
251 return false;
252 width = 80;
253 break;
254 case NL80211_CHAN_WIDTH_160:
255 if (!vht_cap->vht_supported)
256 return false;
257 if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ))
258 return false;
259 width = 160;
61 break; 260 break;
62 default: 261 default:
63 return true; 262 WARN_ON_ONCE(1);
263 return false;
64 } 264 }
65 265
66 sec_chan = ieee80211_get_channel(wiphy, chan->center_freq + diff); 266 /* TODO: missing regulatory check on 80/160 bandwidth */
67 if (!sec_chan)
68 return false;
69 267
70 /* we'll need a DFS capability later */ 268 if (width > 20)
71 if (sec_chan->flags & (IEEE80211_CHAN_DISABLED | 269 prohibited_flags |= IEEE80211_CHAN_NO_OFDM;
72 IEEE80211_CHAN_PASSIVE_SCAN | 270
73 IEEE80211_CHAN_NO_IBSS | 271 if (!cfg80211_secondary_chans_ok(wiphy, chandef->center_freq1,
74 IEEE80211_CHAN_RADAR)) 272 width, prohibited_flags))
75 return false; 273 return false;
76 274
77 return true; 275 if (!chandef->center_freq2)
276 return true;
277 return cfg80211_secondary_chans_ok(wiphy, chandef->center_freq2,
278 width, prohibited_flags);
78} 279}
79EXPORT_SYMBOL(cfg80211_can_beacon_sec_chan); 280EXPORT_SYMBOL(cfg80211_chandef_usable);
80 281
81int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev, 282bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
82 int freq, enum nl80211_channel_type chantype) 283 struct cfg80211_chan_def *chandef)
83{ 284{
84 struct ieee80211_channel *chan; 285 bool res;
286
287 trace_cfg80211_reg_can_beacon(wiphy, chandef);
85 288
289 res = cfg80211_chandef_usable(wiphy, chandef,
290 IEEE80211_CHAN_DISABLED |
291 IEEE80211_CHAN_PASSIVE_SCAN |
292 IEEE80211_CHAN_NO_IBSS |
293 IEEE80211_CHAN_RADAR);
294
295 trace_cfg80211_return_bool(res);
296 return res;
297}
298EXPORT_SYMBOL(cfg80211_reg_can_beacon);
299
300int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
301 struct cfg80211_chan_def *chandef)
302{
86 if (!rdev->ops->set_monitor_channel) 303 if (!rdev->ops->set_monitor_channel)
87 return -EOPNOTSUPP; 304 return -EOPNOTSUPP;
88 if (!cfg80211_has_monitors_only(rdev)) 305 if (!cfg80211_has_monitors_only(rdev))
89 return -EBUSY; 306 return -EBUSY;
90 307
91 chan = rdev_freq_to_chan(rdev, freq, chantype); 308 return rdev_set_monitor_channel(rdev, chandef);
92 if (!chan)
93 return -EINVAL;
94
95 return rdev->ops->set_monitor_channel(&rdev->wiphy, chan, chantype);
96} 309}
97 310
98void 311void
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 3f7253052088..b677eab55b68 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -26,6 +26,7 @@
26#include "debugfs.h" 26#include "debugfs.h"
27#include "wext-compat.h" 27#include "wext-compat.h"
28#include "ethtool.h" 28#include "ethtool.h"
29#include "rdev-ops.h"
29 30
30/* name for sysfs, %d is appended */ 31/* name for sysfs, %d is appended */
31#define PHY_NAME "phy" 32#define PHY_NAME "phy"
@@ -216,7 +217,7 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data)
216{ 217{
217 struct cfg80211_registered_device *rdev = data; 218 struct cfg80211_registered_device *rdev = data;
218 219
219 rdev->ops->rfkill_poll(&rdev->wiphy); 220 rdev_rfkill_poll(rdev);
220} 221}
221 222
222static int cfg80211_rfkill_set_block(void *data, bool blocked) 223static int cfg80211_rfkill_set_block(void *data, bool blocked)
@@ -240,7 +241,7 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
240 case NL80211_IFTYPE_P2P_DEVICE: 241 case NL80211_IFTYPE_P2P_DEVICE:
241 if (!wdev->p2p_started) 242 if (!wdev->p2p_started)
242 break; 243 break;
243 rdev->ops->stop_p2p_device(&rdev->wiphy, wdev); 244 rdev_stop_p2p_device(rdev, wdev);
244 wdev->p2p_started = false; 245 wdev->p2p_started = false;
245 rdev->opencount--; 246 rdev->opencount--;
246 break; 247 break;
@@ -325,6 +326,8 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
325 mutex_init(&rdev->devlist_mtx); 326 mutex_init(&rdev->devlist_mtx);
326 mutex_init(&rdev->sched_scan_mtx); 327 mutex_init(&rdev->sched_scan_mtx);
327 INIT_LIST_HEAD(&rdev->wdev_list); 328 INIT_LIST_HEAD(&rdev->wdev_list);
329 INIT_LIST_HEAD(&rdev->beacon_registrations);
330 spin_lock_init(&rdev->beacon_registrations_lock);
328 spin_lock_init(&rdev->bss_lock); 331 spin_lock_init(&rdev->bss_lock);
329 INIT_LIST_HEAD(&rdev->bss_list); 332 INIT_LIST_HEAD(&rdev->bss_list);
330 INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done); 333 INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done);
@@ -370,6 +373,8 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
370 rdev->wiphy.rts_threshold = (u32) -1; 373 rdev->wiphy.rts_threshold = (u32) -1;
371 rdev->wiphy.coverage_class = 0; 374 rdev->wiphy.coverage_class = 0;
372 375
376 rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH;
377
373 return &rdev->wiphy; 378 return &rdev->wiphy;
374} 379}
375EXPORT_SYMBOL(wiphy_new); 380EXPORT_SYMBOL(wiphy_new);
@@ -687,7 +692,7 @@ void wiphy_unregister(struct wiphy *wiphy)
687 flush_work(&rdev->event_work); 692 flush_work(&rdev->event_work);
688 693
689 if (rdev->wowlan && rdev->ops->set_wakeup) 694 if (rdev->wowlan && rdev->ops->set_wakeup)
690 rdev->ops->set_wakeup(&rdev->wiphy, false); 695 rdev_set_wakeup(rdev, false);
691 cfg80211_rdev_free_wowlan(rdev); 696 cfg80211_rdev_free_wowlan(rdev);
692} 697}
693EXPORT_SYMBOL(wiphy_unregister); 698EXPORT_SYMBOL(wiphy_unregister);
@@ -695,10 +700,15 @@ EXPORT_SYMBOL(wiphy_unregister);
695void cfg80211_dev_free(struct cfg80211_registered_device *rdev) 700void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
696{ 701{
697 struct cfg80211_internal_bss *scan, *tmp; 702 struct cfg80211_internal_bss *scan, *tmp;
703 struct cfg80211_beacon_registration *reg, *treg;
698 rfkill_destroy(rdev->rfkill); 704 rfkill_destroy(rdev->rfkill);
699 mutex_destroy(&rdev->mtx); 705 mutex_destroy(&rdev->mtx);
700 mutex_destroy(&rdev->devlist_mtx); 706 mutex_destroy(&rdev->devlist_mtx);
701 mutex_destroy(&rdev->sched_scan_mtx); 707 mutex_destroy(&rdev->sched_scan_mtx);
708 list_for_each_entry_safe(reg, treg, &rdev->beacon_registrations, list) {
709 list_del(&reg->list);
710 kfree(reg);
711 }
702 list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list) 712 list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
703 cfg80211_put_bss(&scan->pub); 713 cfg80211_put_bss(&scan->pub);
704 kfree(rdev); 714 kfree(rdev);
@@ -770,7 +780,7 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev)
770 case NL80211_IFTYPE_P2P_DEVICE: 780 case NL80211_IFTYPE_P2P_DEVICE:
771 if (!wdev->p2p_started) 781 if (!wdev->p2p_started)
772 break; 782 break;
773 rdev->ops->stop_p2p_device(&rdev->wiphy, wdev); 783 rdev_stop_p2p_device(rdev, wdev);
774 wdev->p2p_started = false; 784 wdev->p2p_started = false;
775 rdev->opencount--; 785 rdev->opencount--;
776 break; 786 break;
@@ -856,8 +866,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
856 /* allow mac80211 to determine the timeout */ 866 /* allow mac80211 to determine the timeout */
857 wdev->ps_timeout = -1; 867 wdev->ps_timeout = -1;
858 868
859 if (!dev->ethtool_ops) 869 netdev_set_default_ethtool_ops(dev, &cfg80211_ethtool_ops);
860 dev->ethtool_ops = &cfg80211_ethtool_ops;
861 870
862 if ((wdev->iftype == NL80211_IFTYPE_STATION || 871 if ((wdev->iftype == NL80211_IFTYPE_STATION ||
863 wdev->iftype == NL80211_IFTYPE_P2P_CLIENT || 872 wdev->iftype == NL80211_IFTYPE_P2P_CLIENT ||
@@ -961,9 +970,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
961 if ((wdev->iftype == NL80211_IFTYPE_STATION || 970 if ((wdev->iftype == NL80211_IFTYPE_STATION ||
962 wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) && 971 wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) &&
963 rdev->ops->set_power_mgmt) 972 rdev->ops->set_power_mgmt)
964 if (rdev->ops->set_power_mgmt(wdev->wiphy, dev, 973 if (rdev_set_power_mgmt(rdev, dev, wdev->ps,
965 wdev->ps, 974 wdev->ps_timeout)) {
966 wdev->ps_timeout)) {
967 /* assume this means it's off */ 975 /* assume this means it's off */
968 wdev->ps = false; 976 wdev->ps = false;
969 } 977 }
diff --git a/net/wireless/core.h b/net/wireless/core.h
index a343be4a52bd..3563097169cb 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -55,7 +55,8 @@ struct cfg80211_registered_device {
55 int opencount; /* also protected by devlist_mtx */ 55 int opencount; /* also protected by devlist_mtx */
56 wait_queue_head_t dev_wait; 56 wait_queue_head_t dev_wait;
57 57
58 u32 ap_beacons_nlportid; 58 struct list_head beacon_registrations;
59 spinlock_t beacon_registrations_lock;
59 60
60 /* protected by RTNL only */ 61 /* protected by RTNL only */
61 int num_running_ifaces; 62 int num_running_ifaces;
@@ -137,8 +138,6 @@ struct cfg80211_internal_bss {
137 unsigned long ts; 138 unsigned long ts;
138 struct kref ref; 139 struct kref ref;
139 atomic_t hold; 140 atomic_t hold;
140 bool beacon_ies_allocated;
141 bool proberesp_ies_allocated;
142 141
143 /* must be last because of priv member */ 142 /* must be last because of priv member */
144 struct cfg80211_bss pub; 143 struct cfg80211_bss pub;
@@ -260,6 +259,10 @@ enum cfg80211_chan_mode {
260 CHAN_MODE_EXCLUSIVE, 259 CHAN_MODE_EXCLUSIVE,
261}; 260};
262 261
262struct cfg80211_beacon_registration {
263 struct list_head list;
264 u32 nlportid;
265};
263 266
264/* free object */ 267/* free object */
265extern void cfg80211_dev_free(struct cfg80211_registered_device *rdev); 268extern void cfg80211_dev_free(struct cfg80211_registered_device *rdev);
@@ -304,9 +307,9 @@ int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
304 const struct mesh_config *conf); 307 const struct mesh_config *conf);
305int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev, 308int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
306 struct net_device *dev); 309 struct net_device *dev);
307int cfg80211_set_mesh_freq(struct cfg80211_registered_device *rdev, 310int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
308 struct wireless_dev *wdev, int freq, 311 struct wireless_dev *wdev,
309 enum nl80211_channel_type channel_type); 312 struct cfg80211_chan_def *chandef);
310 313
311/* AP */ 314/* AP */
312int cfg80211_stop_ap(struct cfg80211_registered_device *rdev, 315int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
@@ -320,13 +323,15 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
320 const u8 *bssid, 323 const u8 *bssid,
321 const u8 *ssid, int ssid_len, 324 const u8 *ssid, int ssid_len,
322 const u8 *ie, int ie_len, 325 const u8 *ie, int ie_len,
323 const u8 *key, int key_len, int key_idx); 326 const u8 *key, int key_len, int key_idx,
327 const u8 *sae_data, int sae_data_len);
324int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, 328int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
325 struct net_device *dev, struct ieee80211_channel *chan, 329 struct net_device *dev, struct ieee80211_channel *chan,
326 enum nl80211_auth_type auth_type, const u8 *bssid, 330 enum nl80211_auth_type auth_type, const u8 *bssid,
327 const u8 *ssid, int ssid_len, 331 const u8 *ssid, int ssid_len,
328 const u8 *ie, int ie_len, 332 const u8 *ie, int ie_len,
329 const u8 *key, int key_len, int key_idx); 333 const u8 *key, int key_len, int key_idx,
334 const u8 *sae_data, int sae_data_len);
330int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, 335int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
331 struct net_device *dev, 336 struct net_device *dev,
332 struct ieee80211_channel *chan, 337 struct ieee80211_channel *chan,
@@ -371,10 +376,8 @@ void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev);
371int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, 376int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
372 struct wireless_dev *wdev, 377 struct wireless_dev *wdev,
373 struct ieee80211_channel *chan, bool offchan, 378 struct ieee80211_channel *chan, bool offchan,
374 enum nl80211_channel_type channel_type, 379 unsigned int wait, const u8 *buf, size_t len,
375 bool channel_type_valid, unsigned int wait, 380 bool no_cck, bool dont_wait_for_ack, u64 *cookie);
376 const u8 *buf, size_t len, bool no_cck,
377 bool dont_wait_for_ack, u64 *cookie);
378void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa, 381void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa,
379 const struct ieee80211_ht_cap *ht_capa_mask); 382 const struct ieee80211_ht_cap *ht_capa_mask);
380 383
@@ -465,11 +468,8 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
465 struct ieee80211_channel **chan, 468 struct ieee80211_channel **chan,
466 enum cfg80211_chan_mode *chanmode); 469 enum cfg80211_chan_mode *chanmode);
467 470
468struct ieee80211_channel *
469rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
470 int freq, enum nl80211_channel_type channel_type);
471int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev, 471int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
472 int freq, enum nl80211_channel_type chantype); 472 struct cfg80211_chan_def *chandef);
473 473
474int ieee80211_get_ratemask(struct ieee80211_supported_band *sband, 474int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
475 const u8 *rates, unsigned int n_rates, 475 const u8 *rates, unsigned int n_rates,
diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
index 7eecdf40cf80..48c48ffafa1d 100644
--- a/net/wireless/ethtool.c
+++ b/net/wireless/ethtool.c
@@ -2,6 +2,7 @@
2#include <net/cfg80211.h> 2#include <net/cfg80211.h>
3#include "core.h" 3#include "core.h"
4#include "ethtool.h" 4#include "ethtool.h"
5#include "rdev-ops.h"
5 6
6static void cfg80211_get_drvinfo(struct net_device *dev, 7static void cfg80211_get_drvinfo(struct net_device *dev,
7 struct ethtool_drvinfo *info) 8 struct ethtool_drvinfo *info)
@@ -47,9 +48,8 @@ static void cfg80211_get_ringparam(struct net_device *dev,
47 memset(rp, 0, sizeof(*rp)); 48 memset(rp, 0, sizeof(*rp));
48 49
49 if (rdev->ops->get_ringparam) 50 if (rdev->ops->get_ringparam)
50 rdev->ops->get_ringparam(wdev->wiphy, 51 rdev_get_ringparam(rdev, &rp->tx_pending, &rp->tx_max_pending,
51 &rp->tx_pending, &rp->tx_max_pending, 52 &rp->rx_pending, &rp->rx_max_pending);
52 &rp->rx_pending, &rp->rx_max_pending);
53} 53}
54 54
55static int cfg80211_set_ringparam(struct net_device *dev, 55static int cfg80211_set_ringparam(struct net_device *dev,
@@ -62,8 +62,7 @@ static int cfg80211_set_ringparam(struct net_device *dev,
62 return -EINVAL; 62 return -EINVAL;
63 63
64 if (rdev->ops->set_ringparam) 64 if (rdev->ops->set_ringparam)
65 return rdev->ops->set_ringparam(wdev->wiphy, 65 return rdev_set_ringparam(rdev, rp->tx_pending, rp->rx_pending);
66 rp->tx_pending, rp->rx_pending);
67 66
68 return -ENOTSUPP; 67 return -ENOTSUPP;
69} 68}
@@ -73,7 +72,7 @@ static int cfg80211_get_sset_count(struct net_device *dev, int sset)
73 struct wireless_dev *wdev = dev->ieee80211_ptr; 72 struct wireless_dev *wdev = dev->ieee80211_ptr;
74 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 73 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
75 if (rdev->ops->get_et_sset_count) 74 if (rdev->ops->get_et_sset_count)
76 return rdev->ops->get_et_sset_count(wdev->wiphy, dev, sset); 75 return rdev_get_et_sset_count(rdev, dev, sset);
77 return -EOPNOTSUPP; 76 return -EOPNOTSUPP;
78} 77}
79 78
@@ -83,7 +82,7 @@ static void cfg80211_get_stats(struct net_device *dev,
83 struct wireless_dev *wdev = dev->ieee80211_ptr; 82 struct wireless_dev *wdev = dev->ieee80211_ptr;
84 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 83 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
85 if (rdev->ops->get_et_stats) 84 if (rdev->ops->get_et_stats)
86 rdev->ops->get_et_stats(wdev->wiphy, dev, stats, data); 85 rdev_get_et_stats(rdev, dev, stats, data);
87} 86}
88 87
89static void cfg80211_get_strings(struct net_device *dev, u32 sset, u8 *data) 88static void cfg80211_get_strings(struct net_device *dev, u32 sset, u8 *data)
@@ -91,7 +90,7 @@ static void cfg80211_get_strings(struct net_device *dev, u32 sset, u8 *data)
91 struct wireless_dev *wdev = dev->ieee80211_ptr; 90 struct wireless_dev *wdev = dev->ieee80211_ptr;
92 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 91 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
93 if (rdev->ops->get_et_strings) 92 if (rdev->ops->get_et_strings)
94 rdev->ops->get_et_strings(wdev->wiphy, dev, sset, data); 93 rdev_get_et_strings(rdev, dev, sset, data);
95} 94}
96 95
97const struct ethtool_ops cfg80211_ethtool_ops = { 96const struct ethtool_ops cfg80211_ethtool_ops = {
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index ca5672f6ee2f..9b9551e4a6f9 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -11,6 +11,7 @@
11#include <net/cfg80211.h> 11#include <net/cfg80211.h>
12#include "wext-compat.h" 12#include "wext-compat.h"
13#include "nl80211.h" 13#include "nl80211.h"
14#include "rdev-ops.h"
14 15
15 16
16void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid) 17void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
@@ -61,6 +62,8 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
61 struct cfg80211_event *ev; 62 struct cfg80211_event *ev;
62 unsigned long flags; 63 unsigned long flags;
63 64
65 trace_cfg80211_ibss_joined(dev, bssid);
66
64 CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING); 67 CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING);
65 68
66 ev = kzalloc(sizeof(*ev), gfp); 69 ev = kzalloc(sizeof(*ev), gfp);
@@ -97,9 +100,9 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
97 * 11a for maximum compatibility. 100 * 11a for maximum compatibility.
98 */ 101 */
99 struct ieee80211_supported_band *sband = 102 struct ieee80211_supported_band *sband =
100 rdev->wiphy.bands[params->channel->band]; 103 rdev->wiphy.bands[params->chandef.chan->band];
101 int j; 104 int j;
102 u32 flag = params->channel->band == IEEE80211_BAND_5GHZ ? 105 u32 flag = params->chandef.chan->band == IEEE80211_BAND_5GHZ ?
103 IEEE80211_RATE_MANDATORY_A : 106 IEEE80211_RATE_MANDATORY_A :
104 IEEE80211_RATE_MANDATORY_B; 107 IEEE80211_RATE_MANDATORY_B;
105 108
@@ -115,11 +118,11 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
115 118
116 wdev->ibss_fixed = params->channel_fixed; 119 wdev->ibss_fixed = params->channel_fixed;
117#ifdef CONFIG_CFG80211_WEXT 120#ifdef CONFIG_CFG80211_WEXT
118 wdev->wext.ibss.channel = params->channel; 121 wdev->wext.ibss.chandef = params->chandef;
119#endif 122#endif
120 wdev->sme_state = CFG80211_SME_CONNECTING; 123 wdev->sme_state = CFG80211_SME_CONNECTING;
121 124
122 err = cfg80211_can_use_chan(rdev, wdev, params->channel, 125 err = cfg80211_can_use_chan(rdev, wdev, params->chandef.chan,
123 params->channel_fixed 126 params->channel_fixed
124 ? CHAN_MODE_SHARED 127 ? CHAN_MODE_SHARED
125 : CHAN_MODE_EXCLUSIVE); 128 : CHAN_MODE_EXCLUSIVE);
@@ -128,7 +131,7 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
128 return err; 131 return err;
129 } 132 }
130 133
131 err = rdev->ops->join_ibss(&rdev->wiphy, dev, params); 134 err = rdev_join_ibss(rdev, dev, params);
132 if (err) { 135 if (err) {
133 wdev->connect_keys = NULL; 136 wdev->connect_keys = NULL;
134 wdev->sme_state = CFG80211_SME_IDLE; 137 wdev->sme_state = CFG80211_SME_IDLE;
@@ -175,7 +178,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
175 */ 178 */
176 if (rdev->ops->del_key) 179 if (rdev->ops->del_key)
177 for (i = 0; i < 6; i++) 180 for (i = 0; i < 6; i++)
178 rdev->ops->del_key(wdev->wiphy, dev, i, false, NULL); 181 rdev_del_key(rdev, dev, i, false, NULL);
179 182
180 if (wdev->current_bss) { 183 if (wdev->current_bss) {
181 cfg80211_unhold_bss(wdev->current_bss); 184 cfg80211_unhold_bss(wdev->current_bss);
@@ -211,7 +214,7 @@ int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
211 if (!wdev->ssid_len) 214 if (!wdev->ssid_len)
212 return -ENOLINK; 215 return -ENOLINK;
213 216
214 err = rdev->ops->leave_ibss(&rdev->wiphy, dev); 217 err = rdev_leave_ibss(rdev, dev);
215 218
216 if (err) 219 if (err)
217 return err; 220 return err;
@@ -248,7 +251,9 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
248 wdev->wext.ibss.beacon_interval = 100; 251 wdev->wext.ibss.beacon_interval = 100;
249 252
250 /* try to find an IBSS channel if none requested ... */ 253 /* try to find an IBSS channel if none requested ... */
251 if (!wdev->wext.ibss.channel) { 254 if (!wdev->wext.ibss.chandef.chan) {
255 wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
256
252 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 257 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
253 struct ieee80211_supported_band *sband; 258 struct ieee80211_supported_band *sband;
254 struct ieee80211_channel *chan; 259 struct ieee80211_channel *chan;
@@ -263,15 +268,15 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
263 continue; 268 continue;
264 if (chan->flags & IEEE80211_CHAN_DISABLED) 269 if (chan->flags & IEEE80211_CHAN_DISABLED)
265 continue; 270 continue;
266 wdev->wext.ibss.channel = chan; 271 wdev->wext.ibss.chandef.chan = chan;
267 break; 272 break;
268 } 273 }
269 274
270 if (wdev->wext.ibss.channel) 275 if (wdev->wext.ibss.chandef.chan)
271 break; 276 break;
272 } 277 }
273 278
274 if (!wdev->wext.ibss.channel) 279 if (!wdev->wext.ibss.chandef.chan)
275 return -EINVAL; 280 return -EINVAL;
276 } 281 }
277 282
@@ -333,7 +338,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
333 return -EINVAL; 338 return -EINVAL;
334 } 339 }
335 340
336 if (wdev->wext.ibss.channel == chan) 341 if (wdev->wext.ibss.chandef.chan == chan)
337 return 0; 342 return 0;
338 343
339 wdev_lock(wdev); 344 wdev_lock(wdev);
@@ -346,7 +351,8 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
346 return err; 351 return err;
347 352
348 if (chan) { 353 if (chan) {
349 wdev->wext.ibss.channel = chan; 354 wdev->wext.ibss.chandef.chan = chan;
355 wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
350 wdev->wext.ibss.channel_fixed = true; 356 wdev->wext.ibss.channel_fixed = true;
351 } else { 357 } else {
352 /* cfg80211_ibss_wext_join will pick one if needed */ 358 /* cfg80211_ibss_wext_join will pick one if needed */
@@ -376,8 +382,8 @@ int cfg80211_ibss_wext_giwfreq(struct net_device *dev,
376 wdev_lock(wdev); 382 wdev_lock(wdev);
377 if (wdev->current_bss) 383 if (wdev->current_bss)
378 chan = wdev->current_bss->pub.channel; 384 chan = wdev->current_bss->pub.channel;
379 else if (wdev->wext.ibss.channel) 385 else if (wdev->wext.ibss.chandef.chan)
380 chan = wdev->wext.ibss.channel; 386 chan = wdev->wext.ibss.chandef.chan;
381 wdev_unlock(wdev); 387 wdev_unlock(wdev);
382 388
383 if (chan) { 389 if (chan) {
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index c384e77ff77a..f9d6ce5cfabb 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -3,6 +3,7 @@
3#include <net/cfg80211.h> 3#include <net/cfg80211.h>
4#include "nl80211.h" 4#include "nl80211.h"
5#include "core.h" 5#include "core.h"
6#include "rdev-ops.h"
6 7
7/* Default values, timeouts in ms */ 8/* Default values, timeouts in ms */
8#define MESH_TTL 31 9#define MESH_TTL 31
@@ -72,8 +73,6 @@ const struct mesh_config default_mesh_config = {
72 73
73const struct mesh_setup default_mesh_setup = { 74const struct mesh_setup default_mesh_setup = {
74 /* cfg80211_join_mesh() will pick a channel if needed */ 75 /* cfg80211_join_mesh() will pick a channel if needed */
75 .channel = NULL,
76 .channel_type = NL80211_CHAN_NO_HT,
77 .sync_method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET, 76 .sync_method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
78 .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP, 77 .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP,
79 .path_metric = IEEE80211_PATH_METRIC_AIRTIME, 78 .path_metric = IEEE80211_PATH_METRIC_AIRTIME,
@@ -110,13 +109,12 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
110 if (!rdev->ops->join_mesh) 109 if (!rdev->ops->join_mesh)
111 return -EOPNOTSUPP; 110 return -EOPNOTSUPP;
112 111
113 if (!setup->channel) { 112 if (!setup->chandef.chan) {
114 /* if no channel explicitly given, use preset channel */ 113 /* if no channel explicitly given, use preset channel */
115 setup->channel = wdev->preset_chan; 114 setup->chandef = wdev->preset_chandef;
116 setup->channel_type = wdev->preset_chantype;
117 } 115 }
118 116
119 if (!setup->channel) { 117 if (!setup->chandef.chan) {
120 /* if we don't have that either, use the first usable channel */ 118 /* if we don't have that either, use the first usable channel */
121 enum ieee80211_band band; 119 enum ieee80211_band band;
122 120
@@ -136,35 +134,35 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
136 IEEE80211_CHAN_DISABLED | 134 IEEE80211_CHAN_DISABLED |
137 IEEE80211_CHAN_RADAR)) 135 IEEE80211_CHAN_RADAR))
138 continue; 136 continue;
139 setup->channel = chan; 137 setup->chandef.chan = chan;
140 break; 138 break;
141 } 139 }
142 140
143 if (setup->channel) 141 if (setup->chandef.chan)
144 break; 142 break;
145 } 143 }
146 144
147 /* no usable channel ... */ 145 /* no usable channel ... */
148 if (!setup->channel) 146 if (!setup->chandef.chan)
149 return -EINVAL; 147 return -EINVAL;
150 148
151 setup->channel_type = NL80211_CHAN_NO_HT; 149 setup->chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
150 setup->chandef.center_freq1 = setup->chandef.chan->center_freq;
152 } 151 }
153 152
154 if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, setup->channel, 153 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef))
155 setup->channel_type))
156 return -EINVAL; 154 return -EINVAL;
157 155
158 err = cfg80211_can_use_chan(rdev, wdev, setup->channel, 156 err = cfg80211_can_use_chan(rdev, wdev, setup->chandef.chan,
159 CHAN_MODE_SHARED); 157 CHAN_MODE_SHARED);
160 if (err) 158 if (err)
161 return err; 159 return err;
162 160
163 err = rdev->ops->join_mesh(&rdev->wiphy, dev, conf, setup); 161 err = rdev_join_mesh(rdev, dev, conf, setup);
164 if (!err) { 162 if (!err) {
165 memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len); 163 memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len);
166 wdev->mesh_id_len = setup->mesh_id_len; 164 wdev->mesh_id_len = setup->mesh_id_len;
167 wdev->channel = setup->channel; 165 wdev->channel = setup->chandef.chan;
168 } 166 }
169 167
170 return err; 168 return err;
@@ -187,20 +185,12 @@ int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
187 return err; 185 return err;
188} 186}
189 187
190int cfg80211_set_mesh_freq(struct cfg80211_registered_device *rdev, 188int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
191 struct wireless_dev *wdev, int freq, 189 struct wireless_dev *wdev,
192 enum nl80211_channel_type channel_type) 190 struct cfg80211_chan_def *chandef)
193{ 191{
194 struct ieee80211_channel *channel;
195 int err; 192 int err;
196 193
197 channel = rdev_freq_to_chan(rdev, freq, channel_type);
198 if (!channel || !cfg80211_can_beacon_sec_chan(&rdev->wiphy,
199 channel,
200 channel_type)) {
201 return -EINVAL;
202 }
203
204 /* 194 /*
205 * Workaround for libertas (only!), it puts the interface 195 * Workaround for libertas (only!), it puts the interface
206 * into mesh mode but doesn't implement join_mesh. Instead, 196 * into mesh mode but doesn't implement join_mesh. Instead,
@@ -209,22 +199,21 @@ int cfg80211_set_mesh_freq(struct cfg80211_registered_device *rdev,
209 * compatible with 802.11 mesh. 199 * compatible with 802.11 mesh.
210 */ 200 */
211 if (rdev->ops->libertas_set_mesh_channel) { 201 if (rdev->ops->libertas_set_mesh_channel) {
212 if (channel_type != NL80211_CHAN_NO_HT) 202 if (chandef->width != NL80211_CHAN_WIDTH_20_NOHT)
213 return -EINVAL; 203 return -EINVAL;
214 204
215 if (!netif_running(wdev->netdev)) 205 if (!netif_running(wdev->netdev))
216 return -ENETDOWN; 206 return -ENETDOWN;
217 207
218 err = cfg80211_can_use_chan(rdev, wdev, channel, 208 err = cfg80211_can_use_chan(rdev, wdev, chandef->chan,
219 CHAN_MODE_SHARED); 209 CHAN_MODE_SHARED);
220 if (err) 210 if (err)
221 return err; 211 return err;
222 212
223 err = rdev->ops->libertas_set_mesh_channel(&rdev->wiphy, 213 err = rdev_libertas_set_mesh_channel(rdev, wdev->netdev,
224 wdev->netdev, 214 chandef->chan);
225 channel);
226 if (!err) 215 if (!err)
227 wdev->channel = channel; 216 wdev->channel = chandef->chan;
228 217
229 return err; 218 return err;
230 } 219 }
@@ -232,8 +221,7 @@ int cfg80211_set_mesh_freq(struct cfg80211_registered_device *rdev,
232 if (wdev->mesh_id_len) 221 if (wdev->mesh_id_len)
233 return -EBUSY; 222 return -EBUSY;
234 223
235 wdev->preset_chan = channel; 224 wdev->preset_chandef = *chandef;
236 wdev->preset_chantype = channel_type;
237 return 0; 225 return 0;
238} 226}
239 227
@@ -242,6 +230,7 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev,
242{ 230{
243 struct wireless_dev *wdev = dev->ieee80211_ptr; 231 struct wireless_dev *wdev = dev->ieee80211_ptr;
244 232
233 trace_cfg80211_notify_new_peer_candidate(dev, macaddr);
245 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_MESH_POINT)) 234 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_MESH_POINT))
246 return; 235 return;
247 236
@@ -267,7 +256,7 @@ static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
267 if (!wdev->mesh_id_len) 256 if (!wdev->mesh_id_len)
268 return -ENOTCONN; 257 return -ENOTCONN;
269 258
270 err = rdev->ops->leave_mesh(&rdev->wiphy, dev); 259 err = rdev_leave_mesh(rdev, dev);
271 if (!err) { 260 if (!err) {
272 wdev->mesh_id_len = 0; 261 wdev->mesh_id_len = 0;
273 wdev->channel = NULL; 262 wdev->channel = NULL;
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 904a7f368325..5e8123ee63fd 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -15,6 +15,8 @@
15#include <net/iw_handler.h> 15#include <net/iw_handler.h>
16#include "core.h" 16#include "core.h"
17#include "nl80211.h" 17#include "nl80211.h"
18#include "rdev-ops.h"
19
18 20
19void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len) 21void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len)
20{ 22{
@@ -22,6 +24,7 @@ void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len)
22 struct wiphy *wiphy = wdev->wiphy; 24 struct wiphy *wiphy = wdev->wiphy;
23 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 25 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
24 26
27 trace_cfg80211_send_rx_auth(dev);
25 wdev_lock(wdev); 28 wdev_lock(wdev);
26 29
27 nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL); 30 nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL);
@@ -42,6 +45,7 @@ void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss,
42 u8 *ie = mgmt->u.assoc_resp.variable; 45 u8 *ie = mgmt->u.assoc_resp.variable;
43 int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); 46 int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
44 47
48 trace_cfg80211_send_rx_assoc(dev, bss);
45 wdev_lock(wdev); 49 wdev_lock(wdev);
46 50
47 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); 51 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
@@ -98,6 +102,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
98 const u8 *bssid = mgmt->bssid; 102 const u8 *bssid = mgmt->bssid;
99 bool was_current = false; 103 bool was_current = false;
100 104
105 trace___cfg80211_send_deauth(dev);
101 ASSERT_WDEV_LOCK(wdev); 106 ASSERT_WDEV_LOCK(wdev);
102 107
103 if (wdev->current_bss && 108 if (wdev->current_bss &&
@@ -147,6 +152,7 @@ void __cfg80211_send_disassoc(struct net_device *dev,
147 u16 reason_code; 152 u16 reason_code;
148 bool from_ap; 153 bool from_ap;
149 154
155 trace___cfg80211_send_disassoc(dev);
150 ASSERT_WDEV_LOCK(wdev); 156 ASSERT_WDEV_LOCK(wdev);
151 157
152 nl80211_send_disassoc(rdev, dev, buf, len, GFP_KERNEL); 158 nl80211_send_disassoc(rdev, dev, buf, len, GFP_KERNEL);
@@ -188,6 +194,7 @@ void cfg80211_send_unprot_deauth(struct net_device *dev, const u8 *buf,
188 struct wiphy *wiphy = wdev->wiphy; 194 struct wiphy *wiphy = wdev->wiphy;
189 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 195 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
190 196
197 trace_cfg80211_send_unprot_deauth(dev);
191 nl80211_send_unprot_deauth(rdev, dev, buf, len, GFP_ATOMIC); 198 nl80211_send_unprot_deauth(rdev, dev, buf, len, GFP_ATOMIC);
192} 199}
193EXPORT_SYMBOL(cfg80211_send_unprot_deauth); 200EXPORT_SYMBOL(cfg80211_send_unprot_deauth);
@@ -199,6 +206,7 @@ void cfg80211_send_unprot_disassoc(struct net_device *dev, const u8 *buf,
199 struct wiphy *wiphy = wdev->wiphy; 206 struct wiphy *wiphy = wdev->wiphy;
200 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 207 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
201 208
209 trace_cfg80211_send_unprot_disassoc(dev);
202 nl80211_send_unprot_disassoc(rdev, dev, buf, len, GFP_ATOMIC); 210 nl80211_send_unprot_disassoc(rdev, dev, buf, len, GFP_ATOMIC);
203} 211}
204EXPORT_SYMBOL(cfg80211_send_unprot_disassoc); 212EXPORT_SYMBOL(cfg80211_send_unprot_disassoc);
@@ -209,6 +217,7 @@ void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr)
209 struct wiphy *wiphy = wdev->wiphy; 217 struct wiphy *wiphy = wdev->wiphy;
210 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 218 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
211 219
220 trace_cfg80211_send_auth_timeout(dev, addr);
212 wdev_lock(wdev); 221 wdev_lock(wdev);
213 222
214 nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL); 223 nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL);
@@ -227,6 +236,7 @@ void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr)
227 struct wiphy *wiphy = wdev->wiphy; 236 struct wiphy *wiphy = wdev->wiphy;
228 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 237 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
229 238
239 trace_cfg80211_send_assoc_timeout(dev, addr);
230 wdev_lock(wdev); 240 wdev_lock(wdev);
231 241
232 nl80211_send_assoc_timeout(rdev, dev, addr, GFP_KERNEL); 242 nl80211_send_assoc_timeout(rdev, dev, addr, GFP_KERNEL);
@@ -261,6 +271,7 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
261 } 271 }
262#endif 272#endif
263 273
274 trace_cfg80211_michael_mic_failure(dev, addr, key_type, key_id, tsc);
264 nl80211_michael_mic_failure(rdev, dev, addr, key_type, key_id, tsc, gfp); 275 nl80211_michael_mic_failure(rdev, dev, addr, key_type, key_id, tsc, gfp);
265} 276}
266EXPORT_SYMBOL(cfg80211_michael_mic_failure); 277EXPORT_SYMBOL(cfg80211_michael_mic_failure);
@@ -273,7 +284,8 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
273 const u8 *bssid, 284 const u8 *bssid,
274 const u8 *ssid, int ssid_len, 285 const u8 *ssid, int ssid_len,
275 const u8 *ie, int ie_len, 286 const u8 *ie, int ie_len,
276 const u8 *key, int key_len, int key_idx) 287 const u8 *key, int key_len, int key_idx,
288 const u8 *sae_data, int sae_data_len)
277{ 289{
278 struct wireless_dev *wdev = dev->ieee80211_ptr; 290 struct wireless_dev *wdev = dev->ieee80211_ptr;
279 struct cfg80211_auth_request req; 291 struct cfg80211_auth_request req;
@@ -293,6 +305,8 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
293 305
294 req.ie = ie; 306 req.ie = ie;
295 req.ie_len = ie_len; 307 req.ie_len = ie_len;
308 req.sae_data = sae_data;
309 req.sae_data_len = sae_data_len;
296 req.auth_type = auth_type; 310 req.auth_type = auth_type;
297 req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, 311 req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
298 WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); 312 WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
@@ -307,7 +321,7 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
307 if (err) 321 if (err)
308 goto out; 322 goto out;
309 323
310 err = rdev->ops->auth(&rdev->wiphy, dev, &req); 324 err = rdev_auth(rdev, dev, &req);
311 325
312out: 326out:
313 cfg80211_put_bss(req.bss); 327 cfg80211_put_bss(req.bss);
@@ -319,7 +333,8 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
319 enum nl80211_auth_type auth_type, const u8 *bssid, 333 enum nl80211_auth_type auth_type, const u8 *bssid,
320 const u8 *ssid, int ssid_len, 334 const u8 *ssid, int ssid_len,
321 const u8 *ie, int ie_len, 335 const u8 *ie, int ie_len,
322 const u8 *key, int key_len, int key_idx) 336 const u8 *key, int key_len, int key_idx,
337 const u8 *sae_data, int sae_data_len)
323{ 338{
324 int err; 339 int err;
325 340
@@ -327,7 +342,8 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
327 wdev_lock(dev->ieee80211_ptr); 342 wdev_lock(dev->ieee80211_ptr);
328 err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, 343 err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
329 ssid, ssid_len, ie, ie_len, 344 ssid, ssid_len, ie, ie_len,
330 key, key_len, key_idx); 345 key, key_len, key_idx,
346 sae_data, sae_data_len);
331 wdev_unlock(dev->ieee80211_ptr); 347 wdev_unlock(dev->ieee80211_ptr);
332 mutex_unlock(&rdev->devlist_mtx); 348 mutex_unlock(&rdev->devlist_mtx);
333 349
@@ -410,7 +426,7 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
410 if (err) 426 if (err)
411 goto out; 427 goto out;
412 428
413 err = rdev->ops->assoc(&rdev->wiphy, dev, &req); 429 err = rdev_assoc(rdev, dev, &req);
414 430
415out: 431out:
416 if (err) { 432 if (err) {
@@ -466,7 +482,7 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
466 !ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) 482 !ether_addr_equal(wdev->current_bss->pub.bssid, bssid)))
467 return 0; 483 return 0;
468 484
469 return rdev->ops->deauth(&rdev->wiphy, dev, &req); 485 return rdev_deauth(rdev, dev, &req);
470} 486}
471 487
472int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, 488int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
@@ -511,7 +527,7 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
511 else 527 else
512 return -ENOTCONN; 528 return -ENOTCONN;
513 529
514 return rdev->ops->disassoc(&rdev->wiphy, dev, &req); 530 return rdev_disassoc(rdev, dev, &req);
515} 531}
516 532
517int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, 533int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
@@ -552,7 +568,7 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
552 568
553 memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN); 569 memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
554 req.bssid = bssid; 570 req.bssid = bssid;
555 rdev->ops->deauth(&rdev->wiphy, dev, &req); 571 rdev_deauth(rdev, dev, &req);
556 572
557 if (wdev->current_bss) { 573 if (wdev->current_bss) {
558 cfg80211_unhold_bss(wdev->current_bss); 574 cfg80211_unhold_bss(wdev->current_bss);
@@ -563,27 +579,25 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
563 579
564void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie, 580void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie,
565 struct ieee80211_channel *chan, 581 struct ieee80211_channel *chan,
566 enum nl80211_channel_type channel_type,
567 unsigned int duration, gfp_t gfp) 582 unsigned int duration, gfp_t gfp)
568{ 583{
569 struct wiphy *wiphy = wdev->wiphy; 584 struct wiphy *wiphy = wdev->wiphy;
570 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 585 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
571 586
572 nl80211_send_remain_on_channel(rdev, wdev, cookie, chan, channel_type, 587 trace_cfg80211_ready_on_channel(wdev, cookie, chan, duration);
573 duration, gfp); 588 nl80211_send_remain_on_channel(rdev, wdev, cookie, chan, duration, gfp);
574} 589}
575EXPORT_SYMBOL(cfg80211_ready_on_channel); 590EXPORT_SYMBOL(cfg80211_ready_on_channel);
576 591
577void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie, 592void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie,
578 struct ieee80211_channel *chan, 593 struct ieee80211_channel *chan,
579 enum nl80211_channel_type channel_type,
580 gfp_t gfp) 594 gfp_t gfp)
581{ 595{
582 struct wiphy *wiphy = wdev->wiphy; 596 struct wiphy *wiphy = wdev->wiphy;
583 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 597 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
584 598
585 nl80211_send_remain_on_channel_cancel(rdev, wdev, cookie, chan, 599 trace_cfg80211_ready_on_channel_expired(wdev, cookie, chan);
586 channel_type, gfp); 600 nl80211_send_remain_on_channel_cancel(rdev, wdev, cookie, chan, gfp);
587} 601}
588EXPORT_SYMBOL(cfg80211_remain_on_channel_expired); 602EXPORT_SYMBOL(cfg80211_remain_on_channel_expired);
589 603
@@ -593,6 +607,7 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
593 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; 607 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
594 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 608 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
595 609
610 trace_cfg80211_new_sta(dev, mac_addr, sinfo);
596 nl80211_send_sta_event(rdev, dev, mac_addr, sinfo, gfp); 611 nl80211_send_sta_event(rdev, dev, mac_addr, sinfo, gfp);
597} 612}
598EXPORT_SYMBOL(cfg80211_new_sta); 613EXPORT_SYMBOL(cfg80211_new_sta);
@@ -602,6 +617,7 @@ void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp)
602 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; 617 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
603 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 618 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
604 619
620 trace_cfg80211_del_sta(dev, mac_addr);
605 nl80211_send_sta_del_event(rdev, dev, mac_addr, gfp); 621 nl80211_send_sta_del_event(rdev, dev, mac_addr, gfp);
606} 622}
607EXPORT_SYMBOL(cfg80211_del_sta); 623EXPORT_SYMBOL(cfg80211_del_sta);
@@ -682,7 +698,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
682 list_add(&nreg->list, &wdev->mgmt_registrations); 698 list_add(&nreg->list, &wdev->mgmt_registrations);
683 699
684 if (rdev->ops->mgmt_frame_register) 700 if (rdev->ops->mgmt_frame_register)
685 rdev->ops->mgmt_frame_register(wiphy, wdev, frame_type, true); 701 rdev_mgmt_frame_register(rdev, wdev, frame_type, true);
686 702
687 out: 703 out:
688 spin_unlock_bh(&wdev->mgmt_registrations_lock); 704 spin_unlock_bh(&wdev->mgmt_registrations_lock);
@@ -705,8 +721,8 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
705 if (rdev->ops->mgmt_frame_register) { 721 if (rdev->ops->mgmt_frame_register) {
706 u16 frame_type = le16_to_cpu(reg->frame_type); 722 u16 frame_type = le16_to_cpu(reg->frame_type);
707 723
708 rdev->ops->mgmt_frame_register(wiphy, wdev, 724 rdev_mgmt_frame_register(rdev, wdev,
709 frame_type, false); 725 frame_type, false);
710 } 726 }
711 727
712 list_del(&reg->list); 728 list_del(&reg->list);
@@ -736,10 +752,8 @@ void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
736int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, 752int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
737 struct wireless_dev *wdev, 753 struct wireless_dev *wdev,
738 struct ieee80211_channel *chan, bool offchan, 754 struct ieee80211_channel *chan, bool offchan,
739 enum nl80211_channel_type channel_type, 755 unsigned int wait, const u8 *buf, size_t len,
740 bool channel_type_valid, unsigned int wait, 756 bool no_cck, bool dont_wait_for_ack, u64 *cookie)
741 const u8 *buf, size_t len, bool no_cck,
742 bool dont_wait_for_ack, u64 *cookie)
743{ 757{
744 const struct ieee80211_mgmt *mgmt; 758 const struct ieee80211_mgmt *mgmt;
745 u16 stype; 759 u16 stype;
@@ -832,10 +846,9 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
832 return -EINVAL; 846 return -EINVAL;
833 847
834 /* Transmit the Action frame as requested by user space */ 848 /* Transmit the Action frame as requested by user space */
835 return rdev->ops->mgmt_tx(&rdev->wiphy, wdev, chan, offchan, 849 return rdev_mgmt_tx(rdev, wdev, chan, offchan,
836 channel_type, channel_type_valid, 850 wait, buf, len, no_cck, dont_wait_for_ack,
837 wait, buf, len, no_cck, dont_wait_for_ack, 851 cookie);
838 cookie);
839} 852}
840 853
841bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm, 854bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
@@ -854,10 +867,13 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
854 cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE); 867 cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
855 u16 stype; 868 u16 stype;
856 869
870 trace_cfg80211_rx_mgmt(wdev, freq, sig_mbm);
857 stype = (le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE) >> 4; 871 stype = (le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE) >> 4;
858 872
859 if (!(stypes->rx & BIT(stype))) 873 if (!(stypes->rx & BIT(stype))) {
874 trace_cfg80211_return_bool(false);
860 return false; 875 return false;
876 }
861 877
862 data = buf + ieee80211_hdrlen(mgmt->frame_control); 878 data = buf + ieee80211_hdrlen(mgmt->frame_control);
863 data_len = len - ieee80211_hdrlen(mgmt->frame_control); 879 data_len = len - ieee80211_hdrlen(mgmt->frame_control);
@@ -888,6 +904,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
888 904
889 spin_unlock_bh(&wdev->mgmt_registrations_lock); 905 spin_unlock_bh(&wdev->mgmt_registrations_lock);
890 906
907 trace_cfg80211_return_bool(result);
891 return result; 908 return result;
892} 909}
893EXPORT_SYMBOL(cfg80211_rx_mgmt); 910EXPORT_SYMBOL(cfg80211_rx_mgmt);
@@ -898,6 +915,8 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
898 struct wiphy *wiphy = wdev->wiphy; 915 struct wiphy *wiphy = wdev->wiphy;
899 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 916 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
900 917
918 trace_cfg80211_mgmt_tx_status(wdev, cookie, ack);
919
901 /* Indicate TX status of the Action frame to user space */ 920 /* Indicate TX status of the Action frame to user space */
902 nl80211_send_mgmt_tx_status(rdev, wdev, cookie, buf, len, ack, gfp); 921 nl80211_send_mgmt_tx_status(rdev, wdev, cookie, buf, len, ack, gfp);
903} 922}
@@ -911,6 +930,8 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
911 struct wiphy *wiphy = wdev->wiphy; 930 struct wiphy *wiphy = wdev->wiphy;
912 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 931 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
913 932
933 trace_cfg80211_cqm_rssi_notify(dev, rssi_event);
934
914 /* Indicate roaming trigger event to user space */ 935 /* Indicate roaming trigger event to user space */
915 nl80211_send_cqm_rssi_notify(rdev, dev, rssi_event, gfp); 936 nl80211_send_cqm_rssi_notify(rdev, dev, rssi_event, gfp);
916} 937}
@@ -923,6 +944,8 @@ void cfg80211_cqm_pktloss_notify(struct net_device *dev,
923 struct wiphy *wiphy = wdev->wiphy; 944 struct wiphy *wiphy = wdev->wiphy;
924 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 945 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
925 946
947 trace_cfg80211_cqm_pktloss_notify(dev, peer, num_packets);
948
926 /* Indicate roaming trigger event to user space */ 949 /* Indicate roaming trigger event to user space */
927 nl80211_send_cqm_pktloss_notify(rdev, dev, peer, num_packets, gfp); 950 nl80211_send_cqm_pktloss_notify(rdev, dev, peer, num_packets, gfp);
928} 951}
@@ -948,6 +971,7 @@ void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
948 struct wiphy *wiphy = wdev->wiphy; 971 struct wiphy *wiphy = wdev->wiphy;
949 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 972 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
950 973
974 trace_cfg80211_gtk_rekey_notify(dev, bssid);
951 nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp); 975 nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp);
952} 976}
953EXPORT_SYMBOL(cfg80211_gtk_rekey_notify); 977EXPORT_SYMBOL(cfg80211_gtk_rekey_notify);
@@ -959,17 +983,19 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
959 struct wiphy *wiphy = wdev->wiphy; 983 struct wiphy *wiphy = wdev->wiphy;
960 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 984 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
961 985
986 trace_cfg80211_pmksa_candidate_notify(dev, index, bssid, preauth);
962 nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp); 987 nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp);
963} 988}
964EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify); 989EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
965 990
966void cfg80211_ch_switch_notify(struct net_device *dev, int freq, 991void cfg80211_ch_switch_notify(struct net_device *dev,
967 enum nl80211_channel_type type) 992 struct cfg80211_chan_def *chandef)
968{ 993{
969 struct wireless_dev *wdev = dev->ieee80211_ptr; 994 struct wireless_dev *wdev = dev->ieee80211_ptr;
970 struct wiphy *wiphy = wdev->wiphy; 995 struct wiphy *wiphy = wdev->wiphy;
971 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 996 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
972 struct ieee80211_channel *chan; 997
998 trace_cfg80211_ch_switch_notify(dev, chandef);
973 999
974 wdev_lock(wdev); 1000 wdev_lock(wdev);
975 1001
@@ -977,12 +1003,8 @@ void cfg80211_ch_switch_notify(struct net_device *dev, int freq,
977 wdev->iftype != NL80211_IFTYPE_P2P_GO)) 1003 wdev->iftype != NL80211_IFTYPE_P2P_GO))
978 goto out; 1004 goto out;
979 1005
980 chan = rdev_freq_to_chan(rdev, freq, type); 1006 wdev->channel = chandef->chan;
981 if (WARN_ON(!chan)) 1007 nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL);
982 goto out;
983
984 wdev->channel = chan;
985 nl80211_ch_switch_notify(rdev, dev, freq, type, GFP_KERNEL);
986out: 1008out:
987 wdev_unlock(wdev); 1009 wdev_unlock(wdev);
988 return; 1010 return;
@@ -993,12 +1015,18 @@ bool cfg80211_rx_spurious_frame(struct net_device *dev,
993 const u8 *addr, gfp_t gfp) 1015 const u8 *addr, gfp_t gfp)
994{ 1016{
995 struct wireless_dev *wdev = dev->ieee80211_ptr; 1017 struct wireless_dev *wdev = dev->ieee80211_ptr;
1018 bool ret;
1019
1020 trace_cfg80211_rx_spurious_frame(dev, addr);
996 1021
997 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && 1022 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
998 wdev->iftype != NL80211_IFTYPE_P2P_GO)) 1023 wdev->iftype != NL80211_IFTYPE_P2P_GO)) {
1024 trace_cfg80211_return_bool(false);
999 return false; 1025 return false;
1000 1026 }
1001 return nl80211_unexpected_frame(dev, addr, gfp); 1027 ret = nl80211_unexpected_frame(dev, addr, gfp);
1028 trace_cfg80211_return_bool(ret);
1029 return ret;
1002} 1030}
1003EXPORT_SYMBOL(cfg80211_rx_spurious_frame); 1031EXPORT_SYMBOL(cfg80211_rx_spurious_frame);
1004 1032
@@ -1006,12 +1034,18 @@ bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
1006 const u8 *addr, gfp_t gfp) 1034 const u8 *addr, gfp_t gfp)
1007{ 1035{
1008 struct wireless_dev *wdev = dev->ieee80211_ptr; 1036 struct wireless_dev *wdev = dev->ieee80211_ptr;
1037 bool ret;
1038
1039 trace_cfg80211_rx_unexpected_4addr_frame(dev, addr);
1009 1040
1010 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && 1041 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
1011 wdev->iftype != NL80211_IFTYPE_P2P_GO && 1042 wdev->iftype != NL80211_IFTYPE_P2P_GO &&
1012 wdev->iftype != NL80211_IFTYPE_AP_VLAN)) 1043 wdev->iftype != NL80211_IFTYPE_AP_VLAN)) {
1044 trace_cfg80211_return_bool(false);
1013 return false; 1045 return false;
1014 1046 }
1015 return nl80211_unexpected_4addr_frame(dev, addr, gfp); 1047 ret = nl80211_unexpected_4addr_frame(dev, addr, gfp);
1048 trace_cfg80211_return_bool(ret);
1049 return ret;
1016} 1050}
1017EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame); 1051EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 0418a6d5c1a6..f45706adaf34 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -22,8 +22,8 @@
22#include "core.h" 22#include "core.h"
23#include "nl80211.h" 23#include "nl80211.h"
24#include "reg.h" 24#include "reg.h"
25#include "rdev-ops.h"
25 26
26static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type);
27static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, 27static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
28 struct genl_info *info, 28 struct genl_info *info,
29 struct cfg80211_crypto_settings *settings, 29 struct cfg80211_crypto_settings *settings,
@@ -223,8 +223,13 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
223 [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING, 223 [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING,
224 .len = 20-1 }, 224 .len = 20-1 },
225 [NL80211_ATTR_WIPHY_TXQ_PARAMS] = { .type = NLA_NESTED }, 225 [NL80211_ATTR_WIPHY_TXQ_PARAMS] = { .type = NLA_NESTED },
226
226 [NL80211_ATTR_WIPHY_FREQ] = { .type = NLA_U32 }, 227 [NL80211_ATTR_WIPHY_FREQ] = { .type = NLA_U32 },
227 [NL80211_ATTR_WIPHY_CHANNEL_TYPE] = { .type = NLA_U32 }, 228 [NL80211_ATTR_WIPHY_CHANNEL_TYPE] = { .type = NLA_U32 },
229 [NL80211_ATTR_CHANNEL_WIDTH] = { .type = NLA_U32 },
230 [NL80211_ATTR_CENTER_FREQ1] = { .type = NLA_U32 },
231 [NL80211_ATTR_CENTER_FREQ2] = { .type = NLA_U32 },
232
228 [NL80211_ATTR_WIPHY_RETRY_SHORT] = { .type = NLA_U8 }, 233 [NL80211_ATTR_WIPHY_RETRY_SHORT] = { .type = NLA_U8 },
229 [NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 }, 234 [NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 },
230 [NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 }, 235 [NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 },
@@ -355,6 +360,11 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
355 [NL80211_ATTR_BG_SCAN_PERIOD] = { .type = NLA_U16 }, 360 [NL80211_ATTR_BG_SCAN_PERIOD] = { .type = NLA_U16 },
356 [NL80211_ATTR_WDEV] = { .type = NLA_U64 }, 361 [NL80211_ATTR_WDEV] = { .type = NLA_U64 },
357 [NL80211_ATTR_USER_REG_HINT_TYPE] = { .type = NLA_U32 }, 362 [NL80211_ATTR_USER_REG_HINT_TYPE] = { .type = NLA_U32 },
363 [NL80211_ATTR_SAE_DATA] = { .type = NLA_BINARY, },
364 [NL80211_ATTR_VHT_CAPABILITY] = { .len = NL80211_VHT_CAPABILITY_LEN },
365 [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
366 [NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 },
367 [NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 },
358}; 368};
359 369
360/* policy for the key attributes */ 370/* policy for the key attributes */
@@ -690,7 +700,7 @@ static int nl80211_parse_key(struct genl_info *info, struct key_parse *k)
690 700
691static struct cfg80211_cached_keys * 701static struct cfg80211_cached_keys *
692nl80211_parse_connkeys(struct cfg80211_registered_device *rdev, 702nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
693 struct nlattr *keys) 703 struct nlattr *keys, bool *no_ht)
694{ 704{
695 struct key_parse parse; 705 struct key_parse parse;
696 struct nlattr *key; 706 struct nlattr *key;
@@ -733,6 +743,12 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
733 result->params[parse.idx].key_len = parse.p.key_len; 743 result->params[parse.idx].key_len = parse.p.key_len;
734 result->params[parse.idx].key = result->data[parse.idx]; 744 result->params[parse.idx].key = result->data[parse.idx];
735 memcpy(result->data[parse.idx], parse.p.key, parse.p.key_len); 745 memcpy(result->data[parse.idx], parse.p.key, parse.p.key_len);
746
747 if (parse.p.cipher == WLAN_CIPHER_SUITE_WEP40 ||
748 parse.p.cipher == WLAN_CIPHER_SUITE_WEP104) {
749 if (no_ht)
750 *no_ht = true;
751 }
736 } 752 }
737 753
738 return result; 754 return result;
@@ -943,7 +959,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
943 dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) { 959 dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) {
944 u32 tx_ant = 0, rx_ant = 0; 960 u32 tx_ant = 0, rx_ant = 0;
945 int res; 961 int res;
946 res = dev->ops->get_antenna(&dev->wiphy, &tx_ant, &rx_ant); 962 res = rdev_get_antenna(dev, &tx_ant, &rx_ant);
947 if (!res) { 963 if (!res) {
948 if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX, 964 if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX,
949 tx_ant) || 965 tx_ant) ||
@@ -1101,6 +1117,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
1101 goto nla_put_failure; 1117 goto nla_put_failure;
1102 } 1118 }
1103 CMD(start_p2p_device, START_P2P_DEVICE); 1119 CMD(start_p2p_device, START_P2P_DEVICE);
1120 CMD(set_mcast_rate, SET_MCAST_RATE);
1104 1121
1105#ifdef CONFIG_NL80211_TESTMODE 1122#ifdef CONFIG_NL80211_TESTMODE
1106 CMD(testmode_cmd, TESTMODE); 1123 CMD(testmode_cmd, TESTMODE);
@@ -1350,51 +1367,83 @@ static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
1350 wdev->iftype == NL80211_IFTYPE_P2P_GO; 1367 wdev->iftype == NL80211_IFTYPE_P2P_GO;
1351} 1368}
1352 1369
1353static bool nl80211_valid_channel_type(struct genl_info *info, 1370static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
1354 enum nl80211_channel_type *channel_type) 1371 struct genl_info *info,
1372 struct cfg80211_chan_def *chandef)
1355{ 1373{
1356 enum nl80211_channel_type tmp; 1374 u32 control_freq;
1357 1375
1358 if (!info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) 1376 if (!info->attrs[NL80211_ATTR_WIPHY_FREQ])
1359 return false; 1377 return -EINVAL;
1360 1378
1361 tmp = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]); 1379 control_freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
1362 if (tmp != NL80211_CHAN_NO_HT &&
1363 tmp != NL80211_CHAN_HT20 &&
1364 tmp != NL80211_CHAN_HT40PLUS &&
1365 tmp != NL80211_CHAN_HT40MINUS)
1366 return false;
1367 1380
1368 if (channel_type) 1381 chandef->chan = ieee80211_get_channel(&rdev->wiphy, control_freq);
1369 *channel_type = tmp; 1382 chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
1383 chandef->center_freq1 = control_freq;
1384 chandef->center_freq2 = 0;
1370 1385
1371 return true; 1386 /* Primary channel not allowed */
1387 if (!chandef->chan || chandef->chan->flags & IEEE80211_CHAN_DISABLED)
1388 return -EINVAL;
1389
1390 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
1391 enum nl80211_channel_type chantype;
1392
1393 chantype = nla_get_u32(
1394 info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
1395
1396 switch (chantype) {
1397 case NL80211_CHAN_NO_HT:
1398 case NL80211_CHAN_HT20:
1399 case NL80211_CHAN_HT40PLUS:
1400 case NL80211_CHAN_HT40MINUS:
1401 cfg80211_chandef_create(chandef, chandef->chan,
1402 chantype);
1403 break;
1404 default:
1405 return -EINVAL;
1406 }
1407 } else if (info->attrs[NL80211_ATTR_CHANNEL_WIDTH]) {
1408 chandef->width =
1409 nla_get_u32(info->attrs[NL80211_ATTR_CHANNEL_WIDTH]);
1410 if (info->attrs[NL80211_ATTR_CENTER_FREQ1])
1411 chandef->center_freq1 =
1412 nla_get_u32(
1413 info->attrs[NL80211_ATTR_CENTER_FREQ1]);
1414 if (info->attrs[NL80211_ATTR_CENTER_FREQ2])
1415 chandef->center_freq2 =
1416 nla_get_u32(
1417 info->attrs[NL80211_ATTR_CENTER_FREQ2]);
1418 }
1419
1420 if (!cfg80211_chandef_valid(chandef))
1421 return -EINVAL;
1422
1423 if (!cfg80211_chandef_usable(&rdev->wiphy, chandef,
1424 IEEE80211_CHAN_DISABLED))
1425 return -EINVAL;
1426
1427 return 0;
1372} 1428}
1373 1429
1374static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, 1430static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
1375 struct wireless_dev *wdev, 1431 struct wireless_dev *wdev,
1376 struct genl_info *info) 1432 struct genl_info *info)
1377{ 1433{
1378 struct ieee80211_channel *channel; 1434 struct cfg80211_chan_def chandef;
1379 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
1380 u32 freq;
1381 int result; 1435 int result;
1382 enum nl80211_iftype iftype = NL80211_IFTYPE_MONITOR; 1436 enum nl80211_iftype iftype = NL80211_IFTYPE_MONITOR;
1383 1437
1384 if (wdev) 1438 if (wdev)
1385 iftype = wdev->iftype; 1439 iftype = wdev->iftype;
1386 1440
1387 if (!info->attrs[NL80211_ATTR_WIPHY_FREQ])
1388 return -EINVAL;
1389
1390 if (!nl80211_can_set_dev_channel(wdev)) 1441 if (!nl80211_can_set_dev_channel(wdev))
1391 return -EOPNOTSUPP; 1442 return -EOPNOTSUPP;
1392 1443
1393 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] && 1444 result = nl80211_parse_chandef(rdev, info, &chandef);
1394 !nl80211_valid_channel_type(info, &channel_type)) 1445 if (result)
1395 return -EINVAL; 1446 return result;
1396
1397 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
1398 1447
1399 mutex_lock(&rdev->devlist_mtx); 1448 mutex_lock(&rdev->devlist_mtx);
1400 switch (iftype) { 1449 switch (iftype) {
@@ -1404,22 +1453,18 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
1404 result = -EBUSY; 1453 result = -EBUSY;
1405 break; 1454 break;
1406 } 1455 }
1407 channel = rdev_freq_to_chan(rdev, freq, channel_type); 1456 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef)) {
1408 if (!channel || !cfg80211_can_beacon_sec_chan(&rdev->wiphy,
1409 channel,
1410 channel_type)) {
1411 result = -EINVAL; 1457 result = -EINVAL;
1412 break; 1458 break;
1413 } 1459 }
1414 wdev->preset_chan = channel; 1460 wdev->preset_chandef = chandef;
1415 wdev->preset_chantype = channel_type;
1416 result = 0; 1461 result = 0;
1417 break; 1462 break;
1418 case NL80211_IFTYPE_MESH_POINT: 1463 case NL80211_IFTYPE_MESH_POINT:
1419 result = cfg80211_set_mesh_freq(rdev, wdev, freq, channel_type); 1464 result = cfg80211_set_mesh_channel(rdev, wdev, &chandef);
1420 break; 1465 break;
1421 case NL80211_IFTYPE_MONITOR: 1466 case NL80211_IFTYPE_MONITOR:
1422 result = cfg80211_set_monitor_channel(rdev, freq, channel_type); 1467 result = cfg80211_set_monitor_channel(rdev, &chandef);
1423 break; 1468 break;
1424 default: 1469 default:
1425 result = -EINVAL; 1470 result = -EINVAL;
@@ -1457,7 +1502,7 @@ static int nl80211_set_wds_peer(struct sk_buff *skb, struct genl_info *info)
1457 return -EOPNOTSUPP; 1502 return -EOPNOTSUPP;
1458 1503
1459 bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); 1504 bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
1460 return rdev->ops->set_wds_peer(wdev->wiphy, dev, bssid); 1505 return rdev_set_wds_peer(rdev, dev, bssid);
1461} 1506}
1462 1507
1463 1508
@@ -1507,10 +1552,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
1507 result = 0; 1552 result = 0;
1508 1553
1509 mutex_lock(&rdev->mtx); 1554 mutex_lock(&rdev->mtx);
1510 } else if (nl80211_can_set_dev_channel(netdev->ieee80211_ptr)) 1555 } else
1511 wdev = netdev->ieee80211_ptr; 1556 wdev = netdev->ieee80211_ptr;
1512 else
1513 wdev = NULL;
1514 1557
1515 /* 1558 /*
1516 * end workaround code, by now the rdev is available 1559 * end workaround code, by now the rdev is available
@@ -1562,24 +1605,29 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
1562 if (result) 1605 if (result)
1563 goto bad_res; 1606 goto bad_res;
1564 1607
1565 result = rdev->ops->set_txq_params(&rdev->wiphy, 1608 result = rdev_set_txq_params(rdev, netdev,
1566 netdev, 1609 &txq_params);
1567 &txq_params);
1568 if (result) 1610 if (result)
1569 goto bad_res; 1611 goto bad_res;
1570 } 1612 }
1571 } 1613 }
1572 1614
1573 if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { 1615 if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
1574 result = __nl80211_set_channel(rdev, wdev, info); 1616 result = __nl80211_set_channel(rdev,
1617 nl80211_can_set_dev_channel(wdev) ? wdev : NULL,
1618 info);
1575 if (result) 1619 if (result)
1576 goto bad_res; 1620 goto bad_res;
1577 } 1621 }
1578 1622
1579 if (info->attrs[NL80211_ATTR_WIPHY_TX_POWER_SETTING]) { 1623 if (info->attrs[NL80211_ATTR_WIPHY_TX_POWER_SETTING]) {
1624 struct wireless_dev *txp_wdev = wdev;
1580 enum nl80211_tx_power_setting type; 1625 enum nl80211_tx_power_setting type;
1581 int idx, mbm = 0; 1626 int idx, mbm = 0;
1582 1627
1628 if (!(rdev->wiphy.features & NL80211_FEATURE_VIF_TXPOWER))
1629 txp_wdev = NULL;
1630
1583 if (!rdev->ops->set_tx_power) { 1631 if (!rdev->ops->set_tx_power) {
1584 result = -EOPNOTSUPP; 1632 result = -EOPNOTSUPP;
1585 goto bad_res; 1633 goto bad_res;
@@ -1599,7 +1647,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
1599 mbm = nla_get_u32(info->attrs[idx]); 1647 mbm = nla_get_u32(info->attrs[idx]);
1600 } 1648 }
1601 1649
1602 result = rdev->ops->set_tx_power(&rdev->wiphy, type, mbm); 1650 result = rdev_set_tx_power(rdev, txp_wdev, type, mbm);
1603 if (result) 1651 if (result)
1604 goto bad_res; 1652 goto bad_res;
1605 } 1653 }
@@ -1628,7 +1676,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
1628 tx_ant = tx_ant & rdev->wiphy.available_antennas_tx; 1676 tx_ant = tx_ant & rdev->wiphy.available_antennas_tx;
1629 rx_ant = rx_ant & rdev->wiphy.available_antennas_rx; 1677 rx_ant = rx_ant & rdev->wiphy.available_antennas_rx;
1630 1678
1631 result = rdev->ops->set_antenna(&rdev->wiphy, tx_ant, rx_ant); 1679 result = rdev_set_antenna(rdev, tx_ant, rx_ant);
1632 if (result) 1680 if (result)
1633 goto bad_res; 1681 goto bad_res;
1634 } 1682 }
@@ -1713,7 +1761,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
1713 if (changed & WIPHY_PARAM_COVERAGE_CLASS) 1761 if (changed & WIPHY_PARAM_COVERAGE_CLASS)
1714 rdev->wiphy.coverage_class = coverage_class; 1762 rdev->wiphy.coverage_class = coverage_class;
1715 1763
1716 result = rdev->ops->set_wiphy_params(&rdev->wiphy, changed); 1764 result = rdev_set_wiphy_params(rdev, changed);
1717 if (result) { 1765 if (result) {
1718 rdev->wiphy.retry_short = old_retry_short; 1766 rdev->wiphy.retry_short = old_retry_short;
1719 rdev->wiphy.retry_long = old_retry_long; 1767 rdev->wiphy.retry_long = old_retry_long;
@@ -1736,6 +1784,35 @@ static inline u64 wdev_id(struct wireless_dev *wdev)
1736 ((u64)wiphy_to_dev(wdev->wiphy)->wiphy_idx << 32); 1784 ((u64)wiphy_to_dev(wdev->wiphy)->wiphy_idx << 32);
1737} 1785}
1738 1786
1787static int nl80211_send_chandef(struct sk_buff *msg,
1788 struct cfg80211_chan_def *chandef)
1789{
1790 WARN_ON(!cfg80211_chandef_valid(chandef));
1791
1792 if (nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ,
1793 chandef->chan->center_freq))
1794 return -ENOBUFS;
1795 switch (chandef->width) {
1796 case NL80211_CHAN_WIDTH_20_NOHT:
1797 case NL80211_CHAN_WIDTH_20:
1798 case NL80211_CHAN_WIDTH_40:
1799 if (nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE,
1800 cfg80211_get_chandef_type(chandef)))
1801 return -ENOBUFS;
1802 break;
1803 default:
1804 break;
1805 }
1806 if (nla_put_u32(msg, NL80211_ATTR_CHANNEL_WIDTH, chandef->width))
1807 return -ENOBUFS;
1808 if (nla_put_u32(msg, NL80211_ATTR_CENTER_FREQ1, chandef->center_freq1))
1809 return -ENOBUFS;
1810 if (chandef->center_freq2 &&
1811 nla_put_u32(msg, NL80211_ATTR_CENTER_FREQ2, chandef->center_freq2))
1812 return -ENOBUFS;
1813 return 0;
1814}
1815
1739static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, 1816static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
1740 struct cfg80211_registered_device *rdev, 1817 struct cfg80211_registered_device *rdev,
1741 struct wireless_dev *wdev) 1818 struct wireless_dev *wdev)
@@ -1762,16 +1839,18 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
1762 goto nla_put_failure; 1839 goto nla_put_failure;
1763 1840
1764 if (rdev->ops->get_channel) { 1841 if (rdev->ops->get_channel) {
1765 struct ieee80211_channel *chan; 1842 int ret;
1766 enum nl80211_channel_type channel_type; 1843 struct cfg80211_chan_def chandef;
1767 1844
1768 chan = rdev->ops->get_channel(&rdev->wiphy, wdev, 1845 ret = rdev_get_channel(rdev, wdev, &chandef);
1769 &channel_type); 1846 if (ret == 0) {
1770 if (chan && 1847 if (nl80211_send_chandef(msg, &chandef))
1771 (nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, 1848 goto nla_put_failure;
1772 chan->center_freq) || 1849 }
1773 nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, 1850 }
1774 channel_type))) 1851
1852 if (wdev->ssid_len) {
1853 if (nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
1775 goto nla_put_failure; 1854 goto nla_put_failure;
1776 } 1855 }
1777 1856
@@ -2014,9 +2093,9 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
2014 err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? 2093 err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
2015 info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, 2094 info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
2016 &flags); 2095 &flags);
2017 wdev = rdev->ops->add_virtual_intf(&rdev->wiphy, 2096 wdev = rdev_add_virtual_intf(rdev,
2018 nla_data(info->attrs[NL80211_ATTR_IFNAME]), 2097 nla_data(info->attrs[NL80211_ATTR_IFNAME]),
2019 type, err ? NULL : &flags, &params); 2098 type, err ? NULL : &flags, &params);
2020 if (IS_ERR(wdev)) { 2099 if (IS_ERR(wdev)) {
2021 nlmsg_free(msg); 2100 nlmsg_free(msg);
2022 return PTR_ERR(wdev); 2101 return PTR_ERR(wdev);
@@ -2083,7 +2162,7 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
2083 if (!wdev->netdev) 2162 if (!wdev->netdev)
2084 info->user_ptr[1] = NULL; 2163 info->user_ptr[1] = NULL;
2085 2164
2086 return rdev->ops->del_virtual_intf(&rdev->wiphy, wdev); 2165 return rdev_del_virtual_intf(rdev, wdev);
2087} 2166}
2088 2167
2089static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info) 2168static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info)
@@ -2100,7 +2179,7 @@ static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info)
2100 2179
2101 noack_map = nla_get_u16(info->attrs[NL80211_ATTR_NOACK_MAP]); 2180 noack_map = nla_get_u16(info->attrs[NL80211_ATTR_NOACK_MAP]);
2102 2181
2103 return rdev->ops->set_noack_map(&rdev->wiphy, dev, noack_map); 2182 return rdev_set_noack_map(rdev, dev, noack_map);
2104} 2183}
2105 2184
2106struct get_key_cookie { 2185struct get_key_cookie {
@@ -2210,8 +2289,8 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
2210 !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) 2289 !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
2211 return -ENOENT; 2290 return -ENOENT;
2212 2291
2213 err = rdev->ops->get_key(&rdev->wiphy, dev, key_idx, pairwise, 2292 err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie,
2214 mac_addr, &cookie, get_key_callback); 2293 get_key_callback);
2215 2294
2216 if (err) 2295 if (err)
2217 goto free_msg; 2296 goto free_msg;
@@ -2259,7 +2338,7 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
2259 if (err) 2338 if (err)
2260 goto out; 2339 goto out;
2261 2340
2262 err = rdev->ops->set_default_key(&rdev->wiphy, dev, key.idx, 2341 err = rdev_set_default_key(rdev, dev, key.idx,
2263 key.def_uni, key.def_multi); 2342 key.def_uni, key.def_multi);
2264 2343
2265 if (err) 2344 if (err)
@@ -2283,8 +2362,7 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
2283 if (err) 2362 if (err)
2284 goto out; 2363 goto out;
2285 2364
2286 err = rdev->ops->set_default_mgmt_key(&rdev->wiphy, 2365 err = rdev_set_default_mgmt_key(rdev, dev, key.idx);
2287 dev, key.idx);
2288 if (err) 2366 if (err)
2289 goto out; 2367 goto out;
2290 2368
@@ -2340,9 +2418,9 @@ static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
2340 wdev_lock(dev->ieee80211_ptr); 2418 wdev_lock(dev->ieee80211_ptr);
2341 err = nl80211_key_allowed(dev->ieee80211_ptr); 2419 err = nl80211_key_allowed(dev->ieee80211_ptr);
2342 if (!err) 2420 if (!err)
2343 err = rdev->ops->add_key(&rdev->wiphy, dev, key.idx, 2421 err = rdev_add_key(rdev, dev, key.idx,
2344 key.type == NL80211_KEYTYPE_PAIRWISE, 2422 key.type == NL80211_KEYTYPE_PAIRWISE,
2345 mac_addr, &key.p); 2423 mac_addr, &key.p);
2346 wdev_unlock(dev->ieee80211_ptr); 2424 wdev_unlock(dev->ieee80211_ptr);
2347 2425
2348 return err; 2426 return err;
@@ -2386,9 +2464,9 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
2386 err = -ENOENT; 2464 err = -ENOENT;
2387 2465
2388 if (!err) 2466 if (!err)
2389 err = rdev->ops->del_key(&rdev->wiphy, dev, key.idx, 2467 err = rdev_del_key(rdev, dev, key.idx,
2390 key.type == NL80211_KEYTYPE_PAIRWISE, 2468 key.type == NL80211_KEYTYPE_PAIRWISE,
2391 mac_addr); 2469 mac_addr);
2392 2470
2393#ifdef CONFIG_CFG80211_WEXT 2471#ifdef CONFIG_CFG80211_WEXT
2394 if (!err) { 2472 if (!err) {
@@ -2476,11 +2554,10 @@ static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev,
2476 wdev->iftype != NL80211_IFTYPE_P2P_GO) 2554 wdev->iftype != NL80211_IFTYPE_P2P_GO)
2477 continue; 2555 continue;
2478 2556
2479 if (!wdev->preset_chan) 2557 if (!wdev->preset_chandef.chan)
2480 continue; 2558 continue;
2481 2559
2482 params->channel = wdev->preset_chan; 2560 params->chandef = wdev->preset_chandef;
2483 params->channel_type = wdev->preset_chantype;
2484 ret = true; 2561 ret = true;
2485 break; 2562 break;
2486 } 2563 }
@@ -2490,6 +2567,30 @@ static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev,
2490 return ret; 2567 return ret;
2491} 2568}
2492 2569
2570static bool nl80211_valid_auth_type(struct cfg80211_registered_device *rdev,
2571 enum nl80211_auth_type auth_type,
2572 enum nl80211_commands cmd)
2573{
2574 if (auth_type > NL80211_AUTHTYPE_MAX)
2575 return false;
2576
2577 switch (cmd) {
2578 case NL80211_CMD_AUTHENTICATE:
2579 if (!(rdev->wiphy.features & NL80211_FEATURE_SAE) &&
2580 auth_type == NL80211_AUTHTYPE_SAE)
2581 return false;
2582 return true;
2583 case NL80211_CMD_CONNECT:
2584 case NL80211_CMD_START_AP:
2585 /* SAE not supported yet */
2586 if (auth_type == NL80211_AUTHTYPE_SAE)
2587 return false;
2588 return true;
2589 default:
2590 return false;
2591 }
2592}
2593
2493static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) 2594static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
2494{ 2595{
2495 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 2596 struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -2559,7 +2660,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
2559 if (info->attrs[NL80211_ATTR_AUTH_TYPE]) { 2660 if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
2560 params.auth_type = nla_get_u32( 2661 params.auth_type = nla_get_u32(
2561 info->attrs[NL80211_ATTR_AUTH_TYPE]); 2662 info->attrs[NL80211_ATTR_AUTH_TYPE]);
2562 if (!nl80211_valid_auth_type(params.auth_type)) 2663 if (!nl80211_valid_auth_type(rdev, params.auth_type,
2664 NL80211_CMD_START_AP))
2563 return -EINVAL; 2665 return -EINVAL;
2564 } else 2666 } else
2565 params.auth_type = NL80211_AUTHTYPE_AUTOMATIC; 2667 params.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
@@ -2576,43 +2678,59 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
2576 info->attrs[NL80211_ATTR_INACTIVITY_TIMEOUT]); 2678 info->attrs[NL80211_ATTR_INACTIVITY_TIMEOUT]);
2577 } 2679 }
2578 2680
2579 if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { 2681 if (info->attrs[NL80211_ATTR_P2P_CTWINDOW]) {
2580 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; 2682 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
2581
2582 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
2583 !nl80211_valid_channel_type(info, &channel_type))
2584 return -EINVAL; 2683 return -EINVAL;
2684 params.p2p_ctwindow =
2685 nla_get_u8(info->attrs[NL80211_ATTR_P2P_CTWINDOW]);
2686 if (params.p2p_ctwindow > 127)
2687 return -EINVAL;
2688 if (params.p2p_ctwindow != 0 &&
2689 !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_CTWIN))
2690 return -EINVAL;
2691 }
2692
2693 if (info->attrs[NL80211_ATTR_P2P_OPPPS]) {
2694 u8 tmp;
2585 2695
2586 params.channel = rdev_freq_to_chan(rdev, 2696 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
2587 nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]),
2588 channel_type);
2589 if (!params.channel)
2590 return -EINVAL; 2697 return -EINVAL;
2591 params.channel_type = channel_type; 2698 tmp = nla_get_u8(info->attrs[NL80211_ATTR_P2P_OPPPS]);
2592 } else if (wdev->preset_chan) { 2699 if (tmp > 1)
2593 params.channel = wdev->preset_chan; 2700 return -EINVAL;
2594 params.channel_type = wdev->preset_chantype; 2701 params.p2p_opp_ps = tmp;
2702 if (params.p2p_opp_ps != 0 &&
2703 !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_OPPPS))
2704 return -EINVAL;
2705 }
2706
2707 if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
2708 err = nl80211_parse_chandef(rdev, info, &params.chandef);
2709 if (err)
2710 return err;
2711 } else if (wdev->preset_chandef.chan) {
2712 params.chandef = wdev->preset_chandef;
2595 } else if (!nl80211_get_ap_channel(rdev, &params)) 2713 } else if (!nl80211_get_ap_channel(rdev, &params))
2596 return -EINVAL; 2714 return -EINVAL;
2597 2715
2598 if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, params.channel, 2716 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef))
2599 params.channel_type))
2600 return -EINVAL; 2717 return -EINVAL;
2601 2718
2602 mutex_lock(&rdev->devlist_mtx); 2719 mutex_lock(&rdev->devlist_mtx);
2603 err = cfg80211_can_use_chan(rdev, wdev, params.channel, 2720 err = cfg80211_can_use_chan(rdev, wdev, params.chandef.chan,
2604 CHAN_MODE_SHARED); 2721 CHAN_MODE_SHARED);
2605 mutex_unlock(&rdev->devlist_mtx); 2722 mutex_unlock(&rdev->devlist_mtx);
2606 2723
2607 if (err) 2724 if (err)
2608 return err; 2725 return err;
2609 2726
2610 err = rdev->ops->start_ap(&rdev->wiphy, dev, &params); 2727 err = rdev_start_ap(rdev, dev, &params);
2611 if (!err) { 2728 if (!err) {
2612 wdev->preset_chan = params.channel; 2729 wdev->preset_chandef = params.chandef;
2613 wdev->preset_chantype = params.channel_type;
2614 wdev->beacon_interval = params.beacon_interval; 2730 wdev->beacon_interval = params.beacon_interval;
2615 wdev->channel = params.channel; 2731 wdev->channel = params.chandef.chan;
2732 wdev->ssid_len = params.ssid_len;
2733 memcpy(wdev->ssid, params.ssid, wdev->ssid_len);
2616 } 2734 }
2617 return err; 2735 return err;
2618} 2736}
@@ -2639,7 +2757,7 @@ static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info)
2639 if (err) 2757 if (err)
2640 return err; 2758 return err;
2641 2759
2642 return rdev->ops->change_beacon(&rdev->wiphy, dev, &params); 2760 return rdev_change_beacon(rdev, dev, &params);
2643} 2761}
2644 2762
2645static int nl80211_stop_ap(struct sk_buff *skb, struct genl_info *info) 2763static int nl80211_stop_ap(struct sk_buff *skb, struct genl_info *info)
@@ -2744,29 +2862,52 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
2744 2862
2745 rate = nla_nest_start(msg, attr); 2863 rate = nla_nest_start(msg, attr);
2746 if (!rate) 2864 if (!rate)
2747 goto nla_put_failure; 2865 return false;
2748 2866
2749 /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */ 2867 /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
2750 bitrate = cfg80211_calculate_bitrate(info); 2868 bitrate = cfg80211_calculate_bitrate(info);
2751 /* report 16-bit bitrate only if we can */ 2869 /* report 16-bit bitrate only if we can */
2752 bitrate_compat = bitrate < (1UL << 16) ? bitrate : 0; 2870 bitrate_compat = bitrate < (1UL << 16) ? bitrate : 0;
2753 if ((bitrate > 0 && 2871 if (bitrate > 0 &&
2754 nla_put_u32(msg, NL80211_RATE_INFO_BITRATE32, bitrate)) || 2872 nla_put_u32(msg, NL80211_RATE_INFO_BITRATE32, bitrate))
2755 (bitrate_compat > 0 && 2873 return false;
2756 nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate_compat)) || 2874 if (bitrate_compat > 0 &&
2757 ((info->flags & RATE_INFO_FLAGS_MCS) && 2875 nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate_compat))
2758 nla_put_u8(msg, NL80211_RATE_INFO_MCS, info->mcs)) || 2876 return false;
2759 ((info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) && 2877
2760 nla_put_flag(msg, NL80211_RATE_INFO_40_MHZ_WIDTH)) || 2878 if (info->flags & RATE_INFO_FLAGS_MCS) {
2761 ((info->flags & RATE_INFO_FLAGS_SHORT_GI) && 2879 if (nla_put_u8(msg, NL80211_RATE_INFO_MCS, info->mcs))
2762 nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI))) 2880 return false;
2763 goto nla_put_failure; 2881 if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH &&
2882 nla_put_flag(msg, NL80211_RATE_INFO_40_MHZ_WIDTH))
2883 return false;
2884 if (info->flags & RATE_INFO_FLAGS_SHORT_GI &&
2885 nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI))
2886 return false;
2887 } else if (info->flags & RATE_INFO_FLAGS_VHT_MCS) {
2888 if (nla_put_u8(msg, NL80211_RATE_INFO_VHT_MCS, info->mcs))
2889 return false;
2890 if (nla_put_u8(msg, NL80211_RATE_INFO_VHT_NSS, info->nss))
2891 return false;
2892 if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH &&
2893 nla_put_flag(msg, NL80211_RATE_INFO_40_MHZ_WIDTH))
2894 return false;
2895 if (info->flags & RATE_INFO_FLAGS_80_MHZ_WIDTH &&
2896 nla_put_flag(msg, NL80211_RATE_INFO_80_MHZ_WIDTH))
2897 return false;
2898 if (info->flags & RATE_INFO_FLAGS_80P80_MHZ_WIDTH &&
2899 nla_put_flag(msg, NL80211_RATE_INFO_80P80_MHZ_WIDTH))
2900 return false;
2901 if (info->flags & RATE_INFO_FLAGS_160_MHZ_WIDTH &&
2902 nla_put_flag(msg, NL80211_RATE_INFO_160_MHZ_WIDTH))
2903 return false;
2904 if (info->flags & RATE_INFO_FLAGS_SHORT_GI &&
2905 nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI))
2906 return false;
2907 }
2764 2908
2765 nla_nest_end(msg, rate); 2909 nla_nest_end(msg, rate);
2766 return true; 2910 return true;
2767
2768nla_put_failure:
2769 return false;
2770} 2911}
2771 2912
2772static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq, 2913static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq,
@@ -2923,8 +3064,8 @@ static int nl80211_dump_station(struct sk_buff *skb,
2923 3064
2924 while (1) { 3065 while (1) {
2925 memset(&sinfo, 0, sizeof(sinfo)); 3066 memset(&sinfo, 0, sizeof(sinfo));
2926 err = dev->ops->dump_station(&dev->wiphy, netdev, sta_idx, 3067 err = rdev_dump_station(dev, netdev, sta_idx,
2927 mac_addr, &sinfo); 3068 mac_addr, &sinfo);
2928 if (err == -ENOENT) 3069 if (err == -ENOENT)
2929 break; 3070 break;
2930 if (err) 3071 if (err)
@@ -2969,7 +3110,7 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
2969 if (!rdev->ops->get_station) 3110 if (!rdev->ops->get_station)
2970 return -EOPNOTSUPP; 3111 return -EOPNOTSUPP;
2971 3112
2972 err = rdev->ops->get_station(&rdev->wiphy, dev, mac_addr, &sinfo); 3113 err = rdev_get_station(rdev, dev, mac_addr, &sinfo);
2973 if (err) 3114 if (err)
2974 return err; 3115 return err;
2975 3116
@@ -3146,7 +3287,7 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
3146 3287
3147 /* be aware of params.vlan when changing code here */ 3288 /* be aware of params.vlan when changing code here */
3148 3289
3149 err = rdev->ops->change_station(&rdev->wiphy, dev, mac_addr, &params); 3290 err = rdev_change_station(rdev, dev, mac_addr, &params);
3150 3291
3151 if (params.vlan) 3292 if (params.vlan)
3152 dev_put(params.vlan); 3293 dev_put(params.vlan);
@@ -3198,6 +3339,10 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
3198 params.ht_capa = 3339 params.ht_capa =
3199 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); 3340 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
3200 3341
3342 if (info->attrs[NL80211_ATTR_VHT_CAPABILITY])
3343 params.vht_capa =
3344 nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]);
3345
3201 if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) 3346 if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION])
3202 params.plink_action = 3347 params.plink_action =
3203 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]); 3348 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
@@ -3275,7 +3420,7 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
3275 3420
3276 /* be aware of params.vlan when changing code here */ 3421 /* be aware of params.vlan when changing code here */
3277 3422
3278 err = rdev->ops->add_station(&rdev->wiphy, dev, mac_addr, &params); 3423 err = rdev_add_station(rdev, dev, mac_addr, &params);
3279 3424
3280 if (params.vlan) 3425 if (params.vlan)
3281 dev_put(params.vlan); 3426 dev_put(params.vlan);
@@ -3300,7 +3445,7 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
3300 if (!rdev->ops->del_station) 3445 if (!rdev->ops->del_station)
3301 return -EOPNOTSUPP; 3446 return -EOPNOTSUPP;
3302 3447
3303 return rdev->ops->del_station(&rdev->wiphy, dev, mac_addr); 3448 return rdev_del_station(rdev, dev, mac_addr);
3304} 3449}
3305 3450
3306static int nl80211_send_mpath(struct sk_buff *msg, u32 portid, u32 seq, 3451static int nl80211_send_mpath(struct sk_buff *msg, u32 portid, u32 seq,
@@ -3382,8 +3527,8 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
3382 } 3527 }
3383 3528
3384 while (1) { 3529 while (1) {
3385 err = dev->ops->dump_mpath(&dev->wiphy, netdev, path_idx, 3530 err = rdev_dump_mpath(dev, netdev, path_idx, dst, next_hop,
3386 dst, next_hop, &pinfo); 3531 &pinfo);
3387 if (err == -ENOENT) 3532 if (err == -ENOENT)
3388 break; 3533 break;
3389 if (err) 3534 if (err)
@@ -3430,7 +3575,7 @@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info)
3430 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) 3575 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
3431 return -EOPNOTSUPP; 3576 return -EOPNOTSUPP;
3432 3577
3433 err = rdev->ops->get_mpath(&rdev->wiphy, dev, dst, next_hop, &pinfo); 3578 err = rdev_get_mpath(rdev, dev, dst, next_hop, &pinfo);
3434 if (err) 3579 if (err)
3435 return err; 3580 return err;
3436 3581
@@ -3469,7 +3614,7 @@ static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info)
3469 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) 3614 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
3470 return -EOPNOTSUPP; 3615 return -EOPNOTSUPP;
3471 3616
3472 return rdev->ops->change_mpath(&rdev->wiphy, dev, dst, next_hop); 3617 return rdev_change_mpath(rdev, dev, dst, next_hop);
3473} 3618}
3474 3619
3475static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info) 3620static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info)
@@ -3494,7 +3639,7 @@ static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info)
3494 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) 3639 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
3495 return -EOPNOTSUPP; 3640 return -EOPNOTSUPP;
3496 3641
3497 return rdev->ops->add_mpath(&rdev->wiphy, dev, dst, next_hop); 3642 return rdev_add_mpath(rdev, dev, dst, next_hop);
3498} 3643}
3499 3644
3500static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info) 3645static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
@@ -3509,7 +3654,7 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
3509 if (!rdev->ops->del_mpath) 3654 if (!rdev->ops->del_mpath)
3510 return -EOPNOTSUPP; 3655 return -EOPNOTSUPP;
3511 3656
3512 return rdev->ops->del_mpath(&rdev->wiphy, dev, dst); 3657 return rdev_del_mpath(rdev, dev, dst);
3513} 3658}
3514 3659
3515static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info) 3660static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
@@ -3525,6 +3670,8 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
3525 params.use_short_slot_time = -1; 3670 params.use_short_slot_time = -1;
3526 params.ap_isolate = -1; 3671 params.ap_isolate = -1;
3527 params.ht_opmode = -1; 3672 params.ht_opmode = -1;
3673 params.p2p_ctwindow = -1;
3674 params.p2p_opp_ps = -1;
3528 3675
3529 if (info->attrs[NL80211_ATTR_BSS_CTS_PROT]) 3676 if (info->attrs[NL80211_ATTR_BSS_CTS_PROT])
3530 params.use_cts_prot = 3677 params.use_cts_prot =
@@ -3547,6 +3694,32 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
3547 params.ht_opmode = 3694 params.ht_opmode =
3548 nla_get_u16(info->attrs[NL80211_ATTR_BSS_HT_OPMODE]); 3695 nla_get_u16(info->attrs[NL80211_ATTR_BSS_HT_OPMODE]);
3549 3696
3697 if (info->attrs[NL80211_ATTR_P2P_CTWINDOW]) {
3698 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
3699 return -EINVAL;
3700 params.p2p_ctwindow =
3701 nla_get_s8(info->attrs[NL80211_ATTR_P2P_CTWINDOW]);
3702 if (params.p2p_ctwindow < 0)
3703 return -EINVAL;
3704 if (params.p2p_ctwindow != 0 &&
3705 !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_CTWIN))
3706 return -EINVAL;
3707 }
3708
3709 if (info->attrs[NL80211_ATTR_P2P_OPPPS]) {
3710 u8 tmp;
3711
3712 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
3713 return -EINVAL;
3714 tmp = nla_get_u8(info->attrs[NL80211_ATTR_P2P_OPPPS]);
3715 if (tmp > 1)
3716 return -EINVAL;
3717 params.p2p_opp_ps = tmp;
3718 if (params.p2p_opp_ps &&
3719 !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_OPPPS))
3720 return -EINVAL;
3721 }
3722
3550 if (!rdev->ops->change_bss) 3723 if (!rdev->ops->change_bss)
3551 return -EOPNOTSUPP; 3724 return -EOPNOTSUPP;
3552 3725
@@ -3554,7 +3727,7 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
3554 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) 3727 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
3555 return -EOPNOTSUPP; 3728 return -EOPNOTSUPP;
3556 3729
3557 return rdev->ops->change_bss(&rdev->wiphy, dev, &params); 3730 return rdev_change_bss(rdev, dev, &params);
3558} 3731}
3559 3732
3560static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = { 3733static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
@@ -3668,8 +3841,7 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
3668 if (!wdev->mesh_id_len) 3841 if (!wdev->mesh_id_len)
3669 memcpy(&cur_params, &default_mesh_config, sizeof(cur_params)); 3842 memcpy(&cur_params, &default_mesh_config, sizeof(cur_params));
3670 else 3843 else
3671 err = rdev->ops->get_mesh_config(&rdev->wiphy, dev, 3844 err = rdev_get_mesh_config(rdev, dev, &cur_params);
3672 &cur_params);
3673 wdev_unlock(wdev); 3845 wdev_unlock(wdev);
3674 3846
3675 if (err) 3847 if (err)
@@ -3971,8 +4143,7 @@ static int nl80211_update_mesh_config(struct sk_buff *skb,
3971 err = -ENOLINK; 4143 err = -ENOLINK;
3972 4144
3973 if (!err) 4145 if (!err)
3974 err = rdev->ops->update_mesh_config(&rdev->wiphy, dev, 4146 err = rdev_update_mesh_config(rdev, dev, mask, &cfg);
3975 mask, &cfg);
3976 4147
3977 wdev_unlock(wdev); 4148 wdev_unlock(wdev);
3978 4149
@@ -4337,14 +4508,27 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
4337 } 4508 }
4338 } 4509 }
4339 4510
4511 if (info->attrs[NL80211_ATTR_SCAN_FLAGS]) {
4512 request->flags = nla_get_u32(
4513 info->attrs[NL80211_ATTR_SCAN_FLAGS]);
4514 if (((request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
4515 !(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) ||
4516 ((request->flags & NL80211_SCAN_FLAG_FLUSH) &&
4517 !(wiphy->features & NL80211_FEATURE_SCAN_FLUSH))) {
4518 err = -EOPNOTSUPP;
4519 goto out_free;
4520 }
4521 }
4522
4340 request->no_cck = 4523 request->no_cck =
4341 nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]); 4524 nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
4342 4525
4343 request->wdev = wdev; 4526 request->wdev = wdev;
4344 request->wiphy = &rdev->wiphy; 4527 request->wiphy = &rdev->wiphy;
4528 request->scan_start = jiffies;
4345 4529
4346 rdev->scan_req = request; 4530 rdev->scan_req = request;
4347 err = rdev->ops->scan(&rdev->wiphy, request); 4531 err = rdev_scan(rdev, request);
4348 4532
4349 if (!err) { 4533 if (!err) {
4350 nl80211_send_scan_start(rdev, wdev); 4534 nl80211_send_scan_start(rdev, wdev);
@@ -4568,11 +4752,24 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
4568 request->ie_len); 4752 request->ie_len);
4569 } 4753 }
4570 4754
4755 if (info->attrs[NL80211_ATTR_SCAN_FLAGS]) {
4756 request->flags = nla_get_u32(
4757 info->attrs[NL80211_ATTR_SCAN_FLAGS]);
4758 if (((request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
4759 !(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) ||
4760 ((request->flags & NL80211_SCAN_FLAG_FLUSH) &&
4761 !(wiphy->features & NL80211_FEATURE_SCAN_FLUSH))) {
4762 err = -EOPNOTSUPP;
4763 goto out_free;
4764 }
4765 }
4766
4571 request->dev = dev; 4767 request->dev = dev;
4572 request->wiphy = &rdev->wiphy; 4768 request->wiphy = &rdev->wiphy;
4573 request->interval = interval; 4769 request->interval = interval;
4770 request->scan_start = jiffies;
4574 4771
4575 err = rdev->ops->sched_scan_start(&rdev->wiphy, dev, request); 4772 err = rdev_sched_scan_start(rdev, dev, request);
4576 if (!err) { 4773 if (!err) {
4577 rdev->sched_scan_req = request; 4774 rdev->sched_scan_req = request;
4578 nl80211_send_sched_scan(rdev, dev, 4775 nl80211_send_sched_scan(rdev, dev,
@@ -4611,6 +4808,7 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
4611 struct cfg80211_internal_bss *intbss) 4808 struct cfg80211_internal_bss *intbss)
4612{ 4809{
4613 struct cfg80211_bss *res = &intbss->pub; 4810 struct cfg80211_bss *res = &intbss->pub;
4811 const struct cfg80211_bss_ies *ies;
4614 void *hdr; 4812 void *hdr;
4615 struct nlattr *bss; 4813 struct nlattr *bss;
4616 4814
@@ -4631,16 +4829,24 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
4631 if (!bss) 4829 if (!bss)
4632 goto nla_put_failure; 4830 goto nla_put_failure;
4633 if ((!is_zero_ether_addr(res->bssid) && 4831 if ((!is_zero_ether_addr(res->bssid) &&
4634 nla_put(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid)) || 4832 nla_put(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid)))
4635 (res->information_elements && res->len_information_elements && 4833 goto nla_put_failure;
4636 nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS, 4834
4637 res->len_information_elements, 4835 rcu_read_lock();
4638 res->information_elements)) || 4836 ies = rcu_dereference(res->ies);
4639 (res->beacon_ies && res->len_beacon_ies && 4837 if (ies && ies->len && nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS,
4640 res->beacon_ies != res->information_elements && 4838 ies->len, ies->data)) {
4641 nla_put(msg, NL80211_BSS_BEACON_IES, 4839 rcu_read_unlock();
4642 res->len_beacon_ies, res->beacon_ies))) 4840 goto nla_put_failure;
4841 }
4842 ies = rcu_dereference(res->beacon_ies);
4843 if (ies && ies->len && nla_put(msg, NL80211_BSS_BEACON_IES,
4844 ies->len, ies->data)) {
4845 rcu_read_unlock();
4643 goto nla_put_failure; 4846 goto nla_put_failure;
4847 }
4848 rcu_read_unlock();
4849
4644 if (res->tsf && 4850 if (res->tsf &&
4645 nla_put_u64(msg, NL80211_BSS_TSF, res->tsf)) 4851 nla_put_u64(msg, NL80211_BSS_TSF, res->tsf))
4646 goto nla_put_failure; 4852 goto nla_put_failure;
@@ -4815,8 +5021,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
4815 while (1) { 5021 while (1) {
4816 struct ieee80211_channel *chan; 5022 struct ieee80211_channel *chan;
4817 5023
4818 res = dev->ops->dump_survey(&dev->wiphy, netdev, survey_idx, 5024 res = rdev_dump_survey(dev, netdev, survey_idx, &survey);
4819 &survey);
4820 if (res == -ENOENT) 5025 if (res == -ENOENT)
4821 break; 5026 break;
4822 if (res) 5027 if (res)
@@ -4852,11 +5057,6 @@ static int nl80211_dump_survey(struct sk_buff *skb,
4852 return res; 5057 return res;
4853} 5058}
4854 5059
4855static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type)
4856{
4857 return auth_type <= NL80211_AUTHTYPE_MAX;
4858}
4859
4860static bool nl80211_valid_wpa_versions(u32 wpa_versions) 5060static bool nl80211_valid_wpa_versions(u32 wpa_versions)
4861{ 5061{
4862 return !(wpa_versions & ~(NL80211_WPA_VERSION_1 | 5062 return !(wpa_versions & ~(NL80211_WPA_VERSION_1 |
@@ -4868,8 +5068,8 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
4868 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 5068 struct cfg80211_registered_device *rdev = info->user_ptr[0];
4869 struct net_device *dev = info->user_ptr[1]; 5069 struct net_device *dev = info->user_ptr[1];
4870 struct ieee80211_channel *chan; 5070 struct ieee80211_channel *chan;
4871 const u8 *bssid, *ssid, *ie = NULL; 5071 const u8 *bssid, *ssid, *ie = NULL, *sae_data = NULL;
4872 int err, ssid_len, ie_len = 0; 5072 int err, ssid_len, ie_len = 0, sae_data_len = 0;
4873 enum nl80211_auth_type auth_type; 5073 enum nl80211_auth_type auth_type;
4874 struct key_parse key; 5074 struct key_parse key;
4875 bool local_state_change; 5075 bool local_state_change;
@@ -4945,9 +5145,23 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
4945 } 5145 }
4946 5146
4947 auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); 5147 auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
4948 if (!nl80211_valid_auth_type(auth_type)) 5148 if (!nl80211_valid_auth_type(rdev, auth_type, NL80211_CMD_AUTHENTICATE))
5149 return -EINVAL;
5150
5151 if (auth_type == NL80211_AUTHTYPE_SAE &&
5152 !info->attrs[NL80211_ATTR_SAE_DATA])
4949 return -EINVAL; 5153 return -EINVAL;
4950 5154
5155 if (info->attrs[NL80211_ATTR_SAE_DATA]) {
5156 if (auth_type != NL80211_AUTHTYPE_SAE)
5157 return -EINVAL;
5158 sae_data = nla_data(info->attrs[NL80211_ATTR_SAE_DATA]);
5159 sae_data_len = nla_len(info->attrs[NL80211_ATTR_SAE_DATA]);
5160 /* need to include at least Auth Transaction and Status Code */
5161 if (sae_data_len < 4)
5162 return -EINVAL;
5163 }
5164
4951 local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE]; 5165 local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
4952 5166
4953 /* 5167 /*
@@ -4959,7 +5173,8 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
4959 5173
4960 return cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, 5174 return cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
4961 ssid, ssid_len, ie, ie_len, 5175 ssid, ssid_len, ie, ie_len,
4962 key.p.key, key.p.key_len, key.idx); 5176 key.p.key, key.p.key_len, key.idx,
5177 sae_data, sae_data_len);
4963} 5178}
4964 5179
4965static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, 5180static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
@@ -5250,8 +5465,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
5250 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) 5465 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
5251 return -EINVAL; 5466 return -EINVAL;
5252 5467
5253 if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] || 5468 if (!info->attrs[NL80211_ATTR_SSID] ||
5254 !info->attrs[NL80211_ATTR_SSID] ||
5255 !nla_len(info->attrs[NL80211_ATTR_SSID])) 5469 !nla_len(info->attrs[NL80211_ATTR_SSID]))
5256 return -EINVAL; 5470 return -EINVAL;
5257 5471
@@ -5286,34 +5500,17 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
5286 ibss.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); 5500 ibss.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
5287 } 5501 }
5288 5502
5289 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { 5503 err = nl80211_parse_chandef(rdev, info, &ibss.chandef);
5290 enum nl80211_channel_type channel_type; 5504 if (err)
5291 5505 return err;
5292 if (!nl80211_valid_channel_type(info, &channel_type))
5293 return -EINVAL;
5294
5295 if (channel_type != NL80211_CHAN_NO_HT &&
5296 !(wiphy->features & NL80211_FEATURE_HT_IBSS))
5297 return -EINVAL;
5298
5299 ibss.channel_type = channel_type;
5300 } else {
5301 ibss.channel_type = NL80211_CHAN_NO_HT;
5302 }
5303 5506
5304 ibss.channel = rdev_freq_to_chan(rdev, 5507 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &ibss.chandef))
5305 nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]),
5306 ibss.channel_type);
5307 if (!ibss.channel ||
5308 ibss.channel->flags & IEEE80211_CHAN_NO_IBSS ||
5309 ibss.channel->flags & IEEE80211_CHAN_DISABLED)
5310 return -EINVAL; 5508 return -EINVAL;
5311 5509
5312 /* Both channels should be able to initiate communication */ 5510 if (ibss.chandef.width > NL80211_CHAN_WIDTH_40)
5313 if ((ibss.channel_type == NL80211_CHAN_HT40PLUS || 5511 return -EINVAL;
5314 ibss.channel_type == NL80211_CHAN_HT40MINUS) && 5512 if (ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
5315 !cfg80211_can_beacon_sec_chan(&rdev->wiphy, ibss.channel, 5513 !(rdev->wiphy.features & NL80211_FEATURE_HT_IBSS))
5316 ibss.channel_type))
5317 return -EINVAL; 5514 return -EINVAL;
5318 5515
5319 ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED]; 5516 ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED];
@@ -5325,7 +5522,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
5325 int n_rates = 5522 int n_rates =
5326 nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); 5523 nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
5327 struct ieee80211_supported_band *sband = 5524 struct ieee80211_supported_band *sband =
5328 wiphy->bands[ibss.channel->band]; 5525 wiphy->bands[ibss.chandef.chan->band];
5329 5526
5330 err = ieee80211_get_ratemask(sband, rates, n_rates, 5527 err = ieee80211_get_ratemask(sband, rates, n_rates,
5331 &ibss.basic_rates); 5528 &ibss.basic_rates);
@@ -5339,10 +5536,19 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
5339 return -EINVAL; 5536 return -EINVAL;
5340 5537
5341 if (ibss.privacy && info->attrs[NL80211_ATTR_KEYS]) { 5538 if (ibss.privacy && info->attrs[NL80211_ATTR_KEYS]) {
5539 bool no_ht = false;
5540
5342 connkeys = nl80211_parse_connkeys(rdev, 5541 connkeys = nl80211_parse_connkeys(rdev,
5343 info->attrs[NL80211_ATTR_KEYS]); 5542 info->attrs[NL80211_ATTR_KEYS],
5543 &no_ht);
5344 if (IS_ERR(connkeys)) 5544 if (IS_ERR(connkeys))
5345 return PTR_ERR(connkeys); 5545 return PTR_ERR(connkeys);
5546
5547 if ((ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) &&
5548 no_ht) {
5549 kfree(connkeys);
5550 return -EINVAL;
5551 }
5346 } 5552 }
5347 5553
5348 ibss.control_port = 5554 ibss.control_port =
@@ -5368,6 +5574,36 @@ static int nl80211_leave_ibss(struct sk_buff *skb, struct genl_info *info)
5368 return cfg80211_leave_ibss(rdev, dev, false); 5574 return cfg80211_leave_ibss(rdev, dev, false);
5369} 5575}
5370 5576
5577static int nl80211_set_mcast_rate(struct sk_buff *skb, struct genl_info *info)
5578{
5579 struct cfg80211_registered_device *rdev = info->user_ptr[0];
5580 struct net_device *dev = info->user_ptr[1];
5581 int mcast_rate[IEEE80211_NUM_BANDS];
5582 u32 nla_rate;
5583 int err;
5584
5585 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC &&
5586 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
5587 return -EOPNOTSUPP;
5588
5589 if (!rdev->ops->set_mcast_rate)
5590 return -EOPNOTSUPP;
5591
5592 memset(mcast_rate, 0, sizeof(mcast_rate));
5593
5594 if (!info->attrs[NL80211_ATTR_MCAST_RATE])
5595 return -EINVAL;
5596
5597 nla_rate = nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE]);
5598 if (!nl80211_parse_mcast_rate(rdev, mcast_rate, nla_rate))
5599 return -EINVAL;
5600
5601 err = rdev->ops->set_mcast_rate(&rdev->wiphy, dev, mcast_rate);
5602
5603 return err;
5604}
5605
5606
5371#ifdef CONFIG_NL80211_TESTMODE 5607#ifdef CONFIG_NL80211_TESTMODE
5372static struct genl_multicast_group nl80211_testmode_mcgrp = { 5608static struct genl_multicast_group nl80211_testmode_mcgrp = {
5373 .name = "testmode", 5609 .name = "testmode",
@@ -5384,7 +5620,7 @@ static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info)
5384 err = -EOPNOTSUPP; 5620 err = -EOPNOTSUPP;
5385 if (rdev->ops->testmode_cmd) { 5621 if (rdev->ops->testmode_cmd) {
5386 rdev->testmode_info = info; 5622 rdev->testmode_info = info;
5387 err = rdev->ops->testmode_cmd(&rdev->wiphy, 5623 err = rdev_testmode_cmd(rdev,
5388 nla_data(info->attrs[NL80211_ATTR_TESTDATA]), 5624 nla_data(info->attrs[NL80211_ATTR_TESTDATA]),
5389 nla_len(info->attrs[NL80211_ATTR_TESTDATA])); 5625 nla_len(info->attrs[NL80211_ATTR_TESTDATA]));
5390 rdev->testmode_info = NULL; 5626 rdev->testmode_info = NULL;
@@ -5466,8 +5702,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
5466 genlmsg_cancel(skb, hdr); 5702 genlmsg_cancel(skb, hdr);
5467 break; 5703 break;
5468 } 5704 }
5469 err = rdev->ops->testmode_dump(&rdev->wiphy, skb, cb, 5705 err = rdev_testmode_dump(rdev, skb, cb, data, data_len);
5470 data, data_len);
5471 nla_nest_end(skb, tmdata); 5706 nla_nest_end(skb, tmdata);
5472 5707
5473 if (err == -ENOBUFS || err == -ENOENT) { 5708 if (err == -ENOBUFS || err == -ENOENT) {
@@ -5596,7 +5831,8 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
5596 if (info->attrs[NL80211_ATTR_AUTH_TYPE]) { 5831 if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
5597 connect.auth_type = 5832 connect.auth_type =
5598 nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); 5833 nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
5599 if (!nl80211_valid_auth_type(connect.auth_type)) 5834 if (!nl80211_valid_auth_type(rdev, connect.auth_type,
5835 NL80211_CMD_CONNECT))
5600 return -EINVAL; 5836 return -EINVAL;
5601 } else 5837 } else
5602 connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; 5838 connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
@@ -5642,7 +5878,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
5642 5878
5643 if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) { 5879 if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) {
5644 connkeys = nl80211_parse_connkeys(rdev, 5880 connkeys = nl80211_parse_connkeys(rdev,
5645 info->attrs[NL80211_ATTR_KEYS]); 5881 info->attrs[NL80211_ATTR_KEYS], NULL);
5646 if (IS_ERR(connkeys)) 5882 if (IS_ERR(connkeys))
5647 return PTR_ERR(connkeys); 5883 return PTR_ERR(connkeys);
5648 } 5884 }
@@ -5771,7 +6007,7 @@ static int nl80211_flush_pmksa(struct sk_buff *skb, struct genl_info *info)
5771 if (!rdev->ops->flush_pmksa) 6007 if (!rdev->ops->flush_pmksa)
5772 return -EOPNOTSUPP; 6008 return -EOPNOTSUPP;
5773 6009
5774 return rdev->ops->flush_pmksa(&rdev->wiphy, dev); 6010 return rdev_flush_pmksa(rdev, dev);
5775} 6011}
5776 6012
5777static int nl80211_tdls_mgmt(struct sk_buff *skb, struct genl_info *info) 6013static int nl80211_tdls_mgmt(struct sk_buff *skb, struct genl_info *info)
@@ -5798,10 +6034,10 @@ static int nl80211_tdls_mgmt(struct sk_buff *skb, struct genl_info *info)
5798 status_code = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]); 6034 status_code = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]);
5799 dialog_token = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_DIALOG_TOKEN]); 6035 dialog_token = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_DIALOG_TOKEN]);
5800 6036
5801 return rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, action_code, 6037 return rdev_tdls_mgmt(rdev, dev, peer, action_code,
5802 dialog_token, status_code, 6038 dialog_token, status_code,
5803 nla_data(info->attrs[NL80211_ATTR_IE]), 6039 nla_data(info->attrs[NL80211_ATTR_IE]),
5804 nla_len(info->attrs[NL80211_ATTR_IE])); 6040 nla_len(info->attrs[NL80211_ATTR_IE]));
5805} 6041}
5806 6042
5807static int nl80211_tdls_oper(struct sk_buff *skb, struct genl_info *info) 6043static int nl80211_tdls_oper(struct sk_buff *skb, struct genl_info *info)
@@ -5822,7 +6058,7 @@ static int nl80211_tdls_oper(struct sk_buff *skb, struct genl_info *info)
5822 operation = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_OPERATION]); 6058 operation = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_OPERATION]);
5823 peer = nla_data(info->attrs[NL80211_ATTR_MAC]); 6059 peer = nla_data(info->attrs[NL80211_ATTR_MAC]);
5824 6060
5825 return rdev->ops->tdls_oper(&rdev->wiphy, dev, peer, operation); 6061 return rdev_tdls_oper(rdev, dev, peer, operation);
5826} 6062}
5827 6063
5828static int nl80211_remain_on_channel(struct sk_buff *skb, 6064static int nl80211_remain_on_channel(struct sk_buff *skb,
@@ -5830,12 +6066,11 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
5830{ 6066{
5831 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 6067 struct cfg80211_registered_device *rdev = info->user_ptr[0];
5832 struct wireless_dev *wdev = info->user_ptr[1]; 6068 struct wireless_dev *wdev = info->user_ptr[1];
5833 struct ieee80211_channel *chan; 6069 struct cfg80211_chan_def chandef;
5834 struct sk_buff *msg; 6070 struct sk_buff *msg;
5835 void *hdr; 6071 void *hdr;
5836 u64 cookie; 6072 u64 cookie;
5837 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; 6073 u32 duration;
5838 u32 freq, duration;
5839 int err; 6074 int err;
5840 6075
5841 if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] || 6076 if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
@@ -5856,14 +6091,9 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
5856 duration > rdev->wiphy.max_remain_on_channel_duration) 6091 duration > rdev->wiphy.max_remain_on_channel_duration)
5857 return -EINVAL; 6092 return -EINVAL;
5858 6093
5859 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] && 6094 err = nl80211_parse_chandef(rdev, info, &chandef);
5860 !nl80211_valid_channel_type(info, &channel_type)) 6095 if (err)
5861 return -EINVAL; 6096 return err;
5862
5863 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
5864 chan = rdev_freq_to_chan(rdev, freq, channel_type);
5865 if (chan == NULL)
5866 return -EINVAL;
5867 6097
5868 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 6098 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
5869 if (!msg) 6099 if (!msg)
@@ -5877,8 +6107,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
5877 goto free_msg; 6107 goto free_msg;
5878 } 6108 }
5879 6109
5880 err = rdev->ops->remain_on_channel(&rdev->wiphy, wdev, chan, 6110 err = rdev_remain_on_channel(rdev, wdev, chandef.chan,
5881 channel_type, duration, &cookie); 6111 duration, &cookie);
5882 6112
5883 if (err) 6113 if (err)
5884 goto free_msg; 6114 goto free_msg;
@@ -5912,7 +6142,7 @@ static int nl80211_cancel_remain_on_channel(struct sk_buff *skb,
5912 6142
5913 cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]); 6143 cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);
5914 6144
5915 return rdev->ops->cancel_remain_on_channel(&rdev->wiphy, wdev, cookie); 6145 return rdev_cancel_remain_on_channel(rdev, wdev, cookie);
5916} 6146}
5917 6147
5918static u32 rateset_to_mask(struct ieee80211_supported_band *sband, 6148static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
@@ -6055,7 +6285,7 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
6055 } 6285 }
6056 } 6286 }
6057 6287
6058 return rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, NULL, &mask); 6288 return rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
6059} 6289}
6060 6290
6061static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info) 6291static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
@@ -6097,10 +6327,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
6097{ 6327{
6098 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 6328 struct cfg80211_registered_device *rdev = info->user_ptr[0];
6099 struct wireless_dev *wdev = info->user_ptr[1]; 6329 struct wireless_dev *wdev = info->user_ptr[1];
6100 struct ieee80211_channel *chan; 6330 struct cfg80211_chan_def chandef;
6101 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
6102 bool channel_type_valid = false;
6103 u32 freq;
6104 int err; 6331 int err;
6105 void *hdr = NULL; 6332 void *hdr = NULL;
6106 u64 cookie; 6333 u64 cookie;
@@ -6110,8 +6337,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
6110 6337
6111 dont_wait_for_ack = info->attrs[NL80211_ATTR_DONT_WAIT_FOR_ACK]; 6338 dont_wait_for_ack = info->attrs[NL80211_ATTR_DONT_WAIT_FOR_ACK];
6112 6339
6113 if (!info->attrs[NL80211_ATTR_FRAME] || 6340 if (!info->attrs[NL80211_ATTR_FRAME])
6114 !info->attrs[NL80211_ATTR_WIPHY_FREQ])
6115 return -EINVAL; 6341 return -EINVAL;
6116 6342
6117 if (!rdev->ops->mgmt_tx) 6343 if (!rdev->ops->mgmt_tx)
@@ -6146,12 +6372,6 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
6146 6372
6147 } 6373 }
6148 6374
6149 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
6150 if (!nl80211_valid_channel_type(info, &channel_type))
6151 return -EINVAL;
6152 channel_type_valid = true;
6153 }
6154
6155 offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK]; 6375 offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK];
6156 6376
6157 if (offchan && !(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)) 6377 if (offchan && !(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX))
@@ -6159,10 +6379,9 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
6159 6379
6160 no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]); 6380 no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
6161 6381
6162 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); 6382 err = nl80211_parse_chandef(rdev, info, &chandef);
6163 chan = rdev_freq_to_chan(rdev, freq, channel_type); 6383 if (err)
6164 if (chan == NULL) 6384 return err;
6165 return -EINVAL;
6166 6385
6167 if (!dont_wait_for_ack) { 6386 if (!dont_wait_for_ack) {
6168 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 6387 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -6178,8 +6397,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
6178 } 6397 }
6179 } 6398 }
6180 6399
6181 err = cfg80211_mlme_mgmt_tx(rdev, wdev, chan, offchan, channel_type, 6400 err = cfg80211_mlme_mgmt_tx(rdev, wdev, chandef.chan, offchan, wait,
6182 channel_type_valid, wait,
6183 nla_data(info->attrs[NL80211_ATTR_FRAME]), 6401 nla_data(info->attrs[NL80211_ATTR_FRAME]),
6184 nla_len(info->attrs[NL80211_ATTR_FRAME]), 6402 nla_len(info->attrs[NL80211_ATTR_FRAME]),
6185 no_cck, dont_wait_for_ack, &cookie); 6403 no_cck, dont_wait_for_ack, &cookie);
@@ -6230,7 +6448,7 @@ static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *in
6230 6448
6231 cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]); 6449 cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);
6232 6450
6233 return rdev->ops->mgmt_tx_cancel_wait(&rdev->wiphy, wdev, cookie); 6451 return rdev_mgmt_tx_cancel_wait(rdev, wdev, cookie);
6234} 6452}
6235 6453
6236static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info) 6454static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info)
@@ -6260,8 +6478,7 @@ static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info)
6260 if (state == wdev->ps) 6478 if (state == wdev->ps)
6261 return 0; 6479 return 0;
6262 6480
6263 err = rdev->ops->set_power_mgmt(wdev->wiphy, dev, state, 6481 err = rdev_set_power_mgmt(rdev, dev, state, wdev->ps_timeout);
6264 wdev->ps_timeout);
6265 if (!err) 6482 if (!err)
6266 wdev->ps = state; 6483 wdev->ps = state;
6267 return err; 6484 return err;
@@ -6322,14 +6539,13 @@ nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] __read_mostly = {
6322}; 6539};
6323 6540
6324static int nl80211_set_cqm_txe(struct genl_info *info, 6541static int nl80211_set_cqm_txe(struct genl_info *info,
6325 u32 rate, u32 pkts, u32 intvl) 6542 u32 rate, u32 pkts, u32 intvl)
6326{ 6543{
6327 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 6544 struct cfg80211_registered_device *rdev = info->user_ptr[0];
6328 struct wireless_dev *wdev; 6545 struct wireless_dev *wdev;
6329 struct net_device *dev = info->user_ptr[1]; 6546 struct net_device *dev = info->user_ptr[1];
6330 6547
6331 if ((rate < 0 || rate > 100) || 6548 if (rate > 100 || intvl > NL80211_CQM_TXE_MAX_INTVL)
6332 (intvl < 0 || intvl > NL80211_CQM_TXE_MAX_INTVL))
6333 return -EINVAL; 6549 return -EINVAL;
6334 6550
6335 wdev = dev->ieee80211_ptr; 6551 wdev = dev->ieee80211_ptr;
@@ -6341,8 +6557,7 @@ static int nl80211_set_cqm_txe(struct genl_info *info,
6341 wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) 6557 wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
6342 return -EOPNOTSUPP; 6558 return -EOPNOTSUPP;
6343 6559
6344 return rdev->ops->set_cqm_txe_config(wdev->wiphy, dev, 6560 return rdev_set_cqm_txe_config(rdev, dev, rate, pkts, intvl);
6345 rate, pkts, intvl);
6346} 6561}
6347 6562
6348static int nl80211_set_cqm_rssi(struct genl_info *info, 6563static int nl80211_set_cqm_rssi(struct genl_info *info,
@@ -6364,8 +6579,7 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
6364 wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) 6579 wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
6365 return -EOPNOTSUPP; 6580 return -EOPNOTSUPP;
6366 6581
6367 return rdev->ops->set_cqm_rssi_config(wdev->wiphy, dev, 6582 return rdev_set_cqm_rssi_config(rdev, dev, threshold, hysteresis);
6368 threshold, hysteresis);
6369} 6583}
6370 6584
6371static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info) 6585static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info)
@@ -6446,21 +6660,12 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
6446 } 6660 }
6447 6661
6448 if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { 6662 if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
6449 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; 6663 err = nl80211_parse_chandef(rdev, info, &setup.chandef);
6450 6664 if (err)
6451 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] && 6665 return err;
6452 !nl80211_valid_channel_type(info, &channel_type))
6453 return -EINVAL;
6454
6455 setup.channel = rdev_freq_to_chan(rdev,
6456 nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]),
6457 channel_type);
6458 if (!setup.channel)
6459 return -EINVAL;
6460 setup.channel_type = channel_type;
6461 } else { 6666 } else {
6462 /* cfg80211_join_mesh() will sort it out */ 6667 /* cfg80211_join_mesh() will sort it out */
6463 setup.channel = NULL; 6668 setup.chandef.chan = NULL;
6464 } 6669 }
6465 6670
6466 return cfg80211_join_mesh(rdev, dev, &setup, &cfg); 6671 return cfg80211_join_mesh(rdev, dev, &setup, &cfg);
@@ -6690,7 +6895,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
6690 6895
6691 set_wakeup: 6896 set_wakeup:
6692 if (rdev->ops->set_wakeup && prev_enabled != !!rdev->wowlan) 6897 if (rdev->ops->set_wakeup && prev_enabled != !!rdev->wowlan)
6693 rdev->ops->set_wakeup(&rdev->wiphy, rdev->wowlan); 6898 rdev_set_wakeup(rdev, rdev->wowlan);
6694 6899
6695 return 0; 6900 return 0;
6696 error: 6901 error:
@@ -6746,7 +6951,7 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
6746 goto out; 6951 goto out;
6747 } 6952 }
6748 6953
6749 err = rdev->ops->set_rekey_data(&rdev->wiphy, dev, &rekey_data); 6954 err = rdev_set_rekey_data(rdev, dev, &rekey_data);
6750 out: 6955 out:
6751 wdev_unlock(wdev); 6956 wdev_unlock(wdev);
6752 return err; 6957 return err;
@@ -6805,7 +7010,7 @@ static int nl80211_probe_client(struct sk_buff *skb,
6805 7010
6806 addr = nla_data(info->attrs[NL80211_ATTR_MAC]); 7011 addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
6807 7012
6808 err = rdev->ops->probe_client(&rdev->wiphy, dev, addr, &cookie); 7013 err = rdev_probe_client(rdev, dev, addr, &cookie);
6809 if (err) 7014 if (err)
6810 goto free_msg; 7015 goto free_msg;
6811 7016
@@ -6826,16 +7031,35 @@ static int nl80211_probe_client(struct sk_buff *skb,
6826static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info) 7031static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
6827{ 7032{
6828 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 7033 struct cfg80211_registered_device *rdev = info->user_ptr[0];
7034 struct cfg80211_beacon_registration *reg, *nreg;
7035 int rv;
6829 7036
6830 if (!(rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS)) 7037 if (!(rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS))
6831 return -EOPNOTSUPP; 7038 return -EOPNOTSUPP;
6832 7039
6833 if (rdev->ap_beacons_nlportid) 7040 nreg = kzalloc(sizeof(*nreg), GFP_KERNEL);
6834 return -EBUSY; 7041 if (!nreg)
7042 return -ENOMEM;
7043
7044 /* First, check if already registered. */
7045 spin_lock_bh(&rdev->beacon_registrations_lock);
7046 list_for_each_entry(reg, &rdev->beacon_registrations, list) {
7047 if (reg->nlportid == info->snd_portid) {
7048 rv = -EALREADY;
7049 goto out_err;
7050 }
7051 }
7052 /* Add it to the list */
7053 nreg->nlportid = info->snd_portid;
7054 list_add(&nreg->list, &rdev->beacon_registrations);
6835 7055
6836 rdev->ap_beacons_nlportid = info->snd_portid; 7056 spin_unlock_bh(&rdev->beacon_registrations_lock);
6837 7057
6838 return 0; 7058 return 0;
7059out_err:
7060 spin_unlock_bh(&rdev->beacon_registrations_lock);
7061 kfree(nreg);
7062 return rv;
6839} 7063}
6840 7064
6841static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info) 7065static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info)
@@ -6859,7 +7083,7 @@ static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info)
6859 if (err) 7083 if (err)
6860 return err; 7084 return err;
6861 7085
6862 err = rdev->ops->start_p2p_device(&rdev->wiphy, wdev); 7086 err = rdev_start_p2p_device(rdev, wdev);
6863 if (err) 7087 if (err)
6864 return err; 7088 return err;
6865 7089
@@ -6885,7 +7109,7 @@ static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)
6885 if (!wdev->p2p_started) 7109 if (!wdev->p2p_started)
6886 return 0; 7110 return 0;
6887 7111
6888 rdev->ops->stop_p2p_device(&rdev->wiphy, wdev); 7112 rdev_stop_p2p_device(rdev, wdev);
6889 wdev->p2p_started = false; 7113 wdev->p2p_started = false;
6890 7114
6891 mutex_lock(&rdev->devlist_mtx); 7115 mutex_lock(&rdev->devlist_mtx);
@@ -7552,6 +7776,14 @@ static struct genl_ops nl80211_ops[] = {
7552 .internal_flags = NL80211_FLAG_NEED_WDEV_UP | 7776 .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
7553 NL80211_FLAG_NEED_RTNL, 7777 NL80211_FLAG_NEED_RTNL,
7554 }, 7778 },
7779 {
7780 .cmd = NL80211_CMD_SET_MCAST_RATE,
7781 .doit = nl80211_set_mcast_rate,
7782 .policy = nl80211_policy,
7783 .flags = GENL_ADMIN_PERM,
7784 .internal_flags = NL80211_FLAG_NEED_NETDEV |
7785 NL80211_FLAG_NEED_RTNL,
7786 },
7555}; 7787};
7556 7788
7557static struct genl_multicast_group nl80211_mlme_mcgrp = { 7789static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -7622,6 +7854,9 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
7622 nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie)) 7854 nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie))
7623 goto nla_put_failure; 7855 goto nla_put_failure;
7624 7856
7857 if (req->flags)
7858 nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags);
7859
7625 return 0; 7860 return 0;
7626 nla_put_failure: 7861 nla_put_failure:
7627 return -ENOBUFS; 7862 return -ENOBUFS;
@@ -8250,7 +8485,6 @@ static void nl80211_send_remain_on_chan_event(
8250 int cmd, struct cfg80211_registered_device *rdev, 8485 int cmd, struct cfg80211_registered_device *rdev,
8251 struct wireless_dev *wdev, u64 cookie, 8486 struct wireless_dev *wdev, u64 cookie,
8252 struct ieee80211_channel *chan, 8487 struct ieee80211_channel *chan,
8253 enum nl80211_channel_type channel_type,
8254 unsigned int duration, gfp_t gfp) 8488 unsigned int duration, gfp_t gfp)
8255{ 8489{
8256 struct sk_buff *msg; 8490 struct sk_buff *msg;
@@ -8271,7 +8505,8 @@ static void nl80211_send_remain_on_chan_event(
8271 wdev->netdev->ifindex)) || 8505 wdev->netdev->ifindex)) ||
8272 nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) || 8506 nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
8273 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) || 8507 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) ||
8274 nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type) || 8508 nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE,
8509 NL80211_CHAN_NO_HT) ||
8275 nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie)) 8510 nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
8276 goto nla_put_failure; 8511 goto nla_put_failure;
8277 8512
@@ -8293,23 +8528,20 @@ static void nl80211_send_remain_on_chan_event(
8293void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev, 8528void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
8294 struct wireless_dev *wdev, u64 cookie, 8529 struct wireless_dev *wdev, u64 cookie,
8295 struct ieee80211_channel *chan, 8530 struct ieee80211_channel *chan,
8296 enum nl80211_channel_type channel_type,
8297 unsigned int duration, gfp_t gfp) 8531 unsigned int duration, gfp_t gfp)
8298{ 8532{
8299 nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL, 8533 nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL,
8300 rdev, wdev, cookie, chan, 8534 rdev, wdev, cookie, chan,
8301 channel_type, duration, gfp); 8535 duration, gfp);
8302} 8536}
8303 8537
8304void nl80211_send_remain_on_channel_cancel( 8538void nl80211_send_remain_on_channel_cancel(
8305 struct cfg80211_registered_device *rdev, 8539 struct cfg80211_registered_device *rdev,
8306 struct wireless_dev *wdev, 8540 struct wireless_dev *wdev,
8307 u64 cookie, struct ieee80211_channel *chan, 8541 u64 cookie, struct ieee80211_channel *chan, gfp_t gfp)
8308 enum nl80211_channel_type channel_type, gfp_t gfp)
8309{ 8542{
8310 nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL, 8543 nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
8311 rdev, wdev, cookie, chan, 8544 rdev, wdev, cookie, chan, 0, gfp);
8312 channel_type, 0, gfp);
8313} 8545}
8314 8546
8315void nl80211_send_sta_event(struct cfg80211_registered_device *rdev, 8547void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
@@ -8665,8 +8897,8 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
8665} 8897}
8666 8898
8667void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, 8899void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
8668 struct net_device *netdev, int freq, 8900 struct net_device *netdev,
8669 enum nl80211_channel_type type, gfp_t gfp) 8901 struct cfg80211_chan_def *chandef, gfp_t gfp)
8670{ 8902{
8671 struct sk_buff *msg; 8903 struct sk_buff *msg;
8672 void *hdr; 8904 void *hdr;
@@ -8681,9 +8913,10 @@ void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
8681 return; 8913 return;
8682 } 8914 }
8683 8915
8684 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || 8916 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
8685 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) || 8917 goto nla_put_failure;
8686 nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, type)) 8918
8919 if (nl80211_send_chandef(msg, chandef))
8687 goto nla_put_failure; 8920 goto nla_put_failure;
8688 8921
8689 genlmsg_end(msg, hdr); 8922 genlmsg_end(msg, hdr);
@@ -8800,7 +9033,10 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
8800 void *hdr; 9033 void *hdr;
8801 int err; 9034 int err;
8802 9035
9036 trace_cfg80211_probe_status(dev, addr, cookie, acked);
9037
8803 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); 9038 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
9039
8804 if (!msg) 9040 if (!msg)
8805 return; 9041 return;
8806 9042
@@ -8835,44 +9071,96 @@ EXPORT_SYMBOL(cfg80211_probe_status);
8835 9071
8836void cfg80211_report_obss_beacon(struct wiphy *wiphy, 9072void cfg80211_report_obss_beacon(struct wiphy *wiphy,
8837 const u8 *frame, size_t len, 9073 const u8 *frame, size_t len,
8838 int freq, int sig_dbm, gfp_t gfp) 9074 int freq, int sig_dbm)
8839{ 9075{
8840 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 9076 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
8841 struct sk_buff *msg; 9077 struct sk_buff *msg;
8842 void *hdr; 9078 void *hdr;
8843 u32 nlportid = ACCESS_ONCE(rdev->ap_beacons_nlportid); 9079 struct cfg80211_beacon_registration *reg;
8844 9080
8845 if (!nlportid) 9081 trace_cfg80211_report_obss_beacon(wiphy, frame, len, freq, sig_dbm);
8846 return; 9082
9083 spin_lock_bh(&rdev->beacon_registrations_lock);
9084 list_for_each_entry(reg, &rdev->beacon_registrations, list) {
9085 msg = nlmsg_new(len + 100, GFP_ATOMIC);
9086 if (!msg) {
9087 spin_unlock_bh(&rdev->beacon_registrations_lock);
9088 return;
9089 }
9090
9091 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME);
9092 if (!hdr)
9093 goto nla_put_failure;
8847 9094
8848 msg = nlmsg_new(len + 100, gfp); 9095 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
9096 (freq &&
9097 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq)) ||
9098 (sig_dbm &&
9099 nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
9100 nla_put(msg, NL80211_ATTR_FRAME, len, frame))
9101 goto nla_put_failure;
9102
9103 genlmsg_end(msg, hdr);
9104
9105 genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, reg->nlportid);
9106 }
9107 spin_unlock_bh(&rdev->beacon_registrations_lock);
9108 return;
9109
9110 nla_put_failure:
9111 spin_unlock_bh(&rdev->beacon_registrations_lock);
9112 if (hdr)
9113 genlmsg_cancel(msg, hdr);
9114 nlmsg_free(msg);
9115}
9116EXPORT_SYMBOL(cfg80211_report_obss_beacon);
9117
9118void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer,
9119 enum nl80211_tdls_operation oper,
9120 u16 reason_code, gfp_t gfp)
9121{
9122 struct wireless_dev *wdev = dev->ieee80211_ptr;
9123 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
9124 struct sk_buff *msg;
9125 void *hdr;
9126 int err;
9127
9128 trace_cfg80211_tdls_oper_request(wdev->wiphy, dev, peer, oper,
9129 reason_code);
9130
9131 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
8849 if (!msg) 9132 if (!msg)
8850 return; 9133 return;
8851 9134
8852 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME); 9135 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_TDLS_OPER);
8853 if (!hdr) { 9136 if (!hdr) {
8854 nlmsg_free(msg); 9137 nlmsg_free(msg);
8855 return; 9138 return;
8856 } 9139 }
8857 9140
8858 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || 9141 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
8859 (freq && 9142 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
8860 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq)) || 9143 nla_put_u8(msg, NL80211_ATTR_TDLS_OPERATION, oper) ||
8861 (sig_dbm && 9144 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer) ||
8862 nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) || 9145 (reason_code > 0 &&
8863 nla_put(msg, NL80211_ATTR_FRAME, len, frame)) 9146 nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason_code)))
8864 goto nla_put_failure; 9147 goto nla_put_failure;
8865 9148
8866 genlmsg_end(msg, hdr); 9149 err = genlmsg_end(msg, hdr);
9150 if (err < 0) {
9151 nlmsg_free(msg);
9152 return;
9153 }
8867 9154
8868 genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); 9155 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
9156 nl80211_mlme_mcgrp.id, gfp);
8869 return; 9157 return;
8870 9158
8871 nla_put_failure: 9159 nla_put_failure:
8872 genlmsg_cancel(msg, hdr); 9160 genlmsg_cancel(msg, hdr);
8873 nlmsg_free(msg); 9161 nlmsg_free(msg);
8874} 9162}
8875EXPORT_SYMBOL(cfg80211_report_obss_beacon); 9163EXPORT_SYMBOL(cfg80211_tdls_oper_request);
8876 9164
8877static int nl80211_netlink_notify(struct notifier_block * nb, 9165static int nl80211_netlink_notify(struct notifier_block * nb,
8878 unsigned long state, 9166 unsigned long state,
@@ -8881,6 +9169,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
8881 struct netlink_notify *notify = _notify; 9169 struct netlink_notify *notify = _notify;
8882 struct cfg80211_registered_device *rdev; 9170 struct cfg80211_registered_device *rdev;
8883 struct wireless_dev *wdev; 9171 struct wireless_dev *wdev;
9172 struct cfg80211_beacon_registration *reg, *tmp;
8884 9173
8885 if (state != NETLINK_URELEASE) 9174 if (state != NETLINK_URELEASE)
8886 return NOTIFY_DONE; 9175 return NOTIFY_DONE;
@@ -8890,8 +9179,17 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
8890 list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) { 9179 list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
8891 list_for_each_entry_rcu(wdev, &rdev->wdev_list, list) 9180 list_for_each_entry_rcu(wdev, &rdev->wdev_list, list)
8892 cfg80211_mlme_unregister_socket(wdev, notify->portid); 9181 cfg80211_mlme_unregister_socket(wdev, notify->portid);
8893 if (rdev->ap_beacons_nlportid == notify->portid) 9182
8894 rdev->ap_beacons_nlportid = 0; 9183 spin_lock_bh(&rdev->beacon_registrations_lock);
9184 list_for_each_entry_safe(reg, tmp, &rdev->beacon_registrations,
9185 list) {
9186 if (reg->nlportid == notify->portid) {
9187 list_del(&reg->list);
9188 kfree(reg);
9189 break;
9190 }
9191 }
9192 spin_unlock_bh(&rdev->beacon_registrations_lock);
8895 } 9193 }
8896 9194
8897 rcu_read_unlock(); 9195 rcu_read_unlock();
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index f6153516068c..2acba8477e9d 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -76,13 +76,11 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
76void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev, 76void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
77 struct wireless_dev *wdev, u64 cookie, 77 struct wireless_dev *wdev, u64 cookie,
78 struct ieee80211_channel *chan, 78 struct ieee80211_channel *chan,
79 enum nl80211_channel_type channel_type,
80 unsigned int duration, gfp_t gfp); 79 unsigned int duration, gfp_t gfp);
81void nl80211_send_remain_on_channel_cancel( 80void nl80211_send_remain_on_channel_cancel(
82 struct cfg80211_registered_device *rdev, 81 struct cfg80211_registered_device *rdev,
83 struct wireless_dev *wdev, 82 struct wireless_dev *wdev,
84 u64 cookie, struct ieee80211_channel *chan, 83 u64 cookie, struct ieee80211_channel *chan, gfp_t gfp);
85 enum nl80211_channel_type channel_type, gfp_t gfp);
86 84
87void nl80211_send_sta_event(struct cfg80211_registered_device *rdev, 85void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
88 struct net_device *dev, const u8 *mac_addr, 86 struct net_device *dev, const u8 *mac_addr,
@@ -129,8 +127,8 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
129 const u8 *bssid, bool preauth, gfp_t gfp); 127 const u8 *bssid, bool preauth, gfp_t gfp);
130 128
131void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, 129void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
132 struct net_device *dev, int freq, 130 struct net_device *dev,
133 enum nl80211_channel_type type, gfp_t gfp); 131 struct cfg80211_chan_def *chandef, gfp_t gfp);
134 132
135bool nl80211_unexpected_frame(struct net_device *dev, 133bool nl80211_unexpected_frame(struct net_device *dev,
136 const u8 *addr, gfp_t gfp); 134 const u8 *addr, gfp_t gfp);
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
new file mode 100644
index 000000000000..6c0c8191f837
--- /dev/null
+++ b/net/wireless/rdev-ops.h
@@ -0,0 +1,878 @@
1#ifndef __CFG80211_RDEV_OPS
2#define __CFG80211_RDEV_OPS
3
4#include <linux/rtnetlink.h>
5#include <net/cfg80211.h>
6#include "core.h"
7#include "trace.h"
8
9static inline int rdev_suspend(struct cfg80211_registered_device *rdev)
10{
11 int ret;
12 trace_rdev_suspend(&rdev->wiphy, rdev->wowlan);
13 ret = rdev->ops->suspend(&rdev->wiphy, rdev->wowlan);
14 trace_rdev_return_int(&rdev->wiphy, ret);
15 return ret;
16}
17
18static inline int rdev_resume(struct cfg80211_registered_device *rdev)
19{
20 int ret;
21 trace_rdev_resume(&rdev->wiphy);
22 ret = rdev->ops->resume(&rdev->wiphy);
23 trace_rdev_return_int(&rdev->wiphy, ret);
24 return ret;
25}
26
27static inline void rdev_set_wakeup(struct cfg80211_registered_device *rdev,
28 bool enabled)
29{
30 trace_rdev_set_wakeup(&rdev->wiphy, enabled);
31 rdev->ops->set_wakeup(&rdev->wiphy, enabled);
32 trace_rdev_return_void(&rdev->wiphy);
33}
34
35static inline struct wireless_dev
36*rdev_add_virtual_intf(struct cfg80211_registered_device *rdev, char *name,
37 enum nl80211_iftype type, u32 *flags,
38 struct vif_params *params)
39{
40 struct wireless_dev *ret;
41 trace_rdev_add_virtual_intf(&rdev->wiphy, name, type);
42 ret = rdev->ops->add_virtual_intf(&rdev->wiphy, name, type, flags,
43 params);
44 trace_rdev_return_wdev(&rdev->wiphy, ret);
45 return ret;
46}
47
48static inline int
49rdev_del_virtual_intf(struct cfg80211_registered_device *rdev,
50 struct wireless_dev *wdev)
51{
52 int ret;
53 trace_rdev_del_virtual_intf(&rdev->wiphy, wdev);
54 ret = rdev->ops->del_virtual_intf(&rdev->wiphy, wdev);
55 trace_rdev_return_int(&rdev->wiphy, ret);
56 return ret;
57}
58
59static inline int
60rdev_change_virtual_intf(struct cfg80211_registered_device *rdev,
61 struct net_device *dev, enum nl80211_iftype type,
62 u32 *flags, struct vif_params *params)
63{
64 int ret;
65 trace_rdev_change_virtual_intf(&rdev->wiphy, dev, type);
66 ret = rdev->ops->change_virtual_intf(&rdev->wiphy, dev, type, flags,
67 params);
68 trace_rdev_return_int(&rdev->wiphy, ret);
69 return ret;
70}
71
72static inline int rdev_add_key(struct cfg80211_registered_device *rdev,
73 struct net_device *netdev, u8 key_index,
74 bool pairwise, const u8 *mac_addr,
75 struct key_params *params)
76{
77 int ret;
78 trace_rdev_add_key(&rdev->wiphy, netdev, key_index, pairwise, mac_addr);
79 ret = rdev->ops->add_key(&rdev->wiphy, netdev, key_index, pairwise,
80 mac_addr, params);
81 trace_rdev_return_int(&rdev->wiphy, ret);
82 return ret;
83}
84
85static inline int
86rdev_get_key(struct cfg80211_registered_device *rdev, struct net_device *netdev,
87 u8 key_index, bool pairwise, const u8 *mac_addr, void *cookie,
88 void (*callback)(void *cookie, struct key_params*))
89{
90 int ret;
91 trace_rdev_get_key(&rdev->wiphy, netdev, key_index, pairwise, mac_addr);
92 ret = rdev->ops->get_key(&rdev->wiphy, netdev, key_index, pairwise,
93 mac_addr, cookie, callback);
94 trace_rdev_return_int(&rdev->wiphy, ret);
95 return ret;
96}
97
98static inline int rdev_del_key(struct cfg80211_registered_device *rdev,
99 struct net_device *netdev, u8 key_index,
100 bool pairwise, const u8 *mac_addr)
101{
102 int ret;
103 trace_rdev_del_key(&rdev->wiphy, netdev, key_index, pairwise, mac_addr);
104 ret = rdev->ops->del_key(&rdev->wiphy, netdev, key_index, pairwise,
105 mac_addr);
106 trace_rdev_return_int(&rdev->wiphy, ret);
107 return ret;
108}
109
110static inline int
111rdev_set_default_key(struct cfg80211_registered_device *rdev,
112 struct net_device *netdev, u8 key_index, bool unicast,
113 bool multicast)
114{
115 int ret;
116 trace_rdev_set_default_key(&rdev->wiphy, netdev, key_index,
117 unicast, multicast);
118 ret = rdev->ops->set_default_key(&rdev->wiphy, netdev, key_index,
119 unicast, multicast);
120 trace_rdev_return_int(&rdev->wiphy, ret);
121 return ret;
122}
123
124static inline int
125rdev_set_default_mgmt_key(struct cfg80211_registered_device *rdev,
126 struct net_device *netdev, u8 key_index)
127{
128 int ret;
129 trace_rdev_set_default_mgmt_key(&rdev->wiphy, netdev, key_index);
130 ret = rdev->ops->set_default_mgmt_key(&rdev->wiphy, netdev,
131 key_index);
132 trace_rdev_return_int(&rdev->wiphy, ret);
133 return ret;
134}
135
136static inline int rdev_start_ap(struct cfg80211_registered_device *rdev,
137 struct net_device *dev,
138 struct cfg80211_ap_settings *settings)
139{
140 int ret;
141 trace_rdev_start_ap(&rdev->wiphy, dev, settings);
142 ret = rdev->ops->start_ap(&rdev->wiphy, dev, settings);
143 trace_rdev_return_int(&rdev->wiphy, ret);
144 return ret;
145}
146
147static inline int rdev_change_beacon(struct cfg80211_registered_device *rdev,
148 struct net_device *dev,
149 struct cfg80211_beacon_data *info)
150{
151 int ret;
152 trace_rdev_change_beacon(&rdev->wiphy, dev, info);
153 ret = rdev->ops->change_beacon(&rdev->wiphy, dev, info);
154 trace_rdev_return_int(&rdev->wiphy, ret);
155 return ret;
156}
157
158static inline int rdev_stop_ap(struct cfg80211_registered_device *rdev,
159 struct net_device *dev)
160{
161 int ret;
162 trace_rdev_stop_ap(&rdev->wiphy, dev);
163 ret = rdev->ops->stop_ap(&rdev->wiphy, dev);
164 trace_rdev_return_int(&rdev->wiphy, ret);
165 return ret;
166}
167
168static inline int rdev_add_station(struct cfg80211_registered_device *rdev,
169 struct net_device *dev, u8 *mac,
170 struct station_parameters *params)
171{
172 int ret;
173 trace_rdev_add_station(&rdev->wiphy, dev, mac, params);
174 ret = rdev->ops->add_station(&rdev->wiphy, dev, mac, params);
175 trace_rdev_return_int(&rdev->wiphy, ret);
176 return ret;
177}
178
179static inline int rdev_del_station(struct cfg80211_registered_device *rdev,
180 struct net_device *dev, u8 *mac)
181{
182 int ret;
183 trace_rdev_del_station(&rdev->wiphy, dev, mac);
184 ret = rdev->ops->del_station(&rdev->wiphy, dev, mac);
185 trace_rdev_return_int(&rdev->wiphy, ret);
186 return ret;
187}
188
189static inline int rdev_change_station(struct cfg80211_registered_device *rdev,
190 struct net_device *dev, u8 *mac,
191 struct station_parameters *params)
192{
193 int ret;
194 trace_rdev_change_station(&rdev->wiphy, dev, mac, params);
195 ret = rdev->ops->change_station(&rdev->wiphy, dev, mac, params);
196 trace_rdev_return_int(&rdev->wiphy, ret);
197 return ret;
198}
199
200static inline int rdev_get_station(struct cfg80211_registered_device *rdev,
201 struct net_device *dev, u8 *mac,
202 struct station_info *sinfo)
203{
204 int ret;
205 trace_rdev_get_station(&rdev->wiphy, dev, mac);
206 ret = rdev->ops->get_station(&rdev->wiphy, dev, mac, sinfo);
207 trace_rdev_return_int_station_info(&rdev->wiphy, ret, sinfo);
208 return ret;
209}
210
211static inline int rdev_dump_station(struct cfg80211_registered_device *rdev,
212 struct net_device *dev, int idx, u8 *mac,
213 struct station_info *sinfo)
214{
215 int ret;
216 trace_rdev_dump_station(&rdev->wiphy, dev, idx, mac);
217 ret = rdev->ops->dump_station(&rdev->wiphy, dev, idx, mac, sinfo);
218 trace_rdev_return_int_station_info(&rdev->wiphy, ret, sinfo);
219 return ret;
220}
221
222static inline int rdev_add_mpath(struct cfg80211_registered_device *rdev,
223 struct net_device *dev, u8 *dst, u8 *next_hop)
224{
225 int ret;
226 trace_rdev_add_mpath(&rdev->wiphy, dev, dst, next_hop);
227 ret = rdev->ops->add_mpath(&rdev->wiphy, dev, dst, next_hop);
228 trace_rdev_return_int(&rdev->wiphy, ret);
229 return ret;
230}
231
232static inline int rdev_del_mpath(struct cfg80211_registered_device *rdev,
233 struct net_device *dev, u8 *dst)
234{
235 int ret;
236 trace_rdev_del_mpath(&rdev->wiphy, dev, dst);
237 ret = rdev->ops->del_mpath(&rdev->wiphy, dev, dst);
238 trace_rdev_return_int(&rdev->wiphy, ret);
239 return ret;
240}
241
242static inline int rdev_change_mpath(struct cfg80211_registered_device *rdev,
243 struct net_device *dev, u8 *dst,
244 u8 *next_hop)
245{
246 int ret;
247 trace_rdev_change_mpath(&rdev->wiphy, dev, dst, next_hop);
248 ret = rdev->ops->change_mpath(&rdev->wiphy, dev, dst, next_hop);
249 trace_rdev_return_int(&rdev->wiphy, ret);
250 return ret;
251}
252
253static inline int rdev_get_mpath(struct cfg80211_registered_device *rdev,
254 struct net_device *dev, u8 *dst, u8 *next_hop,
255 struct mpath_info *pinfo)
256{
257 int ret;
258 trace_rdev_get_mpath(&rdev->wiphy, dev, dst, next_hop);
259 ret = rdev->ops->get_mpath(&rdev->wiphy, dev, dst, next_hop, pinfo);
260 trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo);
261 return ret;
262
263}
264
265static inline int rdev_dump_mpath(struct cfg80211_registered_device *rdev,
266 struct net_device *dev, int idx, u8 *dst,
267 u8 *next_hop, struct mpath_info *pinfo)
268
269{
270 int ret;
271 trace_rdev_dump_mpath(&rdev->wiphy, dev, idx, dst, next_hop);
272 ret = rdev->ops->dump_mpath(&rdev->wiphy, dev, idx, dst, next_hop,
273 pinfo);
274 trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo);
275 return ret;
276}
277
278static inline int
279rdev_get_mesh_config(struct cfg80211_registered_device *rdev,
280 struct net_device *dev, struct mesh_config *conf)
281{
282 int ret;
283 trace_rdev_get_mesh_config(&rdev->wiphy, dev);
284 ret = rdev->ops->get_mesh_config(&rdev->wiphy, dev, conf);
285 trace_rdev_return_int_mesh_config(&rdev->wiphy, ret, conf);
286 return ret;
287}
288
289static inline int
290rdev_update_mesh_config(struct cfg80211_registered_device *rdev,
291 struct net_device *dev, u32 mask,
292 const struct mesh_config *nconf)
293{
294 int ret;
295 trace_rdev_update_mesh_config(&rdev->wiphy, dev, mask, nconf);
296 ret = rdev->ops->update_mesh_config(&rdev->wiphy, dev, mask, nconf);
297 trace_rdev_return_int(&rdev->wiphy, ret);
298 return ret;
299}
300
301static inline int rdev_join_mesh(struct cfg80211_registered_device *rdev,
302 struct net_device *dev,
303 const struct mesh_config *conf,
304 const struct mesh_setup *setup)
305{
306 int ret;
307 trace_rdev_join_mesh(&rdev->wiphy, dev, conf, setup);
308 ret = rdev->ops->join_mesh(&rdev->wiphy, dev, conf, setup);
309 trace_rdev_return_int(&rdev->wiphy, ret);
310 return ret;
311}
312
313
314static inline int rdev_leave_mesh(struct cfg80211_registered_device *rdev,
315 struct net_device *dev)
316{
317 int ret;
318 trace_rdev_leave_mesh(&rdev->wiphy, dev);
319 ret = rdev->ops->leave_mesh(&rdev->wiphy, dev);
320 trace_rdev_return_int(&rdev->wiphy, ret);
321 return ret;
322}
323
324static inline int rdev_change_bss(struct cfg80211_registered_device *rdev,
325 struct net_device *dev,
326 struct bss_parameters *params)
327
328{
329 int ret;
330 trace_rdev_change_bss(&rdev->wiphy, dev, params);
331 ret = rdev->ops->change_bss(&rdev->wiphy, dev, params);
332 trace_rdev_return_int(&rdev->wiphy, ret);
333 return ret;
334}
335
336static inline int rdev_set_txq_params(struct cfg80211_registered_device *rdev,
337 struct net_device *dev,
338 struct ieee80211_txq_params *params)
339
340{
341 int ret;
342 trace_rdev_set_txq_params(&rdev->wiphy, dev, params);
343 ret = rdev->ops->set_txq_params(&rdev->wiphy, dev, params);
344 trace_rdev_return_int(&rdev->wiphy, ret);
345 return ret;
346}
347
348static inline int
349rdev_libertas_set_mesh_channel(struct cfg80211_registered_device *rdev,
350 struct net_device *dev,
351 struct ieee80211_channel *chan)
352{
353 int ret;
354 trace_rdev_libertas_set_mesh_channel(&rdev->wiphy, dev, chan);
355 ret = rdev->ops->libertas_set_mesh_channel(&rdev->wiphy, dev, chan);
356 trace_rdev_return_int(&rdev->wiphy, ret);
357 return ret;
358}
359
360static inline int
361rdev_set_monitor_channel(struct cfg80211_registered_device *rdev,
362 struct cfg80211_chan_def *chandef)
363{
364 int ret;
365 trace_rdev_set_monitor_channel(&rdev->wiphy, chandef);
366 ret = rdev->ops->set_monitor_channel(&rdev->wiphy, chandef);
367 trace_rdev_return_int(&rdev->wiphy, ret);
368 return ret;
369}
370
371static inline int rdev_scan(struct cfg80211_registered_device *rdev,
372 struct cfg80211_scan_request *request)
373{
374 int ret;
375 trace_rdev_scan(&rdev->wiphy, request);
376 ret = rdev->ops->scan(&rdev->wiphy, request);
377 trace_rdev_return_int(&rdev->wiphy, ret);
378 return ret;
379}
380
381static inline int rdev_auth(struct cfg80211_registered_device *rdev,
382 struct net_device *dev,
383 struct cfg80211_auth_request *req)
384{
385 int ret;
386 trace_rdev_auth(&rdev->wiphy, dev, req);
387 ret = rdev->ops->auth(&rdev->wiphy, dev, req);
388 trace_rdev_return_int(&rdev->wiphy, ret);
389 return ret;
390}
391
392static inline int rdev_assoc(struct cfg80211_registered_device *rdev,
393 struct net_device *dev,
394 struct cfg80211_assoc_request *req)
395{
396 int ret;
397 trace_rdev_assoc(&rdev->wiphy, dev, req);
398 ret = rdev->ops->assoc(&rdev->wiphy, dev, req);
399 trace_rdev_return_int(&rdev->wiphy, ret);
400 return ret;
401}
402
403static inline int rdev_deauth(struct cfg80211_registered_device *rdev,
404 struct net_device *dev,
405 struct cfg80211_deauth_request *req)
406{
407 int ret;
408 trace_rdev_deauth(&rdev->wiphy, dev, req);
409 ret = rdev->ops->deauth(&rdev->wiphy, dev, req);
410 trace_rdev_return_int(&rdev->wiphy, ret);
411 return ret;
412}
413
414static inline int rdev_disassoc(struct cfg80211_registered_device *rdev,
415 struct net_device *dev,
416 struct cfg80211_disassoc_request *req)
417{
418 int ret;
419 trace_rdev_disassoc(&rdev->wiphy, dev, req);
420 ret = rdev->ops->disassoc(&rdev->wiphy, dev, req);
421 trace_rdev_return_int(&rdev->wiphy, ret);
422 return ret;
423}
424
425static inline int rdev_connect(struct cfg80211_registered_device *rdev,
426 struct net_device *dev,
427 struct cfg80211_connect_params *sme)
428{
429 int ret;
430 trace_rdev_connect(&rdev->wiphy, dev, sme);
431 ret = rdev->ops->connect(&rdev->wiphy, dev, sme);
432 trace_rdev_return_int(&rdev->wiphy, ret);
433 return ret;
434}
435
436static inline int rdev_disconnect(struct cfg80211_registered_device *rdev,
437 struct net_device *dev, u16 reason_code)
438{
439 int ret;
440 trace_rdev_disconnect(&rdev->wiphy, dev, reason_code);
441 ret = rdev->ops->disconnect(&rdev->wiphy, dev, reason_code);
442 trace_rdev_return_int(&rdev->wiphy, ret);
443 return ret;
444}
445
446static inline int rdev_join_ibss(struct cfg80211_registered_device *rdev,
447 struct net_device *dev,
448 struct cfg80211_ibss_params *params)
449{
450 int ret;
451 trace_rdev_join_ibss(&rdev->wiphy, dev, params);
452 ret = rdev->ops->join_ibss(&rdev->wiphy, dev, params);
453 trace_rdev_return_int(&rdev->wiphy, ret);
454 return ret;
455}
456
457static inline int rdev_leave_ibss(struct cfg80211_registered_device *rdev,
458 struct net_device *dev)
459{
460 int ret;
461 trace_rdev_leave_ibss(&rdev->wiphy, dev);
462 ret = rdev->ops->leave_ibss(&rdev->wiphy, dev);
463 trace_rdev_return_int(&rdev->wiphy, ret);
464 return ret;
465}
466
467static inline int
468rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, u32 changed)
469{
470 int ret;
471 trace_rdev_set_wiphy_params(&rdev->wiphy, changed);
472 ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
473 trace_rdev_return_int(&rdev->wiphy, ret);
474 return ret;
475}
476
477static inline int rdev_set_tx_power(struct cfg80211_registered_device *rdev,
478 struct wireless_dev *wdev,
479 enum nl80211_tx_power_setting type, int mbm)
480{
481 int ret;
482 trace_rdev_set_tx_power(&rdev->wiphy, wdev, type, mbm);
483 ret = rdev->ops->set_tx_power(&rdev->wiphy, wdev, type, mbm);
484 trace_rdev_return_int(&rdev->wiphy, ret);
485 return ret;
486}
487
488static inline int rdev_get_tx_power(struct cfg80211_registered_device *rdev,
489 struct wireless_dev *wdev, int *dbm)
490{
491 int ret;
492 trace_rdev_get_tx_power(&rdev->wiphy, wdev);
493 ret = rdev->ops->get_tx_power(&rdev->wiphy, wdev, dbm);
494 trace_rdev_return_int_int(&rdev->wiphy, ret, *dbm);
495 return ret;
496}
497
498static inline int rdev_set_wds_peer(struct cfg80211_registered_device *rdev,
499 struct net_device *dev, const u8 *addr)
500{
501 int ret;
502 trace_rdev_set_wds_peer(&rdev->wiphy, dev, addr);
503 ret = rdev->ops->set_wds_peer(&rdev->wiphy, dev, addr);
504 trace_rdev_return_int(&rdev->wiphy, ret);
505 return ret;
506}
507
508static inline void rdev_rfkill_poll(struct cfg80211_registered_device *rdev)
509{
510 trace_rdev_rfkill_poll(&rdev->wiphy);
511 rdev->ops->rfkill_poll(&rdev->wiphy);
512 trace_rdev_return_void(&rdev->wiphy);
513}
514
515
516#ifdef CONFIG_NL80211_TESTMODE
517static inline int rdev_testmode_cmd(struct cfg80211_registered_device *rdev,
518 void *data, int len)
519{
520 int ret;
521 trace_rdev_testmode_cmd(&rdev->wiphy);
522 ret = rdev->ops->testmode_cmd(&rdev->wiphy, data, len);
523 trace_rdev_return_int(&rdev->wiphy, ret);
524 return ret;
525}
526
527static inline int rdev_testmode_dump(struct cfg80211_registered_device *rdev,
528 struct sk_buff *skb,
529 struct netlink_callback *cb, void *data,
530 int len)
531{
532 int ret;
533 trace_rdev_testmode_dump(&rdev->wiphy);
534 ret = rdev->ops->testmode_dump(&rdev->wiphy, skb, cb, data, len);
535 trace_rdev_return_int(&rdev->wiphy, ret);
536 return ret;
537}
538#endif
539
540static inline int
541rdev_set_bitrate_mask(struct cfg80211_registered_device *rdev,
542 struct net_device *dev, const u8 *peer,
543 const struct cfg80211_bitrate_mask *mask)
544{
545 int ret;
546 trace_rdev_set_bitrate_mask(&rdev->wiphy, dev, peer, mask);
547 ret = rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, peer, mask);
548 trace_rdev_return_int(&rdev->wiphy, ret);
549 return ret;
550}
551
552static inline int rdev_dump_survey(struct cfg80211_registered_device *rdev,
553 struct net_device *netdev, int idx,
554 struct survey_info *info)
555{
556 int ret;
557 trace_rdev_dump_survey(&rdev->wiphy, netdev, idx);
558 ret = rdev->ops->dump_survey(&rdev->wiphy, netdev, idx, info);
559 if (ret < 0)
560 trace_rdev_return_int(&rdev->wiphy, ret);
561 else
562 trace_rdev_return_int_survey_info(&rdev->wiphy, ret, info);
563 return ret;
564}
565
566static inline int rdev_set_pmksa(struct cfg80211_registered_device *rdev,
567 struct net_device *netdev,
568 struct cfg80211_pmksa *pmksa)
569{
570 int ret;
571 trace_rdev_set_pmksa(&rdev->wiphy, netdev, pmksa);
572 ret = rdev->ops->set_pmksa(&rdev->wiphy, netdev, pmksa);
573 trace_rdev_return_int(&rdev->wiphy, ret);
574 return ret;
575}
576
577static inline int rdev_del_pmksa(struct cfg80211_registered_device *rdev,
578 struct net_device *netdev,
579 struct cfg80211_pmksa *pmksa)
580{
581 int ret;
582 trace_rdev_del_pmksa(&rdev->wiphy, netdev, pmksa);
583 ret = rdev->ops->del_pmksa(&rdev->wiphy, netdev, pmksa);
584 trace_rdev_return_int(&rdev->wiphy, ret);
585 return ret;
586}
587
588static inline int rdev_flush_pmksa(struct cfg80211_registered_device *rdev,
589 struct net_device *netdev)
590{
591 int ret;
592 trace_rdev_flush_pmksa(&rdev->wiphy, netdev);
593 ret = rdev->ops->flush_pmksa(&rdev->wiphy, netdev);
594 trace_rdev_return_int(&rdev->wiphy, ret);
595 return ret;
596}
597
598static inline int
599rdev_remain_on_channel(struct cfg80211_registered_device *rdev,
600 struct wireless_dev *wdev,
601 struct ieee80211_channel *chan,
602 unsigned int duration, u64 *cookie)
603{
604 int ret;
605 trace_rdev_remain_on_channel(&rdev->wiphy, wdev, chan, duration);
606 ret = rdev->ops->remain_on_channel(&rdev->wiphy, wdev, chan,
607 duration, cookie);
608 trace_rdev_return_int_cookie(&rdev->wiphy, ret, *cookie);
609 return ret;
610}
611
612static inline int
613rdev_cancel_remain_on_channel(struct cfg80211_registered_device *rdev,
614 struct wireless_dev *wdev, u64 cookie)
615{
616 int ret;
617 trace_rdev_cancel_remain_on_channel(&rdev->wiphy, wdev, cookie);
618 ret = rdev->ops->cancel_remain_on_channel(&rdev->wiphy, wdev, cookie);
619 trace_rdev_return_int(&rdev->wiphy, ret);
620 return ret;
621}
622
623static inline int rdev_mgmt_tx(struct cfg80211_registered_device *rdev,
624 struct wireless_dev *wdev,
625 struct ieee80211_channel *chan, bool offchan,
626 unsigned int wait, const u8 *buf, size_t len,
627 bool no_cck, bool dont_wait_for_ack, u64 *cookie)
628{
629 int ret;
630 trace_rdev_mgmt_tx(&rdev->wiphy, wdev, chan, offchan,
631 wait, no_cck, dont_wait_for_ack);
632 ret = rdev->ops->mgmt_tx(&rdev->wiphy, wdev, chan, offchan,
633 wait, buf, len, no_cck,
634 dont_wait_for_ack, cookie);
635 trace_rdev_return_int_cookie(&rdev->wiphy, ret, *cookie);
636 return ret;
637}
638
639static inline int
640rdev_mgmt_tx_cancel_wait(struct cfg80211_registered_device *rdev,
641 struct wireless_dev *wdev, u64 cookie)
642{
643 int ret;
644 trace_rdev_mgmt_tx_cancel_wait(&rdev->wiphy, wdev, cookie);
645 ret = rdev->ops->mgmt_tx_cancel_wait(&rdev->wiphy, wdev, cookie);
646 trace_rdev_return_int(&rdev->wiphy, ret);
647 return ret;
648}
649
650static inline int rdev_set_power_mgmt(struct cfg80211_registered_device *rdev,
651 struct net_device *dev, bool enabled,
652 int timeout)
653{
654 int ret;
655 trace_rdev_set_power_mgmt(&rdev->wiphy, dev, enabled, timeout);
656 ret = rdev->ops->set_power_mgmt(&rdev->wiphy, dev, enabled, timeout);
657 trace_rdev_return_int(&rdev->wiphy, ret);
658 return ret;
659}
660
661static inline int
662rdev_set_cqm_rssi_config(struct cfg80211_registered_device *rdev,
663 struct net_device *dev, s32 rssi_thold, u32 rssi_hyst)
664{
665 int ret;
666 trace_rdev_set_cqm_rssi_config(&rdev->wiphy, dev, rssi_thold,
667 rssi_hyst);
668 ret = rdev->ops->set_cqm_rssi_config(&rdev->wiphy, dev, rssi_thold,
669 rssi_hyst);
670 trace_rdev_return_int(&rdev->wiphy, ret);
671 return ret;
672}
673
674static inline int
675rdev_set_cqm_txe_config(struct cfg80211_registered_device *rdev,
676 struct net_device *dev, u32 rate, u32 pkts, u32 intvl)
677{
678 int ret;
679 trace_rdev_set_cqm_txe_config(&rdev->wiphy, dev, rate, pkts, intvl);
680 ret = rdev->ops->set_cqm_txe_config(&rdev->wiphy, dev, rate, pkts,
681 intvl);
682 trace_rdev_return_int(&rdev->wiphy, ret);
683 return ret;
684}
685
686static inline void
687rdev_mgmt_frame_register(struct cfg80211_registered_device *rdev,
688 struct wireless_dev *wdev, u16 frame_type, bool reg)
689{
690 trace_rdev_mgmt_frame_register(&rdev->wiphy, wdev , frame_type, reg);
691 rdev->ops->mgmt_frame_register(&rdev->wiphy, wdev , frame_type, reg);
692 trace_rdev_return_void(&rdev->wiphy);
693}
694
695static inline int rdev_set_antenna(struct cfg80211_registered_device *rdev,
696 u32 tx_ant, u32 rx_ant)
697{
698 int ret;
699 trace_rdev_set_antenna(&rdev->wiphy, tx_ant, rx_ant);
700 ret = rdev->ops->set_antenna(&rdev->wiphy, tx_ant, rx_ant);
701 trace_rdev_return_int(&rdev->wiphy, ret);
702 return ret;
703}
704
705static inline int rdev_get_antenna(struct cfg80211_registered_device *rdev,
706 u32 *tx_ant, u32 *rx_ant)
707{
708 int ret;
709 trace_rdev_get_antenna(&rdev->wiphy);
710 ret = rdev->ops->get_antenna(&rdev->wiphy, tx_ant, rx_ant);
711 if (ret)
712 trace_rdev_return_int(&rdev->wiphy, ret);
713 else
714 trace_rdev_return_int_tx_rx(&rdev->wiphy, ret, *tx_ant,
715 *rx_ant);
716 return ret;
717}
718
719static inline int rdev_set_ringparam(struct cfg80211_registered_device *rdev,
720 u32 tx, u32 rx)
721{
722 int ret;
723 trace_rdev_set_ringparam(&rdev->wiphy, tx, rx);
724 ret = rdev->ops->set_ringparam(&rdev->wiphy, tx, rx);
725 trace_rdev_return_int(&rdev->wiphy, ret);
726 return ret;
727}
728
729static inline void rdev_get_ringparam(struct cfg80211_registered_device *rdev,
730 u32 *tx, u32 *tx_max, u32 *rx,
731 u32 *rx_max)
732{
733 trace_rdev_get_ringparam(&rdev->wiphy);
734 rdev->ops->get_ringparam(&rdev->wiphy, tx, tx_max, rx, rx_max);
735 trace_rdev_return_void_tx_rx(&rdev->wiphy, *tx, *tx_max, *rx, *rx_max);
736}
737
738static inline int
739rdev_sched_scan_start(struct cfg80211_registered_device *rdev,
740 struct net_device *dev,
741 struct cfg80211_sched_scan_request *request)
742{
743 int ret;
744 trace_rdev_sched_scan_start(&rdev->wiphy, dev, request);
745 ret = rdev->ops->sched_scan_start(&rdev->wiphy, dev, request);
746 trace_rdev_return_int(&rdev->wiphy, ret);
747 return ret;
748}
749
750static inline int rdev_sched_scan_stop(struct cfg80211_registered_device *rdev,
751 struct net_device *dev)
752{
753 int ret;
754 trace_rdev_sched_scan_stop(&rdev->wiphy, dev);
755 ret = rdev->ops->sched_scan_stop(&rdev->wiphy, dev);
756 trace_rdev_return_int(&rdev->wiphy, ret);
757 return ret;
758}
759
760static inline int rdev_set_rekey_data(struct cfg80211_registered_device *rdev,
761 struct net_device *dev,
762 struct cfg80211_gtk_rekey_data *data)
763{
764 int ret;
765 trace_rdev_set_rekey_data(&rdev->wiphy, dev);
766 ret = rdev->ops->set_rekey_data(&rdev->wiphy, dev, data);
767 trace_rdev_return_int(&rdev->wiphy, ret);
768 return ret;
769}
770
771static inline int rdev_tdls_mgmt(struct cfg80211_registered_device *rdev,
772 struct net_device *dev, u8 *peer,
773 u8 action_code, u8 dialog_token,
774 u16 status_code, const u8 *buf, size_t len)
775{
776 int ret;
777 trace_rdev_tdls_mgmt(&rdev->wiphy, dev, peer, action_code,
778 dialog_token, status_code, buf, len);
779 ret = rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, action_code,
780 dialog_token, status_code, buf, len);
781 trace_rdev_return_int(&rdev->wiphy, ret);
782 return ret;
783}
784
785static inline int rdev_tdls_oper(struct cfg80211_registered_device *rdev,
786 struct net_device *dev, u8 *peer,
787 enum nl80211_tdls_operation oper)
788{
789 int ret;
790 trace_rdev_tdls_oper(&rdev->wiphy, dev, peer, oper);
791 ret = rdev->ops->tdls_oper(&rdev->wiphy, dev, peer, oper);
792 trace_rdev_return_int(&rdev->wiphy, ret);
793 return ret;
794}
795
796static inline int rdev_probe_client(struct cfg80211_registered_device *rdev,
797 struct net_device *dev, const u8 *peer,
798 u64 *cookie)
799{
800 int ret;
801 trace_rdev_probe_client(&rdev->wiphy, dev, peer);
802 ret = rdev->ops->probe_client(&rdev->wiphy, dev, peer, cookie);
803 trace_rdev_return_int_cookie(&rdev->wiphy, ret, *cookie);
804 return ret;
805}
806
807static inline int rdev_set_noack_map(struct cfg80211_registered_device *rdev,
808 struct net_device *dev, u16 noack_map)
809{
810 int ret;
811 trace_rdev_set_noack_map(&rdev->wiphy, dev, noack_map);
812 ret = rdev->ops->set_noack_map(&rdev->wiphy, dev, noack_map);
813 trace_rdev_return_int(&rdev->wiphy, ret);
814 return ret;
815}
816
817static inline int
818rdev_get_et_sset_count(struct cfg80211_registered_device *rdev,
819 struct net_device *dev, int sset)
820{
821 int ret;
822 trace_rdev_get_et_sset_count(&rdev->wiphy, dev, sset);
823 ret = rdev->ops->get_et_sset_count(&rdev->wiphy, dev, sset);
824 trace_rdev_return_int(&rdev->wiphy, ret);
825 return ret;
826}
827
828static inline void rdev_get_et_stats(struct cfg80211_registered_device *rdev,
829 struct net_device *dev,
830 struct ethtool_stats *stats, u64 *data)
831{
832 trace_rdev_get_et_stats(&rdev->wiphy, dev);
833 rdev->ops->get_et_stats(&rdev->wiphy, dev, stats, data);
834 trace_rdev_return_void(&rdev->wiphy);
835}
836
837static inline void rdev_get_et_strings(struct cfg80211_registered_device *rdev,
838 struct net_device *dev, u32 sset,
839 u8 *data)
840{
841 trace_rdev_get_et_strings(&rdev->wiphy, dev, sset);
842 rdev->ops->get_et_strings(&rdev->wiphy, dev, sset, data);
843 trace_rdev_return_void(&rdev->wiphy);
844}
845
846static inline int
847rdev_get_channel(struct cfg80211_registered_device *rdev,
848 struct wireless_dev *wdev,
849 struct cfg80211_chan_def *chandef)
850{
851 int ret;
852
853 trace_rdev_get_channel(&rdev->wiphy, wdev);
854 ret = rdev->ops->get_channel(&rdev->wiphy, wdev, chandef);
855 trace_rdev_return_chandef(&rdev->wiphy, ret, chandef);
856
857 return ret;
858}
859
860static inline int rdev_start_p2p_device(struct cfg80211_registered_device *rdev,
861 struct wireless_dev *wdev)
862{
863 int ret;
864
865 trace_rdev_start_p2p_device(&rdev->wiphy, wdev);
866 ret = rdev->ops->start_p2p_device(&rdev->wiphy, wdev);
867 trace_rdev_return_int(&rdev->wiphy, ret);
868 return ret;
869}
870
871static inline void rdev_stop_p2p_device(struct cfg80211_registered_device *rdev,
872 struct wireless_dev *wdev)
873{
874 trace_rdev_stop_p2p_device(&rdev->wiphy, wdev);
875 rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
876 trace_rdev_return_void(&rdev->wiphy);
877}
878#endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index bcc7d7ee5a51..82c4fc7c994c 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -141,9 +141,8 @@ static const struct ieee80211_regdomain world_regdom = {
141 .reg_rules = { 141 .reg_rules = {
142 /* IEEE 802.11b/g, channels 1..11 */ 142 /* IEEE 802.11b/g, channels 1..11 */
143 REG_RULE(2412-10, 2462+10, 40, 6, 20, 0), 143 REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
144 /* IEEE 802.11b/g, channels 12..13. No HT40 144 /* IEEE 802.11b/g, channels 12..13. */
145 * channel fits here. */ 145 REG_RULE(2467-10, 2472+10, 40, 6, 20,
146 REG_RULE(2467-10, 2472+10, 20, 6, 20,
147 NL80211_RRF_PASSIVE_SCAN | 146 NL80211_RRF_PASSIVE_SCAN |
148 NL80211_RRF_NO_IBSS), 147 NL80211_RRF_NO_IBSS),
149 /* IEEE 802.11 channel 14 - Only JP enables 148 /* IEEE 802.11 channel 14 - Only JP enables
@@ -1797,7 +1796,7 @@ EXPORT_SYMBOL(regulatory_hint);
1797 */ 1796 */
1798void regulatory_hint_11d(struct wiphy *wiphy, 1797void regulatory_hint_11d(struct wiphy *wiphy,
1799 enum ieee80211_band band, 1798 enum ieee80211_band band,
1800 u8 *country_ie, 1799 const u8 *country_ie,
1801 u8 country_ie_len) 1800 u8 country_ie_len)
1802{ 1801{
1803 char alpha2[2]; 1802 char alpha2[2];
@@ -2366,7 +2365,6 @@ int set_regdom(const struct ieee80211_regdomain *rd)
2366 return r; 2365 return r;
2367} 2366}
2368 2367
2369#ifdef CONFIG_HOTPLUG
2370int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env) 2368int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
2371{ 2369{
2372 if (last_request && !last_request->processed) { 2370 if (last_request && !last_request->processed) {
@@ -2378,12 +2376,6 @@ int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
2378 2376
2379 return 0; 2377 return 0;
2380} 2378}
2381#else
2382int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
2383{
2384 return -ENODEV;
2385}
2386#endif /* CONFIG_HOTPLUG */
2387 2379
2388void wiphy_regulatory_register(struct wiphy *wiphy) 2380void wiphy_regulatory_register(struct wiphy *wiphy)
2389{ 2381{
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index f023c8a31c60..4c0a32ffd530 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -81,7 +81,7 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
81 */ 81 */
82void regulatory_hint_11d(struct wiphy *wiphy, 82void regulatory_hint_11d(struct wiphy *wiphy,
83 enum ieee80211_band band, 83 enum ieee80211_band band,
84 u8 *country_ie, 84 const u8 *country_ie,
85 u8 country_ie_len); 85 u8 country_ie_len);
86 86
87/** 87/**
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 9730c9862bdc..01592d7d4789 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -17,9 +17,63 @@
17#include "core.h" 17#include "core.h"
18#include "nl80211.h" 18#include "nl80211.h"
19#include "wext-compat.h" 19#include "wext-compat.h"
20#include "rdev-ops.h"
20 21
21#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ) 22#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ)
22 23
24static void bss_release(struct kref *ref)
25{
26 struct cfg80211_bss_ies *ies;
27 struct cfg80211_internal_bss *bss;
28
29 bss = container_of(ref, struct cfg80211_internal_bss, ref);
30
31 if (WARN_ON(atomic_read(&bss->hold)))
32 return;
33
34 if (bss->pub.free_priv)
35 bss->pub.free_priv(&bss->pub);
36
37 ies = (void *)rcu_access_pointer(bss->pub.beacon_ies);
38 if (ies)
39 kfree_rcu(ies, rcu_head);
40 ies = (void *)rcu_access_pointer(bss->pub.proberesp_ies);
41 if (ies)
42 kfree_rcu(ies, rcu_head);
43
44 kfree(bss);
45}
46
47/* must hold dev->bss_lock! */
48static void __cfg80211_unlink_bss(struct cfg80211_registered_device *dev,
49 struct cfg80211_internal_bss *bss)
50{
51 list_del_init(&bss->list);
52 rb_erase(&bss->rbn, &dev->bss_tree);
53 kref_put(&bss->ref, bss_release);
54}
55
56/* must hold dev->bss_lock! */
57static void __cfg80211_bss_expire(struct cfg80211_registered_device *dev,
58 unsigned long expire_time)
59{
60 struct cfg80211_internal_bss *bss, *tmp;
61 bool expired = false;
62
63 list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) {
64 if (atomic_read(&bss->hold))
65 continue;
66 if (!time_after(expire_time, bss->ts))
67 continue;
68
69 __cfg80211_unlink_bss(dev, bss);
70 expired = true;
71 }
72
73 if (expired)
74 dev->bss_generation++;
75}
76
23void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) 77void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
24{ 78{
25 struct cfg80211_scan_request *request; 79 struct cfg80211_scan_request *request;
@@ -45,10 +99,17 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
45 if (wdev->netdev) 99 if (wdev->netdev)
46 cfg80211_sme_scan_done(wdev->netdev); 100 cfg80211_sme_scan_done(wdev->netdev);
47 101
48 if (request->aborted) 102 if (request->aborted) {
49 nl80211_send_scan_aborted(rdev, wdev); 103 nl80211_send_scan_aborted(rdev, wdev);
50 else 104 } else {
105 if (request->flags & NL80211_SCAN_FLAG_FLUSH) {
106 /* flush entries from previous scans */
107 spin_lock_bh(&rdev->bss_lock);
108 __cfg80211_bss_expire(rdev, request->scan_start);
109 spin_unlock_bh(&rdev->bss_lock);
110 }
51 nl80211_send_scan_done(rdev, wdev); 111 nl80211_send_scan_done(rdev, wdev);
112 }
52 113
53#ifdef CONFIG_CFG80211_WEXT 114#ifdef CONFIG_CFG80211_WEXT
54 if (wdev->netdev && !request->aborted) { 115 if (wdev->netdev && !request->aborted) {
@@ -89,6 +150,7 @@ void __cfg80211_scan_done(struct work_struct *wk)
89 150
90void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) 151void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
91{ 152{
153 trace_cfg80211_scan_done(request, aborted);
92 WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req); 154 WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req);
93 155
94 request->aborted = aborted; 156 request->aborted = aborted;
@@ -99,22 +161,34 @@ EXPORT_SYMBOL(cfg80211_scan_done);
99void __cfg80211_sched_scan_results(struct work_struct *wk) 161void __cfg80211_sched_scan_results(struct work_struct *wk)
100{ 162{
101 struct cfg80211_registered_device *rdev; 163 struct cfg80211_registered_device *rdev;
164 struct cfg80211_sched_scan_request *request;
102 165
103 rdev = container_of(wk, struct cfg80211_registered_device, 166 rdev = container_of(wk, struct cfg80211_registered_device,
104 sched_scan_results_wk); 167 sched_scan_results_wk);
105 168
169 request = rdev->sched_scan_req;
170
106 mutex_lock(&rdev->sched_scan_mtx); 171 mutex_lock(&rdev->sched_scan_mtx);
107 172
108 /* we don't have sched_scan_req anymore if the scan is stopping */ 173 /* we don't have sched_scan_req anymore if the scan is stopping */
109 if (rdev->sched_scan_req) 174 if (request) {
110 nl80211_send_sched_scan_results(rdev, 175 if (request->flags & NL80211_SCAN_FLAG_FLUSH) {
111 rdev->sched_scan_req->dev); 176 /* flush entries from previous scans */
177 spin_lock_bh(&rdev->bss_lock);
178 __cfg80211_bss_expire(rdev, request->scan_start);
179 spin_unlock_bh(&rdev->bss_lock);
180 request->scan_start =
181 jiffies + msecs_to_jiffies(request->interval);
182 }
183 nl80211_send_sched_scan_results(rdev, request->dev);
184 }
112 185
113 mutex_unlock(&rdev->sched_scan_mtx); 186 mutex_unlock(&rdev->sched_scan_mtx);
114} 187}
115 188
116void cfg80211_sched_scan_results(struct wiphy *wiphy) 189void cfg80211_sched_scan_results(struct wiphy *wiphy)
117{ 190{
191 trace_cfg80211_sched_scan_results(wiphy);
118 /* ignore if we're not scanning */ 192 /* ignore if we're not scanning */
119 if (wiphy_to_dev(wiphy)->sched_scan_req) 193 if (wiphy_to_dev(wiphy)->sched_scan_req)
120 queue_work(cfg80211_wq, 194 queue_work(cfg80211_wq,
@@ -126,6 +200,8 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
126{ 200{
127 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 201 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
128 202
203 trace_cfg80211_sched_scan_stopped(wiphy);
204
129 mutex_lock(&rdev->sched_scan_mtx); 205 mutex_lock(&rdev->sched_scan_mtx);
130 __cfg80211_stop_sched_scan(rdev, true); 206 __cfg80211_stop_sched_scan(rdev, true);
131 mutex_unlock(&rdev->sched_scan_mtx); 207 mutex_unlock(&rdev->sched_scan_mtx);
@@ -145,7 +221,7 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
145 dev = rdev->sched_scan_req->dev; 221 dev = rdev->sched_scan_req->dev;
146 222
147 if (!driver_initiated) { 223 if (!driver_initiated) {
148 int err = rdev->ops->sched_scan_stop(&rdev->wiphy, dev); 224 int err = rdev_sched_scan_stop(rdev, dev);
149 if (err) 225 if (err)
150 return err; 226 return err;
151 } 227 }
@@ -158,24 +234,6 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
158 return 0; 234 return 0;
159} 235}
160 236
161static void bss_release(struct kref *ref)
162{
163 struct cfg80211_internal_bss *bss;
164
165 bss = container_of(ref, struct cfg80211_internal_bss, ref);
166 if (bss->pub.free_priv)
167 bss->pub.free_priv(&bss->pub);
168
169 if (bss->beacon_ies_allocated)
170 kfree(bss->pub.beacon_ies);
171 if (bss->proberesp_ies_allocated)
172 kfree(bss->pub.proberesp_ies);
173
174 BUG_ON(atomic_read(&bss->hold));
175
176 kfree(bss);
177}
178
179/* must hold dev->bss_lock! */ 237/* must hold dev->bss_lock! */
180void cfg80211_bss_age(struct cfg80211_registered_device *dev, 238void cfg80211_bss_age(struct cfg80211_registered_device *dev,
181 unsigned long age_secs) 239 unsigned long age_secs)
@@ -183,37 +241,13 @@ void cfg80211_bss_age(struct cfg80211_registered_device *dev,
183 struct cfg80211_internal_bss *bss; 241 struct cfg80211_internal_bss *bss;
184 unsigned long age_jiffies = msecs_to_jiffies(age_secs * MSEC_PER_SEC); 242 unsigned long age_jiffies = msecs_to_jiffies(age_secs * MSEC_PER_SEC);
185 243
186 list_for_each_entry(bss, &dev->bss_list, list) { 244 list_for_each_entry(bss, &dev->bss_list, list)
187 bss->ts -= age_jiffies; 245 bss->ts -= age_jiffies;
188 }
189}
190
191/* must hold dev->bss_lock! */
192static void __cfg80211_unlink_bss(struct cfg80211_registered_device *dev,
193 struct cfg80211_internal_bss *bss)
194{
195 list_del_init(&bss->list);
196 rb_erase(&bss->rbn, &dev->bss_tree);
197 kref_put(&bss->ref, bss_release);
198} 246}
199 247
200/* must hold dev->bss_lock! */
201void cfg80211_bss_expire(struct cfg80211_registered_device *dev) 248void cfg80211_bss_expire(struct cfg80211_registered_device *dev)
202{ 249{
203 struct cfg80211_internal_bss *bss, *tmp; 250 __cfg80211_bss_expire(dev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE);
204 bool expired = false;
205
206 list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) {
207 if (atomic_read(&bss->hold))
208 continue;
209 if (!time_after(jiffies, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE))
210 continue;
211 __cfg80211_unlink_bss(dev, bss);
212 expired = true;
213 }
214
215 if (expired)
216 dev->bss_generation++;
217} 251}
218 252
219const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len) 253const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
@@ -257,7 +291,7 @@ const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type,
257} 291}
258EXPORT_SYMBOL(cfg80211_find_vendor_ie); 292EXPORT_SYMBOL(cfg80211_find_vendor_ie);
259 293
260static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2) 294static int cmp_ies(u8 num, const u8 *ies1, int len1, const u8 *ies2, int len2)
261{ 295{
262 const u8 *ie1 = cfg80211_find_ie(num, ies1, len1); 296 const u8 *ie1 = cfg80211_find_ie(num, ies1, len1);
263 const u8 *ie2 = cfg80211_find_ie(num, ies2, len2); 297 const u8 *ie2 = cfg80211_find_ie(num, ies2, len2);
@@ -277,10 +311,10 @@ static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2)
277 return memcmp(ie1 + 2, ie2 + 2, ie1[1]); 311 return memcmp(ie1 + 2, ie2 + 2, ie1[1]);
278} 312}
279 313
280static bool is_bss(struct cfg80211_bss *a, 314static bool is_bss(struct cfg80211_bss *a, const u8 *bssid,
281 const u8 *bssid,
282 const u8 *ssid, size_t ssid_len) 315 const u8 *ssid, size_t ssid_len)
283{ 316{
317 const struct cfg80211_bss_ies *ies;
284 const u8 *ssidie; 318 const u8 *ssidie;
285 319
286 if (bssid && !ether_addr_equal(a->bssid, bssid)) 320 if (bssid && !ether_addr_equal(a->bssid, bssid))
@@ -289,9 +323,10 @@ static bool is_bss(struct cfg80211_bss *a,
289 if (!ssid) 323 if (!ssid)
290 return true; 324 return true;
291 325
292 ssidie = cfg80211_find_ie(WLAN_EID_SSID, 326 ies = rcu_access_pointer(a->ies);
293 a->information_elements, 327 if (!ies)
294 a->len_information_elements); 328 return false;
329 ssidie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
295 if (!ssidie) 330 if (!ssidie)
296 return false; 331 return false;
297 if (ssidie[1] != ssid_len) 332 if (ssidie[1] != ssid_len)
@@ -301,20 +336,21 @@ static bool is_bss(struct cfg80211_bss *a,
301 336
302static bool is_mesh_bss(struct cfg80211_bss *a) 337static bool is_mesh_bss(struct cfg80211_bss *a)
303{ 338{
339 const struct cfg80211_bss_ies *ies;
304 const u8 *ie; 340 const u8 *ie;
305 341
306 if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability)) 342 if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability))
307 return false; 343 return false;
308 344
309 ie = cfg80211_find_ie(WLAN_EID_MESH_ID, 345 ies = rcu_access_pointer(a->ies);
310 a->information_elements, 346 if (!ies)
311 a->len_information_elements); 347 return false;
348
349 ie = cfg80211_find_ie(WLAN_EID_MESH_ID, ies->data, ies->len);
312 if (!ie) 350 if (!ie)
313 return false; 351 return false;
314 352
315 ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG, 353 ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG, ies->data, ies->len);
316 a->information_elements,
317 a->len_information_elements);
318 if (!ie) 354 if (!ie)
319 return false; 355 return false;
320 356
@@ -325,14 +361,17 @@ static bool is_mesh(struct cfg80211_bss *a,
325 const u8 *meshid, size_t meshidlen, 361 const u8 *meshid, size_t meshidlen,
326 const u8 *meshcfg) 362 const u8 *meshcfg)
327{ 363{
364 const struct cfg80211_bss_ies *ies;
328 const u8 *ie; 365 const u8 *ie;
329 366
330 if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability)) 367 if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability))
331 return false; 368 return false;
332 369
333 ie = cfg80211_find_ie(WLAN_EID_MESH_ID, 370 ies = rcu_access_pointer(a->ies);
334 a->information_elements, 371 if (!ies)
335 a->len_information_elements); 372 return false;
373
374 ie = cfg80211_find_ie(WLAN_EID_MESH_ID, ies->data, ies->len);
336 if (!ie) 375 if (!ie)
337 return false; 376 return false;
338 if (ie[1] != meshidlen) 377 if (ie[1] != meshidlen)
@@ -340,9 +379,7 @@ static bool is_mesh(struct cfg80211_bss *a,
340 if (memcmp(ie + 2, meshid, meshidlen)) 379 if (memcmp(ie + 2, meshid, meshidlen))
341 return false; 380 return false;
342 381
343 ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG, 382 ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG, ies->data, ies->len);
344 a->information_elements,
345 a->len_information_elements);
346 if (!ie) 383 if (!ie)
347 return false; 384 return false;
348 if (ie[1] != sizeof(struct ieee80211_meshconf_ie)) 385 if (ie[1] != sizeof(struct ieee80211_meshconf_ie))
@@ -354,30 +391,33 @@ static bool is_mesh(struct cfg80211_bss *a,
354 * part in the same mesh. 391 * part in the same mesh.
355 */ 392 */
356 return memcmp(ie + 2, meshcfg, 393 return memcmp(ie + 2, meshcfg,
357 sizeof(struct ieee80211_meshconf_ie) - 2) == 0; 394 sizeof(struct ieee80211_meshconf_ie) - 2) == 0;
358} 395}
359 396
360static int cmp_bss_core(struct cfg80211_bss *a, 397static int cmp_bss_core(struct cfg80211_bss *a, struct cfg80211_bss *b)
361 struct cfg80211_bss *b)
362{ 398{
399 const struct cfg80211_bss_ies *a_ies, *b_ies;
363 int r; 400 int r;
364 401
365 if (a->channel != b->channel) 402 if (a->channel != b->channel)
366 return b->channel->center_freq - a->channel->center_freq; 403 return b->channel->center_freq - a->channel->center_freq;
367 404
368 if (is_mesh_bss(a) && is_mesh_bss(b)) { 405 if (is_mesh_bss(a) && is_mesh_bss(b)) {
406 a_ies = rcu_access_pointer(a->ies);
407 if (!a_ies)
408 return -1;
409 b_ies = rcu_access_pointer(b->ies);
410 if (!b_ies)
411 return 1;
412
369 r = cmp_ies(WLAN_EID_MESH_ID, 413 r = cmp_ies(WLAN_EID_MESH_ID,
370 a->information_elements, 414 a_ies->data, a_ies->len,
371 a->len_information_elements, 415 b_ies->data, b_ies->len);
372 b->information_elements,
373 b->len_information_elements);
374 if (r) 416 if (r)
375 return r; 417 return r;
376 return cmp_ies(WLAN_EID_MESH_CONFIG, 418 return cmp_ies(WLAN_EID_MESH_CONFIG,
377 a->information_elements, 419 a_ies->data, a_ies->len,
378 a->len_information_elements, 420 b_ies->data, b_ies->len);
379 b->information_elements,
380 b->len_information_elements);
381 } 421 }
382 422
383 /* 423 /*
@@ -390,22 +430,28 @@ static int cmp_bss_core(struct cfg80211_bss *a,
390static int cmp_bss(struct cfg80211_bss *a, 430static int cmp_bss(struct cfg80211_bss *a,
391 struct cfg80211_bss *b) 431 struct cfg80211_bss *b)
392{ 432{
433 const struct cfg80211_bss_ies *a_ies, *b_ies;
393 int r; 434 int r;
394 435
395 r = cmp_bss_core(a, b); 436 r = cmp_bss_core(a, b);
396 if (r) 437 if (r)
397 return r; 438 return r;
398 439
440 a_ies = rcu_access_pointer(a->ies);
441 if (!a_ies)
442 return -1;
443 b_ies = rcu_access_pointer(b->ies);
444 if (!b_ies)
445 return 1;
446
399 return cmp_ies(WLAN_EID_SSID, 447 return cmp_ies(WLAN_EID_SSID,
400 a->information_elements, 448 a_ies->data, a_ies->len,
401 a->len_information_elements, 449 b_ies->data, b_ies->len);
402 b->information_elements,
403 b->len_information_elements);
404} 450}
405 451
406static int cmp_hidden_bss(struct cfg80211_bss *a, 452static int cmp_hidden_bss(struct cfg80211_bss *a, struct cfg80211_bss *b)
407 struct cfg80211_bss *b)
408{ 453{
454 const struct cfg80211_bss_ies *a_ies, *b_ies;
409 const u8 *ie1; 455 const u8 *ie1;
410 const u8 *ie2; 456 const u8 *ie2;
411 int i; 457 int i;
@@ -415,17 +461,26 @@ static int cmp_hidden_bss(struct cfg80211_bss *a,
415 if (r) 461 if (r)
416 return r; 462 return r;
417 463
418 ie1 = cfg80211_find_ie(WLAN_EID_SSID, 464 a_ies = rcu_access_pointer(a->ies);
419 a->information_elements, 465 if (!a_ies)
420 a->len_information_elements); 466 return -1;
421 ie2 = cfg80211_find_ie(WLAN_EID_SSID, 467 b_ies = rcu_access_pointer(b->ies);
422 b->information_elements, 468 if (!b_ies)
423 b->len_information_elements); 469 return 1;
470
471 ie1 = cfg80211_find_ie(WLAN_EID_SSID, a_ies->data, a_ies->len);
472 ie2 = cfg80211_find_ie(WLAN_EID_SSID, b_ies->data, b_ies->len);
424 473
425 /* Key comparator must use same algorithm in any rb-tree 474 /*
475 * Key comparator must use same algorithm in any rb-tree
426 * search function (order is important), otherwise ordering 476 * search function (order is important), otherwise ordering
427 * of items in the tree is broken and search gives incorrect 477 * of items in the tree is broken and search gives incorrect
428 * results. This code uses same order as cmp_ies() does. */ 478 * results. This code uses same order as cmp_ies() does.
479 *
480 * Note that due to the differring behaviour with hidden SSIDs
481 * this function only works when "b" is the tree element and
482 * "a" is the key we're looking for.
483 */
429 484
430 /* sort missing IE before (left of) present IE */ 485 /* sort missing IE before (left of) present IE */
431 if (!ie1) 486 if (!ie1)
@@ -441,10 +496,14 @@ static int cmp_hidden_bss(struct cfg80211_bss *a,
441 if (ie1[1] != ie2[1]) 496 if (ie1[1] != ie2[1])
442 return ie2[1] - ie1[1]; 497 return ie2[1] - ie1[1];
443 498
444 /* zeroed SSID ie is another indication of a hidden bss */ 499 /*
500 * zeroed SSID ie is another indication of a hidden bss;
501 * if it isn't zeroed just return the regular sort value
502 * to find the next candidate
503 */
445 for (i = 0; i < ie2[1]; i++) 504 for (i = 0; i < ie2[1]; i++)
446 if (ie2[i + 2]) 505 if (ie2[i + 2])
447 return -1; 506 return memcmp(ie1 + 2, ie2 + 2, ie1[1]);
448 507
449 return 0; 508 return 0;
450} 509}
@@ -459,6 +518,9 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
459 struct cfg80211_internal_bss *bss, *res = NULL; 518 struct cfg80211_internal_bss *bss, *res = NULL;
460 unsigned long now = jiffies; 519 unsigned long now = jiffies;
461 520
521 trace_cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, capa_mask,
522 capa_val);
523
462 spin_lock_bh(&dev->bss_lock); 524 spin_lock_bh(&dev->bss_lock);
463 525
464 list_for_each_entry(bss, &dev->bss_list, list) { 526 list_for_each_entry(bss, &dev->bss_list, list) {
@@ -480,6 +542,7 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
480 spin_unlock_bh(&dev->bss_lock); 542 spin_unlock_bh(&dev->bss_lock);
481 if (!res) 543 if (!res)
482 return NULL; 544 return NULL;
545 trace_cfg80211_return_bss(&res->pub);
483 return &res->pub; 546 return &res->pub;
484} 547}
485EXPORT_SYMBOL(cfg80211_get_bss); 548EXPORT_SYMBOL(cfg80211_get_bss);
@@ -566,7 +629,7 @@ rb_find_bss(struct cfg80211_registered_device *dev,
566 629
567static struct cfg80211_internal_bss * 630static struct cfg80211_internal_bss *
568rb_find_hidden_bss(struct cfg80211_registered_device *dev, 631rb_find_hidden_bss(struct cfg80211_registered_device *dev,
569 struct cfg80211_internal_bss *res) 632 struct cfg80211_internal_bss *res)
570{ 633{
571 struct rb_node *n = dev->bss_tree.rb_node; 634 struct rb_node *n = dev->bss_tree.rb_node;
572 struct cfg80211_internal_bss *bss; 635 struct cfg80211_internal_bss *bss;
@@ -589,127 +652,86 @@ rb_find_hidden_bss(struct cfg80211_registered_device *dev,
589 652
590static void 653static void
591copy_hidden_ies(struct cfg80211_internal_bss *res, 654copy_hidden_ies(struct cfg80211_internal_bss *res,
592 struct cfg80211_internal_bss *hidden) 655 struct cfg80211_internal_bss *hidden)
593{ 656{
594 if (unlikely(res->pub.beacon_ies)) 657 const struct cfg80211_bss_ies *ies;
595 return; 658
596 if (WARN_ON(!hidden->pub.beacon_ies)) 659 if (rcu_access_pointer(res->pub.beacon_ies))
597 return; 660 return;
598 661
599 res->pub.beacon_ies = kmalloc(hidden->pub.len_beacon_ies, GFP_ATOMIC); 662 ies = rcu_access_pointer(hidden->pub.beacon_ies);
600 if (unlikely(!res->pub.beacon_ies)) 663 if (WARN_ON(!ies))
601 return; 664 return;
602 665
603 res->beacon_ies_allocated = true; 666 ies = kmemdup(ies, sizeof(*ies) + ies->len, GFP_ATOMIC);
604 res->pub.len_beacon_ies = hidden->pub.len_beacon_ies; 667 if (unlikely(!ies))
605 memcpy(res->pub.beacon_ies, hidden->pub.beacon_ies, 668 return;
606 res->pub.len_beacon_ies); 669 rcu_assign_pointer(res->pub.beacon_ies, ies);
607} 670}
608 671
609static struct cfg80211_internal_bss * 672static struct cfg80211_internal_bss *
610cfg80211_bss_update(struct cfg80211_registered_device *dev, 673cfg80211_bss_update(struct cfg80211_registered_device *dev,
611 struct cfg80211_internal_bss *res) 674 struct cfg80211_internal_bss *tmp)
612{ 675{
613 struct cfg80211_internal_bss *found = NULL; 676 struct cfg80211_internal_bss *found = NULL;
614 677
615 /* 678 if (WARN_ON(!tmp->pub.channel))
616 * The reference to "res" is donated to this function.
617 */
618
619 if (WARN_ON(!res->pub.channel)) {
620 kref_put(&res->ref, bss_release);
621 return NULL; 679 return NULL;
622 }
623 680
624 res->ts = jiffies; 681 tmp->ts = jiffies;
625 682
626 spin_lock_bh(&dev->bss_lock); 683 spin_lock_bh(&dev->bss_lock);
627 684
628 found = rb_find_bss(dev, res); 685 if (WARN_ON(!rcu_access_pointer(tmp->pub.ies))) {
686 spin_unlock_bh(&dev->bss_lock);
687 return NULL;
688 }
689
690 found = rb_find_bss(dev, tmp);
629 691
630 if (found) { 692 if (found) {
631 found->pub.beacon_interval = res->pub.beacon_interval; 693 found->pub.beacon_interval = tmp->pub.beacon_interval;
632 found->pub.tsf = res->pub.tsf; 694 found->pub.tsf = tmp->pub.tsf;
633 found->pub.signal = res->pub.signal; 695 found->pub.signal = tmp->pub.signal;
634 found->pub.capability = res->pub.capability; 696 found->pub.capability = tmp->pub.capability;
635 found->ts = res->ts; 697 found->ts = tmp->ts;
636 698
637 /* Update IEs */ 699 /* Update IEs */
638 if (res->pub.proberesp_ies) { 700 if (rcu_access_pointer(tmp->pub.proberesp_ies)) {
639 size_t used = dev->wiphy.bss_priv_size + sizeof(*res); 701 const struct cfg80211_bss_ies *old;
640 size_t ielen = res->pub.len_proberesp_ies; 702
641 703 old = rcu_access_pointer(found->pub.proberesp_ies);
642 if (found->pub.proberesp_ies &&
643 !found->proberesp_ies_allocated &&
644 ksize(found) >= used + ielen) {
645 memcpy(found->pub.proberesp_ies,
646 res->pub.proberesp_ies, ielen);
647 found->pub.len_proberesp_ies = ielen;
648 } else {
649 u8 *ies = found->pub.proberesp_ies;
650
651 if (found->proberesp_ies_allocated)
652 ies = krealloc(ies, ielen, GFP_ATOMIC);
653 else
654 ies = kmalloc(ielen, GFP_ATOMIC);
655
656 if (ies) {
657 memcpy(ies, res->pub.proberesp_ies,
658 ielen);
659 found->proberesp_ies_allocated = true;
660 found->pub.proberesp_ies = ies;
661 found->pub.len_proberesp_ies = ielen;
662 }
663 }
664 704
705 rcu_assign_pointer(found->pub.proberesp_ies,
706 tmp->pub.proberesp_ies);
665 /* Override possible earlier Beacon frame IEs */ 707 /* Override possible earlier Beacon frame IEs */
666 found->pub.information_elements = 708 rcu_assign_pointer(found->pub.ies,
667 found->pub.proberesp_ies; 709 tmp->pub.proberesp_ies);
668 found->pub.len_information_elements = 710 if (old)
669 found->pub.len_proberesp_ies; 711 kfree_rcu((struct cfg80211_bss_ies *)old,
670 } 712 rcu_head);
671 if (res->pub.beacon_ies) { 713 } else if (rcu_access_pointer(tmp->pub.beacon_ies)) {
672 size_t used = dev->wiphy.bss_priv_size + sizeof(*res); 714 const struct cfg80211_bss_ies *old, *ies;
673 size_t ielen = res->pub.len_beacon_ies; 715
674 bool information_elements_is_beacon_ies = 716 old = rcu_access_pointer(found->pub.beacon_ies);
675 (found->pub.information_elements == 717 ies = rcu_access_pointer(found->pub.ies);
676 found->pub.beacon_ies); 718
677 719 rcu_assign_pointer(found->pub.beacon_ies,
678 if (found->pub.beacon_ies && 720 tmp->pub.beacon_ies);
679 !found->beacon_ies_allocated &&
680 ksize(found) >= used + ielen) {
681 memcpy(found->pub.beacon_ies,
682 res->pub.beacon_ies, ielen);
683 found->pub.len_beacon_ies = ielen;
684 } else {
685 u8 *ies = found->pub.beacon_ies;
686
687 if (found->beacon_ies_allocated)
688 ies = krealloc(ies, ielen, GFP_ATOMIC);
689 else
690 ies = kmalloc(ielen, GFP_ATOMIC);
691
692 if (ies) {
693 memcpy(ies, res->pub.beacon_ies,
694 ielen);
695 found->beacon_ies_allocated = true;
696 found->pub.beacon_ies = ies;
697 found->pub.len_beacon_ies = ielen;
698 }
699 }
700 721
701 /* Override IEs if they were from a beacon before */ 722 /* Override IEs if they were from a beacon before */
702 if (information_elements_is_beacon_ies) { 723 if (old == ies)
703 found->pub.information_elements = 724 rcu_assign_pointer(found->pub.ies,
704 found->pub.beacon_ies; 725 tmp->pub.beacon_ies);
705 found->pub.len_information_elements =
706 found->pub.len_beacon_ies;
707 }
708 }
709 726
710 kref_put(&res->ref, bss_release); 727 if (old)
728 kfree_rcu((struct cfg80211_bss_ies *)old,
729 rcu_head);
730 }
711 } else { 731 } else {
732 struct cfg80211_internal_bss *new;
712 struct cfg80211_internal_bss *hidden; 733 struct cfg80211_internal_bss *hidden;
734 struct cfg80211_bss_ies *ies;
713 735
714 /* First check if the beacon is a probe response from 736 /* First check if the beacon is a probe response from
715 * a hidden bss. If so, copy beacon ies (with nullified 737 * a hidden bss. If so, copy beacon ies (with nullified
@@ -720,14 +742,32 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
720 /* TODO: The code is not trying to update existing probe 742 /* TODO: The code is not trying to update existing probe
721 * response bss entries when beacon ies are 743 * response bss entries when beacon ies are
722 * getting changed. */ 744 * getting changed. */
723 hidden = rb_find_hidden_bss(dev, res); 745 hidden = rb_find_hidden_bss(dev, tmp);
724 if (hidden) 746 if (hidden)
725 copy_hidden_ies(res, hidden); 747 copy_hidden_ies(tmp, hidden);
726 748
727 /* this "consumes" the reference */ 749 /*
728 list_add_tail(&res->list, &dev->bss_list); 750 * create a copy -- the "res" variable that is passed in
729 rb_insert_bss(dev, res); 751 * is allocated on the stack since it's not needed in the
730 found = res; 752 * more common case of an update
753 */
754 new = kzalloc(sizeof(*new) + dev->wiphy.bss_priv_size,
755 GFP_ATOMIC);
756 if (!new) {
757 ies = (void *)rcu_dereference(tmp->pub.beacon_ies);
758 if (ies)
759 kfree_rcu(ies, rcu_head);
760 ies = (void *)rcu_dereference(tmp->pub.proberesp_ies);
761 if (ies)
762 kfree_rcu(ies, rcu_head);
763 spin_unlock_bh(&dev->bss_lock);
764 return NULL;
765 }
766 memcpy(new, tmp, sizeof(*new));
767 kref_init(&new->ref);
768 list_add_tail(&new->list, &dev->bss_list);
769 rb_insert_bss(dev, new);
770 found = new;
731 } 771 }
732 772
733 dev->bss_generation++; 773 dev->bss_generation++;
@@ -737,6 +777,38 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
737 return found; 777 return found;
738} 778}
739 779
780static struct ieee80211_channel *
781cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
782 struct ieee80211_channel *channel)
783{
784 const u8 *tmp;
785 u32 freq;
786 int channel_number = -1;
787
788 tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen);
789 if (tmp && tmp[1] == 1) {
790 channel_number = tmp[2];
791 } else {
792 tmp = cfg80211_find_ie(WLAN_EID_HT_OPERATION, ie, ielen);
793 if (tmp && tmp[1] >= sizeof(struct ieee80211_ht_operation)) {
794 struct ieee80211_ht_operation *htop = (void *)(tmp + 2);
795
796 channel_number = htop->primary_chan;
797 }
798 }
799
800 if (channel_number < 0)
801 return channel;
802
803 freq = ieee80211_channel_to_frequency(channel_number, channel->band);
804 channel = ieee80211_get_channel(wiphy, freq);
805 if (!channel)
806 return NULL;
807 if (channel->flags & IEEE80211_CHAN_DISABLED)
808 return NULL;
809 return channel;
810}
811
740struct cfg80211_bss* 812struct cfg80211_bss*
741cfg80211_inform_bss(struct wiphy *wiphy, 813cfg80211_inform_bss(struct wiphy *wiphy,
742 struct ieee80211_channel *channel, 814 struct ieee80211_channel *channel,
@@ -744,54 +816,54 @@ cfg80211_inform_bss(struct wiphy *wiphy,
744 u16 beacon_interval, const u8 *ie, size_t ielen, 816 u16 beacon_interval, const u8 *ie, size_t ielen,
745 s32 signal, gfp_t gfp) 817 s32 signal, gfp_t gfp)
746{ 818{
747 struct cfg80211_internal_bss *res; 819 struct cfg80211_bss_ies *ies;
748 size_t privsz; 820 struct cfg80211_internal_bss tmp = {}, *res;
749 821
750 if (WARN_ON(!wiphy)) 822 if (WARN_ON(!wiphy))
751 return NULL; 823 return NULL;
752 824
753 privsz = wiphy->bss_priv_size;
754
755 if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC && 825 if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
756 (signal < 0 || signal > 100))) 826 (signal < 0 || signal > 100)))
757 return NULL; 827 return NULL;
758 828
759 res = kzalloc(sizeof(*res) + privsz + ielen, gfp); 829 channel = cfg80211_get_bss_channel(wiphy, ie, ielen, channel);
760 if (!res) 830 if (!channel)
761 return NULL; 831 return NULL;
762 832
763 memcpy(res->pub.bssid, bssid, ETH_ALEN); 833 memcpy(tmp.pub.bssid, bssid, ETH_ALEN);
764 res->pub.channel = channel; 834 tmp.pub.channel = channel;
765 res->pub.signal = signal; 835 tmp.pub.signal = signal;
766 res->pub.tsf = tsf; 836 tmp.pub.tsf = tsf;
767 res->pub.beacon_interval = beacon_interval; 837 tmp.pub.beacon_interval = beacon_interval;
768 res->pub.capability = capability; 838 tmp.pub.capability = capability;
769 /* 839 /*
770 * Since we do not know here whether the IEs are from a Beacon or Probe 840 * Since we do not know here whether the IEs are from a Beacon or Probe
771 * Response frame, we need to pick one of the options and only use it 841 * Response frame, we need to pick one of the options and only use it
772 * with the driver that does not provide the full Beacon/Probe Response 842 * with the driver that does not provide the full Beacon/Probe Response
773 * frame. Use Beacon frame pointer to avoid indicating that this should 843 * frame. Use Beacon frame pointer to avoid indicating that this should
774 * override the information_elements pointer should we have received an 844 * override the iies pointer should we have received an earlier
775 * earlier indication of Probe Response data. 845 * indication of Probe Response data.
776 * 846 *
777 * The initial buffer for the IEs is allocated with the BSS entry and 847 * The initial buffer for the IEs is allocated with the BSS entry and
778 * is located after the private area. 848 * is located after the private area.
779 */ 849 */
780 res->pub.beacon_ies = (u8 *)res + sizeof(*res) + privsz; 850 ies = kmalloc(sizeof(*ies) + ielen, gfp);
781 memcpy(res->pub.beacon_ies, ie, ielen); 851 if (!ies)
782 res->pub.len_beacon_ies = ielen; 852 return NULL;
783 res->pub.information_elements = res->pub.beacon_ies; 853 ies->len = ielen;
784 res->pub.len_information_elements = res->pub.len_beacon_ies; 854 memcpy(ies->data, ie, ielen);
785 855
786 kref_init(&res->ref); 856 rcu_assign_pointer(tmp.pub.beacon_ies, ies);
857 rcu_assign_pointer(tmp.pub.ies, ies);
787 858
788 res = cfg80211_bss_update(wiphy_to_dev(wiphy), res); 859 res = cfg80211_bss_update(wiphy_to_dev(wiphy), &tmp);
789 if (!res) 860 if (!res)
790 return NULL; 861 return NULL;
791 862
792 if (res->pub.capability & WLAN_CAPABILITY_ESS) 863 if (res->pub.capability & WLAN_CAPABILITY_ESS)
793 regulatory_hint_found_beacon(wiphy, channel, gfp); 864 regulatory_hint_found_beacon(wiphy, channel, gfp);
794 865
866 trace_cfg80211_return_bss(&res->pub);
795 /* cfg80211_bss_update gives us a referenced result */ 867 /* cfg80211_bss_update gives us a referenced result */
796 return &res->pub; 868 return &res->pub;
797} 869}
@@ -803,10 +875,15 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
803 struct ieee80211_mgmt *mgmt, size_t len, 875 struct ieee80211_mgmt *mgmt, size_t len,
804 s32 signal, gfp_t gfp) 876 s32 signal, gfp_t gfp)
805{ 877{
806 struct cfg80211_internal_bss *res; 878 struct cfg80211_internal_bss tmp = {}, *res;
879 struct cfg80211_bss_ies *ies;
807 size_t ielen = len - offsetof(struct ieee80211_mgmt, 880 size_t ielen = len - offsetof(struct ieee80211_mgmt,
808 u.probe_resp.variable); 881 u.probe_resp.variable);
809 size_t privsz; 882
883 BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
884 offsetof(struct ieee80211_mgmt, u.beacon.variable));
885
886 trace_cfg80211_inform_bss_frame(wiphy, channel, mgmt, len, signal);
810 887
811 if (WARN_ON(!mgmt)) 888 if (WARN_ON(!mgmt))
812 return NULL; 889 return NULL;
@@ -821,46 +898,38 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
821 if (WARN_ON(len < offsetof(struct ieee80211_mgmt, u.probe_resp.variable))) 898 if (WARN_ON(len < offsetof(struct ieee80211_mgmt, u.probe_resp.variable)))
822 return NULL; 899 return NULL;
823 900
824 privsz = wiphy->bss_priv_size; 901 channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable,
825 902 ielen, channel);
826 res = kzalloc(sizeof(*res) + privsz + ielen, gfp); 903 if (!channel)
827 if (!res)
828 return NULL; 904 return NULL;
829 905
830 memcpy(res->pub.bssid, mgmt->bssid, ETH_ALEN); 906 ies = kmalloc(sizeof(*ies) + ielen, gfp);
831 res->pub.channel = channel; 907 if (!ies)
832 res->pub.signal = signal; 908 return NULL;
833 res->pub.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp); 909 ies->len = ielen;
834 res->pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int); 910 memcpy(ies->data, mgmt->u.probe_resp.variable, ielen);
835 res->pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
836 /*
837 * The initial buffer for the IEs is allocated with the BSS entry and
838 * is located after the private area.
839 */
840 if (ieee80211_is_probe_resp(mgmt->frame_control)) {
841 res->pub.proberesp_ies = (u8 *) res + sizeof(*res) + privsz;
842 memcpy(res->pub.proberesp_ies, mgmt->u.probe_resp.variable,
843 ielen);
844 res->pub.len_proberesp_ies = ielen;
845 res->pub.information_elements = res->pub.proberesp_ies;
846 res->pub.len_information_elements = res->pub.len_proberesp_ies;
847 } else {
848 res->pub.beacon_ies = (u8 *) res + sizeof(*res) + privsz;
849 memcpy(res->pub.beacon_ies, mgmt->u.beacon.variable, ielen);
850 res->pub.len_beacon_ies = ielen;
851 res->pub.information_elements = res->pub.beacon_ies;
852 res->pub.len_information_elements = res->pub.len_beacon_ies;
853 }
854
855 kref_init(&res->ref);
856 911
857 res = cfg80211_bss_update(wiphy_to_dev(wiphy), res); 912 if (ieee80211_is_probe_resp(mgmt->frame_control))
913 rcu_assign_pointer(tmp.pub.proberesp_ies, ies);
914 else
915 rcu_assign_pointer(tmp.pub.beacon_ies, ies);
916 rcu_assign_pointer(tmp.pub.ies, ies);
917
918 memcpy(tmp.pub.bssid, mgmt->bssid, ETH_ALEN);
919 tmp.pub.channel = channel;
920 tmp.pub.signal = signal;
921 tmp.pub.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
922 tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
923 tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
924
925 res = cfg80211_bss_update(wiphy_to_dev(wiphy), &tmp);
858 if (!res) 926 if (!res)
859 return NULL; 927 return NULL;
860 928
861 if (res->pub.capability & WLAN_CAPABILITY_ESS) 929 if (res->pub.capability & WLAN_CAPABILITY_ESS)
862 regulatory_hint_found_beacon(wiphy, channel, gfp); 930 regulatory_hint_found_beacon(wiphy, channel, gfp);
863 931
932 trace_cfg80211_return_bss(&res->pub);
864 /* cfg80211_bss_update gives us a referenced result */ 933 /* cfg80211_bss_update gives us a referenced result */
865 return &res->pub; 934 return &res->pub;
866} 935}
@@ -962,6 +1031,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
962 creq->ssids = (void *)&creq->channels[n_channels]; 1031 creq->ssids = (void *)&creq->channels[n_channels];
963 creq->n_channels = n_channels; 1032 creq->n_channels = n_channels;
964 creq->n_ssids = 1; 1033 creq->n_ssids = 1;
1034 creq->scan_start = jiffies;
965 1035
966 /* translate "Scan on frequencies" request */ 1036 /* translate "Scan on frequencies" request */
967 i = 0; 1037 i = 0;
@@ -1026,7 +1096,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
1026 creq->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1; 1096 creq->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;
1027 1097
1028 rdev->scan_req = creq; 1098 rdev->scan_req = creq;
1029 err = rdev->ops->scan(wiphy, creq); 1099 err = rdev_scan(rdev, creq);
1030 if (err) { 1100 if (err) {
1031 rdev->scan_req = NULL; 1101 rdev->scan_req = NULL;
1032 /* creq will be freed below */ 1102 /* creq will be freed below */
@@ -1044,22 +1114,21 @@ int cfg80211_wext_siwscan(struct net_device *dev,
1044EXPORT_SYMBOL_GPL(cfg80211_wext_siwscan); 1114EXPORT_SYMBOL_GPL(cfg80211_wext_siwscan);
1045 1115
1046static void ieee80211_scan_add_ies(struct iw_request_info *info, 1116static void ieee80211_scan_add_ies(struct iw_request_info *info,
1047 struct cfg80211_bss *bss, 1117 const struct cfg80211_bss_ies *ies,
1048 char **current_ev, char *end_buf) 1118 char **current_ev, char *end_buf)
1049{ 1119{
1050 u8 *pos, *end, *next; 1120 const u8 *pos, *end, *next;
1051 struct iw_event iwe; 1121 struct iw_event iwe;
1052 1122
1053 if (!bss->information_elements || 1123 if (!ies)
1054 !bss->len_information_elements)
1055 return; 1124 return;
1056 1125
1057 /* 1126 /*
1058 * If needed, fragment the IEs buffer (at IE boundaries) into short 1127 * If needed, fragment the IEs buffer (at IE boundaries) into short
1059 * enough fragments to fit into IW_GENERIC_IE_MAX octet messages. 1128 * enough fragments to fit into IW_GENERIC_IE_MAX octet messages.
1060 */ 1129 */
1061 pos = bss->information_elements; 1130 pos = ies->data;
1062 end = pos + bss->len_information_elements; 1131 end = pos + ies->len;
1063 1132
1064 while (end - pos > IW_GENERIC_IE_MAX) { 1133 while (end - pos > IW_GENERIC_IE_MAX) {
1065 next = pos + 2 + pos[1]; 1134 next = pos + 2 + pos[1];
@@ -1070,7 +1139,8 @@ static void ieee80211_scan_add_ies(struct iw_request_info *info,
1070 iwe.cmd = IWEVGENIE; 1139 iwe.cmd = IWEVGENIE;
1071 iwe.u.data.length = next - pos; 1140 iwe.u.data.length = next - pos;
1072 *current_ev = iwe_stream_add_point(info, *current_ev, 1141 *current_ev = iwe_stream_add_point(info, *current_ev,
1073 end_buf, &iwe, pos); 1142 end_buf, &iwe,
1143 (void *)pos);
1074 1144
1075 pos = next; 1145 pos = next;
1076 } 1146 }
@@ -1080,7 +1150,8 @@ static void ieee80211_scan_add_ies(struct iw_request_info *info,
1080 iwe.cmd = IWEVGENIE; 1150 iwe.cmd = IWEVGENIE;
1081 iwe.u.data.length = end - pos; 1151 iwe.u.data.length = end - pos;
1082 *current_ev = iwe_stream_add_point(info, *current_ev, 1152 *current_ev = iwe_stream_add_point(info, *current_ev,
1083 end_buf, &iwe, pos); 1153 end_buf, &iwe,
1154 (void *)pos);
1084 } 1155 }
1085} 1156}
1086 1157
@@ -1099,10 +1170,11 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
1099 struct cfg80211_internal_bss *bss, char *current_ev, 1170 struct cfg80211_internal_bss *bss, char *current_ev,
1100 char *end_buf) 1171 char *end_buf)
1101{ 1172{
1173 const struct cfg80211_bss_ies *ies;
1102 struct iw_event iwe; 1174 struct iw_event iwe;
1175 const u8 *ie;
1103 u8 *buf, *cfg, *p; 1176 u8 *buf, *cfg, *p;
1104 u8 *ie = bss->pub.information_elements; 1177 int rem, i, sig;
1105 int rem = bss->pub.len_information_elements, i, sig;
1106 bool ismesh = false; 1178 bool ismesh = false;
1107 1179
1108 memset(&iwe, 0, sizeof(iwe)); 1180 memset(&iwe, 0, sizeof(iwe));
@@ -1167,7 +1239,17 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
1167 current_ev = iwe_stream_add_point(info, current_ev, end_buf, 1239 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
1168 &iwe, ""); 1240 &iwe, "");
1169 1241
1170 while (rem >= 2) { 1242 rcu_read_lock();
1243 ies = rcu_dereference(bss->pub.ies);
1244 if (ies) {
1245 rem = ies->len;
1246 ie = ies->data;
1247 } else {
1248 rem = 0;
1249 ie = NULL;
1250 }
1251
1252 while (ies && rem >= 2) {
1171 /* invalid data */ 1253 /* invalid data */
1172 if (ie[1] > rem - 2) 1254 if (ie[1] > rem - 2)
1173 break; 1255 break;
@@ -1179,7 +1261,7 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
1179 iwe.u.data.length = ie[1]; 1261 iwe.u.data.length = ie[1];
1180 iwe.u.data.flags = 1; 1262 iwe.u.data.flags = 1;
1181 current_ev = iwe_stream_add_point(info, current_ev, end_buf, 1263 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
1182 &iwe, ie + 2); 1264 &iwe, (u8 *)ie + 2);
1183 break; 1265 break;
1184 case WLAN_EID_MESH_ID: 1266 case WLAN_EID_MESH_ID:
1185 memset(&iwe, 0, sizeof(iwe)); 1267 memset(&iwe, 0, sizeof(iwe));
@@ -1187,7 +1269,7 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
1187 iwe.u.data.length = ie[1]; 1269 iwe.u.data.length = ie[1];
1188 iwe.u.data.flags = 1; 1270 iwe.u.data.flags = 1;
1189 current_ev = iwe_stream_add_point(info, current_ev, end_buf, 1271 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
1190 &iwe, ie + 2); 1272 &iwe, (u8 *)ie + 2);
1191 break; 1273 break;
1192 case WLAN_EID_MESH_CONFIG: 1274 case WLAN_EID_MESH_CONFIG:
1193 ismesh = true; 1275 ismesh = true;
@@ -1196,7 +1278,7 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
1196 buf = kmalloc(50, GFP_ATOMIC); 1278 buf = kmalloc(50, GFP_ATOMIC);
1197 if (!buf) 1279 if (!buf)
1198 break; 1280 break;
1199 cfg = ie + 2; 1281 cfg = (u8 *)ie + 2;
1200 memset(&iwe, 0, sizeof(iwe)); 1282 memset(&iwe, 0, sizeof(iwe));
1201 iwe.cmd = IWEVCUSTOM; 1283 iwe.cmd = IWEVCUSTOM;
1202 sprintf(buf, "Mesh Network Path Selection Protocol ID: " 1284 sprintf(buf, "Mesh Network Path Selection Protocol ID: "
@@ -1294,7 +1376,8 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
1294 kfree(buf); 1376 kfree(buf);
1295 } 1377 }
1296 1378
1297 ieee80211_scan_add_ies(info, &bss->pub, &current_ev, end_buf); 1379 ieee80211_scan_add_ies(info, ies, &current_ev, end_buf);
1380 rcu_read_unlock();
1298 1381
1299 return current_ev; 1382 return current_ev;
1300} 1383}
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 6f39cb808302..f2431e41a373 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -16,6 +16,7 @@
16#include <net/rtnetlink.h> 16#include <net/rtnetlink.h>
17#include "nl80211.h" 17#include "nl80211.h"
18#include "reg.h" 18#include "reg.h"
19#include "rdev-ops.h"
19 20
20struct cfg80211_conn { 21struct cfg80211_conn {
21 struct cfg80211_connect_params params; 22 struct cfg80211_connect_params params;
@@ -138,10 +139,11 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
138 139
139 request->wdev = wdev; 140 request->wdev = wdev;
140 request->wiphy = &rdev->wiphy; 141 request->wiphy = &rdev->wiphy;
142 request->scan_start = jiffies;
141 143
142 rdev->scan_req = request; 144 rdev->scan_req = request;
143 145
144 err = rdev->ops->scan(wdev->wiphy, request); 146 err = rdev_scan(rdev, request);
145 if (!err) { 147 if (!err) {
146 wdev->conn->state = CFG80211_CONN_SCANNING; 148 wdev->conn->state = CFG80211_CONN_SCANNING;
147 nl80211_send_scan_start(rdev, wdev); 149 nl80211_send_scan_start(rdev, wdev);
@@ -179,7 +181,7 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
179 params->ssid, params->ssid_len, 181 params->ssid, params->ssid_len,
180 NULL, 0, 182 NULL, 0,
181 params->key, params->key_len, 183 params->key, params->key_len,
182 params->key_idx); 184 params->key_idx, NULL, 0);
183 case CFG80211_CONN_ASSOCIATE_NEXT: 185 case CFG80211_CONN_ASSOCIATE_NEXT:
184 BUG_ON(!rdev->ops->assoc); 186 BUG_ON(!rdev->ops->assoc);
185 wdev->conn->state = CFG80211_CONN_ASSOCIATING; 187 wdev->conn->state = CFG80211_CONN_ASSOCIATING;
@@ -415,7 +417,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
415 struct cfg80211_bss *bss) 417 struct cfg80211_bss *bss)
416{ 418{
417 struct wireless_dev *wdev = dev->ieee80211_ptr; 419 struct wireless_dev *wdev = dev->ieee80211_ptr;
418 u8 *country_ie; 420 const u8 *country_ie;
419#ifdef CONFIG_CFG80211_WEXT 421#ifdef CONFIG_CFG80211_WEXT
420 union iwreq_data wrqu; 422 union iwreq_data wrqu;
421#endif 423#endif
@@ -499,7 +501,15 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
499 wdev->sme_state = CFG80211_SME_CONNECTED; 501 wdev->sme_state = CFG80211_SME_CONNECTED;
500 cfg80211_upload_connect_keys(wdev); 502 cfg80211_upload_connect_keys(wdev);
501 503
502 country_ie = (u8 *) ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY); 504 rcu_read_lock();
505 country_ie = ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY);
506 if (!country_ie) {
507 rcu_read_unlock();
508 return;
509 }
510
511 country_ie = kmemdup(country_ie, 2 + country_ie[1], GFP_ATOMIC);
512 rcu_read_unlock();
503 513
504 if (!country_ie) 514 if (!country_ie)
505 return; 515 return;
@@ -513,6 +523,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
513 bss->channel->band, 523 bss->channel->band,
514 country_ie + 2, 524 country_ie + 2,
515 country_ie[1]); 525 country_ie[1]);
526 kfree(country_ie);
516} 527}
517 528
518void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, 529void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
@@ -716,7 +727,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
716 */ 727 */
717 if (rdev->ops->del_key) 728 if (rdev->ops->del_key)
718 for (i = 0; i < 6; i++) 729 for (i = 0; i < 6; i++)
719 rdev->ops->del_key(wdev->wiphy, dev, i, false, NULL); 730 rdev_del_key(rdev, dev, i, false, NULL);
720 731
721#ifdef CONFIG_CFG80211_WEXT 732#ifdef CONFIG_CFG80211_WEXT
722 memset(&wrqu, 0, sizeof(wrqu)); 733 memset(&wrqu, 0, sizeof(wrqu));
@@ -892,7 +903,7 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
892 } else { 903 } else {
893 wdev->sme_state = CFG80211_SME_CONNECTING; 904 wdev->sme_state = CFG80211_SME_CONNECTING;
894 wdev->connect_keys = connkeys; 905 wdev->connect_keys = connkeys;
895 err = rdev->ops->connect(&rdev->wiphy, dev, connect); 906 err = rdev_connect(rdev, dev, connect);
896 if (err) { 907 if (err) {
897 wdev->connect_keys = NULL; 908 wdev->connect_keys = NULL;
898 wdev->sme_state = CFG80211_SME_IDLE; 909 wdev->sme_state = CFG80211_SME_IDLE;
@@ -964,7 +975,7 @@ int __cfg80211_disconnect(struct cfg80211_registered_device *rdev,
964 if (err) 975 if (err)
965 return err; 976 return err;
966 } else { 977 } else {
967 err = rdev->ops->disconnect(&rdev->wiphy, dev, reason); 978 err = rdev_disconnect(rdev, dev, reason);
968 if (err) 979 if (err)
969 return err; 980 return err;
970 } 981 }
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index ff574597a854..1f6f01e2dc4c 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -16,6 +16,7 @@
16#include <net/cfg80211.h> 16#include <net/cfg80211.h>
17#include "sysfs.h" 17#include "sysfs.h"
18#include "core.h" 18#include "core.h"
19#include "rdev-ops.h"
19 20
20static inline struct cfg80211_registered_device *dev_to_rdev( 21static inline struct cfg80211_registered_device *dev_to_rdev(
21 struct device *dev) 22 struct device *dev)
@@ -76,13 +77,11 @@ static void wiphy_dev_release(struct device *dev)
76 cfg80211_dev_free(rdev); 77 cfg80211_dev_free(rdev);
77} 78}
78 79
79#ifdef CONFIG_HOTPLUG
80static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env) 80static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env)
81{ 81{
82 /* TODO, we probably need stuff here */ 82 /* TODO, we probably need stuff here */
83 return 0; 83 return 0;
84} 84}
85#endif
86 85
87static int wiphy_suspend(struct device *dev, pm_message_t state) 86static int wiphy_suspend(struct device *dev, pm_message_t state)
88{ 87{
@@ -94,7 +93,7 @@ static int wiphy_suspend(struct device *dev, pm_message_t state)
94 if (rdev->ops->suspend) { 93 if (rdev->ops->suspend) {
95 rtnl_lock(); 94 rtnl_lock();
96 if (rdev->wiphy.registered) 95 if (rdev->wiphy.registered)
97 ret = rdev->ops->suspend(&rdev->wiphy, rdev->wowlan); 96 ret = rdev_suspend(rdev);
98 rtnl_unlock(); 97 rtnl_unlock();
99 } 98 }
100 99
@@ -114,7 +113,7 @@ static int wiphy_resume(struct device *dev)
114 if (rdev->ops->resume) { 113 if (rdev->ops->resume) {
115 rtnl_lock(); 114 rtnl_lock();
116 if (rdev->wiphy.registered) 115 if (rdev->wiphy.registered)
117 ret = rdev->ops->resume(&rdev->wiphy); 116 ret = rdev_resume(rdev);
118 rtnl_unlock(); 117 rtnl_unlock();
119 } 118 }
120 119
@@ -133,9 +132,7 @@ struct class ieee80211_class = {
133 .owner = THIS_MODULE, 132 .owner = THIS_MODULE,
134 .dev_release = wiphy_dev_release, 133 .dev_release = wiphy_dev_release,
135 .dev_attrs = ieee80211_dev_attrs, 134 .dev_attrs = ieee80211_dev_attrs,
136#ifdef CONFIG_HOTPLUG
137 .dev_uevent = wiphy_uevent, 135 .dev_uevent = wiphy_uevent,
138#endif
139 .suspend = wiphy_suspend, 136 .suspend = wiphy_suspend,
140 .resume = wiphy_resume, 137 .resume = wiphy_resume,
141 .ns_type = &net_ns_type_operations, 138 .ns_type = &net_ns_type_operations,
diff --git a/net/wireless/trace.c b/net/wireless/trace.c
new file mode 100644
index 000000000000..95f997fad755
--- /dev/null
+++ b/net/wireless/trace.c
@@ -0,0 +1,7 @@
1#include <linux/module.h>
2
3#ifndef __CHECKER__
4#define CREATE_TRACE_POINTS
5#include "trace.h"
6
7#endif
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
new file mode 100644
index 000000000000..2134576f426e
--- /dev/null
+++ b/net/wireless/trace.h
@@ -0,0 +1,2324 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM cfg80211
3
4#if !defined(__RDEV_OPS_TRACE) || defined(TRACE_HEADER_MULTI_READ)
5#define __RDEV_OPS_TRACE
6
7#include <linux/tracepoint.h>
8
9#include <linux/rtnetlink.h>
10#include <net/cfg80211.h>
11#include "core.h"
12
13#define MAC_ENTRY(entry_mac) __array(u8, entry_mac, ETH_ALEN)
14#define MAC_ASSIGN(entry_mac, given_mac) do { \
15 if (given_mac) \
16 memcpy(__entry->entry_mac, given_mac, ETH_ALEN); \
17 else \
18 memset(__entry->entry_mac, 0, ETH_ALEN); \
19 } while (0)
20#define MAC_PR_FMT "%pM"
21#define MAC_PR_ARG(entry_mac) (__entry->entry_mac)
22
23#define MAXNAME 32
24#define WIPHY_ENTRY __array(char, wiphy_name, 32)
25#define WIPHY_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(wiphy), MAXNAME)
26#define WIPHY_PR_FMT "%s"
27#define WIPHY_PR_ARG __entry->wiphy_name
28
29#define WDEV_ENTRY __field(u32, id)
30#define WDEV_ASSIGN (__entry->id) = (wdev ? wdev->identifier : 0)
31#define WDEV_PR_FMT "wdev(%u)"
32#define WDEV_PR_ARG (__entry->id)
33
34#define NETDEV_ENTRY __array(char, name, IFNAMSIZ) \
35 __field(int, ifindex)
36#define NETDEV_ASSIGN \
37 do { \
38 memcpy(__entry->name, netdev->name, IFNAMSIZ); \
39 (__entry->ifindex) = (netdev->ifindex); \
40 } while (0)
41#define NETDEV_PR_FMT "netdev:%s(%d)"
42#define NETDEV_PR_ARG __entry->name, __entry->ifindex
43
44#define MESH_CFG_ENTRY __field(u16, dot11MeshRetryTimeout) \
45 __field(u16, dot11MeshConfirmTimeout) \
46 __field(u16, dot11MeshHoldingTimeout) \
47 __field(u16, dot11MeshMaxPeerLinks) \
48 __field(u8, dot11MeshMaxRetries) \
49 __field(u8, dot11MeshTTL) \
50 __field(u8, element_ttl) \
51 __field(bool, auto_open_plinks) \
52 __field(u32, dot11MeshNbrOffsetMaxNeighbor) \
53 __field(u8, dot11MeshHWMPmaxPREQretries) \
54 __field(u32, path_refresh_time) \
55 __field(u32, dot11MeshHWMPactivePathTimeout) \
56 __field(u16, min_discovery_timeout) \
57 __field(u16, dot11MeshHWMPpreqMinInterval) \
58 __field(u16, dot11MeshHWMPperrMinInterval) \
59 __field(u16, dot11MeshHWMPnetDiameterTraversalTime) \
60 __field(u8, dot11MeshHWMPRootMode) \
61 __field(u16, dot11MeshHWMPRannInterval) \
62 __field(bool, dot11MeshGateAnnouncementProtocol) \
63 __field(bool, dot11MeshForwarding) \
64 __field(s32, rssi_threshold) \
65 __field(u16, ht_opmode) \
66 __field(u32, dot11MeshHWMPactivePathToRootTimeout) \
67 __field(u16, dot11MeshHWMProotInterval) \
68 __field(u16, dot11MeshHWMPconfirmationInterval)
69#define MESH_CFG_ASSIGN \
70 do { \
71 __entry->dot11MeshRetryTimeout = conf->dot11MeshRetryTimeout; \
72 __entry->dot11MeshConfirmTimeout = \
73 conf->dot11MeshConfirmTimeout; \
74 __entry->dot11MeshHoldingTimeout = \
75 conf->dot11MeshHoldingTimeout; \
76 __entry->dot11MeshMaxPeerLinks = conf->dot11MeshMaxPeerLinks; \
77 __entry->dot11MeshMaxRetries = conf->dot11MeshMaxRetries; \
78 __entry->dot11MeshTTL = conf->dot11MeshTTL; \
79 __entry->element_ttl = conf->element_ttl; \
80 __entry->auto_open_plinks = conf->auto_open_plinks; \
81 __entry->dot11MeshNbrOffsetMaxNeighbor = \
82 conf->dot11MeshNbrOffsetMaxNeighbor; \
83 __entry->dot11MeshHWMPmaxPREQretries = \
84 conf->dot11MeshHWMPmaxPREQretries; \
85 __entry->path_refresh_time = conf->path_refresh_time; \
86 __entry->dot11MeshHWMPactivePathTimeout = \
87 conf->dot11MeshHWMPactivePathTimeout; \
88 __entry->min_discovery_timeout = conf->min_discovery_timeout; \
89 __entry->dot11MeshHWMPpreqMinInterval = \
90 conf->dot11MeshHWMPpreqMinInterval; \
91 __entry->dot11MeshHWMPperrMinInterval = \
92 conf->dot11MeshHWMPperrMinInterval; \
93 __entry->dot11MeshHWMPnetDiameterTraversalTime = \
94 conf->dot11MeshHWMPnetDiameterTraversalTime; \
95 __entry->dot11MeshHWMPRootMode = conf->dot11MeshHWMPRootMode; \
96 __entry->dot11MeshHWMPRannInterval = \
97 conf->dot11MeshHWMPRannInterval; \
98 __entry->dot11MeshGateAnnouncementProtocol = \
99 conf->dot11MeshGateAnnouncementProtocol; \
100 __entry->dot11MeshForwarding = conf->dot11MeshForwarding; \
101 __entry->rssi_threshold = conf->rssi_threshold; \
102 __entry->ht_opmode = conf->ht_opmode; \
103 __entry->dot11MeshHWMPactivePathToRootTimeout = \
104 conf->dot11MeshHWMPactivePathToRootTimeout; \
105 __entry->dot11MeshHWMProotInterval = \
106 conf->dot11MeshHWMProotInterval; \
107 __entry->dot11MeshHWMPconfirmationInterval = \
108 conf->dot11MeshHWMPconfirmationInterval; \
109 } while (0)
110
111#define CHAN_ENTRY __field(enum ieee80211_band, band) \
112 __field(u16, center_freq)
113#define CHAN_ASSIGN(chan) \
114 do { \
115 if (chan) { \
116 __entry->band = chan->band; \
117 __entry->center_freq = chan->center_freq; \
118 } else { \
119 __entry->band = 0; \
120 __entry->center_freq = 0; \
121 } \
122 } while (0)
123#define CHAN_PR_FMT "band: %d, freq: %u"
124#define CHAN_PR_ARG __entry->band, __entry->center_freq
125
126#define CHAN_DEF_ENTRY __field(enum ieee80211_band, band) \
127 __field(u32, control_freq) \
128 __field(u32, width) \
129 __field(u32, center_freq1) \
130 __field(u32, center_freq2)
131#define CHAN_DEF_ASSIGN(chandef) \
132 do { \
133 if ((chandef) && (chandef)->chan) { \
134 __entry->band = (chandef)->chan->band; \
135 __entry->control_freq = \
136 (chandef)->chan->center_freq; \
137 __entry->width = (chandef)->width; \
138 __entry->center_freq1 = (chandef)->center_freq1;\
139 __entry->center_freq2 = (chandef)->center_freq2;\
140 } else { \
141 __entry->band = 0; \
142 __entry->control_freq = 0; \
143 __entry->width = 0; \
144 __entry->center_freq1 = 0; \
145 __entry->center_freq2 = 0; \
146 } \
147 } while (0)
148#define CHAN_DEF_PR_FMT \
149 "band: %d, control freq: %u, width: %d, cf1: %u, cf2: %u"
150#define CHAN_DEF_PR_ARG __entry->band, __entry->control_freq, \
151 __entry->width, __entry->center_freq1, \
152 __entry->center_freq2
153
154#define SINFO_ENTRY __field(int, generation) \
155 __field(u32, connected_time) \
156 __field(u32, inactive_time) \
157 __field(u32, rx_bytes) \
158 __field(u32, tx_bytes) \
159 __field(u32, rx_packets) \
160 __field(u32, tx_packets) \
161 __field(u32, tx_retries) \
162 __field(u32, tx_failed) \
163 __field(u32, rx_dropped_misc) \
164 __field(u32, beacon_loss_count) \
165 __field(u16, llid) \
166 __field(u16, plid) \
167 __field(u8, plink_state)
168#define SINFO_ASSIGN \
169 do { \
170 __entry->generation = sinfo->generation; \
171 __entry->connected_time = sinfo->connected_time; \
172 __entry->inactive_time = sinfo->inactive_time; \
173 __entry->rx_bytes = sinfo->rx_bytes; \
174 __entry->tx_bytes = sinfo->tx_bytes; \
175 __entry->rx_packets = sinfo->rx_packets; \
176 __entry->tx_packets = sinfo->tx_packets; \
177 __entry->tx_retries = sinfo->tx_retries; \
178 __entry->tx_failed = sinfo->tx_failed; \
179 __entry->rx_dropped_misc = sinfo->rx_dropped_misc; \
180 __entry->beacon_loss_count = sinfo->beacon_loss_count; \
181 __entry->llid = sinfo->llid; \
182 __entry->plid = sinfo->plid; \
183 __entry->plink_state = sinfo->plink_state; \
184 } while (0)
185
186#define BOOL_TO_STR(bo) (bo) ? "true" : "false"
187
188/*************************************************************
189 * rdev->ops traces *
190 *************************************************************/
191
192TRACE_EVENT(rdev_suspend,
193 TP_PROTO(struct wiphy *wiphy, struct cfg80211_wowlan *wow),
194 TP_ARGS(wiphy, wow),
195 TP_STRUCT__entry(
196 WIPHY_ENTRY
197 __field(bool, any)
198 __field(bool, disconnect)
199 __field(bool, magic_pkt)
200 __field(bool, gtk_rekey_failure)
201 __field(bool, eap_identity_req)
202 __field(bool, four_way_handshake)
203 __field(bool, rfkill_release)
204 __field(bool, valid_wow)
205 ),
206 TP_fast_assign(
207 WIPHY_ASSIGN;
208 if (wow) {
209 __entry->any = wow->any;
210 __entry->disconnect = wow->disconnect;
211 __entry->magic_pkt = wow->magic_pkt;
212 __entry->gtk_rekey_failure = wow->gtk_rekey_failure;
213 __entry->eap_identity_req = wow->eap_identity_req;
214 __entry->four_way_handshake = wow->four_way_handshake;
215 __entry->rfkill_release = wow->rfkill_release;
216 __entry->valid_wow = true;
217 } else {
218 __entry->valid_wow = false;
219 }
220 ),
221 TP_printk(WIPHY_PR_FMT ", wow%s - any: %d, disconnect: %d, "
222 "magic pkt: %d, gtk rekey failure: %d, eap identify req: %d, "
223 "four way handshake: %d, rfkill release: %d.",
224 WIPHY_PR_ARG, __entry->valid_wow ? "" : "(Not configured!)",
225 __entry->any, __entry->disconnect, __entry->magic_pkt,
226 __entry->gtk_rekey_failure, __entry->eap_identity_req,
227 __entry->four_way_handshake, __entry->rfkill_release)
228);
229
230TRACE_EVENT(rdev_return_int,
231 TP_PROTO(struct wiphy *wiphy, int ret),
232 TP_ARGS(wiphy, ret),
233 TP_STRUCT__entry(
234 WIPHY_ENTRY
235 __field(int, ret)
236 ),
237 TP_fast_assign(
238 WIPHY_ASSIGN;
239 __entry->ret = ret;
240 ),
241 TP_printk(WIPHY_PR_FMT ", returned: %d", WIPHY_PR_ARG, __entry->ret)
242);
243
244TRACE_EVENT(rdev_scan,
245 TP_PROTO(struct wiphy *wiphy, struct cfg80211_scan_request *request),
246 TP_ARGS(wiphy, request),
247 TP_STRUCT__entry(
248 WIPHY_ENTRY
249 ),
250 TP_fast_assign(
251 WIPHY_ASSIGN;
252 ),
253 TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG)
254);
255
256DECLARE_EVENT_CLASS(wiphy_only_evt,
257 TP_PROTO(struct wiphy *wiphy),
258 TP_ARGS(wiphy),
259 TP_STRUCT__entry(
260 WIPHY_ENTRY
261 ),
262 TP_fast_assign(
263 WIPHY_ASSIGN;
264 ),
265 TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG)
266);
267
268DEFINE_EVENT(wiphy_only_evt, rdev_resume,
269 TP_PROTO(struct wiphy *wiphy),
270 TP_ARGS(wiphy)
271);
272
273DEFINE_EVENT(wiphy_only_evt, rdev_return_void,
274 TP_PROTO(struct wiphy *wiphy),
275 TP_ARGS(wiphy)
276);
277
278DEFINE_EVENT(wiphy_only_evt, rdev_get_ringparam,
279 TP_PROTO(struct wiphy *wiphy),
280 TP_ARGS(wiphy)
281);
282
283DEFINE_EVENT(wiphy_only_evt, rdev_get_antenna,
284 TP_PROTO(struct wiphy *wiphy),
285 TP_ARGS(wiphy)
286);
287
288DEFINE_EVENT(wiphy_only_evt, rdev_rfkill_poll,
289 TP_PROTO(struct wiphy *wiphy),
290 TP_ARGS(wiphy)
291);
292
293DECLARE_EVENT_CLASS(wiphy_enabled_evt,
294 TP_PROTO(struct wiphy *wiphy, bool enabled),
295 TP_ARGS(wiphy, enabled),
296 TP_STRUCT__entry(
297 WIPHY_ENTRY
298 __field(bool, enabled)
299 ),
300 TP_fast_assign(
301 WIPHY_ASSIGN;
302 __entry->enabled = enabled;
303 ),
304 TP_printk(WIPHY_PR_FMT ", %senabled ",
305 WIPHY_PR_ARG, __entry->enabled ? "" : "not ")
306);
307
308DEFINE_EVENT(wiphy_enabled_evt, rdev_set_wakeup,
309 TP_PROTO(struct wiphy *wiphy, bool enabled),
310 TP_ARGS(wiphy, enabled)
311);
312
313TRACE_EVENT(rdev_add_virtual_intf,
314 TP_PROTO(struct wiphy *wiphy, char *name, enum nl80211_iftype type),
315 TP_ARGS(wiphy, name, type),
316 TP_STRUCT__entry(
317 WIPHY_ENTRY
318 __string(vir_intf_name, name ? name : "<noname>")
319 __field(enum nl80211_iftype, type)
320 ),
321 TP_fast_assign(
322 WIPHY_ASSIGN;
323 __assign_str(vir_intf_name, name ? name : "<noname>");
324 __entry->type = type;
325 ),
326 TP_printk(WIPHY_PR_FMT ", virtual intf name: %s, type: %d",
327 WIPHY_PR_ARG, __get_str(vir_intf_name), __entry->type)
328);
329
330DECLARE_EVENT_CLASS(wiphy_wdev_evt,
331 TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
332 TP_ARGS(wiphy, wdev),
333 TP_STRUCT__entry(
334 WIPHY_ENTRY
335 WDEV_ENTRY
336 ),
337 TP_fast_assign(
338 WIPHY_ASSIGN;
339 WDEV_ASSIGN;
340 ),
341 TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG)
342);
343
344DEFINE_EVENT(wiphy_wdev_evt, rdev_return_wdev,
345 TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
346 TP_ARGS(wiphy, wdev)
347);
348
349DEFINE_EVENT(wiphy_wdev_evt, rdev_del_virtual_intf,
350 TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
351 TP_ARGS(wiphy, wdev)
352);
353
354TRACE_EVENT(rdev_change_virtual_intf,
355 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
356 enum nl80211_iftype type),
357 TP_ARGS(wiphy, netdev, type),
358 TP_STRUCT__entry(
359 WIPHY_ENTRY
360 NETDEV_ENTRY
361 __field(enum nl80211_iftype, type)
362 ),
363 TP_fast_assign(
364 WIPHY_ASSIGN;
365 NETDEV_ASSIGN;
366 __entry->type = type;
367 ),
368 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", type: %d",
369 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->type)
370);
371
372DECLARE_EVENT_CLASS(key_handle,
373 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
374 bool pairwise, const u8 *mac_addr),
375 TP_ARGS(wiphy, netdev, key_index, pairwise, mac_addr),
376 TP_STRUCT__entry(
377 WIPHY_ENTRY
378 NETDEV_ENTRY
379 MAC_ENTRY(mac_addr)
380 __field(u8, key_index)
381 __field(bool, pairwise)
382 ),
383 TP_fast_assign(
384 WIPHY_ASSIGN;
385 NETDEV_ASSIGN;
386 MAC_ASSIGN(mac_addr, mac_addr);
387 __entry->key_index = key_index;
388 __entry->pairwise = pairwise;
389 ),
390 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", key_index: %u, pairwise: %s, mac addr: " MAC_PR_FMT,
391 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->key_index,
392 BOOL_TO_STR(__entry->pairwise), MAC_PR_ARG(mac_addr))
393);
394
395DEFINE_EVENT(key_handle, rdev_add_key,
396 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
397 bool pairwise, const u8 *mac_addr),
398 TP_ARGS(wiphy, netdev, key_index, pairwise, mac_addr)
399);
400
401DEFINE_EVENT(key_handle, rdev_get_key,
402 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
403 bool pairwise, const u8 *mac_addr),
404 TP_ARGS(wiphy, netdev, key_index, pairwise, mac_addr)
405);
406
407DEFINE_EVENT(key_handle, rdev_del_key,
408 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
409 bool pairwise, const u8 *mac_addr),
410 TP_ARGS(wiphy, netdev, key_index, pairwise, mac_addr)
411);
412
413TRACE_EVENT(rdev_set_default_key,
414 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
415 bool unicast, bool multicast),
416 TP_ARGS(wiphy, netdev, key_index, unicast, multicast),
417 TP_STRUCT__entry(
418 WIPHY_ENTRY
419 NETDEV_ENTRY
420 __field(u8, key_index)
421 __field(bool, unicast)
422 __field(bool, multicast)
423 ),
424 TP_fast_assign(
425 WIPHY_ASSIGN;
426 NETDEV_ASSIGN;
427 __entry->key_index = key_index;
428 __entry->unicast = unicast;
429 __entry->multicast = multicast;
430 ),
431 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", key index: %u, unicast: %s, multicast: %s",
432 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->key_index,
433 BOOL_TO_STR(__entry->unicast),
434 BOOL_TO_STR(__entry->multicast))
435);
436
437TRACE_EVENT(rdev_set_default_mgmt_key,
438 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index),
439 TP_ARGS(wiphy, netdev, key_index),
440 TP_STRUCT__entry(
441 WIPHY_ENTRY
442 NETDEV_ENTRY
443 __field(u8, key_index)
444 ),
445 TP_fast_assign(
446 WIPHY_ASSIGN;
447 NETDEV_ASSIGN;
448 __entry->key_index = key_index;
449 ),
450 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", key index: %u",
451 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->key_index)
452);
453
454TRACE_EVENT(rdev_start_ap,
455 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
456 struct cfg80211_ap_settings *settings),
457 TP_ARGS(wiphy, netdev, settings),
458 TP_STRUCT__entry(
459 WIPHY_ENTRY
460 NETDEV_ENTRY
461 CHAN_DEF_ENTRY
462 __field(int, beacon_interval)
463 __field(int, dtim_period)
464 __array(char, ssid, IEEE80211_MAX_SSID_LEN + 1)
465 __field(enum nl80211_hidden_ssid, hidden_ssid)
466 __field(u32, wpa_ver)
467 __field(bool, privacy)
468 __field(enum nl80211_auth_type, auth_type)
469 __field(int, inactivity_timeout)
470 ),
471 TP_fast_assign(
472 WIPHY_ASSIGN;
473 NETDEV_ASSIGN;
474 CHAN_DEF_ASSIGN(&settings->chandef);
475 __entry->beacon_interval = settings->beacon_interval;
476 __entry->dtim_period = settings->dtim_period;
477 __entry->hidden_ssid = settings->hidden_ssid;
478 __entry->wpa_ver = settings->crypto.wpa_versions;
479 __entry->privacy = settings->privacy;
480 __entry->auth_type = settings->auth_type;
481 __entry->inactivity_timeout = settings->inactivity_timeout;
482 memset(__entry->ssid, 0, IEEE80211_MAX_SSID_LEN + 1);
483 memcpy(__entry->ssid, settings->ssid, settings->ssid_len);
484 ),
485 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", AP settings - ssid: %s, "
486 CHAN_DEF_PR_FMT ", beacon interval: %d, dtim period: %d, "
487 "hidden ssid: %d, wpa versions: %u, privacy: %s, "
488 "auth type: %d, inactivity timeout: %d",
489 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->ssid, CHAN_DEF_PR_ARG,
490 __entry->beacon_interval, __entry->dtim_period,
491 __entry->hidden_ssid, __entry->wpa_ver,
492 BOOL_TO_STR(__entry->privacy), __entry->auth_type,
493 __entry->inactivity_timeout)
494);
495
496TRACE_EVENT(rdev_change_beacon,
497 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
498 struct cfg80211_beacon_data *info),
499 TP_ARGS(wiphy, netdev, info),
500 TP_STRUCT__entry(
501 WIPHY_ENTRY
502 NETDEV_ENTRY
503 __dynamic_array(u8, head, info ? info->head_len : 0)
504 __dynamic_array(u8, tail, info ? info->tail_len : 0)
505 __dynamic_array(u8, beacon_ies, info ? info->beacon_ies_len : 0)
506 __dynamic_array(u8, proberesp_ies,
507 info ? info->proberesp_ies_len : 0)
508 __dynamic_array(u8, assocresp_ies,
509 info ? info->assocresp_ies_len : 0)
510 __dynamic_array(u8, probe_resp, info ? info->probe_resp_len : 0)
511 ),
512 TP_fast_assign(
513 WIPHY_ASSIGN;
514 NETDEV_ASSIGN;
515 if (info) {
516 if (info->head)
517 memcpy(__get_dynamic_array(head), info->head,
518 info->head_len);
519 if (info->tail)
520 memcpy(__get_dynamic_array(tail), info->tail,
521 info->tail_len);
522 if (info->beacon_ies)
523 memcpy(__get_dynamic_array(beacon_ies),
524 info->beacon_ies, info->beacon_ies_len);
525 if (info->proberesp_ies)
526 memcpy(__get_dynamic_array(proberesp_ies),
527 info->proberesp_ies,
528 info->proberesp_ies_len);
529 if (info->assocresp_ies)
530 memcpy(__get_dynamic_array(assocresp_ies),
531 info->assocresp_ies,
532 info->assocresp_ies_len);
533 if (info->probe_resp)
534 memcpy(__get_dynamic_array(probe_resp),
535 info->probe_resp, info->probe_resp_len);
536 }
537 ),
538 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG)
539);
540
541DECLARE_EVENT_CLASS(wiphy_netdev_evt,
542 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
543 TP_ARGS(wiphy, netdev),
544 TP_STRUCT__entry(
545 WIPHY_ENTRY
546 NETDEV_ENTRY
547 ),
548 TP_fast_assign(
549 WIPHY_ASSIGN;
550 NETDEV_ASSIGN;
551 ),
552 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG)
553);
554
555DEFINE_EVENT(wiphy_netdev_evt, rdev_stop_ap,
556 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
557 TP_ARGS(wiphy, netdev)
558);
559
560DEFINE_EVENT(wiphy_netdev_evt, rdev_get_et_stats,
561 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
562 TP_ARGS(wiphy, netdev)
563);
564
565DEFINE_EVENT(wiphy_netdev_evt, rdev_sched_scan_stop,
566 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
567 TP_ARGS(wiphy, netdev)
568);
569
570DEFINE_EVENT(wiphy_netdev_evt, rdev_set_rekey_data,
571 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
572 TP_ARGS(wiphy, netdev)
573);
574
575DEFINE_EVENT(wiphy_netdev_evt, rdev_get_mesh_config,
576 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
577 TP_ARGS(wiphy, netdev)
578);
579
580DEFINE_EVENT(wiphy_netdev_evt, rdev_leave_mesh,
581 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
582 TP_ARGS(wiphy, netdev)
583);
584
585DEFINE_EVENT(wiphy_netdev_evt, rdev_leave_ibss,
586 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
587 TP_ARGS(wiphy, netdev)
588);
589
590DEFINE_EVENT(wiphy_netdev_evt, rdev_flush_pmksa,
591 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
592 TP_ARGS(wiphy, netdev)
593);
594
595DECLARE_EVENT_CLASS(station_add_change,
596 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *mac,
597 struct station_parameters *params),
598 TP_ARGS(wiphy, netdev, mac, params),
599 TP_STRUCT__entry(
600 WIPHY_ENTRY
601 NETDEV_ENTRY
602 MAC_ENTRY(sta_mac)
603 __field(u32, sta_flags_mask)
604 __field(u32, sta_flags_set)
605 __field(u32, sta_modify_mask)
606 __field(int, listen_interval)
607 __field(u16, aid)
608 __field(u8, plink_action)
609 __field(u8, plink_state)
610 __field(u8, uapsd_queues)
611 __array(u8, ht_capa, (int)sizeof(struct ieee80211_ht_cap))
612 ),
613 TP_fast_assign(
614 WIPHY_ASSIGN;
615 NETDEV_ASSIGN;
616 MAC_ASSIGN(sta_mac, mac);
617 __entry->sta_flags_mask = params->sta_flags_mask;
618 __entry->sta_flags_set = params->sta_flags_set;
619 __entry->sta_modify_mask = params->sta_modify_mask;
620 __entry->listen_interval = params->listen_interval;
621 __entry->aid = params->aid;
622 __entry->plink_action = params->plink_action;
623 __entry->plink_state = params->plink_state;
624 __entry->uapsd_queues = params->uapsd_queues;
625 memset(__entry->ht_capa, 0, sizeof(struct ieee80211_ht_cap));
626 if (params->ht_capa)
627 memcpy(__entry->ht_capa, params->ht_capa,
628 sizeof(struct ieee80211_ht_cap));
629 ),
630 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: " MAC_PR_FMT
631 ", station flags mask: %u, station flags set: %u, "
632 "station modify mask: %u, listen interval: %d, aid: %u, "
633 "plink action: %u, plink state: %u, uapsd queues: %u",
634 WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac),
635 __entry->sta_flags_mask, __entry->sta_flags_set,
636 __entry->sta_modify_mask, __entry->listen_interval,
637 __entry->aid, __entry->plink_action, __entry->plink_state,
638 __entry->uapsd_queues)
639);
640
641DEFINE_EVENT(station_add_change, rdev_add_station,
642 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *mac,
643 struct station_parameters *params),
644 TP_ARGS(wiphy, netdev, mac, params)
645);
646
647DEFINE_EVENT(station_add_change, rdev_change_station,
648 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *mac,
649 struct station_parameters *params),
650 TP_ARGS(wiphy, netdev, mac, params)
651);
652
653DECLARE_EVENT_CLASS(wiphy_netdev_mac_evt,
654 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac),
655 TP_ARGS(wiphy, netdev, mac),
656 TP_STRUCT__entry(
657 WIPHY_ENTRY
658 NETDEV_ENTRY
659 MAC_ENTRY(sta_mac)
660 ),
661 TP_fast_assign(
662 WIPHY_ASSIGN;
663 NETDEV_ASSIGN;
664 MAC_ASSIGN(sta_mac, mac);
665 ),
666 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", mac: " MAC_PR_FMT,
667 WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac))
668);
669
670DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_del_station,
671 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac),
672 TP_ARGS(wiphy, netdev, mac)
673);
674
675DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_get_station,
676 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac),
677 TP_ARGS(wiphy, netdev, mac)
678);
679
680DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_del_mpath,
681 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac),
682 TP_ARGS(wiphy, netdev, mac)
683);
684
685DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_set_wds_peer,
686 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac),
687 TP_ARGS(wiphy, netdev, mac)
688);
689
690TRACE_EVENT(rdev_dump_station,
691 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int idx,
692 u8 *mac),
693 TP_ARGS(wiphy, netdev, idx, mac),
694 TP_STRUCT__entry(
695 WIPHY_ENTRY
696 NETDEV_ENTRY
697 MAC_ENTRY(sta_mac)
698 __field(int, idx)
699 ),
700 TP_fast_assign(
701 WIPHY_ASSIGN;
702 NETDEV_ASSIGN;
703 MAC_ASSIGN(sta_mac, mac);
704 __entry->idx = idx;
705 ),
706 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: " MAC_PR_FMT ", idx: %d",
707 WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac),
708 __entry->idx)
709);
710
711TRACE_EVENT(rdev_return_int_station_info,
712 TP_PROTO(struct wiphy *wiphy, int ret, struct station_info *sinfo),
713 TP_ARGS(wiphy, ret, sinfo),
714 TP_STRUCT__entry(
715 WIPHY_ENTRY
716 __field(int, ret)
717 SINFO_ENTRY
718 ),
719 TP_fast_assign(
720 WIPHY_ASSIGN;
721 __entry->ret = ret;
722 SINFO_ASSIGN;
723 ),
724 TP_printk(WIPHY_PR_FMT ", returned %d" ,
725 WIPHY_PR_ARG, __entry->ret)
726);
727
728DECLARE_EVENT_CLASS(mpath_evt,
729 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst,
730 u8 *next_hop),
731 TP_ARGS(wiphy, netdev, dst, next_hop),
732 TP_STRUCT__entry(
733 WIPHY_ENTRY
734 NETDEV_ENTRY
735 MAC_ENTRY(dst)
736 MAC_ENTRY(next_hop)
737 ),
738 TP_fast_assign(
739 WIPHY_ASSIGN;
740 NETDEV_ASSIGN;
741 MAC_ASSIGN(dst, dst);
742 MAC_ASSIGN(next_hop, next_hop);
743 ),
744 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", destination: " MAC_PR_FMT ", next hop: " MAC_PR_FMT,
745 WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(dst),
746 MAC_PR_ARG(next_hop))
747);
748
749DEFINE_EVENT(mpath_evt, rdev_add_mpath,
750 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst,
751 u8 *next_hop),
752 TP_ARGS(wiphy, netdev, dst, next_hop)
753);
754
755DEFINE_EVENT(mpath_evt, rdev_change_mpath,
756 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst,
757 u8 *next_hop),
758 TP_ARGS(wiphy, netdev, dst, next_hop)
759);
760
761DEFINE_EVENT(mpath_evt, rdev_get_mpath,
762 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst,
763 u8 *next_hop),
764 TP_ARGS(wiphy, netdev, dst, next_hop)
765);
766
767TRACE_EVENT(rdev_dump_mpath,
768 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int idx,
769 u8 *dst, u8 *next_hop),
770 TP_ARGS(wiphy, netdev, idx, dst, next_hop),
771 TP_STRUCT__entry(
772 WIPHY_ENTRY
773 NETDEV_ENTRY
774 MAC_ENTRY(dst)
775 MAC_ENTRY(next_hop)
776 __field(int, idx)
777 ),
778 TP_fast_assign(
779 WIPHY_ASSIGN;
780 NETDEV_ASSIGN;
781 MAC_ASSIGN(dst, dst);
782 MAC_ASSIGN(next_hop, next_hop);
783 __entry->idx = idx;
784 ),
785 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", index: %d, destination: "
786 MAC_PR_FMT ", next hop: " MAC_PR_FMT,
787 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->idx, MAC_PR_ARG(dst),
788 MAC_PR_ARG(next_hop))
789);
790
791TRACE_EVENT(rdev_return_int_mpath_info,
792 TP_PROTO(struct wiphy *wiphy, int ret, struct mpath_info *pinfo),
793 TP_ARGS(wiphy, ret, pinfo),
794 TP_STRUCT__entry(
795 WIPHY_ENTRY
796 __field(int, ret)
797 __field(int, generation)
798 __field(u32, filled)
799 __field(u32, frame_qlen)
800 __field(u32, sn)
801 __field(u32, metric)
802 __field(u32, exptime)
803 __field(u32, discovery_timeout)
804 __field(u8, discovery_retries)
805 __field(u8, flags)
806 ),
807 TP_fast_assign(
808 WIPHY_ASSIGN;
809 __entry->ret = ret;
810 __entry->generation = pinfo->generation;
811 __entry->filled = pinfo->filled;
812 __entry->frame_qlen = pinfo->frame_qlen;
813 __entry->sn = pinfo->sn;
814 __entry->metric = pinfo->metric;
815 __entry->exptime = pinfo->exptime;
816 __entry->discovery_timeout = pinfo->discovery_timeout;
817 __entry->discovery_retries = pinfo->discovery_retries;
818 __entry->flags = pinfo->flags;
819 ),
820 TP_printk(WIPHY_PR_FMT ", returned %d. mpath info - generation: %d, "
821 "filled: %u, frame qlen: %u, sn: %u, metric: %u, exptime: %u,"
822 " discovery timeout: %u, discovery retries: %u, flags: %u",
823 WIPHY_PR_ARG, __entry->ret, __entry->generation,
824 __entry->filled, __entry->frame_qlen, __entry->sn,
825 __entry->metric, __entry->exptime, __entry->discovery_timeout,
826 __entry->discovery_retries, __entry->flags)
827);
828
829TRACE_EVENT(rdev_return_int_mesh_config,
830 TP_PROTO(struct wiphy *wiphy, int ret, struct mesh_config *conf),
831 TP_ARGS(wiphy, ret, conf),
832 TP_STRUCT__entry(
833 WIPHY_ENTRY
834 MESH_CFG_ENTRY
835 __field(int, ret)
836 ),
837 TP_fast_assign(
838 WIPHY_ASSIGN;
839 MESH_CFG_ASSIGN;
840 __entry->ret = ret;
841 ),
842 TP_printk(WIPHY_PR_FMT ", returned: %d",
843 WIPHY_PR_ARG, __entry->ret)
844);
845
846TRACE_EVENT(rdev_update_mesh_config,
847 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u32 mask,
848 const struct mesh_config *conf),
849 TP_ARGS(wiphy, netdev, mask, conf),
850 TP_STRUCT__entry(
851 WIPHY_ENTRY
852 NETDEV_ENTRY
853 MESH_CFG_ENTRY
854 __field(u32, mask)
855 ),
856 TP_fast_assign(
857 WIPHY_ASSIGN;
858 NETDEV_ASSIGN;
859 MESH_CFG_ASSIGN;
860 __entry->mask = mask;
861 ),
862 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", mask: %u",
863 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->mask)
864);
865
866TRACE_EVENT(rdev_join_mesh,
867 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
868 const struct mesh_config *conf,
869 const struct mesh_setup *setup),
870 TP_ARGS(wiphy, netdev, conf, setup),
871 TP_STRUCT__entry(
872 WIPHY_ENTRY
873 NETDEV_ENTRY
874 MESH_CFG_ENTRY
875 ),
876 TP_fast_assign(
877 WIPHY_ASSIGN;
878 NETDEV_ASSIGN;
879 MESH_CFG_ASSIGN;
880 ),
881 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT,
882 WIPHY_PR_ARG, NETDEV_PR_ARG)
883);
884
885TRACE_EVENT(rdev_change_bss,
886 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
887 struct bss_parameters *params),
888 TP_ARGS(wiphy, netdev, params),
889 TP_STRUCT__entry(
890 WIPHY_ENTRY
891 NETDEV_ENTRY
892 __field(int, use_cts_prot)
893 __field(int, use_short_preamble)
894 __field(int, use_short_slot_time)
895 __field(int, ap_isolate)
896 __field(int, ht_opmode)
897 ),
898 TP_fast_assign(
899 WIPHY_ASSIGN;
900 NETDEV_ASSIGN;
901 __entry->use_cts_prot = params->use_cts_prot;
902 __entry->use_short_preamble = params->use_short_preamble;
903 __entry->use_short_slot_time = params->use_short_slot_time;
904 __entry->ap_isolate = params->ap_isolate;
905 __entry->ht_opmode = params->ht_opmode;
906 ),
907 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", use cts prot: %d, "
908 "use short preamble: %d, use short slot time: %d, "
909 "ap isolate: %d, ht opmode: %d",
910 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->use_cts_prot,
911 __entry->use_short_preamble, __entry->use_short_slot_time,
912 __entry->ap_isolate, __entry->ht_opmode)
913);
914
915TRACE_EVENT(rdev_set_txq_params,
916 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
917 struct ieee80211_txq_params *params),
918 TP_ARGS(wiphy, netdev, params),
919 TP_STRUCT__entry(
920 WIPHY_ENTRY
921 NETDEV_ENTRY
922 __field(enum nl80211_ac, ac)
923 __field(u16, txop)
924 __field(u16, cwmin)
925 __field(u16, cwmax)
926 __field(u8, aifs)
927 ),
928 TP_fast_assign(
929 WIPHY_ASSIGN;
930 NETDEV_ASSIGN;
931 __entry->ac = params->ac;
932 __entry->txop = params->txop;
933 __entry->cwmin = params->cwmin;
934 __entry->cwmax = params->cwmax;
935 __entry->aifs = params->aifs;
936 ),
937 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", ac: %d, txop: %u, cwmin: %u, cwmax: %u, aifs: %u",
938 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->ac, __entry->txop,
939 __entry->cwmin, __entry->cwmax, __entry->aifs)
940);
941
/* rdev op trace: driver asked to set the libertas mesh channel. */
TRACE_EVENT(rdev_libertas_set_mesh_channel,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
		 struct ieee80211_channel *chan),
	TP_ARGS(wiphy, netdev, chan),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		NETDEV_ENTRY
		CHAN_ENTRY
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		NETDEV_ASSIGN;
		CHAN_ASSIGN(chan);
	),
	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_PR_FMT, WIPHY_PR_ARG,
		  NETDEV_PR_ARG, CHAN_PR_ARG)
);

/* rdev op trace: set the channel of a monitor interface. */
TRACE_EVENT(rdev_set_monitor_channel,
	TP_PROTO(struct wiphy *wiphy,
		 struct cfg80211_chan_def *chandef),
	TP_ARGS(wiphy, chandef),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		CHAN_DEF_ENTRY
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		CHAN_DEF_ASSIGN(chandef);
	),
	TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
		  WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
);

/*
 * rdev op trace: authentication request.  The bssid field is recorded
 * as all-zero when the request carries no BSS (req->bss == NULL).
 */
TRACE_EVENT(rdev_auth,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
		 struct cfg80211_auth_request *req),
	TP_ARGS(wiphy, netdev, req),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		NETDEV_ENTRY
		MAC_ENTRY(bssid)
		__field(enum nl80211_auth_type, auth_type)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		NETDEV_ASSIGN;
		if (req->bss)
			MAC_ASSIGN(bssid, req->bss->bssid);
		else
			memset(__entry->bssid, 0, ETH_ALEN);
		__entry->auth_type = req->auth_type;
	),
	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", auth type: %d, bssid: " MAC_PR_FMT,
		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->auth_type,
		  MAC_PR_ARG(bssid))
);

/*
 * rdev op trace: association request.  bssid is zeroed when req->bss
 * is NULL, same convention as rdev_auth above.
 */
TRACE_EVENT(rdev_assoc,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
		 struct cfg80211_assoc_request *req),
	TP_ARGS(wiphy, netdev, req),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		NETDEV_ENTRY
		MAC_ENTRY(bssid)
		MAC_ENTRY(prev_bssid)
		__field(bool, use_mfp)
		__field(u32, flags)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		NETDEV_ASSIGN;
		if (req->bss)
			MAC_ASSIGN(bssid, req->bss->bssid);
		else
			memset(__entry->bssid, 0, ETH_ALEN);
		MAC_ASSIGN(prev_bssid, req->prev_bssid);
		__entry->use_mfp = req->use_mfp;
		__entry->flags = req->flags;
	),
	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT
		  ", previous bssid: " MAC_PR_FMT ", use mfp: %s, flags: %u",
		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid),
		  MAC_PR_ARG(prev_bssid), BOOL_TO_STR(__entry->use_mfp),
		  __entry->flags)
);

/* rdev op trace: deauthentication request (bssid + reason code). */
TRACE_EVENT(rdev_deauth,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
		 struct cfg80211_deauth_request *req),
	TP_ARGS(wiphy, netdev, req),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		NETDEV_ENTRY
		MAC_ENTRY(bssid)
		__field(u16, reason_code)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		NETDEV_ASSIGN;
		MAC_ASSIGN(bssid, req->bssid);
		__entry->reason_code = req->reason_code;
	),
	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", reason: %u",
		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid),
		  __entry->reason_code)
);

/*
 * rdev op trace: disassociation request.  bssid is zeroed when
 * req->bss is NULL, same convention as rdev_auth above.
 */
TRACE_EVENT(rdev_disassoc,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
		 struct cfg80211_disassoc_request *req),
	TP_ARGS(wiphy, netdev, req),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		NETDEV_ENTRY
		MAC_ENTRY(bssid)
		__field(u16, reason_code)
		__field(bool, local_state_change)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		NETDEV_ASSIGN;
		if (req->bss)
			MAC_ASSIGN(bssid, req->bss->bssid);
		else
			memset(__entry->bssid, 0, ETH_ALEN);
		__entry->reason_code = req->reason_code;
		__entry->local_state_change = req->local_state_change;
	),
	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT
		  ", reason: %u, local state change: %s",
		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid),
		  __entry->reason_code,
		  BOOL_TO_STR(__entry->local_state_change))
);
1078
/* rdev op trace: cancel an outstanding mgmt-tx wait, keyed by cookie. */
TRACE_EVENT(rdev_mgmt_tx_cancel_wait,
	TP_PROTO(struct wiphy *wiphy,
		 struct wireless_dev *wdev, u64 cookie),
	TP_ARGS(wiphy, wdev, cookie),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		WDEV_ENTRY
		__field(u64, cookie)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		WDEV_ASSIGN;
		__entry->cookie = cookie;
	),
	TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie: %llu ",
		  WIPHY_PR_ARG, WDEV_PR_ARG, __entry->cookie)
);

/* rdev op trace: enable/disable powersave with the given timeout. */
TRACE_EVENT(rdev_set_power_mgmt,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
		 bool enabled, int timeout),
	TP_ARGS(wiphy, netdev, enabled, timeout),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		NETDEV_ENTRY
		__field(bool, enabled)
		__field(int, timeout)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		NETDEV_ASSIGN;
		__entry->enabled = enabled;
		__entry->timeout = timeout;
	),
	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %senabled, timeout: %d ",
		  WIPHY_PR_ARG, NETDEV_PR_ARG,
		  __entry->enabled ? "" : "not ", __entry->timeout)
);

/*
 * rdev op trace: connect request.  The SSID is copied into a buffer one
 * byte longer than the maximum SSID length, pre-zeroed so the recorded
 * string is always NUL-terminated for the %s in TP_printk.
 */
TRACE_EVENT(rdev_connect,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
		 struct cfg80211_connect_params *sme),
	TP_ARGS(wiphy, netdev, sme),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		NETDEV_ENTRY
		MAC_ENTRY(bssid)
		__array(char, ssid, IEEE80211_MAX_SSID_LEN + 1)
		__field(enum nl80211_auth_type, auth_type)
		__field(bool, privacy)
		__field(u32, wpa_versions)
		__field(u32, flags)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		NETDEV_ASSIGN;
		MAC_ASSIGN(bssid, sme->bssid);
		memset(__entry->ssid, 0, IEEE80211_MAX_SSID_LEN + 1);
		memcpy(__entry->ssid, sme->ssid, sme->ssid_len);
		__entry->auth_type = sme->auth_type;
		__entry->privacy = sme->privacy;
		__entry->wpa_versions = sme->crypto.wpa_versions;
		__entry->flags = sme->flags;
	),
	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT
		  ", ssid: %s, auth type: %d, privacy: %s, wpa versions: %u, "
		  "flags: %u",
		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->ssid,
		  __entry->auth_type, BOOL_TO_STR(__entry->privacy),
		  __entry->wpa_versions, __entry->flags)
);

/* rdev op trace: configure the RSSI connection-quality threshold. */
TRACE_EVENT(rdev_set_cqm_rssi_config,
	TP_PROTO(struct wiphy *wiphy,
		 struct net_device *netdev, s32 rssi_thold,
		 u32 rssi_hyst),
	TP_ARGS(wiphy, netdev, rssi_thold, rssi_hyst),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		NETDEV_ENTRY
		__field(s32, rssi_thold)
		__field(u32, rssi_hyst)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		NETDEV_ASSIGN;
		__entry->rssi_thold = rssi_thold;
		__entry->rssi_hyst = rssi_hyst;
	),
	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT
		  ", rssi_thold: %d, rssi_hyst: %u ",
		  WIPHY_PR_ARG, NETDEV_PR_ARG,
		  __entry->rssi_thold, __entry->rssi_hyst)
);

/* rdev op trace: configure the TX-error connection-quality monitor. */
TRACE_EVENT(rdev_set_cqm_txe_config,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u32 rate,
		 u32 pkts, u32 intvl),
	TP_ARGS(wiphy, netdev, rate, pkts, intvl),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		NETDEV_ENTRY
		__field(u32, rate)
		__field(u32, pkts)
		__field(u32, intvl)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		NETDEV_ASSIGN;
		__entry->rate = rate;
		__entry->pkts = pkts;
		__entry->intvl = intvl;
	),
	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", rate: %u, packets: %u, interval: %u",
		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->rate, __entry->pkts,
		  __entry->intvl)
);

/* rdev op trace: disconnect request with the given reason code. */
TRACE_EVENT(rdev_disconnect,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
		 u16 reason_code),
	TP_ARGS(wiphy, netdev, reason_code),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		NETDEV_ENTRY
		__field(u16, reason_code)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		NETDEV_ASSIGN;
		__entry->reason_code = reason_code;
	),
	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", reason code: %u", WIPHY_PR_ARG,
		  NETDEV_PR_ARG, __entry->reason_code)
);
1214
/*
 * rdev op trace: join an IBSS.  As in rdev_connect, the SSID buffer is
 * pre-zeroed so the recorded string is NUL-terminated for %s.
 */
TRACE_EVENT(rdev_join_ibss,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
		 struct cfg80211_ibss_params *params),
	TP_ARGS(wiphy, netdev, params),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		NETDEV_ENTRY
		MAC_ENTRY(bssid)
		__array(char, ssid, IEEE80211_MAX_SSID_LEN + 1)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		NETDEV_ASSIGN;
		MAC_ASSIGN(bssid, params->bssid);
		memset(__entry->ssid, 0, IEEE80211_MAX_SSID_LEN + 1);
		memcpy(__entry->ssid, params->ssid, params->ssid_len);
	),
	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", ssid: %s",
		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->ssid)
);

/* rdev op trace: wiphy parameter change; "changed" is a bitmask of
 * WIPHY_PARAM_* flags. */
TRACE_EVENT(rdev_set_wiphy_params,
	TP_PROTO(struct wiphy *wiphy, u32 changed),
	TP_ARGS(wiphy, changed),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		__field(u32, changed)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		__entry->changed = changed;
	),
	TP_printk(WIPHY_PR_FMT ", changed: %u",
		  WIPHY_PR_ARG, __entry->changed)
);

/* rdev op trace: query TX power (wiphy + wdev only, no extra fields). */
DEFINE_EVENT(wiphy_wdev_evt, rdev_get_tx_power,
	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
	TP_ARGS(wiphy, wdev)
);
1255
1256TRACE_EVENT(rdev_set_tx_power,
1257 TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
1258 enum nl80211_tx_power_setting type, int mbm),
1259 TP_ARGS(wiphy, wdev, type, mbm),
1260 TP_STRUCT__entry(
1261 WIPHY_ENTRY
1262 WDEV_ENTRY
1263 __field(enum nl80211_tx_power_setting, type)
1264 __field(int, mbm)
1265 ),
1266 TP_fast_assign(
1267 WIPHY_ASSIGN;
1268 WDEV_ASSIGN;
1269 __entry->type = type;
1270 __entry->mbm = mbm;
1271 ),
1272 TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", type: %u, mbm: %d",
1273 WIPHY_PR_ARG, WDEV_PR_ARG,__entry->type, __entry->mbm)
1274);
1275
/* return trace: an rdev op returned two ints (return value + fill count). */
TRACE_EVENT(rdev_return_int_int,
	TP_PROTO(struct wiphy *wiphy, int func_ret, int func_fill),
	TP_ARGS(wiphy, func_ret, func_fill),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		__field(int, func_ret)
		__field(int, func_fill)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		__entry->func_ret = func_ret;
		__entry->func_fill = func_fill;
	),
	TP_printk(WIPHY_PR_FMT ", function returns: %d, function filled: %d",
		  WIPHY_PR_ARG, __entry->func_ret, __entry->func_fill)
);

/* Testmode traces record only the wiphy; compiled in only with
 * CONFIG_NL80211_TESTMODE. */
#ifdef CONFIG_NL80211_TESTMODE
TRACE_EVENT(rdev_testmode_cmd,
	TP_PROTO(struct wiphy *wiphy),
	TP_ARGS(wiphy),
	TP_STRUCT__entry(
		WIPHY_ENTRY
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
	),
	TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG)
);

TRACE_EVENT(rdev_testmode_dump,
	TP_PROTO(struct wiphy *wiphy),
	TP_ARGS(wiphy),
	TP_STRUCT__entry(
		WIPHY_ENTRY
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
	),
	TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG)
);
#endif /* CONFIG_NL80211_TESTMODE */
1318
/*
 * rdev op trace: set the bitrate mask for a peer.  Only the peer MAC is
 * recorded; the mask contents themselves are not captured.
 */
TRACE_EVENT(rdev_set_bitrate_mask,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
		 const u8 *peer, const struct cfg80211_bitrate_mask *mask),
	TP_ARGS(wiphy, netdev, peer, mask),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		NETDEV_ENTRY
		MAC_ENTRY(peer)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		NETDEV_ASSIGN;
		MAC_ASSIGN(peer, peer);
	),
	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT,
		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer))
);

/* rdev op trace: (un)register for a management frame type. */
TRACE_EVENT(rdev_mgmt_frame_register,
	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
		 u16 frame_type, bool reg),
	TP_ARGS(wiphy, wdev, frame_type, reg),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		WDEV_ENTRY
		__field(u16, frame_type)
		__field(bool, reg)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		WDEV_ASSIGN;
		__entry->frame_type = frame_type;
		__entry->reg = reg;
	),
	TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", frame_type: 0x%.2x, reg: %s ",
		  WIPHY_PR_ARG, WDEV_PR_ARG, __entry->frame_type,
		  __entry->reg ? "true" : "false")
);

/* return trace: an rdev op returned an int plus tx/rx values. */
TRACE_EVENT(rdev_return_int_tx_rx,
	TP_PROTO(struct wiphy *wiphy, int ret, u32 tx, u32 rx),
	TP_ARGS(wiphy, ret, tx, rx),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		__field(int, ret)
		__field(u32, tx)
		__field(u32, rx)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		__entry->ret = ret;
		__entry->tx = tx;
		__entry->rx = rx;
	),
	TP_printk(WIPHY_PR_FMT ", returned %d, tx: %u, rx: %u",
		  WIPHY_PR_ARG, __entry->ret, __entry->tx, __entry->rx)
);

/* return trace: void rdev op that filled tx/rx values and their maxima. */
TRACE_EVENT(rdev_return_void_tx_rx,
	TP_PROTO(struct wiphy *wiphy, u32 tx, u32 tx_max,
		 u32 rx, u32 rx_max),
	TP_ARGS(wiphy, tx, tx_max, rx, rx_max),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		__field(u32, tx)
		__field(u32, tx_max)
		__field(u32, rx)
		__field(u32, rx_max)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		__entry->tx = tx;
		__entry->tx_max = tx_max;
		__entry->rx = rx;
		__entry->rx_max = rx_max;
	),
	TP_printk(WIPHY_PR_FMT ", tx: %u, tx_max: %u, rx: %u, rx_max: %u ",
		  WIPHY_PR_ARG, __entry->tx, __entry->tx_max, __entry->rx,
		  __entry->rx_max)
);
1399
1400DECLARE_EVENT_CLASS(tx_rx_evt,
1401 TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
1402 TP_ARGS(wiphy, rx, tx),
1403 TP_STRUCT__entry(
1404 WIPHY_ENTRY
1405 __field(u32, tx)
1406 __field(u32, rx)
1407 ),
1408 TP_fast_assign(
1409 WIPHY_ASSIGN;
1410 __entry->tx = tx;
1411 __entry->rx = rx;
1412 ),
1413 TP_printk(WIPHY_PR_FMT ", tx: %u, rx: %u ",
1414 WIPHY_PR_ARG, __entry->tx, __entry->rx)
1415);
1416
1417DEFINE_EVENT(tx_rx_evt, rdev_set_ringparam,
1418 TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
1419 TP_ARGS(wiphy, rx, tx)
1420);
1421
1422DEFINE_EVENT(tx_rx_evt, rdev_set_antenna,
1423 TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
1424 TP_ARGS(wiphy, rx, tx)
1425);
1426
/*
 * rdev op trace: start a scheduled scan.  Only wiphy and netdev are
 * recorded; the scan request contents are not captured.
 */
TRACE_EVENT(rdev_sched_scan_start,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
		 struct cfg80211_sched_scan_request *request),
	TP_ARGS(wiphy, netdev, request),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		NETDEV_ENTRY
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		NETDEV_ASSIGN;
	),
	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT,
		  WIPHY_PR_ARG, NETDEV_PR_ARG)
);

/*
 * rdev op trace: transmit a TDLS management frame; the frame body is
 * stored in a dynamic array.
 * NOTE(review): TP_printk dereferences byte 0 of the dynamic buf array;
 * looks like this assumes len > 0 — confirm no caller passes an empty
 * frame.
 */
TRACE_EVENT(rdev_tdls_mgmt,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
		 u8 *peer, u8 action_code, u8 dialog_token,
		 u16 status_code, const u8 *buf, size_t len),
	TP_ARGS(wiphy, netdev, peer, action_code, dialog_token, status_code,
		buf, len),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		NETDEV_ENTRY
		MAC_ENTRY(peer)
		__field(u8, action_code)
		__field(u8, dialog_token)
		__field(u16, status_code)
		__dynamic_array(u8, buf, len)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		NETDEV_ASSIGN;
		MAC_ASSIGN(peer, peer);
		__entry->action_code = action_code;
		__entry->dialog_token = dialog_token;
		__entry->status_code = status_code;
		memcpy(__get_dynamic_array(buf), buf, len);
	),
	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ", action_code: %u, "
		  "dialog_token: %u, status_code: %u, buf: %#.2x ",
		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer),
		  __entry->action_code, __entry->dialog_token,
		  __entry->status_code, ((u8 *)__get_dynamic_array(buf))[0])
);

/* rdev op trace: dump survey entry at the given index. */
TRACE_EVENT(rdev_dump_survey,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int idx),
	TP_ARGS(wiphy, netdev, idx),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		NETDEV_ENTRY
		__field(int, idx)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		NETDEV_ASSIGN;
		__entry->idx = idx;
	),
	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", index: %d",
		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->idx)
);

/* return trace: survey dump result, including per-channel time counters. */
TRACE_EVENT(rdev_return_int_survey_info,
	TP_PROTO(struct wiphy *wiphy, int ret, struct survey_info *info),
	TP_ARGS(wiphy, ret, info),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		CHAN_ENTRY
		__field(int, ret)
		__field(u64, channel_time)
		__field(u64, channel_time_busy)
		__field(u64, channel_time_ext_busy)
		__field(u64, channel_time_rx)
		__field(u64, channel_time_tx)
		__field(u32, filled)
		__field(s8, noise)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		CHAN_ASSIGN(info->channel);
		__entry->ret = ret;
		__entry->channel_time = info->channel_time;
		__entry->channel_time_busy = info->channel_time_busy;
		__entry->channel_time_ext_busy = info->channel_time_ext_busy;
		__entry->channel_time_rx = info->channel_time_rx;
		__entry->channel_time_tx = info->channel_time_tx;
		__entry->filled = info->filled;
		__entry->noise = info->noise;
	),
	TP_printk(WIPHY_PR_FMT ", returned: %d, " CHAN_PR_FMT
		  ", channel time: %llu, channel time busy: %llu, "
		  "channel time extension busy: %llu, channel time rx: %llu, "
		  "channel time tx: %llu, filled: %u, noise: %d",
		  WIPHY_PR_ARG, __entry->ret, CHAN_PR_ARG,
		  __entry->channel_time, __entry->channel_time_busy,
		  __entry->channel_time_ext_busy, __entry->channel_time_rx,
		  __entry->channel_time_tx, __entry->filled, __entry->noise)
);

/* rdev op trace: TDLS operation (setup/teardown/...) towards a peer. */
TRACE_EVENT(rdev_tdls_oper,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
		 u8 *peer, enum nl80211_tdls_operation oper),
	TP_ARGS(wiphy, netdev, peer, oper),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		NETDEV_ENTRY
		MAC_ENTRY(peer)
		__field(enum nl80211_tdls_operation, oper)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		NETDEV_ASSIGN;
		MAC_ASSIGN(peer, peer);
		__entry->oper = oper;
	),
	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ", oper: %d",
		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->oper)
);

/* Shared class for PMKSA set/delete ops; records only the BSSID. */
DECLARE_EVENT_CLASS(rdev_pmksa,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
		 struct cfg80211_pmksa *pmksa),
	TP_ARGS(wiphy, netdev, pmksa),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		NETDEV_ENTRY
		MAC_ENTRY(bssid)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		NETDEV_ASSIGN;
		MAC_ASSIGN(bssid, pmksa->bssid);
	),
	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT,
		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid))
);
1565
/* rdev op trace: probe a client (null-data poll) identified by MAC. */
TRACE_EVENT(rdev_probe_client,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
		 const u8 *peer),
	TP_ARGS(wiphy, netdev, peer),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		NETDEV_ENTRY
		MAC_ENTRY(peer)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		NETDEV_ASSIGN;
		MAC_ASSIGN(peer, peer);
	),
	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT,
		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer))
);

/* rdev op trace: add a PMKSA cache entry (uses the rdev_pmksa class). */
DEFINE_EVENT(rdev_pmksa, rdev_set_pmksa,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
		 struct cfg80211_pmksa *pmksa),
	TP_ARGS(wiphy, netdev, pmksa)
);

/* rdev op trace: delete a PMKSA cache entry (uses the rdev_pmksa class). */
DEFINE_EVENT(rdev_pmksa, rdev_del_pmksa,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
		 struct cfg80211_pmksa *pmksa),
	TP_ARGS(wiphy, netdev, pmksa)
);

/* rdev op trace: remain on a channel for the given duration (ms). */
TRACE_EVENT(rdev_remain_on_channel,
	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
		 struct ieee80211_channel *chan,
		 unsigned int duration),
	TP_ARGS(wiphy, wdev, chan, duration),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		WDEV_ENTRY
		CHAN_ENTRY
		__field(unsigned int, duration)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		WDEV_ASSIGN;
		CHAN_ASSIGN(chan);
		__entry->duration = duration;
	),
	TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", " CHAN_PR_FMT ", duration: %u",
		  WIPHY_PR_ARG, WDEV_PR_ARG, CHAN_PR_ARG, __entry->duration)
);

/* return trace: rdev op returned an int and produced an operation cookie. */
TRACE_EVENT(rdev_return_int_cookie,
	TP_PROTO(struct wiphy *wiphy, int ret, u64 cookie),
	TP_ARGS(wiphy, ret, cookie),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		__field(int, ret)
		__field(u64, cookie)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		__entry->ret = ret;
		__entry->cookie = cookie;
	),
	TP_printk(WIPHY_PR_FMT ", returned %d, cookie: %llu",
		  WIPHY_PR_ARG, __entry->ret, __entry->cookie)
);

/* rdev op trace: cancel a remain-on-channel, keyed by cookie. */
TRACE_EVENT(rdev_cancel_remain_on_channel,
	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie),
	TP_ARGS(wiphy, wdev, cookie),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		WDEV_ENTRY
		__field(u64, cookie)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		WDEV_ASSIGN;
		__entry->cookie = cookie;
	),
	TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie: %llu",
		  WIPHY_PR_ARG, WDEV_PR_ARG, __entry->cookie)
);
1650
/* rdev op trace: transmit a management frame (flags only; no frame body). */
TRACE_EVENT(rdev_mgmt_tx,
	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
		 struct ieee80211_channel *chan, bool offchan,
		 unsigned int wait, bool no_cck, bool dont_wait_for_ack),
	TP_ARGS(wiphy, wdev, chan, offchan, wait, no_cck, dont_wait_for_ack),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		WDEV_ENTRY
		CHAN_ENTRY
		__field(bool, offchan)
		__field(unsigned int, wait)
		__field(bool, no_cck)
		__field(bool, dont_wait_for_ack)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		WDEV_ASSIGN;
		CHAN_ASSIGN(chan);
		__entry->offchan = offchan;
		__entry->wait = wait;
		__entry->no_cck = no_cck;
		__entry->dont_wait_for_ack = dont_wait_for_ack;
	),
	TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", " CHAN_PR_FMT ", offchan: %s,"
		  " wait: %u, no cck: %s, dont wait for ack: %s",
		  WIPHY_PR_ARG, WDEV_PR_ARG, CHAN_PR_ARG,
		  BOOL_TO_STR(__entry->offchan), __entry->wait,
		  BOOL_TO_STR(__entry->no_cck),
		  BOOL_TO_STR(__entry->dont_wait_for_ack))
);

/* rdev op trace: set the per-TID no-ack bitmap. */
TRACE_EVENT(rdev_set_noack_map,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
		 u16 noack_map),
	TP_ARGS(wiphy, netdev, noack_map),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		NETDEV_ENTRY
		__field(u16, noack_map)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		NETDEV_ASSIGN;
		__entry->noack_map = noack_map;
	),
	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", noack_map: %u",
		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->noack_map)
);

/* rdev op trace: ethtool stats-set count query. */
TRACE_EVENT(rdev_get_et_sset_count,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int sset),
	TP_ARGS(wiphy, netdev, sset),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		NETDEV_ENTRY
		__field(int, sset)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		NETDEV_ASSIGN;
		__entry->sset = sset;
	),
	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", sset: %d",
		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sset)
);

/* rdev op trace: ethtool strings query for the given string set. */
TRACE_EVENT(rdev_get_et_strings,
	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u32 sset),
	TP_ARGS(wiphy, netdev, sset),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		NETDEV_ENTRY
		__field(u32, sset)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		NETDEV_ASSIGN;
		__entry->sset = sset;
	),
	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", sset: %u",
		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sset)
);

/* rdev op trace: query the current channel (wiphy + wdev only). */
DEFINE_EVENT(wiphy_wdev_evt, rdev_get_channel,
	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
	TP_ARGS(wiphy, wdev)
);

/*
 * return trace: channel query result.  On failure a NULL chandef is
 * recorded so the chan fields are zeroed rather than stale.
 */
TRACE_EVENT(rdev_return_chandef,
	TP_PROTO(struct wiphy *wiphy, int ret,
		 struct cfg80211_chan_def *chandef),
	TP_ARGS(wiphy, ret, chandef),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		__field(int, ret)
		CHAN_DEF_ENTRY
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		if (ret == 0)
			CHAN_DEF_ASSIGN(chandef);
		else
			CHAN_DEF_ASSIGN((struct cfg80211_chan_def *)NULL);
		__entry->ret = ret;
	),
	TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", ret: %d",
		  WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->ret)
);

/* rdev op trace: start a P2P device (wiphy + wdev only). */
DEFINE_EVENT(wiphy_wdev_evt, rdev_start_p2p_device,
	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
	TP_ARGS(wiphy, wdev)
);

/* rdev op trace: stop a P2P device (wiphy + wdev only). */
DEFINE_EVENT(wiphy_wdev_evt, rdev_stop_p2p_device,
	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
	TP_ARGS(wiphy, wdev)
);
1769
1770/*************************************************************
1771 * cfg80211 exported functions traces *
1772 *************************************************************/
1773
/* exported-function trace: a cfg80211 API call returned a bool. */
TRACE_EVENT(cfg80211_return_bool,
	TP_PROTO(bool ret),
	TP_ARGS(ret),
	TP_STRUCT__entry(
		__field(bool, ret)
	),
	TP_fast_assign(
		__entry->ret = ret;
	),
	TP_printk("returned %s", BOOL_TO_STR(__entry->ret))
);

/* Shared class: netdev plus a MAC address. */
DECLARE_EVENT_CLASS(cfg80211_netdev_mac_evt,
	TP_PROTO(struct net_device *netdev, const u8 *macaddr),
	TP_ARGS(netdev, macaddr),
	TP_STRUCT__entry(
		NETDEV_ENTRY
		MAC_ENTRY(macaddr)
	),
	TP_fast_assign(
		NETDEV_ASSIGN;
		MAC_ASSIGN(macaddr, macaddr);
	),
	TP_printk(NETDEV_PR_FMT ", mac: " MAC_PR_FMT,
		  NETDEV_PR_ARG, MAC_PR_ARG(macaddr))
);

/* trace: a new mesh peer candidate was notified to userspace. */
DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_notify_new_peer_candidate,
	TP_PROTO(struct net_device *netdev, const u8 *macaddr),
	TP_ARGS(netdev, macaddr)
);

/* Shared class: events that record only the netdev. */
DECLARE_EVENT_CLASS(netdev_evt_only,
	TP_PROTO(struct net_device *netdev),
	TP_ARGS(netdev),
	TP_STRUCT__entry(
		NETDEV_ENTRY
	),
	TP_fast_assign(
		NETDEV_ASSIGN;
	),
	TP_printk(NETDEV_PR_FMT , NETDEV_PR_ARG)
);

/* trace: an authentication response frame was received. */
DEFINE_EVENT(netdev_evt_only, cfg80211_send_rx_auth,
	TP_PROTO(struct net_device *netdev),
	TP_ARGS(netdev)
);

/* trace: an association response was received for the given BSS. */
TRACE_EVENT(cfg80211_send_rx_assoc,
	TP_PROTO(struct net_device *netdev, struct cfg80211_bss *bss),
	TP_ARGS(netdev, bss),
	TP_STRUCT__entry(
		NETDEV_ENTRY
		MAC_ENTRY(bssid)
		CHAN_ENTRY
	),
	TP_fast_assign(
		NETDEV_ASSIGN;
		MAC_ASSIGN(bssid, bss->bssid);
		CHAN_ASSIGN(bss->channel);
	),
	TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", " CHAN_PR_FMT,
		  NETDEV_PR_ARG, MAC_PR_ARG(bssid), CHAN_PR_ARG)
);

/* trace: deauth frame processed (internal helper entry point). */
DEFINE_EVENT(netdev_evt_only, __cfg80211_send_deauth,
	TP_PROTO(struct net_device *netdev),
	TP_ARGS(netdev)
);

/* trace: disassoc frame processed (internal helper entry point). */
DEFINE_EVENT(netdev_evt_only, __cfg80211_send_disassoc,
	TP_PROTO(struct net_device *netdev),
	TP_ARGS(netdev)
);

/* trace: unprotected deauth frame received. */
DEFINE_EVENT(netdev_evt_only, cfg80211_send_unprot_deauth,
	TP_PROTO(struct net_device *netdev),
	TP_ARGS(netdev)
);

/* trace: unprotected disassoc frame received. */
DEFINE_EVENT(netdev_evt_only, cfg80211_send_unprot_disassoc,
	TP_PROTO(struct net_device *netdev),
	TP_ARGS(netdev)
);
1859
1860DECLARE_EVENT_CLASS(netdev_mac_evt,
1861 TP_PROTO(struct net_device *netdev, const u8 *mac),
1862 TP_ARGS(netdev, mac),
1863 TP_STRUCT__entry(
1864 NETDEV_ENTRY
1865 MAC_ENTRY(mac)
1866 ),
1867 TP_fast_assign(
1868 NETDEV_ASSIGN;
1869 MAC_ASSIGN(mac, mac)
1870 ),
1871 TP_printk(NETDEV_PR_FMT ", mac: " MAC_PR_FMT,
1872 NETDEV_PR_ARG, MAC_PR_ARG(mac))
1873);
1874
/* trace: authentication attempt to the given MAC timed out. */
DEFINE_EVENT(netdev_mac_evt, cfg80211_send_auth_timeout,
	TP_PROTO(struct net_device *netdev, const u8 *mac),
	TP_ARGS(netdev, mac)
);

/* trace: association attempt to the given MAC timed out. */
DEFINE_EVENT(netdev_mac_evt, cfg80211_send_assoc_timeout,
	TP_PROTO(struct net_device *netdev, const u8 *mac),
	TP_ARGS(netdev, mac)
);
1884
1885TRACE_EVENT(cfg80211_michael_mic_failure,
1886 TP_PROTO(struct net_device *netdev, const u8 *addr,
1887 enum nl80211_key_type key_type, int key_id, const u8 *tsc),
1888 TP_ARGS(netdev, addr, key_type, key_id, tsc),
1889 TP_STRUCT__entry(
1890 NETDEV_ENTRY
1891 MAC_ENTRY(addr)
1892 __field(enum nl80211_key_type, key_type)
1893 __field(int, key_id)
1894 __array(u8, tsc, 6)
1895 ),
1896 TP_fast_assign(
1897 NETDEV_ASSIGN;
1898 MAC_ASSIGN(addr, addr);
1899 __entry->key_type = key_type;
1900 __entry->key_id = key_id;
1901 memcpy(__entry->tsc, tsc, 6);
1902 ),
1903 TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", key type: %d, key id: %d, tsc: %pm",
1904 NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->key_type,
1905 __entry->key_id, __entry->tsc)
1906);
1907
/* trace: remain-on-channel period started (cookie identifies the request). */
TRACE_EVENT(cfg80211_ready_on_channel,
	TP_PROTO(struct wireless_dev *wdev, u64 cookie,
		 struct ieee80211_channel *chan,
		 unsigned int duration),
	TP_ARGS(wdev, cookie, chan, duration),
	TP_STRUCT__entry(
		WDEV_ENTRY
		__field(u64, cookie)
		CHAN_ENTRY
		__field(unsigned int, duration)
	),
	TP_fast_assign(
		WDEV_ASSIGN;
		__entry->cookie = cookie;
		CHAN_ASSIGN(chan);
		__entry->duration = duration;
	),
	TP_printk(WDEV_PR_FMT ", cookie: %llu, " CHAN_PR_FMT ", duration: %u",
		  WDEV_PR_ARG, __entry->cookie, CHAN_PR_ARG,
		  __entry->duration)
);

/* trace: remain-on-channel period expired. */
TRACE_EVENT(cfg80211_ready_on_channel_expired,
	TP_PROTO(struct wireless_dev *wdev, u64 cookie,
		 struct ieee80211_channel *chan),
	TP_ARGS(wdev, cookie, chan),
	TP_STRUCT__entry(
		WDEV_ENTRY
		__field(u64, cookie)
		CHAN_ENTRY
	),
	TP_fast_assign(
		WDEV_ASSIGN;
		__entry->cookie = cookie;
		CHAN_ASSIGN(chan);
	),
	TP_printk(WDEV_PR_FMT ", cookie: %llu, " CHAN_PR_FMT,
		  WDEV_PR_ARG, __entry->cookie, CHAN_PR_ARG)
);

/* trace: new station appeared; station info recorded via SINFO_ENTRY. */
TRACE_EVENT(cfg80211_new_sta,
	TP_PROTO(struct net_device *netdev, const u8 *mac_addr,
		 struct station_info *sinfo),
	TP_ARGS(netdev, mac_addr, sinfo),
	TP_STRUCT__entry(
		NETDEV_ENTRY
		MAC_ENTRY(mac_addr)
		SINFO_ENTRY
	),
	TP_fast_assign(
		NETDEV_ASSIGN;
		MAC_ASSIGN(mac_addr, mac_addr);
		SINFO_ASSIGN;
	),
	TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT,
		  NETDEV_PR_ARG, MAC_PR_ARG(mac_addr))
);

/* trace: a station was removed. */
DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_del_sta,
	TP_PROTO(struct net_device *netdev, const u8 *macaddr),
	TP_ARGS(netdev, macaddr)
);

/* trace: management frame received (frequency + signal in mBm). */
TRACE_EVENT(cfg80211_rx_mgmt,
	TP_PROTO(struct wireless_dev *wdev, int freq, int sig_mbm),
	TP_ARGS(wdev, freq, sig_mbm),
	TP_STRUCT__entry(
		WDEV_ENTRY
		__field(int, freq)
		__field(int, sig_mbm)
	),
	TP_fast_assign(
		WDEV_ASSIGN;
		__entry->freq = freq;
		__entry->sig_mbm = sig_mbm;
	),
	TP_printk(WDEV_PR_FMT ", freq: %d, sig mbm: %d",
		  WDEV_PR_ARG, __entry->freq, __entry->sig_mbm)
);

/* trace: TX status for a management frame, keyed by cookie. */
TRACE_EVENT(cfg80211_mgmt_tx_status,
	TP_PROTO(struct wireless_dev *wdev, u64 cookie, bool ack),
	TP_ARGS(wdev, cookie, ack),
	TP_STRUCT__entry(
		WDEV_ENTRY
		__field(u64, cookie)
		__field(bool, ack)
	),
	TP_fast_assign(
		WDEV_ASSIGN;
		__entry->cookie = cookie;
		__entry->ack = ack;
	),
	TP_printk(WDEV_PR_FMT", cookie: %llu, ack: %s",
		  WDEV_PR_ARG, __entry->cookie, BOOL_TO_STR(__entry->ack))
);

/* trace: RSSI threshold event (crossed high/low) for CQM. */
TRACE_EVENT(cfg80211_cqm_rssi_notify,
	TP_PROTO(struct net_device *netdev,
		 enum nl80211_cqm_rssi_threshold_event rssi_event),
	TP_ARGS(netdev, rssi_event),
	TP_STRUCT__entry(
		NETDEV_ENTRY
		__field(enum nl80211_cqm_rssi_threshold_event, rssi_event)
	),
	TP_fast_assign(
		NETDEV_ASSIGN;
		__entry->rssi_event = rssi_event;
	),
	TP_printk(NETDEV_PR_FMT ", rssi event: %d",
		  NETDEV_PR_ARG, __entry->rssi_event)
);
2020
/* trace: regulatory check whether beaconing is allowed on a chandef. */
TRACE_EVENT(cfg80211_reg_can_beacon,
	TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef),
	TP_ARGS(wiphy, chandef),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		CHAN_DEF_ENTRY
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		CHAN_DEF_ASSIGN(chandef);
	),
	TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
		  WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
);

/* trace: channel switch completed; new channel definition notified. */
TRACE_EVENT(cfg80211_ch_switch_notify,
	TP_PROTO(struct net_device *netdev,
		 struct cfg80211_chan_def *chandef),
	TP_ARGS(netdev, chandef),
	TP_STRUCT__entry(
		NETDEV_ENTRY
		CHAN_DEF_ENTRY
	),
	TP_fast_assign(
		NETDEV_ASSIGN;
		CHAN_DEF_ASSIGN(chandef);
	),
	TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT,
		  NETDEV_PR_ARG, CHAN_DEF_PR_ARG)
);

/* Shared class: RX-side events recording netdev plus a MAC address. */
DECLARE_EVENT_CLASS(cfg80211_rx_evt,
	TP_PROTO(struct net_device *netdev, const u8 *addr),
	TP_ARGS(netdev, addr),
	TP_STRUCT__entry(
		NETDEV_ENTRY
		MAC_ENTRY(addr)
	),
	TP_fast_assign(
		NETDEV_ASSIGN;
		MAC_ASSIGN(addr, addr);
	),
	TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT, NETDEV_PR_ARG, MAC_PR_ARG(addr))
);

/* trace: IBSS join completed with the given BSSID. */
DEFINE_EVENT(cfg80211_rx_evt, cfg80211_ibss_joined,
	TP_PROTO(struct net_device *netdev, const u8 *addr),
	TP_ARGS(netdev, addr)
);

/* trace: frame from an unknown/unexpected station. */
DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_spurious_frame,
	TP_PROTO(struct net_device *netdev, const u8 *addr),
	TP_ARGS(netdev, addr)
);

/* trace: unexpected 4-address frame received from a station. */
DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_unexpected_4addr_frame,
	TP_PROTO(struct net_device *netdev, const u8 *addr),
	TP_ARGS(netdev, addr)
);

/* trace: probe (null-data poll) status for a client, keyed by cookie. */
TRACE_EVENT(cfg80211_probe_status,
	TP_PROTO(struct net_device *netdev, const u8 *addr, u64 cookie,
		 bool acked),
	TP_ARGS(netdev, addr, cookie, acked),
	TP_STRUCT__entry(
		NETDEV_ENTRY
		MAC_ENTRY(addr)
		__field(u64, cookie)
		__field(bool, acked)
	),
	TP_fast_assign(
		NETDEV_ASSIGN;
		MAC_ASSIGN(addr, addr);
		__entry->cookie = cookie;
		__entry->acked = acked;
	),
	TP_printk(NETDEV_PR_FMT " addr:" MAC_PR_FMT ", cookie: %llu, acked: %s",
		  NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->cookie,
		  BOOL_TO_STR(__entry->acked))
);

/* trace: CQM packet-loss threshold reached for a peer. */
TRACE_EVENT(cfg80211_cqm_pktloss_notify,
	TP_PROTO(struct net_device *netdev, const u8 *peer, u32 num_packets),
	TP_ARGS(netdev, peer, num_packets),
	TP_STRUCT__entry(
		NETDEV_ENTRY
		MAC_ENTRY(peer)
		__field(u32, num_packets)
	),
	TP_fast_assign(
		NETDEV_ASSIGN;
		MAC_ASSIGN(peer, peer);
		__entry->num_packets = num_packets;
	),
	TP_printk(NETDEV_PR_FMT ", peer: " MAC_PR_FMT ", num of lost packets: %u",
		  NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->num_packets)
);

/* trace: GTK rekey notification from the driver (BSSID only). */
DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_gtk_rekey_notify,
	TP_PROTO(struct net_device *netdev, const u8 *macaddr),
	TP_ARGS(netdev, macaddr)
);

/* trace: PMKSA candidate found for pre-authentication. */
TRACE_EVENT(cfg80211_pmksa_candidate_notify,
	TP_PROTO(struct net_device *netdev, int index, const u8 *bssid,
		 bool preauth),
	TP_ARGS(netdev, index, bssid, preauth),
	TP_STRUCT__entry(
		NETDEV_ENTRY
		__field(int, index)
		MAC_ENTRY(bssid)
		__field(bool, preauth)
	),
	TP_fast_assign(
		NETDEV_ASSIGN;
		__entry->index = index;
		MAC_ASSIGN(bssid, bssid);
		__entry->preauth = preauth;
	),
	TP_printk(NETDEV_PR_FMT ", index:%d, bssid: " MAC_PR_FMT ", pre auth: %s",
		  NETDEV_PR_ARG, __entry->index, MAC_PR_ARG(bssid),
		  BOOL_TO_STR(__entry->preauth))
);

/*
 * trace: OBSS beacon reported.  Only freq and signal are recorded; the
 * frame pointer and length arguments are accepted but not captured.
 */
TRACE_EVENT(cfg80211_report_obss_beacon,
	TP_PROTO(struct wiphy *wiphy, const u8 *frame, size_t len,
		 int freq, int sig_dbm),
	TP_ARGS(wiphy, frame, len, freq, sig_dbm),
	TP_STRUCT__entry(
		WIPHY_ENTRY
		__field(int, freq)
		__field(int, sig_dbm)
	),
	TP_fast_assign(
		WIPHY_ASSIGN;
		__entry->freq = freq;
		__entry->sig_dbm = sig_dbm;
	),
	TP_printk(WIPHY_PR_FMT ", freq: %d, sig_dbm: %d",
		  WIPHY_PR_ARG, __entry->freq, __entry->sig_dbm)
);
2162
2163TRACE_EVENT(cfg80211_tdls_oper_request,
2164 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *peer,
2165 enum nl80211_tdls_operation oper, u16 reason_code),
2166 TP_ARGS(wiphy, netdev, peer, oper, reason_code),
2167 TP_STRUCT__entry(
2168 WIPHY_ENTRY
2169 NETDEV_ENTRY
2170 MAC_ENTRY(peer)
2171 __field(enum nl80211_tdls_operation, oper)
2172 __field(u16, reason_code)
2173 ),
2174 TP_fast_assign(
2175 WIPHY_ASSIGN;
2176 NETDEV_ASSIGN;
2177 MAC_ASSIGN(peer, peer);
2178 __entry->oper = oper;
2179 __entry->reason_code = reason_code;
2180 ),
2181 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT ", oper: %d, reason_code %u",
2182 WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->oper,
2183 __entry->reason_code)
2184 );
2185
2186TRACE_EVENT(cfg80211_scan_done,
2187 TP_PROTO(struct cfg80211_scan_request *request, bool aborted),
2188 TP_ARGS(request, aborted),
2189 TP_STRUCT__entry(
2190 __field(u32, n_channels)
2191 __dynamic_array(u8, ie, request ? request->ie_len : 0)
2192 __array(u32, rates, IEEE80211_NUM_BANDS)
2193 __field(u32, wdev_id)
2194 MAC_ENTRY(wiphy_mac)
2195 __field(bool, no_cck)
2196 __field(bool, aborted)
2197 ),
2198 TP_fast_assign(
2199 if (request) {
2200 memcpy(__get_dynamic_array(ie), request->ie,
2201 request->ie_len);
2202 memcpy(__entry->rates, request->rates,
2203 IEEE80211_NUM_BANDS);
2204 __entry->wdev_id = request->wdev ?
2205 request->wdev->identifier : 0;
2206 if (request->wiphy)
2207 MAC_ASSIGN(wiphy_mac,
2208 request->wiphy->perm_addr);
2209 __entry->no_cck = request->no_cck;
2210 }
2211 __entry->aborted = aborted;
2212 ),
2213 TP_printk("aborted: %s", BOOL_TO_STR(__entry->aborted))
2214);
2215
2216DEFINE_EVENT(wiphy_only_evt, cfg80211_sched_scan_results,
2217 TP_PROTO(struct wiphy *wiphy),
2218 TP_ARGS(wiphy)
2219);
2220
2221DEFINE_EVENT(wiphy_only_evt, cfg80211_sched_scan_stopped,
2222 TP_PROTO(struct wiphy *wiphy),
2223 TP_ARGS(wiphy)
2224);
2225
2226TRACE_EVENT(cfg80211_get_bss,
2227 TP_PROTO(struct wiphy *wiphy, struct ieee80211_channel *channel,
2228 const u8 *bssid, const u8 *ssid, size_t ssid_len,
2229 u16 capa_mask, u16 capa_val),
2230 TP_ARGS(wiphy, channel, bssid, ssid, ssid_len, capa_mask, capa_val),
2231 TP_STRUCT__entry(
2232 WIPHY_ENTRY
2233 CHAN_ENTRY
2234 MAC_ENTRY(bssid)
2235 __dynamic_array(u8, ssid, ssid_len)
2236 __field(u16, capa_mask)
2237 __field(u16, capa_val)
2238 ),
2239 TP_fast_assign(
2240 WIPHY_ASSIGN;
2241 CHAN_ASSIGN(channel);
2242 MAC_ASSIGN(bssid, bssid);
2243 memcpy(__get_dynamic_array(ssid), ssid, ssid_len);
2244 __entry->capa_mask = capa_mask;
2245 __entry->capa_val = capa_val;
2246 ),
2247 TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT ", " MAC_PR_FMT ", buf: %#.2x, "
2248 "capa_mask: %d, capa_val: %u", WIPHY_PR_ARG, CHAN_PR_ARG,
2249 MAC_PR_ARG(bssid), ((u8 *)__get_dynamic_array(ssid))[0],
2250 __entry->capa_mask, __entry->capa_val)
2251);
2252
2253TRACE_EVENT(cfg80211_inform_bss_frame,
2254 TP_PROTO(struct wiphy *wiphy, struct ieee80211_channel *channel,
2255 struct ieee80211_mgmt *mgmt, size_t len,
2256 s32 signal),
2257 TP_ARGS(wiphy, channel, mgmt, len, signal),
2258 TP_STRUCT__entry(
2259 WIPHY_ENTRY
2260 CHAN_ENTRY
2261 __dynamic_array(u8, mgmt, len)
2262 __field(s32, signal)
2263 ),
2264 TP_fast_assign(
2265 WIPHY_ASSIGN;
2266 CHAN_ASSIGN(channel);
2267 if (mgmt)
2268 memcpy(__get_dynamic_array(mgmt), mgmt, len);
2269 __entry->signal = signal;
2270 ),
2271 TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "signal: %d",
2272 WIPHY_PR_ARG, CHAN_PR_ARG, __entry->signal)
2273);
2274
2275DECLARE_EVENT_CLASS(cfg80211_bss_evt,
2276 TP_PROTO(struct cfg80211_bss *pub),
2277 TP_ARGS(pub),
2278 TP_STRUCT__entry(
2279 MAC_ENTRY(bssid)
2280 CHAN_ENTRY
2281 ),
2282 TP_fast_assign(
2283 MAC_ASSIGN(bssid, pub->bssid);
2284 CHAN_ASSIGN(pub->channel);
2285 ),
2286 TP_printk(MAC_PR_FMT ", " CHAN_PR_FMT, MAC_PR_ARG(bssid), CHAN_PR_ARG)
2287);
2288
2289DEFINE_EVENT(cfg80211_bss_evt, cfg80211_return_bss,
2290 TP_PROTO(struct cfg80211_bss *pub),
2291 TP_ARGS(pub)
2292);
2293
2294TRACE_EVENT(cfg80211_return_uint,
2295 TP_PROTO(unsigned int ret),
2296 TP_ARGS(ret),
2297 TP_STRUCT__entry(
2298 __field(unsigned int, ret)
2299 ),
2300 TP_fast_assign(
2301 __entry->ret = ret;
2302 ),
2303 TP_printk("ret: %d", __entry->ret)
2304);
2305
2306TRACE_EVENT(cfg80211_return_u32,
2307 TP_PROTO(u32 ret),
2308 TP_ARGS(ret),
2309 TP_STRUCT__entry(
2310 __field(u32, ret)
2311 ),
2312 TP_fast_assign(
2313 __entry->ret = ret;
2314 ),
2315 TP_printk("ret: %u", __entry->ret)
2316);
2317
2318#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
2319
2320#undef TRACE_INCLUDE_PATH
2321#define TRACE_INCLUDE_PATH .
2322#undef TRACE_INCLUDE_FILE
2323#define TRACE_INCLUDE_FILE trace
2324#include <trace/define_trace.h>
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 2762e8329986..16d76a807c2f 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -11,6 +11,8 @@
11#include <net/ip.h> 11#include <net/ip.h>
12#include <net/dsfield.h> 12#include <net/dsfield.h>
13#include "core.h" 13#include "core.h"
14#include "rdev-ops.h"
15
14 16
15struct ieee80211_rate * 17struct ieee80211_rate *
16ieee80211_get_response_rate(struct ieee80211_supported_band *sband, 18ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
@@ -686,10 +688,13 @@ EXPORT_SYMBOL(cfg80211_classify8021d);
686 688
687const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie) 689const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie)
688{ 690{
689 if (bss->information_elements == NULL) 691 const struct cfg80211_bss_ies *ies;
692
693 ies = rcu_dereference(bss->ies);
694 if (!ies)
690 return NULL; 695 return NULL;
691 return cfg80211_find_ie(ie, bss->information_elements, 696
692 bss->len_information_elements); 697 return cfg80211_find_ie(ie, ies->data, ies->len);
693} 698}
694EXPORT_SYMBOL(ieee80211_bss_get_ie); 699EXPORT_SYMBOL(ieee80211_bss_get_ie);
695 700
@@ -705,19 +710,18 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
705 for (i = 0; i < 6; i++) { 710 for (i = 0; i < 6; i++) {
706 if (!wdev->connect_keys->params[i].cipher) 711 if (!wdev->connect_keys->params[i].cipher)
707 continue; 712 continue;
708 if (rdev->ops->add_key(wdev->wiphy, dev, i, false, NULL, 713 if (rdev_add_key(rdev, dev, i, false, NULL,
709 &wdev->connect_keys->params[i])) { 714 &wdev->connect_keys->params[i])) {
710 netdev_err(dev, "failed to set key %d\n", i); 715 netdev_err(dev, "failed to set key %d\n", i);
711 continue; 716 continue;
712 } 717 }
713 if (wdev->connect_keys->def == i) 718 if (wdev->connect_keys->def == i)
714 if (rdev->ops->set_default_key(wdev->wiphy, dev, 719 if (rdev_set_default_key(rdev, dev, i, true, true)) {
715 i, true, true)) {
716 netdev_err(dev, "failed to set defkey %d\n", i); 720 netdev_err(dev, "failed to set defkey %d\n", i);
717 continue; 721 continue;
718 } 722 }
719 if (wdev->connect_keys->defmgmt == i) 723 if (wdev->connect_keys->defmgmt == i)
720 if (rdev->ops->set_default_mgmt_key(wdev->wiphy, dev, i)) 724 if (rdev_set_default_mgmt_key(rdev, dev, i))
721 netdev_err(dev, "failed to set mgtdef %d\n", i); 725 netdev_err(dev, "failed to set mgtdef %d\n", i);
722 } 726 }
723 727
@@ -850,8 +854,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
850 cfg80211_process_rdev_events(rdev); 854 cfg80211_process_rdev_events(rdev);
851 } 855 }
852 856
853 err = rdev->ops->change_virtual_intf(&rdev->wiphy, dev, 857 err = rdev_change_virtual_intf(rdev, dev, ntype, flags, params);
854 ntype, flags, params);
855 858
856 WARN_ON(!err && dev->ieee80211_ptr->iftype != ntype); 859 WARN_ON(!err && dev->ieee80211_ptr->iftype != ntype);
857 860
@@ -944,14 +947,86 @@ static u32 cfg80211_calculate_bitrate_60g(struct rate_info *rate)
944 return __mcs2bitrate[rate->mcs]; 947 return __mcs2bitrate[rate->mcs];
945} 948}
946 949
950static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate)
951{
952 static const u32 base[4][10] = {
953 { 6500000,
954 13000000,
955 19500000,
956 26000000,
957 39000000,
958 52000000,
959 58500000,
960 65000000,
961 78000000,
962 0,
963 },
964 { 13500000,
965 27000000,
966 40500000,
967 54000000,
968 81000000,
969 108000000,
970 121500000,
971 135000000,
972 162000000,
973 180000000,
974 },
975 { 29300000,
976 58500000,
977 87800000,
978 117000000,
979 175500000,
980 234000000,
981 263300000,
982 292500000,
983 351000000,
984 390000000,
985 },
986 { 58500000,
987 117000000,
988 175500000,
989 234000000,
990 351000000,
991 468000000,
992 526500000,
993 585000000,
994 702000000,
995 780000000,
996 },
997 };
998 u32 bitrate;
999 int idx;
1000
1001 if (WARN_ON_ONCE(rate->mcs > 9))
1002 return 0;
1003
1004 idx = rate->flags & (RATE_INFO_FLAGS_160_MHZ_WIDTH |
1005 RATE_INFO_FLAGS_80P80_MHZ_WIDTH) ? 3 :
1006 rate->flags & RATE_INFO_FLAGS_80_MHZ_WIDTH ? 2 :
1007 rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH ? 1 : 0;
1008
1009 bitrate = base[idx][rate->mcs];
1010 bitrate *= rate->nss;
1011
1012 if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
1013 bitrate = (bitrate / 9) * 10;
1014
1015 /* do NOT round down here */
1016 return (bitrate + 50000) / 100000;
1017}
1018
947u32 cfg80211_calculate_bitrate(struct rate_info *rate) 1019u32 cfg80211_calculate_bitrate(struct rate_info *rate)
948{ 1020{
949 int modulation, streams, bitrate; 1021 int modulation, streams, bitrate;
950 1022
951 if (!(rate->flags & RATE_INFO_FLAGS_MCS)) 1023 if (!(rate->flags & RATE_INFO_FLAGS_MCS) &&
1024 !(rate->flags & RATE_INFO_FLAGS_VHT_MCS))
952 return rate->legacy; 1025 return rate->legacy;
953 if (rate->flags & RATE_INFO_FLAGS_60G) 1026 if (rate->flags & RATE_INFO_FLAGS_60G)
954 return cfg80211_calculate_bitrate_60g(rate); 1027 return cfg80211_calculate_bitrate_60g(rate);
1028 if (rate->flags & RATE_INFO_FLAGS_VHT_MCS)
1029 return cfg80211_calculate_bitrate_vht(rate);
955 1030
956 /* the formula below does only work for MCS values smaller than 32 */ 1031 /* the formula below does only work for MCS values smaller than 32 */
957 if (WARN_ON_ONCE(rate->mcs >= 32)) 1032 if (WARN_ON_ONCE(rate->mcs >= 32))
@@ -980,6 +1055,106 @@ u32 cfg80211_calculate_bitrate(struct rate_info *rate)
980} 1055}
981EXPORT_SYMBOL(cfg80211_calculate_bitrate); 1056EXPORT_SYMBOL(cfg80211_calculate_bitrate);
982 1057
1058int cfg80211_get_p2p_attr(const u8 *ies, unsigned int len,
1059 enum ieee80211_p2p_attr_id attr,
1060 u8 *buf, unsigned int bufsize)
1061{
1062 u8 *out = buf;
1063 u16 attr_remaining = 0;
1064 bool desired_attr = false;
1065 u16 desired_len = 0;
1066
1067 while (len > 0) {
1068 unsigned int iedatalen;
1069 unsigned int copy;
1070 const u8 *iedata;
1071
1072 if (len < 2)
1073 return -EILSEQ;
1074 iedatalen = ies[1];
1075 if (iedatalen + 2 > len)
1076 return -EILSEQ;
1077
1078 if (ies[0] != WLAN_EID_VENDOR_SPECIFIC)
1079 goto cont;
1080
1081 if (iedatalen < 4)
1082 goto cont;
1083
1084 iedata = ies + 2;
1085
1086 /* check WFA OUI, P2P subtype */
1087 if (iedata[0] != 0x50 || iedata[1] != 0x6f ||
1088 iedata[2] != 0x9a || iedata[3] != 0x09)
1089 goto cont;
1090
1091 iedatalen -= 4;
1092 iedata += 4;
1093
1094 /* check attribute continuation into this IE */
1095 copy = min_t(unsigned int, attr_remaining, iedatalen);
1096 if (copy && desired_attr) {
1097 desired_len += copy;
1098 if (out) {
1099 memcpy(out, iedata, min(bufsize, copy));
1100 out += min(bufsize, copy);
1101 bufsize -= min(bufsize, copy);
1102 }
1103
1104
1105 if (copy == attr_remaining)
1106 return desired_len;
1107 }
1108
1109 attr_remaining -= copy;
1110 if (attr_remaining)
1111 goto cont;
1112
1113 iedatalen -= copy;
1114 iedata += copy;
1115
1116 while (iedatalen > 0) {
1117 u16 attr_len;
1118
1119 /* P2P attribute ID & size must fit */
1120 if (iedatalen < 3)
1121 return -EILSEQ;
1122 desired_attr = iedata[0] == attr;
1123 attr_len = get_unaligned_le16(iedata + 1);
1124 iedatalen -= 3;
1125 iedata += 3;
1126
1127 copy = min_t(unsigned int, attr_len, iedatalen);
1128
1129 if (desired_attr) {
1130 desired_len += copy;
1131 if (out) {
1132 memcpy(out, iedata, min(bufsize, copy));
1133 out += min(bufsize, copy);
1134 bufsize -= min(bufsize, copy);
1135 }
1136
1137 if (copy == attr_len)
1138 return desired_len;
1139 }
1140
1141 iedata += copy;
1142 iedatalen -= copy;
1143 attr_remaining = attr_len - copy;
1144 }
1145
1146 cont:
1147 len -= ies[1] + 2;
1148 ies += ies[1] + 2;
1149 }
1150
1151 if (attr_remaining && desired_attr)
1152 return -EILSEQ;
1153
1154 return -ENOENT;
1155}
1156EXPORT_SYMBOL(cfg80211_get_p2p_attr);
1157
983int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, 1158int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
984 u32 beacon_int) 1159 u32 beacon_int)
985{ 1160{
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 494379eb464f..d997d0f0c54a 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -19,6 +19,7 @@
19#include <net/cfg80211-wext.h> 19#include <net/cfg80211-wext.h>
20#include "wext-compat.h" 20#include "wext-compat.h"
21#include "core.h" 21#include "core.h"
22#include "rdev-ops.h"
22 23
23int cfg80211_wext_giwname(struct net_device *dev, 24int cfg80211_wext_giwname(struct net_device *dev,
24 struct iw_request_info *info, 25 struct iw_request_info *info,
@@ -175,7 +176,7 @@ int cfg80211_wext_giwrange(struct net_device *dev,
175 case CFG80211_SIGNAL_TYPE_NONE: 176 case CFG80211_SIGNAL_TYPE_NONE:
176 break; 177 break;
177 case CFG80211_SIGNAL_TYPE_MBM: 178 case CFG80211_SIGNAL_TYPE_MBM:
178 range->max_qual.level = -110; 179 range->max_qual.level = (u8)-110;
179 range->max_qual.qual = 70; 180 range->max_qual.qual = 70;
180 range->avg_qual.qual = 35; 181 range->avg_qual.qual = 35;
181 range->max_qual.updated |= IW_QUAL_DBM; 182 range->max_qual.updated |= IW_QUAL_DBM;
@@ -301,8 +302,7 @@ int cfg80211_wext_siwrts(struct net_device *dev,
301 else 302 else
302 wdev->wiphy->rts_threshold = rts->value; 303 wdev->wiphy->rts_threshold = rts->value;
303 304
304 err = rdev->ops->set_wiphy_params(wdev->wiphy, 305 err = rdev_set_wiphy_params(rdev, WIPHY_PARAM_RTS_THRESHOLD);
305 WIPHY_PARAM_RTS_THRESHOLD);
306 if (err) 306 if (err)
307 wdev->wiphy->rts_threshold = orts; 307 wdev->wiphy->rts_threshold = orts;
308 308
@@ -342,8 +342,7 @@ int cfg80211_wext_siwfrag(struct net_device *dev,
342 wdev->wiphy->frag_threshold = frag->value & ~0x1; 342 wdev->wiphy->frag_threshold = frag->value & ~0x1;
343 } 343 }
344 344
345 err = rdev->ops->set_wiphy_params(wdev->wiphy, 345 err = rdev_set_wiphy_params(rdev, WIPHY_PARAM_FRAG_THRESHOLD);
346 WIPHY_PARAM_FRAG_THRESHOLD);
347 if (err) 346 if (err)
348 wdev->wiphy->frag_threshold = ofrag; 347 wdev->wiphy->frag_threshold = ofrag;
349 348
@@ -396,7 +395,7 @@ static int cfg80211_wext_siwretry(struct net_device *dev,
396 if (!changed) 395 if (!changed)
397 return 0; 396 return 0;
398 397
399 err = rdev->ops->set_wiphy_params(wdev->wiphy, changed); 398 err = rdev_set_wiphy_params(rdev, changed);
400 if (err) { 399 if (err) {
401 wdev->wiphy->retry_short = oshort; 400 wdev->wiphy->retry_short = oshort;
402 wdev->wiphy->retry_long = olong; 401 wdev->wiphy->retry_long = olong;
@@ -490,8 +489,8 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
490 !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) 489 !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
491 err = -ENOENT; 490 err = -ENOENT;
492 else 491 else
493 err = rdev->ops->del_key(&rdev->wiphy, dev, idx, 492 err = rdev_del_key(rdev, dev, idx, pairwise,
494 pairwise, addr); 493 addr);
495 } 494 }
496 wdev->wext.connect.privacy = false; 495 wdev->wext.connect.privacy = false;
497 /* 496 /*
@@ -525,8 +524,7 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
525 524
526 err = 0; 525 err = 0;
527 if (wdev->current_bss) 526 if (wdev->current_bss)
528 err = rdev->ops->add_key(&rdev->wiphy, dev, idx, 527 err = rdev_add_key(rdev, dev, idx, pairwise, addr, params);
529 pairwise, addr, params);
530 if (err) 528 if (err)
531 return err; 529 return err;
532 530
@@ -552,8 +550,7 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
552 __cfg80211_leave_ibss(rdev, wdev->netdev, true); 550 __cfg80211_leave_ibss(rdev, wdev->netdev, true);
553 rejoin = true; 551 rejoin = true;
554 } 552 }
555 err = rdev->ops->set_default_key(&rdev->wiphy, dev, 553 err = rdev_set_default_key(rdev, dev, idx, true, true);
556 idx, true, true);
557 } 554 }
558 if (!err) { 555 if (!err) {
559 wdev->wext.default_key = idx; 556 wdev->wext.default_key = idx;
@@ -566,8 +563,7 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
566 if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC && 563 if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC &&
567 (tx_key || (!addr && wdev->wext.default_mgmt_key == -1))) { 564 (tx_key || (!addr && wdev->wext.default_mgmt_key == -1))) {
568 if (wdev->current_bss) 565 if (wdev->current_bss)
569 err = rdev->ops->set_default_mgmt_key(&rdev->wiphy, 566 err = rdev_set_default_mgmt_key(rdev, dev, idx);
570 dev, idx);
571 if (!err) 567 if (!err)
572 wdev->wext.default_mgmt_key = idx; 568 wdev->wext.default_mgmt_key = idx;
573 return err; 569 return err;
@@ -631,8 +627,8 @@ static int cfg80211_wext_siwencode(struct net_device *dev,
631 err = 0; 627 err = 0;
632 wdev_lock(wdev); 628 wdev_lock(wdev);
633 if (wdev->current_bss) 629 if (wdev->current_bss)
634 err = rdev->ops->set_default_key(&rdev->wiphy, dev, 630 err = rdev_set_default_key(rdev, dev, idx, true,
635 idx, true, true); 631 true);
636 if (!err) 632 if (!err)
637 wdev->wext.default_key = idx; 633 wdev->wext.default_key = idx;
638 wdev_unlock(wdev); 634 wdev_unlock(wdev);
@@ -788,6 +784,9 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
788{ 784{
789 struct wireless_dev *wdev = dev->ieee80211_ptr; 785 struct wireless_dev *wdev = dev->ieee80211_ptr;
790 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 786 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
787 struct cfg80211_chan_def chandef = {
788 .width = NL80211_CHAN_WIDTH_20_NOHT,
789 };
791 int freq, err; 790 int freq, err;
792 791
793 switch (wdev->iftype) { 792 switch (wdev->iftype) {
@@ -801,8 +800,12 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
801 return freq; 800 return freq;
802 if (freq == 0) 801 if (freq == 0)
803 return -EINVAL; 802 return -EINVAL;
803 chandef.center_freq1 = freq;
804 chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq);
805 if (!chandef.chan)
806 return -EINVAL;
804 mutex_lock(&rdev->devlist_mtx); 807 mutex_lock(&rdev->devlist_mtx);
805 err = cfg80211_set_monitor_channel(rdev, freq, NL80211_CHAN_NO_HT); 808 err = cfg80211_set_monitor_channel(rdev, &chandef);
806 mutex_unlock(&rdev->devlist_mtx); 809 mutex_unlock(&rdev->devlist_mtx);
807 return err; 810 return err;
808 case NL80211_IFTYPE_MESH_POINT: 811 case NL80211_IFTYPE_MESH_POINT:
@@ -811,9 +814,12 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
811 return freq; 814 return freq;
812 if (freq == 0) 815 if (freq == 0)
813 return -EINVAL; 816 return -EINVAL;
817 chandef.center_freq1 = freq;
818 chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq);
819 if (!chandef.chan)
820 return -EINVAL;
814 mutex_lock(&rdev->devlist_mtx); 821 mutex_lock(&rdev->devlist_mtx);
815 err = cfg80211_set_mesh_freq(rdev, wdev, freq, 822 err = cfg80211_set_mesh_channel(rdev, wdev, &chandef);
816 NL80211_CHAN_NO_HT);
817 mutex_unlock(&rdev->devlist_mtx); 823 mutex_unlock(&rdev->devlist_mtx);
818 return err; 824 return err;
819 default: 825 default:
@@ -827,8 +833,8 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
827{ 833{
828 struct wireless_dev *wdev = dev->ieee80211_ptr; 834 struct wireless_dev *wdev = dev->ieee80211_ptr;
829 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 835 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
830 struct ieee80211_channel *chan; 836 struct cfg80211_chan_def chandef;
831 enum nl80211_channel_type channel_type; 837 int ret;
832 838
833 switch (wdev->iftype) { 839 switch (wdev->iftype) {
834 case NL80211_IFTYPE_STATION: 840 case NL80211_IFTYPE_STATION:
@@ -839,10 +845,10 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
839 if (!rdev->ops->get_channel) 845 if (!rdev->ops->get_channel)
840 return -EINVAL; 846 return -EINVAL;
841 847
842 chan = rdev->ops->get_channel(wdev->wiphy, wdev, &channel_type); 848 ret = rdev_get_channel(rdev, wdev, &chandef);
843 if (!chan) 849 if (ret)
844 return -EINVAL; 850 return ret;
845 freq->m = chan->center_freq; 851 freq->m = chandef.chan->center_freq;
846 freq->e = 6; 852 freq->e = 6;
847 return 0; 853 return 0;
848 default: 854 default:
@@ -899,7 +905,7 @@ static int cfg80211_wext_siwtxpower(struct net_device *dev,
899 return 0; 905 return 0;
900 } 906 }
901 907
902 return rdev->ops->set_tx_power(wdev->wiphy, type, DBM_TO_MBM(dbm)); 908 return rdev_set_tx_power(rdev, wdev, type, DBM_TO_MBM(dbm));
903} 909}
904 910
905static int cfg80211_wext_giwtxpower(struct net_device *dev, 911static int cfg80211_wext_giwtxpower(struct net_device *dev,
@@ -918,7 +924,7 @@ static int cfg80211_wext_giwtxpower(struct net_device *dev,
918 if (!rdev->ops->get_tx_power) 924 if (!rdev->ops->get_tx_power)
919 return -EOPNOTSUPP; 925 return -EOPNOTSUPP;
920 926
921 err = rdev->ops->get_tx_power(wdev->wiphy, &val); 927 err = rdev_get_tx_power(rdev, wdev, &val);
922 if (err) 928 if (err)
923 return err; 929 return err;
924 930
@@ -1158,7 +1164,7 @@ static int cfg80211_wext_siwpower(struct net_device *dev,
1158 timeout = wrq->value / 1000; 1164 timeout = wrq->value / 1000;
1159 } 1165 }
1160 1166
1161 err = rdev->ops->set_power_mgmt(wdev->wiphy, dev, ps, timeout); 1167 err = rdev_set_power_mgmt(rdev, dev, ps, timeout);
1162 if (err) 1168 if (err)
1163 return err; 1169 return err;
1164 1170
@@ -1200,7 +1206,7 @@ static int cfg80211_wds_wext_siwap(struct net_device *dev,
1200 if (!rdev->ops->set_wds_peer) 1206 if (!rdev->ops->set_wds_peer)
1201 return -EOPNOTSUPP; 1207 return -EOPNOTSUPP;
1202 1208
1203 err = rdev->ops->set_wds_peer(wdev->wiphy, dev, (u8 *) &addr->sa_data); 1209 err = rdev_set_wds_peer(rdev, dev, (u8 *)&addr->sa_data);
1204 if (err) 1210 if (err)
1205 return err; 1211 return err;
1206 1212
@@ -1272,7 +1278,7 @@ static int cfg80211_wext_siwrate(struct net_device *dev,
1272 if (!match) 1278 if (!match)
1273 return -EINVAL; 1279 return -EINVAL;
1274 1280
1275 return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask); 1281 return rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
1276} 1282}
1277 1283
1278static int cfg80211_wext_giwrate(struct net_device *dev, 1284static int cfg80211_wext_giwrate(struct net_device *dev,
@@ -1302,7 +1308,7 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
1302 if (err) 1308 if (err)
1303 return err; 1309 return err;
1304 1310
1305 err = rdev->ops->get_station(&rdev->wiphy, dev, addr, &sinfo); 1311 err = rdev_get_station(rdev, dev, addr, &sinfo);
1306 if (err) 1312 if (err)
1307 return err; 1313 return err;
1308 1314
@@ -1339,7 +1345,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
1339 memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN); 1345 memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
1340 wdev_unlock(wdev); 1346 wdev_unlock(wdev);
1341 1347
1342 if (rdev->ops->get_station(&rdev->wiphy, dev, bssid, &sinfo)) 1348 if (rdev_get_station(rdev, dev, bssid, &sinfo))
1343 return NULL; 1349 return NULL;
1344 1350
1345 memset(&wstats, 0, sizeof(wstats)); 1351 memset(&wstats, 0, sizeof(wstats));
@@ -1474,19 +1480,19 @@ static int cfg80211_wext_siwpmksa(struct net_device *dev,
1474 if (!rdev->ops->set_pmksa) 1480 if (!rdev->ops->set_pmksa)
1475 return -EOPNOTSUPP; 1481 return -EOPNOTSUPP;
1476 1482
1477 return rdev->ops->set_pmksa(&rdev->wiphy, dev, &cfg_pmksa); 1483 return rdev_set_pmksa(rdev, dev, &cfg_pmksa);
1478 1484
1479 case IW_PMKSA_REMOVE: 1485 case IW_PMKSA_REMOVE:
1480 if (!rdev->ops->del_pmksa) 1486 if (!rdev->ops->del_pmksa)
1481 return -EOPNOTSUPP; 1487 return -EOPNOTSUPP;
1482 1488
1483 return rdev->ops->del_pmksa(&rdev->wiphy, dev, &cfg_pmksa); 1489 return rdev_del_pmksa(rdev, dev, &cfg_pmksa);
1484 1490
1485 case IW_PMKSA_FLUSH: 1491 case IW_PMKSA_FLUSH:
1486 if (!rdev->ops->flush_pmksa) 1492 if (!rdev->ops->flush_pmksa)
1487 return -EOPNOTSUPP; 1493 return -EOPNOTSUPP;
1488 1494
1489 return rdev->ops->flush_pmksa(&rdev->wiphy, dev); 1495 return rdev_flush_pmksa(rdev, dev);
1490 1496
1491 default: 1497 default:
1492 return -EOPNOTSUPP; 1498 return -EOPNOTSUPP;
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 1f773f668d1a..fb9622f6d99c 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -119,7 +119,16 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
119 * channel we disconnected above and reconnect below. 119 * channel we disconnected above and reconnect below.
120 */ 120 */
121 if (chan && !wdev->wext.connect.ssid_len) { 121 if (chan && !wdev->wext.connect.ssid_len) {
122 err = cfg80211_set_monitor_channel(rdev, freq, NL80211_CHAN_NO_HT); 122 struct cfg80211_chan_def chandef = {
123 .width = NL80211_CHAN_WIDTH_20_NOHT,
124 .center_freq1 = freq,
125 };
126
127 chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq);
128 if (chandef.chan)
129 err = cfg80211_set_monitor_channel(rdev, &chandef);
130 else
131 err = -EINVAL;
123 goto out; 132 goto out;
124 } 133 }
125 134
@@ -233,13 +242,17 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
233 242
234 wdev_lock(wdev); 243 wdev_lock(wdev);
235 if (wdev->current_bss) { 244 if (wdev->current_bss) {
236 const u8 *ie = ieee80211_bss_get_ie(&wdev->current_bss->pub, 245 const u8 *ie;
237 WLAN_EID_SSID); 246
247 rcu_read_lock();
248 ie = ieee80211_bss_get_ie(&wdev->current_bss->pub,
249 WLAN_EID_SSID);
238 if (ie) { 250 if (ie) {
239 data->flags = 1; 251 data->flags = 1;
240 data->length = ie[1]; 252 data->length = ie[1];
241 memcpy(ssid, ie + 2, data->length); 253 memcpy(ssid, ie + 2, data->length);
242 } 254 }
255 rcu_read_unlock();
243 } else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) { 256 } else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) {
244 data->flags = 1; 257 data->flags = 1;
245 data->length = wdev->wext.connect.ssid_len; 258 data->length = wdev->wext.connect.ssid_len;
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index e5246fbe36c4..2906d520eea7 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -276,18 +276,16 @@ static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
276 struct crypto_comp * __percpu *tfms; 276 struct crypto_comp * __percpu *tfms;
277 int cpu; 277 int cpu;
278 278
279 /* This can be any valid CPU ID so we don't need locking. */
280 cpu = raw_smp_processor_id();
281 279
282 list_for_each_entry(pos, &ipcomp_tfms_list, list) { 280 list_for_each_entry(pos, &ipcomp_tfms_list, list) {
283 struct crypto_comp *tfm; 281 struct crypto_comp *tfm;
284 282
285 tfms = pos->tfms; 283 /* This can be any valid CPU ID so we don't need locking. */
286 tfm = *per_cpu_ptr(tfms, cpu); 284 tfm = __this_cpu_read(*pos->tfms);
287 285
288 if (!strcmp(crypto_comp_name(tfm), alg_name)) { 286 if (!strcmp(crypto_comp_name(tfm), alg_name)) {
289 pos->users++; 287 pos->users++;
290 return tfms; 288 return pos->tfms;
291 } 289 }
292 } 290 }
293 291
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 3efb07d3eb27..765f6fe951eb 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -521,13 +521,12 @@ int xfrm_init_replay(struct xfrm_state *x)
521 replay_esn->bmp_len * sizeof(__u32) * 8) 521 replay_esn->bmp_len * sizeof(__u32) * 8)
522 return -EINVAL; 522 return -EINVAL;
523 523
524 if ((x->props.flags & XFRM_STATE_ESN) && replay_esn->replay_window == 0) 524 if (x->props.flags & XFRM_STATE_ESN) {
525 return -EINVAL; 525 if (replay_esn->replay_window == 0)
526 526 return -EINVAL;
527 if ((x->props.flags & XFRM_STATE_ESN) && x->replay_esn) 527 x->repl = &xfrm_replay_esn;
528 x->repl = &xfrm_replay_esn; 528 } else
529 else 529 x->repl = &xfrm_replay_bmp;
530 x->repl = &xfrm_replay_bmp;
531 } else 530 } else
532 x->repl = &xfrm_replay_legacy; 531 x->repl = &xfrm_replay_legacy;
533 532
diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
index 380976f74c4c..05a6e3d9c258 100644
--- a/net/xfrm/xfrm_sysctl.c
+++ b/net/xfrm/xfrm_sysctl.c
@@ -54,6 +54,10 @@ int __net_init xfrm_sysctl_init(struct net *net)
54 table[2].data = &net->xfrm.sysctl_larval_drop; 54 table[2].data = &net->xfrm.sysctl_larval_drop;
55 table[3].data = &net->xfrm.sysctl_acq_expires; 55 table[3].data = &net->xfrm.sysctl_acq_expires;
56 56
57 /* Don't export sysctls to unprivileged users */
58 if (net->user_ns != &init_user_ns)
59 table[0].procname = NULL;
60
57 net->xfrm.sysctl_hdr = register_net_sysctl(net, "net/core", table); 61 net->xfrm.sysctl_hdr = register_net_sysctl(net, "net/core", table);
58 if (!net->xfrm.sysctl_hdr) 62 if (!net->xfrm.sysctl_hdr)
59 goto out_register; 63 goto out_register;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 421f98444335..eb872b2e366e 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2349,7 +2349,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2349 link = &xfrm_dispatch[type]; 2349 link = &xfrm_dispatch[type];
2350 2350
2351 /* All operations require privileges, even GET */ 2351 /* All operations require privileges, even GET */
2352 if (!capable(CAP_NET_ADMIN)) 2352 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2353 return -EPERM; 2353 return -EPERM;
2354 2354
2355 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) || 2355 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||