Diffstat (limited to 'net')
-rw-r--r--  net/802/Kconfig | 3
-rw-r--r--  net/802/Makefile | 1
-rw-r--r--  net/802/mrp.c | 895
-rw-r--r--  net/8021q/Kconfig | 11
-rw-r--r--  net/8021q/Makefile | 1
-rw-r--r--  net/8021q/vlan.c | 27
-rw-r--r--  net/8021q/vlan.h | 16
-rw-r--r--  net/8021q/vlan_core.c | 1
-rw-r--r--  net/8021q/vlan_dev.c | 14
-rw-r--r--  net/8021q/vlan_mvrp.c | 72
-rw-r--r--  net/8021q/vlan_netlink.c | 2
-rw-r--r--  net/8021q/vlanproc.c | 2
-rw-r--r--  net/Kconfig | 1
-rw-r--r--  net/Makefile | 1
-rw-r--r--  net/atm/proc.c | 2
-rw-r--r--  net/ax25/af_ax25.c | 13
-rw-r--r--  net/batman-adv/distributed-arp-table.c | 2
-rw-r--r--  net/bluetooth/a2mp.c | 42
-rw-r--r--  net/bluetooth/af_bluetooth.c | 4
-rw-r--r--  net/bluetooth/amp.c | 25
-rw-r--r--  net/bluetooth/bnep/core.c | 1
-rw-r--r--  net/bluetooth/hci_conn.c | 6
-rw-r--r--  net/bluetooth/hci_core.c | 30
-rw-r--r--  net/bluetooth/hci_event.c | 64
-rw-r--r--  net/bluetooth/hci_sysfs.c | 22
-rw-r--r--  net/bluetooth/l2cap_core.c | 5
-rw-r--r--  net/bluetooth/mgmt.c | 489
-rw-r--r--  net/bluetooth/sco.c | 18
-rw-r--r--  net/bluetooth/smp.c | 13
-rw-r--r--  net/bridge/Kconfig | 14
-rw-r--r--  net/bridge/Makefile | 2
-rw-r--r--  net/bridge/br_device.c | 10
-rw-r--r--  net/bridge/br_fdb.c | 258
-rw-r--r--  net/bridge/br_forward.c | 9
-rw-r--r--  net/bridge/br_if.c | 4
-rw-r--r--  net/bridge/br_input.c | 28
-rw-r--r--  net/bridge/br_multicast.c | 69
-rw-r--r--  net/bridge/br_netlink.c | 242
-rw-r--r--  net/bridge/br_private.h | 178
-rw-r--r--  net/bridge/br_stp_bpdu.c | 2
-rw-r--r--  net/bridge/br_stp_if.c | 2
-rw-r--r--  net/bridge/br_sysfs_br.c | 21
-rw-r--r--  net/bridge/br_vlan.c | 415
-rw-r--r--  net/can/bcm.c | 2
-rw-r--r--  net/can/proc.c | 2
-rw-r--r--  net/core/Makefile | 2
-rw-r--r--  net/core/datagram.c | 2
-rw-r--r--  net/core/dev.c | 756
-rw-r--r--  net/core/dev_addr_lists.c | 4
-rw-r--r--  net/core/dev_ioctl.c | 576
-rw-r--r--  net/core/ethtool.c | 1
-rw-r--r--  net/core/netpoll.c | 74
-rw-r--r--  net/core/netprio_cgroup.c | 4
-rw-r--r--  net/core/pktgen.c | 13
-rw-r--r--  net/core/rtnetlink.c | 114
-rw-r--r--  net/core/skbuff.c | 65
-rw-r--r--  net/core/sock.c | 4
-rw-r--r--  net/dccp/probe.c | 6
-rw-r--r--  net/decnet/af_decnet.c | 4
-rw-r--r--  net/decnet/dn_dev.c | 4
-rw-r--r--  net/decnet/dn_neigh.c | 7
-rw-r--r--  net/decnet/dn_route.c | 5
-rw-r--r--  net/ieee802154/6lowpan.c | 15
-rw-r--r--  net/ipv4/af_inet.c | 21
-rw-r--r--  net/ipv4/ah4.c | 3
-rw-r--r--  net/ipv4/arp.c | 25
-rw-r--r--  net/ipv4/fib_trie.c | 18
-rw-r--r--  net/ipv4/gre.c | 118
-rw-r--r--  net/ipv4/igmp.c | 11
-rw-r--r--  net/ipv4/ip_fragment.c | 2
-rw-r--r--  net/ipv4/ip_gre.c | 82
-rw-r--r--  net/ipv4/ip_input.c | 7
-rw-r--r--  net/ipv4/ip_output.c | 1
-rw-r--r--  net/ipv4/ipconfig.c | 2
-rw-r--r--  net/ipv4/ipmr.c | 10
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | 14
-rw-r--r--  net/ipv4/ping.c | 4
-rw-r--r--  net/ipv4/proc.c | 17
-rw-r--r--  net/ipv4/protocol.c | 6
-rw-r--r--  net/ipv4/raw.c | 4
-rw-r--r--  net/ipv4/route.c | 4
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 7
-rw-r--r--  net/ipv4/tcp.c | 17
-rw-r--r--  net/ipv4/tcp_cong.c | 42
-rw-r--r--  net/ipv4/tcp_input.c | 35
-rw-r--r--  net/ipv4/tcp_ipv4.c | 20
-rw-r--r--  net/ipv4/tcp_minisocks.c | 4
-rw-r--r--  net/ipv4/tcp_output.c | 10
-rw-r--r--  net/ipv4/tcp_probe.c | 6
-rw-r--r--  net/ipv4/udp.c | 5
-rw-r--r--  net/ipv4/xfrm4_input.c | 2
-rw-r--r--  net/ipv4/xfrm4_mode_tunnel.c | 4
-rw-r--r--  net/ipv4/xfrm4_policy.c | 58
-rw-r--r--  net/ipv6/addrconf.c | 20
-rw-r--r--  net/ipv6/ah6.c | 3
-rw-r--r--  net/ipv6/anycast.c | 4
-rw-r--r--  net/ipv6/datagram.c | 16
-rw-r--r--  net/ipv6/ip6_flowlabel.c | 18
-rw-r--r--  net/ipv6/ip6_gre.c | 2
-rw-r--r--  net/ipv6/ip6_input.c | 9
-rw-r--r--  net/ipv6/ip6_offload.c | 2
-rw-r--r--  net/ipv6/ip6_output.c | 10
-rw-r--r--  net/ipv6/ip6mr.c | 10
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 6
-rw-r--r--  net/ipv6/mcast.c | 20
-rw-r--r--  net/ipv6/netfilter/ip6t_NPT.c | 18
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 8
-rw-r--r--  net/ipv6/proc.c | 16
-rw-r--r--  net/ipv6/raw.c | 10
-rw-r--r--  net/ipv6/reassembly.c | 18
-rw-r--r--  net/ipv6/route.c | 8
-rw-r--r--  net/ipv6/tcp_ipv6.c | 28
-rw-r--r--  net/ipv6/udp.c | 6
-rw-r--r--  net/ipv6/udp_offload.c | 3
-rw-r--r--  net/ipv6/xfrm6_mode_tunnel.c | 4
-rw-r--r--  net/ipv6/xfrm6_policy.c | 52
-rw-r--r--  net/key/af_key.c | 44
-rw-r--r--  net/l2tp/l2tp_core.c | 223
-rw-r--r--  net/l2tp/l2tp_core.h | 7
-rw-r--r--  net/l2tp/l2tp_ip.c | 16
-rw-r--r--  net/l2tp/l2tp_ip6.c | 10
-rw-r--r--  net/l2tp/l2tp_netlink.c | 1
-rw-r--r--  net/l2tp/l2tp_ppp.c | 11
-rw-r--r--  net/mac80211/Kconfig | 11
-rw-r--r--  net/mac80211/Makefile | 3
-rw-r--r--  net/mac80211/agg-rx.c | 14
-rw-r--r--  net/mac80211/agg-tx.c | 61
-rw-r--r--  net/mac80211/cfg.c | 98
-rw-r--r--  net/mac80211/chan.c | 155
-rw-r--r--  net/mac80211/debug.h | 10
-rw-r--r--  net/mac80211/debugfs.c | 6
-rw-r--r--  net/mac80211/debugfs_netdev.c | 5
-rw-r--r--  net/mac80211/debugfs_sta.c | 5
-rw-r--r--  net/mac80211/driver-ops.h | 90
-rw-r--r--  net/mac80211/ht.c | 110
-rw-r--r--  net/mac80211/ibss.c | 53
-rw-r--r--  net/mac80211/ieee80211_i.h | 199
-rw-r--r--  net/mac80211/iface.c | 135
-rw-r--r--  net/mac80211/key.c | 5
-rw-r--r--  net/mac80211/main.c | 106
-rw-r--r--  net/mac80211/mesh.c | 284
-rw-r--r--  net/mac80211/mesh.h | 40
-rw-r--r--  net/mac80211/mesh_hwmp.c | 49
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 12
-rw-r--r--  net/mac80211/mesh_plink.c | 266
-rw-r--r--  net/mac80211/mesh_ps.c | 598
-rw-r--r--  net/mac80211/mlme.c | 1126
-rw-r--r--  net/mac80211/offchannel.c | 35
-rw-r--r--  net/mac80211/pm.c | 12
-rw-r--r--  net/mac80211/rate.h | 2
-rw-r--r--  net/mac80211/rc80211_minstrel.c | 29
-rw-r--r--  net/mac80211/rc80211_minstrel.h | 2
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c | 181
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.h | 5
-rw-r--r--  net/mac80211/rc80211_minstrel_ht_debugfs.c | 112
-rw-r--r--  net/mac80211/rx.c | 161
-rw-r--r--  net/mac80211/scan.c | 66
-rw-r--r--  net/mac80211/sta_info.c | 43
-rw-r--r--  net/mac80211/sta_info.h | 20
-rw-r--r--  net/mac80211/status.c | 25
-rw-r--r--  net/mac80211/tkip.c | 10
-rw-r--r--  net/mac80211/trace.h | 76
-rw-r--r--  net/mac80211/tx.c | 117
-rw-r--r--  net/mac80211/util.c | 82
-rw-r--r--  net/mac80211/vht.c | 172
-rw-r--r--  net/mac80211/wme.c | 13
-rw-r--r--  net/mac80211/wpa.c | 5
-rw-r--r--  net/mac802154/wpan.c | 9
-rw-r--r--  net/netfilter/ipvs/ip_vs_app.c | 4
-rw-r--r--  net/netfilter/ipvs/ip_vs_conn.c | 8
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 14
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_sctp.c | 35
-rw-r--r--  net/netfilter/ipvs/ip_vs_sync.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 5
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 6
-rw-r--r--  net/netfilter/x_tables.c | 10
-rw-r--r--  net/netfilter/xt_hashlimit.c | 6
-rw-r--r--  net/netfilter/xt_recent.c | 2
-rw-r--r--  net/netlink/af_netlink.c | 4
-rw-r--r--  net/netrom/af_netrom.c | 12
-rw-r--r--  net/nfc/llcp/llcp.c | 5
-rw-r--r--  net/openvswitch/datapath.c | 2
-rw-r--r--  net/openvswitch/vport-netdev.c | 16
-rw-r--r--  net/packet/af_packet.c | 14
-rw-r--r--  net/phonet/pn_dev.c | 8
-rw-r--r--  net/rose/af_rose.c | 19
-rw-r--r--  net/rxrpc/af_rxrpc.c | 9
-rw-r--r--  net/sched/act_ipt.c | 6
-rw-r--r--  net/sched/act_pedit.c | 3
-rw-r--r--  net/sched/act_police.c | 100
-rw-r--r--  net/sched/sch_api.c | 10
-rw-r--r--  net/sched/sch_generic.c | 37
-rw-r--r--  net/sched/sch_htb.c | 80
-rw-r--r--  net/sched/sch_netem.c | 12
-rw-r--r--  net/sched/sch_tbf.c | 76
-rw-r--r--  net/sctp/Kconfig | 4
-rw-r--r--  net/sctp/auth.c | 31
-rw-r--r--  net/sctp/endpointola.c | 6
-rw-r--r--  net/sctp/ipv6.c | 5
-rw-r--r--  net/sctp/probe.c | 27
-rw-r--r--  net/sctp/sm_make_chunk.c | 31
-rw-r--r--  net/sctp/socket.c | 2
-rw-r--r--  net/socket.c | 18
-rw-r--r--  net/sunrpc/sched.c | 18
-rw-r--r--  net/sunrpc/svcsock.c | 2
-rw-r--r--  net/tipc/bcast.c | 1
-rw-r--r--  net/tipc/socket.c | 103
-rw-r--r--  net/unix/af_unix.c | 4
-rw-r--r--  net/vmw_vsock/Kconfig | 28
-rw-r--r--  net/vmw_vsock/Makefile | 7
-rw-r--r--  net/vmw_vsock/af_vsock.c | 2012
-rw-r--r--  net/vmw_vsock/af_vsock.h | 175
-rw-r--r--  net/vmw_vsock/vmci_transport.c | 2155
-rw-r--r--  net/vmw_vsock/vmci_transport.h | 139
-rw-r--r--  net/vmw_vsock/vmci_transport_notify.c | 680
-rw-r--r--  net/vmw_vsock/vmci_transport_notify.h | 83
-rw-r--r--  net/vmw_vsock/vmci_transport_notify_qstate.c | 438
-rw-r--r--  net/vmw_vsock/vsock_addr.c | 86
-rw-r--r--  net/vmw_vsock/vsock_addr.h | 32
-rw-r--r--  net/wireless/chan.c | 142
-rw-r--r--  net/wireless/core.c | 13
-rw-r--r--  net/wireless/core.h | 35
-rw-r--r--  net/wireless/ibss.c | 4
-rw-r--r--  net/wireless/mlme.c | 136
-rw-r--r--  net/wireless/nl80211.c | 899
-rw-r--r--  net/wireless/nl80211.h | 7
-rw-r--r--  net/wireless/rdev-ops.h | 12
-rw-r--r--  net/wireless/reg.c | 20
-rw-r--r--  net/wireless/scan.c | 632
-rw-r--r--  net/wireless/sme.c | 16
-rw-r--r--  net/wireless/sysfs.c | 2
-rw-r--r--  net/wireless/trace.h | 98
-rw-r--r--  net/wireless/util.c | 5
-rw-r--r--  net/wireless/wext-proc.c | 5
-rw-r--r--  net/xfrm/xfrm_algo.c | 65
-rw-r--r--  net/xfrm/xfrm_output.c | 2
-rw-r--r--  net/xfrm/xfrm_policy.c | 247
-rw-r--r--  net/xfrm/xfrm_proc.c | 6
240 files changed, 15877 insertions(+), 3545 deletions(-)
diff --git a/net/802/Kconfig b/net/802/Kconfig
index be33d27c8e69..80d4bf78905d 100644
--- a/net/802/Kconfig
+++ b/net/802/Kconfig
@@ -5,3 +5,6 @@ config STP
 config GARP
         tristate
         select STP
+
+config MRP
+        tristate
diff --git a/net/802/Makefile b/net/802/Makefile
index a30d6e385aed..37e654d6615e 100644
--- a/net/802/Makefile
+++ b/net/802/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_IPX) += p8022.o psnap.o p8023.o
 obj-$(CONFIG_ATALK) += p8022.o psnap.o
 obj-$(CONFIG_STP) += stp.o
 obj-$(CONFIG_GARP) += garp.o
+obj-$(CONFIG_MRP) += mrp.o
diff --git a/net/802/mrp.c b/net/802/mrp.c
new file mode 100644
index 000000000000..a4cc3229952a
--- /dev/null
+++ b/net/802/mrp.c
@@ -0,0 +1,895 @@
+/*
+ * IEEE 802.1Q Multiple Registration Protocol (MRP)
+ *
+ * Copyright (c) 2012 Massachusetts Institute of Technology
+ *
+ * Adapted from code in net/802/garp.c
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <net/mrp.h>
+#include <asm/unaligned.h>
+
+static unsigned int mrp_join_time __read_mostly = 200;
+module_param(mrp_join_time, uint, 0644);
+MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");
+MODULE_LICENSE("GPL");
+
+static const u8
+mrp_applicant_state_table[MRP_APPLICANT_MAX + 1][MRP_EVENT_MAX + 1] = {
+        [MRP_APPLICANT_VO] = {
+                [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
+                [MRP_EVENT_JOIN] = MRP_APPLICANT_VP,
+                [MRP_EVENT_LV] = MRP_APPLICANT_VO,
+                [MRP_EVENT_TX] = MRP_APPLICANT_VO,
+                [MRP_EVENT_R_NEW] = MRP_APPLICANT_VO,
+                [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AO,
+                [MRP_EVENT_R_IN] = MRP_APPLICANT_VO,
+                [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VO,
+                [MRP_EVENT_R_MT] = MRP_APPLICANT_VO,
+                [MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
+                [MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
+                [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
+                [MRP_EVENT_PERIODIC] = MRP_APPLICANT_VO,
+        },
+        [MRP_APPLICANT_VP] = {
+                [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
+                [MRP_EVENT_JOIN] = MRP_APPLICANT_VP,
+                [MRP_EVENT_LV] = MRP_APPLICANT_VO,
+                [MRP_EVENT_TX] = MRP_APPLICANT_AA,
+                [MRP_EVENT_R_NEW] = MRP_APPLICANT_VP,
+                [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AP,
+                [MRP_EVENT_R_IN] = MRP_APPLICANT_VP,
+                [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VP,
+                [MRP_EVENT_R_MT] = MRP_APPLICANT_VP,
+                [MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
+                [MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
+                [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
+                [MRP_EVENT_PERIODIC] = MRP_APPLICANT_VP,
+        },
+        [MRP_APPLICANT_VN] = {
+                [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
+                [MRP_EVENT_JOIN] = MRP_APPLICANT_VN,
+                [MRP_EVENT_LV] = MRP_APPLICANT_LA,
+                [MRP_EVENT_TX] = MRP_APPLICANT_AN,
+                [MRP_EVENT_R_NEW] = MRP_APPLICANT_VN,
+                [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_VN,
+                [MRP_EVENT_R_IN] = MRP_APPLICANT_VN,
+                [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VN,
+                [MRP_EVENT_R_MT] = MRP_APPLICANT_VN,
+                [MRP_EVENT_R_LV] = MRP_APPLICANT_VN,
+                [MRP_EVENT_R_LA] = MRP_APPLICANT_VN,
+                [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VN,
+                [MRP_EVENT_PERIODIC] = MRP_APPLICANT_VN,
+        },
+        [MRP_APPLICANT_AN] = {
+                [MRP_EVENT_NEW] = MRP_APPLICANT_AN,
+                [MRP_EVENT_JOIN] = MRP_APPLICANT_AN,
+                [MRP_EVENT_LV] = MRP_APPLICANT_LA,
+                [MRP_EVENT_TX] = MRP_APPLICANT_QA,
+                [MRP_EVENT_R_NEW] = MRP_APPLICANT_AN,
+                [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AN,
+                [MRP_EVENT_R_IN] = MRP_APPLICANT_AN,
+                [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AN,
+                [MRP_EVENT_R_MT] = MRP_APPLICANT_AN,
+                [MRP_EVENT_R_LV] = MRP_APPLICANT_VN,
+                [MRP_EVENT_R_LA] = MRP_APPLICANT_VN,
+                [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VN,
+                [MRP_EVENT_PERIODIC] = MRP_APPLICANT_AN,
+        },
+        [MRP_APPLICANT_AA] = {
+                [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
+                [MRP_EVENT_JOIN] = MRP_APPLICANT_AA,
+                [MRP_EVENT_LV] = MRP_APPLICANT_LA,
+                [MRP_EVENT_TX] = MRP_APPLICANT_QA,
+                [MRP_EVENT_R_NEW] = MRP_APPLICANT_AA,
+                [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QA,
+                [MRP_EVENT_R_IN] = MRP_APPLICANT_AA,
+                [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AA,
+                [MRP_EVENT_R_MT] = MRP_APPLICANT_AA,
+                [MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
+                [MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
+                [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
+                [MRP_EVENT_PERIODIC] = MRP_APPLICANT_AA,
+        },
+        [MRP_APPLICANT_QA] = {
+                [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
+                [MRP_EVENT_JOIN] = MRP_APPLICANT_QA,
+                [MRP_EVENT_LV] = MRP_APPLICANT_LA,
+                [MRP_EVENT_TX] = MRP_APPLICANT_QA,
+                [MRP_EVENT_R_NEW] = MRP_APPLICANT_QA,
+                [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QA,
+                [MRP_EVENT_R_IN] = MRP_APPLICANT_QA,
+                [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AA,
+                [MRP_EVENT_R_MT] = MRP_APPLICANT_AA,
+                [MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
+                [MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
+                [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
+                [MRP_EVENT_PERIODIC] = MRP_APPLICANT_AA,
+        },
+        [MRP_APPLICANT_LA] = {
+                [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
+                [MRP_EVENT_JOIN] = MRP_APPLICANT_AA,
+                [MRP_EVENT_LV] = MRP_APPLICANT_LA,
+                [MRP_EVENT_TX] = MRP_APPLICANT_VO,
+                [MRP_EVENT_R_NEW] = MRP_APPLICANT_LA,
+                [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_LA,
+                [MRP_EVENT_R_IN] = MRP_APPLICANT_LA,
+                [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_LA,
+                [MRP_EVENT_R_MT] = MRP_APPLICANT_LA,
+                [MRP_EVENT_R_LV] = MRP_APPLICANT_LA,
+                [MRP_EVENT_R_LA] = MRP_APPLICANT_LA,
+                [MRP_EVENT_REDECLARE] = MRP_APPLICANT_LA,
+                [MRP_EVENT_PERIODIC] = MRP_APPLICANT_LA,
+        },
+        [MRP_APPLICANT_AO] = {
+                [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
+                [MRP_EVENT_JOIN] = MRP_APPLICANT_AP,
+                [MRP_EVENT_LV] = MRP_APPLICANT_AO,
+                [MRP_EVENT_TX] = MRP_APPLICANT_AO,
+                [MRP_EVENT_R_NEW] = MRP_APPLICANT_AO,
+                [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QO,
+                [MRP_EVENT_R_IN] = MRP_APPLICANT_AO,
+                [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AO,
+                [MRP_EVENT_R_MT] = MRP_APPLICANT_AO,
+                [MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
+                [MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
+                [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
+                [MRP_EVENT_PERIODIC] = MRP_APPLICANT_AO,
+        },
+        [MRP_APPLICANT_QO] = {
+                [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
+                [MRP_EVENT_JOIN] = MRP_APPLICANT_QP,
+                [MRP_EVENT_LV] = MRP_APPLICANT_QO,
+                [MRP_EVENT_TX] = MRP_APPLICANT_QO,
+                [MRP_EVENT_R_NEW] = MRP_APPLICANT_QO,
+                [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QO,
+                [MRP_EVENT_R_IN] = MRP_APPLICANT_QO,
+                [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AO,
+                [MRP_EVENT_R_MT] = MRP_APPLICANT_AO,
+                [MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
+                [MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
+                [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
+                [MRP_EVENT_PERIODIC] = MRP_APPLICANT_QO,
+        },
+        [MRP_APPLICANT_AP] = {
+                [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
+                [MRP_EVENT_JOIN] = MRP_APPLICANT_AP,
+                [MRP_EVENT_LV] = MRP_APPLICANT_AO,
+                [MRP_EVENT_TX] = MRP_APPLICANT_QA,
+                [MRP_EVENT_R_NEW] = MRP_APPLICANT_AP,
+                [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QP,
+                [MRP_EVENT_R_IN] = MRP_APPLICANT_AP,
+                [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AP,
+                [MRP_EVENT_R_MT] = MRP_APPLICANT_AP,
+                [MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
+                [MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
+                [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
+                [MRP_EVENT_PERIODIC] = MRP_APPLICANT_AP,
+        },
+        [MRP_APPLICANT_QP] = {
+                [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
+                [MRP_EVENT_JOIN] = MRP_APPLICANT_QP,
+                [MRP_EVENT_LV] = MRP_APPLICANT_QO,
+                [MRP_EVENT_TX] = MRP_APPLICANT_QP,
+                [MRP_EVENT_R_NEW] = MRP_APPLICANT_QP,
+                [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QP,
+                [MRP_EVENT_R_IN] = MRP_APPLICANT_QP,
+                [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AP,
+                [MRP_EVENT_R_MT] = MRP_APPLICANT_AP,
+                [MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
+                [MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
+                [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
+                [MRP_EVENT_PERIODIC] = MRP_APPLICANT_AP,
+        },
+};
+
+static const u8
+mrp_tx_action_table[MRP_APPLICANT_MAX + 1] = {
+        [MRP_APPLICANT_VO] = MRP_TX_ACTION_S_IN_OPTIONAL,
+        [MRP_APPLICANT_VP] = MRP_TX_ACTION_S_JOIN_IN,
+        [MRP_APPLICANT_VN] = MRP_TX_ACTION_S_NEW,
+        [MRP_APPLICANT_AN] = MRP_TX_ACTION_S_NEW,
+        [MRP_APPLICANT_AA] = MRP_TX_ACTION_S_JOIN_IN,
+        [MRP_APPLICANT_QA] = MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
+        [MRP_APPLICANT_LA] = MRP_TX_ACTION_S_LV,
+        [MRP_APPLICANT_AO] = MRP_TX_ACTION_S_IN_OPTIONAL,
+        [MRP_APPLICANT_QO] = MRP_TX_ACTION_S_IN_OPTIONAL,
+        [MRP_APPLICANT_AP] = MRP_TX_ACTION_S_JOIN_IN,
+        [MRP_APPLICANT_QP] = MRP_TX_ACTION_S_IN_OPTIONAL,
+};
+
+static void mrp_attrvalue_inc(void *value, u8 len)
+{
+        u8 *v = (u8 *)value;
+
+        /* Add 1 to the last byte. If it becomes zero,
+         * go to the previous byte and repeat.
+         */
+        while (len > 0 && !++v[--len])
+                ;
+}
+
+static int mrp_attr_cmp(const struct mrp_attr *attr,
+                        const void *value, u8 len, u8 type)
+{
+        if (attr->type != type)
+                return attr->type - type;
+        if (attr->len != len)
+                return attr->len - len;
+        return memcmp(attr->value, value, len);
+}
+
+static struct mrp_attr *mrp_attr_lookup(const struct mrp_applicant *app,
+                                        const void *value, u8 len, u8 type)
+{
+        struct rb_node *parent = app->mad.rb_node;
+        struct mrp_attr *attr;
+        int d;
+
+        while (parent) {
+                attr = rb_entry(parent, struct mrp_attr, node);
+                d = mrp_attr_cmp(attr, value, len, type);
+                if (d > 0)
+                        parent = parent->rb_left;
+                else if (d < 0)
+                        parent = parent->rb_right;
+                else
+                        return attr;
+        }
+        return NULL;
+}
+
+static struct mrp_attr *mrp_attr_create(struct mrp_applicant *app,
+                                        const void *value, u8 len, u8 type)
+{
+        struct rb_node *parent = NULL, **p = &app->mad.rb_node;
+        struct mrp_attr *attr;
+        int d;
+
+        while (*p) {
+                parent = *p;
+                attr = rb_entry(parent, struct mrp_attr, node);
+                d = mrp_attr_cmp(attr, value, len, type);
+                if (d > 0)
+                        p = &parent->rb_left;
+                else if (d < 0)
+                        p = &parent->rb_right;
+                else {
+                        /* The attribute already exists; re-use it. */
+                        return attr;
+                }
+        }
+        attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
+        if (!attr)
+                return attr;
+        attr->state = MRP_APPLICANT_VO;
+        attr->type = type;
+        attr->len = len;
+        memcpy(attr->value, value, len);
+
+        rb_link_node(&attr->node, parent, p);
+        rb_insert_color(&attr->node, &app->mad);
+        return attr;
+}
+
+static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
+{
+        rb_erase(&attr->node, &app->mad);
+        kfree(attr);
+}
+
+static int mrp_pdu_init(struct mrp_applicant *app)
+{
+        struct sk_buff *skb;
+        struct mrp_pdu_hdr *ph;
+
+        skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
+                        GFP_ATOMIC);
+        if (!skb)
+                return -ENOMEM;
+
+        skb->dev = app->dev;
+        skb->protocol = app->app->pkttype.type;
+        skb_reserve(skb, LL_RESERVED_SPACE(app->dev));
+        skb_reset_network_header(skb);
+        skb_reset_transport_header(skb);
+
+        ph = (struct mrp_pdu_hdr *)__skb_put(skb, sizeof(*ph));
+        ph->version = app->app->version;
+
+        app->pdu = skb;
+        return 0;
+}
+
+static int mrp_pdu_append_end_mark(struct mrp_applicant *app)
+{
+        __be16 *endmark;
+
+        if (skb_tailroom(app->pdu) < sizeof(*endmark))
+                return -1;
+        endmark = (__be16 *)__skb_put(app->pdu, sizeof(*endmark));
+        put_unaligned(MRP_END_MARK, endmark);
+        return 0;
+}
+
+static void mrp_pdu_queue(struct mrp_applicant *app)
+{
+        if (!app->pdu)
+                return;
+
+        if (mrp_cb(app->pdu)->mh)
+                mrp_pdu_append_end_mark(app);
+        mrp_pdu_append_end_mark(app);
+
+        dev_hard_header(app->pdu, app->dev, ntohs(app->app->pkttype.type),
+                        app->app->group_address, app->dev->dev_addr,
+                        app->pdu->len);
+
+        skb_queue_tail(&app->queue, app->pdu);
+        app->pdu = NULL;
+}
+
+static void mrp_queue_xmit(struct mrp_applicant *app)
+{
+        struct sk_buff *skb;
+
+        while ((skb = skb_dequeue(&app->queue)))
+                dev_queue_xmit(skb);
+}
+
+static int mrp_pdu_append_msg_hdr(struct mrp_applicant *app,
+                                  u8 attrtype, u8 attrlen)
+{
+        struct mrp_msg_hdr *mh;
+
+        if (mrp_cb(app->pdu)->mh) {
+                if (mrp_pdu_append_end_mark(app) < 0)
+                        return -1;
+                mrp_cb(app->pdu)->mh = NULL;
+                mrp_cb(app->pdu)->vah = NULL;
+        }
+
+        if (skb_tailroom(app->pdu) < sizeof(*mh))
+                return -1;
+        mh = (struct mrp_msg_hdr *)__skb_put(app->pdu, sizeof(*mh));
+        mh->attrtype = attrtype;
+        mh->attrlen = attrlen;
+        mrp_cb(app->pdu)->mh = mh;
+        return 0;
+}
+
+static int mrp_pdu_append_vecattr_hdr(struct mrp_applicant *app,
+                                      const void *firstattrvalue, u8 attrlen)
+{
+        struct mrp_vecattr_hdr *vah;
+
+        if (skb_tailroom(app->pdu) < sizeof(*vah) + attrlen)
+                return -1;
+        vah = (struct mrp_vecattr_hdr *)__skb_put(app->pdu,
+                                                  sizeof(*vah) + attrlen);
+        put_unaligned(0, &vah->lenflags);
+        memcpy(vah->firstattrvalue, firstattrvalue, attrlen);
+        mrp_cb(app->pdu)->vah = vah;
+        memcpy(mrp_cb(app->pdu)->attrvalue, firstattrvalue, attrlen);
+        return 0;
+}
+
+static int mrp_pdu_append_vecattr_event(struct mrp_applicant *app,
+                                        const struct mrp_attr *attr,
+                                        enum mrp_vecattr_event vaevent)
+{
+        u16 len, pos;
+        u8 *vaevents;
+        int err;
+again:
+        if (!app->pdu) {
+                err = mrp_pdu_init(app);
+                if (err < 0)
+                        return err;
+        }
+
+        /* If there is no Message header in the PDU, or the Message header is
+         * for a different attribute type, add an EndMark (if necessary) and a
+         * new Message header to the PDU.
+         */
+        if (!mrp_cb(app->pdu)->mh ||
+            mrp_cb(app->pdu)->mh->attrtype != attr->type ||
+            mrp_cb(app->pdu)->mh->attrlen != attr->len) {
+                if (mrp_pdu_append_msg_hdr(app, attr->type, attr->len) < 0)
+                        goto queue;
+        }
+
+        /* If there is no VectorAttribute header for this Message in the PDU,
+         * or this attribute's value does not sequentially follow the previous
+         * attribute's value, add a new VectorAttribute header to the PDU.
+         */
+        if (!mrp_cb(app->pdu)->vah ||
+            memcmp(mrp_cb(app->pdu)->attrvalue, attr->value, attr->len)) {
+                if (mrp_pdu_append_vecattr_hdr(app, attr->value, attr->len) < 0)
+                        goto queue;
+        }
+
+        len = be16_to_cpu(get_unaligned(&mrp_cb(app->pdu)->vah->lenflags));
+        pos = len % 3;
+
+        /* Events are packed into Vectors in the PDU, three to a byte. Add a
+         * byte to the end of the Vector if necessary.
+         */
+        if (!pos) {
+                if (skb_tailroom(app->pdu) < sizeof(u8))
+                        goto queue;
+                vaevents = (u8 *)__skb_put(app->pdu, sizeof(u8));
+        } else {
+                vaevents = (u8 *)(skb_tail_pointer(app->pdu) - sizeof(u8));
+        }
+
+        switch (pos) {
+        case 0:
+                *vaevents = vaevent * (__MRP_VECATTR_EVENT_MAX *
+                                       __MRP_VECATTR_EVENT_MAX);
+                break;
+        case 1:
+                *vaevents += vaevent * __MRP_VECATTR_EVENT_MAX;
+                break;
+        case 2:
+                *vaevents += vaevent;
+                break;
+        default:
+                WARN_ON(1);
+        }
+
+        /* Increment the length of the VectorAttribute in the PDU, as well as
+         * the value of the next attribute that would continue its Vector.
+         */
+        put_unaligned(cpu_to_be16(++len), &mrp_cb(app->pdu)->vah->lenflags);
+        mrp_attrvalue_inc(mrp_cb(app->pdu)->attrvalue, attr->len);
+
+        return 0;
+
+queue:
+        mrp_pdu_queue(app);
+        goto again;
+}
+
+static void mrp_attr_event(struct mrp_applicant *app,
+                           struct mrp_attr *attr, enum mrp_event event)
+{
+        enum mrp_applicant_state state;
+
+        state = mrp_applicant_state_table[attr->state][event];
+        if (state == MRP_APPLICANT_INVALID) {
+                WARN_ON(1);
+                return;
+        }
+
+        if (event == MRP_EVENT_TX) {
+                /* When appending the attribute fails, don't update its state
+                 * in order to retry at the next TX event.
+                 */
+
+                switch (mrp_tx_action_table[attr->state]) {
+                case MRP_TX_ACTION_NONE:
+                case MRP_TX_ACTION_S_JOIN_IN_OPTIONAL:
+                case MRP_TX_ACTION_S_IN_OPTIONAL:
+                        break;
+                case MRP_TX_ACTION_S_NEW:
+                        if (mrp_pdu_append_vecattr_event(
+                                    app, attr, MRP_VECATTR_EVENT_NEW) < 0)
+                                return;
+                        break;
+                case MRP_TX_ACTION_S_JOIN_IN:
+                        if (mrp_pdu_append_vecattr_event(
+                                    app, attr, MRP_VECATTR_EVENT_JOIN_IN) < 0)
+                                return;
+                        break;
+                case MRP_TX_ACTION_S_LV:
+                        if (mrp_pdu_append_vecattr_event(
+                                    app, attr, MRP_VECATTR_EVENT_LV) < 0)
+                                return;
+                        /* As a pure applicant, sending a leave message
+                         * implies that the attribute was unregistered and
+                         * can be destroyed.
+                         */
+                        mrp_attr_destroy(app, attr);
+                        return;
+                default:
+                        WARN_ON(1);
+                }
+        }
+
+        attr->state = state;
+}
+
+int mrp_request_join(const struct net_device *dev,
+                     const struct mrp_application *appl,
+                     const void *value, u8 len, u8 type)
+{
+        struct mrp_port *port = rtnl_dereference(dev->mrp_port);
+        struct mrp_applicant *app = rtnl_dereference(
+                port->applicants[appl->type]);
+        struct mrp_attr *attr;
+
+        if (sizeof(struct mrp_skb_cb) + len >
+            FIELD_SIZEOF(struct sk_buff, cb))
+                return -ENOMEM;
+
+        spin_lock_bh(&app->lock);
+        attr = mrp_attr_create(app, value, len, type);
+        if (!attr) {
+                spin_unlock_bh(&app->lock);
+                return -ENOMEM;
+        }
+        mrp_attr_event(app, attr, MRP_EVENT_JOIN);
+        spin_unlock_bh(&app->lock);
+        return 0;
+}
+EXPORT_SYMBOL_GPL(mrp_request_join);
+
+void mrp_request_leave(const struct net_device *dev,
+                       const struct mrp_application *appl,
+                       const void *value, u8 len, u8 type)
+{
+        struct mrp_port *port = rtnl_dereference(dev->mrp_port);
+        struct mrp_applicant *app = rtnl_dereference(
+                port->applicants[appl->type]);
+        struct mrp_attr *attr;
+
+        if (sizeof(struct mrp_skb_cb) + len >
+            FIELD_SIZEOF(struct sk_buff, cb))
+                return;
+
+        spin_lock_bh(&app->lock);
+        attr = mrp_attr_lookup(app, value, len, type);
+        if (!attr) {
+                spin_unlock_bh(&app->lock);
+                return;
+        }
+        mrp_attr_event(app, attr, MRP_EVENT_LV);
+        spin_unlock_bh(&app->lock);
+}
+EXPORT_SYMBOL_GPL(mrp_request_leave);
+
+static void mrp_mad_event(struct mrp_applicant *app, enum mrp_event event)
+{
+        struct rb_node *node, *next;
+        struct mrp_attr *attr;
+
+        for (node = rb_first(&app->mad);
+             next = node ? rb_next(node) : NULL, node != NULL;
+             node = next) {
+                attr = rb_entry(node, struct mrp_attr, node);
+                mrp_attr_event(app, attr, event);
+        }
+}
+
+static void mrp_join_timer_arm(struct mrp_applicant *app)
+{
+        unsigned long delay;
+
+        delay = (u64)msecs_to_jiffies(mrp_join_time) * net_random() >> 32;
+        mod_timer(&app->join_timer, jiffies + delay);
+}
+
+static void mrp_join_timer(unsigned long data)
+{
+        struct mrp_applicant *app = (struct mrp_applicant *)data;
+
+        spin_lock(&app->lock);
+        mrp_mad_event(app, MRP_EVENT_TX);
+        mrp_pdu_queue(app);
+        spin_unlock(&app->lock);
+
+        mrp_queue_xmit(app);
+        mrp_join_timer_arm(app);
+}
+
+static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
+{
+        __be16 endmark;
+
+        if (skb_copy_bits(skb, *offset, &endmark, sizeof(endmark)) < 0)
+                return -1;
+        if (endmark == MRP_END_MARK) {
+                *offset += sizeof(endmark);
+                return -1;
+        }
+        return 0;
+}
+
+static void mrp_pdu_parse_vecattr_event(struct mrp_applicant *app,
+                                        struct sk_buff *skb,
+                                        enum mrp_vecattr_event vaevent)
+{
+        struct mrp_attr *attr;
+        enum mrp_event event;
+
+        attr = mrp_attr_lookup(app, mrp_cb(skb)->attrvalue,
+                               mrp_cb(skb)->mh->attrlen,
+                               mrp_cb(skb)->mh->attrtype);
+        if (attr == NULL)
+                return;
+
+        switch (vaevent) {
+        case MRP_VECATTR_EVENT_NEW:
+                event = MRP_EVENT_R_NEW;
+                break;
+        case MRP_VECATTR_EVENT_JOIN_IN:
+                event = MRP_EVENT_R_JOIN_IN;
+                break;
+        case MRP_VECATTR_EVENT_IN:
+                event = MRP_EVENT_R_IN;
+                break;
+        case MRP_VECATTR_EVENT_JOIN_MT:
+                event = MRP_EVENT_R_JOIN_MT;
+                break;
+        case MRP_VECATTR_EVENT_MT:
+                event = MRP_EVENT_R_MT;
+                break;
+        case MRP_VECATTR_EVENT_LV:
+                event = MRP_EVENT_R_LV;
+                break;
+        default:
+                return;
+        }
+
+        mrp_attr_event(app, attr, event);
+}
+
+static int mrp_pdu_parse_vecattr(struct mrp_applicant *app,
+                                 struct sk_buff *skb, int *offset)
+{
+        struct mrp_vecattr_hdr _vah;
+        u16 valen;
+        u8 vaevents, vaevent;
+
+        mrp_cb(skb)->vah = skb_header_pointer(skb, *offset, sizeof(_vah),
+                                              &_vah);
+        if (!mrp_cb(skb)->vah)
+                return -1;
+        *offset += sizeof(_vah);
+
+        if (get_unaligned(&mrp_cb(skb)->vah->lenflags) &
+            MRP_VECATTR_HDR_FLAG_LA)
+                mrp_mad_event(app, MRP_EVENT_R_LA);
+        valen = be16_to_cpu(get_unaligned(&mrp_cb(skb)->vah->lenflags) &
+                            MRP_VECATTR_HDR_LEN_MASK);
+
+        /* The VectorAttribute structure in a PDU carries event information
+         * about one or more attributes having consecutive values. Only the
+         * value for the first attribute is contained in the structure. So
+         * we make a copy of that value, and then increment it each time we
+         * advance to the next event in its Vector.
+         */
+        if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen >
+            FIELD_SIZEOF(struct sk_buff, cb))
+                return -1;
+        if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,
+                          mrp_cb(skb)->mh->attrlen) < 0)
+                return -1;
+        *offset += mrp_cb(skb)->mh->attrlen;
+
+        /* In a VectorAttribute, the Vector contains events which are packed
+         * three to a byte. We process one byte of the Vector at a time.
+         */
+        while (valen > 0) {
+                if (skb_copy_bits(skb, *offset, &vaevents,
+                                  sizeof(vaevents)) < 0)
+                        return -1;
+                *offset += sizeof(vaevents);
+
+                /* Extract and process the first event. */
+                vaevent = vaevents / (__MRP_VECATTR_EVENT_MAX *
+                                      __MRP_VECATTR_EVENT_MAX);
+                if (vaevent >= __MRP_VECATTR_EVENT_MAX) {
+                        /* The byte is malformed; stop processing. */
+                        return -1;
+                }
+                mrp_pdu_parse_vecattr_event(app, skb, vaevent);
+
+                /* If present, extract and process the second event. */
+                if (!--valen)
+                        break;
+                mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
+                                  mrp_cb(skb)->mh->attrlen);
+                vaevents %= (__MRP_VECATTR_EVENT_MAX *
+                             __MRP_VECATTR_EVENT_MAX);
+                vaevent = vaevents / __MRP_VECATTR_EVENT_MAX;
+                mrp_pdu_parse_vecattr_event(app, skb, vaevent);
+
+                /* If present, extract and process the third event. */
+                if (!--valen)
+                        break;
+                mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
+                                  mrp_cb(skb)->mh->attrlen);
+                vaevents %= __MRP_VECATTR_EVENT_MAX;
+                vaevent = vaevents;
+                mrp_pdu_parse_vecattr_event(app, skb, vaevent);
+        }
+        return 0;
+}
+
+static int mrp_pdu_parse_msg(struct mrp_applicant *app, struct sk_buff *skb,
+                             int *offset)
+{
+        struct mrp_msg_hdr _mh;
+
+        mrp_cb(skb)->mh = skb_header_pointer(skb, *offset, sizeof(_mh), &_mh);
+        if (!mrp_cb(skb)->mh)
+                return -1;
+        *offset += sizeof(_mh);
+
+        if (mrp_cb(skb)->mh->attrtype == 0 ||
+            mrp_cb(skb)->mh->attrtype > app->app->maxattr ||
+            mrp_cb(skb)->mh->attrlen == 0)
+                return -1;
+
+        while (skb->len > *offset) {
+                if (mrp_pdu_parse_end_mark(skb, offset) < 0)
+                        break;
+                if (mrp_pdu_parse_vecattr(app, skb, offset) < 0)
+                        return -1;
+        }
+        return 0;
+}
+
+static int mrp_rcv(struct sk_buff *skb, struct net_device *dev,
+                   struct packet_type *pt, struct net_device *orig_dev)
+{
+        struct mrp_application *appl = container_of(pt, struct mrp_application,
+                                                    pkttype);
+        struct mrp_port *port;
+        struct mrp_applicant *app;
+        struct mrp_pdu_hdr _ph;
+        const struct mrp_pdu_hdr *ph;
+        int offset = skb_network_offset(skb);
+
+        /* If the interface is in promiscuous mode, drop the packet if
+         * it was unicast to another host.
+         */
+        if (unlikely(skb->pkt_type == PACKET_OTHERHOST))
+                goto out;
+        skb = skb_share_check(skb, GFP_ATOMIC);
+        if (unlikely(!skb))
+                goto out;
+        port = rcu_dereference(dev->mrp_port);
+        if (unlikely(!port))
+                goto out;
+        app = rcu_dereference(port->applicants[appl->type]);
+        if (unlikely(!app))
+                goto out;
+
+        ph = skb_header_pointer(skb, offset, sizeof(_ph), &_ph);
+        if (!ph)
+                goto out;
+        offset += sizeof(_ph);
+
+        if (ph->version != app->app->version)
+                goto out;
+
+        spin_lock(&app->lock);
+        while (skb->len > offset) {
+                if (mrp_pdu_parse_end_mark(skb, &offset) < 0)
+                        break;
+                if (mrp_pdu_parse_msg(app, skb, &offset) < 0)
+                        break;
+        }
+        spin_unlock(&app->lock);
+out:
+        kfree_skb(skb);
+        return 0;
+}
+
+static int mrp_init_port(struct net_device *dev)
+{
+        struct mrp_port *port;
+
+        port = kzalloc(sizeof(*port), GFP_KERNEL);
+        if (!port)
+                return -ENOMEM;
+        rcu_assign_pointer(dev->mrp_port, port);
+        return 0;
+}
+
+static void mrp_release_port(struct net_device *dev)
+{
+        struct mrp_port *port = rtnl_dereference(dev->mrp_port);
+        unsigned int i;
+
+        for (i = 0; i <= MRP_APPLICATION_MAX; i++) {
+                if (rtnl_dereference(port->applicants[i]))
+                        return;
+        }
+        RCU_INIT_POINTER(dev->mrp_port, NULL);
+        kfree_rcu(port, rcu);
+}
+
+int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
+{
+        struct mrp_applicant *app;
+        int err;
+
+        ASSERT_RTNL();
+
+        if (!rtnl_dereference(dev->mrp_port)) {
+                err = mrp_init_port(dev);
+                if (err < 0)
+                        goto err1;
+        }
+
+        err = -ENOMEM;
+        app = kzalloc(sizeof(*app), GFP_KERNEL);
+        if (!app)
+                goto err2;
+
+        err = dev_mc_add(dev, appl->group_address);
+        if (err < 0)
+                goto err3;
+
+        app->dev = dev;
+        app->app = appl;
+        app->mad = RB_ROOT;
+        spin_lock_init(&app->lock);
+        skb_queue_head_init(&app->queue);
+        rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
+        setup_timer(&app->join_timer, mrp_join_timer, (unsigned long)app);
+        mrp_join_timer_arm(app);
+        return 0;
+
+err3:
+        kfree(app);
+err2:
+        mrp_release_port(dev);
+err1:
+        return err;
+}
+EXPORT_SYMBOL_GPL(mrp_init_applicant);
+
+void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
+{
+        struct mrp_port *port = rtnl_dereference(dev->mrp_port);
+        struct mrp_applicant *app = rtnl_dereference(
+                port->applicants[appl->type]);
+
+        ASSERT_RTNL();
+
+        RCU_INIT_POINTER(port->applicants[appl->type], NULL);
+
+        /* Delete timer and generate a final TX event to flush out
+         * all pending messages before the applicant is gone.
+         */
+        del_timer_sync(&app->join_timer);
+        mrp_mad_event(app, MRP_EVENT_TX);
+        mrp_pdu_queue(app);
+        mrp_queue_xmit(app);
+
+        dev_mc_del(dev, appl->group_address);
+        kfree_rcu(app, rcu);
+        mrp_release_port(dev);
+}
+EXPORT_SYMBOL_GPL(mrp_uninit_applicant);
+
+int mrp_register_application(struct mrp_application *appl)
+{
+        appl->pkttype.func = mrp_rcv;
+        dev_add_pack(&appl->pkttype);
+        return 0;
+}
+EXPORT_SYMBOL_GPL(mrp_register_application);
+
+void mrp_unregister_application(struct mrp_application *appl)
+{
+        dev_remove_pack(&appl->pkttype);
+}
+EXPORT_SYMBOL_GPL(mrp_unregister_application);
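
The Vector encoding in mrp_pdu_append_vecattr_event() and mrp_pdu_parse_vecattr() above stores three AttributeEvents per byte in base 6. The following is a standalone userspace sketch of that packing, not part of the patch; NUM_EVENTS stands in for __MRP_VECATTR_EVENT_MAX, assuming the six vector events (New, JoinIn, In, JoinMt, Mt, Lv).

/* Sketch of MRP's three-events-per-byte Vector packing. */
#include <assert.h>
#include <stdio.h>

#define NUM_EVENTS 6    /* stand-in for __MRP_VECATTR_EVENT_MAX */

/* Pack events e0, e1, e2 (each 0..5) into one byte, base 6:
 * byte = e0*36 + e1*6 + e2, which is what the switch (pos) in
 * mrp_pdu_append_vecattr_event() builds up incrementally. */
static unsigned char pack(unsigned e0, unsigned e1, unsigned e2)
{
        return e0 * (NUM_EVENTS * NUM_EVENTS) + e1 * NUM_EVENTS + e2;
}

/* Unpack, following the divide/modulo steps in mrp_pdu_parse_vecattr(). */
static void unpack(unsigned char b, unsigned e[3])
{
        e[0] = b / (NUM_EVENTS * NUM_EVENTS);
        b   %= NUM_EVENTS * NUM_EVENTS;
        e[1] = b / NUM_EVENTS;
        e[2] = b % NUM_EVENTS;
}

int main(void)
{
        unsigned e[3];

        unpack(pack(1, 5, 0), e);       /* JoinIn, Lv, New */
        assert(e[0] == 1 && e[1] == 5 && e[2] == 0);
        printf("packed byte: %u\n", pack(1, 5, 0));     /* 1*36 + 5*6 + 0 = 66 */
        return 0;
}

With six event values the largest packed byte is 5*36 + 5*6 + 5 = 215, so three events always fit in one octet, and any byte decoding to a first event of 6 or more is rejected as malformed, as in the parser above.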
diff --git a/net/8021q/Kconfig b/net/8021q/Kconfig
index fa073a54963e..8f7517df41a5 100644
--- a/net/8021q/Kconfig
+++ b/net/8021q/Kconfig
@@ -27,3 +27,14 @@ config VLAN_8021Q_GVRP
           automatic propagation of registered VLANs to switches.
 
           If unsure, say N.
+
+config VLAN_8021Q_MVRP
+        bool "MVRP (Multiple VLAN Registration Protocol) support"
+        depends on VLAN_8021Q
+        select MRP
+        help
+          Select this to enable MVRP end-system support. MVRP is used for
+          automatic propagation of registered VLANs to switches; it
+          supersedes GVRP and is not backwards-compatible.
+
+          If unsure, say N.
diff --git a/net/8021q/Makefile b/net/8021q/Makefile
index 9f4f174ead1c..7bc8db08d7ef 100644
--- a/net/8021q/Makefile
+++ b/net/8021q/Makefile
@@ -6,5 +6,6 @@ obj-$(CONFIG_VLAN_8021Q) += 8021q.o
 
 8021q-y := vlan.o vlan_dev.o vlan_netlink.o
 8021q-$(CONFIG_VLAN_8021Q_GVRP) += vlan_gvrp.o
+8021q-$(CONFIG_VLAN_8021Q_MVRP) += vlan_mvrp.o
 8021q-$(CONFIG_PROC_FS) += vlanproc.o
 
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index addc578d5443..a18714469bf7 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -95,6 +95,8 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 
         grp->nr_vlan_devs--;
 
+        if (vlan->flags & VLAN_FLAG_MVRP)
+                vlan_mvrp_request_leave(dev);
         if (vlan->flags & VLAN_FLAG_GVRP)
                 vlan_gvrp_request_leave(dev);
 
@@ -107,8 +109,10 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 
         netdev_upper_dev_unlink(real_dev, dev);
 
-        if (grp->nr_vlan_devs == 0)
+        if (grp->nr_vlan_devs == 0) {
+                vlan_mvrp_uninit_applicant(real_dev);
                 vlan_gvrp_uninit_applicant(real_dev);
+        }
 
         /* Get rid of the vlan's reference to real_dev */
         dev_put(real_dev);
@@ -151,15 +155,18 @@ int register_vlan_dev(struct net_device *dev)
                 err = vlan_gvrp_init_applicant(real_dev);
                 if (err < 0)
                         goto out_vid_del;
+                err = vlan_mvrp_init_applicant(real_dev);
+                if (err < 0)
+                        goto out_uninit_gvrp;
         }
 
         err = vlan_group_prealloc_vid(grp, vlan_id);
         if (err < 0)
-                goto out_uninit_applicant;
+                goto out_uninit_mvrp;
 
         err = netdev_upper_dev_link(real_dev, dev);
         if (err)
-                goto out_uninit_applicant;
+                goto out_uninit_mvrp;
 
         err = register_netdevice(dev);
         if (err < 0)
@@ -181,7 +188,10 @@ int register_vlan_dev(struct net_device *dev)
 
 out_upper_dev_unlink:
         netdev_upper_dev_unlink(real_dev, dev);
-out_uninit_applicant:
+out_uninit_mvrp:
+        if (grp->nr_vlan_devs == 0)
+                vlan_mvrp_uninit_applicant(real_dev);
+out_uninit_gvrp:
         if (grp->nr_vlan_devs == 0)
                 vlan_gvrp_uninit_applicant(real_dev);
 out_vid_del:
@@ -655,13 +665,19 @@ static int __init vlan_proto_init(void)
         if (err < 0)
                 goto err3;
 
-        err = vlan_netlink_init();
+        err = vlan_mvrp_init();
         if (err < 0)
                 goto err4;
 
+        err = vlan_netlink_init();
+        if (err < 0)
+                goto err5;
+
         vlan_ioctl_set(vlan_ioctl_handler);
         return 0;
 
+err5:
+        vlan_mvrp_uninit();
 err4:
         vlan_gvrp_uninit();
 err3:
@@ -682,6 +698,7 @@ static void __exit vlan_cleanup_module(void)
         unregister_pernet_subsys(&vlan_net_ops);
         rcu_barrier(); /* Wait for completion of call_rcu()'s */
 
+        vlan_mvrp_uninit();
         vlan_gvrp_uninit();
 }
 
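
The err4/err5 relabeling above preserves the usual kernel unwind idiom: each init step gets a label that tears down everything initialized before it, in reverse order, so inserting vlan_mvrp_init() between the GVRP and netlink steps only required renumbering the labels. A hedged, compilable sketch of the same pattern, with hypothetical stand-in functions rather than kernel APIs:

#include <stdio.h>

static int  a_init(void) { puts("a up");     return 0; }
static void a_exit(void) { puts("a down"); }
static int  b_init(void) { puts("b up");     return 0; }
static void b_exit(void) { puts("b down"); }
static int  c_init(void) { puts("c failed"); return -1; }  /* simulate failure */

static int proto_init(void)
{
        int err;

        err = a_init();                 /* cf. vlan_gvrp_init() */
        if (err < 0)
                goto err_a;
        err = b_init();                 /* cf. vlan_mvrp_init() */
        if (err < 0)
                goto err_b;
        err = c_init();                 /* cf. vlan_netlink_init() */
        if (err < 0)
                goto err_c;
        return 0;

err_c:                                  /* cf. err5: undo MVRP */
        b_exit();
err_b:                                  /* cf. err4: undo GVRP */
        a_exit();
err_a:
        return err;
}

int main(void)
{
        /* prints: a up, b up, c failed, b down, a down */
        return proto_init() ? 1 : 0;
}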
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index a4886d94c40c..670f1e8cfc0f 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -171,6 +171,22 @@ static inline int vlan_gvrp_init(void) { return 0; }
 static inline void vlan_gvrp_uninit(void) {}
 #endif
 
+#ifdef CONFIG_VLAN_8021Q_MVRP
+extern int vlan_mvrp_request_join(const struct net_device *dev);
+extern void vlan_mvrp_request_leave(const struct net_device *dev);
+extern int vlan_mvrp_init_applicant(struct net_device *dev);
+extern void vlan_mvrp_uninit_applicant(struct net_device *dev);
+extern int vlan_mvrp_init(void);
+extern void vlan_mvrp_uninit(void);
+#else
+static inline int vlan_mvrp_request_join(const struct net_device *dev) { return 0; }
+static inline void vlan_mvrp_request_leave(const struct net_device *dev) {}
+static inline int vlan_mvrp_init_applicant(struct net_device *dev) { return 0; }
+static inline void vlan_mvrp_uninit_applicant(struct net_device *dev) {}
+static inline int vlan_mvrp_init(void) { return 0; }
+static inline void vlan_mvrp_uninit(void) {}
+#endif
+
 extern const char vlan_fullname[];
 extern const char vlan_version[];
 extern int vlan_netlink_init(void);
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 71b64fde8dc9..f3b6f515eba6 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -144,6 +144,7 @@ err_free:
         kfree_skb(skb);
         return NULL;
 }
+EXPORT_SYMBOL(vlan_untag);
 
 
 /*
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 09f9108d4688..19cf81bf9f69 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -261,7 +261,7 @@ int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
         u32 old_flags = vlan->flags;
 
         if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
-                     VLAN_FLAG_LOOSE_BINDING))
+                     VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP))
                 return -EINVAL;
 
         vlan->flags = (old_flags & ~mask) | (flags & mask);
@@ -272,6 +272,13 @@ int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
                 else
                         vlan_gvrp_request_leave(dev);
         }
+
+        if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_MVRP) {
+                if (vlan->flags & VLAN_FLAG_MVRP)
+                        vlan_mvrp_request_join(dev);
+                else
+                        vlan_mvrp_request_leave(dev);
+        }
         return 0;
 }
277 284
@@ -312,6 +319,9 @@ static int vlan_dev_open(struct net_device *dev)
         if (vlan->flags & VLAN_FLAG_GVRP)
                 vlan_gvrp_request_join(dev);
 
+        if (vlan->flags & VLAN_FLAG_MVRP)
+                vlan_mvrp_request_join(dev);
+
         if (netif_carrier_ok(real_dev))
                 netif_carrier_on(dev);
         return 0;
@@ -723,7 +733,7 @@ static void vlan_dev_netpoll_cleanup(struct net_device *dev)
 
         vlan->netpoll = NULL;
 
-        __netpoll_free_rcu(netpoll);
+        __netpoll_free_async(netpoll);
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
729 739
diff --git a/net/8021q/vlan_mvrp.c b/net/8021q/vlan_mvrp.c
new file mode 100644
index 000000000000..d9ec1d5964aa
--- /dev/null
+++ b/net/8021q/vlan_mvrp.c
@@ -0,0 +1,72 @@
+/*
+ * IEEE 802.1Q Multiple VLAN Registration Protocol (MVRP)
+ *
+ * Copyright (c) 2012 Massachusetts Institute of Technology
+ *
+ * Adapted from code in net/8021q/vlan_gvrp.c
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/mrp.h>
+#include "vlan.h"
+
+#define MRP_MVRP_ADDRESS { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 }
+
+enum mvrp_attributes {
+        MVRP_ATTR_INVALID,
+        MVRP_ATTR_VID,
+        __MVRP_ATTR_MAX
+};
+#define MVRP_ATTR_MAX (__MVRP_ATTR_MAX - 1)
+
+static struct mrp_application vlan_mrp_app __read_mostly = {
+        .type = MRP_APPLICATION_MVRP,
+        .maxattr = MVRP_ATTR_MAX,
+        .pkttype.type = htons(ETH_P_MVRP),
+        .group_address = MRP_MVRP_ADDRESS,
+        .version = 0,
+};
+
+int vlan_mvrp_request_join(const struct net_device *dev)
+{
+        const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+        __be16 vlan_id = htons(vlan->vlan_id);
+
+        return mrp_request_join(vlan->real_dev, &vlan_mrp_app,
+                                &vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
+}
+
+void vlan_mvrp_request_leave(const struct net_device *dev)
+{
+        const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+        __be16 vlan_id = htons(vlan->vlan_id);
+
+        mrp_request_leave(vlan->real_dev, &vlan_mrp_app,
+                          &vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
+}
+
+int vlan_mvrp_init_applicant(struct net_device *dev)
+{
+        return mrp_init_applicant(dev, &vlan_mrp_app);
+}
+
+void vlan_mvrp_uninit_applicant(struct net_device *dev)
+{
+        mrp_uninit_applicant(dev, &vlan_mrp_app);
+}
+
+int __init vlan_mvrp_init(void)
+{
+        return mrp_register_application(&vlan_mrp_app);
+}
+
+void vlan_mvrp_uninit(void)
+{
+        mrp_unregister_application(&vlan_mrp_app);
+}
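
MVRP's attribute value is the VID as a big-endian __be16, and mrp_attrvalue_inc() in net/802/mrp.c treats it as a multi-byte big-endian integer; that is what lets declarations for consecutive VIDs continue a single VectorAttribute rather than each starting a new one. A standalone userspace sketch of the same increment, including the carry across bytes:

/* Sketch of mrp_attrvalue_inc() applied to an MVRP-style VID value. */
#include <assert.h>
#include <stdint.h>

static void attrvalue_inc(void *value, uint8_t len)
{
        uint8_t *v = value;

        /* Add 1 to the last byte; on wrap to zero, carry into the
         * previous byte, as in the kernel version. */
        while (len > 0 && !++v[--len])
                ;
}

int main(void)
{
        uint8_t vid[2] = { 0x00, 0xff };        /* VID 255 in network byte order */

        attrvalue_inc(vid, sizeof(vid));
        assert(vid[0] == 0x01 && vid[1] == 0x00);       /* VID 256: carry propagated */
        return 0;
}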
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 708c80ea1874..1789658b7cd7 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -62,7 +62,7 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])
                 flags = nla_data(data[IFLA_VLAN_FLAGS]);
                 if ((flags->flags & flags->mask) &
                     ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
-                      VLAN_FLAG_LOOSE_BINDING))
+                      VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP))
                         return -EINVAL;
         }
 
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 4de77ea5fa37..dc526ec965e4 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -131,7 +131,7 @@ void vlan_proc_cleanup(struct net *net)
         remove_proc_entry(name_conf, vn->proc_vlan_dir);
 
         if (vn->proc_vlan_dir)
-                proc_net_remove(net, name_root);
+                remove_proc_entry(name_root, net->proc_net);
 
         /* Dynamically added entries should be cleaned up as their vlan_device
          * is removed, so we should not have to take care of it here...
diff --git a/net/Kconfig b/net/Kconfig
index c31348e70aad..5a1888bb036d 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -217,6 +217,7 @@ source "net/dcb/Kconfig"
217source "net/dns_resolver/Kconfig" 217source "net/dns_resolver/Kconfig"
218source "net/batman-adv/Kconfig" 218source "net/batman-adv/Kconfig"
219source "net/openvswitch/Kconfig" 219source "net/openvswitch/Kconfig"
220source "net/vmw_vsock/Kconfig"
220 221
221config RPS 222config RPS
222 boolean 223 boolean
diff --git a/net/Makefile b/net/Makefile
index c5aa8b3b49dc..091e7b04f301 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -69,3 +69,4 @@ obj-$(CONFIG_CEPH_LIB) += ceph/
 obj-$(CONFIG_BATMAN_ADV) += batman-adv/
 obj-$(CONFIG_NFC) += nfc/
 obj-$(CONFIG_OPENVSWITCH) += openvswitch/
+obj-$(CONFIG_VSOCKETS) += vmw_vsock/
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 0d020de8d233..b4e75340b162 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -460,7 +460,7 @@ static void atm_proc_dirs_remove(void)
                 if (e->dirent)
                         remove_proc_entry(e->name, atm_proc_root);
         }
-        proc_net_remove(&init_net, "atm");
+        remove_proc_entry("atm", init_net.proc_net);
 }
 
 int __init atm_proc_init(void)
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 779095ded689..69a06c47b648 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1992,9 +1992,10 @@ static int __init ax25_init(void)
         dev_add_pack(&ax25_packet_type);
         register_netdevice_notifier(&ax25_dev_notifier);
 
-        proc_net_fops_create(&init_net, "ax25_route", S_IRUGO, &ax25_route_fops);
-        proc_net_fops_create(&init_net, "ax25", S_IRUGO, &ax25_info_fops);
-        proc_net_fops_create(&init_net, "ax25_calls", S_IRUGO, &ax25_uid_fops);
+        proc_create("ax25_route", S_IRUGO, init_net.proc_net,
+                    &ax25_route_fops);
+        proc_create("ax25", S_IRUGO, init_net.proc_net, &ax25_info_fops);
+        proc_create("ax25_calls", S_IRUGO, init_net.proc_net, &ax25_uid_fops);
 out:
         return rc;
 }
@@ -2008,9 +2009,9 @@ MODULE_ALIAS_NETPROTO(PF_AX25);
 
 static void __exit ax25_exit(void)
 {
-        proc_net_remove(&init_net, "ax25_route");
-        proc_net_remove(&init_net, "ax25");
-        proc_net_remove(&init_net, "ax25_calls");
+        remove_proc_entry("ax25_route", init_net.proc_net);
+        remove_proc_entry("ax25", init_net.proc_net);
+        remove_proc_entry("ax25_calls", init_net.proc_net);
 
         unregister_netdevice_notifier(&ax25_dev_notifier);
 
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index ea0bd31d41c2..761a59002e34 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -440,7 +440,7 @@ static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
         /* this is an hash collision with the temporary selected node. Choose
          * the one with the lowest address
          */
-        if ((tmp_max == max) &&
+        if ((tmp_max == max) && max_orig_node &&
             (batadv_compare_eth(candidate->orig, max_orig_node->orig) > 0))
                 goto out;
 
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 2f67d5ecc907..eb0f4b16ff09 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -290,7 +290,7 @@ static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
                 goto done;
         }
 
-        mgr->state = READ_LOC_AMP_INFO;
+        set_bit(READ_LOC_AMP_INFO, &mgr->state);
         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
 
 done:
@@ -499,8 +499,16 @@ send_rsp:
         if (hdev)
                 hci_dev_put(hdev);
 
-        a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident, sizeof(rsp),
-                  &rsp);
+        /* Reply error now and success after HCI Write Remote AMP Assoc
+           command complete with success status
+         */
+        if (rsp.status != A2MP_STATUS_SUCCESS) {
+                a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident,
+                          sizeof(rsp), &rsp);
+        } else {
+                set_bit(WRITE_REMOTE_AMP_ASSOC, &mgr->state);
+                mgr->ident = hdr->ident;
+        }
 
         skb_pull(skb, le16_to_cpu(hdr->len));
         return 0;
@@ -840,7 +848,7 @@ struct amp_mgr *amp_mgr_lookup_by_state(u8 state)
 
         mutex_lock(&amp_mgr_list_lock);
         list_for_each_entry(mgr, &amp_mgr_list, list) {
-                if (mgr->state == state) {
+                if (test_and_clear_bit(state, &mgr->state)) {
                         amp_mgr_get(mgr);
                         mutex_unlock(&amp_mgr_list_lock);
                         return mgr;
@@ -949,6 +957,32 @@ clean:
         kfree(req);
 }
 
+void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status)
+{
+        struct amp_mgr *mgr;
+        struct a2mp_physlink_rsp rsp;
+        struct hci_conn *hs_hcon;
+
+        mgr = amp_mgr_lookup_by_state(WRITE_REMOTE_AMP_ASSOC);
+        if (!mgr)
+                return;
+
+        hs_hcon = hci_conn_hash_lookup_state(hdev, AMP_LINK, BT_CONNECT);
+        if (!hs_hcon) {
+                rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
+        } else {
+                rsp.remote_id = hs_hcon->remote_id;
+                rsp.status = A2MP_STATUS_SUCCESS;
+        }
+
+        BT_DBG("%s mgr %p hs_hcon %p status %u", hdev->name, mgr, hs_hcon,
+               status);
+
+        rsp.local_id = hdev->id;
+        a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, mgr->ident, sizeof(rsp), &rsp);
+        amp_mgr_put(mgr);
+}
+
 void a2mp_discover_amp(struct l2cap_chan *chan)
 {
         struct l2cap_conn *conn = chan->conn;
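
The a2mp.c and amp.c changes in this series turn mgr->state from a single enum value into a bitmask driven by set_bit() and test_and_clear_bit(), so several pending operations (for example READ_LOC_AMP_INFO and WRITE_REMOTE_AMP_ASSOC) can be tracked at once, and each lookup consumes exactly one of them atomically. A userspace approximation of that pattern using C11 atomics; the bit numbers here are illustrative, not the kernel's definitions:

/* Sketch of the enum-to-bitmask state change, with C11 atomics
 * standing in for the kernel's set_bit()/test_and_clear_bit(). */
#include <assert.h>
#include <stdatomic.h>

enum { READ_LOC_AMP_INFO, WRITE_REMOTE_AMP_ASSOC };     /* bit numbers */

static atomic_ulong state;

static void set_state_bit(int bit)
{
        atomic_fetch_or(&state, 1UL << bit);
}

static int test_and_clear_state_bit(int bit)
{
        unsigned long old = atomic_fetch_and(&state, ~(1UL << bit));

        return (old >> bit) & 1;
}

int main(void)
{
        /* Two operations can be pending at once... */
        set_state_bit(READ_LOC_AMP_INFO);
        set_state_bit(WRITE_REMOTE_AMP_ASSOC);

        /* ...and each lookup consumes exactly its own bit, as in
         * amp_mgr_lookup_by_state() above. */
        assert(test_and_clear_state_bit(WRITE_REMOTE_AMP_ASSOC) == 1);
        assert(test_and_clear_state_bit(WRITE_REMOTE_AMP_ASSOC) == 0);
        assert(test_and_clear_state_bit(READ_LOC_AMP_INFO) == 1);
        return 0;
}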
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 5355df63d39b..d3ee69b35a78 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -641,7 +641,7 @@ int bt_procfs_init(struct module* module, struct net *net, const char *name,
         sk_list->fops.llseek = seq_lseek;
         sk_list->fops.release = seq_release_private;
 
-        pde = proc_net_fops_create(net, name, 0, &sk_list->fops);
+        pde = proc_create(name, 0, net->proc_net, &sk_list->fops);
         if (!pde)
                 return -ENOMEM;
 
@@ -652,7 +652,7 @@ int bt_procfs_init(struct module* module, struct net *net, const char *name,
 
 void bt_procfs_cleanup(struct net *net, const char *name)
 {
-        proc_net_remove(net, name);
+        remove_proc_entry(name, net->proc_net);
 }
 #else
 int bt_procfs_init(struct module* module, struct net *net, const char *name,
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
index 1b0d92c0643a..d459ed43c779 100644
--- a/net/bluetooth/amp.c
+++ b/net/bluetooth/amp.c
@@ -236,7 +236,7 @@ void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
 
         cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
 
-        mgr->state = READ_LOC_AMP_ASSOC;
+        set_bit(READ_LOC_AMP_ASSOC, &mgr->state);
         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
 }
 
@@ -250,7 +250,7 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
         cp.len_so_far = cpu_to_le16(0);
         cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
 
-        mgr->state = READ_LOC_AMP_ASSOC_FINAL;
+        set_bit(READ_LOC_AMP_ASSOC_FINAL, &mgr->state);
 
         /* Read Local AMP Assoc final link information data */
         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
@@ -317,7 +317,9 @@ void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle)
         if (!hcon)
                 return;
 
-        amp_write_rem_assoc_frag(hdev, hcon);
+        /* Send A2MP create phylink rsp when all fragments are written */
+        if (amp_write_rem_assoc_frag(hdev, hcon))
+                a2mp_send_create_phy_link_rsp(hdev, 0);
 }
 
 void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle)
@@ -403,26 +405,20 @@ void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon)
 
 void amp_create_logical_link(struct l2cap_chan *chan)
 {
+        struct hci_conn *hs_hcon = chan->hs_hcon;
         struct hci_cp_create_accept_logical_link cp;
-        struct hci_conn *hcon;
         struct hci_dev *hdev;
 
-        BT_DBG("chan %p", chan);
+        BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon, chan->conn->dst);
 
-        if (!chan->hs_hcon)
+        if (!hs_hcon)
                 return;
 
         hdev = hci_dev_hold(chan->hs_hcon->hdev);
         if (!hdev)
                 return;
 
-        BT_DBG("chan %p dst %pMR", chan, chan->conn->dst);
-
-        hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, chan->conn->dst);
-        if (!hcon)
-                goto done;
-
-        cp.phy_handle = hcon->handle;
+        cp.phy_handle = hs_hcon->handle;
 
         cp.tx_flow_spec.id = chan->local_id;
         cp.tx_flow_spec.stype = chan->local_stype;
@@ -438,14 +434,13 @@ void amp_create_logical_link(struct l2cap_chan *chan)
438 cp.rx_flow_spec.acc_lat = cpu_to_le32(chan->remote_acc_lat); 434 cp.rx_flow_spec.acc_lat = cpu_to_le32(chan->remote_acc_lat);
439 cp.rx_flow_spec.flush_to = cpu_to_le32(chan->remote_flush_to); 435 cp.rx_flow_spec.flush_to = cpu_to_le32(chan->remote_flush_to);
440 436
441 if (hcon->out) 437 if (hs_hcon->out)
442 hci_send_cmd(hdev, HCI_OP_CREATE_LOGICAL_LINK, sizeof(cp), 438 hci_send_cmd(hdev, HCI_OP_CREATE_LOGICAL_LINK, sizeof(cp),
443 &cp); 439 &cp);
444 else 440 else
445 hci_send_cmd(hdev, HCI_OP_ACCEPT_LOGICAL_LINK, sizeof(cp), 441 hci_send_cmd(hdev, HCI_OP_ACCEPT_LOGICAL_LINK, sizeof(cp),
446 &cp); 442 &cp);
447 443
448done:
449 hci_dev_put(hdev); 444 hci_dev_put(hdev);
450} 445}
451 446
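
The amp.c state changes convert mgr->state from a single assigned value into an atomic bitmap: set_bit() marks an operation pending without clobbering other flags, and the completion side can consume a flag with test_and_clear_bit(). A sketch of the idiom (struct and flag names hypothetical; note the flags are bit indexes, not values):

	#include <linux/bitops.h>

	#define EX_READ_ASSOC		0	/* bit index */
	#define EX_READ_ASSOC_FINAL	1	/* bit index */

	struct example_mgr {
		unsigned long state;
	};

	static void example_start_read(struct example_mgr *mgr)
	{
		set_bit(EX_READ_ASSOC, &mgr->state);
	}

	static bool example_read_done(struct example_mgr *mgr)
	{
		/* atomically consume the flag; false if it was never set */
		return test_and_clear_bit(EX_READ_ASSOC, &mgr->state);
	}
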
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index a5b639702637..e430b1abcd2f 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -33,7 +33,6 @@
33 33
34#include <net/bluetooth/bluetooth.h> 34#include <net/bluetooth/bluetooth.h>
35#include <net/bluetooth/hci_core.h> 35#include <net/bluetooth/hci_core.h>
36#include <net/bluetooth/l2cap.h>
37 36
38#include "bnep.h" 37#include "bnep.h"
39 38
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 25bfce0666eb..4925a02ae7e4 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -249,12 +249,12 @@ static void hci_conn_disconnect(struct hci_conn *conn)
249 __u8 reason = hci_proto_disconn_ind(conn); 249 __u8 reason = hci_proto_disconn_ind(conn);
250 250
251 switch (conn->type) { 251 switch (conn->type) {
252 case ACL_LINK:
253 hci_acl_disconn(conn, reason);
254 break;
255 case AMP_LINK: 252 case AMP_LINK:
256 hci_amp_disconn(conn, reason); 253 hci_amp_disconn(conn, reason);
257 break; 254 break;
255 default:
256 hci_acl_disconn(conn, reason);
257 break;
258 } 258 }
259} 259}
260 260
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 0f78e34220c9..22e77a786545 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1146,7 +1146,8 @@ static void hci_power_on(struct work_struct *work)
1146 return; 1146 return;
1147 1147
1148 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 1148 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1149 schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT); 1149 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1150 HCI_AUTO_OFF_TIMEOUT);
1150 1151
1151 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) 1152 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1152 mgmt_index_added(hdev); 1153 mgmt_index_added(hdev);
@@ -1182,14 +1183,10 @@ static void hci_discov_off(struct work_struct *work)
1182 1183
1183int hci_uuids_clear(struct hci_dev *hdev) 1184int hci_uuids_clear(struct hci_dev *hdev)
1184{ 1185{
1185 struct list_head *p, *n; 1186 struct bt_uuid *uuid, *tmp;
1186
1187 list_for_each_safe(p, n, &hdev->uuids) {
1188 struct bt_uuid *uuid;
1189 1187
1190 uuid = list_entry(p, struct bt_uuid, list); 1188 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1191 1189 list_del(&uuid->list);
1192 list_del(p);
1193 kfree(uuid); 1190 kfree(uuid);
1194 } 1191 }
1195 1192
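
hci_uuids_clear() drops the open-coded list_for_each_safe() plus list_entry() pair for list_for_each_entry_safe(), which walks typed entries while caching the next node so the current one can be unlinked and freed. The general idiom, as a self-contained sketch:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct item {
		struct list_head list;
	};

	static void clear_items(struct list_head *head)
	{
		struct item *it, *tmp;

		/* tmp holds the lookahead, so freeing 'it' is safe */
		list_for_each_entry_safe(it, tmp, head, list) {
			list_del(&it->list);
			kfree(it);
		}
	}
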
@@ -1621,8 +1618,8 @@ static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1621 if (err < 0) 1618 if (err < 0)
1622 return err; 1619 return err;
1623 1620
1624 schedule_delayed_work(&hdev->le_scan_disable, 1621 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
1625 msecs_to_jiffies(timeout)); 1622 msecs_to_jiffies(timeout));
1626 1623
1627 return 0; 1624 return 0;
1628} 1625}
@@ -1799,6 +1796,15 @@ int hci_register_dev(struct hci_dev *hdev)
1799 goto err; 1796 goto err;
1800 } 1797 }
1801 1798
1799 hdev->req_workqueue = alloc_workqueue(hdev->name,
1800 WQ_HIGHPRI | WQ_UNBOUND |
1801 WQ_MEM_RECLAIM, 1);
1802 if (!hdev->req_workqueue) {
1803 destroy_workqueue(hdev->workqueue);
1804 error = -ENOMEM;
1805 goto err;
1806 }
1807
1802 error = hci_add_sysfs(hdev); 1808 error = hci_add_sysfs(hdev);
1803 if (error < 0) 1809 if (error < 0)
1804 goto err_wqueue; 1810 goto err_wqueue;
@@ -1821,12 +1827,13 @@ int hci_register_dev(struct hci_dev *hdev)
1821 hci_notify(hdev, HCI_DEV_REG); 1827 hci_notify(hdev, HCI_DEV_REG);
1822 hci_dev_hold(hdev); 1828 hci_dev_hold(hdev);
1823 1829
1824 schedule_work(&hdev->power_on); 1830 queue_work(hdev->req_workqueue, &hdev->power_on);
1825 1831
1826 return id; 1832 return id;
1827 1833
1828err_wqueue: 1834err_wqueue:
1829 destroy_workqueue(hdev->workqueue); 1835 destroy_workqueue(hdev->workqueue);
1836 destroy_workqueue(hdev->req_workqueue);
1830err: 1837err:
1831 ida_simple_remove(&hci_index_ida, hdev->id); 1838 ida_simple_remove(&hci_index_ida, hdev->id);
1832 write_lock(&hci_dev_list_lock); 1839 write_lock(&hci_dev_list_lock);
@@ -1880,6 +1887,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
1880 hci_del_sysfs(hdev); 1887 hci_del_sysfs(hdev);
1881 1888
1882 destroy_workqueue(hdev->workqueue); 1889 destroy_workqueue(hdev->workqueue);
1890 destroy_workqueue(hdev->req_workqueue);
1883 1891
1884 hci_dev_lock(hdev); 1892 hci_dev_lock(hdev);
1885 hci_blacklist_clear(hdev); 1893 hci_blacklist_clear(hdev);
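
hci_register_dev() now pairs the existing workqueue with a dedicated high-priority, ordered req_workqueue for power and request work, and both the registration error path and hci_unregister_dev() have to tear it down. A condensed sketch of that lifecycle (names hypothetical):

	#include <linux/workqueue.h>

	struct example_dev {
		struct workqueue_struct *req_wq;
		struct work_struct power_on;
	};

	static void example_power_on(struct work_struct *work)
	{
		/* runs on req_wq */
	}

	static int example_register(struct example_dev *d)
	{
		/* max_active = 1 keeps it ordered; WQ_MEM_RECLAIM makes it
		 * usable on the memory-reclaim path */
		d->req_wq = alloc_workqueue("example-req", WQ_HIGHPRI |
					    WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
		if (!d->req_wq)
			return -ENOMEM;

		INIT_WORK(&d->power_on, example_power_on);
		queue_work(d->req_wq, &d->power_on);
		return 0;
	}

	static void example_unregister(struct example_dev *d)
	{
		destroy_workqueue(d->req_wq);	/* drains pending work first */
	}
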
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 81b44481d0d9..477726a63512 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -609,8 +609,17 @@ static void le_setup(struct hci_dev *hdev)
609 /* Read LE Buffer Size */ 609 /* Read LE Buffer Size */
610 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL); 610 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
611 611
612 /* Read LE Local Supported Features */
613 hci_send_cmd(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
614
612 /* Read LE Advertising Channel TX Power */ 615 /* Read LE Advertising Channel TX Power */
613 hci_send_cmd(hdev, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL); 616 hci_send_cmd(hdev, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
617
618 /* Read LE White List Size */
619 hci_send_cmd(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
620
621 /* Read LE Supported States */
622 hci_send_cmd(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
614} 623}
615 624
616static void hci_setup(struct hci_dev *hdev) 625static void hci_setup(struct hci_dev *hdev)
@@ -1090,6 +1099,19 @@ static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1090 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status); 1099 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
1091} 1100}
1092 1101
1102static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1103 struct sk_buff *skb)
1104{
1105 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1106
1107 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1108
1109 if (!rp->status)
1110 memcpy(hdev->le_features, rp->features, 8);
1111
1112 hci_req_complete(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, rp->status);
1113}
1114
1093static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, 1115static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1094 struct sk_buff *skb) 1116 struct sk_buff *skb)
1095{ 1117{
@@ -1290,6 +1312,19 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1290 } 1312 }
1291} 1313}
1292 1314
1315static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1316 struct sk_buff *skb)
1317{
1318 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1319
1320 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1321
1322 if (!rp->status)
1323 hdev->le_white_list_size = rp->size;
1324
1325 hci_req_complete(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, rp->status);
1326}
1327
1293static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb) 1328static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1294{ 1329{
1295 struct hci_rp_le_ltk_reply *rp = (void *) skb->data; 1330 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
@@ -1314,6 +1349,19 @@ static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1314 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status); 1349 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1315} 1350}
1316 1351
1352static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1353 struct sk_buff *skb)
1354{
1355 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1356
1357 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1358
1359 if (!rp->status)
1360 memcpy(hdev->le_states, rp->le_states, 8);
1361
1362 hci_req_complete(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, rp->status);
1363}
1364
1317static void hci_cc_write_le_host_supported(struct hci_dev *hdev, 1365static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1318 struct sk_buff *skb) 1366 struct sk_buff *skb)
1319{ 1367{
@@ -2628,6 +2676,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2628 hci_cc_le_read_buffer_size(hdev, skb); 2676 hci_cc_le_read_buffer_size(hdev, skb);
2629 break; 2677 break;
2630 2678
2679 case HCI_OP_LE_READ_LOCAL_FEATURES:
2680 hci_cc_le_read_local_features(hdev, skb);
2681 break;
2682
2631 case HCI_OP_LE_READ_ADV_TX_POWER: 2683 case HCI_OP_LE_READ_ADV_TX_POWER:
2632 hci_cc_le_read_adv_tx_power(hdev, skb); 2684 hci_cc_le_read_adv_tx_power(hdev, skb);
2633 break; 2685 break;
@@ -2664,6 +2716,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2664 hci_cc_le_set_scan_enable(hdev, skb); 2716 hci_cc_le_set_scan_enable(hdev, skb);
2665 break; 2717 break;
2666 2718
2719 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2720 hci_cc_le_read_white_list_size(hdev, skb);
2721 break;
2722
2667 case HCI_OP_LE_LTK_REPLY: 2723 case HCI_OP_LE_LTK_REPLY:
2668 hci_cc_le_ltk_reply(hdev, skb); 2724 hci_cc_le_ltk_reply(hdev, skb);
2669 break; 2725 break;
@@ -2672,6 +2728,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2672 hci_cc_le_ltk_neg_reply(hdev, skb); 2728 hci_cc_le_ltk_neg_reply(hdev, skb);
2673 break; 2729 break;
2674 2730
2731 case HCI_OP_LE_READ_SUPPORTED_STATES:
2732 hci_cc_le_read_supported_states(hdev, skb);
2733 break;
2734
2675 case HCI_OP_WRITE_LE_HOST_SUPPORTED: 2735 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2676 hci_cc_write_le_host_supported(hdev, skb); 2736 hci_cc_write_le_host_supported(hdev, skb);
2677 break; 2737 break;
@@ -3928,8 +3988,6 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3928 void *ptr = &skb->data[1]; 3988 void *ptr = &skb->data[1];
3929 s8 rssi; 3989 s8 rssi;
3930 3990
3931 hci_dev_lock(hdev);
3932
3933 while (num_reports--) { 3991 while (num_reports--) {
3934 struct hci_ev_le_advertising_info *ev = ptr; 3992 struct hci_ev_le_advertising_info *ev = ptr;
3935 3993
@@ -3939,8 +3997,6 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3939 3997
3940 ptr += sizeof(*ev) + ev->length + 1; 3998 ptr += sizeof(*ev) + ev->length + 1;
3941 } 3999 }
3942
3943 hci_dev_unlock(hdev);
3944} 4000}
3945 4001
3946static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 4002static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
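
Each of the new LE reads is wired in two places: a hci_cc_*() completion handler that parses the controller's reply and caches it on hci_dev, and a matching case in hci_cmd_complete_evt()'s opcode switch. A sketch of adding one more, with a hypothetical opcode and reply struct:

	static void hci_cc_example(struct hci_dev *hdev, struct sk_buff *skb)
	{
		struct hci_rp_example *rp = (void *) skb->data; /* hypothetical */

		BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

		if (!rp->status)
			hdev->example_val = rp->val;	/* cache on success only */

		hci_req_complete(hdev, HCI_OP_EXAMPLE, rp->status);
	}

	/* and in hci_cmd_complete_evt()'s switch: */
	case HCI_OP_EXAMPLE:
		hci_cc_example(hdev, skb);
		break;
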
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 55cceee02a84..23b4e242a31a 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -2,6 +2,7 @@
2 2
3#include <linux/debugfs.h> 3#include <linux/debugfs.h>
4#include <linux/module.h> 4#include <linux/module.h>
5#include <asm/unaligned.h>
5 6
6#include <net/bluetooth/bluetooth.h> 7#include <net/bluetooth/bluetooth.h>
7#include <net/bluetooth/hci_core.h> 8#include <net/bluetooth/hci_core.h>
@@ -461,19 +462,18 @@ static const struct file_operations blacklist_fops = {
461 462
462static void print_bt_uuid(struct seq_file *f, u8 *uuid) 463static void print_bt_uuid(struct seq_file *f, u8 *uuid)
463{ 464{
464 __be32 data0, data4; 465 u32 data0, data5;
465 __be16 data1, data2, data3, data5; 466 u16 data1, data2, data3, data4;
466 467
467 memcpy(&data0, &uuid[0], 4); 468 data5 = get_unaligned_le32(uuid);
468 memcpy(&data1, &uuid[4], 2); 469 data4 = get_unaligned_le16(uuid + 4);
469 memcpy(&data2, &uuid[6], 2); 470 data3 = get_unaligned_le16(uuid + 6);
470 memcpy(&data3, &uuid[8], 2); 471 data2 = get_unaligned_le16(uuid + 8);
471 memcpy(&data4, &uuid[10], 4); 472 data1 = get_unaligned_le16(uuid + 10);
472 memcpy(&data5, &uuid[14], 2); 473 data0 = get_unaligned_le32(uuid + 12);
473 474
474 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n", 475 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
475 ntohl(data0), ntohs(data1), ntohs(data2), ntohs(data3), 476 data0, data1, data2, data3, data4, data5);
476 ntohl(data4), ntohs(data5));
477} 477}
478 478
479static int uuids_show(struct seq_file *f, void *p) 479static int uuids_show(struct seq_file *f, void *p)
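
The print_bt_uuid() rewrite reflects that the stored UUID bytes are little-endian: instead of memcpy() into __be fields plus ntohs()/ntohl(), it pulls each field out with the unaligned little-endian accessors, reading the printable fields from the tail of the buffer backwards. The accessor idiom in isolation:

	#include <asm/unaligned.h>

	/* parse a little-endian on-wire record with no alignment guarantee */
	static void parse_record(const u8 *buf)
	{
		u16 f16 = get_unaligned_le16(buf);	/* bytes 0-1 */
		u32 f32 = get_unaligned_le32(buf + 2);	/* bytes 2-5 */

		pr_info("f16=%u f32=%u\n", f16, f32);
	}
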
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 22e658322845..7c7e9321f1ea 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1527,17 +1527,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1527 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan); 1527 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1528 1528
1529 switch (hcon->type) { 1529 switch (hcon->type) {
1530 case AMP_LINK:
1531 conn->mtu = hcon->hdev->block_mtu;
1532 break;
1533
1534 case LE_LINK: 1530 case LE_LINK:
1535 if (hcon->hdev->le_mtu) { 1531 if (hcon->hdev->le_mtu) {
1536 conn->mtu = hcon->hdev->le_mtu; 1532 conn->mtu = hcon->hdev->le_mtu;
1537 break; 1533 break;
1538 } 1534 }
1539 /* fall through */ 1535 /* fall through */
1540
1541 default: 1536 default:
1542 conn->mtu = hcon->hdev->acl_mtu; 1537 conn->mtu = hcon->hdev->acl_mtu;
1543 break; 1538 break;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index f559b966279c..39395c7144aa 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -35,7 +35,7 @@
35bool enable_hs; 35bool enable_hs;
36 36
37#define MGMT_VERSION 1 37#define MGMT_VERSION 1
38#define MGMT_REVISION 2 38#define MGMT_REVISION 3
39 39
40static const u16 mgmt_commands[] = { 40static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST, 41 MGMT_OP_READ_INDEX_LIST,
@@ -435,35 +435,117 @@ static u32 get_current_settings(struct hci_dev *hdev)
435 435
436#define PNP_INFO_SVCLASS_ID 0x1200 436#define PNP_INFO_SVCLASS_ID 0x1200
437 437
438static u8 bluetooth_base_uuid[] = { 438static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
439 0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80, 439{
440 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 440 u8 *ptr = data, *uuids_start = NULL;
441}; 441 struct bt_uuid *uuid;
442
443 if (len < 4)
444 return ptr;
445
446 list_for_each_entry(uuid, &hdev->uuids, list) {
447 u16 uuid16;
448
449 if (uuid->size != 16)
450 continue;
451
452 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
453 if (uuid16 < 0x1100)
454 continue;
455
456 if (uuid16 == PNP_INFO_SVCLASS_ID)
457 continue;
442 458
443static u16 get_uuid16(u8 *uuid128) 459 if (!uuids_start) {
460 uuids_start = ptr;
461 uuids_start[0] = 1;
462 uuids_start[1] = EIR_UUID16_ALL;
463 ptr += 2;
464 }
465
466 /* Stop if not enough space to put next UUID */
467 if ((ptr - data) + sizeof(u16) > len) {
468 uuids_start[1] = EIR_UUID16_SOME;
469 break;
470 }
471
472 *ptr++ = (uuid16 & 0x00ff);
473 *ptr++ = (uuid16 & 0xff00) >> 8;
474 uuids_start[0] += sizeof(uuid16);
475 }
476
477 return ptr;
478}
479
480static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
444{ 481{
445 u32 val; 482 u8 *ptr = data, *uuids_start = NULL;
446 int i; 483 struct bt_uuid *uuid;
484
485 if (len < 6)
486 return ptr;
447 487
448 for (i = 0; i < 12; i++) { 488 list_for_each_entry(uuid, &hdev->uuids, list) {
449 if (bluetooth_base_uuid[i] != uuid128[i]) 489 if (uuid->size != 32)
450 return 0; 490 continue;
491
492 if (!uuids_start) {
493 uuids_start = ptr;
494 uuids_start[0] = 1;
495 uuids_start[1] = EIR_UUID32_ALL;
496 ptr += 2;
497 }
498
499 /* Stop if not enough space to put next UUID */
500 if ((ptr - data) + sizeof(u32) > len) {
501 uuids_start[1] = EIR_UUID32_SOME;
502 break;
503 }
504
505 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
506 ptr += sizeof(u32);
507 uuids_start[0] += sizeof(u32);
451 } 508 }
452 509
453 val = get_unaligned_le32(&uuid128[12]); 510 return ptr;
454 if (val > 0xffff) 511}
455 return 0; 512
513static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
514{
515 u8 *ptr = data, *uuids_start = NULL;
516 struct bt_uuid *uuid;
517
518 if (len < 18)
519 return ptr;
456 520
457 return (u16) val; 521 list_for_each_entry(uuid, &hdev->uuids, list) {
522 if (uuid->size != 128)
523 continue;
524
525 if (!uuids_start) {
526 uuids_start = ptr;
527 uuids_start[0] = 1;
528 uuids_start[1] = EIR_UUID128_ALL;
529 ptr += 2;
530 }
531
532 /* Stop if not enough space to put next UUID */
533 if ((ptr - data) + 16 > len) {
534 uuids_start[1] = EIR_UUID128_SOME;
535 break;
536 }
537
538 memcpy(ptr, uuid->uuid, 16);
539 ptr += 16;
540 uuids_start[0] += 16;
541 }
542
543 return ptr;
458} 544}
459 545
460static void create_eir(struct hci_dev *hdev, u8 *data) 546static void create_eir(struct hci_dev *hdev, u8 *data)
461{ 547{
462 u8 *ptr = data; 548 u8 *ptr = data;
463 u16 eir_len = 0;
464 u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
465 int i, truncated = 0;
466 struct bt_uuid *uuid;
467 size_t name_len; 549 size_t name_len;
468 550
469 name_len = strlen(hdev->dev_name); 551 name_len = strlen(hdev->dev_name);
@@ -481,7 +563,6 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
481 563
482 memcpy(ptr + 2, hdev->dev_name, name_len); 564 memcpy(ptr + 2, hdev->dev_name, name_len);
483 565
484 eir_len += (name_len + 2);
485 ptr += (name_len + 2); 566 ptr += (name_len + 2);
486 } 567 }
487 568
@@ -490,7 +571,6 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
490 ptr[1] = EIR_TX_POWER; 571 ptr[1] = EIR_TX_POWER;
491 ptr[2] = (u8) hdev->inq_tx_power; 572 ptr[2] = (u8) hdev->inq_tx_power;
492 573
493 eir_len += 3;
494 ptr += 3; 574 ptr += 3;
495 } 575 }
496 576
@@ -503,60 +583,12 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
503 put_unaligned_le16(hdev->devid_product, ptr + 6); 583 put_unaligned_le16(hdev->devid_product, ptr + 6);
504 put_unaligned_le16(hdev->devid_version, ptr + 8); 584 put_unaligned_le16(hdev->devid_version, ptr + 8);
505 585
506 eir_len += 10;
507 ptr += 10; 586 ptr += 10;
508 } 587 }
509 588
510 memset(uuid16_list, 0, sizeof(uuid16_list)); 589 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
511 590 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
512 /* Group all UUID16 types */ 591 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
513 list_for_each_entry(uuid, &hdev->uuids, list) {
514 u16 uuid16;
515
516 uuid16 = get_uuid16(uuid->uuid);
517 if (uuid16 == 0)
518 return;
519
520 if (uuid16 < 0x1100)
521 continue;
522
523 if (uuid16 == PNP_INFO_SVCLASS_ID)
524 continue;
525
526 /* Stop if not enough space to put next UUID */
527 if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) {
528 truncated = 1;
529 break;
530 }
531
532 /* Check for duplicates */
533 for (i = 0; uuid16_list[i] != 0; i++)
534 if (uuid16_list[i] == uuid16)
535 break;
536
537 if (uuid16_list[i] == 0) {
538 uuid16_list[i] = uuid16;
539 eir_len += sizeof(u16);
540 }
541 }
542
543 if (uuid16_list[0] != 0) {
544 u8 *length = ptr;
545
546 /* EIR Data type */
547 ptr[1] = truncated ? EIR_UUID16_SOME : EIR_UUID16_ALL;
548
549 ptr += 2;
550 eir_len += 2;
551
552 for (i = 0; uuid16_list[i] != 0; i++) {
553 *ptr++ = (uuid16_list[i] & 0x00ff);
554 *ptr++ = (uuid16_list[i] & 0xff00) >> 8;
555 }
556
557 /* EIR Data length */
558 *length = (i * sizeof(u16)) + 1;
559 }
560} 592}
561 593
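
All three create_uuid*_list() helpers emit a single EIR structure: a length octet that grows as UUIDs are appended, a type octet that starts as the "complete list" type and is downgraded to the "partial list" type on truncation, then the packed UUID values. A sketch of that layout for 16-bit UUIDs, assuming the caller supplies enough buffer (0x03 being the EIR_UUID16_ALL type value):

	/* append one EIR structure of 16-bit UUIDs; returns new write ptr */
	static u8 *put_eir_uuid16(u8 *ptr, const u16 *uuids, int count)
	{
		u8 *hdr = ptr;

		hdr[0] = 1;	/* length: counts the type octet so far */
		hdr[1] = 0x03;	/* EIR_UUID16_ALL */
		ptr += 2;

		while (count--) {
			*ptr++ = *uuids & 0xff;		/* little-endian */
			*ptr++ = *uuids++ >> 8;
			hdr[0] += 2;
		}
		return ptr;
	}
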
562static int update_eir(struct hci_dev *hdev) 594static int update_eir(struct hci_dev *hdev)
@@ -728,13 +760,9 @@ static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
728 void *data), 760 void *data),
729 void *data) 761 void *data)
730{ 762{
731 struct list_head *p, *n; 763 struct pending_cmd *cmd, *tmp;
732
733 list_for_each_safe(p, n, &hdev->mgmt_pending) {
734 struct pending_cmd *cmd;
735
736 cmd = list_entry(p, struct pending_cmd, list);
737 764
765 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
738 if (opcode > 0 && cmd->opcode != opcode) 766 if (opcode > 0 && cmd->opcode != opcode)
739 continue; 767 continue;
740 768
@@ -777,14 +805,19 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
777 805
778 BT_DBG("request for %s", hdev->name); 806 BT_DBG("request for %s", hdev->name);
779 807
808 if (cp->val != 0x00 && cp->val != 0x01)
809 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
810 MGMT_STATUS_INVALID_PARAMS);
811
780 hci_dev_lock(hdev); 812 hci_dev_lock(hdev);
781 813
782 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) { 814 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
783 cancel_delayed_work(&hdev->power_off); 815 cancel_delayed_work(&hdev->power_off);
784 816
785 if (cp->val) { 817 if (cp->val) {
786 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev); 818 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
787 mgmt_powered(hdev, 1); 819 data, len);
820 err = mgmt_powered(hdev, 1);
788 goto failed; 821 goto failed;
789 } 822 }
790 } 823 }
@@ -807,9 +840,9 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
807 } 840 }
808 841
809 if (cp->val) 842 if (cp->val)
810 schedule_work(&hdev->power_on); 843 queue_work(hdev->req_workqueue, &hdev->power_on);
811 else 844 else
812 schedule_work(&hdev->power_off.work); 845 queue_work(hdev->req_workqueue, &hdev->power_off.work);
813 846
814 err = 0; 847 err = 0;
815 848
@@ -872,6 +905,10 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
872 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, 905 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
873 MGMT_STATUS_NOT_SUPPORTED); 906 MGMT_STATUS_NOT_SUPPORTED);
874 907
908 if (cp->val != 0x00 && cp->val != 0x01)
909 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
910 MGMT_STATUS_INVALID_PARAMS);
911
875 timeout = __le16_to_cpu(cp->timeout); 912 timeout = __le16_to_cpu(cp->timeout);
876 if (!cp->val && timeout > 0) 913 if (!cp->val && timeout > 0)
877 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, 914 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
@@ -971,6 +1008,10 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
971 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, 1008 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
972 MGMT_STATUS_NOT_SUPPORTED); 1009 MGMT_STATUS_NOT_SUPPORTED);
973 1010
1011 if (cp->val != 0x00 && cp->val != 0x01)
1012 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1013 MGMT_STATUS_INVALID_PARAMS);
1014
974 hci_dev_lock(hdev); 1015 hci_dev_lock(hdev);
975 1016
976 if (!hdev_is_powered(hdev)) { 1017 if (!hdev_is_powered(hdev)) {
@@ -1041,6 +1082,10 @@ static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1041 1082
1042 BT_DBG("request for %s", hdev->name); 1083 BT_DBG("request for %s", hdev->name);
1043 1084
1085 if (cp->val != 0x00 && cp->val != 0x01)
1086 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1087 MGMT_STATUS_INVALID_PARAMS);
1088
1044 hci_dev_lock(hdev); 1089 hci_dev_lock(hdev);
1045 1090
1046 if (cp->val) 1091 if (cp->val)
@@ -1073,6 +1118,10 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1073 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, 1118 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1074 MGMT_STATUS_NOT_SUPPORTED); 1119 MGMT_STATUS_NOT_SUPPORTED);
1075 1120
1121 if (cp->val != 0x00 && cp->val != 0x01)
1122 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1123 MGMT_STATUS_INVALID_PARAMS);
1124
1076 hci_dev_lock(hdev); 1125 hci_dev_lock(hdev);
1077 1126
1078 if (!hdev_is_powered(hdev)) { 1127 if (!hdev_is_powered(hdev)) {
@@ -1133,13 +1182,15 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1133 1182
1134 BT_DBG("request for %s", hdev->name); 1183 BT_DBG("request for %s", hdev->name);
1135 1184
1136 hci_dev_lock(hdev); 1185 if (!lmp_ssp_capable(hdev))
1186 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1187 MGMT_STATUS_NOT_SUPPORTED);
1137 1188
1138 if (!lmp_ssp_capable(hdev)) { 1189 if (cp->val != 0x00 && cp->val != 0x01)
1139 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, 1190 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1140 MGMT_STATUS_NOT_SUPPORTED); 1191 MGMT_STATUS_INVALID_PARAMS);
1141 goto failed; 1192
1142 } 1193 hci_dev_lock(hdev);
1143 1194
1144 val = !!cp->val; 1195 val = !!cp->val;
1145 1196
@@ -1199,6 +1250,10 @@ static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1199 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, 1250 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1200 MGMT_STATUS_NOT_SUPPORTED); 1251 MGMT_STATUS_NOT_SUPPORTED);
1201 1252
1253 if (cp->val != 0x00 && cp->val != 0x01)
1254 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1255 MGMT_STATUS_INVALID_PARAMS);
1256
1202 if (cp->val) 1257 if (cp->val)
1203 set_bit(HCI_HS_ENABLED, &hdev->dev_flags); 1258 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1204 else 1259 else
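
The setters all gain the same guard: any cp->val other than 0x00 or 0x01 is rejected with MGMT_STATUS_INVALID_PARAMS before hci_dev_lock() is taken, so malformed userspace requests never touch device state. The shape, condensed around a hypothetical MGMT_OP_SET_EXAMPLE:

	static int set_example(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
	{
		struct mgmt_mode *cp = data;

		if (cp->val != 0x00 && cp->val != 0x01)
			return cmd_status(sk, hdev->id, MGMT_OP_SET_EXAMPLE,
					  MGMT_STATUS_INVALID_PARAMS);

		hci_dev_lock(hdev);
		/* ... apply !!cp->val ... */
		hci_dev_unlock(hdev);

		return 0;
	}
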
@@ -1217,13 +1272,15 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1217 1272
1218 BT_DBG("request for %s", hdev->name); 1273 BT_DBG("request for %s", hdev->name);
1219 1274
1220 hci_dev_lock(hdev); 1275 if (!lmp_le_capable(hdev))
1276 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1277 MGMT_STATUS_NOT_SUPPORTED);
1221 1278
1222 if (!lmp_le_capable(hdev)) { 1279 if (cp->val != 0x00 && cp->val != 0x01)
1223 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE, 1280 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1224 MGMT_STATUS_NOT_SUPPORTED); 1281 MGMT_STATUS_INVALID_PARAMS);
1225 goto unlock; 1282
1226 } 1283 hci_dev_lock(hdev);
1227 1284
1228 val = !!cp->val; 1285 val = !!cp->val;
1229 enabled = lmp_host_le_capable(hdev); 1286 enabled = lmp_host_le_capable(hdev);
@@ -1275,6 +1332,25 @@ unlock:
1275 return err; 1332 return err;
1276} 1333}
1277 1334
1335static const u8 bluetooth_base_uuid[] = {
1336 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1337 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1338};
1339
1340static u8 get_uuid_size(const u8 *uuid)
1341{
1342 u32 val;
1343
1344 if (memcmp(uuid, bluetooth_base_uuid, 12))
1345 return 128;
1346
1347 val = get_unaligned_le32(&uuid[12]);
1348 if (val > 0xffff)
1349 return 32;
1350
1351 return 16;
1352}
1353
1278static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) 1354static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1279{ 1355{
1280 struct mgmt_cp_add_uuid *cp = data; 1356 struct mgmt_cp_add_uuid *cp = data;
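
get_uuid_size() classifies a 128-bit value by comparing its low 12 bytes against the Bluetooth base UUID; only values of the form xxxxxxxx-0000-1000-8000-00805F9B34FB collapse to 32 or 16 bits, and add_uuid() records the result so the EIR builders can bucket entries by size. For example (service value illustrative; 0x110B is Audio Sink):

	/* 0x110B on the base UUID, stored little-endian as elsewhere here */
	static const u8 a2dp_sink_uuid[16] = {
		0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
		0x00, 0x10, 0x00, 0x00, 0x0b, 0x11, 0x00, 0x00,
	};

	u8 size = get_uuid_size(a2dp_sink_uuid);	/* returns 16 */
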
@@ -1300,8 +1376,9 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1300 1376
1301 memcpy(uuid->uuid, cp->uuid, 16); 1377 memcpy(uuid->uuid, cp->uuid, 16);
1302 uuid->svc_hint = cp->svc_hint; 1378 uuid->svc_hint = cp->svc_hint;
1379 uuid->size = get_uuid_size(cp->uuid);
1303 1380
1304 list_add(&uuid->list, &hdev->uuids); 1381 list_add_tail(&uuid->list, &hdev->uuids);
1305 1382
1306 err = update_class(hdev); 1383 err = update_class(hdev);
1307 if (err < 0) 1384 if (err < 0)
@@ -1332,7 +1409,8 @@ static bool enable_service_cache(struct hci_dev *hdev)
1332 return false; 1409 return false;
1333 1410
1334 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) { 1411 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1335 schedule_delayed_work(&hdev->service_cache, CACHE_TIMEOUT); 1412 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
1413 CACHE_TIMEOUT);
1336 return true; 1414 return true;
1337 } 1415 }
1338 1416
@@ -1344,7 +1422,7 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1344{ 1422{
1345 struct mgmt_cp_remove_uuid *cp = data; 1423 struct mgmt_cp_remove_uuid *cp = data;
1346 struct pending_cmd *cmd; 1424 struct pending_cmd *cmd;
1347 struct list_head *p, *n; 1425 struct bt_uuid *match, *tmp;
1348 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 1426 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1349 int err, found; 1427 int err, found;
1350 1428
@@ -1372,9 +1450,7 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1372 1450
1373 found = 0; 1451 found = 0;
1374 1452
1375 list_for_each_safe(p, n, &hdev->uuids) { 1453 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1376 struct bt_uuid *match = list_entry(p, struct bt_uuid, list);
1377
1378 if (memcmp(match->uuid, cp->uuid, 16) != 0) 1454 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1379 continue; 1455 continue;
1380 1456
@@ -1422,13 +1498,19 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1422 1498
1423 BT_DBG("request for %s", hdev->name); 1499 BT_DBG("request for %s", hdev->name);
1424 1500
1425 hci_dev_lock(hdev); 1501 if (!lmp_bredr_capable(hdev))
1502 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1503 MGMT_STATUS_NOT_SUPPORTED);
1426 1504
1427 if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) { 1505 if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags))
1428 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 1506 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1429 MGMT_STATUS_BUSY); 1507 MGMT_STATUS_BUSY);
1430 goto unlock; 1508
1431 } 1509 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0)
1510 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1511 MGMT_STATUS_INVALID_PARAMS);
1512
1513 hci_dev_lock(hdev);
1432 1514
1433 hdev->major_class = cp->major; 1515 hdev->major_class = cp->major;
1434 hdev->minor_class = cp->minor; 1516 hdev->minor_class = cp->minor;
@@ -1483,9 +1565,21 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1483 MGMT_STATUS_INVALID_PARAMS); 1565 MGMT_STATUS_INVALID_PARAMS);
1484 } 1566 }
1485 1567
1568 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1569 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1570 MGMT_STATUS_INVALID_PARAMS);
1571
1486 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys, 1572 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
1487 key_count); 1573 key_count);
1488 1574
1575 for (i = 0; i < key_count; i++) {
1576 struct mgmt_link_key_info *key = &cp->keys[i];
1577
1578 if (key->addr.type != BDADDR_BREDR)
1579 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1580 MGMT_STATUS_INVALID_PARAMS);
1581 }
1582
1489 hci_dev_lock(hdev); 1583 hci_dev_lock(hdev);
1490 1584
1491 hci_link_keys_clear(hdev); 1585 hci_link_keys_clear(hdev);
@@ -1533,12 +1627,22 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1533 struct hci_conn *conn; 1627 struct hci_conn *conn;
1534 int err; 1628 int err;
1535 1629
1536 hci_dev_lock(hdev);
1537
1538 memset(&rp, 0, sizeof(rp)); 1630 memset(&rp, 0, sizeof(rp));
1539 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); 1631 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1540 rp.addr.type = cp->addr.type; 1632 rp.addr.type = cp->addr.type;
1541 1633
1634 if (!bdaddr_type_is_valid(cp->addr.type))
1635 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1636 MGMT_STATUS_INVALID_PARAMS,
1637 &rp, sizeof(rp));
1638
1639 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1640 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1641 MGMT_STATUS_INVALID_PARAMS,
1642 &rp, sizeof(rp));
1643
1644 hci_dev_lock(hdev);
1645
1542 if (!hdev_is_powered(hdev)) { 1646 if (!hdev_is_powered(hdev)) {
1543 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 1647 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1544 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); 1648 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
@@ -1596,6 +1700,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1596 u16 len) 1700 u16 len)
1597{ 1701{
1598 struct mgmt_cp_disconnect *cp = data; 1702 struct mgmt_cp_disconnect *cp = data;
1703 struct mgmt_rp_disconnect rp;
1599 struct hci_cp_disconnect dc; 1704 struct hci_cp_disconnect dc;
1600 struct pending_cmd *cmd; 1705 struct pending_cmd *cmd;
1601 struct hci_conn *conn; 1706 struct hci_conn *conn;
@@ -1603,17 +1708,26 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1603 1708
1604 BT_DBG(""); 1709 BT_DBG("");
1605 1710
1711 memset(&rp, 0, sizeof(rp));
1712 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1713 rp.addr.type = cp->addr.type;
1714
1715 if (!bdaddr_type_is_valid(cp->addr.type))
1716 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1717 MGMT_STATUS_INVALID_PARAMS,
1718 &rp, sizeof(rp));
1719
1606 hci_dev_lock(hdev); 1720 hci_dev_lock(hdev);
1607 1721
1608 if (!test_bit(HCI_UP, &hdev->flags)) { 1722 if (!test_bit(HCI_UP, &hdev->flags)) {
1609 err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT, 1723 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1610 MGMT_STATUS_NOT_POWERED); 1724 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
1611 goto failed; 1725 goto failed;
1612 } 1726 }
1613 1727
1614 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) { 1728 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1615 err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT, 1729 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1616 MGMT_STATUS_BUSY); 1730 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1617 goto failed; 1731 goto failed;
1618 } 1732 }
1619 1733
@@ -1624,8 +1738,8 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1624 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); 1738 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
1625 1739
1626 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) { 1740 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1627 err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT, 1741 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1628 MGMT_STATUS_NOT_CONNECTED); 1742 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1629 goto failed; 1743 goto failed;
1630 } 1744 }
1631 1745
@@ -1903,11 +2017,20 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1903 2017
1904 BT_DBG(""); 2018 BT_DBG("");
1905 2019
2020 memset(&rp, 0, sizeof(rp));
2021 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2022 rp.addr.type = cp->addr.type;
2023
2024 if (!bdaddr_type_is_valid(cp->addr.type))
2025 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2026 MGMT_STATUS_INVALID_PARAMS,
2027 &rp, sizeof(rp));
2028
1906 hci_dev_lock(hdev); 2029 hci_dev_lock(hdev);
1907 2030
1908 if (!hdev_is_powered(hdev)) { 2031 if (!hdev_is_powered(hdev)) {
1909 err = cmd_status(sk, hdev->id, MGMT_OP_PAIR_DEVICE, 2032 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
1910 MGMT_STATUS_NOT_POWERED); 2033 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
1911 goto unlock; 2034 goto unlock;
1912 } 2035 }
1913 2036
@@ -1924,10 +2047,6 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1924 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, 2047 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
1925 cp->addr.type, sec_level, auth_type); 2048 cp->addr.type, sec_level, auth_type);
1926 2049
1927 memset(&rp, 0, sizeof(rp));
1928 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1929 rp.addr.type = cp->addr.type;
1930
1931 if (IS_ERR(conn)) { 2050 if (IS_ERR(conn)) {
1932 int status; 2051 int status;
1933 2052
@@ -2254,24 +2373,16 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2254 2373
2255 hci_dev_lock(hdev); 2374 hci_dev_lock(hdev);
2256 2375
2257 if (!hdev_is_powered(hdev)) {
2258 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
2259 MGMT_STATUS_NOT_POWERED, &cp->addr,
2260 sizeof(cp->addr));
2261 goto unlock;
2262 }
2263
2264 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash, 2376 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2265 cp->randomizer); 2377 cp->randomizer);
2266 if (err < 0) 2378 if (err < 0)
2267 status = MGMT_STATUS_FAILED; 2379 status = MGMT_STATUS_FAILED;
2268 else 2380 else
2269 status = 0; 2381 status = MGMT_STATUS_SUCCESS;
2270 2382
2271 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status, 2383 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2272 &cp->addr, sizeof(cp->addr)); 2384 &cp->addr, sizeof(cp->addr));
2273 2385
2274unlock:
2275 hci_dev_unlock(hdev); 2386 hci_dev_unlock(hdev);
2276 return err; 2387 return err;
2277} 2388}
@@ -2287,24 +2398,15 @@ static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2287 2398
2288 hci_dev_lock(hdev); 2399 hci_dev_lock(hdev);
2289 2400
2290 if (!hdev_is_powered(hdev)) {
2291 err = cmd_complete(sk, hdev->id,
2292 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2293 MGMT_STATUS_NOT_POWERED, &cp->addr,
2294 sizeof(cp->addr));
2295 goto unlock;
2296 }
2297
2298 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr); 2401 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2299 if (err < 0) 2402 if (err < 0)
2300 status = MGMT_STATUS_INVALID_PARAMS; 2403 status = MGMT_STATUS_INVALID_PARAMS;
2301 else 2404 else
2302 status = 0; 2405 status = MGMT_STATUS_SUCCESS;
2303 2406
2304 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA, 2407 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2305 status, &cp->addr, sizeof(cp->addr)); 2408 status, &cp->addr, sizeof(cp->addr));
2306 2409
2307unlock:
2308 hci_dev_unlock(hdev); 2410 hci_dev_unlock(hdev);
2309 return err; 2411 return err;
2310} 2412}
@@ -2365,31 +2467,45 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2365 2467
2366 switch (hdev->discovery.type) { 2468 switch (hdev->discovery.type) {
2367 case DISCOV_TYPE_BREDR: 2469 case DISCOV_TYPE_BREDR:
2368 if (lmp_bredr_capable(hdev)) 2470 if (!lmp_bredr_capable(hdev)) {
2369 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR); 2471 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2370 else 2472 MGMT_STATUS_NOT_SUPPORTED);
2371 err = -ENOTSUPP; 2473 mgmt_pending_remove(cmd);
2474 goto failed;
2475 }
2476
2477 err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
2372 break; 2478 break;
2373 2479
2374 case DISCOV_TYPE_LE: 2480 case DISCOV_TYPE_LE:
2375 if (lmp_host_le_capable(hdev)) 2481 if (!lmp_host_le_capable(hdev)) {
2376 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT, 2482 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2377 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY); 2483 MGMT_STATUS_NOT_SUPPORTED);
2378 else 2484 mgmt_pending_remove(cmd);
2379 err = -ENOTSUPP; 2485 goto failed;
2486 }
2487
2488 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT,
2489 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
2380 break; 2490 break;
2381 2491
2382 case DISCOV_TYPE_INTERLEAVED: 2492 case DISCOV_TYPE_INTERLEAVED:
2383 if (lmp_host_le_capable(hdev) && lmp_bredr_capable(hdev)) 2493 if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
2384 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT, 2494 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2385 LE_SCAN_WIN, 2495 MGMT_STATUS_NOT_SUPPORTED);
2386 LE_SCAN_TIMEOUT_BREDR_LE); 2496 mgmt_pending_remove(cmd);
2387 else 2497 goto failed;
2388 err = -ENOTSUPP; 2498 }
2499
2500 err = hci_le_scan(hdev, LE_SCAN_TYPE, LE_SCAN_INT, LE_SCAN_WIN,
2501 LE_SCAN_TIMEOUT_BREDR_LE);
2389 break; 2502 break;
2390 2503
2391 default: 2504 default:
2392 err = -EINVAL; 2505 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2506 MGMT_STATUS_INVALID_PARAMS);
2507 mgmt_pending_remove(cmd);
2508 goto failed;
2393 } 2509 }
2394 2510
2395 if (err < 0) 2511 if (err < 0)
@@ -2510,7 +2626,8 @@ static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2510 hci_inquiry_cache_update_resolve(hdev, e); 2626 hci_inquiry_cache_update_resolve(hdev, e);
2511 } 2627 }
2512 2628
2513 err = 0; 2629 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
2630 sizeof(cp->addr));
2514 2631
2515failed: 2632failed:
2516 hci_dev_unlock(hdev); 2633 hci_dev_unlock(hdev);
@@ -2526,13 +2643,18 @@ static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2526 2643
2527 BT_DBG("%s", hdev->name); 2644 BT_DBG("%s", hdev->name);
2528 2645
2646 if (!bdaddr_type_is_valid(cp->addr.type))
2647 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
2648 MGMT_STATUS_INVALID_PARAMS,
2649 &cp->addr, sizeof(cp->addr));
2650
2529 hci_dev_lock(hdev); 2651 hci_dev_lock(hdev);
2530 2652
2531 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type); 2653 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2532 if (err < 0) 2654 if (err < 0)
2533 status = MGMT_STATUS_FAILED; 2655 status = MGMT_STATUS_FAILED;
2534 else 2656 else
2535 status = 0; 2657 status = MGMT_STATUS_SUCCESS;
2536 2658
2537 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status, 2659 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2538 &cp->addr, sizeof(cp->addr)); 2660 &cp->addr, sizeof(cp->addr));
@@ -2551,13 +2673,18 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2551 2673
2552 BT_DBG("%s", hdev->name); 2674 BT_DBG("%s", hdev->name);
2553 2675
2676 if (!bdaddr_type_is_valid(cp->addr.type))
2677 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
2678 MGMT_STATUS_INVALID_PARAMS,
2679 &cp->addr, sizeof(cp->addr));
2680
2554 hci_dev_lock(hdev); 2681 hci_dev_lock(hdev);
2555 2682
2556 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type); 2683 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
2557 if (err < 0) 2684 if (err < 0)
2558 status = MGMT_STATUS_INVALID_PARAMS; 2685 status = MGMT_STATUS_INVALID_PARAMS;
2559 else 2686 else
2560 status = 0; 2687 status = MGMT_STATUS_SUCCESS;
2561 2688
2562 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status, 2689 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
2563 &cp->addr, sizeof(cp->addr)); 2690 &cp->addr, sizeof(cp->addr));
@@ -2612,6 +2739,10 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2612 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 2739 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2613 MGMT_STATUS_NOT_SUPPORTED); 2740 MGMT_STATUS_NOT_SUPPORTED);
2614 2741
2742 if (cp->val != 0x00 && cp->val != 0x01)
2743 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2744 MGMT_STATUS_INVALID_PARAMS);
2745
2615 if (!hdev_is_powered(hdev)) 2746 if (!hdev_is_powered(hdev))
2616 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 2747 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2617 MGMT_STATUS_NOT_POWERED); 2748 MGMT_STATUS_NOT_POWERED);
@@ -2659,12 +2790,23 @@ done:
2659 return err; 2790 return err;
2660} 2791}
2661 2792
2793static bool ltk_is_valid(struct mgmt_ltk_info *key)
2794{
2795 if (key->authenticated != 0x00 && key->authenticated != 0x01)
2796 return false;
2797 if (key->master != 0x00 && key->master != 0x01)
2798 return false;
2799 if (!bdaddr_type_is_le(key->addr.type))
2800 return false;
2801 return true;
2802}
2803
2662static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, 2804static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2663 void *cp_data, u16 len) 2805 void *cp_data, u16 len)
2664{ 2806{
2665 struct mgmt_cp_load_long_term_keys *cp = cp_data; 2807 struct mgmt_cp_load_long_term_keys *cp = cp_data;
2666 u16 key_count, expected_len; 2808 u16 key_count, expected_len;
2667 int i; 2809 int i, err;
2668 2810
2669 key_count = __le16_to_cpu(cp->key_count); 2811 key_count = __le16_to_cpu(cp->key_count);
2670 2812
@@ -2674,11 +2816,20 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2674 BT_ERR("load_keys: expected %u bytes, got %u bytes", 2816 BT_ERR("load_keys: expected %u bytes, got %u bytes",
2675 len, expected_len); 2817 len, expected_len);
2676 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 2818 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
2677 EINVAL); 2819 MGMT_STATUS_INVALID_PARAMS);
2678 } 2820 }
2679 2821
2680 BT_DBG("%s key_count %u", hdev->name, key_count); 2822 BT_DBG("%s key_count %u", hdev->name, key_count);
2681 2823
2824 for (i = 0; i < key_count; i++) {
2825 struct mgmt_ltk_info *key = &cp->keys[i];
2826
2827 if (!ltk_is_valid(key))
2828 return cmd_status(sk, hdev->id,
2829 MGMT_OP_LOAD_LONG_TERM_KEYS,
2830 MGMT_STATUS_INVALID_PARAMS);
2831 }
2832
2682 hci_dev_lock(hdev); 2833 hci_dev_lock(hdev);
2683 2834
2684 hci_smp_ltks_clear(hdev); 2835 hci_smp_ltks_clear(hdev);
@@ -2698,9 +2849,12 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2698 key->enc_size, key->ediv, key->rand); 2849 key->enc_size, key->ediv, key->rand);
2699 } 2850 }
2700 2851
2852 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
2853 NULL, 0);
2854
2701 hci_dev_unlock(hdev); 2855 hci_dev_unlock(hdev);
2702 2856
2703 return 0; 2857 return err;
2704} 2858}
2705 2859
2706static const struct mgmt_handler { 2860static const struct mgmt_handler {
@@ -2915,6 +3069,8 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
2915 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); 3069 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
2916 3070
2917 if (powered) { 3071 if (powered) {
3072 u8 link_sec;
3073
2918 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) && 3074 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
2919 !lmp_host_ssp_capable(hdev)) { 3075 !lmp_host_ssp_capable(hdev)) {
2920 u8 ssp = 1; 3076 u8 ssp = 1;
@@ -2938,6 +3094,11 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
2938 sizeof(cp), &cp); 3094 sizeof(cp), &cp);
2939 } 3095 }
2940 3096
3097 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3098 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3099 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE,
3100 sizeof(link_sec), &link_sec);
3101
2941 if (lmp_bredr_capable(hdev)) { 3102 if (lmp_bredr_capable(hdev)) {
2942 set_bredr_scan(hdev); 3103 set_bredr_scan(hdev);
2943 update_class(hdev); 3104 update_class(hdev);
@@ -2946,7 +3107,13 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
2946 } 3107 }
2947 } else { 3108 } else {
2948 u8 status = MGMT_STATUS_NOT_POWERED; 3109 u8 status = MGMT_STATUS_NOT_POWERED;
3110 u8 zero_cod[] = { 0, 0, 0 };
3111
2949 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); 3112 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3113
3114 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3115 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3116 zero_cod, sizeof(zero_cod), NULL);
2950 } 3117 }
2951 3118
2952 err = new_settings(hdev, match.sk); 3119 err = new_settings(hdev, match.sk);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 57f250c20e39..b5178d62064e 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -900,8 +900,6 @@ static void sco_conn_ready(struct sco_conn *conn)
900 900
901 BT_DBG("conn %p", conn); 901 BT_DBG("conn %p", conn);
902 902
903 sco_conn_lock(conn);
904
905 if (sk) { 903 if (sk) {
906 sco_sock_clear_timer(sk); 904 sco_sock_clear_timer(sk);
907 bh_lock_sock(sk); 905 bh_lock_sock(sk);
@@ -909,9 +907,13 @@ static void sco_conn_ready(struct sco_conn *conn)
909 sk->sk_state_change(sk); 907 sk->sk_state_change(sk);
910 bh_unlock_sock(sk); 908 bh_unlock_sock(sk);
911 } else { 909 } else {
910 sco_conn_lock(conn);
911
912 parent = sco_get_sock_listen(conn->src); 912 parent = sco_get_sock_listen(conn->src);
913 if (!parent) 913 if (!parent) {
914 goto done; 914 sco_conn_unlock(conn);
915 return;
916 }
915 917
916 bh_lock_sock(parent); 918 bh_lock_sock(parent);
917 919
@@ -919,7 +921,8 @@ static void sco_conn_ready(struct sco_conn *conn)
919 BTPROTO_SCO, GFP_ATOMIC); 921 BTPROTO_SCO, GFP_ATOMIC);
920 if (!sk) { 922 if (!sk) {
921 bh_unlock_sock(parent); 923 bh_unlock_sock(parent);
922 goto done; 924 sco_conn_unlock(conn);
925 return;
923 } 926 }
924 927
925 sco_sock_init(sk, parent); 928 sco_sock_init(sk, parent);
@@ -939,10 +942,9 @@ static void sco_conn_ready(struct sco_conn *conn)
939 parent->sk_data_ready(parent, 1); 942 parent->sk_data_ready(parent, 1);
940 943
941 bh_unlock_sock(parent); 944 bh_unlock_sock(parent);
942 }
943 945
944done: 946 sco_conn_unlock(conn);
945 sco_conn_unlock(conn); 947 }
946} 948}
947 949
948/* ----- SCO interface with lower layer (HCI) ----- */ 950/* ----- SCO interface with lower layer (HCI) ----- */
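
The sco_conn_ready() rework narrows the lock scope: sco_conn_lock() is taken only on the listener path that actually walks connection state, and the shared "goto done" unlock funnel is replaced by an explicit unlock-and-return at each early exit. The resulting shape, with hypothetical helpers standing in for the socket bookkeeping:

	if (sk) {
		/* existing socket: no conn lock needed */
		wake_existing(sk);		/* hypothetical */
	} else {
		sco_conn_lock(conn);

		parent = sco_get_sock_listen(conn->src);
		if (!parent) {
			sco_conn_unlock(conn);
			return;
		}

		accept_child(parent, conn);	/* hypothetical */
		sco_conn_unlock(conn);
	}
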
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 68a9587c9694..5abefb12891d 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -859,6 +859,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
859 859
860 skb_pull(skb, sizeof(code)); 860 skb_pull(skb, sizeof(code));
861 861
862 /*
863 * The SMP context must be initialized for all other PDUs except
864 * pairing and security requests. If we get any other PDU when
865 * not initialized simply disconnect (done if this function
866 * returns an error).
867 */
868 if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ &&
869 !conn->smp_chan) {
870 BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
871 kfree_skb(skb);
872 return -ENOTSUPP;
873 }
874
862 switch (code) { 875 switch (code) {
863 case SMP_CMD_PAIRING_REQ: 876 case SMP_CMD_PAIRING_REQ:
864 reason = smp_cmd_pairing_req(conn, skb); 877 reason = smp_cmd_pairing_req(conn, skb);
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index 6dee7bf648a9..aa0d3b2f1bb7 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -46,3 +46,17 @@ config BRIDGE_IGMP_SNOOPING
46 Say N to exclude this support and reduce the binary size. 46 Say N to exclude this support and reduce the binary size.
47 47
48 If unsure, say Y. 48 If unsure, say Y.
49
50config BRIDGE_VLAN_FILTERING
51 bool "VLAN filtering"
52 depends on BRIDGE
53 depends on VLAN_8021Q
54 default n
55 ---help---
56 If you say Y here, then the Ethernet bridge will be able selectively
56 If you say Y here, then the Ethernet bridge will be able to selectively
57 receive and forward traffic based on VLAN information in the packet and
58 any VLAN information configured on the bridge port or bridge device.
59
60 Say N to exclude this support and reduce the binary size.
61
62 If unsure, say Y.
diff --git a/net/bridge/Makefile b/net/bridge/Makefile
index e859098f5ee9..e85498b2f166 100644
--- a/net/bridge/Makefile
+++ b/net/bridge/Makefile
@@ -14,4 +14,6 @@ bridge-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o
14 14
15bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o 15bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o
16 16
17bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o
18
17obj-$(CONFIG_BRIDGE_NF_EBTABLES) += netfilter/ 19obj-$(CONFIG_BRIDGE_NF_EBTABLES) += netfilter/
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index e1bc090bc00a..d5f1d3fd4b28 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -30,6 +30,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
30 struct net_bridge_fdb_entry *dst; 30 struct net_bridge_fdb_entry *dst;
31 struct net_bridge_mdb_entry *mdst; 31 struct net_bridge_mdb_entry *mdst;
32 struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats); 32 struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
33 u16 vid = 0;
33 34
34 rcu_read_lock(); 35 rcu_read_lock();
35#ifdef CONFIG_BRIDGE_NETFILTER 36#ifdef CONFIG_BRIDGE_NETFILTER
@@ -45,6 +46,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
45 brstats->tx_bytes += skb->len; 46 brstats->tx_bytes += skb->len;
46 u64_stats_update_end(&brstats->syncp); 47 u64_stats_update_end(&brstats->syncp);
47 48
49 if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
50 goto out;
51
48 BR_INPUT_SKB_CB(skb)->brdev = dev; 52 BR_INPUT_SKB_CB(skb)->brdev = dev;
49 53
50 skb_reset_mac_header(skb); 54 skb_reset_mac_header(skb);
@@ -67,7 +71,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
67 br_multicast_deliver(mdst, skb); 71 br_multicast_deliver(mdst, skb);
68 else 72 else
69 br_flood_deliver(br, skb); 73 br_flood_deliver(br, skb);
70 } else if ((dst = __br_fdb_get(br, dest)) != NULL) 74 } else if ((dst = __br_fdb_get(br, dest, vid)) != NULL)
71 br_deliver(dst->dst, skb); 75 br_deliver(dst->dst, skb);
72 else 76 else
73 br_flood_deliver(br, skb); 77 br_flood_deliver(br, skb);
@@ -176,7 +180,6 @@ static int br_set_mac_address(struct net_device *dev, void *p)
176 br_fdb_change_mac_address(br, addr->sa_data); 180 br_fdb_change_mac_address(br, addr->sa_data);
177 br_stp_change_bridge_id(br, addr->sa_data); 181 br_stp_change_bridge_id(br, addr->sa_data);
178 } 182 }
179 br->flags |= BR_SET_MAC_ADDR;
180 spin_unlock_bh(&br->lock); 183 spin_unlock_bh(&br->lock);
181 184
182 return 0; 185 return 0;
@@ -266,7 +269,7 @@ void br_netpoll_disable(struct net_bridge_port *p)
266 269
267 p->np = NULL; 270 p->np = NULL;
268 271
269 __netpoll_free_rcu(np); 272 __netpoll_free_async(np);
270} 273}
271 274
272#endif 275#endif
@@ -314,6 +317,7 @@ static const struct net_device_ops br_netdev_ops = {
314 .ndo_fdb_dump = br_fdb_dump, 317 .ndo_fdb_dump = br_fdb_dump,
315 .ndo_bridge_getlink = br_getlink, 318 .ndo_bridge_getlink = br_getlink,
316 .ndo_bridge_setlink = br_setlink, 319 .ndo_bridge_setlink = br_setlink,
320 .ndo_bridge_dellink = br_dellink,
317}; 321};
318 322
319static void br_dev_free(struct net_device *dev) 323static void br_dev_free(struct net_device *dev)
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index d9576e6de2b8..8117900af4de 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -23,11 +23,12 @@
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/atomic.h> 24#include <linux/atomic.h>
25#include <asm/unaligned.h> 25#include <asm/unaligned.h>
26#include <linux/if_vlan.h>
26#include "br_private.h" 27#include "br_private.h"
27 28
28static struct kmem_cache *br_fdb_cache __read_mostly; 29static struct kmem_cache *br_fdb_cache __read_mostly;
29static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, 30static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
30 const unsigned char *addr); 31 const unsigned char *addr, u16 vid);
31static void fdb_notify(struct net_bridge *br, 32static void fdb_notify(struct net_bridge *br,
32 const struct net_bridge_fdb_entry *, int); 33 const struct net_bridge_fdb_entry *, int);
33 34
@@ -67,11 +68,11 @@ static inline int has_expired(const struct net_bridge *br,
67 time_before_eq(fdb->updated + hold_time(br), jiffies); 68 time_before_eq(fdb->updated + hold_time(br), jiffies);
68} 69}
69 70
70static inline int br_mac_hash(const unsigned char *mac) 71static inline int br_mac_hash(const unsigned char *mac, __u16 vid)
71{ 72{
72 /* use 1 byte of OUI cnd 3 bytes of NIC */ 73 /* use 1 byte of OUI and 3 bytes of NIC */
73 u32 key = get_unaligned((u32 *)(mac + 2)); 74 u32 key = get_unaligned((u32 *)(mac + 2));
74 return jhash_1word(key, fdb_salt) & (BR_HASH_SIZE - 1); 75 return jhash_2words(key, vid, fdb_salt) & (BR_HASH_SIZE - 1);
75} 76}
76 77
77static void fdb_rcu_free(struct rcu_head *head) 78static void fdb_rcu_free(struct rcu_head *head)
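
Making the FDB VLAN-aware means the bucket index has to mix the VLAN id with the MAC; otherwise the same address learned on two VLANs would always land in one chain keyed by MAC alone. jhash_2words() folds both into the hash. In isolation (EX_HASH_SIZE stands in for BR_HASH_SIZE from br_private.h):

	#include <linux/jhash.h>
	#include <asm/unaligned.h>

	#define EX_HASH_SIZE 256	/* power of two */

	static inline int mac_vid_hash(const unsigned char *mac, u16 vid,
				       u32 salt)
	{
		/* 1 byte of OUI and 3 bytes of NIC, as in br_mac_hash() */
		u32 key = get_unaligned((u32 *)(mac + 2));

		return jhash_2words(key, vid, salt) & (EX_HASH_SIZE - 1);
	}
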
@@ -91,6 +92,7 @@ static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
91void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr) 92void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
92{ 93{
93 struct net_bridge *br = p->br; 94 struct net_bridge *br = p->br;
95 bool no_vlan = (nbp_get_vlan_info(p) == NULL) ? true : false;
94 int i; 96 int i;
95 97
96 spin_lock_bh(&br->hash_lock); 98 spin_lock_bh(&br->hash_lock);
@@ -105,10 +107,12 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
105 if (f->dst == p && f->is_local) { 107 if (f->dst == p && f->is_local) {
106 /* maybe another port has same hw addr? */ 108 /* maybe another port has same hw addr? */
107 struct net_bridge_port *op; 109 struct net_bridge_port *op;
110 u16 vid = f->vlan_id;
108 list_for_each_entry(op, &br->port_list, list) { 111 list_for_each_entry(op, &br->port_list, list) {
109 if (op != p && 112 if (op != p &&
110 ether_addr_equal(op->dev->dev_addr, 113 ether_addr_equal(op->dev->dev_addr,
111 f->addr.addr)) { 114 f->addr.addr) &&
115 nbp_vlan_find(op, vid)) {
112 f->dst = op; 116 f->dst = op;
113 goto insert; 117 goto insert;
114 } 118 }
@@ -116,27 +120,55 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
116 120
117 /* delete old one */ 121 /* delete old one */
118 fdb_delete(br, f); 122 fdb_delete(br, f);
119 goto insert; 123insert:
124 /* insert new address, may fail if invalid
125 * address or dup.
126 */
127 fdb_insert(br, p, newaddr, vid);
128
129 /* if this port has no vlan information
130 * configured, we can safely be done at
131 * this point.
132 */
133 if (no_vlan)
134 goto done;
120 } 135 }
121 } 136 }
122 } 137 }
123 insert:
124 /* insert new address, may fail if invalid address or dup. */
125 fdb_insert(br, p, newaddr);
126 138
139done:
127 spin_unlock_bh(&br->hash_lock); 140 spin_unlock_bh(&br->hash_lock);
128} 141}
129 142
130void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr) 143void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
131{ 144{
132 struct net_bridge_fdb_entry *f; 145 struct net_bridge_fdb_entry *f;
146 struct net_port_vlans *pv;
147 u16 vid = 0;
133 148
134 /* If old entry was unassociated with any port, then delete it. */ 149 /* If old entry was unassociated with any port, then delete it. */
135 f = __br_fdb_get(br, br->dev->dev_addr); 150 f = __br_fdb_get(br, br->dev->dev_addr, 0);
136 if (f && f->is_local && !f->dst) 151 if (f && f->is_local && !f->dst)
137 fdb_delete(br, f); 152 fdb_delete(br, f);
138 153
139 fdb_insert(br, NULL, newaddr); 154 fdb_insert(br, NULL, newaddr, 0);
155
156 /* Now remove and add entries for every VLAN configured on the
157 * bridge. This function runs under RTNL so the bitmap will not
158 * change from under us.
159 */
160 pv = br_get_vlan_info(br);
161 if (!pv)
162 return;
163
164 for (vid = find_next_bit(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN, vid);
165 vid < BR_VLAN_BITMAP_LEN;
166 vid = find_next_bit(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN, vid+1)) {
167 f = __br_fdb_get(br, br->dev->dev_addr, vid);
168 if (f && f->is_local && !f->dst)
169 fdb_delete(br, f);
170 fdb_insert(br, NULL, newaddr, vid);
171 }
140} 172}
141 173
142void br_fdb_cleanup(unsigned long _data) 174void br_fdb_cleanup(unsigned long _data)
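
br_fdb_change_mac_address() now refreshes one local entry per configured VID,
walking the bridge's VLAN bitmap under RTNL. A toy model of that walk; the
naive bit scanner stands in for the kernel's find_next_bit(), whose size
argument is likewise a bit count.

#include <stdio.h>

#define VLAN_N_VID	4096
#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))
#define BITMAP_LONGS	((VLAN_N_VID + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned int find_next_bit(const unsigned long *map,
				  unsigned int size, unsigned int start)
{
	for (unsigned int i = start; i < size; i++)
		if (map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			return i;
	return size;	/* kernel convention: "not found" == size */
}

static void set_vid(unsigned long *map, unsigned int vid)
{
	map[vid / BITS_PER_LONG] |= 1UL << (vid % BITS_PER_LONG);
}

int main(void)
{
	unsigned long vlan_bitmap[BITMAP_LONGS] = { 0 };

	set_vid(vlan_bitmap, 1);
	set_vid(vlan_bitmap, 100);
	set_vid(vlan_bitmap, 4000);

	for (unsigned int vid = find_next_bit(vlan_bitmap, VLAN_N_VID, 0);
	     vid < VLAN_N_VID;
	     vid = find_next_bit(vlan_bitmap, VLAN_N_VID, vid + 1))
		printf("refresh local fdb entry for vid %u\n", vid);
	return 0;
}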
@@ -231,13 +263,16 @@ void br_fdb_delete_by_port(struct net_bridge *br,
231 263
232/* No locking or refcounting, assumes caller has rcu_read_lock */ 264/* No locking or refcounting, assumes caller has rcu_read_lock */
233struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br, 265struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
234 const unsigned char *addr) 266 const unsigned char *addr,
267 __u16 vid)
235{ 268{
236 struct hlist_node *h; 269 struct hlist_node *h;
237 struct net_bridge_fdb_entry *fdb; 270 struct net_bridge_fdb_entry *fdb;
238 271
239 hlist_for_each_entry_rcu(fdb, h, &br->hash[br_mac_hash(addr)], hlist) { 272 hlist_for_each_entry_rcu(fdb, h,
240 if (ether_addr_equal(fdb->addr.addr, addr)) { 273 &br->hash[br_mac_hash(addr, vid)], hlist) {
274 if (ether_addr_equal(fdb->addr.addr, addr) &&
275 fdb->vlan_id == vid) {
241 if (unlikely(has_expired(br, fdb))) 276 if (unlikely(has_expired(br, fdb)))
242 break; 277 break;
243 return fdb; 278 return fdb;
@@ -261,7 +296,7 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
261 if (!port) 296 if (!port)
262 ret = 0; 297 ret = 0;
263 else { 298 else {
264 fdb = __br_fdb_get(port->br, addr); 299 fdb = __br_fdb_get(port->br, addr, 0);
265 ret = fdb && fdb->dst && fdb->dst->dev != dev && 300 ret = fdb && fdb->dst && fdb->dst->dev != dev &&
266 fdb->dst->state == BR_STATE_FORWARDING; 301 fdb->dst->state == BR_STATE_FORWARDING;
267 } 302 }
@@ -325,26 +360,30 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
325} 360}
326 361
327static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head, 362static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
328 const unsigned char *addr) 363 const unsigned char *addr,
364 __u16 vid)
329{ 365{
330 struct hlist_node *h; 366 struct hlist_node *h;
331 struct net_bridge_fdb_entry *fdb; 367 struct net_bridge_fdb_entry *fdb;
332 368
333 hlist_for_each_entry(fdb, h, head, hlist) { 369 hlist_for_each_entry(fdb, h, head, hlist) {
334 if (ether_addr_equal(fdb->addr.addr, addr)) 370 if (ether_addr_equal(fdb->addr.addr, addr) &&
371 fdb->vlan_id == vid)
335 return fdb; 372 return fdb;
336 } 373 }
337 return NULL; 374 return NULL;
338} 375}
339 376
340static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head, 377static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
341 const unsigned char *addr) 378 const unsigned char *addr,
379 __u16 vid)
342{ 380{
343 struct hlist_node *h; 381 struct hlist_node *h;
344 struct net_bridge_fdb_entry *fdb; 382 struct net_bridge_fdb_entry *fdb;
345 383
346 hlist_for_each_entry_rcu(fdb, h, head, hlist) { 384 hlist_for_each_entry_rcu(fdb, h, head, hlist) {
347 if (ether_addr_equal(fdb->addr.addr, addr)) 385 if (ether_addr_equal(fdb->addr.addr, addr) &&
386 fdb->vlan_id == vid)
348 return fdb; 387 return fdb;
349 } 388 }
350 return NULL; 389 return NULL;
@@ -352,7 +391,8 @@ static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
352 391
353static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head, 392static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
354 struct net_bridge_port *source, 393 struct net_bridge_port *source,
355 const unsigned char *addr) 394 const unsigned char *addr,
395 __u16 vid)
356{ 396{
357 struct net_bridge_fdb_entry *fdb; 397 struct net_bridge_fdb_entry *fdb;
358 398
@@ -360,6 +400,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
360 if (fdb) { 400 if (fdb) {
361 memcpy(fdb->addr.addr, addr, ETH_ALEN); 401 memcpy(fdb->addr.addr, addr, ETH_ALEN);
362 fdb->dst = source; 402 fdb->dst = source;
403 fdb->vlan_id = vid;
363 fdb->is_local = 0; 404 fdb->is_local = 0;
364 fdb->is_static = 0; 405 fdb->is_static = 0;
365 fdb->updated = fdb->used = jiffies; 406 fdb->updated = fdb->used = jiffies;
@@ -369,15 +410,15 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
369} 410}
370 411
371static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, 412static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
372 const unsigned char *addr) 413 const unsigned char *addr, u16 vid)
373{ 414{
374 struct hlist_head *head = &br->hash[br_mac_hash(addr)]; 415 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
375 struct net_bridge_fdb_entry *fdb; 416 struct net_bridge_fdb_entry *fdb;
376 417
377 if (!is_valid_ether_addr(addr)) 418 if (!is_valid_ether_addr(addr))
378 return -EINVAL; 419 return -EINVAL;
379 420
380 fdb = fdb_find(head, addr); 421 fdb = fdb_find(head, addr, vid);
381 if (fdb) { 422 if (fdb) {
382 /* it is okay to have multiple ports with same 423 /* it is okay to have multiple ports with same
383 * address, just use the first one. 424 * address, just use the first one.
@@ -390,7 +431,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
390 fdb_delete(br, fdb); 431 fdb_delete(br, fdb);
391 } 432 }
392 433
393 fdb = fdb_create(head, source, addr); 434 fdb = fdb_create(head, source, addr, vid);
394 if (!fdb) 435 if (!fdb)
395 return -ENOMEM; 436 return -ENOMEM;
396 437
@@ -401,20 +442,20 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
401 442
402/* Add entry for local address of interface */ 443/* Add entry for local address of interface */
403int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source, 444int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
404 const unsigned char *addr) 445 const unsigned char *addr, u16 vid)
405{ 446{
406 int ret; 447 int ret;
407 448
408 spin_lock_bh(&br->hash_lock); 449 spin_lock_bh(&br->hash_lock);
409 ret = fdb_insert(br, source, addr); 450 ret = fdb_insert(br, source, addr, vid);
410 spin_unlock_bh(&br->hash_lock); 451 spin_unlock_bh(&br->hash_lock);
411 return ret; 452 return ret;
412} 453}
413 454
414void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, 455void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
415 const unsigned char *addr) 456 const unsigned char *addr, u16 vid)
416{ 457{
417 struct hlist_head *head = &br->hash[br_mac_hash(addr)]; 458 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
418 struct net_bridge_fdb_entry *fdb; 459 struct net_bridge_fdb_entry *fdb;
419 460
420 /* some users want to always flood. */ 461 /* some users want to always flood. */
@@ -426,7 +467,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
426 source->state == BR_STATE_FORWARDING)) 467 source->state == BR_STATE_FORWARDING))
427 return; 468 return;
428 469
429 fdb = fdb_find_rcu(head, addr); 470 fdb = fdb_find_rcu(head, addr, vid);
430 if (likely(fdb)) { 471 if (likely(fdb)) {
431 /* attempt to update an entry for a local interface */ 472 /* attempt to update an entry for a local interface */
432 if (unlikely(fdb->is_local)) { 473 if (unlikely(fdb->is_local)) {
@@ -441,8 +482,8 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
441 } 482 }
442 } else { 483 } else {
443 spin_lock(&br->hash_lock); 484 spin_lock(&br->hash_lock);
444 if (likely(!fdb_find(head, addr))) { 485 if (likely(!fdb_find(head, addr, vid))) {
445 fdb = fdb_create(head, source, addr); 486 fdb = fdb_create(head, source, addr, vid);
446 if (fdb) 487 if (fdb)
447 fdb_notify(br, fdb, RTM_NEWNEIGH); 488 fdb_notify(br, fdb, RTM_NEWNEIGH);
448 } 489 }
@@ -495,6 +536,10 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
495 ci.ndm_refcnt = 0; 536 ci.ndm_refcnt = 0;
496 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) 537 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
497 goto nla_put_failure; 538 goto nla_put_failure;
539
540 if (nla_put(skb, NDA_VLAN, sizeof(u16), &fdb->vlan_id))
541 goto nla_put_failure;
542
498 return nlmsg_end(skb, nlh); 543 return nlmsg_end(skb, nlh);
499 544
500nla_put_failure: 545nla_put_failure:
@@ -506,6 +551,7 @@ static inline size_t fdb_nlmsg_size(void)
506{ 551{
507 return NLMSG_ALIGN(sizeof(struct ndmsg)) 552 return NLMSG_ALIGN(sizeof(struct ndmsg))
508 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */ 553 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
554 + nla_total_size(sizeof(u16)) /* NDA_VLAN */
509 + nla_total_size(sizeof(struct nda_cacheinfo)); 555 + nla_total_size(sizeof(struct nda_cacheinfo));
510} 556}
511 557
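
For sizing, nla_total_size() is the attribute header plus payload rounded up
to 4 bytes, so the new NDA_VLAN costs 8 bytes per notification (4-byte nlattr
header plus a u16 padded to 4). A back-of-envelope check, with the 12-byte
struct ndmsg layout hardcoded:

#include <stdio.h>

#define NLA_HDRLEN		4
#define NLA_ALIGN(n)		(((n) + 3) & ~3)
#define nla_total_size(n)	NLA_ALIGN(NLA_HDRLEN + (n))

int main(void)
{
	int sz = NLA_ALIGN(12)		/* struct ndmsg */
	       + nla_total_size(6)	/* NDA_LLADDR */
	       + nla_total_size(2)	/* NDA_VLAN, a u16 */
	       + nla_total_size(16);	/* struct nda_cacheinfo */

	printf("fdb notify payload: %d bytes\n", sz);	/* 52 */
	return 0;
}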
@@ -571,18 +617,18 @@ out:
571 617
572/* Update (create or replace) forwarding database entry */ 618/* Update (create or replace) forwarding database entry */
573static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr, 619static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
574 __u16 state, __u16 flags) 620 __u16 state, __u16 flags, __u16 vid)
575{ 621{
576 struct net_bridge *br = source->br; 622 struct net_bridge *br = source->br;
577 struct hlist_head *head = &br->hash[br_mac_hash(addr)]; 623 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
578 struct net_bridge_fdb_entry *fdb; 624 struct net_bridge_fdb_entry *fdb;
579 625
580 fdb = fdb_find(head, addr); 626 fdb = fdb_find(head, addr, vid);
581 if (fdb == NULL) { 627 if (fdb == NULL) {
582 if (!(flags & NLM_F_CREATE)) 628 if (!(flags & NLM_F_CREATE))
583 return -ENOENT; 629 return -ENOENT;
584 630
585 fdb = fdb_create(head, source, addr); 631 fdb = fdb_create(head, source, addr, vid);
586 if (!fdb) 632 if (!fdb)
587 return -ENOMEM; 633 return -ENOMEM;
588 fdb_notify(br, fdb, RTM_NEWNEIGH); 634 fdb_notify(br, fdb, RTM_NEWNEIGH);
@@ -607,6 +653,25 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
607 return 0; 653 return 0;
608} 654}
609 655
656static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
657 const unsigned char *addr, u16 nlh_flags, u16 vid)
658{
659 int err = 0;
660
661 if (ndm->ndm_flags & NTF_USE) {
662 rcu_read_lock();
663 br_fdb_update(p->br, p, addr, vid);
664 rcu_read_unlock();
665 } else {
666 spin_lock_bh(&p->br->hash_lock);
667 err = fdb_add_entry(p, addr, ndm->ndm_state,
668 nlh_flags, vid);
669 spin_unlock_bh(&p->br->hash_lock);
670 }
671
672 return err;
673}
674
610/* Add new permanent fdb entry with RTM_NEWNEIGH */ 675/* Add new permanent fdb entry with RTM_NEWNEIGH */
611int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 676int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
612 struct net_device *dev, 677 struct net_device *dev,
@@ -614,12 +679,29 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
614{ 679{
615 struct net_bridge_port *p; 680 struct net_bridge_port *p;
616 int err = 0; 681 int err = 0;
682 struct net_port_vlans *pv;
683 unsigned short vid = VLAN_N_VID;
617 684
618 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) { 685 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
619 pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state); 686 pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
620 return -EINVAL; 687 return -EINVAL;
621 } 688 }
622 689
690 if (tb[NDA_VLAN]) {
691 if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) {
692 pr_info("bridge: RTM_NEWNEIGH with invalid vlan\n");
693 return -EINVAL;
694 }
695
696 vid = nla_get_u16(tb[NDA_VLAN]);
697
698 if (vid >= VLAN_N_VID) {
699 pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
700 vid);
701 return -EINVAL;
702 }
703 }
704
623 p = br_port_get_rtnl(dev); 705 p = br_port_get_rtnl(dev);
624 if (p == NULL) { 706 if (p == NULL) {
625 pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n", 707 pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
@@ -627,40 +709,90 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
627 return -EINVAL; 709 return -EINVAL;
628 } 710 }
629 711
630 if (ndm->ndm_flags & NTF_USE) { 712 pv = nbp_get_vlan_info(p);
631 rcu_read_lock(); 713 if (vid != VLAN_N_VID) {
632 br_fdb_update(p->br, p, addr); 714 if (!pv || !test_bit(vid, pv->vlan_bitmap)) {
633 rcu_read_unlock(); 715 pr_info("bridge: RTM_NEWNEIGH with unconfigured "
716 "vlan %d on port %s\n", vid, dev->name);
717 return -EINVAL;
718 }
719
720 /* VID was specified, so use it. */
721 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
634 } else { 722 } else {
635 spin_lock_bh(&p->br->hash_lock); 723 if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
636 err = fdb_add_entry(p, addr, ndm->ndm_state, nlh_flags); 724 err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
637 spin_unlock_bh(&p->br->hash_lock); 725 goto out;
726 }
727
728 /* We have vlans configured on this port and user didn't
729 * specify a VLAN. To be nice, add/update entry for every
730 * vlan on this port.
731 */
732 vid = find_first_bit(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN);
733 while (vid < BR_VLAN_BITMAP_LEN) {
734 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
735 if (err)
736 goto out;
737 vid = find_next_bit(pv->vlan_bitmap,
738 BR_VLAN_BITMAP_LEN, vid+1);
739 }
638 } 740 }
639 741
742out:
640 return err; 743 return err;
641} 744}
642 745
643static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr) 746int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr,
747 u16 vlan)
644{ 748{
645 struct net_bridge *br = p->br; 749 struct hlist_head *head = &br->hash[br_mac_hash(addr, vlan)];
646 struct hlist_head *head = &br->hash[br_mac_hash(addr)];
647 struct net_bridge_fdb_entry *fdb; 750 struct net_bridge_fdb_entry *fdb;
648 751
649 fdb = fdb_find(head, addr); 752 fdb = fdb_find(head, addr, vlan);
650 if (!fdb) 753 if (!fdb)
651 return -ENOENT; 754 return -ENOENT;
652 755
653 fdb_delete(p->br, fdb); 756 fdb_delete(br, fdb);
654 return 0; 757 return 0;
655} 758}
656 759
760static int __br_fdb_delete(struct net_bridge_port *p,
761 const unsigned char *addr, u16 vid)
762{
763 int err;
764
765 spin_lock_bh(&p->br->hash_lock);
766 err = fdb_delete_by_addr(p->br, addr, vid);
767 spin_unlock_bh(&p->br->hash_lock);
768
769 return err;
770}
771
657/* Remove neighbor entry with RTM_DELNEIGH */ 772/* Remove neighbor entry with RTM_DELNEIGH */
658int br_fdb_delete(struct ndmsg *ndm, struct net_device *dev, 773int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
774 struct net_device *dev,
659 const unsigned char *addr) 775 const unsigned char *addr)
660{ 776{
661 struct net_bridge_port *p; 777 struct net_bridge_port *p;
662 int err; 778 int err;
779 struct net_port_vlans *pv;
780 unsigned short vid = VLAN_N_VID;
781
782 if (tb[NDA_VLAN]) {
783 if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) {
 784 pr_info("bridge: RTM_DELNEIGH with invalid vlan\n");
785 return -EINVAL;
786 }
787
788 vid = nla_get_u16(tb[NDA_VLAN]);
663 789
 790 if (vid >= VLAN_N_VID) {
 791 pr_info("bridge: RTM_DELNEIGH with invalid vlan id %d\n",
792 vid);
793 return -EINVAL;
794 }
795 }
664 p = br_port_get_rtnl(dev); 796 p = br_port_get_rtnl(dev);
665 if (p == NULL) { 797 if (p == NULL) {
666 pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n", 798 pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
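
Both the add and delete paths now accept an NDA_VLAN attribute. Seen from
userspace, it rides an ordinary RTM_NEWNEIGH request; below is a hedged
sketch of such a sender over raw NETLINK_ROUTE, error handling trimmed and
the ifindex and VID made up, assuming uapi headers that already define
NDA_VLAN. NTF_MASTER asks rtnetlink to hand the request to the bridge's
ndo_fdb_add.

#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static struct {
	struct nlmsghdr	nlh;
	struct ndmsg	ndm;
	char		buf[64];
} req;

static void addattr(unsigned short type, const void *data, int len)
{
	struct rtattr *rta =
		(struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(len);
	memcpy(RTA_DATA(rta), data, len);
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) +
			    RTA_ALIGN(rta->rta_len);
}

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned short vid = 100;	/* must be configured on the port */
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ndmsg));
	req.nlh.nlmsg_type  = RTM_NEWNEIGH;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
	req.ndm.ndm_family  = AF_BRIDGE;
	req.ndm.ndm_ifindex = 2;	/* made-up port ifindex */
	req.ndm.ndm_state   = NUD_PERMANENT;
	req.ndm.ndm_flags   = NTF_MASTER;

	addattr(NDA_LLADDR, mac, sizeof(mac));
	addattr(NDA_VLAN, &vid, sizeof(vid));

	sendto(fd, &req, req.nlh.nlmsg_len, 0,
	       (struct sockaddr *)&kernel, sizeof(kernel));
	close(fd);
	return 0;
}

With an iproute2 new enough to know the keyword, the same request is roughly
"bridge fdb add 00:11:22:33:44:55 dev eth0 master vlan 100".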
@@ -668,9 +800,33 @@ int br_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
668 return -EINVAL; 800 return -EINVAL;
669 } 801 }
670 802
671 spin_lock_bh(&p->br->hash_lock); 803 pv = nbp_get_vlan_info(p);
672 err = fdb_delete_by_addr(p, addr); 804 if (vid != VLAN_N_VID) {
673 spin_unlock_bh(&p->br->hash_lock); 805 if (!pv || !test_bit(vid, pv->vlan_bitmap)) {
806 pr_info("bridge: RTM_DELNEIGH with unconfigured "
807 "vlan %d on port %s\n", vid, dev->name);
808 return -EINVAL;
809 }
810
811 err = __br_fdb_delete(p, addr, vid);
812 } else {
813 if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
814 err = __br_fdb_delete(p, addr, 0);
815 goto out;
816 }
674 817
818 /* We have vlans configured on this port and user didn't
 819 * specify a VLAN. To be nice, delete the entry for every
820 * vlan on this port.
821 */
822 err = -ENOENT;
823 vid = find_first_bit(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN);
824 while (vid < BR_VLAN_BITMAP_LEN) {
825 err &= __br_fdb_delete(p, addr, vid);
826 vid = find_next_bit(pv->vlan_bitmap,
827 BR_VLAN_BITMAP_LEN, vid+1);
828 }
829 }
830out:
675 return err; 831 return err;
676} 832}
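
The "err &=" accumulator above is a two's-complement trick: only 0 and
-ENOENT ever appear, -ENOENT & -ENOENT stays -ENOENT, and a single 0 clears
the result for good, so the caller sees success when the address was found on
at least one VLAN. A short demonstration (the trick would break if different
negative errnos were mixed):

#include <errno.h>
#include <stdio.h>

int main(void)
{
	int results[] = { -ENOENT, 0, -ENOENT };	/* per-VLAN outcomes */
	int err = -ENOENT;

	for (int i = 0; i < 3; i++)
		err &= results[i];
	printf("final err = %d\n", err);	/* 0: one delete succeeded */
	return 0;
}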
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 02015a505d2a..092b20e4ee4c 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -31,6 +31,7 @@ static inline int should_deliver(const struct net_bridge_port *p,
31 const struct sk_buff *skb) 31 const struct sk_buff *skb)
32{ 32{
33 return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) && 33 return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
34 br_allowed_egress(p->br, nbp_get_vlan_info(p), skb) &&
34 p->state == BR_STATE_FORWARDING); 35 p->state == BR_STATE_FORWARDING);
35} 36}
36 37
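
should_deliver() now consults the egress VLAN map alongside the port state,
so a frame only leaves ports that are members of its VLAN. A toy model that
assumes the frame's VID is already known and ignores untagged/PVID handling:

#include <stdbool.h>
#include <stdio.h>

#define VLAN_N_VID	4096
#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))
#define BITMAP_LONGS	((VLAN_N_VID + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct port {
	unsigned long vlan_bitmap[BITMAP_LONGS];
	bool forwarding;
};

static bool br_allowed_egress(const struct port *p, unsigned short vid)
{
	return (p->vlan_bitmap[vid / BITS_PER_LONG] >>
		(vid % BITS_PER_LONG)) & 1;
}

static bool should_deliver(const struct port *p, unsigned short vid)
{
	return p->forwarding && br_allowed_egress(p, vid);
}

int main(void)
{
	struct port p = { .forwarding = true };

	p.vlan_bitmap[10 / BITS_PER_LONG] |= 1UL << (10 % BITS_PER_LONG);
	printf("vid 10: %s\n", should_deliver(&p, 10) ? "deliver" : "drop");
	printf("vid 20: %s\n", should_deliver(&p, 20) ? "deliver" : "drop");
	return 0;
}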
@@ -63,6 +64,10 @@ int br_forward_finish(struct sk_buff *skb)
63 64
64static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) 65static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
65{ 66{
67 skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
68 if (!skb)
69 return;
70
66 skb->dev = to->dev; 71 skb->dev = to->dev;
67 72
68 if (unlikely(netpoll_tx_running(to->br->dev))) { 73 if (unlikely(netpoll_tx_running(to->br->dev))) {
@@ -88,6 +93,10 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
88 return; 93 return;
89 } 94 }
90 95
96 skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
97 if (!skb)
98 return;
99
91 indev = skb->dev; 100 indev = skb->dev;
92 skb->dev = to->dev; 101 skb->dev = to->dev;
93 skb_forward_csum(skb); 102 skb_forward_csum(skb);
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 2148d474a04f..ef1b91431c6b 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -23,6 +23,7 @@
23#include <linux/if_ether.h> 23#include <linux/if_ether.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <net/sock.h> 25#include <net/sock.h>
26#include <linux/if_vlan.h>
26 27
27#include "br_private.h" 28#include "br_private.h"
28 29
@@ -139,6 +140,7 @@ static void del_nbp(struct net_bridge_port *p)
139 140
140 br_ifinfo_notify(RTM_DELLINK, p); 141 br_ifinfo_notify(RTM_DELLINK, p);
141 142
143 nbp_vlan_flush(p);
142 br_fdb_delete_by_port(br, p, 1); 144 br_fdb_delete_by_port(br, p, 1);
143 145
144 list_del_rcu(&p->list); 146 list_del_rcu(&p->list);
@@ -395,7 +397,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
395 397
396 dev_set_mtu(br->dev, br_min_mtu(br)); 398 dev_set_mtu(br->dev, br_min_mtu(br));
397 399
398 if (br_fdb_insert(br, p, dev->dev_addr)) 400 if (br_fdb_insert(br, p, dev->dev_addr, 0))
399 netdev_err(dev, "failed insert local address bridge forwarding table\n"); 401 netdev_err(dev, "failed insert local address bridge forwarding table\n");
400 402
401 kobject_uevent(&p->kobj, KOBJ_ADD); 403 kobject_uevent(&p->kobj, KOBJ_ADD);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 4b34207419b1..480330151898 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -17,6 +17,7 @@
17#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
18#include <linux/netfilter_bridge.h> 18#include <linux/netfilter_bridge.h>
19#include <linux/export.h> 19#include <linux/export.h>
20#include <linux/rculist.h>
20#include "br_private.h" 21#include "br_private.h"
21 22
22/* Hook for brouter */ 23/* Hook for brouter */
@@ -34,6 +35,20 @@ static int br_pass_frame_up(struct sk_buff *skb)
34 brstats->rx_bytes += skb->len; 35 brstats->rx_bytes += skb->len;
35 u64_stats_update_end(&brstats->syncp); 36 u64_stats_update_end(&brstats->syncp);
36 37
38 /* Bridge is just like any other port. Make sure the
 39 * packet is allowed except in promisc mode when someone
40 * may be running packet capture.
41 */
42 if (!(brdev->flags & IFF_PROMISC) &&
43 !br_allowed_egress(br, br_get_vlan_info(br), skb)) {
44 kfree_skb(skb);
45 return NET_RX_DROP;
46 }
47
48 skb = br_handle_vlan(br, br_get_vlan_info(br), skb);
49 if (!skb)
50 return NET_RX_DROP;
51
37 indev = skb->dev; 52 indev = skb->dev;
38 skb->dev = brdev; 53 skb->dev = brdev;
39 54
@@ -50,13 +65,17 @@ int br_handle_frame_finish(struct sk_buff *skb)
50 struct net_bridge_fdb_entry *dst; 65 struct net_bridge_fdb_entry *dst;
51 struct net_bridge_mdb_entry *mdst; 66 struct net_bridge_mdb_entry *mdst;
52 struct sk_buff *skb2; 67 struct sk_buff *skb2;
68 u16 vid = 0;
53 69
54 if (!p || p->state == BR_STATE_DISABLED) 70 if (!p || p->state == BR_STATE_DISABLED)
55 goto drop; 71 goto drop;
56 72
73 if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid))
74 goto drop;
75
57 /* insert into forwarding database after filtering to avoid spoofing */ 76 /* insert into forwarding database after filtering to avoid spoofing */
58 br = p->br; 77 br = p->br;
59 br_fdb_update(br, p, eth_hdr(skb)->h_source); 78 br_fdb_update(br, p, eth_hdr(skb)->h_source, vid);
60 79
61 if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) && 80 if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
62 br_multicast_rcv(br, p, skb)) 81 br_multicast_rcv(br, p, skb))
@@ -91,7 +110,8 @@ int br_handle_frame_finish(struct sk_buff *skb)
91 skb2 = skb; 110 skb2 = skb;
92 111
93 br->dev->stats.multicast++; 112 br->dev->stats.multicast++;
94 } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) { 113 } else if ((dst = __br_fdb_get(br, dest, vid)) &&
114 dst->is_local) {
95 skb2 = skb; 115 skb2 = skb;
96 /* Do not forward the packet since it's local. */ 116 /* Do not forward the packet since it's local. */
97 skb = NULL; 117 skb = NULL;
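
The ordering in this hunk is deliberate: the ingress VLAN check runs before
learning, so a frame on a disallowed VID never pollutes the fdb, and the VID
it classifies to is what br_fdb_update() learns under. A simplified model of
the accept/classify step (the real br_allowed_ingress() in br_vlan.c also
handles the no-vlan-info case; here untagged frames take the PVID or drop):

#include <stdbool.h>
#include <stdio.h>

#define VLAN_N_VID 4096

struct port_vlans {
	bool member[VLAN_N_VID];
	unsigned short pvid;	/* 0 models "no PVID configured" */
};

static bool br_allowed_ingress(const struct port_vlans *pv, bool tagged,
			       unsigned short tag, unsigned short *vid)
{
	if (tagged) {
		*vid = tag;
		return pv->member[tag];
	}
	if (!pv->pvid)
		return false;	/* untagged and no PVID: drop */
	*vid = pv->pvid;	/* classify untagged frame into the PVID */
	return true;
}

int main(void)
{
	struct port_vlans pv = { .pvid = 1 };
	unsigned short vid;

	pv.member[1] = pv.member[100] = true;
	if (br_allowed_ingress(&pv, true, 100, &vid))
		printf("tagged 100 accepted, learn on vid %u\n", vid);
	if (br_allowed_ingress(&pv, false, 0, &vid))
		printf("untagged accepted, learn on vid %u\n", vid);
	if (!br_allowed_ingress(&pv, true, 200, &vid))
		printf("tagged 200 dropped before learning\n");
	return 0;
}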
@@ -119,8 +139,10 @@ drop:
119static int br_handle_local_finish(struct sk_buff *skb) 139static int br_handle_local_finish(struct sk_buff *skb)
120{ 140{
121 struct net_bridge_port *p = br_port_get_rcu(skb->dev); 141 struct net_bridge_port *p = br_port_get_rcu(skb->dev);
142 u16 vid = 0;
122 143
123 br_fdb_update(p->br, p, eth_hdr(skb)->h_source); 144 br_vlan_get_tag(skb, &vid);
145 br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid);
124 return 0; /* process further */ 146 return 0; /* process further */
125} 147}
126 148
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 6d6f26531de2..7d886b0a8b7b 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -39,6 +39,8 @@ static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
39{ 39{
40 if (a->proto != b->proto) 40 if (a->proto != b->proto)
41 return 0; 41 return 0;
42 if (a->vid != b->vid)
43 return 0;
42 switch (a->proto) { 44 switch (a->proto) {
43 case htons(ETH_P_IP): 45 case htons(ETH_P_IP):
44 return a->u.ip4 == b->u.ip4; 46 return a->u.ip4 == b->u.ip4;
@@ -50,16 +52,19 @@ static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
50 return 0; 52 return 0;
51} 53}
52 54
53static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip) 55static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
56 __u16 vid)
54{ 57{
55 return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1); 58 return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
56} 59}
57 60
58#if IS_ENABLED(CONFIG_IPV6) 61#if IS_ENABLED(CONFIG_IPV6)
59static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb, 62static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
60 const struct in6_addr *ip) 63 const struct in6_addr *ip,
64 __u16 vid)
61{ 65{
62 return jhash2((__force u32 *)ip->s6_addr32, 4, mdb->secret) & (mdb->max - 1); 66 return jhash_2words(ipv6_addr_hash(ip), vid,
67 mdb->secret) & (mdb->max - 1);
63} 68}
64#endif 69#endif
65 70
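
The multicast database gets the same treatment: vid joins both the equality
test and the hash, so the same group joined on two VLANs yields two mdb
entries. A toy br_ip and the extended equality check, collapsed to IPv4 and
host byte order for brevity:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct br_ip {
	uint32_t ip4;	/* group address; network order in the kernel */
	uint16_t vid;
};

static bool br_ip_equal(const struct br_ip *a, const struct br_ip *b)
{
	return a->vid == b->vid && a->ip4 == b->ip4;
}

int main(void)
{
	struct br_ip g10 = { 0xef010101, 10 };	/* 239.1.1.1 on vlan 10 */
	struct br_ip g20 = { 0xef010101, 20 };	/* 239.1.1.1 on vlan 20 */

	printf("same mdb entry? %s\n", br_ip_equal(&g10, &g20) ? "yes" : "no");
	return 0;
}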
@@ -68,10 +73,10 @@ static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
68{ 73{
69 switch (ip->proto) { 74 switch (ip->proto) {
70 case htons(ETH_P_IP): 75 case htons(ETH_P_IP):
71 return __br_ip4_hash(mdb, ip->u.ip4); 76 return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
72#if IS_ENABLED(CONFIG_IPV6) 77#if IS_ENABLED(CONFIG_IPV6)
73 case htons(ETH_P_IPV6): 78 case htons(ETH_P_IPV6):
74 return __br_ip6_hash(mdb, &ip->u.ip6); 79 return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
75#endif 80#endif
76 } 81 }
77 return 0; 82 return 0;
@@ -101,24 +106,27 @@ struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
101} 106}
102 107
103static struct net_bridge_mdb_entry *br_mdb_ip4_get( 108static struct net_bridge_mdb_entry *br_mdb_ip4_get(
104 struct net_bridge_mdb_htable *mdb, __be32 dst) 109 struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
105{ 110{
106 struct br_ip br_dst; 111 struct br_ip br_dst;
107 112
108 br_dst.u.ip4 = dst; 113 br_dst.u.ip4 = dst;
109 br_dst.proto = htons(ETH_P_IP); 114 br_dst.proto = htons(ETH_P_IP);
115 br_dst.vid = vid;
110 116
111 return br_mdb_ip_get(mdb, &br_dst); 117 return br_mdb_ip_get(mdb, &br_dst);
112} 118}
113 119
114#if IS_ENABLED(CONFIG_IPV6) 120#if IS_ENABLED(CONFIG_IPV6)
115static struct net_bridge_mdb_entry *br_mdb_ip6_get( 121static struct net_bridge_mdb_entry *br_mdb_ip6_get(
116 struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst) 122 struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
123 __u16 vid)
117{ 124{
118 struct br_ip br_dst; 125 struct br_ip br_dst;
119 126
120 br_dst.u.ip6 = *dst; 127 br_dst.u.ip6 = *dst;
121 br_dst.proto = htons(ETH_P_IPV6); 128 br_dst.proto = htons(ETH_P_IPV6);
129 br_dst.vid = vid;
122 130
123 return br_mdb_ip_get(mdb, &br_dst); 131 return br_mdb_ip_get(mdb, &br_dst);
124} 132}
@@ -694,7 +702,8 @@ err:
694 702
695static int br_ip4_multicast_add_group(struct net_bridge *br, 703static int br_ip4_multicast_add_group(struct net_bridge *br,
696 struct net_bridge_port *port, 704 struct net_bridge_port *port,
697 __be32 group) 705 __be32 group,
706 __u16 vid)
698{ 707{
699 struct br_ip br_group; 708 struct br_ip br_group;
700 709
@@ -703,6 +712,7 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
703 712
704 br_group.u.ip4 = group; 713 br_group.u.ip4 = group;
705 br_group.proto = htons(ETH_P_IP); 714 br_group.proto = htons(ETH_P_IP);
715 br_group.vid = vid;
706 716
707 return br_multicast_add_group(br, port, &br_group); 717 return br_multicast_add_group(br, port, &br_group);
708} 718}
@@ -710,7 +720,8 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
710#if IS_ENABLED(CONFIG_IPV6) 720#if IS_ENABLED(CONFIG_IPV6)
711static int br_ip6_multicast_add_group(struct net_bridge *br, 721static int br_ip6_multicast_add_group(struct net_bridge *br,
712 struct net_bridge_port *port, 722 struct net_bridge_port *port,
713 const struct in6_addr *group) 723 const struct in6_addr *group,
724 __u16 vid)
714{ 725{
715 struct br_ip br_group; 726 struct br_ip br_group;
716 727
@@ -719,6 +730,7 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
719 730
720 br_group.u.ip6 = *group; 731 br_group.u.ip6 = *group;
721 br_group.proto = htons(ETH_P_IPV6); 732 br_group.proto = htons(ETH_P_IPV6);
733 br_group.vid = vid;
722 734
723 return br_multicast_add_group(br, port, &br_group); 735 return br_multicast_add_group(br, port, &br_group);
724} 736}
@@ -895,10 +907,12 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
895 int type; 907 int type;
896 int err = 0; 908 int err = 0;
897 __be32 group; 909 __be32 group;
910 u16 vid = 0;
898 911
899 if (!pskb_may_pull(skb, sizeof(*ih))) 912 if (!pskb_may_pull(skb, sizeof(*ih)))
900 return -EINVAL; 913 return -EINVAL;
901 914
915 br_vlan_get_tag(skb, &vid);
902 ih = igmpv3_report_hdr(skb); 916 ih = igmpv3_report_hdr(skb);
903 num = ntohs(ih->ngrec); 917 num = ntohs(ih->ngrec);
904 len = sizeof(*ih); 918 len = sizeof(*ih);
@@ -930,7 +944,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
930 continue; 944 continue;
931 } 945 }
932 946
933 err = br_ip4_multicast_add_group(br, port, group); 947 err = br_ip4_multicast_add_group(br, port, group, vid);
934 if (err) 948 if (err)
935 break; 949 break;
936 } 950 }
@@ -949,10 +963,12 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
949 int len; 963 int len;
950 int num; 964 int num;
951 int err = 0; 965 int err = 0;
966 u16 vid = 0;
952 967
953 if (!pskb_may_pull(skb, sizeof(*icmp6h))) 968 if (!pskb_may_pull(skb, sizeof(*icmp6h)))
954 return -EINVAL; 969 return -EINVAL;
955 970
971 br_vlan_get_tag(skb, &vid);
956 icmp6h = icmp6_hdr(skb); 972 icmp6h = icmp6_hdr(skb);
957 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); 973 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
958 len = sizeof(*icmp6h); 974 len = sizeof(*icmp6h);
@@ -990,7 +1006,8 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
990 continue; 1006 continue;
991 } 1007 }
992 1008
993 err = br_ip6_multicast_add_group(br, port, &grec->grec_mca); 1009 err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
1010 vid);
994 if (!err) 1011 if (!err)
995 break; 1012 break;
996 } 1013 }
@@ -1074,6 +1091,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1074 unsigned long now = jiffies; 1091 unsigned long now = jiffies;
1075 __be32 group; 1092 __be32 group;
1076 int err = 0; 1093 int err = 0;
1094 u16 vid = 0;
1077 1095
1078 spin_lock(&br->multicast_lock); 1096 spin_lock(&br->multicast_lock);
1079 if (!netif_running(br->dev) || 1097 if (!netif_running(br->dev) ||
@@ -1108,7 +1126,8 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1108 if (!group) 1126 if (!group)
1109 goto out; 1127 goto out;
1110 1128
1111 mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group); 1129 br_vlan_get_tag(skb, &vid);
1130 mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
1112 if (!mp) 1131 if (!mp)
1113 goto out; 1132 goto out;
1114 1133
@@ -1149,6 +1168,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1149 unsigned long now = jiffies; 1168 unsigned long now = jiffies;
1150 const struct in6_addr *group = NULL; 1169 const struct in6_addr *group = NULL;
1151 int err = 0; 1170 int err = 0;
1171 u16 vid = 0;
1152 1172
1153 spin_lock(&br->multicast_lock); 1173 spin_lock(&br->multicast_lock);
1154 if (!netif_running(br->dev) || 1174 if (!netif_running(br->dev) ||
@@ -1180,7 +1200,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1180 if (!group) 1200 if (!group)
1181 goto out; 1201 goto out;
1182 1202
1183 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group); 1203 br_vlan_get_tag(skb, &vid);
1204 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
1184 if (!mp) 1205 if (!mp)
1185 goto out; 1206 goto out;
1186 1207
@@ -1286,7 +1307,8 @@ out:
1286 1307
1287static void br_ip4_multicast_leave_group(struct net_bridge *br, 1308static void br_ip4_multicast_leave_group(struct net_bridge *br,
1288 struct net_bridge_port *port, 1309 struct net_bridge_port *port,
1289 __be32 group) 1310 __be32 group,
1311 __u16 vid)
1290{ 1312{
1291 struct br_ip br_group; 1313 struct br_ip br_group;
1292 1314
@@ -1295,6 +1317,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
1295 1317
1296 br_group.u.ip4 = group; 1318 br_group.u.ip4 = group;
1297 br_group.proto = htons(ETH_P_IP); 1319 br_group.proto = htons(ETH_P_IP);
1320 br_group.vid = vid;
1298 1321
1299 br_multicast_leave_group(br, port, &br_group); 1322 br_multicast_leave_group(br, port, &br_group);
1300} 1323}
@@ -1302,7 +1325,8 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
1302#if IS_ENABLED(CONFIG_IPV6) 1325#if IS_ENABLED(CONFIG_IPV6)
1303static void br_ip6_multicast_leave_group(struct net_bridge *br, 1326static void br_ip6_multicast_leave_group(struct net_bridge *br,
1304 struct net_bridge_port *port, 1327 struct net_bridge_port *port,
1305 const struct in6_addr *group) 1328 const struct in6_addr *group,
1329 __u16 vid)
1306{ 1330{
1307 struct br_ip br_group; 1331 struct br_ip br_group;
1308 1332
@@ -1311,6 +1335,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
1311 1335
1312 br_group.u.ip6 = *group; 1336 br_group.u.ip6 = *group;
1313 br_group.proto = htons(ETH_P_IPV6); 1337 br_group.proto = htons(ETH_P_IPV6);
1338 br_group.vid = vid;
1314 1339
1315 br_multicast_leave_group(br, port, &br_group); 1340 br_multicast_leave_group(br, port, &br_group);
1316} 1341}
@@ -1326,6 +1351,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1326 unsigned int len; 1351 unsigned int len;
1327 unsigned int offset; 1352 unsigned int offset;
1328 int err; 1353 int err;
1354 u16 vid = 0;
1329 1355
1330 /* We treat OOM as packet loss for now. */ 1356 /* We treat OOM as packet loss for now. */
1331 if (!pskb_may_pull(skb, sizeof(*iph))) 1357 if (!pskb_may_pull(skb, sizeof(*iph)))
@@ -1386,6 +1412,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1386 1412
1387 err = 0; 1413 err = 0;
1388 1414
1415 br_vlan_get_tag(skb2, &vid);
1389 BR_INPUT_SKB_CB(skb)->igmp = 1; 1416 BR_INPUT_SKB_CB(skb)->igmp = 1;
1390 ih = igmp_hdr(skb2); 1417 ih = igmp_hdr(skb2);
1391 1418
@@ -1393,7 +1420,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1393 case IGMP_HOST_MEMBERSHIP_REPORT: 1420 case IGMP_HOST_MEMBERSHIP_REPORT:
1394 case IGMPV2_HOST_MEMBERSHIP_REPORT: 1421 case IGMPV2_HOST_MEMBERSHIP_REPORT:
1395 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1422 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1396 err = br_ip4_multicast_add_group(br, port, ih->group); 1423 err = br_ip4_multicast_add_group(br, port, ih->group, vid);
1397 break; 1424 break;
1398 case IGMPV3_HOST_MEMBERSHIP_REPORT: 1425 case IGMPV3_HOST_MEMBERSHIP_REPORT:
1399 err = br_ip4_multicast_igmp3_report(br, port, skb2); 1426 err = br_ip4_multicast_igmp3_report(br, port, skb2);
@@ -1402,7 +1429,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1402 err = br_ip4_multicast_query(br, port, skb2); 1429 err = br_ip4_multicast_query(br, port, skb2);
1403 break; 1430 break;
1404 case IGMP_HOST_LEAVE_MESSAGE: 1431 case IGMP_HOST_LEAVE_MESSAGE:
1405 br_ip4_multicast_leave_group(br, port, ih->group); 1432 br_ip4_multicast_leave_group(br, port, ih->group, vid);
1406 break; 1433 break;
1407 } 1434 }
1408 1435
@@ -1427,6 +1454,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1427 unsigned int len; 1454 unsigned int len;
1428 int offset; 1455 int offset;
1429 int err; 1456 int err;
1457 u16 vid = 0;
1430 1458
1431 if (!pskb_may_pull(skb, sizeof(*ip6h))) 1459 if (!pskb_may_pull(skb, sizeof(*ip6h)))
1432 return -EINVAL; 1460 return -EINVAL;
@@ -1510,6 +1538,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1510 1538
1511 err = 0; 1539 err = 0;
1512 1540
1541 br_vlan_get_tag(skb, &vid);
1513 BR_INPUT_SKB_CB(skb)->igmp = 1; 1542 BR_INPUT_SKB_CB(skb)->igmp = 1;
1514 1543
1515 switch (icmp6_type) { 1544 switch (icmp6_type) {
@@ -1522,7 +1551,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1522 } 1551 }
1523 mld = (struct mld_msg *)skb_transport_header(skb2); 1552 mld = (struct mld_msg *)skb_transport_header(skb2);
1524 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1553 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1525 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca); 1554 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
1526 break; 1555 break;
1527 } 1556 }
1528 case ICMPV6_MLD2_REPORT: 1557 case ICMPV6_MLD2_REPORT:
@@ -1539,7 +1568,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1539 goto out; 1568 goto out;
1540 } 1569 }
1541 mld = (struct mld_msg *)skb_transport_header(skb2); 1570 mld = (struct mld_msg *)skb_transport_header(skb2);
1542 br_ip6_multicast_leave_group(br, port, &mld->mld_mca); 1571 br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
1543 } 1572 }
1544 } 1573 }
1545 1574
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 39ca9796f3f7..27aa3ee517ce 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -16,6 +16,7 @@
16#include <net/rtnetlink.h> 16#include <net/rtnetlink.h>
17#include <net/net_namespace.h> 17#include <net/net_namespace.h>
18#include <net/sock.h> 18#include <net/sock.h>
19#include <uapi/linux/if_bridge.h>
19 20
20#include "br_private.h" 21#include "br_private.h"
21#include "br_private_stp.h" 22#include "br_private_stp.h"
@@ -64,15 +65,21 @@ static int br_port_fill_attrs(struct sk_buff *skb,
64 * Create one netlink message for one interface 65 * Create one netlink message for one interface
65 * Contains port and master info as well as carrier and bridge state. 66 * Contains port and master info as well as carrier and bridge state.
66 */ 67 */
67static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *port, 68static int br_fill_ifinfo(struct sk_buff *skb,
68 u32 pid, u32 seq, int event, unsigned int flags) 69 const struct net_bridge_port *port,
70 u32 pid, u32 seq, int event, unsigned int flags,
71 u32 filter_mask, const struct net_device *dev)
69{ 72{
70 const struct net_bridge *br = port->br; 73 const struct net_bridge *br;
71 const struct net_device *dev = port->dev;
72 struct ifinfomsg *hdr; 74 struct ifinfomsg *hdr;
73 struct nlmsghdr *nlh; 75 struct nlmsghdr *nlh;
74 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; 76 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
75 77
78 if (port)
79 br = port->br;
80 else
81 br = netdev_priv(dev);
82
76 br_debug(br, "br_fill_info event %d port %s master %s\n", 83 br_debug(br, "br_fill_info event %d port %s master %s\n",
77 event, dev->name, br->dev->name); 84 event, dev->name, br->dev->name);
78 85
@@ -98,7 +105,7 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por
98 nla_put_u32(skb, IFLA_LINK, dev->iflink))) 105 nla_put_u32(skb, IFLA_LINK, dev->iflink)))
99 goto nla_put_failure; 106 goto nla_put_failure;
100 107
101 if (event == RTM_NEWLINK) { 108 if (event == RTM_NEWLINK && port) {
102 struct nlattr *nest 109 struct nlattr *nest
103 = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED); 110 = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
104 111
@@ -107,6 +114,48 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por
107 nla_nest_end(skb, nest); 114 nla_nest_end(skb, nest);
108 } 115 }
109 116
117 /* Check if the VID information is requested */
118 if (filter_mask & RTEXT_FILTER_BRVLAN) {
119 struct nlattr *af;
120 const struct net_port_vlans *pv;
121 struct bridge_vlan_info vinfo;
122 u16 vid;
123 u16 pvid;
124
125 if (port)
126 pv = nbp_get_vlan_info(port);
127 else
128 pv = br_get_vlan_info(br);
129
130 if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN))
131 goto done;
132
133 af = nla_nest_start(skb, IFLA_AF_SPEC);
134 if (!af)
135 goto nla_put_failure;
136
137 pvid = br_get_pvid(pv);
138 for (vid = find_first_bit(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN);
139 vid < BR_VLAN_BITMAP_LEN;
140 vid = find_next_bit(pv->vlan_bitmap,
141 BR_VLAN_BITMAP_LEN, vid+1)) {
142 vinfo.vid = vid;
143 vinfo.flags = 0;
144 if (vid == pvid)
145 vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
146
147 if (test_bit(vid, pv->untagged_bitmap))
148 vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
149
150 if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
151 sizeof(vinfo), &vinfo))
152 goto nla_put_failure;
153 }
154
155 nla_nest_end(skb, af);
156 }
157
158done:
110 return nlmsg_end(skb, nlh); 159 return nlmsg_end(skb, nlh);
111 160
112nla_put_failure: 161nla_put_failure:
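
A dump consumer that sets RTEXT_FILTER_BRVLAN gets back one bridge_vlan_info
per configured VID inside the IFLA_AF_SPEC nest built above. A standalone
decoder for those payloads; the struct layout and flag values mirror the
uapi/linux/if_bridge.h additions from this series.

#include <stdio.h>

struct bridge_vlan_info {		/* as in uapi/linux/if_bridge.h */
	unsigned short flags;
	unsigned short vid;
};

#define BRIDGE_VLAN_INFO_MASTER		(1 << 0)
#define BRIDGE_VLAN_INFO_PVID		(1 << 1)
#define BRIDGE_VLAN_INFO_UNTAGGED	(1 << 2)

static void print_vlan(const struct bridge_vlan_info *vi)
{
	printf("vid %u%s%s\n", vi->vid,
	       (vi->flags & BRIDGE_VLAN_INFO_PVID) ? " PVID" : "",
	       (vi->flags & BRIDGE_VLAN_INFO_UNTAGGED) ? " untagged" : "");
}

int main(void)
{
	struct bridge_vlan_info sample[] = {
		{ BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED, 1 },
		{ 0, 100 },
	};

	for (unsigned int i = 0; i < sizeof(sample) / sizeof(sample[0]); i++)
		print_vlan(&sample[i]);
	return 0;
}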
@@ -119,10 +168,14 @@ nla_put_failure:
119 */ 168 */
120void br_ifinfo_notify(int event, struct net_bridge_port *port) 169void br_ifinfo_notify(int event, struct net_bridge_port *port)
121{ 170{
122 struct net *net = dev_net(port->dev); 171 struct net *net;
123 struct sk_buff *skb; 172 struct sk_buff *skb;
124 int err = -ENOBUFS; 173 int err = -ENOBUFS;
125 174
175 if (!port)
176 return;
177
178 net = dev_net(port->dev);
126 br_debug(port->br, "port %u(%s) event %d\n", 179 br_debug(port->br, "port %u(%s) event %d\n",
127 (unsigned int)port->port_no, port->dev->name, event); 180 (unsigned int)port->port_no, port->dev->name, event);
128 181
@@ -130,7 +183,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port)
130 if (skb == NULL) 183 if (skb == NULL)
131 goto errout; 184 goto errout;
132 185
133 err = br_fill_ifinfo(skb, port, 0, 0, event, 0); 186 err = br_fill_ifinfo(skb, port, 0, 0, event, 0, 0, port->dev);
134 if (err < 0) { 187 if (err < 0) {
135 /* -EMSGSIZE implies BUG in br_nlmsg_size() */ 188 /* -EMSGSIZE implies BUG in br_nlmsg_size() */
136 WARN_ON(err == -EMSGSIZE); 189 WARN_ON(err == -EMSGSIZE);
@@ -144,24 +197,85 @@ errout:
144 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 197 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
145} 198}
146 199
200
147/* 201/*
148 * Dump information about all ports, in response to GETLINK 202 * Dump information about all ports, in response to GETLINK
149 */ 203 */
150int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, 204int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
151 struct net_device *dev) 205 struct net_device *dev, u32 filter_mask)
152{ 206{
153 int err = 0; 207 int err = 0;
154 struct net_bridge_port *port = br_port_get_rcu(dev); 208 struct net_bridge_port *port = br_port_get_rcu(dev);
155 209
156 /* not a bridge port */ 210 /* not a bridge port and no VLAN info requested */
157 if (!port) 211 if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN))
158 goto out; 212 goto out;
159 213
160 err = br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI); 214 err = br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI,
215 filter_mask, dev);
161out: 216out:
162 return err; 217 return err;
163} 218}
164 219
220static const struct nla_policy ifla_br_policy[IFLA_MAX+1] = {
221 [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
222 [IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
223 [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
224 .len = sizeof(struct bridge_vlan_info), },
225};
226
227static int br_afspec(struct net_bridge *br,
228 struct net_bridge_port *p,
229 struct nlattr *af_spec,
230 int cmd)
231{
232 struct nlattr *tb[IFLA_BRIDGE_MAX+1];
233 int err = 0;
234
235 err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, af_spec, ifla_br_policy);
236 if (err)
237 return err;
238
239 if (tb[IFLA_BRIDGE_VLAN_INFO]) {
240 struct bridge_vlan_info *vinfo;
241
242 vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
243
244 if (vinfo->vid >= VLAN_N_VID)
245 return -EINVAL;
246
247 switch (cmd) {
248 case RTM_SETLINK:
249 if (p) {
250 err = nbp_vlan_add(p, vinfo->vid, vinfo->flags);
251 if (err)
252 break;
253
254 if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
255 err = br_vlan_add(p->br, vinfo->vid,
256 vinfo->flags);
257 } else
258 err = br_vlan_add(br, vinfo->vid, vinfo->flags);
259
260 if (err)
261 break;
262
263 break;
264
265 case RTM_DELLINK:
266 if (p) {
267 nbp_vlan_delete(p, vinfo->vid);
268 if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
269 br_vlan_delete(p->br, vinfo->vid);
270 } else
271 br_vlan_delete(br, vinfo->vid);
272 break;
273 }
274 }
275
276 return err;
277}
278
165static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = { 279static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = {
166 [IFLA_BRPORT_STATE] = { .type = NLA_U8 }, 280 [IFLA_BRPORT_STATE] = { .type = NLA_U8 },
167 [IFLA_BRPORT_COST] = { .type = NLA_U32 }, 281 [IFLA_BRPORT_COST] = { .type = NLA_U32 },
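
br_afspec() above expects RTM_SETLINK/RTM_DELLINK to carry an IFLA_AF_SPEC
nest holding a bridge_vlan_info. A sketch of just that attribute packing for
"make VID 100 the untagged PVID of this port", leaving out the surrounding
nlmsghdr/ifinfomsg and the socket I/O; the constants assume uapi headers
that already carry this series' additions.

#include <linux/if_bridge.h>	/* bridge_vlan_info, IFLA_BRIDGE_* */
#include <linux/if_link.h>	/* IFLA_AF_SPEC */
#include <linux/rtnetlink.h>	/* struct rtattr, RTA_* macros */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[64];
	struct rtattr *af = (struct rtattr *)buf;
	struct rtattr *vi = (struct rtattr *)(buf + RTA_LENGTH(0));
	struct bridge_vlan_info vinfo = {
		.flags	= BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
		.vid	= 100,	/* placeholder VID */
	};

	/* inner attribute: one bridge_vlan_info blob */
	vi->rta_type = IFLA_BRIDGE_VLAN_INFO;
	vi->rta_len  = RTA_LENGTH(sizeof(vinfo));
	memcpy(RTA_DATA(vi), &vinfo, sizeof(vinfo));

	/* outer attribute: IFLA_AF_SPEC wraps the inner one */
	af->rta_type = IFLA_AF_SPEC;
	af->rta_len  = RTA_LENGTH(0) + RTA_ALIGN(vi->rta_len);

	printf("af_spec nest: %u bytes\n", (unsigned int)af->rta_len); /* 12 */
	return 0;
}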
@@ -241,6 +355,7 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
241{ 355{
242 struct ifinfomsg *ifm; 356 struct ifinfomsg *ifm;
243 struct nlattr *protinfo; 357 struct nlattr *protinfo;
358 struct nlattr *afspec;
244 struct net_bridge_port *p; 359 struct net_bridge_port *p;
245 struct nlattr *tb[IFLA_BRPORT_MAX + 1]; 360 struct nlattr *tb[IFLA_BRPORT_MAX + 1];
246 int err; 361 int err;
@@ -248,38 +363,76 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
248 ifm = nlmsg_data(nlh); 363 ifm = nlmsg_data(nlh);
249 364
250 protinfo = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_PROTINFO); 365 protinfo = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_PROTINFO);
251 if (!protinfo) 366 afspec = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_AF_SPEC);
367 if (!protinfo && !afspec)
252 return 0; 368 return 0;
253 369
254 p = br_port_get_rtnl(dev); 370 p = br_port_get_rtnl(dev);
255 if (!p) 371 /* We want to accept dev as bridge itself if the AF_SPEC
 372 * is set to see if someone is setting vlan info on the bridge
373 */
374 if (!p && ((dev->priv_flags & IFF_EBRIDGE) && !afspec))
256 return -EINVAL; 375 return -EINVAL;
257 376
258 if (protinfo->nla_type & NLA_F_NESTED) { 377 if (p && protinfo) {
259 err = nla_parse_nested(tb, IFLA_BRPORT_MAX, 378 if (protinfo->nla_type & NLA_F_NESTED) {
260 protinfo, ifla_brport_policy); 379 err = nla_parse_nested(tb, IFLA_BRPORT_MAX,
380 protinfo, ifla_brport_policy);
381 if (err)
382 return err;
383
384 spin_lock_bh(&p->br->lock);
385 err = br_setport(p, tb);
386 spin_unlock_bh(&p->br->lock);
387 } else {
 388 /* Binary compatibility with old RSTP */
389 if (nla_len(protinfo) < sizeof(u8))
390 return -EINVAL;
391
392 spin_lock_bh(&p->br->lock);
393 err = br_set_port_state(p, nla_get_u8(protinfo));
394 spin_unlock_bh(&p->br->lock);
395 }
261 if (err) 396 if (err)
262 return err; 397 goto out;
263 398 }
264 spin_lock_bh(&p->br->lock);
265 err = br_setport(p, tb);
266 spin_unlock_bh(&p->br->lock);
267 } else {
268 /* Binary compatibility with old RSTP */
269 if (nla_len(protinfo) < sizeof(u8))
270 return -EINVAL;
271 399
272 spin_lock_bh(&p->br->lock); 400 if (afspec) {
273 err = br_set_port_state(p, nla_get_u8(protinfo)); 401 err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
274 spin_unlock_bh(&p->br->lock); 402 afspec, RTM_SETLINK);
275 } 403 }
276 404
277 if (err == 0) 405 if (err == 0)
278 br_ifinfo_notify(RTM_NEWLINK, p); 406 br_ifinfo_notify(RTM_NEWLINK, p);
279 407
408out:
280 return err; 409 return err;
281} 410}
282 411
412/* Delete port information */
413int br_dellink(struct net_device *dev, struct nlmsghdr *nlh)
414{
415 struct ifinfomsg *ifm;
416 struct nlattr *afspec;
417 struct net_bridge_port *p;
418 int err;
419
420 ifm = nlmsg_data(nlh);
421
422 afspec = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_AF_SPEC);
423 if (!afspec)
424 return 0;
425
426 p = br_port_get_rtnl(dev);
427 /* We want to accept dev as bridge itself as well */
428 if (!p && !(dev->priv_flags & IFF_EBRIDGE))
429 return -EINVAL;
430
431 err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
432 afspec, RTM_DELLINK);
433
434 return err;
435}
283static int br_validate(struct nlattr *tb[], struct nlattr *data[]) 436static int br_validate(struct nlattr *tb[], struct nlattr *data[])
284{ 437{
285 if (tb[IFLA_ADDRESS]) { 438 if (tb[IFLA_ADDRESS]) {
@@ -292,6 +445,29 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
292 return 0; 445 return 0;
293} 446}
294 447
448static size_t br_get_link_af_size(const struct net_device *dev)
449{
450 struct net_port_vlans *pv;
451
452 if (br_port_exists(dev))
453 pv = nbp_get_vlan_info(br_port_get_rcu(dev));
454 else if (dev->priv_flags & IFF_EBRIDGE)
455 pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev));
456 else
457 return 0;
458
459 if (!pv)
460 return 0;
461
462 /* Each VLAN is returned in bridge_vlan_info along with flags */
463 return pv->num_vlans * nla_total_size(sizeof(struct bridge_vlan_info));
464}
465
466static struct rtnl_af_ops br_af_ops = {
467 .family = AF_BRIDGE,
468 .get_link_af_size = br_get_link_af_size,
469};
470
295struct rtnl_link_ops br_link_ops __read_mostly = { 471struct rtnl_link_ops br_link_ops __read_mostly = {
296 .kind = "bridge", 472 .kind = "bridge",
297 .priv_size = sizeof(struct net_bridge), 473 .priv_size = sizeof(struct net_bridge),
@@ -305,11 +481,18 @@ int __init br_netlink_init(void)
305 int err; 481 int err;
306 482
307 br_mdb_init(); 483 br_mdb_init();
308 err = rtnl_link_register(&br_link_ops); 484 err = rtnl_af_register(&br_af_ops);
309 if (err) 485 if (err)
310 goto out; 486 goto out;
311 487
488 err = rtnl_link_register(&br_link_ops);
489 if (err)
490 goto out_af;
491
312 return 0; 492 return 0;
493
494out_af:
495 rtnl_af_unregister(&br_af_ops);
313out: 496out:
314 br_mdb_uninit(); 497 br_mdb_uninit();
315 return err; 498 return err;
@@ -318,5 +501,6 @@ out:
318void __exit br_netlink_fini(void) 501void __exit br_netlink_fini(void)
319{ 502{
320 br_mdb_uninit(); 503 br_mdb_uninit();
504 rtnl_af_unregister(&br_af_ops);
321 rtnl_link_unregister(&br_link_ops); 505 rtnl_link_unregister(&br_link_ops);
322} 506}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 711094aed41a..6d314c4e6bcb 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -18,6 +18,7 @@
18#include <linux/netpoll.h> 18#include <linux/netpoll.h>
19#include <linux/u64_stats_sync.h> 19#include <linux/u64_stats_sync.h>
20#include <net/route.h> 20#include <net/route.h>
21#include <linux/if_vlan.h>
21 22
22#define BR_HASH_BITS 8 23#define BR_HASH_BITS 8
23#define BR_HASH_SIZE (1 << BR_HASH_BITS) 24#define BR_HASH_SIZE (1 << BR_HASH_BITS)
@@ -26,6 +27,7 @@
26 27
27#define BR_PORT_BITS 10 28#define BR_PORT_BITS 10
28#define BR_MAX_PORTS (1<<BR_PORT_BITS) 29#define BR_MAX_PORTS (1<<BR_PORT_BITS)
30#define BR_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID)
29 31
30#define BR_VERSION "2.3" 32#define BR_VERSION "2.3"
31 33
@@ -61,6 +63,20 @@ struct br_ip
61#endif 63#endif
62 } u; 64 } u;
63 __be16 proto; 65 __be16 proto;
66 __u16 vid;
67};
68
69struct net_port_vlans {
70 u16 port_idx;
71 u16 pvid;
72 union {
73 struct net_bridge_port *port;
74 struct net_bridge *br;
75 } parent;
76 struct rcu_head rcu;
77 unsigned long vlan_bitmap[BR_VLAN_BITMAP_LEN];
78 unsigned long untagged_bitmap[BR_VLAN_BITMAP_LEN];
79 u16 num_vlans;
64}; 80};
65 81
66struct net_bridge_fdb_entry 82struct net_bridge_fdb_entry
@@ -74,6 +90,7 @@ struct net_bridge_fdb_entry
74 mac_addr addr; 90 mac_addr addr;
75 unsigned char is_local; 91 unsigned char is_local;
76 unsigned char is_static; 92 unsigned char is_static;
93 __u16 vlan_id;
77}; 94};
78 95
79struct net_bridge_port_group { 96struct net_bridge_port_group {
@@ -156,6 +173,9 @@ struct net_bridge_port
156#ifdef CONFIG_NET_POLL_CONTROLLER 173#ifdef CONFIG_NET_POLL_CONTROLLER
157 struct netpoll *np; 174 struct netpoll *np;
158#endif 175#endif
176#ifdef CONFIG_BRIDGE_VLAN_FILTERING
177 struct net_port_vlans __rcu *vlan_info;
178#endif
159}; 179};
160 180
161#define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT) 181#define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
@@ -197,9 +217,6 @@ struct net_bridge
197 bool nf_call_ip6tables; 217 bool nf_call_ip6tables;
198 bool nf_call_arptables; 218 bool nf_call_arptables;
199#endif 219#endif
200 unsigned long flags;
201#define BR_SET_MAC_ADDR 0x00000001
202
203 u16 group_fwd_mask; 220 u16 group_fwd_mask;
204 221
205 /* STP */ 222 /* STP */
@@ -260,6 +277,10 @@ struct net_bridge
260 struct timer_list topology_change_timer; 277 struct timer_list topology_change_timer;
261 struct timer_list gc_timer; 278 struct timer_list gc_timer;
262 struct kobject *ifobj; 279 struct kobject *ifobj;
280#ifdef CONFIG_BRIDGE_VLAN_FILTERING
281 u8 vlan_enabled;
282 struct net_port_vlans __rcu *vlan_info;
283#endif
263}; 284};
264 285
265struct br_input_skb_cb { 286struct br_input_skb_cb {
@@ -355,18 +376,22 @@ extern void br_fdb_cleanup(unsigned long arg);
355extern void br_fdb_delete_by_port(struct net_bridge *br, 376extern void br_fdb_delete_by_port(struct net_bridge *br,
356 const struct net_bridge_port *p, int do_all); 377 const struct net_bridge_port *p, int do_all);
357extern struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br, 378extern struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
358 const unsigned char *addr); 379 const unsigned char *addr,
380 __u16 vid);
359extern int br_fdb_test_addr(struct net_device *dev, unsigned char *addr); 381extern int br_fdb_test_addr(struct net_device *dev, unsigned char *addr);
360extern int br_fdb_fillbuf(struct net_bridge *br, void *buf, 382extern int br_fdb_fillbuf(struct net_bridge *br, void *buf,
361 unsigned long count, unsigned long off); 383 unsigned long count, unsigned long off);
362extern int br_fdb_insert(struct net_bridge *br, 384extern int br_fdb_insert(struct net_bridge *br,
363 struct net_bridge_port *source, 385 struct net_bridge_port *source,
364 const unsigned char *addr); 386 const unsigned char *addr,
387 u16 vid);
365extern void br_fdb_update(struct net_bridge *br, 388extern void br_fdb_update(struct net_bridge *br,
366 struct net_bridge_port *source, 389 struct net_bridge_port *source,
367 const unsigned char *addr); 390 const unsigned char *addr,
391 u16 vid);
392extern int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vid);
368 393
369extern int br_fdb_delete(struct ndmsg *ndm, 394extern int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
370 struct net_device *dev, 395 struct net_device *dev,
371 const unsigned char *addr); 396 const unsigned char *addr);
372extern int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], 397extern int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[],
@@ -534,6 +559,142 @@ static inline void br_mdb_uninit(void)
534} 559}
535#endif 560#endif
536 561
562/* br_vlan.c */
563#ifdef CONFIG_BRIDGE_VLAN_FILTERING
564extern bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
565 struct sk_buff *skb, u16 *vid);
566extern bool br_allowed_egress(struct net_bridge *br,
567 const struct net_port_vlans *v,
568 const struct sk_buff *skb);
569extern struct sk_buff *br_handle_vlan(struct net_bridge *br,
570 const struct net_port_vlans *v,
571 struct sk_buff *skb);
572extern int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
573extern int br_vlan_delete(struct net_bridge *br, u16 vid);
574extern void br_vlan_flush(struct net_bridge *br);
575extern int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
576extern int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
577extern int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
578extern void nbp_vlan_flush(struct net_bridge_port *port);
579extern bool nbp_vlan_find(struct net_bridge_port *port, u16 vid);
580
581static inline struct net_port_vlans *br_get_vlan_info(
582 const struct net_bridge *br)
583{
584 return rcu_dereference_rtnl(br->vlan_info);
585}
586
587static inline struct net_port_vlans *nbp_get_vlan_info(
588 const struct net_bridge_port *p)
589{
590 return rcu_dereference_rtnl(p->vlan_info);
591}
592
593/* Since the bridge now depends on the 8021Q module, by the time the bridge
594 * sees the skb the vlan tag will already be present if the frame was tagged.
595 */
596static inline int br_vlan_get_tag(const struct sk_buff *skb, u16 *vid)
597{
598 int err = 0;
599
600 if (vlan_tx_tag_present(skb))
601 *vid = vlan_tx_tag_get(skb) & VLAN_VID_MASK;
602 else {
603 *vid = 0;
604 err = -EINVAL;
605 }
606
607 return err;
608}
609
610static inline u16 br_get_pvid(const struct net_port_vlans *v)
611{
612	/* Return just the VID if it is set, or VLAN_N_VID (an invalid vid) if
613	 * the pvid was not set
614 */
615 smp_rmb();
616 return (v->pvid & VLAN_TAG_PRESENT) ?
617 (v->pvid & ~VLAN_TAG_PRESENT) :
618 VLAN_N_VID;
619}
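
The pvid field doubles as its own validity flag: a configured pvid carries VLAN_TAG_PRESENT so that "pvid 0" stays distinguishable from "no pvid", and br_get_pvid() maps the unset case to VLAN_N_VID. A minimal userspace sketch of the encoding the getter expects (the constants are illustrative stand-ins for <linux/if_vlan.h>, and the setter here ORs the flag in explicitly):

#include <stdint.h>
#include <stdio.h>

#define TAG_PRESENT 0x1000	/* stand-in for VLAN_TAG_PRESENT */
#define VID_MASK    0x0fff	/* stand-in for VLAN_VID_MASK */
#define N_VID       4096	/* stand-in for VLAN_N_VID */

static uint16_t pvid;		/* zero-initialized: no pvid configured */

static void set_pvid(uint16_t vid)
{
	pvid = (vid & VID_MASK) | TAG_PRESENT;
}

static uint16_t get_pvid(void)
{
	/* mirrors br_get_pvid(): flag set -> vid, otherwise invalid */
	return (pvid & TAG_PRESENT) ? (pvid & ~TAG_PRESENT) : N_VID;
}

int main(void)
{
	printf("%u\n", get_pvid());	/* 4096: nothing configured */
	set_pvid(0);
	printf("%u\n", get_pvid());	/* 0: a real pvid of zero */
	set_pvid(100);
	printf("%u\n", get_pvid());	/* 100 */
	return 0;
}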
620
621#else
622static inline bool br_allowed_ingress(struct net_bridge *br,
623 struct net_port_vlans *v,
624 struct sk_buff *skb,
625 u16 *vid)
626{
627 return true;
628}
629
630static inline bool br_allowed_egress(struct net_bridge *br,
631 const struct net_port_vlans *v,
632 const struct sk_buff *skb)
633{
634 return true;
635}
636
637static inline struct sk_buff *br_handle_vlan(struct net_bridge *br,
638 const struct net_port_vlans *v,
639 struct sk_buff *skb)
640{
641 return skb;
642}
643
644static inline int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
645{
646 return -EOPNOTSUPP;
647}
648
649static inline int br_vlan_delete(struct net_bridge *br, u16 vid)
650{
651 return -EOPNOTSUPP;
652}
653
654static inline void br_vlan_flush(struct net_bridge *br)
655{
656}
657
658static inline int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
659{
660 return -EOPNOTSUPP;
661}
662
663static inline int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
664{
665 return -EOPNOTSUPP;
666}
667
668static inline void nbp_vlan_flush(struct net_bridge_port *port)
669{
670}
671
672static inline struct net_port_vlans *br_get_vlan_info(
673 const struct net_bridge *br)
674{
675 return NULL;
676}
677static inline struct net_port_vlans *nbp_get_vlan_info(
678 const struct net_bridge_port *p)
679{
680 return NULL;
681}
682
683static inline bool nbp_vlan_find(struct net_bridge_port *port, u16 vid)
684{
685 return false;
686}
687
688static inline u16 br_vlan_get_tag(const struct sk_buff *skb, u16 *tag)
689{
690 return 0;
691}
692static inline u16 br_get_pvid(const struct net_port_vlans *v)
693{
694 return VLAN_N_VID; /* Returns invalid vid */
695}
696#endif
697
537/* br_netfilter.c */ 698/* br_netfilter.c */
538#ifdef CONFIG_BRIDGE_NETFILTER 699#ifdef CONFIG_BRIDGE_NETFILTER
539extern int br_netfilter_init(void); 700extern int br_netfilter_init(void);
@@ -594,8 +755,9 @@ extern int br_netlink_init(void);
594extern void br_netlink_fini(void); 755extern void br_netlink_fini(void);
595extern void br_ifinfo_notify(int event, struct net_bridge_port *port); 756extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
596extern int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg); 757extern int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg);
758extern int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg);
597extern int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, 759extern int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
598 struct net_device *dev); 760 struct net_device *dev, u32 filter_mask);
599 761
600#ifdef CONFIG_SYSFS 762#ifdef CONFIG_SYSFS
601/* br_sysfs_if.c */ 763/* br_sysfs_if.c */
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 7f884e3fb955..8660ea3be705 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -16,6 +16,7 @@
16#include <linux/etherdevice.h> 16#include <linux/etherdevice.h>
17#include <linux/llc.h> 17#include <linux/llc.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/pkt_sched.h>
19#include <net/net_namespace.h> 20#include <net/net_namespace.h>
20#include <net/llc.h> 21#include <net/llc.h>
21#include <net/llc_pdu.h> 22#include <net/llc_pdu.h>
@@ -40,6 +41,7 @@ static void br_send_bpdu(struct net_bridge_port *p,
40 41
41 skb->dev = p->dev; 42 skb->dev = p->dev;
42 skb->protocol = htons(ETH_P_802_2); 43 skb->protocol = htons(ETH_P_802_2);
44 skb->priority = TC_PRIO_CONTROL;
43 45
44 skb_reserve(skb, LLC_RESERVE); 46 skb_reserve(skb, LLC_RESERVE);
45 memcpy(__skb_put(skb, length), data, length); 47 memcpy(__skb_put(skb, length), data, length);
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 7b5197c7de13..0bdb4ebd362b 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -216,7 +216,7 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br)
216 struct net_bridge_port *p; 216 struct net_bridge_port *p;
217 217
218 /* user has chosen a value so keep it */ 218 /* user has chosen a value so keep it */
219 if (br->flags & BR_SET_MAC_ADDR) 219 if (br->dev->addr_assign_type == NET_ADDR_SET)
220 return false; 220 return false;
221 221
222 list_for_each_entry(p, &br->port_list, list) { 222 list_for_each_entry(p, &br->port_list, list) {
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 5913a3a0047b..8baa9c08e1a4 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -691,6 +691,24 @@ static ssize_t store_nf_call_arptables(
691static DEVICE_ATTR(nf_call_arptables, S_IRUGO | S_IWUSR, 691static DEVICE_ATTR(nf_call_arptables, S_IRUGO | S_IWUSR,
692 show_nf_call_arptables, store_nf_call_arptables); 692 show_nf_call_arptables, store_nf_call_arptables);
693#endif 693#endif
694#ifdef CONFIG_BRIDGE_VLAN_FILTERING
695static ssize_t show_vlan_filtering(struct device *d,
696 struct device_attribute *attr,
697 char *buf)
698{
699 struct net_bridge *br = to_bridge(d);
700 return sprintf(buf, "%d\n", br->vlan_enabled);
701}
702
703static ssize_t store_vlan_filtering(struct device *d,
704 struct device_attribute *attr,
705 const char *buf, size_t len)
706{
707 return store_bridge_parm(d, buf, len, br_vlan_filter_toggle);
708}
709static DEVICE_ATTR(vlan_filtering, S_IRUGO | S_IWUSR,
710 show_vlan_filtering, store_vlan_filtering);
711#endif
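
Once this attribute exists, userspace can flip filtering without any netlink plumbing. A small sketch, assuming the standard per-bridge sysfs group and a bridge named br0 (both assumptions, not part of this hunk):

#include <stdio.h>

int main(void)
{
	/* bridge attributes live under the device's "bridge" group */
	FILE *f = fopen("/sys/class/net/br0/bridge/vlan_filtering", "w");

	if (!f) {
		perror("vlan_filtering");
		return 1;
	}
	fputs("1\n", f);	/* store_vlan_filtering() -> br_vlan_filter_toggle(br, 1) */
	return fclose(f) ? 1 : 0;
}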
694 712
695static struct attribute *bridge_attrs[] = { 713static struct attribute *bridge_attrs[] = {
696 &dev_attr_forward_delay.attr, 714 &dev_attr_forward_delay.attr,
@@ -732,6 +750,9 @@ static struct attribute *bridge_attrs[] = {
732 &dev_attr_nf_call_ip6tables.attr, 750 &dev_attr_nf_call_ip6tables.attr,
733 &dev_attr_nf_call_arptables.attr, 751 &dev_attr_nf_call_arptables.attr,
734#endif 752#endif
753#ifdef CONFIG_BRIDGE_VLAN_FILTERING
754 &dev_attr_vlan_filtering.attr,
755#endif
735 NULL 756 NULL
736}; 757};
737 758
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
new file mode 100644
index 000000000000..93dde75923f0
--- /dev/null
+++ b/net/bridge/br_vlan.c
@@ -0,0 +1,415 @@
1#include <linux/kernel.h>
2#include <linux/netdevice.h>
3#include <linux/rtnetlink.h>
4#include <linux/slab.h>
5
6#include "br_private.h"
7
8static void __vlan_add_pvid(struct net_port_vlans *v, u16 vid)
9{
10 if (v->pvid == vid)
11 return;
12
13 smp_wmb();
14 v->pvid = vid;
15}
16
17static void __vlan_delete_pvid(struct net_port_vlans *v, u16 vid)
18{
19 if (v->pvid != vid)
20 return;
21
22 smp_wmb();
23 v->pvid = 0;
24}
25
26static void __vlan_add_flags(struct net_port_vlans *v, u16 vid, u16 flags)
27{
28 if (flags & BRIDGE_VLAN_INFO_PVID)
29 __vlan_add_pvid(v, vid);
30
31 if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
32 set_bit(vid, v->untagged_bitmap);
33}
34
35static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
36{
37 struct net_bridge_port *p = NULL;
38 struct net_bridge *br;
39 struct net_device *dev;
40 int err;
41
42 if (test_bit(vid, v->vlan_bitmap)) {
43 __vlan_add_flags(v, vid, flags);
44 return 0;
45 }
46
47 if (vid) {
48 if (v->port_idx) {
49 p = v->parent.port;
50 br = p->br;
51 dev = p->dev;
52 } else {
53 br = v->parent.br;
54 dev = br->dev;
55 }
56
57 if (p && (dev->features & NETIF_F_HW_VLAN_FILTER)) {
58 /* Add VLAN to the device filter if it is supported.
59		 * Strictly speaking, this is not necessary now, since
60 * devices are made promiscuous by the bridge, but if
61 * that ever changes this code will allow tagged
62 * traffic to enter the bridge.
63 */
64 err = dev->netdev_ops->ndo_vlan_rx_add_vid(dev, vid);
65 if (err)
66 return err;
67 }
68
69 err = br_fdb_insert(br, p, dev->dev_addr, vid);
70 if (err) {
71			br_err(br, "failed to insert local address into bridge "
72 "forwarding table\n");
73 goto out_filt;
74 }
75
76 }
77
78 set_bit(vid, v->vlan_bitmap);
79 v->num_vlans++;
80 __vlan_add_flags(v, vid, flags);
81
82 return 0;
83
84out_filt:
85 if (p && (dev->features & NETIF_F_HW_VLAN_FILTER))
86 dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, vid);
87 return err;
88}
89
90static int __vlan_del(struct net_port_vlans *v, u16 vid)
91{
92 if (!test_bit(vid, v->vlan_bitmap))
93 return -EINVAL;
94
95 __vlan_delete_pvid(v, vid);
96 clear_bit(vid, v->untagged_bitmap);
97
98 if (v->port_idx && vid) {
99 struct net_device *dev = v->parent.port->dev;
100
101 if (dev->features & NETIF_F_HW_VLAN_FILTER)
102 dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, vid);
103 }
104
105 clear_bit(vid, v->vlan_bitmap);
106 v->num_vlans--;
107 if (bitmap_empty(v->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
108 if (v->port_idx)
109 rcu_assign_pointer(v->parent.port->vlan_info, NULL);
110 else
111 rcu_assign_pointer(v->parent.br->vlan_info, NULL);
112 kfree_rcu(v, rcu);
113 }
114 return 0;
115}
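
Membership is a 4096-bit bitmap plus a counter; delete clears the bit and, once the map empties, the whole structure is unhooked and freed via kfree_rcu(). A self-contained sketch of the same bookkeeping (plain userspace C, no RCU, names are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define N_VID 4096

struct port_vlans {
	uint64_t map[N_VID / 64];
	unsigned int num_vlans;
};

static bool vlan_test(const struct port_vlans *v, uint16_t vid)
{
	return v->map[vid / 64] & (1ULL << (vid % 64));
}

static void vlan_add(struct port_vlans *v, uint16_t vid)
{
	if (!vlan_test(v, vid)) {
		v->map[vid / 64] |= 1ULL << (vid % 64);
		v->num_vlans++;
	}
}

/* Returns true when the map empties, the point at which __vlan_del()
 * unhooks vlan_info and frees it via kfree_rcu(). */
static bool vlan_del(struct port_vlans *v, uint16_t vid)
{
	if (vlan_test(v, vid)) {
		v->map[vid / 64] &= ~(1ULL << (vid % 64));
		v->num_vlans--;
	}
	return v->num_vlans == 0;
}

int main(void)
{
	static struct port_vlans v;

	vlan_add(&v, 100);
	vlan_add(&v, 200);
	printf("%d\n", vlan_del(&v, 100));	/* 0: one vid left */
	printf("%d\n", vlan_del(&v, 200));	/* 1: time to free */
	return 0;
}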
116
117static void __vlan_flush(struct net_port_vlans *v)
118{
119 smp_wmb();
120 v->pvid = 0;
121 bitmap_zero(v->vlan_bitmap, BR_VLAN_BITMAP_LEN);
122 if (v->port_idx)
123 rcu_assign_pointer(v->parent.port->vlan_info, NULL);
124 else
125 rcu_assign_pointer(v->parent.br->vlan_info, NULL);
126 kfree_rcu(v, rcu);
127}
128
129/* Strip the tag from the packet. Will return the skb with tci set to 0. */
130static struct sk_buff *br_vlan_untag(struct sk_buff *skb)
131{
132 if (skb->protocol != htons(ETH_P_8021Q)) {
133 skb->vlan_tci = 0;
134 return skb;
135 }
136
137 skb->vlan_tci = 0;
138 skb = vlan_untag(skb);
139 if (skb)
140 skb->vlan_tci = 0;
141
142 return skb;
143}
144
145struct sk_buff *br_handle_vlan(struct net_bridge *br,
146 const struct net_port_vlans *pv,
147 struct sk_buff *skb)
148{
149 u16 vid;
150
151 if (!br->vlan_enabled)
152 goto out;
153
154 /* At this point, we know that the frame was filtered and contains
155 * a valid vlan id. If the vlan id is set in the untagged bitmap,
156	 * send untagged; otherwise, send tagged.
157 */
158 br_vlan_get_tag(skb, &vid);
159 if (test_bit(vid, pv->untagged_bitmap))
160 skb = br_vlan_untag(skb);
161 else {
162 /* Egress policy says "send tagged". If output device
163 * is the bridge, we need to add the VLAN header
164 * ourselves since we'll be going through the RX path.
165 * Sending to ports puts the frame on the TX path and
166 * we let dev_hard_start_xmit() add the header.
167 */
168 if (skb->protocol != htons(ETH_P_8021Q) &&
169 pv->port_idx == 0) {
170 /* vlan_put_tag expects skb->data to point to
171 * mac header.
172 */
173 skb_push(skb, ETH_HLEN);
174 skb = __vlan_put_tag(skb, skb->vlan_tci);
175 if (!skb)
176 goto out;
177 /* put skb->data back to where it was */
178 skb_pull(skb, ETH_HLEN);
179 skb->vlan_tci = 0;
180 }
181 }
182
183out:
184 return skb;
185}
186
187/* Called under RCU */
188bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
189 struct sk_buff *skb, u16 *vid)
190{
191 /* If VLAN filtering is disabled on the bridge, all packets are
192 * permitted.
193 */
194 if (!br->vlan_enabled)
195 return true;
196
197	/* If there are no vlans in the permitted list, all packets are
198 * rejected.
199 */
200 if (!v)
201 return false;
202
203 if (br_vlan_get_tag(skb, vid)) {
204 u16 pvid = br_get_pvid(v);
205
206 /* Frame did not have a tag. See if pvid is set
207 * on this port. That tells us which vlan untagged
208 * traffic belongs to.
209 */
210 if (pvid == VLAN_N_VID)
211 return false;
212
213 /* PVID is set on this port. Any untagged ingress
214 * frame is considered to belong to this vlan.
215 */
216 __vlan_hwaccel_put_tag(skb, pvid);
217 return true;
218 }
219
220 /* Frame had a valid vlan tag. See if vlan is allowed */
221 if (test_bit(*vid, v->vlan_bitmap))
222 return true;
223
224 return false;
225}
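
The ingress check therefore collapses to four cases, in this order: filtering off (accept), no vlan_info (drop), untagged (accept only if a pvid can be adopted), tagged (bitmap lookup). A condensed sketch of that ordering, where 'tagged' and 'vid' stand in for the br_vlan_get_tag() result and 'pvid' for br_get_pvid():

#include <stdbool.h>
#include <stdint.h>

#define N_VID 4096

static bool bit_test(const uint64_t *map, uint16_t vid)
{
	return map[vid / 64] & (1ULL << (vid % 64));
}

static bool allowed_ingress(bool vlan_enabled, const uint64_t *map,
			    bool tagged, uint16_t vid, uint16_t pvid)
{
	if (!vlan_enabled)
		return true;		/* filtering disabled: accept all */
	if (!map)
		return false;		/* empty permit list: drop all */
	if (!tagged)
		return pvid != N_VID;	/* untagged frames adopt the pvid */
	return bit_test(map, vid);	/* tagged: must be a member */
}

int main(void)
{
	static uint64_t map[N_VID / 64] = { [1] = 1ULL << 36 };	/* vid 100 */

	return !(allowed_ingress(true, map, true, 100, N_VID) &&
		 !allowed_ingress(true, map, false, 0, N_VID));
}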
226
227/* Called under RCU. */
228bool br_allowed_egress(struct net_bridge *br,
229 const struct net_port_vlans *v,
230 const struct sk_buff *skb)
231{
232 u16 vid;
233
234 if (!br->vlan_enabled)
235 return true;
236
237 if (!v)
238 return false;
239
240 br_vlan_get_tag(skb, &vid);
241 if (test_bit(vid, v->vlan_bitmap))
242 return true;
243
244 return false;
245}
246
247/* Must be protected by RTNL */
248int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
249{
250 struct net_port_vlans *pv = NULL;
251 int err;
252
253 ASSERT_RTNL();
254
255 pv = rtnl_dereference(br->vlan_info);
256 if (pv)
257 return __vlan_add(pv, vid, flags);
258
259	/* Create port vlan information
260 */
261 pv = kzalloc(sizeof(*pv), GFP_KERNEL);
262 if (!pv)
263 return -ENOMEM;
264
265 pv->parent.br = br;
266 err = __vlan_add(pv, vid, flags);
267 if (err)
268 goto out;
269
270 rcu_assign_pointer(br->vlan_info, pv);
271 return 0;
272out:
273 kfree(pv);
274 return err;
275}
276
277/* Must be protected by RTNL */
278int br_vlan_delete(struct net_bridge *br, u16 vid)
279{
280 struct net_port_vlans *pv;
281
282 ASSERT_RTNL();
283
284 pv = rtnl_dereference(br->vlan_info);
285 if (!pv)
286 return -EINVAL;
287
288 if (vid) {
289		/* If the VID != 0, remove the fdb entry for this vid. VID 0 is special
290 * in that it's the default and is always there in the fdb.
291 */
292 spin_lock_bh(&br->hash_lock);
293 fdb_delete_by_addr(br, br->dev->dev_addr, vid);
294 spin_unlock_bh(&br->hash_lock);
295 }
296
297 __vlan_del(pv, vid);
298 return 0;
299}
300
301void br_vlan_flush(struct net_bridge *br)
302{
303 struct net_port_vlans *pv;
304
305 ASSERT_RTNL();
306 pv = rtnl_dereference(br->vlan_info);
307 if (!pv)
308 return;
309
310 __vlan_flush(pv);
311}
312
313int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
314{
315 if (!rtnl_trylock())
316 return restart_syscall();
317
318 if (br->vlan_enabled == val)
319 goto unlock;
320
321 br->vlan_enabled = val;
322
323unlock:
324 rtnl_unlock();
325 return 0;
326}
327
328/* Must be protected by RTNL */
329int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
330{
331 struct net_port_vlans *pv = NULL;
332 int err;
333
334 ASSERT_RTNL();
335
336 pv = rtnl_dereference(port->vlan_info);
337 if (pv)
338 return __vlan_add(pv, vid, flags);
339
340	/* Create port vlan information
341 */
342 pv = kzalloc(sizeof(*pv), GFP_KERNEL);
343 if (!pv) {
344 err = -ENOMEM;
345 goto clean_up;
346 }
347
348 pv->port_idx = port->port_no;
349 pv->parent.port = port;
350 err = __vlan_add(pv, vid, flags);
351 if (err)
352 goto clean_up;
353
354 rcu_assign_pointer(port->vlan_info, pv);
355 return 0;
356
357clean_up:
358 kfree(pv);
359 return err;
360}
361
362/* Must be protected by RTNL */
363int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
364{
365 struct net_port_vlans *pv;
366
367 ASSERT_RTNL();
368
369 pv = rtnl_dereference(port->vlan_info);
370 if (!pv)
371 return -EINVAL;
372
373 if (vid) {
374		/* If the VID != 0, remove the fdb entry for this vid. VID 0 is special
375 * in that it's the default and is always there in the fdb.
376 */
377 spin_lock_bh(&port->br->hash_lock);
378 fdb_delete_by_addr(port->br, port->dev->dev_addr, vid);
379 spin_unlock_bh(&port->br->hash_lock);
380 }
381
382 return __vlan_del(pv, vid);
383}
384
385void nbp_vlan_flush(struct net_bridge_port *port)
386{
387 struct net_port_vlans *pv;
388
389 ASSERT_RTNL();
390
391 pv = rtnl_dereference(port->vlan_info);
392 if (!pv)
393 return;
394
395 __vlan_flush(pv);
396}
397
398bool nbp_vlan_find(struct net_bridge_port *port, u16 vid)
399{
400 struct net_port_vlans *pv;
401 bool found = false;
402
403 rcu_read_lock();
404 pv = rcu_dereference(port->vlan_info);
405
406 if (!pv)
407 goto out;
408
409 if (test_bit(vid, pv->vlan_bitmap))
410 found = true;
411
412out:
413 rcu_read_unlock();
414 return found;
415}
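
nbp_vlan_find() is the one pure reader in this file: the vlan_info pointer is published with rcu_assign_pointer() and consumed under rcu_read_lock(). A loose userspace analogue using C11 atomics, where an acquire load stands in for rcu_dereference(); real RCU also defers the free, which this sketch does not model:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#define N_VID 4096

struct port_vlans {
	uint64_t map[N_VID / 64];
};

static _Atomic(struct port_vlans *) vlan_info;

static bool vlan_find(uint16_t vid)
{
	/* acquire pairs with the release store that published the pointer */
	struct port_vlans *pv =
		atomic_load_explicit(&vlan_info, memory_order_acquire);

	if (!pv)
		return false;
	return pv->map[vid / 64] & (1ULL << (vid % 64));
}

int main(void)
{
	struct port_vlans *pv = calloc(1, sizeof(*pv));

	if (!pv)
		return 1;
	pv->map[100 / 64] |= 1ULL << (100 % 64);
	/* release store: the analogue of rcu_assign_pointer() */
	atomic_store_explicit(&vlan_info, pv, memory_order_release);
	return !vlan_find(100);
}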
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 28e12d18f0f1..5dcb20076f39 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1633,7 +1633,7 @@ static void __exit bcm_module_exit(void)
1633 can_proto_unregister(&bcm_can_proto); 1633 can_proto_unregister(&bcm_can_proto);
1634 1634
1635 if (proc_dir) 1635 if (proc_dir)
1636 proc_net_remove(&init_net, "can-bcm"); 1636 remove_proc_entry("can-bcm", init_net.proc_net);
1637} 1637}
1638 1638
1639module_init(bcm_module_init); 1639module_init(bcm_module_init);
diff --git a/net/can/proc.c b/net/can/proc.c
index ae566902d2bf..497335892146 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -531,5 +531,5 @@ void can_remove_proc(void)
531 can_remove_proc_readentry(CAN_PROC_RCVLIST_SFF); 531 can_remove_proc_readentry(CAN_PROC_RCVLIST_SFF);
532 532
533 if (can_dir) 533 if (can_dir)
534 proc_net_remove(&init_net, "can"); 534 remove_proc_entry("can", init_net.proc_net);
535} 535}
diff --git a/net/core/Makefile b/net/core/Makefile
index 674641b13aea..0c5e3618c80b 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
9 9
10obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \ 10obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
11 neighbour.o rtnetlink.o utils.o link_watch.o filter.o \ 11 neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
12 sock_diag.o 12 sock_diag.o dev_ioctl.o
13 13
14obj-$(CONFIG_XFRM) += flow.o 14obj-$(CONFIG_XFRM) += flow.o
15obj-y += net-sysfs.o 15obj-y += net-sysfs.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 0337e2b76862..368f9c3f9dc6 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -187,7 +187,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
187 skb_queue_walk(queue, skb) { 187 skb_queue_walk(queue, skb) {
188 *peeked = skb->peeked; 188 *peeked = skb->peeked;
189 if (flags & MSG_PEEK) { 189 if (flags & MSG_PEEK) {
190 if (*off >= skb->len) { 190 if (*off >= skb->len && skb->len) {
191 *off -= skb->len; 191 *off -= skb->len;
192 continue; 192 continue;
193 } 193 }
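
The added '&& skb->len' guard matters for zero-length datagrams: the old test '*off >= skb->len' was always true for them, so a peek with an offset stepped over empty skbs instead of returning them, and a zero-length datagram could never be peeked. A sketch of the corrected walk:

#include <stdio.h>

/* Returns the index of the skb a peek at offset 'off' lands on, or -1.
 * Only non-empty skbs consume offset; empty ones are returned. */
static int peek_index(const int *len, int n, int off)
{
	for (int i = 0; i < n; i++) {
		if (off >= len[i] && len[i]) {	/* skip only non-empty skbs */
			off -= len[i];
			continue;
		}
		return i;
	}
	return -1;
}

int main(void)
{
	int q[] = { 100, 0, 50 };

	printf("%d\n", peek_index(q, 3, 100));	/* 1: the empty datagram */
	printf("%d\n", peek_index(q, 3, 0));	/* 0 */
	return 0;
}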
diff --git a/net/core/dev.c b/net/core/dev.c
index a87bc74e9fd0..decf55f9ad80 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -106,7 +106,6 @@
106#include <net/xfrm.h> 106#include <net/xfrm.h>
107#include <linux/highmem.h> 107#include <linux/highmem.h>
108#include <linux/init.h> 108#include <linux/init.h>
109#include <linux/kmod.h>
110#include <linux/module.h> 109#include <linux/module.h>
111#include <linux/netpoll.h> 110#include <linux/netpoll.h>
112#include <linux/rcupdate.h> 111#include <linux/rcupdate.h>
@@ -132,7 +131,6 @@
132#include <linux/pci.h> 131#include <linux/pci.h>
133#include <linux/inetdevice.h> 132#include <linux/inetdevice.h>
134#include <linux/cpu_rmap.h> 133#include <linux/cpu_rmap.h>
135#include <linux/net_tstamp.h>
136#include <linux/static_key.h> 134#include <linux/static_key.h>
137 135
138#include "net-sysfs.h" 136#include "net-sysfs.h"
@@ -1226,36 +1224,6 @@ void netdev_notify_peers(struct net_device *dev)
1226} 1224}
1227EXPORT_SYMBOL(netdev_notify_peers); 1225EXPORT_SYMBOL(netdev_notify_peers);
1228 1226
1229/**
1230 * dev_load - load a network module
1231 * @net: the applicable net namespace
1232 * @name: name of interface
1233 *
1234 * If a network interface is not present and the process has suitable
1235 * privileges this function loads the module. If module loading is not
1236 * available in this kernel then it becomes a nop.
1237 */
1238
1239void dev_load(struct net *net, const char *name)
1240{
1241 struct net_device *dev;
1242 int no_module;
1243
1244 rcu_read_lock();
1245 dev = dev_get_by_name_rcu(net, name);
1246 rcu_read_unlock();
1247
1248 no_module = !dev;
1249 if (no_module && capable(CAP_NET_ADMIN))
1250 no_module = request_module("netdev-%s", name);
1251 if (no_module && capable(CAP_SYS_MODULE)) {
1252 if (!request_module("%s", name))
1253 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
1254 name);
1255 }
1256}
1257EXPORT_SYMBOL(dev_load);
1258
1259static int __dev_open(struct net_device *dev) 1227static int __dev_open(struct net_device *dev)
1260{ 1228{
1261 const struct net_device_ops *ops = dev->netdev_ops; 1229 const struct net_device_ops *ops = dev->netdev_ops;
@@ -1266,6 +1234,14 @@ static int __dev_open(struct net_device *dev)
1266 if (!netif_device_present(dev)) 1234 if (!netif_device_present(dev))
1267 return -ENODEV; 1235 return -ENODEV;
1268 1236
1237 /* Block netpoll from trying to do any rx path servicing.
1238 * If we don't do this there is a chance ndo_poll_controller
1239 * or ndo_poll may be running while we open the device
1240 */
1241 ret = netpoll_rx_disable(dev);
1242 if (ret)
1243 return ret;
1244
1269 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev); 1245 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1270 ret = notifier_to_errno(ret); 1246 ret = notifier_to_errno(ret);
1271 if (ret) 1247 if (ret)
@@ -1279,6 +1255,8 @@ static int __dev_open(struct net_device *dev)
1279 if (!ret && ops->ndo_open) 1255 if (!ret && ops->ndo_open)
1280 ret = ops->ndo_open(dev); 1256 ret = ops->ndo_open(dev);
1281 1257
1258 netpoll_rx_enable(dev);
1259
1282 if (ret) 1260 if (ret)
1283 clear_bit(__LINK_STATE_START, &dev->state); 1261 clear_bit(__LINK_STATE_START, &dev->state);
1284 else { 1262 else {
@@ -1370,9 +1348,16 @@ static int __dev_close(struct net_device *dev)
1370 int retval; 1348 int retval;
1371 LIST_HEAD(single); 1349 LIST_HEAD(single);
1372 1350
1351 /* Temporarily disable netpoll until the interface is down */
1352 retval = netpoll_rx_disable(dev);
1353 if (retval)
1354 return retval;
1355
1373 list_add(&dev->unreg_list, &single); 1356 list_add(&dev->unreg_list, &single);
1374 retval = __dev_close_many(&single); 1357 retval = __dev_close_many(&single);
1375 list_del(&single); 1358 list_del(&single);
1359
1360 netpoll_rx_enable(dev);
1376 return retval; 1361 return retval;
1377} 1362}
1378 1363
@@ -1408,14 +1393,22 @@ static int dev_close_many(struct list_head *head)
1408 */ 1393 */
1409int dev_close(struct net_device *dev) 1394int dev_close(struct net_device *dev)
1410{ 1395{
1396 int ret = 0;
1411 if (dev->flags & IFF_UP) { 1397 if (dev->flags & IFF_UP) {
1412 LIST_HEAD(single); 1398 LIST_HEAD(single);
1413 1399
1400 /* Block netpoll rx while the interface is going down */
1401 ret = netpoll_rx_disable(dev);
1402 if (ret)
1403 return ret;
1404
1414 list_add(&dev->unreg_list, &single); 1405 list_add(&dev->unreg_list, &single);
1415 dev_close_many(&single); 1406 dev_close_many(&single);
1416 list_del(&single); 1407 list_del(&single);
1408
1409 netpoll_rx_enable(dev);
1417 } 1410 }
1418 return 0; 1411 return ret;
1419} 1412}
1420EXPORT_SYMBOL(dev_close); 1413EXPORT_SYMBOL(dev_close);
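
All three paths now use the same bracketing: quiesce netpoll's rx servicing, perform the state change, re-arm. A stub sketch of the pattern; rx_disable()/rx_enable() are stand-ins for netpoll_rx_disable()/netpoll_rx_enable(), not real kernel calls:

#include <stdio.h>

struct dev { int rx_blocked; };

static int rx_disable(struct dev *d) { d->rx_blocked = 1; return 0; }
static void rx_enable(struct dev *d) { d->rx_blocked = 0; }

/* Bracket a state change so nothing services the rx path mid-flight;
 * note rx is re-enabled even when the change itself fails. */
static int guarded(struct dev *d, int (*change)(struct dev *))
{
	int err = rx_disable(d);

	if (err)
		return err;
	err = change(d);
	rx_enable(d);
	return err;
}

static int open_dev(struct dev *d) { (void)d; return 0; /* __dev_open() body */ }

int main(void)
{
	struct dev d = { 0 };

	return guarded(&d, open_dev);
}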
1421 1414
@@ -1620,57 +1613,6 @@ static inline void net_timestamp_set(struct sk_buff *skb)
1620 __net_timestamp(SKB); \ 1613 __net_timestamp(SKB); \
1621 } \ 1614 } \
1622 1615
1623static int net_hwtstamp_validate(struct ifreq *ifr)
1624{
1625 struct hwtstamp_config cfg;
1626 enum hwtstamp_tx_types tx_type;
1627 enum hwtstamp_rx_filters rx_filter;
1628 int tx_type_valid = 0;
1629 int rx_filter_valid = 0;
1630
1631 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1632 return -EFAULT;
1633
1634 if (cfg.flags) /* reserved for future extensions */
1635 return -EINVAL;
1636
1637 tx_type = cfg.tx_type;
1638 rx_filter = cfg.rx_filter;
1639
1640 switch (tx_type) {
1641 case HWTSTAMP_TX_OFF:
1642 case HWTSTAMP_TX_ON:
1643 case HWTSTAMP_TX_ONESTEP_SYNC:
1644 tx_type_valid = 1;
1645 break;
1646 }
1647
1648 switch (rx_filter) {
1649 case HWTSTAMP_FILTER_NONE:
1650 case HWTSTAMP_FILTER_ALL:
1651 case HWTSTAMP_FILTER_SOME:
1652 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1653 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1654 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1655 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1656 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1657 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1658 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1659 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1660 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1661 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1662 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1663 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1664 rx_filter_valid = 1;
1665 break;
1666 }
1667
1668 if (!tx_type_valid || !rx_filter_valid)
1669 return -ERANGE;
1670
1671 return 0;
1672}
1673
1674static inline bool is_skb_forwardable(struct net_device *dev, 1616static inline bool is_skb_forwardable(struct net_device *dev,
1675 struct sk_buff *skb) 1617 struct sk_buff *skb)
1676{ 1618{
@@ -2303,25 +2245,19 @@ out:
2303EXPORT_SYMBOL(skb_checksum_help); 2245EXPORT_SYMBOL(skb_checksum_help);
2304 2246
2305/** 2247/**
2306 * skb_gso_segment - Perform segmentation on skb. 2248 * skb_mac_gso_segment - mac layer segmentation handler.
2307 * @skb: buffer to segment 2249 * @skb: buffer to segment
2308 * @features: features for the output path (see dev->features) 2250 * @features: features for the output path (see dev->features)
2309 *
2310 * This function segments the given skb and returns a list of segments.
2311 *
2312 * It may return NULL if the skb requires no segmentation. This is
2313 * only possible when GSO is used for verifying header integrity.
2314 */ 2251 */
2315struct sk_buff *skb_gso_segment(struct sk_buff *skb, 2252struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2316 netdev_features_t features) 2253 netdev_features_t features)
2317{ 2254{
2318 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 2255 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2319 struct packet_offload *ptype; 2256 struct packet_offload *ptype;
2320 __be16 type = skb->protocol; 2257 __be16 type = skb->protocol;
2321 int vlan_depth = ETH_HLEN;
2322 int err;
2323 2258
2324 while (type == htons(ETH_P_8021Q)) { 2259 while (type == htons(ETH_P_8021Q)) {
2260 int vlan_depth = ETH_HLEN;
2325 struct vlan_hdr *vh; 2261 struct vlan_hdr *vh;
2326 2262
2327 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) 2263 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
@@ -2332,22 +2268,14 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb,
2332 vlan_depth += VLAN_HLEN; 2268 vlan_depth += VLAN_HLEN;
2333 } 2269 }
2334 2270
2335 skb_reset_mac_header(skb);
2336 skb->mac_len = skb->network_header - skb->mac_header;
2337 __skb_pull(skb, skb->mac_len); 2271 __skb_pull(skb, skb->mac_len);
2338 2272
2339 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2340 skb_warn_bad_offload(skb);
2341
2342 if (skb_header_cloned(skb) &&
2343 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2344 return ERR_PTR(err);
2345 }
2346
2347 rcu_read_lock(); 2273 rcu_read_lock();
2348 list_for_each_entry_rcu(ptype, &offload_base, list) { 2274 list_for_each_entry_rcu(ptype, &offload_base, list) {
2349 if (ptype->type == type && ptype->callbacks.gso_segment) { 2275 if (ptype->type == type && ptype->callbacks.gso_segment) {
2350 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { 2276 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2277 int err;
2278
2351 err = ptype->callbacks.gso_send_check(skb); 2279 err = ptype->callbacks.gso_send_check(skb);
2352 segs = ERR_PTR(err); 2280 segs = ERR_PTR(err);
2353 if (err || skb_gso_ok(skb, features)) 2281 if (err || skb_gso_ok(skb, features))
@@ -2365,7 +2293,50 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb,
2365 2293
2366 return segs; 2294 return segs;
2367} 2295}
2368EXPORT_SYMBOL(skb_gso_segment); 2296EXPORT_SYMBOL(skb_mac_gso_segment);
2297
2298
2299/* openvswitch calls this on the rx path, so we need a different check.
2300 */
2301static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2302{
2303 if (tx_path)
2304 return skb->ip_summed != CHECKSUM_PARTIAL;
2305 else
2306 return skb->ip_summed == CHECKSUM_NONE;
2307}
2308
2309/**
2310 * __skb_gso_segment - Perform segmentation on skb.
2311 * @skb: buffer to segment
2312 * @features: features for the output path (see dev->features)
2313 * @tx_path: whether it is called in TX path
2314 *
2315 * This function segments the given skb and returns a list of segments.
2316 *
2317 * It may return NULL if the skb requires no segmentation. This is
2318 * only possible when GSO is used for verifying header integrity.
2319 */
2320struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2321 netdev_features_t features, bool tx_path)
2322{
2323 if (unlikely(skb_needs_check(skb, tx_path))) {
2324 int err;
2325
2326 skb_warn_bad_offload(skb);
2327
2328 if (skb_header_cloned(skb) &&
2329 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2330 return ERR_PTR(err);
2331 }
2332
2333 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
2334 skb_reset_mac_header(skb);
2335 skb_reset_mac_len(skb);
2336
2337 return skb_mac_gso_segment(skb, features);
2338}
2339EXPORT_SYMBOL(__skb_gso_segment);
2369 2340
2370/* Take action when hardware reception checksum errors are detected. */ 2341/* Take action when hardware reception checksum errors are detected. */
2371#ifdef CONFIG_BUG 2342#ifdef CONFIG_BUG
@@ -2799,6 +2770,8 @@ int dev_queue_xmit(struct sk_buff *skb)
2799 struct Qdisc *q; 2770 struct Qdisc *q;
2800 int rc = -ENOMEM; 2771 int rc = -ENOMEM;
2801 2772
2773 skb_reset_mac_header(skb);
2774
2802 /* Disable soft irqs for various locks below. Also 2775 /* Disable soft irqs for various locks below. Also
2803 * stops preemption for RCU. 2776 * stops preemption for RCU.
2804 */ 2777 */
@@ -3419,7 +3392,7 @@ static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3419 } 3392 }
3420} 3393}
3421 3394
3422static int __netif_receive_skb(struct sk_buff *skb) 3395static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
3423{ 3396{
3424 struct packet_type *ptype, *pt_prev; 3397 struct packet_type *ptype, *pt_prev;
3425 rx_handler_func_t *rx_handler; 3398 rx_handler_func_t *rx_handler;
@@ -3428,24 +3401,11 @@ static int __netif_receive_skb(struct sk_buff *skb)
3428 bool deliver_exact = false; 3401 bool deliver_exact = false;
3429 int ret = NET_RX_DROP; 3402 int ret = NET_RX_DROP;
3430 __be16 type; 3403 __be16 type;
3431 unsigned long pflags = current->flags;
3432 3404
3433 net_timestamp_check(!netdev_tstamp_prequeue, skb); 3405 net_timestamp_check(!netdev_tstamp_prequeue, skb);
3434 3406
3435 trace_netif_receive_skb(skb); 3407 trace_netif_receive_skb(skb);
3436 3408
3437 /*
3438 * PFMEMALLOC skbs are special, they should
3439 * - be delivered to SOCK_MEMALLOC sockets only
3440 * - stay away from userspace
3441 * - have bounded memory usage
3442 *
3443 * Use PF_MEMALLOC as this saves us from propagating the allocation
3444 * context down to all allocation sites.
3445 */
3446 if (sk_memalloc_socks() && skb_pfmemalloc(skb))
3447 current->flags |= PF_MEMALLOC;
3448
3449 /* if we've gotten here through NAPI, check netpoll */ 3409 /* if we've gotten here through NAPI, check netpoll */
3450 if (netpoll_receive_skb(skb)) 3410 if (netpoll_receive_skb(skb))
3451 goto out; 3411 goto out;
@@ -3479,7 +3439,7 @@ another_round:
3479 } 3439 }
3480#endif 3440#endif
3481 3441
3482 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) 3442 if (pfmemalloc)
3483 goto skip_taps; 3443 goto skip_taps;
3484 3444
3485 list_for_each_entry_rcu(ptype, &ptype_all, list) { 3445 list_for_each_entry_rcu(ptype, &ptype_all, list) {
@@ -3498,8 +3458,7 @@ skip_taps:
3498ncls: 3458ncls:
3499#endif 3459#endif
3500 3460
3501 if (sk_memalloc_socks() && skb_pfmemalloc(skb) 3461 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
3502 && !skb_pfmemalloc_protocol(skb))
3503 goto drop; 3462 goto drop;
3504 3463
3505 if (vlan_tx_tag_present(skb)) { 3464 if (vlan_tx_tag_present(skb)) {
@@ -3569,7 +3528,31 @@ drop:
3569unlock: 3528unlock:
3570 rcu_read_unlock(); 3529 rcu_read_unlock();
3571out: 3530out:
3572 tsk_restore_flags(current, pflags, PF_MEMALLOC); 3531 return ret;
3532}
3533
3534static int __netif_receive_skb(struct sk_buff *skb)
3535{
3536 int ret;
3537
3538 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3539 unsigned long pflags = current->flags;
3540
3541 /*
3542 * PFMEMALLOC skbs are special, they should
3543 * - be delivered to SOCK_MEMALLOC sockets only
3544 * - stay away from userspace
3545 * - have bounded memory usage
3546 *
3547 * Use PF_MEMALLOC as this saves us from propagating the allocation
3548 * context down to all allocation sites.
3549 */
3550 current->flags |= PF_MEMALLOC;
3551 ret = __netif_receive_skb_core(skb, true);
3552 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3553 } else
3554 ret = __netif_receive_skb_core(skb, false);
3555
3573 return ret; 3556 return ret;
3574} 3557}
3575 3558
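
The refactor isolates the flag juggling to the rare pfmemalloc case; tsk_restore_flags() puts back only the PF_MEMALLOC bit, leaving any other flag changes made underneath intact. The same save/restore pattern in miniature (the flag value is illustrative; the real one lives in <linux/sched.h>):

#include <stdio.h>

#define PF_MEMALLOC 0x00000800UL	/* illustrative value */

static unsigned long task_flags;

/* restore only the named bit, as tsk_restore_flags() does */
static void restore_flags(unsigned long saved, unsigned long mask)
{
	task_flags = (task_flags & ~mask) | (saved & mask);
}

static int receive(int pfmemalloc_skb)
{
	int ret;

	if (pfmemalloc_skb) {
		unsigned long saved = task_flags;

		task_flags |= PF_MEMALLOC;	/* allocations may dip into reserves */
		ret = 0;			/* __netif_receive_skb_core(skb, true) */
		restore_flags(saved, PF_MEMALLOC);
	} else {
		ret = 0;			/* __netif_receive_skb_core(skb, false) */
	}
	return ret;
}

int main(void)
{
	return receive(1);
}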
@@ -3736,7 +3719,6 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
3736 __be16 type = skb->protocol; 3719 __be16 type = skb->protocol;
3737 struct list_head *head = &offload_base; 3720 struct list_head *head = &offload_base;
3738 int same_flow; 3721 int same_flow;
3739 int mac_len;
3740 enum gro_result ret; 3722 enum gro_result ret;
3741 3723
3742 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb)) 3724 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
@@ -3753,8 +3735,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
3753 continue; 3735 continue;
3754 3736
3755 skb_set_network_header(skb, skb_gro_offset(skb)); 3737 skb_set_network_header(skb, skb_gro_offset(skb));
3756 mac_len = skb->network_header - skb->mac_header; 3738 skb_reset_mac_len(skb);
3757 skb->mac_len = mac_len;
3758 NAPI_GRO_CB(skb)->same_flow = 0; 3739 NAPI_GRO_CB(skb)->same_flow = 0;
3759 NAPI_GRO_CB(skb)->flush = 0; 3740 NAPI_GRO_CB(skb)->flush = 0;
3760 NAPI_GRO_CB(skb)->free = 0; 3741 NAPI_GRO_CB(skb)->free = 0;
@@ -4236,127 +4217,6 @@ softnet_break:
4236 goto out; 4217 goto out;
4237} 4218}
4238 4219
4239static gifconf_func_t *gifconf_list[NPROTO];
4240
4241/**
4242 * register_gifconf - register a SIOCGIF handler
4243 * @family: Address family
4244 * @gifconf: Function handler
4245 *
4246 * Register protocol dependent address dumping routines. The handler
4247 * that is passed must not be freed or reused until it has been replaced
4248 * by another handler.
4249 */
4250int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
4251{
4252 if (family >= NPROTO)
4253 return -EINVAL;
4254 gifconf_list[family] = gifconf;
4255 return 0;
4256}
4257EXPORT_SYMBOL(register_gifconf);
4258
4259
4260/*
4261 * Map an interface index to its name (SIOCGIFNAME)
4262 */
4263
4264/*
4265 * We need this ioctl for efficient implementation of the
4266 * if_indextoname() function required by the IPv6 API. Without
4267 * it, we would have to search all the interfaces to find a
4268 * match. --pb
4269 */
4270
4271static int dev_ifname(struct net *net, struct ifreq __user *arg)
4272{
4273 struct net_device *dev;
4274 struct ifreq ifr;
4275 unsigned seq;
4276
4277 /*
4278 * Fetch the caller's info block.
4279 */
4280
4281 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4282 return -EFAULT;
4283
4284retry:
4285 seq = read_seqcount_begin(&devnet_rename_seq);
4286 rcu_read_lock();
4287 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
4288 if (!dev) {
4289 rcu_read_unlock();
4290 return -ENODEV;
4291 }
4292
4293 strcpy(ifr.ifr_name, dev->name);
4294 rcu_read_unlock();
4295 if (read_seqcount_retry(&devnet_rename_seq, seq))
4296 goto retry;
4297
4298 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
4299 return -EFAULT;
4300 return 0;
4301}
4302
4303/*
4304 * Perform a SIOCGIFCONF call. This structure will change
4305 * size eventually, and there is nothing I can do about it.
4306 * Thus we will need a 'compatibility mode'.
4307 */
4308
4309static int dev_ifconf(struct net *net, char __user *arg)
4310{
4311 struct ifconf ifc;
4312 struct net_device *dev;
4313 char __user *pos;
4314 int len;
4315 int total;
4316 int i;
4317
4318 /*
4319 * Fetch the caller's info block.
4320 */
4321
4322 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
4323 return -EFAULT;
4324
4325 pos = ifc.ifc_buf;
4326 len = ifc.ifc_len;
4327
4328 /*
4329 * Loop over the interfaces, and write an info block for each.
4330 */
4331
4332 total = 0;
4333 for_each_netdev(net, dev) {
4334 for (i = 0; i < NPROTO; i++) {
4335 if (gifconf_list[i]) {
4336 int done;
4337 if (!pos)
4338 done = gifconf_list[i](dev, NULL, 0);
4339 else
4340 done = gifconf_list[i](dev, pos + total,
4341 len - total);
4342 if (done < 0)
4343 return -EFAULT;
4344 total += done;
4345 }
4346 }
4347 }
4348
4349 /*
4350 * All done. Write the updated control block back to the caller.
4351 */
4352 ifc.ifc_len = total;
4353
4354 /*
4355 * Both BSD and Solaris return 0 here, so we do too.
4356 */
4357 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
4358}
4359
4360#ifdef CONFIG_PROC_FS 4220#ifdef CONFIG_PROC_FS
4361 4221
4362#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1) 4222#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
@@ -4658,11 +4518,12 @@ static int __net_init dev_proc_net_init(struct net *net)
4658{ 4518{
4659 int rc = -ENOMEM; 4519 int rc = -ENOMEM;
4660 4520
4661 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops)) 4521 if (!proc_create("dev", S_IRUGO, net->proc_net, &dev_seq_fops))
4662 goto out; 4522 goto out;
4663 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops)) 4523 if (!proc_create("softnet_stat", S_IRUGO, net->proc_net,
4524 &softnet_seq_fops))
4664 goto out_dev; 4525 goto out_dev;
4665 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops)) 4526 if (!proc_create("ptype", S_IRUGO, net->proc_net, &ptype_seq_fops))
4666 goto out_softnet; 4527 goto out_softnet;
4667 4528
4668 if (wext_proc_init(net)) 4529 if (wext_proc_init(net))
@@ -4671,11 +4532,11 @@ static int __net_init dev_proc_net_init(struct net *net)
4671out: 4532out:
4672 return rc; 4533 return rc;
4673out_ptype: 4534out_ptype:
4674 proc_net_remove(net, "ptype"); 4535 remove_proc_entry("ptype", net->proc_net);
4675out_softnet: 4536out_softnet:
4676 proc_net_remove(net, "softnet_stat"); 4537 remove_proc_entry("softnet_stat", net->proc_net);
4677out_dev: 4538out_dev:
4678 proc_net_remove(net, "dev"); 4539 remove_proc_entry("dev", net->proc_net);
4679 goto out; 4540 goto out;
4680} 4541}
4681 4542
@@ -4683,9 +4544,9 @@ static void __net_exit dev_proc_net_exit(struct net *net)
4683{ 4544{
4684 wext_proc_exit(net); 4545 wext_proc_exit(net);
4685 4546
4686 proc_net_remove(net, "ptype"); 4547 remove_proc_entry("ptype", net->proc_net);
4687 proc_net_remove(net, "softnet_stat"); 4548 remove_proc_entry("softnet_stat", net->proc_net);
4688 proc_net_remove(net, "dev"); 4549 remove_proc_entry("dev", net->proc_net);
4689} 4550}
4690 4551
4691static struct pernet_operations __net_initdata dev_proc_ops = { 4552static struct pernet_operations __net_initdata dev_proc_ops = {
@@ -5317,375 +5178,6 @@ int dev_change_carrier(struct net_device *dev, bool new_carrier)
5317} 5178}
5318EXPORT_SYMBOL(dev_change_carrier); 5179EXPORT_SYMBOL(dev_change_carrier);
5319 5180
5320/*
5321 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
5322 */
5323static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
5324{
5325 int err;
5326 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
5327
5328 if (!dev)
5329 return -ENODEV;
5330
5331 switch (cmd) {
5332 case SIOCGIFFLAGS: /* Get interface flags */
5333 ifr->ifr_flags = (short) dev_get_flags(dev);
5334 return 0;
5335
5336 case SIOCGIFMETRIC: /* Get the metric on the interface
5337 (currently unused) */
5338 ifr->ifr_metric = 0;
5339 return 0;
5340
5341 case SIOCGIFMTU: /* Get the MTU of a device */
5342 ifr->ifr_mtu = dev->mtu;
5343 return 0;
5344
5345 case SIOCGIFHWADDR:
5346 if (!dev->addr_len)
5347 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
5348 else
5349 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
5350 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
5351 ifr->ifr_hwaddr.sa_family = dev->type;
5352 return 0;
5353
5354 case SIOCGIFSLAVE:
5355 err = -EINVAL;
5356 break;
5357
5358 case SIOCGIFMAP:
5359 ifr->ifr_map.mem_start = dev->mem_start;
5360 ifr->ifr_map.mem_end = dev->mem_end;
5361 ifr->ifr_map.base_addr = dev->base_addr;
5362 ifr->ifr_map.irq = dev->irq;
5363 ifr->ifr_map.dma = dev->dma;
5364 ifr->ifr_map.port = dev->if_port;
5365 return 0;
5366
5367 case SIOCGIFINDEX:
5368 ifr->ifr_ifindex = dev->ifindex;
5369 return 0;
5370
5371 case SIOCGIFTXQLEN:
5372 ifr->ifr_qlen = dev->tx_queue_len;
5373 return 0;
5374
5375 default:
5376 /* dev_ioctl() should ensure this case
5377 * is never reached
5378 */
5379 WARN_ON(1);
5380 err = -ENOTTY;
5381 break;
5382
5383 }
5384 return err;
5385}
5386
5387/*
5388 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
5389 */
5390static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
5391{
5392 int err;
5393 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
5394 const struct net_device_ops *ops;
5395
5396 if (!dev)
5397 return -ENODEV;
5398
5399 ops = dev->netdev_ops;
5400
5401 switch (cmd) {
5402 case SIOCSIFFLAGS: /* Set interface flags */
5403 return dev_change_flags(dev, ifr->ifr_flags);
5404
5405 case SIOCSIFMETRIC: /* Set the metric on the interface
5406 (currently unused) */
5407 return -EOPNOTSUPP;
5408
5409 case SIOCSIFMTU: /* Set the MTU of a device */
5410 return dev_set_mtu(dev, ifr->ifr_mtu);
5411
5412 case SIOCSIFHWADDR:
5413 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
5414
5415 case SIOCSIFHWBROADCAST:
5416 if (ifr->ifr_hwaddr.sa_family != dev->type)
5417 return -EINVAL;
5418 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
5419 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
5420 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
5421 return 0;
5422
5423 case SIOCSIFMAP:
5424 if (ops->ndo_set_config) {
5425 if (!netif_device_present(dev))
5426 return -ENODEV;
5427 return ops->ndo_set_config(dev, &ifr->ifr_map);
5428 }
5429 return -EOPNOTSUPP;
5430
5431 case SIOCADDMULTI:
5432 if (!ops->ndo_set_rx_mode ||
5433 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5434 return -EINVAL;
5435 if (!netif_device_present(dev))
5436 return -ENODEV;
5437 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
5438
5439 case SIOCDELMULTI:
5440 if (!ops->ndo_set_rx_mode ||
5441 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5442 return -EINVAL;
5443 if (!netif_device_present(dev))
5444 return -ENODEV;
5445 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
5446
5447 case SIOCSIFTXQLEN:
5448 if (ifr->ifr_qlen < 0)
5449 return -EINVAL;
5450 dev->tx_queue_len = ifr->ifr_qlen;
5451 return 0;
5452
5453 case SIOCSIFNAME:
5454 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
5455 return dev_change_name(dev, ifr->ifr_newname);
5456
5457 case SIOCSHWTSTAMP:
5458 err = net_hwtstamp_validate(ifr);
5459 if (err)
5460 return err;
5461 /* fall through */
5462
5463 /*
5464 * Unknown or private ioctl
5465 */
5466 default:
5467 if ((cmd >= SIOCDEVPRIVATE &&
5468 cmd <= SIOCDEVPRIVATE + 15) ||
5469 cmd == SIOCBONDENSLAVE ||
5470 cmd == SIOCBONDRELEASE ||
5471 cmd == SIOCBONDSETHWADDR ||
5472 cmd == SIOCBONDSLAVEINFOQUERY ||
5473 cmd == SIOCBONDINFOQUERY ||
5474 cmd == SIOCBONDCHANGEACTIVE ||
5475 cmd == SIOCGMIIPHY ||
5476 cmd == SIOCGMIIREG ||
5477 cmd == SIOCSMIIREG ||
5478 cmd == SIOCBRADDIF ||
5479 cmd == SIOCBRDELIF ||
5480 cmd == SIOCSHWTSTAMP ||
5481 cmd == SIOCWANDEV) {
5482 err = -EOPNOTSUPP;
5483 if (ops->ndo_do_ioctl) {
5484 if (netif_device_present(dev))
5485 err = ops->ndo_do_ioctl(dev, ifr, cmd);
5486 else
5487 err = -ENODEV;
5488 }
5489 } else
5490 err = -EINVAL;
5491
5492 }
5493 return err;
5494}
5495
5496/*
5497 * This function handles all "interface"-type I/O control requests. The actual
5498 * 'doing' part of this is dev_ifsioc above.
5499 */
5500
5501/**
5502 * dev_ioctl - network device ioctl
5503 * @net: the applicable net namespace
5504 * @cmd: command to issue
5505 * @arg: pointer to a struct ifreq in user space
5506 *
5507 * Issue ioctl functions to devices. This is normally called by the
5508 * user space syscall interfaces but can sometimes be useful for
5509 * other purposes. The return value is the return from the syscall if
5510 * positive or a negative errno code on error.
5511 */
5512
5513int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
5514{
5515 struct ifreq ifr;
5516 int ret;
5517 char *colon;
5518
5519 /* One special case: SIOCGIFCONF takes ifconf argument
5520 and requires shared lock, because it sleeps writing
5521 to user space.
5522 */
5523
5524 if (cmd == SIOCGIFCONF) {
5525 rtnl_lock();
5526 ret = dev_ifconf(net, (char __user *) arg);
5527 rtnl_unlock();
5528 return ret;
5529 }
5530 if (cmd == SIOCGIFNAME)
5531 return dev_ifname(net, (struct ifreq __user *)arg);
5532
5533 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
5534 return -EFAULT;
5535
5536 ifr.ifr_name[IFNAMSIZ-1] = 0;
5537
5538 colon = strchr(ifr.ifr_name, ':');
5539 if (colon)
5540 *colon = 0;
5541
5542 /*
5543 * See which interface the caller is talking about.
5544 */
5545
5546 switch (cmd) {
5547 /*
5548 * These ioctl calls:
5549 * - can be done by all.
5550 * - atomic and do not require locking.
5551 * - return a value
5552 */
5553 case SIOCGIFFLAGS:
5554 case SIOCGIFMETRIC:
5555 case SIOCGIFMTU:
5556 case SIOCGIFHWADDR:
5557 case SIOCGIFSLAVE:
5558 case SIOCGIFMAP:
5559 case SIOCGIFINDEX:
5560 case SIOCGIFTXQLEN:
5561 dev_load(net, ifr.ifr_name);
5562 rcu_read_lock();
5563 ret = dev_ifsioc_locked(net, &ifr, cmd);
5564 rcu_read_unlock();
5565 if (!ret) {
5566 if (colon)
5567 *colon = ':';
5568 if (copy_to_user(arg, &ifr,
5569 sizeof(struct ifreq)))
5570 ret = -EFAULT;
5571 }
5572 return ret;
5573
5574 case SIOCETHTOOL:
5575 dev_load(net, ifr.ifr_name);
5576 rtnl_lock();
5577 ret = dev_ethtool(net, &ifr);
5578 rtnl_unlock();
5579 if (!ret) {
5580 if (colon)
5581 *colon = ':';
5582 if (copy_to_user(arg, &ifr,
5583 sizeof(struct ifreq)))
5584 ret = -EFAULT;
5585 }
5586 return ret;
5587
5588 /*
5589 * These ioctl calls:
5590 * - require superuser power.
5591 * - require strict serialization.
5592 * - return a value
5593 */
5594 case SIOCGMIIPHY:
5595 case SIOCGMIIREG:
5596 case SIOCSIFNAME:
5597 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
5598 return -EPERM;
5599 dev_load(net, ifr.ifr_name);
5600 rtnl_lock();
5601 ret = dev_ifsioc(net, &ifr, cmd);
5602 rtnl_unlock();
5603 if (!ret) {
5604 if (colon)
5605 *colon = ':';
5606 if (copy_to_user(arg, &ifr,
5607 sizeof(struct ifreq)))
5608 ret = -EFAULT;
5609 }
5610 return ret;
5611
5612 /*
5613 * These ioctl calls:
5614 * - require superuser power.
5615 * - require strict serialization.
5616 * - do not return a value
5617 */
5618 case SIOCSIFMAP:
5619 case SIOCSIFTXQLEN:
5620 if (!capable(CAP_NET_ADMIN))
5621 return -EPERM;
5622 /* fall through */
5623 /*
5624 * These ioctl calls:
5625 * - require local superuser power.
5626 * - require strict serialization.
5627 * - do not return a value
5628 */
5629 case SIOCSIFFLAGS:
5630 case SIOCSIFMETRIC:
5631 case SIOCSIFMTU:
5632 case SIOCSIFHWADDR:
5633 case SIOCSIFSLAVE:
5634 case SIOCADDMULTI:
5635 case SIOCDELMULTI:
5636 case SIOCSIFHWBROADCAST:
5637 case SIOCSMIIREG:
5638 case SIOCBONDENSLAVE:
5639 case SIOCBONDRELEASE:
5640 case SIOCBONDSETHWADDR:
5641 case SIOCBONDCHANGEACTIVE:
5642 case SIOCBRADDIF:
5643 case SIOCBRDELIF:
5644 case SIOCSHWTSTAMP:
5645 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
5646 return -EPERM;
5647 /* fall through */
5648 case SIOCBONDSLAVEINFOQUERY:
5649 case SIOCBONDINFOQUERY:
5650 dev_load(net, ifr.ifr_name);
5651 rtnl_lock();
5652 ret = dev_ifsioc(net, &ifr, cmd);
5653 rtnl_unlock();
5654 return ret;
5655
5656 case SIOCGIFMEM:
5657 /* Get the per device memory space. We can add this but
5658 * currently do not support it */
5659 case SIOCSIFMEM:
5660 /* Set the per device memory buffer space.
5661 * Not applicable in our case */
5662 case SIOCSIFLINK:
5663 return -ENOTTY;
5664
5665 /*
5666 * Unknown or private ioctl.
5667 */
5668 default:
5669 if (cmd == SIOCWANDEV ||
5670 (cmd >= SIOCDEVPRIVATE &&
5671 cmd <= SIOCDEVPRIVATE + 15)) {
5672 dev_load(net, ifr.ifr_name);
5673 rtnl_lock();
5674 ret = dev_ifsioc(net, &ifr, cmd);
5675 rtnl_unlock();
5676 if (!ret && copy_to_user(arg, &ifr,
5677 sizeof(struct ifreq)))
5678 ret = -EFAULT;
5679 return ret;
5680 }
5681 /* Take care of Wireless Extensions */
5682 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5683 return wext_handle_ioctl(net, &ifr, cmd, arg);
5684 return -ENOTTY;
5685 }
5686}
5687
5688
5689/** 5181/**
5690 * dev_new_index - allocate an ifindex 5182 * dev_new_index - allocate an ifindex
5691 * @net: the applicable net namespace 5183 * @net: the applicable net namespace
@@ -5958,10 +5450,9 @@ static int netif_alloc_rx_queues(struct net_device *dev)
5958 BUG_ON(count < 1); 5450 BUG_ON(count < 1);
5959 5451
5960 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL); 5452 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5961 if (!rx) { 5453 if (!rx)
5962 pr_err("netdev: Unable to allocate %u rx queues\n", count);
5963 return -ENOMEM; 5454 return -ENOMEM;
5964 } 5455
5965 dev->_rx = rx; 5456 dev->_rx = rx;
5966 5457
5967 for (i = 0; i < count; i++) 5458 for (i = 0; i < count; i++)
@@ -5992,10 +5483,9 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
5992 BUG_ON(count < 1); 5483 BUG_ON(count < 1);
5993 5484
5994 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL); 5485 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
5995 if (!tx) { 5486 if (!tx)
5996 pr_err("netdev: Unable to allocate %u tx queues\n", count);
5997 return -ENOMEM; 5487 return -ENOMEM;
5998 } 5488
5999 dev->_tx = tx; 5489 dev->_tx = tx;
6000 5490
6001 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); 5491 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
@@ -6482,10 +5972,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6482 alloc_size += NETDEV_ALIGN - 1; 5972 alloc_size += NETDEV_ALIGN - 1;
6483 5973
6484 p = kzalloc(alloc_size, GFP_KERNEL); 5974 p = kzalloc(alloc_size, GFP_KERNEL);
6485 if (!p) { 5975 if (!p)
6486 pr_err("alloc_netdev: Unable to allocate device\n");
6487 return NULL; 5976 return NULL;
6488 }
6489 5977
6490 dev = PTR_ALIGN(p, NETDEV_ALIGN); 5978 dev = PTR_ALIGN(p, NETDEV_ALIGN);
6491 dev->padded = (char *)dev - (char *)p; 5979 dev->padded = (char *)dev - (char *)p;
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index b079c7bbc157..89562529df45 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -780,14 +780,14 @@ static const struct file_operations dev_mc_seq_fops = {
780 780
781static int __net_init dev_mc_net_init(struct net *net) 781static int __net_init dev_mc_net_init(struct net *net)
782{ 782{
783 if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops)) 783 if (!proc_create("dev_mcast", 0, net->proc_net, &dev_mc_seq_fops))
784 return -ENOMEM; 784 return -ENOMEM;
785 return 0; 785 return 0;
786} 786}
787 787
788static void __net_exit dev_mc_net_exit(struct net *net) 788static void __net_exit dev_mc_net_exit(struct net *net)
789{ 789{
790 proc_net_remove(net, "dev_mcast"); 790 remove_proc_entry("dev_mcast", net->proc_net);
791} 791}
792 792
793static struct pernet_operations __net_initdata dev_mc_net_ops = { 793static struct pernet_operations __net_initdata dev_mc_net_ops = {
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
new file mode 100644
index 000000000000..6cc0481faade
--- /dev/null
+++ b/net/core/dev_ioctl.c
@@ -0,0 +1,576 @@
1#include <linux/kmod.h>
2#include <linux/netdevice.h>
3#include <linux/etherdevice.h>
4#include <linux/rtnetlink.h>
5#include <linux/net_tstamp.h>
6#include <linux/wireless.h>
7#include <net/wext.h>
8
9/*
10 * Map an interface index to its name (SIOCGIFNAME)
11 */
12
13/*
14 * We need this ioctl for efficient implementation of the
15 * if_indextoname() function required by the IPv6 API. Without
16 * it, we would have to search all the interfaces to find a
17 * match. --pb
18 */
19
20static int dev_ifname(struct net *net, struct ifreq __user *arg)
21{
22 struct net_device *dev;
23 struct ifreq ifr;
24 unsigned seq;
25
26 /*
27 * Fetch the caller's info block.
28 */
29
30 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
31 return -EFAULT;
32
33retry:
34 seq = read_seqcount_begin(&devnet_rename_seq);
35 rcu_read_lock();
36 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
37 if (!dev) {
38 rcu_read_unlock();
39 return -ENODEV;
40 }
41
42 strcpy(ifr.ifr_name, dev->name);
43 rcu_read_unlock();
44 if (read_seqcount_retry(&devnet_rename_seq, seq))
45 goto retry;
46
47 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
48 return -EFAULT;
49 return 0;
50}
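
dev_ifname() keeps the classic seqcount reader shape: snapshot the sequence, copy speculatively, retry if a rename raced in between. A single-writer userspace sketch of the same loop:

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

static _Atomic unsigned int seq;	/* even: stable, odd: write in progress */
static char name[16] = "eth0";

static void rename_dev(const char *new_name)	/* the lone writer */
{
	atomic_fetch_add(&seq, 1);	/* odd: readers will retry */
	strcpy(name, new_name);
	atomic_fetch_add(&seq, 1);	/* even again: data stable */
}

static void read_name(char out[16])
{
	unsigned int s;

	for (;;) {
		s = atomic_load(&seq);
		if (s & 1)
			continue;	/* writer active, spin */
		strcpy(out, name);	/* speculative copy */
		if (atomic_load(&seq) == s)
			return;		/* sequence unchanged: copy is good */
	}
}

int main(void)
{
	char buf[16];

	rename_dev("eth1");
	read_name(buf);
	puts(buf);
	return 0;
}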
51
52static gifconf_func_t *gifconf_list[NPROTO];
53
54/**
55 * register_gifconf - register a SIOCGIF handler
56 * @family: Address family
57 * @gifconf: Function handler
58 *
59 * Register protocol dependent address dumping routines. The handler
60 * that is passed must not be freed or reused until it has been replaced
61 * by another handler.
62 */
63int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
64{
65 if (family >= NPROTO)
66 return -EINVAL;
67 gifconf_list[family] = gifconf;
68 return 0;
69}
70EXPORT_SYMBOL(register_gifconf);
71
72/*
73 * Perform a SIOCGIFCONF call. This structure will change
74 * size eventually, and there is nothing I can do about it.
75 * Thus we will need a 'compatibility mode'.
76 */
77
78static int dev_ifconf(struct net *net, char __user *arg)
79{
80 struct ifconf ifc;
81 struct net_device *dev;
82 char __user *pos;
83 int len;
84 int total;
85 int i;
86
87 /*
88 * Fetch the caller's info block.
89 */
90
91 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
92 return -EFAULT;
93
94 pos = ifc.ifc_buf;
95 len = ifc.ifc_len;
96
97 /*
98 * Loop over the interfaces, and write an info block for each.
99 */
100
101 total = 0;
102 for_each_netdev(net, dev) {
103 for (i = 0; i < NPROTO; i++) {
104 if (gifconf_list[i]) {
105 int done;
106 if (!pos)
107 done = gifconf_list[i](dev, NULL, 0);
108 else
109 done = gifconf_list[i](dev, pos + total,
110 len - total);
111 if (done < 0)
112 return -EFAULT;
113 total += done;
114 }
115 }
116 }
117
118 /*
119 * All done. Write the updated control block back to the caller.
120 */
121 ifc.ifc_len = total;
122
123 /*
124 * Both BSD and Solaris return 0 here, so we do too.
125 */
126 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
127}
128
129/*
130 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
131 */
132static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
133{
134 int err;
135 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
136
137 if (!dev)
138 return -ENODEV;
139
140 switch (cmd) {
141 case SIOCGIFFLAGS: /* Get interface flags */
142 ifr->ifr_flags = (short) dev_get_flags(dev);
143 return 0;
144
145 case SIOCGIFMETRIC: /* Get the metric on the interface
146 (currently unused) */
147 ifr->ifr_metric = 0;
148 return 0;
149
150 case SIOCGIFMTU: /* Get the MTU of a device */
151 ifr->ifr_mtu = dev->mtu;
152 return 0;
153
154 case SIOCGIFHWADDR:
155 if (!dev->addr_len)
156 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
157 else
158 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
159 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
160 ifr->ifr_hwaddr.sa_family = dev->type;
161 return 0;
162
163 case SIOCGIFSLAVE:
164 err = -EINVAL;
165 break;
166
167 case SIOCGIFMAP:
168 ifr->ifr_map.mem_start = dev->mem_start;
169 ifr->ifr_map.mem_end = dev->mem_end;
170 ifr->ifr_map.base_addr = dev->base_addr;
171 ifr->ifr_map.irq = dev->irq;
172 ifr->ifr_map.dma = dev->dma;
173 ifr->ifr_map.port = dev->if_port;
174 return 0;
175
176 case SIOCGIFINDEX:
177 ifr->ifr_ifindex = dev->ifindex;
178 return 0;
179
180 case SIOCGIFTXQLEN:
181 ifr->ifr_qlen = dev->tx_queue_len;
182 return 0;
183
184 default:
185 /* dev_ioctl() should ensure this case
186 * is never reached
187 */
188 WARN_ON(1);
189 err = -ENOTTY;
190 break;
191
192 }
193 return err;
194}
195
196static int net_hwtstamp_validate(struct ifreq *ifr)
197{
198 struct hwtstamp_config cfg;
199 enum hwtstamp_tx_types tx_type;
200 enum hwtstamp_rx_filters rx_filter;
201 int tx_type_valid = 0;
202 int rx_filter_valid = 0;
203
204 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
205 return -EFAULT;
206
207 if (cfg.flags) /* reserved for future extensions */
208 return -EINVAL;
209
210 tx_type = cfg.tx_type;
211 rx_filter = cfg.rx_filter;
212
213 switch (tx_type) {
214 case HWTSTAMP_TX_OFF:
215 case HWTSTAMP_TX_ON:
216 case HWTSTAMP_TX_ONESTEP_SYNC:
217 tx_type_valid = 1;
218 break;
219 }
220
221 switch (rx_filter) {
222 case HWTSTAMP_FILTER_NONE:
223 case HWTSTAMP_FILTER_ALL:
224 case HWTSTAMP_FILTER_SOME:
225 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
226 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
227 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
228 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
229 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
230 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
231 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
232 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
233 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
234 case HWTSTAMP_FILTER_PTP_V2_EVENT:
235 case HWTSTAMP_FILTER_PTP_V2_SYNC:
236 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
237 rx_filter_valid = 1;
238 break;
239 }
240
241 if (!tx_type_valid || !rx_filter_valid)
242 return -ERANGE;
243
244 return 0;
245}
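
/* An illustrative request that passes the validation above (not taken from
 * the kernel sources): cfg.flags must be zero, and tx_type/rx_filter must
 * each hit one of the accepted cases:
 *
 *	struct hwtstamp_config cfg = {
 *		.flags     = 0,
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { .ifr_data = (char *)&cfg };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */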
246
247/*
248 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
249 */
250static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
251{
252 int err;
253 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
254 const struct net_device_ops *ops;
255
256 if (!dev)
257 return -ENODEV;
258
259 ops = dev->netdev_ops;
260
261 switch (cmd) {
262 case SIOCSIFFLAGS: /* Set interface flags */
263 return dev_change_flags(dev, ifr->ifr_flags);
264
265 case SIOCSIFMETRIC: /* Set the metric on the interface
266 (currently unused) */
267 return -EOPNOTSUPP;
268
269 case SIOCSIFMTU: /* Set the MTU of a device */
270 return dev_set_mtu(dev, ifr->ifr_mtu);
271
272 case SIOCSIFHWADDR:
273 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
274
275 case SIOCSIFHWBROADCAST:
276 if (ifr->ifr_hwaddr.sa_family != dev->type)
277 return -EINVAL;
278 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
279 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
280 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
281 return 0;
282
283 case SIOCSIFMAP:
284 if (ops->ndo_set_config) {
285 if (!netif_device_present(dev))
286 return -ENODEV;
287 return ops->ndo_set_config(dev, &ifr->ifr_map);
288 }
289 return -EOPNOTSUPP;
290
291 case SIOCADDMULTI:
292 if (!ops->ndo_set_rx_mode ||
293 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
294 return -EINVAL;
295 if (!netif_device_present(dev))
296 return -ENODEV;
297 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
298
299 case SIOCDELMULTI:
300 if (!ops->ndo_set_rx_mode ||
301 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
302 return -EINVAL;
303 if (!netif_device_present(dev))
304 return -ENODEV;
305 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
306
307 case SIOCSIFTXQLEN:
308 if (ifr->ifr_qlen < 0)
309 return -EINVAL;
310 dev->tx_queue_len = ifr->ifr_qlen;
311 return 0;
312
313 case SIOCSIFNAME:
314 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
315 return dev_change_name(dev, ifr->ifr_newname);
316
317 case SIOCSHWTSTAMP:
318 err = net_hwtstamp_validate(ifr);
319 if (err)
320 return err;
321 /* fall through */
322
323 /*
324 * Unknown or private ioctl
325 */
326 default:
327 if ((cmd >= SIOCDEVPRIVATE &&
328 cmd <= SIOCDEVPRIVATE + 15) ||
329 cmd == SIOCBONDENSLAVE ||
330 cmd == SIOCBONDRELEASE ||
331 cmd == SIOCBONDSETHWADDR ||
332 cmd == SIOCBONDSLAVEINFOQUERY ||
333 cmd == SIOCBONDINFOQUERY ||
334 cmd == SIOCBONDCHANGEACTIVE ||
335 cmd == SIOCGMIIPHY ||
336 cmd == SIOCGMIIREG ||
337 cmd == SIOCSMIIREG ||
338 cmd == SIOCBRADDIF ||
339 cmd == SIOCBRDELIF ||
340 cmd == SIOCSHWTSTAMP ||
341 cmd == SIOCWANDEV) {
342 err = -EOPNOTSUPP;
343 if (ops->ndo_do_ioctl) {
344 if (netif_device_present(dev))
345 err = ops->ndo_do_ioctl(dev, ifr, cmd);
346 else
347 err = -ENODEV;
348 }
349 } else
350 err = -EINVAL;
351
352 }
353 return err;
354}
355
356/**
357 * dev_load - load a network module
358 * @net: the applicable net namespace
359 * @name: name of interface
360 *
361 * If a network interface is not present and the process has suitable
362 * privileges this function loads the module. If module loading is not
363 * available in this kernel then it becomes a nop.
364 */
365
366void dev_load(struct net *net, const char *name)
367{
368 struct net_device *dev;
369 int no_module;
370
371 rcu_read_lock();
372 dev = dev_get_by_name_rcu(net, name);
373 rcu_read_unlock();
374
375 no_module = !dev;
376 if (no_module && capable(CAP_NET_ADMIN))
377 no_module = request_module("netdev-%s", name);
378 if (no_module && capable(CAP_SYS_MODULE)) {
379 if (!request_module("%s", name))
380 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
381 name);
382 }
383}
384EXPORT_SYMBOL(dev_load);
385
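/* The "netdev-%s" request above pairs with the MODULE_ALIAS_NETDEV() macro
 * from <linux/netdevice.h>. A driver-side sketch (the device name is
 * illustrative):
 *
 *	MODULE_ALIAS_NETDEV("dummy0");
 *
 * With such an alias in place, a CAP_NET_ADMIN caller referencing the
 * interface name is enough to autoload the module, without the deprecated
 * CAP_SYS_MODULE fallback.
 */
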
386/*
387 * This function handles all "interface"-type I/O control requests. The actual
388 * 'doing' part of this is dev_ifsioc above.
389 */
390
391/**
392 * dev_ioctl - network device ioctl
393 * @net: the applicable net namespace
394 * @cmd: command to issue
395 * @arg: pointer to a struct ifreq in user space
396 *
397 * Issue ioctl functions to devices. This is normally called by the
398 * user space syscall interfaces but can sometimes be useful for
399 * other purposes. The return value is the return from the syscall if
400 * positive or a negative errno code on error.
401 */
402
403int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
404{
405 struct ifreq ifr;
406 int ret;
407 char *colon;
408
409 /* One special case: SIOCGIFCONF takes ifconf argument
410 and requires shared lock, because it sleeps writing
411 to user space.
412 */
413
414 if (cmd == SIOCGIFCONF) {
415 rtnl_lock();
416 ret = dev_ifconf(net, (char __user *) arg);
417 rtnl_unlock();
418 return ret;
419 }
420 if (cmd == SIOCGIFNAME)
421 return dev_ifname(net, (struct ifreq __user *)arg);
422
423 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
424 return -EFAULT;
425
426 ifr.ifr_name[IFNAMSIZ-1] = 0;
427
428 colon = strchr(ifr.ifr_name, ':');
429 if (colon)
430 *colon = 0;
431
432 /*
433 * See which interface the caller is talking about.
434 */
435
436 switch (cmd) {
437 /*
438 * These ioctl calls:
439 * - can be done by all.
440 * - atomic and do not require locking.
441 * - return a value
442 */
443 case SIOCGIFFLAGS:
444 case SIOCGIFMETRIC:
445 case SIOCGIFMTU:
446 case SIOCGIFHWADDR:
447 case SIOCGIFSLAVE:
448 case SIOCGIFMAP:
449 case SIOCGIFINDEX:
450 case SIOCGIFTXQLEN:
451 dev_load(net, ifr.ifr_name);
452 rcu_read_lock();
453 ret = dev_ifsioc_locked(net, &ifr, cmd);
454 rcu_read_unlock();
455 if (!ret) {
456 if (colon)
457 *colon = ':';
458 if (copy_to_user(arg, &ifr,
459 sizeof(struct ifreq)))
460 ret = -EFAULT;
461 }
462 return ret;
463
464 case SIOCETHTOOL:
465 dev_load(net, ifr.ifr_name);
466 rtnl_lock();
467 ret = dev_ethtool(net, &ifr);
468 rtnl_unlock();
469 if (!ret) {
470 if (colon)
471 *colon = ':';
472 if (copy_to_user(arg, &ifr,
473 sizeof(struct ifreq)))
474 ret = -EFAULT;
475 }
476 return ret;
477
478 /*
479 * These ioctl calls:
480 * - require superuser power.
481 * - require strict serialization.
482 * - return a value
483 */
484 case SIOCGMIIPHY:
485 case SIOCGMIIREG:
486 case SIOCSIFNAME:
487 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
488 return -EPERM;
489 dev_load(net, ifr.ifr_name);
490 rtnl_lock();
491 ret = dev_ifsioc(net, &ifr, cmd);
492 rtnl_unlock();
493 if (!ret) {
494 if (colon)
495 *colon = ':';
496 if (copy_to_user(arg, &ifr,
497 sizeof(struct ifreq)))
498 ret = -EFAULT;
499 }
500 return ret;
501
502 /*
503 * These ioctl calls:
504 * - require superuser power.
505 * - require strict serialization.
506 * - do not return a value
507 */
508 case SIOCSIFMAP:
509 case SIOCSIFTXQLEN:
510 if (!capable(CAP_NET_ADMIN))
511 return -EPERM;
512 /* fall through */
513 /*
514 * These ioctl calls:
515 * - require local superuser power.
516 * - require strict serialization.
517 * - do not return a value
518 */
519 case SIOCSIFFLAGS:
520 case SIOCSIFMETRIC:
521 case SIOCSIFMTU:
522 case SIOCSIFHWADDR:
523 case SIOCSIFSLAVE:
524 case SIOCADDMULTI:
525 case SIOCDELMULTI:
526 case SIOCSIFHWBROADCAST:
527 case SIOCSMIIREG:
528 case SIOCBONDENSLAVE:
529 case SIOCBONDRELEASE:
530 case SIOCBONDSETHWADDR:
531 case SIOCBONDCHANGEACTIVE:
532 case SIOCBRADDIF:
533 case SIOCBRDELIF:
534 case SIOCSHWTSTAMP:
535 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
536 return -EPERM;
537 /* fall through */
538 case SIOCBONDSLAVEINFOQUERY:
539 case SIOCBONDINFOQUERY:
540 dev_load(net, ifr.ifr_name);
541 rtnl_lock();
542 ret = dev_ifsioc(net, &ifr, cmd);
543 rtnl_unlock();
544 return ret;
545
546 case SIOCGIFMEM:
547 /* Get the per device memory space. We can add this but
548 * currently do not support it */
549 case SIOCSIFMEM:
550 /* Set the per device memory buffer space.
551 * Not applicable in our case */
552 case SIOCSIFLINK:
553 return -ENOTTY;
554
555 /*
556 * Unknown or private ioctl.
557 */
558 default:
559 if (cmd == SIOCWANDEV ||
560 (cmd >= SIOCDEVPRIVATE &&
561 cmd <= SIOCDEVPRIVATE + 15)) {
562 dev_load(net, ifr.ifr_name);
563 rtnl_lock();
564 ret = dev_ifsioc(net, &ifr, cmd);
565 rtnl_unlock();
566 if (!ret && copy_to_user(arg, &ifr,
567 sizeof(struct ifreq)))
568 ret = -EFAULT;
569 return ret;
570 }
571 /* Take care of Wireless Extensions */
572 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
573 return wext_handle_ioctl(net, &ifr, cmd, arg);
574 return -ENOTTY;
575 }
576}
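
For reference, a minimal userspace caller of the read-only path above; this is
an illustrative sketch, not part of the patch ("lo" is just a convenient
interface name):

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <unistd.h>

	int main(void)
	{
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "lo", IFNAMSIZ - 1);
		/* SIOCGIFMTU is served by dev_ifsioc_locked() above */
		if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
			printf("mtu=%d\n", ifr.ifr_mtu);
		close(fd);
		return 0;
	}
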
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index d9d55209db67..3e9b2c3e30f0 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -77,6 +77,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
 	[NETIF_F_TSO_ECN_BIT] =      "tx-tcp-ecn-segmentation",
 	[NETIF_F_TSO6_BIT] =         "tx-tcp6-segmentation",
 	[NETIF_F_FSO_BIT] =          "tx-fcoe-segmentation",
+	[NETIF_F_GSO_GRE_BIT] =      "tx-gre-segmentation",
 
 	[NETIF_F_FCOE_CRC_BIT] =     "tx-checksum-fcoe-crc",
 	[NETIF_F_SCTP_CSUM_BIT] =    "tx-checksum-sctp",
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 331ccb90f915..fa32899006a2 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -47,6 +47,8 @@ static struct sk_buff_head skb_pool;
 
 static atomic_t trapped;
 
+static struct srcu_struct netpoll_srcu;
+
 #define USEC_PER_POLL	50
 #define NETPOLL_RX_ENABLED  1
 #define NETPOLL_RX_DROP     2
@@ -59,6 +61,7 @@ static atomic_t trapped;
 
 static void zap_completion_queue(void);
 static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
+static void netpoll_async_cleanup(struct work_struct *work);
 
 static unsigned int carrier_timeout = 4;
 module_param(carrier_timeout, uint, 0644);
@@ -199,18 +202,31 @@ static void netpoll_poll_dev(struct net_device *dev)
 	const struct net_device_ops *ops;
 	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
 
-	if (!dev || !netif_running(dev))
+	/* Don't do any rx activity if the dev_lock mutex is held
+	 * the dev_open/close paths use this to block netpoll activity
+	 * while changing device state
+	 */
+	if (!mutex_trylock(&ni->dev_lock))
 		return;
 
+	if (!netif_running(dev)) {
+		mutex_unlock(&ni->dev_lock);
+		return;
+	}
+
 	ops = dev->netdev_ops;
-	if (!ops->ndo_poll_controller)
+	if (!ops->ndo_poll_controller) {
+		mutex_unlock(&ni->dev_lock);
 		return;
+	}
 
 	/* Process pending work on NIC */
 	ops->ndo_poll_controller(dev);
 
 	poll_napi(dev);
 
+	mutex_unlock(&ni->dev_lock);
+
 	if (dev->flags & IFF_SLAVE) {
 		if (ni) {
 			struct net_device *bond_dev;
@@ -231,6 +247,31 @@ static void netpoll_poll_dev(struct net_device *dev)
 	zap_completion_queue();
 }
 
+int netpoll_rx_disable(struct net_device *dev)
+{
+	struct netpoll_info *ni;
+	int idx;
+	might_sleep();
+	idx = srcu_read_lock(&netpoll_srcu);
+	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
+	if (ni)
+		mutex_lock(&ni->dev_lock);
+	srcu_read_unlock(&netpoll_srcu, idx);
+	return 0;
+}
+EXPORT_SYMBOL(netpoll_rx_disable);
+
+void netpoll_rx_enable(struct net_device *dev)
+{
+	struct netpoll_info *ni;
+	rcu_read_lock();
+	ni = rcu_dereference(dev->npinfo);
+	if (ni)
+		mutex_unlock(&ni->dev_lock);
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(netpoll_rx_enable);
+
 static void refill_skbs(void)
 {
 	struct sk_buff *skb;
@@ -666,7 +707,7 @@ static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo
 	icmp6h->icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
 	icmp6h->icmp6_router = 0;
 	icmp6h->icmp6_solicited = 1;
-	target = (struct in6_addr *)skb_transport_header(send_skb) + sizeof(struct icmp6hdr);
+	target = (struct in6_addr *)(skb_transport_header(send_skb) + sizeof(struct icmp6hdr));
 	*target = msg->target;
 	icmp6h->icmp6_cksum = csum_ipv6_magic(saddr, daddr, size,
 					      IPPROTO_ICMPV6,
@@ -984,6 +1025,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 
 	np->dev = ndev;
 	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
+	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
 
 	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
 	    !ndev->netdev_ops->ndo_poll_controller) {
@@ -1004,6 +1046,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 		INIT_LIST_HEAD(&npinfo->rx_np);
 
 		spin_lock_init(&npinfo->rx_lock);
+		mutex_init(&npinfo->dev_lock);
 		skb_queue_head_init(&npinfo->neigh_tx);
 		skb_queue_head_init(&npinfo->txq);
 		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
@@ -1017,7 +1060,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 			goto free_npinfo;
 		}
 	} else {
-		npinfo = ndev->npinfo;
+		npinfo = rtnl_dereference(ndev->npinfo);
 		atomic_inc(&npinfo->refcnt);
 	}
 
@@ -1169,6 +1212,7 @@ EXPORT_SYMBOL(netpoll_setup);
 static int __init netpoll_init(void)
 {
 	skb_queue_head_init(&skb_pool);
+	init_srcu_struct(&netpoll_srcu);
 	return 0;
 }
 core_initcall(netpoll_init);
@@ -1196,7 +1240,11 @@ void __netpoll_cleanup(struct netpoll *np)
 	struct netpoll_info *npinfo;
 	unsigned long flags;
 
-	npinfo = np->dev->npinfo;
+	/* rtnl_dereference would be preferable here but
+	 * rcu_cleanup_netpoll path can put us in here safely without
+	 * holding the rtnl, so plain rcu_dereference it is
+	 */
+	npinfo = rtnl_dereference(np->dev->npinfo);
 	if (!npinfo)
 		return;
 
@@ -1208,6 +1256,8 @@ void __netpoll_cleanup(struct netpoll *np)
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}
 
+	synchronize_srcu(&netpoll_srcu);
+
 	if (atomic_dec_and_test(&npinfo->refcnt)) {
 		const struct net_device_ops *ops;
 
@@ -1215,25 +1265,27 @@ void __netpoll_cleanup(struct netpoll *np)
 		if (ops->ndo_netpoll_cleanup)
 			ops->ndo_netpoll_cleanup(np->dev);
 
-		RCU_INIT_POINTER(np->dev->npinfo, NULL);
+		rcu_assign_pointer(np->dev->npinfo, NULL);
 		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
 	}
 }
 EXPORT_SYMBOL_GPL(__netpoll_cleanup);
 
-static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
+static void netpoll_async_cleanup(struct work_struct *work)
 {
-	struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);
+	struct netpoll *np = container_of(work, struct netpoll, cleanup_work);
 
+	rtnl_lock();
 	__netpoll_cleanup(np);
+	rtnl_unlock();
 	kfree(np);
 }
 
-void __netpoll_free_rcu(struct netpoll *np)
+void __netpoll_free_async(struct netpoll *np)
 {
-	call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
+	schedule_work(&np->cleanup_work);
 }
-EXPORT_SYMBOL_GPL(__netpoll_free_rcu);
+EXPORT_SYMBOL_GPL(__netpoll_free_async);
 
 void netpoll_cleanup(struct netpoll *np)
 {
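
The new netpoll_rx_disable()/netpoll_rx_enable() pair is meant to bracket
device state changes so netpoll_poll_dev() cannot run concurrently (the
dev_open/close side lands in the net/core/dev.c portion of this series). A
hedged sketch of the pairing; example_open() and its helper are illustrative,
not a verbatim excerpt:

	static int example_open(struct net_device *dev)
	{
		int ret;

		ret = netpoll_rx_disable(dev);	/* takes ni->dev_lock */
		if (ret)
			return ret;
		ret = do_real_open(dev);	/* hypothetical device bring-up */
		netpoll_rx_enable(dev);		/* drops ni->dev_lock */
		return ret;
	}
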
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 5e67defe2cb0..0777d0aa18c3 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -69,10 +69,8 @@ static int extend_netdev_table(struct net_device *dev, u32 target_idx)
 
 	/* allocate & copy */
 	new = kzalloc(new_sz, GFP_KERNEL);
-	if (!new) {
-		pr_warn("Unable to alloc new priomap!\n");
+	if (!new)
 		return -ENOMEM;
-	}
 
 	if (old)
 		memcpy(new->priomap, old->priomap,
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 797769551b91..6048fc1da1c2 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1790,10 +1790,13 @@ static ssize_t pktgen_thread_write(struct file *file,
 			return -EFAULT;
 		i += len;
 		mutex_lock(&pktgen_thread_lock);
-		pktgen_add_device(t, f);
+		ret = pktgen_add_device(t, f);
 		mutex_unlock(&pktgen_thread_lock);
-		ret = count;
-		sprintf(pg_result, "OK: add_device=%s", f);
+		if (!ret) {
+			ret = count;
+			sprintf(pg_result, "OK: add_device=%s", f);
+		} else
+			sprintf(pg_result, "ERROR: can not add device %s", f);
 		goto out;
 	}
 
@@ -3647,7 +3650,7 @@ static int __net_init pg_net_init(struct net *net)
 remove_entry:
 	remove_proc_entry(PGCTRL, pn->proc_dir);
 remove:
-	proc_net_remove(pn->net, PG_PROC_DIR);
+	remove_proc_entry(PG_PROC_DIR, pn->net->proc_net);
 	return ret;
 }
 
@@ -3673,7 +3676,7 @@ static void __net_exit pg_net_exit(struct net *net)
 	}
 
 	remove_proc_entry(PGCTRL, pn->proc_dir);
-	proc_net_remove(pn->net, PG_PROC_DIR);
+	remove_proc_entry(PG_PROC_DIR, pn->net->proc_net);
 }
 
 static struct pernet_operations pg_net_ops = {
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index c1e4db60eeca..d8aa20f6a46e 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2119,13 +2119,17 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
 	struct net *net = sock_net(skb->sk);
 	struct ndmsg *ndm;
-	struct nlattr *llattr;
+	struct nlattr *tb[NDA_MAX+1];
 	struct net_device *dev;
 	int err = -EINVAL;
 	__u8 *addr;
 
-	if (nlmsg_len(nlh) < sizeof(*ndm))
-		return -EINVAL;
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
+	if (err < 0)
+		return err;
 
 	ndm = nlmsg_data(nlh);
 	if (ndm->ndm_ifindex == 0) {
@@ -2139,13 +2143,17 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 		return -ENODEV;
 	}
 
-	llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR);
-	if (llattr == NULL || nla_len(llattr) != ETH_ALEN) {
-		pr_info("PF_BRIGDE: RTM_DELNEIGH with invalid address\n");
+	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
+		pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid address\n");
+		return -EINVAL;
+	}
+
+	addr = nla_data(tb[NDA_LLADDR]);
+	if (!is_valid_ether_addr(addr)) {
+		pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ether address\n");
 		return -EINVAL;
 	}
 
-	addr = nla_data(llattr);
 	err = -EOPNOTSUPP;
 
 	/* Support fdb on master device the net/bridge default case */
@@ -2155,7 +2163,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 		const struct net_device_ops *ops = br_dev->netdev_ops;
 
 		if (ops->ndo_fdb_del)
-			err = ops->ndo_fdb_del(ndm, dev, addr);
+			err = ops->ndo_fdb_del(ndm, tb, dev, addr);
 
 		if (err)
 			goto out;
@@ -2165,7 +2173,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 
 	/* Embedded bridge, macvlan, and any other device support */
 	if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_del) {
-		err = dev->netdev_ops->ndo_fdb_del(ndm, dev, addr);
+		err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr);
 
 		if (!err) {
 			rtnl_fdb_notify(dev, addr, RTM_DELNEIGH);
@@ -2315,6 +2323,13 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
 	int idx = 0;
 	u32 portid = NETLINK_CB(cb->skb).portid;
 	u32 seq = cb->nlh->nlmsg_seq;
+	struct nlattr *extfilt;
+	u32 filter_mask = 0;
+
+	extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
+				  IFLA_EXT_MASK);
+	if (extfilt)
+		filter_mask = nla_get_u32(extfilt);
 
 	rcu_read_lock();
 	for_each_netdev_rcu(net, dev) {
@@ -2324,14 +2339,15 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
 		if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
 			if (idx >= cb->args[0] &&
 			    br_dev->netdev_ops->ndo_bridge_getlink(
-				    skb, portid, seq, dev) < 0)
+				    skb, portid, seq, dev, filter_mask) < 0)
 				break;
 			idx++;
 		}
 
 		if (ops->ndo_bridge_getlink) {
 			if (idx >= cb->args[0] &&
-			    ops->ndo_bridge_getlink(skb, portid, seq, dev) < 0)
+			    ops->ndo_bridge_getlink(skb, portid, seq, dev,
+						    filter_mask) < 0)
 				break;
 			idx++;
 		}
@@ -2372,14 +2388,14 @@ static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
 
 	if ((!flags || (flags & BRIDGE_FLAGS_MASTER)) &&
 	    br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
-		err = br_dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev);
+		err = br_dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0);
 		if (err < 0)
 			goto errout;
 	}
 
 	if ((flags & BRIDGE_FLAGS_SELF) &&
 	    dev->netdev_ops->ndo_bridge_getlink) {
-		err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev);
+		err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0);
 		if (err < 0)
 			goto errout;
 	}
@@ -2464,6 +2480,77 @@ out:
 	return err;
 }
 
+static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
+			       void *arg)
+{
+	struct net *net = sock_net(skb->sk);
+	struct ifinfomsg *ifm;
+	struct net_device *dev;
+	struct nlattr *br_spec, *attr = NULL;
+	int rem, err = -EOPNOTSUPP;
+	u16 oflags, flags = 0;
+	bool have_flags = false;
+
+	if (nlmsg_len(nlh) < sizeof(*ifm))
+		return -EINVAL;
+
+	ifm = nlmsg_data(nlh);
+	if (ifm->ifi_family != AF_BRIDGE)
+		return -EPFNOSUPPORT;
+
+	dev = __dev_get_by_index(net, ifm->ifi_index);
+	if (!dev) {
+		pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n");
+		return -ENODEV;
+	}
+
+	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+	if (br_spec) {
+		nla_for_each_nested(attr, br_spec, rem) {
+			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
+				have_flags = true;
+				flags = nla_get_u16(attr);
+				break;
+			}
+		}
+	}
+
+	oflags = flags;
+
+	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
+		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
+
+		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
+			err = -EOPNOTSUPP;
+			goto out;
+		}
+
+		err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh);
+		if (err)
+			goto out;
+
+		flags &= ~BRIDGE_FLAGS_MASTER;
+	}
+
+	if ((flags & BRIDGE_FLAGS_SELF)) {
+		if (!dev->netdev_ops->ndo_bridge_dellink)
+			err = -EOPNOTSUPP;
+		else
+			err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh);
+
+		if (!err)
+			flags &= ~BRIDGE_FLAGS_SELF;
+	}
+
+	if (have_flags)
+		memcpy(nla_data(attr), &flags, sizeof(flags));
+	/* Generate event to notify upper layer of bridge change */
+	if (!err)
+		err = rtnl_bridge_notify(dev, oflags);
+out:
+	return err;
+}
+
 /* Protected by RTNL sempahore. */
 static struct rtattr **rta_buf;
 static int rtattr_max;
@@ -2647,6 +2734,7 @@ void __init rtnetlink_init(void)
 	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL);
 
 	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, NULL);
+	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, NULL);
 	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL);
 }
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bddc1dd2e7f2..2a3ca33c30aa 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -104,47 +104,37 @@ static const struct pipe_buf_operations sock_pipe_buf_ops = {
 	.get = sock_pipe_buf_get,
 };
 
-/*
- *	Keep out-of-line to prevent kernel bloat.
- *	__builtin_return_address is not used because it is not always
- *	reliable.
- */
-
 /**
- *	skb_over_panic - private function
+ *	skb_panic - private function for out-of-line support
  *	@skb: buffer
  *	@sz: size
- *	@here: address
+ *	@addr: address
+ *	@msg: skb_over_panic or skb_under_panic
  *
- *	Out of line support code for skb_put(). Not user callable.
+ *	Out-of-line support for skb_put() and skb_push().
+ *	Called via the wrapper skb_over_panic() or skb_under_panic().
+ *	Keep out of line to prevent kernel bloat.
+ *	__builtin_return_address is not used because it is not always reliable.
  */
-static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
+static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
+		      const char msg[])
 {
 	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
-		 __func__, here, skb->len, sz, skb->head, skb->data,
+		 msg, addr, skb->len, sz, skb->head, skb->data,
 		 (unsigned long)skb->tail, (unsigned long)skb->end,
 		 skb->dev ? skb->dev->name : "<NULL>");
 	BUG();
 }
 
-/**
- *	skb_under_panic - private function
- *	@skb: buffer
- *	@sz: size
- *	@here: address
- *
- *	Out of line support code for skb_push(). Not user callable.
- */
-
-static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
+static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
 {
-	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
-		 __func__, here, skb->len, sz, skb->head, skb->data,
-		 (unsigned long)skb->tail, (unsigned long)skb->end,
-		 skb->dev ? skb->dev->name : "<NULL>");
-	BUG();
+	skb_panic(skb, sz, addr, __func__);
 }
 
+static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
+{
+	skb_panic(skb, sz, addr, __func__);
+}
 
 /*
  * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
@@ -351,10 +341,6 @@ struct netdev_alloc_cache {
 };
 static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
 
-#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
-#define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
-#define NETDEV_PAGECNT_MAX_BIAS	   NETDEV_FRAG_PAGE_MAX_SIZE
-
 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
 	struct netdev_alloc_cache *nc;
@@ -686,7 +672,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->network_header	= old->network_header;
 	new->mac_header		= old->mac_header;
 	new->inner_transport_header = old->inner_transport_header;
-	new->inner_network_header = old->inner_transport_header;
+	new->inner_network_header = old->inner_network_header;
 	skb_dst_copy(new, old);
 	new->rxhash		= old->rxhash;
 	new->ooo_okay		= old->ooo_okay;
@@ -2340,8 +2326,7 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
 {
 	int pos = skb_headlen(skb);
 
-	skb_shinfo(skb1)->gso_type = skb_shinfo(skb)->gso_type;
-
+	skb_shinfo(skb)->tx_flags = skb_shinfo(skb1)->tx_flags & SKBTX_SHARED_FRAG;
 	if (len < pos)	/* Split line is inside header. */
 		skb_split_inside_header(skb, skb1, len, pos);
 	else		/* Second chunk has no header, nothing to copy. */
@@ -2753,6 +2738,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 	unsigned int mss = skb_shinfo(skb)->gso_size;
 	unsigned int doffset = skb->data - skb_mac_header(skb);
 	unsigned int offset = doffset;
+	unsigned int tnl_hlen = skb_tnl_header_len(skb);
 	unsigned int headroom;
 	unsigned int len;
 	int sg = !!(features & NETIF_F_SG);
@@ -2829,7 +2815,10 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 		skb_set_network_header(nskb, skb->mac_len);
 		nskb->transport_header = (nskb->network_header +
 					  skb_network_header_len(skb));
-		skb_copy_from_linear_data(skb, nskb->data, doffset);
+
+		skb_copy_from_linear_data_offset(skb, -tnl_hlen,
+						 nskb->data - tnl_hlen,
+						 doffset + tnl_hlen);
 
 		if (fskb != skb_shinfo(skb)->frag_list)
 			continue;
@@ -2847,7 +2836,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 		skb_copy_from_linear_data_offset(skb, offset,
 						 skb_put(nskb, hsize), hsize);
 
-		skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
+		skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
 
 		while (pos < offset + len && i < nfrags) {
 			*frag = skb_shinfo(skb)->frags[i];
diff --git a/net/core/sock.c b/net/core/sock.c
index f1e14e20d181..fe96c5d34299 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2836,7 +2836,7 @@ static const struct file_operations proto_seq_fops = {
 
 static __net_init int proto_init_net(struct net *net)
 {
-	if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
+	if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
 		return -ENOMEM;
 
 	return 0;
@@ -2844,7 +2844,7 @@ static __net_init int proto_init_net(struct net *net)
 
 static __net_exit void proto_exit_net(struct net *net)
 {
-	proc_net_remove(net, "protocols");
+	remove_proc_entry("protocols", net->proc_net);
 }
 
 
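
The same mechanical conversion repeats in the dccp, decnet, arp, fib_trie and
igmp hunks that follow: the proc_net_fops_create()/proc_net_remove() wrappers
give way to plain proc_create()/remove_proc_entry() against net->proc_net.
Schematically (a before/after pattern, not taken from any one file):

	/* before */
	pde = proc_net_fops_create(net, "name", S_IRUGO, &fops);
	proc_net_remove(net, "name");

	/* after */
	pde = proc_create("name", S_IRUGO, net->proc_net, &fops);
	remove_proc_entry("name", net->proc_net);
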
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index 0a8d6ebd9b45..4c6bdf97a657 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -171,7 +171,7 @@ static __init int dccpprobe_init(void)
 	spin_lock_init(&dccpw.lock);
 	if (kfifo_alloc(&dccpw.fifo, bufsize, GFP_KERNEL))
 		return ret;
-	if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
+	if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops))
 		goto err0;
 
 	ret = setup_jprobe();
@@ -181,7 +181,7 @@ static __init int dccpprobe_init(void)
 	pr_info("DCCP watch registered (port=%d)\n", port);
 	return 0;
 err1:
-	proc_net_remove(&init_net, procname);
+	remove_proc_entry(procname, init_net.proc_net);
 err0:
 	kfifo_free(&dccpw.fifo);
 	return ret;
@@ -191,7 +191,7 @@ module_init(dccpprobe_init);
 static __exit void dccpprobe_exit(void)
 {
 	kfifo_free(&dccpw.fifo);
-	proc_net_remove(&init_net, procname);
+	remove_proc_entry(procname, init_net.proc_net);
 	unregister_jprobe(&dccp_send_probe);
 
 }
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 64d9843f9e04..c4a2def5b7bd 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -2384,7 +2384,7 @@ static int __init decnet_init(void)
 	dev_add_pack(&dn_dix_packet_type);
 	register_netdevice_notifier(&dn_dev_notifier);
 
-	proc_net_fops_create(&init_net, "decnet", S_IRUGO, &dn_socket_seq_fops);
+	proc_create("decnet", S_IRUGO, init_net.proc_net, &dn_socket_seq_fops);
 	dn_register_sysctl();
 out:
 	return rc;
@@ -2413,7 +2413,7 @@ static void __exit decnet_exit(void)
 	dn_neigh_cleanup();
 	dn_fib_cleanup();
 
-	proc_net_remove(&init_net, "decnet");
+	remove_proc_entry("decnet", init_net.proc_net);
 
 	proto_unregister(&dn_proto);
 
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index e47ba9fc4a0e..c8da116d84a4 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -1412,7 +1412,7 @@ void __init dn_dev_init(void)
 	rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL, NULL);
 	rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr, NULL);
 
-	proc_net_fops_create(&init_net, "decnet_dev", S_IRUGO, &dn_dev_seq_fops);
+	proc_create("decnet_dev", S_IRUGO, init_net.proc_net, &dn_dev_seq_fops);
 
 #ifdef CONFIG_SYSCTL
 	{
@@ -1433,7 +1433,7 @@ void __exit dn_dev_cleanup(void)
 	}
 #endif /* CONFIG_SYSCTL */
 
-	proc_net_remove(&init_net, "decnet_dev");
+	remove_proc_entry("decnet_dev", init_net.proc_net);
 
 	dn_dev_devices_off();
 }
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 3aede1b459fd..f8637f93d318 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -95,7 +95,7 @@ static u32 dn_neigh_hash(const void *pkey,
 
 struct neigh_table dn_neigh_table = {
 	.family =	PF_DECnet,
-	.entry_size =	sizeof(struct dn_neigh),
+	.entry_size =	NEIGH_ENTRY_SIZE(sizeof(struct dn_neigh)),
 	.key_len =	sizeof(__le16),
 	.hash =		dn_neigh_hash,
 	.constructor =	dn_neigh_construct,
@@ -590,11 +590,12 @@ static const struct file_operations dn_neigh_seq_fops = {
 void __init dn_neigh_init(void)
 {
 	neigh_table_init(&dn_neigh_table);
-	proc_net_fops_create(&init_net, "decnet_neigh", S_IRUGO, &dn_neigh_seq_fops);
+	proc_create("decnet_neigh", S_IRUGO, init_net.proc_net,
+		    &dn_neigh_seq_fops);
 }
 
 void __exit dn_neigh_cleanup(void)
 {
-	proc_net_remove(&init_net, "decnet_neigh");
+	remove_proc_entry("decnet_neigh", init_net.proc_net);
 	neigh_table_clear(&dn_neigh_table);
 }
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 1550028fcd8e..5ac0e153ef83 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1901,7 +1901,8 @@ void __init dn_route_init(void)
 
 	dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);
 
-	proc_net_fops_create(&init_net, "decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops);
+	proc_create("decnet_cache", S_IRUGO, init_net.proc_net,
+		    &dn_rt_cache_seq_fops);
 
 #ifdef CONFIG_DECNET_ROUTER
 	rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
@@ -1917,7 +1918,7 @@ void __exit dn_route_cleanup(void)
 	del_timer(&dn_route_timer);
 	dn_run_flush(0);
 
-	proc_net_remove(&init_net, "decnet_cache");
+	remove_proc_entry("decnet_cache", init_net.proc_net);
 	dst_entries_destroy(&dn_dst_ops);
 }
 
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 09cba81d2c4a..43b95ca61114 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -377,17 +377,14 @@ static int lowpan_header_create(struct sk_buff *skb,
 	struct ipv6hdr *hdr;
 	const u8 *saddr = _saddr;
 	const u8 *daddr = _daddr;
-	u8 *head;
+	u8 head[100];
 	struct ieee802154_addr sa, da;
 
+	/* TODO:
+	 * if this package isn't ipv6 one, where should it be routed?
+	 */
 	if (type != ETH_P_IPV6)
 		return 0;
-	/* TODO:
-	 * if this package isn't ipv6 one, where should it be routed?
-	 */
-	head = kzalloc(100, GFP_KERNEL);
-	if (head == NULL)
-		return -ENOMEM;
 
 	hdr = ipv6_hdr(skb);
 	hc06_ptr = head + 2;
@@ -561,8 +558,6 @@ static int lowpan_header_create(struct sk_buff *skb,
 	skb_pull(skb, sizeof(struct ipv6hdr));
 	memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head);
 
-	kfree(head);
-
 	lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
 			      skb->len);
 
@@ -1267,7 +1262,7 @@ static inline int __init lowpan_netlink_init(void)
 	return rtnl_link_register(&lowpan_link_ops);
 }
 
-static inline void __init lowpan_netlink_fini(void)
+static inline void lowpan_netlink_fini(void)
 {
 	rtnl_link_unregister(&lowpan_link_ops);
 }
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 49ddca31c4da..e225a4e5b572 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -263,21 +263,6 @@ void build_ehash_secret(void)
 }
 EXPORT_SYMBOL(build_ehash_secret);
 
-static inline int inet_netns_ok(struct net *net, __u8 protocol)
-{
-	const struct net_protocol *ipprot;
-
-	if (net_eq(net, &init_net))
-		return 1;
-
-	ipprot = rcu_dereference(inet_protos[protocol]);
-	if (ipprot == NULL) {
-		/* raw IP is OK */
-		return 1;
-	}
-	return ipprot->netns_ok;
-}
-
 /*
  *	Create an inet socket.
  */
@@ -350,10 +335,6 @@ lookup_protocol:
 	    !ns_capable(net->user_ns, CAP_NET_RAW))
 		goto out_rcu_unlock;
 
-	err = -EAFNOSUPPORT;
-	if (!inet_netns_ok(net, protocol))
-		goto out_rcu_unlock;
-
 	sock->ops = answer->ops;
 	answer_prot = answer->prot;
 	answer_no_check = answer->no_check;
@@ -1306,7 +1287,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 		       SKB_GSO_UDP |
 		       SKB_GSO_DODGY |
 		       SKB_GSO_TCP_ECN |
-		       SKB_GSO_SHARED_FRAG |
+		       SKB_GSO_GRE |
 		       0)))
 		goto out;
 
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index a69b4e4a02b5..2e7f1948216f 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -321,8 +321,7 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	/* We are going to _remove_ AH header to keep sockets happy,
 	 * so... Later this can change. */
-	if (skb_cloned(skb) &&
-	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+	if (skb_unclone(skb, GFP_ATOMIC))
 		goto out;
 
 	skb->ip_summed = CHECKSUM_NONE;
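
skb_unclone() is the helper this conversion (and the ip_fragment.c and
ip_gre.c hunks below) relies on; judging from the call sites it replaces, its
body is presumably the old skb_cloned()/pskb_expand_head() pattern, along the
lines of:

	/* sketch of the helper; the real definition lives in
	 * <linux/skbuff.h>, not in this patch */
	static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
	{
		if (skb_cloned(skb))
			return pskb_expand_head(skb, 0, 0, pri);
		return 0;
	}
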
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 9547a273b9e9..fea4929f6200 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -928,24 +928,25 @@ static void parp_redo(struct sk_buff *skb)
 static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
 		   struct packet_type *pt, struct net_device *orig_dev)
 {
-	struct arphdr *arp;
+	const struct arphdr *arp;
+
+	if (dev->flags & IFF_NOARP ||
+	    skb->pkt_type == PACKET_OTHERHOST ||
+	    skb->pkt_type == PACKET_LOOPBACK)
+		goto freeskb;
+
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		goto out_of_mem;
 
 	/* ARP header, plus 2 device addresses, plus 2 IP addresses. */
 	if (!pskb_may_pull(skb, arp_hdr_len(dev)))
 		goto freeskb;
 
 	arp = arp_hdr(skb);
-	if (arp->ar_hln != dev->addr_len ||
-	    dev->flags & IFF_NOARP ||
-	    skb->pkt_type == PACKET_OTHERHOST ||
-	    skb->pkt_type == PACKET_LOOPBACK ||
-	    arp->ar_pln != 4)
+	if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4)
 		goto freeskb;
 
-	skb = skb_share_check(skb, GFP_ATOMIC);
-	if (skb == NULL)
-		goto out_of_mem;
-
 	memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
 
 	return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);
@@ -1404,14 +1405,14 @@ static const struct file_operations arp_seq_fops = {
 
 static int __net_init arp_net_init(struct net *net)
 {
-	if (!proc_net_fops_create(net, "arp", S_IRUGO, &arp_seq_fops))
+	if (!proc_create("arp", S_IRUGO, net->proc_net, &arp_seq_fops))
 		return -ENOMEM;
 	return 0;
 }
 
 static void __net_exit arp_net_exit(struct net *net)
 {
-	proc_net_remove(net, "arp");
+	remove_proc_entry("arp", net->proc_net);
 }
 
 static struct pernet_operations arp_net_ops = {
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 31d771ca9a70..61e03da3e1f5 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2607,31 +2607,31 @@ static const struct file_operations fib_route_fops = {
 
 int __net_init fib_proc_init(struct net *net)
 {
-	if (!proc_net_fops_create(net, "fib_trie", S_IRUGO, &fib_trie_fops))
+	if (!proc_create("fib_trie", S_IRUGO, net->proc_net, &fib_trie_fops))
 		goto out1;
 
-	if (!proc_net_fops_create(net, "fib_triestat", S_IRUGO,
-				  &fib_triestat_fops))
+	if (!proc_create("fib_triestat", S_IRUGO, net->proc_net,
+			 &fib_triestat_fops))
 		goto out2;
 
-	if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_route_fops))
+	if (!proc_create("route", S_IRUGO, net->proc_net, &fib_route_fops))
 		goto out3;
 
 	return 0;
 
 out3:
-	proc_net_remove(net, "fib_triestat");
+	remove_proc_entry("fib_triestat", net->proc_net);
 out2:
-	proc_net_remove(net, "fib_trie");
+	remove_proc_entry("fib_trie", net->proc_net);
 out1:
 	return -ENOMEM;
 }
 
 void __net_exit fib_proc_exit(struct net *net)
 {
-	proc_net_remove(net, "fib_trie");
-	proc_net_remove(net, "fib_triestat");
-	proc_net_remove(net, "route");
+	remove_proc_entry("fib_trie", net->proc_net);
+	remove_proc_entry("fib_triestat", net->proc_net);
+	remove_proc_entry("route", net->proc_net);
}
 
 #endif /* CONFIG_PROC_FS */
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index 42a491055c76..7a4c710c4cdd 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -19,6 +19,7 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/netdevice.h>
+#include <linux/if_tunnel.h>
 #include <linux/spinlock.h>
 #include <net/protocol.h>
 #include <net/gre.h>
@@ -26,6 +27,11 @@
 
 static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
 static DEFINE_SPINLOCK(gre_proto_lock);
+struct gre_base_hdr {
+	__be16 flags;
+	__be16 protocol;
+};
+#define GRE_HEADER_SECTION 4
 
 int gre_add_protocol(const struct gre_protocol *proto, u8 version)
 {
@@ -112,12 +118,117 @@ static void gre_err(struct sk_buff *skb, u32 info)
 	rcu_read_unlock();
 }
 
+static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
+				       netdev_features_t features)
+{
+	struct sk_buff *segs = ERR_PTR(-EINVAL);
+	netdev_features_t enc_features;
+	int ghl = GRE_HEADER_SECTION;
+	struct gre_base_hdr *greh;
+	int mac_len = skb->mac_len;
+	int tnl_hlen;
+	bool csum;
+
+	if (unlikely(skb_shinfo(skb)->gso_type &
+				~(SKB_GSO_TCPV4 |
+				  SKB_GSO_TCPV6 |
+				  SKB_GSO_UDP |
+				  SKB_GSO_DODGY |
+				  SKB_GSO_TCP_ECN |
+				  SKB_GSO_GRE)))
+		goto out;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
+		goto out;
+
+	greh = (struct gre_base_hdr *)skb_transport_header(skb);
+
+	if (greh->flags & GRE_KEY)
+		ghl += GRE_HEADER_SECTION;
+	if (greh->flags & GRE_SEQ)
+		ghl += GRE_HEADER_SECTION;
+	if (greh->flags & GRE_CSUM) {
+		ghl += GRE_HEADER_SECTION;
+		csum = true;
+	} else
+		csum = false;
+
+	/* setup inner skb. */
+	if (greh->protocol == htons(ETH_P_TEB)) {
+		struct ethhdr *eth = eth_hdr(skb);
+		skb->protocol = eth->h_proto;
+	} else {
+		skb->protocol = greh->protocol;
+	}
+
+	skb->encapsulation = 0;
+
+	if (unlikely(!pskb_may_pull(skb, ghl)))
+		goto out;
+	__skb_pull(skb, ghl);
+	skb_reset_mac_header(skb);
+	skb_set_network_header(skb, skb_inner_network_offset(skb));
+	skb->mac_len = skb_inner_network_offset(skb);
+
+	/* segment inner packet. */
+	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
+	segs = skb_mac_gso_segment(skb, enc_features);
+	if (!segs || IS_ERR(segs))
+		goto out;
+
+	skb = segs;
+	tnl_hlen = skb_tnl_header_len(skb);
+	do {
+		__skb_push(skb, ghl);
+		if (csum) {
+			__be32 *pcsum;
+
+			if (skb_has_shared_frag(skb)) {
+				int err;
+
+				err = __skb_linearize(skb);
+				if (err) {
+					kfree_skb(segs);
+					segs = ERR_PTR(err);
+					goto out;
+				}
+			}
+
+			greh = (struct gre_base_hdr *)(skb->data);
+			pcsum = (__be32 *)(greh + 1);
+			*pcsum = 0;
+			*(__sum16 *)pcsum = csum_fold(skb_checksum(skb, 0, skb->len, 0));
+		}
+		__skb_push(skb, tnl_hlen - ghl);
+
+		skb_reset_mac_header(skb);
+		skb_set_network_header(skb, mac_len);
+		skb->mac_len = mac_len;
+	} while ((skb = skb->next));
+out:
+	return segs;
+}
+
+static int gre_gso_send_check(struct sk_buff *skb)
+{
+	if (!skb->encapsulation)
+		return -EINVAL;
+	return 0;
+}
+
 static const struct net_protocol net_gre_protocol = {
 	.handler     = gre_rcv,
 	.err_handler = gre_err,
 	.netns_ok    = 1,
 };
 
+static const struct net_offload gre_offload = {
+	.callbacks = {
+		.gso_send_check = gre_gso_send_check,
+		.gso_segment = gre_gso_segment,
+	},
+};
+
 static int __init gre_init(void)
 {
 	pr_info("GRE over IPv4 demultiplexor driver\n");
@@ -127,11 +238,18 @@ static int __init gre_init(void)
 		return -EAGAIN;
 	}
 
+	if (inet_add_offload(&gre_offload, IPPROTO_GRE)) {
+		pr_err("can't add protocol offload\n");
+		inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
+		return -EAGAIN;
+	}
+
 	return 0;
 }
 
 static void __exit gre_exit(void)
 {
+	inet_del_offload(&gre_offload, IPPROTO_GRE);
 	inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
 }
 
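
For gre_gso_segment() to stay on the hardware path, a NIC driver has to
advertise the matching features; a hedged sketch (NETIF_F_GSO_GRE, NETIF_F_SG
and NETIF_F_TSO are real feature flags, the setup function is illustrative):

	static void example_nic_setup(struct net_device *dev)
	{
		/* NETIF_F_GSO_GRE keeps GRE-tagged GSO skbs from being
		 * software-segmented before reaching the device; the
		 * hw_enc_features mask is what gre_gso_segment() ANDs
		 * against when it must segment the inner packet itself. */
		dev->features |= NETIF_F_GSO_GRE;
		dev->hw_enc_features |= NETIF_F_SG | NETIF_F_TSO;
	}
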
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 736ab70fd179..d8c232794bcb 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2646,24 +2646,25 @@ static int __net_init igmp_net_init(struct net *net)
 {
 	struct proc_dir_entry *pde;
 
-	pde = proc_net_fops_create(net, "igmp", S_IRUGO, &igmp_mc_seq_fops);
+	pde = proc_create("igmp", S_IRUGO, net->proc_net, &igmp_mc_seq_fops);
 	if (!pde)
 		goto out_igmp;
-	pde = proc_net_fops_create(net, "mcfilter", S_IRUGO, &igmp_mcf_seq_fops);
+	pde = proc_create("mcfilter", S_IRUGO, net->proc_net,
+			  &igmp_mcf_seq_fops);
 	if (!pde)
 		goto out_mcfilter;
 	return 0;
 
 out_mcfilter:
-	proc_net_remove(net, "igmp");
+	remove_proc_entry("igmp", net->proc_net);
 out_igmp:
 	return -ENOMEM;
 }
 
 static void __net_exit igmp_net_exit(struct net *net)
 {
-	proc_net_remove(net, "mcfilter");
-	proc_net_remove(net, "igmp");
+	remove_proc_entry("mcfilter", net->proc_net);
+	remove_proc_entry("igmp", net->proc_net);
 }
 
 static struct pernet_operations igmp_net_ops = {
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 1211613c6c34..b6d30acb600c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -590,7 +590,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 		goto out_oversize;
 
 	/* Head of list must not be cloned. */
-	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
+	if (skb_unclone(head, GFP_ATOMIC))
 		goto out_nomem;
 
 	/* If the first fragment is fragmented itself, we split
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 00a14b9864ea..a56f1182c176 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -735,8 +735,33 @@ drop:
 	return 0;
 }
 
+static struct sk_buff *handle_offloads(struct sk_buff *skb)
+{
+	int err;
+
+	if (skb_is_gso(skb)) {
+		err = skb_unclone(skb, GFP_ATOMIC);
+		if (unlikely(err))
+			goto error;
+		skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
+		return skb;
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		err = skb_checksum_help(skb);
+		if (unlikely(err))
+			goto error;
+	}
+	skb->ip_summed = CHECKSUM_NONE;
+
+	return skb;
+
+error:
+	kfree_skb(skb);
+	return ERR_PTR(err);
+}
+
 static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+	struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	const struct iphdr *old_iph;
 	const struct iphdr *tiph;
@@ -751,10 +776,19 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	__be32 dst;
 	int mtu;
 	u8 ttl;
+	int err;
+	int pkt_len;
 
-	if (skb->ip_summed == CHECKSUM_PARTIAL &&
-	    skb_checksum_help(skb))
-		goto tx_error;
+	skb = handle_offloads(skb);
+	if (IS_ERR(skb)) {
+		dev->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	if (!skb->encapsulation) {
+		skb_reset_inner_headers(skb);
+		skb->encapsulation = 1;
+	}
 
 	old_iph = ip_hdr(skb);
 
@@ -855,7 +889,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	if (skb->protocol == htons(ETH_P_IP)) {
 		df |= (old_iph->frag_off&htons(IP_DF));
 
-		if ((old_iph->frag_off&htons(IP_DF)) &&
+		if (!skb_is_gso(skb) &&
+		    (old_iph->frag_off&htons(IP_DF)) &&
 		    mtu < ntohs(old_iph->tot_len)) {
 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
 			ip_rt_put(rt);
@@ -875,7 +910,9 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 		}
 	}
 
-	if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
+	if (!skb_is_gso(skb) &&
+	    mtu >= IPV6_MIN_MTU &&
+	    mtu < skb->len - tunnel->hlen + gre_hlen) {
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 		ip_rt_put(rt);
 		goto tx_error;
@@ -936,6 +973,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	iph->daddr = fl4.daddr;
 	iph->saddr = fl4.saddr;
 	iph->ttl = ttl;
+	iph->id = 0;
 
 	if (ttl == 0) {
 		if (skb->protocol == htons(ETH_P_IP))
@@ -964,9 +1002,19 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 			*ptr = tunnel->parms.o_key;
 			ptr--;
 		}
-		if (tunnel->parms.o_flags&GRE_CSUM) {
+		/* Skip GRE checksum if skb is getting offloaded. */
+		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE) &&
+		    (tunnel->parms.o_flags&GRE_CSUM)) {
 			int offset = skb_transport_offset(skb);
 
+			if (skb_has_shared_frag(skb)) {
+				err = __skb_linearize(skb);
+				if (err) {
+					ip_rt_put(rt);
+					goto tx_error;
+				}
+			}
+
 			*ptr = 0;
 			*(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset,
 								 skb->len - offset,
@@ -974,7 +1022,19 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 		}
 	}
 
-	iptunnel_xmit(skb, dev);
+	nf_reset(skb);
+
+	pkt_len = skb->len - skb_transport_offset(skb);
+	err = ip_local_out(skb);
+	if (likely(net_xmit_eval(err) == 0)) {
+		u64_stats_update_begin(&tstats->syncp);
+		tstats->tx_bytes += pkt_len;
+		tstats->tx_packets++;
+		u64_stats_update_end(&tstats->syncp);
+	} else {
+		dev->stats.tx_errors++;
+		dev->stats.tx_aborted_errors++;
+	}
 	return NETDEV_TX_OK;
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1044,6 +1104,11 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
 		mtu = 68;
 
 	tunnel->hlen = addend;
+	/* TCP offload with GRE SEQ is not supported. */
+	if (!(tunnel->parms.o_flags & GRE_SEQ)) {
+		dev->features |= NETIF_F_GSO_SOFTWARE;
+		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+	}
 
 	return mtu;
 }
@@ -1593,6 +1658,9 @@ static void ipgre_tap_setup(struct net_device *dev)
 
 	dev->iflink = 0;
 	dev->features |= NETIF_F_NETNS_LOCAL;
+
+	dev->features |= GRE_FEATURES;
+	dev->hw_features |= GRE_FEATURES;
 }
 
 static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
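The GRE checksum hunk above only computes a software checksum when the skb is not being GSO-offloaded, and first linearizes any skb whose page fragments may still be written by user space (SKBTX_SHARED_FRAG, set in the tcp.c hunk below): checksumming data that can change underneath would yield a corrupt packet. A hedged sketch of that guard in isolation (offset and the tx_error label stand in for the caller's locals above):

	if (skb_has_shared_frag(skb)) {
		/* copy shared pages into private memory before checksumming */
		if (__skb_linearize(skb))
			goto tx_error;
	}
	*(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset,
						 skb->len - offset, 0));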
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index f1395a6fb35f..87abd3e2bd32 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -208,13 +208,6 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
 	if (ipprot != NULL) {
 		int ret;
 
-		if (!net_eq(net, &init_net) && !ipprot->netns_ok) {
-			net_info_ratelimited("%s: proto %d isn't netns-ready\n",
-					     __func__, protocol);
-			kfree_skb(skb);
-			goto out;
-		}
-
 		if (!ipprot->no_policy) {
 			if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
 				kfree_skb(skb);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 3e98ed2bff55..5e12dca7b3dd 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -598,6 +598,7 @@ slow_path:
 	/* for offloaded checksums cleanup checksum before fragmentation */
 	if ((skb->ip_summed == CHECKSUM_PARTIAL) && skb_checksum_help(skb))
 		goto fail;
+	iph = ip_hdr(skb);
 
 	left = skb->len - hlen;		/* Space per frame */
 	ptr = hlen;			/* Where to start from */
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index a2e50ae80b53..98cbc6877019 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1394,7 +1394,7 @@ static int __init ip_auto_config(void)
 	unsigned int i;
 
 #ifdef CONFIG_PROC_FS
-	proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops);
+	proc_create("pnp", S_IRUGO, init_net.proc_net, &pnp_seq_fops);
 #endif /* CONFIG_PROC_FS */
 
 	if (!ic_enable)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 7085b9b51e7f..5f95b3aa579e 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2703,16 +2703,16 @@ static int __net_init ipmr_net_init(struct net *net)
 
 #ifdef CONFIG_PROC_FS
 	err = -ENOMEM;
-	if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops))
+	if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
 		goto proc_vif_fail;
-	if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops))
+	if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
 		goto proc_cache_fail;
 #endif
 	return 0;
 
 #ifdef CONFIG_PROC_FS
 proc_cache_fail:
-	proc_net_remove(net, "ip_mr_vif");
+	remove_proc_entry("ip_mr_vif", net->proc_net);
 proc_vif_fail:
 	ipmr_rules_exit(net);
 #endif
@@ -2723,8 +2723,8 @@ fail:
 static void __net_exit ipmr_net_exit(struct net *net)
 {
 #ifdef CONFIG_PROC_FS
-	proc_net_remove(net, "ip_mr_cache");
-	proc_net_remove(net, "ip_mr_vif");
+	remove_proc_entry("ip_mr_cache", net->proc_net);
+	remove_proc_entry("ip_mr_vif", net->proc_net);
 #endif
 	ipmr_rules_exit(net);
 }
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 9682b36df38c..f2ca12794081 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -417,12 +417,12 @@ static int __net_init ip_conntrack_net_init(struct net *net)
 {
 	struct proc_dir_entry *proc, *proc_exp, *proc_stat;
 
-	proc = proc_net_fops_create(net, "ip_conntrack", 0440, &ct_file_ops);
+	proc = proc_create("ip_conntrack", 0440, net->proc_net, &ct_file_ops);
 	if (!proc)
 		goto err1;
 
-	proc_exp = proc_net_fops_create(net, "ip_conntrack_expect", 0440,
+	proc_exp = proc_create("ip_conntrack_expect", 0440, net->proc_net,
 				&ip_exp_file_ops);
 	if (!proc_exp)
 		goto err2;
 
@@ -433,9 +433,9 @@ static int __net_init ip_conntrack_net_init(struct net *net)
 	return 0;
 
 err3:
-	proc_net_remove(net, "ip_conntrack_expect");
+	remove_proc_entry("ip_conntrack_expect", net->proc_net);
 err2:
-	proc_net_remove(net, "ip_conntrack");
+	remove_proc_entry("ip_conntrack", net->proc_net);
 err1:
 	return -ENOMEM;
 }
@@ -443,8 +443,8 @@ err1:
 static void __net_exit ip_conntrack_net_exit(struct net *net)
 {
 	remove_proc_entry("ip_conntrack", net->proc_net_stat);
-	proc_net_remove(net, "ip_conntrack_expect");
-	proc_net_remove(net, "ip_conntrack");
+	remove_proc_entry("ip_conntrack_expect", net->proc_net);
+	remove_proc_entry("ip_conntrack", net->proc_net);
 }
 
 static struct pernet_operations ip_conntrack_net_ops = {
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 6f9c07268cf6..55c4ee1bba06 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -889,7 +889,7 @@ static int ping_proc_register(struct net *net)
 	struct proc_dir_entry *p;
 	int rc = 0;
 
-	p = proc_net_fops_create(net, "icmp", S_IRUGO, &ping_seq_fops);
+	p = proc_create("icmp", S_IRUGO, net->proc_net, &ping_seq_fops);
 	if (!p)
 		rc = -ENOMEM;
 	return rc;
@@ -897,7 +897,7 @@ static int ping_proc_register(struct net *net)
 
 static void ping_proc_unregister(struct net *net)
 {
-	proc_net_remove(net, "icmp");
+	remove_proc_entry("icmp", net->proc_net);
 }
 
 
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 8de53e1ddd54..32030a24e776 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -471,28 +471,29 @@ static const struct file_operations netstat_seq_fops = {
 
 static __net_init int ip_proc_init_net(struct net *net)
 {
-	if (!proc_net_fops_create(net, "sockstat", S_IRUGO, &sockstat_seq_fops))
+	if (!proc_create("sockstat", S_IRUGO, net->proc_net,
+			 &sockstat_seq_fops))
 		goto out_sockstat;
-	if (!proc_net_fops_create(net, "netstat", S_IRUGO, &netstat_seq_fops))
+	if (!proc_create("netstat", S_IRUGO, net->proc_net, &netstat_seq_fops))
 		goto out_netstat;
-	if (!proc_net_fops_create(net, "snmp", S_IRUGO, &snmp_seq_fops))
+	if (!proc_create("snmp", S_IRUGO, net->proc_net, &snmp_seq_fops))
 		goto out_snmp;
 
 	return 0;
 
 out_snmp:
-	proc_net_remove(net, "netstat");
+	remove_proc_entry("netstat", net->proc_net);
 out_netstat:
-	proc_net_remove(net, "sockstat");
+	remove_proc_entry("sockstat", net->proc_net);
 out_sockstat:
 	return -ENOMEM;
 }
 
 static __net_exit void ip_proc_exit_net(struct net *net)
 {
-	proc_net_remove(net, "snmp");
-	proc_net_remove(net, "netstat");
-	proc_net_remove(net, "sockstat");
+	remove_proc_entry("snmp", net->proc_net);
+	remove_proc_entry("netstat", net->proc_net);
+	remove_proc_entry("sockstat", net->proc_net);
 }
 
 static __net_initdata struct pernet_operations ip_proc_ops = {
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 0f9d09f54bd9..ce848461acbb 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -37,6 +37,12 @@ const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly;
 
 int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
 {
+	if (!prot->netns_ok) {
+		pr_err("Protocol %u is not namespace aware, cannot register.\n",
+			protocol);
+		return -EINVAL;
+	}
+
 	return !cmpxchg((const struct net_protocol **)&inet_protos[protocol],
 			NULL, prot) ? 0 : -1;
 }
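Registration-time enforcement is what lets the per-packet netns_ok test in ip_local_deliver_finish() (removed above) go away: a handler that is not namespace-aware can no longer be registered at all. A hedged sketch of a conforming registration (example_rcv, example_err and IPPROTO_EXAMPLE are hypothetical placeholders):

static const struct net_protocol example_protocol = {
	.handler	= example_rcv,	/* hypothetical receive handler */
	.err_handler	= example_err,	/* hypothetical ICMP error handler */
	.netns_ok	= 1,		/* mandatory: 0 now fails with -EINVAL */
};

	if (inet_add_protocol(&example_protocol, IPPROTO_EXAMPLE) < 0)
		return -EAGAIN;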
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 6f08991409c3..53ddebc292b6 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -1050,7 +1050,7 @@ static const struct file_operations raw_seq_fops = {
 
 static __net_init int raw_init_net(struct net *net)
 {
-	if (!proc_net_fops_create(net, "raw", S_IRUGO, &raw_seq_fops))
+	if (!proc_create("raw", S_IRUGO, net->proc_net, &raw_seq_fops))
 		return -ENOMEM;
 
 	return 0;
@@ -1058,7 +1058,7 @@ static __net_init int raw_init_net(struct net *net)
 
 static __net_exit void raw_exit_net(struct net *net)
 {
-	proc_net_remove(net, "raw");
+	remove_proc_entry("raw", net->proc_net);
 }
 
 static __net_initdata struct pernet_operations raw_net_ops = {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a0fcc47fee73..3bdd1b1ad1b3 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -384,8 +384,8 @@ static int __net_init ip_rt_do_proc_init(struct net *net)
 {
 	struct proc_dir_entry *pde;
 
-	pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
+	pde = proc_create("rt_cache", S_IRUGO, net->proc_net,
 			&rt_cache_seq_fops);
 	if (!pde)
 		goto err1;
 
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 2622707602d1..960fd29d9b8e 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -633,13 +633,6 @@ static struct ctl_table ipv4_table[] = {
 		.proc_handler	= proc_tcp_congestion_control,
 	},
 	{
-		.procname	= "tcp_abc",
-		.data		= &sysctl_tcp_abc,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
 		.procname	= "tcp_mtu_probing",
 		.data		= &sysctl_tcp_mtu_probing,
 		.maxlen		= sizeof(int),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3ec1f69c5ceb..7a5ba48c2cc9 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -400,6 +400,8 @@ void tcp_init_sock(struct sock *sk)
 	tcp_enable_early_retrans(tp);
 	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
 
+	tp->tsoffset = 0;
+
 	sk->sk_state = TCP_CLOSE;
 
 	sk->sk_write_space = sk_stream_write_space;
@@ -895,8 +897,7 @@ new_segment:
 		get_page(page);
 		skb_fill_page_desc(skb, i, page, offset, copy);
 	}
-
-	skb_shinfo(skb)->gso_type |= SKB_GSO_SHARED_FRAG;
+	skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
 
 	skb->len += copy;
 	skb->data_len += copy;
@@ -2289,7 +2290,6 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tp->packets_out = 0;
 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 	tp->snd_cwnd_cnt = 0;
-	tp->bytes_acked = 0;
 	tp->window_clamp = 0;
 	tcp_set_ca_state(sk, TCP_CA_Open);
 	tcp_clear_retrans(tp);
@@ -2713,6 +2713,12 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		else
 			err = -EINVAL;
 		break;
+	case TCP_TIMESTAMP:
+		if (!tp->repair)
+			err = -EPERM;
+		else
+			tp->tsoffset = val - tcp_time_stamp;
+		break;
 	default:
 		err = -ENOPROTOOPT;
 		break;
@@ -2961,6 +2967,9 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 	case TCP_USER_TIMEOUT:
 		val = jiffies_to_msecs(icsk->icsk_user_timeout);
 		break;
+	case TCP_TIMESTAMP:
+		val = tcp_time_stamp + tp->tsoffset;
+		break;
 	default:
 		return -ENOPROTOOPT;
 	}
@@ -3034,7 +3043,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
 			       SKB_GSO_DODGY |
 			       SKB_GSO_TCP_ECN |
 			       SKB_GSO_TCPV6 |
-			       SKB_GSO_SHARED_FRAG |
+			       SKB_GSO_GRE |
 			       0) ||
 			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
 		goto out;
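The new TCP_TIMESTAMP option above is writable only while the socket is in repair mode (the checkpoint/restore path); reading it back returns the current timestamp clock plus the stored offset. A hedged userspace sketch of restoring a saved timestamp:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int restore_tcp_timestamp(int fd, unsigned int saved_ts)
{
	int on = 1;

	if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on)) < 0)
		return -1;
	/* fails with EPERM if the socket is not in repair mode */
	return setsockopt(fd, IPPROTO_TCP, TCP_TIMESTAMP,
			  &saved_ts, sizeof(saved_ts));
}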
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 291f2ed7cc31..019c2389a341 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -310,35 +310,24 @@ void tcp_slow_start(struct tcp_sock *tp)
 {
 	int cnt; /* increase in packets */
 	unsigned int delta = 0;
+	u32 snd_cwnd = tp->snd_cwnd;
 
-	/* RFC3465: ABC Slow start
-	 * Increase only after a full MSS of bytes is acked
-	 *
-	 * TCP sender SHOULD increase cwnd by the number of
-	 * previously unacknowledged bytes ACKed by each incoming
-	 * acknowledgment, provided the increase is not more than L
-	 */
-	if (sysctl_tcp_abc && tp->bytes_acked < tp->mss_cache)
-		return;
+	if (unlikely(!snd_cwnd)) {
+		pr_err_once("snd_cwnd is nul, please report this bug.\n");
+		snd_cwnd = 1U;
+	}
 
 	if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
 		cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */
 	else
-		cnt = tp->snd_cwnd; /* exponential increase */
-
-	/* RFC3465: ABC
-	 * We MAY increase by 2 if discovered delayed ack
-	 */
-	if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache)
-		cnt <<= 1;
-	tp->bytes_acked = 0;
+		cnt = snd_cwnd; /* exponential increase */
 
 	tp->snd_cwnd_cnt += cnt;
-	while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-		tp->snd_cwnd_cnt -= tp->snd_cwnd;
+	while (tp->snd_cwnd_cnt >= snd_cwnd) {
+		tp->snd_cwnd_cnt -= snd_cwnd;
 		delta++;
 	}
-	tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp);
+	tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp);
 }
 EXPORT_SYMBOL_GPL(tcp_slow_start);
 
@@ -372,20 +361,9 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 	/* In "safe" area, increase. */
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
 		tcp_slow_start(tp);
-
 	/* In dangerous area, increase slowly. */
-	else if (sysctl_tcp_abc) {
-		/* RFC3465: Appropriate Byte Count
-		 * increase once for each full cwnd acked
-		 */
-		if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
-			tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache;
-			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-				tp->snd_cwnd++;
-		}
-	} else {
+	else
 		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
-	}
 }
 EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
 
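With the Appropriate Byte Count logic removed, tcp_slow_start() reduces to plain slow start: cnt equals snd_cwnd, so each ACK leaves exactly one full window of credit in snd_cwnd_cnt and delta comes out as 1, growing the window by one segment per ACK, i.e. doubling per round trip. A toy standalone check of that arithmetic:

#include <stdio.h>

/* toy model of the credit loop in tcp_slow_start() above (no clamp) */
int main(void)
{
	unsigned int snd_cwnd = 10, snd_cwnd_cnt = 0, delta = 0;
	unsigned int cnt = snd_cwnd;	/* exponential increase */

	snd_cwnd_cnt += cnt;
	while (snd_cwnd_cnt >= snd_cwnd) {
		snd_cwnd_cnt -= snd_cwnd;
		delta++;
	}
	printf("delta per ACK = %u\n", delta);	/* prints 1 */
	return 0;
}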
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 492c7cfe1453..a759e19496d2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -98,7 +98,6 @@ int sysctl_tcp_frto_response __read_mostly;
 int sysctl_tcp_thin_dupack __read_mostly;
 
 int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
-int sysctl_tcp_abc __read_mostly;
 int sysctl_tcp_early_retrans __read_mostly = 2;
 
 #define FLAG_DATA		0x01 /* Incoming frame contained data. */
@@ -1240,13 +1239,13 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	 */
 	if (!skb_shinfo(prev)->gso_size) {
 		skb_shinfo(prev)->gso_size = mss;
-		skb_shinfo(prev)->gso_type |= sk->sk_gso_type;
+		skb_shinfo(prev)->gso_type = sk->sk_gso_type;
 	}
 
 	/* CHECKME: To clear or not to clear? Mimics normal skb currently */
 	if (skb_shinfo(skb)->gso_segs <= 1) {
 		skb_shinfo(skb)->gso_size = 0;
-		skb_shinfo(skb)->gso_type &= SKB_GSO_SHARED_FRAG;
+		skb_shinfo(skb)->gso_type = 0;
 	}
 
 	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
@@ -2007,7 +2006,6 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 	tp->snd_cwnd_cnt = 0;
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 	tp->frto_counter = 0;
-	tp->bytes_acked = 0;
 
 	tp->reordering = min_t(unsigned int, tp->reordering,
 			       sysctl_tcp_reordering);
@@ -2056,7 +2054,6 @@ void tcp_enter_loss(struct sock *sk, int how)
 	tp->snd_cwnd_cnt = 0;
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 
-	tp->bytes_acked = 0;
 	tcp_clear_retrans_partial(tp);
 
 	if (tcp_is_reno(tp))
@@ -2684,7 +2681,6 @@ static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	tp->high_seq = tp->snd_nxt;
-	tp->bytes_acked = 0;
 	tp->snd_cwnd_cnt = 0;
 	tp->prior_cwnd = tp->snd_cwnd;
 	tp->prr_delivered = 0;
@@ -2735,7 +2731,6 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	tp->prior_ssthresh = 0;
-	tp->bytes_acked = 0;
 	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
 		tp->undo_marker = 0;
 		tcp_init_cwnd_reduction(sk, set_ssthresh);
@@ -3417,7 +3412,6 @@ static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
 {
 	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
 	tp->snd_cwnd_cnt = 0;
-	tp->bytes_acked = 0;
 	TCP_ECN_queue_cwr(tp);
 	tcp_moderate_cwnd(tp);
 }
@@ -3502,6 +3496,11 @@ static bool tcp_process_frto(struct sock *sk, int flag)
 		}
 	} else {
 		if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
+			if (!tcp_packets_in_flight(tp)) {
+				tcp_enter_frto_loss(sk, 2, flag);
+				return true;
+			}
+
 			/* Prevent sending of new data. */
 			tp->snd_cwnd = min(tp->snd_cwnd,
 					   tcp_packets_in_flight(tp));
@@ -3608,15 +3607,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	if (after(ack, prior_snd_una))
 		flag |= FLAG_SND_UNA_ADVANCED;
 
-	if (sysctl_tcp_abc) {
-		if (icsk->icsk_ca_state < TCP_CA_CWR)
-			tp->bytes_acked += ack - prior_snd_una;
-		else if (icsk->icsk_ca_state == TCP_CA_Loss)
-			/* we assume just one segment left network */
-			tp->bytes_acked += min(ack - prior_snd_una,
-					       tp->mss_cache);
-	}
-
 	prior_fackets = tp->fackets_out;
 	prior_in_flight = tcp_packets_in_flight(tp);
 
@@ -3870,7 +3860,7 @@ static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
 		++ptr;
 		tp->rx_opt.rcv_tsval = ntohl(*ptr);
 		++ptr;
-		tp->rx_opt.rcv_tsecr = ntohl(*ptr);
+		tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
 		return true;
 	}
 	return false;
@@ -3894,7 +3884,11 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
 		if (tcp_parse_aligned_timestamp(tp, th))
 			return true;
 	}
+
 	tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL);
+	if (tp->rx_opt.saw_tstamp)
+		tp->rx_opt.rcv_tsecr -= tp->tsoffset;
+
 	return true;
 }
 
@@ -5647,8 +5641,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
 	 * the remote receives only the retransmitted (regular) SYNs: either
 	 * the original SYN-data or the corresponding SYN-ACK is lost.
 	 */
-	syn_drop = (cookie->len <= 0 && data &&
-		    inet_csk(sk)->icsk_retransmits);
+	syn_drop = (cookie->len <= 0 && data && tp->total_retrans);
 
 	tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
 
@@ -5676,6 +5669,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 	int saved_clamp = tp->rx_opt.mss_clamp;
 
 	tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, &foc);
+	if (tp->rx_opt.saw_tstamp)
+		tp->rx_opt.rcv_tsecr -= tp->tsoffset;
 
 	if (th->ack) {
 		/* rfc793:
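Every tsoffset hunk in this file enforces one invariant: the offset is added exactly once on transmit (see the tcp_output.c hunks below) and subtracted exactly once on each receive path, so code past option parsing still sees the unshifted local clock. A hedged sketch of why RTT sampling is unaffected (rtt_sample_from_tsecr is a hypothetical helper, not a kernel function):

/* wire: tsval = tcp_time_stamp + tp->tsoffset
 * parse: rcv_tsecr = tsecr_from_wire - tp->tsoffset
 * so the sample below is independent of tsoffset */
static u32 rtt_sample_from_tsecr(const struct tcp_sock *tp, u32 tsecr_wire)
{
	return tcp_time_stamp - (tsecr_wire - tp->tsoffset);
}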
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5a1cfc692df0..145d3bf8df86 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -496,6 +496,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		 * errors returned from accept().
 		 */
 		inet_csk_reqsk_queue_drop(sk, req, prev);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 		goto out;
 
 	case TCP_SYN_SENT:
@@ -725,7 +726,7 @@ release_sk1:
  */
 
 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
-			    u32 win, u32 ts, int oif,
+			    u32 win, u32 tsval, u32 tsecr, int oif,
 			    struct tcp_md5sig_key *key,
 			    int reply_flags, u8 tos)
 {
@@ -746,12 +747,12 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 
 	arg.iov[0].iov_base = (unsigned char *)&rep;
 	arg.iov[0].iov_len  = sizeof(rep.th);
-	if (ts) {
+	if (tsecr) {
 		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 				   (TCPOPT_TIMESTAMP << 8) |
 				   TCPOLEN_TIMESTAMP);
-		rep.opt[1] = htonl(tcp_time_stamp);
-		rep.opt[2] = htonl(ts);
+		rep.opt[1] = htonl(tsval);
+		rep.opt[2] = htonl(tsecr);
 		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
 	}
 
@@ -766,7 +767,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 
 #ifdef CONFIG_TCP_MD5SIG
 	if (key) {
-		int offset = (ts) ? 3 : 0;
+		int offset = (tsecr) ? 3 : 0;
 
 		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
 					  (TCPOPT_NOP << 16) |
@@ -801,6 +802,7 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 
 	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
+			tcp_time_stamp + tcptw->tw_ts_offset,
 			tcptw->tw_ts_recent,
 			tw->tw_bound_dev_if,
 			tcp_twsk_md5_key(tcptw),
@@ -820,6 +822,7 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
 			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
+			tcp_time_stamp,
 			req->ts_recent,
 			0,
 			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
@@ -1501,8 +1504,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	 * clogging syn queue with openreqs with exponentially increasing
 	 * timeout.
 	 */
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 		goto drop;
+	}
 
 	req = inet_reqsk_alloc(&tcp_request_sock_ops);
 	if (!req)
@@ -1667,6 +1672,7 @@ drop_and_release:
 drop_and_free:
 	reqsk_free(req);
 drop:
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return 0;
 }
 EXPORT_SYMBOL(tcp_v4_conn_request);
@@ -2610,7 +2616,7 @@ EXPORT_SYMBOL(tcp_proc_register);
 
 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
 {
-	proc_net_remove(net, afinfo->name);
+	remove_proc_entry(afinfo->name, net->proc_net);
 }
 EXPORT_SYMBOL(tcp_proc_unregister);
 
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f35f2dfb6401..b83a49cc3816 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -102,6 +102,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 		tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
 
 		if (tmp_opt.saw_tstamp) {
+			tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
 			tmp_opt.ts_recent = tcptw->tw_ts_recent;
 			tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
 			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
@@ -288,6 +289,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		tcptw->tw_rcv_wnd = tcp_receive_window(tp);
 		tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
 		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
+		tcptw->tw_ts_offset = tp->tsoffset;
 
 #if IS_ENABLED(CONFIG_IPV6)
 		if (tw->tw_family == PF_INET6) {
@@ -446,7 +448,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 		 */
 		newtp->snd_cwnd = TCP_INIT_CWND;
 		newtp->snd_cwnd_cnt = 0;
-		newtp->bytes_acked = 0;
 
 		newtp->frto_counter = 0;
 		newtp->frto_highmark = 0;
@@ -500,6 +501,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 			newtp->rx_opt.ts_recent_stamp = 0;
 			newtp->tcp_header_len = sizeof(struct tcphdr);
 		}
+		newtp->tsoffset = 0;
 #ifdef CONFIG_TCP_MD5SIG
 		newtp->md5sig_info = NULL;	/*XXX*/
 		if (newtp->af_specific->md5_lookup(sk, newsk))
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 367e2ec01da1..fd0cea114b5d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -622,7 +622,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 
 	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
 		opts->options |= OPTION_TS;
-		opts->tsval = TCP_SKB_CB(skb)->when;
+		opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset;
 		opts->tsecr = tp->rx_opt.ts_recent;
 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 	}
@@ -806,7 +806,7 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
 
 	if (likely(tp->rx_opt.tstamp_ok)) {
 		opts->options |= OPTION_TS;
-		opts->tsval = tcb ? tcb->when : 0;
+		opts->tsval = tcb ? tcb->when + tp->tsoffset : 0;
 		opts->tsecr = tp->rx_opt.ts_recent;
 		size += TCPOLEN_TSTAMP_ALIGNED;
 	}
@@ -1133,7 +1133,6 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
 				 unsigned int mss_now)
 {
-	skb_shinfo(skb)->gso_type &= SKB_GSO_SHARED_FRAG;
 	if (skb->len <= mss_now || !sk_can_gso(sk) ||
 	    skb->ip_summed == CHECKSUM_NONE) {
 		/* Avoid the costly divide in the normal
@@ -1141,10 +1140,11 @@ static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
 		 */
 		skb_shinfo(skb)->gso_segs = 1;
 		skb_shinfo(skb)->gso_size = 0;
+		skb_shinfo(skb)->gso_type = 0;
 	} else {
 		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
 		skb_shinfo(skb)->gso_size = mss_now;
-		skb_shinfo(skb)->gso_type |= sk->sk_gso_type;
+		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
 	}
 }
 
@@ -1331,7 +1331,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
 /* Remove acked data from a packet in the transmit queue. */
 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 {
-	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+	if (skb_unclone(skb, GFP_ATOMIC))
 		return -ENOMEM;
 
 	__pskb_trim_head(skb, len);
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 4526fe68e60e..d4943f67aff2 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -234,7 +234,7 @@ static __init int tcpprobe_init(void)
 	if (!tcp_probe.log)
 		goto err0;
 
-	if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &tcpprobe_fops))
+	if (!proc_create(procname, S_IRUSR, init_net.proc_net, &tcpprobe_fops))
 		goto err0;
 
 	ret = register_jprobe(&tcp_jprobe);
@@ -244,7 +244,7 @@ static __init int tcpprobe_init(void)
 	pr_info("probe registered (port=%d) bufsize=%u\n", port, bufsize);
 	return 0;
  err1:
-	proc_net_remove(&init_net, procname);
+	remove_proc_entry(procname, init_net.proc_net);
  err0:
 	kfree(tcp_probe.log);
 	return ret;
@@ -253,7 +253,7 @@ module_init(tcpprobe_init);
 
 static __exit void tcpprobe_exit(void)
 {
-	proc_net_remove(&init_net, procname);
+	remove_proc_entry(procname, init_net.proc_net);
 	unregister_jprobe(&tcp_jprobe);
 	kfree(tcp_probe.log);
 }
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 6791aac06ea9..265c42cf963c 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2122,7 +2122,7 @@ EXPORT_SYMBOL(udp_proc_register);
 
 void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
 {
-	proc_net_remove(net, afinfo->name);
+	remove_proc_entry(afinfo->name, net->proc_net);
 }
 EXPORT_SYMBOL(udp_proc_unregister);
 
@@ -2305,7 +2305,8 @@ struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 		/* Packet is from an untrusted source, reset gso_segs. */
 		int type = skb_shinfo(skb)->gso_type;
 
-		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) ||
+		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
+				      SKB_GSO_GRE) ||
 			     !(type & (SKB_GSO_UDP))))
 			goto out;
 
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index 06814b6216dc..1f12c8b45864 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -132,7 +132,7 @@ int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
 	 * header and optional ESP marker bytes) and then modify the
 	 * protocol to ESP, and then call into the transform receiver.
 	 */
-	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+	if (skb_unclone(skb, GFP_ATOMIC))
 		goto drop;
 
 	/* Now we can update and verify the packet length... */
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index ddee0a099a2c..fe5189e2e114 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -142,8 +142,8 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 	for_each_input_rcu(rcv_notify_handlers, handler)
 		handler->handler(skb);
 
-	if (skb_cloned(skb) &&
-	    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+	err = skb_unclone(skb, GFP_ATOMIC);
+	if (err)
 		goto out;
 
 	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 3be0ac2c1920..9a459be24af7 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -262,21 +262,56 @@ static struct ctl_table xfrm4_policy_table[] = {
 	{ }
 };
 
-static struct ctl_table_header *sysctl_hdr;
-#endif
-
-static void __init xfrm4_policy_init(void)
+static int __net_init xfrm4_net_init(struct net *net)
 {
-	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
+	struct ctl_table *table;
+	struct ctl_table_header *hdr;
+
+	table = xfrm4_policy_table;
+	if (!net_eq(net, &init_net)) {
+		table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
+		if (!table)
+			goto err_alloc;
+
+		table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
+	}
+
+	hdr = register_net_sysctl(net, "net/ipv4", table);
+	if (!hdr)
+		goto err_reg;
+
+	net->ipv4.xfrm4_hdr = hdr;
+	return 0;
+
+err_reg:
+	if (!net_eq(net, &init_net))
+		kfree(table);
+err_alloc:
+	return -ENOMEM;
 }
 
-static void __exit xfrm4_policy_fini(void)
+static void __net_exit xfrm4_net_exit(struct net *net)
 {
-#ifdef CONFIG_SYSCTL
-	if (sysctl_hdr)
-		unregister_net_sysctl_table(sysctl_hdr);
+	struct ctl_table *table;
+
+	if (net->ipv4.xfrm4_hdr == NULL)
+		return;
+
+	table = net->ipv4.xfrm4_hdr->ctl_table_arg;
+	unregister_net_sysctl_table(net->ipv4.xfrm4_hdr);
+	if (!net_eq(net, &init_net))
+		kfree(table);
+}
+
+static struct pernet_operations __net_initdata xfrm4_net_ops = {
+	.init = xfrm4_net_init,
+	.exit = xfrm4_net_exit,
+};
 #endif
-	xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo);
+
+static void __init xfrm4_policy_init(void)
+{
+	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
 }
 
 void __init xfrm4_init(void)
@@ -286,8 +321,7 @@ void __init xfrm4_init(void)
 	xfrm4_state_init();
 	xfrm4_policy_init();
 #ifdef CONFIG_SYSCTL
-	sysctl_hdr = register_net_sysctl(&init_net, "net/ipv4",
-					 xfrm4_policy_table);
+	register_pernet_subsys(&xfrm4_net_ops);
 #endif
 }
 
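The conversion above is the standard per-namespace sysctl pattern: init_net keeps the static table, every other namespace gets a kmemdup()'d copy whose .data pointers are re-aimed at that namespace's state, and the exit hook frees only the copies. A condensed hedged template (example_table, example_value and example_hdr are hypothetical names):

static int __net_init example_net_init(struct net *net)
{
	struct ctl_table *table = example_table;

	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(example_table), GFP_KERNEL);
		if (!table)
			return -ENOMEM;
		table[0].data = &net->example_value;	/* per-net state */
	}
	net->example_hdr = register_net_sysctl(net, "net/example", table);
	if (!net->example_hdr) {
		if (!net_eq(net, &init_net))
			kfree(table);	/* never free the static table */
		return -ENOMEM;
	}
	return 0;
}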
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 7f7332b44699..4dc0d44a5d31 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -244,6 +244,9 @@ const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
 const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT;
 const struct in6_addr in6addr_linklocal_allnodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
 const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT;
+const struct in6_addr in6addr_interfacelocal_allnodes = IN6ADDR_INTERFACELOCAL_ALLNODES_INIT;
+const struct in6_addr in6addr_interfacelocal_allrouters = IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT;
+const struct in6_addr in6addr_sitelocal_allrouters = IN6ADDR_SITELOCAL_ALLROUTERS_INIT;
 
 /* Check if a valid qdisc is available */
 static inline bool addrconf_qdisc_ok(const struct net_device *dev)
@@ -428,6 +431,9 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
 	/* protected by rtnl_lock */
 	rcu_assign_pointer(dev->ip6_ptr, ndev);
 
+	/* Join interface-local all-node multicast group */
+	ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);
+
 	/* Join all-node multicast group */
 	ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
 
@@ -611,10 +617,15 @@ static void dev_forward_change(struct inet6_dev *idev)
 	if (idev->cnf.forwarding)
 		dev_disable_lro(dev);
 	if (dev->flags & IFF_MULTICAST) {
-		if (idev->cnf.forwarding)
+		if (idev->cnf.forwarding) {
 			ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
-		else
+			ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters);
+			ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters);
+		} else {
 			ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
+			ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters);
+			ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters);
+		}
 	}
 
 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
@@ -1656,6 +1667,7 @@ static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
 	if (dev->addr_len != IEEE802154_ADDR_LEN)
 		return -1;
 	memcpy(eui, dev->dev_addr, 8);
+	eui[0] ^= 2;
 	return 0;
 }
 
@@ -3313,14 +3325,14 @@ static const struct file_operations if6_fops = {
 
 static int __net_init if6_proc_net_init(struct net *net)
 {
-	if (!proc_net_fops_create(net, "if_inet6", S_IRUGO, &if6_fops))
+	if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
 		return -ENOMEM;
 	return 0;
 }
 
 static void __net_exit if6_proc_net_exit(struct net *net)
 {
-	proc_net_remove(net, "if_inet6");
+	remove_proc_entry("if_inet6", net->proc_net);
 }
 
 static struct pernet_operations if6_proc_net_ops = {
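For reference, the well-known groups newly joined in this file (RFC 4291 section 2.7.1) are interface-local all-nodes ff01::1 and all-routers ff01::2 plus site-local all-routers ff05::2; the link-local groups ff02::1/ff02::2 were already joined. The scope is just the low nibble of the second address byte, as a quick sketch shows:

/* sketch: multicast scope is the low nibble of byte 1 of the address */
static inline int mc_scope(const struct in6_addr *addr)
{
	return addr->s6_addr[1] & 0x0f;	/* ff01:: -> 1, ff02:: -> 2, ff05:: -> 5 */
}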
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 384233188ac1..bb02e176cb70 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -521,8 +521,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	/* We are going to _remove_ AH header to keep sockets happy,
 	 * so... Later this can change. */
-	if (skb_cloned(skb) &&
-	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+	if (skb_unclone(skb, GFP_ATOMIC))
 		goto out;
 
 	skb->ip_summed = CHECKSUM_NONE;
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 921b8b398a8c..5a80f15a9de2 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -509,7 +509,7 @@ static const struct file_operations ac6_seq_fops = {
 
 int __net_init ac6_proc_init(struct net *net)
 {
-	if (!proc_net_fops_create(net, "anycast6", S_IRUGO, &ac6_seq_fops))
+	if (!proc_create("anycast6", S_IRUGO, net->proc_net, &ac6_seq_fops))
 		return -ENOMEM;
 
 	return 0;
@@ -517,7 +517,7 @@ int __net_init ac6_proc_init(struct net *net)
 
 void ac6_proc_exit(struct net *net)
 {
-	proc_net_remove(net, "anycast6");
+	remove_proc_entry("anycast6", net->proc_net);
 }
 #endif
 
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 33be36398a78..f5a54782a340 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -380,7 +380,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
 	if (skb->protocol == htons(ETH_P_IPV6)) {
 		sin->sin6_addr = ipv6_hdr(skb)->saddr;
 		if (np->rxopt.all)
-			datagram_recv_ctl(sk, msg, skb);
+			ip6_datagram_recv_ctl(sk, msg, skb);
 		if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
 			sin->sin6_scope_id = IP6CB(skb)->iif;
 	} else {
@@ -468,7 +468,8 @@ out:
 }
 
 
-int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
+int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
+			  struct sk_buff *skb)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct inet6_skb_parm *opt = IP6CB(skb);
@@ -598,11 +599,12 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
 	}
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl);
 
-int datagram_send_ctl(struct net *net, struct sock *sk,
-		      struct msghdr *msg, struct flowi6 *fl6,
-		      struct ipv6_txoptions *opt,
-		      int *hlimit, int *tclass, int *dontfrag)
+int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
+			  struct msghdr *msg, struct flowi6 *fl6,
+			  struct ipv6_txoptions *opt,
+			  int *hlimit, int *tclass, int *dontfrag)
 {
 	struct in6_pktinfo *src_info;
 	struct cmsghdr *cmsg;
@@ -872,4 +874,4 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
 exit_f:
 	return err;
 }
-EXPORT_SYMBOL_GPL(datagram_send_ctl);
+EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 22494afd981c..b973ed3d06cf 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -65,13 +65,13 @@ static DEFINE_SPINLOCK(ip6_fl_lock);
 static DEFINE_SPINLOCK(ip6_sk_fl_lock);
 
 #define for_each_fl_rcu(hash, fl)				\
-	for (fl = rcu_dereference(fl_ht[(hash)]);		\
+	for (fl = rcu_dereference_bh(fl_ht[(hash)]);		\
 	     fl != NULL;					\
-	     fl = rcu_dereference(fl->next))
+	     fl = rcu_dereference_bh(fl->next))
 #define for_each_fl_continue_rcu(fl)				\
-	for (fl = rcu_dereference(fl->next);			\
+	for (fl = rcu_dereference_bh(fl->next);			\
 	     fl != NULL;					\
-	     fl = rcu_dereference(fl->next))
+	     fl = rcu_dereference_bh(fl->next))
 
 #define for_each_sk_fl_rcu(np, sfl)				\
 	for (sfl = rcu_dereference_bh(np->ipv6_fl_list); \
@@ -390,8 +390,8 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
 		msg.msg_control = (void*)(fl->opt+1);
 		memset(&flowi6, 0, sizeof(flowi6));
 
-		err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk,
-					&junk, &junk);
+		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
+					    &junk, &junk, &junk);
 		if (err)
 			goto done;
 		err = -EINVAL;
@@ -806,15 +806,15 @@ static const struct file_operations ip6fl_seq_fops = {
 
 static int __net_init ip6_flowlabel_proc_init(struct net *net)
 {
-	if (!proc_net_fops_create(net, "ip6_flowlabel",
-				  S_IRUGO, &ip6fl_seq_fops))
+	if (!proc_create("ip6_flowlabel", S_IRUGO, net->proc_net,
+			 &ip6fl_seq_fops))
 		return -ENOMEM;
 	return 0;
 }
 
 static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
 {
-	proc_net_remove(net, "ip6_flowlabel");
+	remove_proc_entry("ip6_flowlabel", net->proc_net);
 }
 #else
 static inline int ip6_flowlabel_proc_init(struct net *net)
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index db91fe3466a3..e4efffe2522e 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -958,7 +958,7 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
 	int ret;
 
 	if (!ip6_tnl_xmit_ctl(t))
-		return -1;
+		goto tx_err;
 
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 4ac5bf30e16a..5b10414e619e 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -118,6 +118,15 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
118 ipv6_addr_loopback(&hdr->daddr)) 118 ipv6_addr_loopback(&hdr->daddr))
119 goto err; 119 goto err;
120 120
121 /* RFC4291 2.7
122 * Nodes must not originate a packet to a multicast address whose scope
123 * field contains the reserved value 0; if such a packet is received, it
124 * must be silently dropped.
125 */
126 if (ipv6_addr_is_multicast(&hdr->daddr) &&
127 IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 0)
128 goto err;
129
121 /* 130 /*
122 * RFC4291 2.7 131 * RFC4291 2.7
123 * Multicast addresses must not be used as source addresses in IPv6 132 * Multicast addresses must not be used as source addresses in IPv6
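
IPV6_ADDR_MC_SCOPE() extracts the 4-bit scope field that RFC 4291 places in the second byte of a multicast address, so the new check is a single mask-and-compare:

	/* from include/net/ipv6.h */
	#define IPV6_ADDR_MC_SCOPE(a)	((a)->s6_addr[1] & 0x0f)

	/* ff00::1 -> scope 0 (reserved): silently dropped by the hunk above */
	/* ff02::1 -> scope 2 (link-local): delivered as before */
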
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index d141fc32a2ea..8234c1dcdf72 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -99,8 +99,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
99 ~(SKB_GSO_UDP | 99 ~(SKB_GSO_UDP |
100 SKB_GSO_DODGY | 100 SKB_GSO_DODGY |
101 SKB_GSO_TCP_ECN | 101 SKB_GSO_TCP_ECN |
102 SKB_GSO_GRE |
102 SKB_GSO_TCPV6 | 103 SKB_GSO_TCPV6 |
103 SKB_GSO_SHARED_FRAG |
104 0))) 104 0)))
105 goto out; 105 goto out;
106 106
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 906b7e6dd7fb..155eccfa7760 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -120,6 +120,13 @@ static int ip6_finish_output2(struct sk_buff *skb)
120 120
121 IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST, 121 IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
122 skb->len); 122 skb->len);
123
124 if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
125 IPV6_ADDR_SCOPE_NODELOCAL &&
126 !(dev->flags & IFF_LOOPBACK)) {
127 kfree_skb(skb);
128 return 0;
129 }
123 } 130 }
124 131
125 rcu_read_lock_bh(); 132 rcu_read_lock_bh();
@@ -242,9 +249,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
242 dst->dev, dst_output); 249 dst->dev, dst_output);
243 } 250 }
244 251
245 net_dbg_ratelimited("IPv6: sending pkt_too_big to self\n");
246 skb->dev = dst->dev; 252 skb->dev = dst->dev;
247 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 253 ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
248 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); 254 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
249 kfree_skb(skb); 255 kfree_skb(skb);
250 return -EMSGSIZE; 256 return -EMSGSIZE;
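
With ipv6_local_error() an over-MTU transmit no longer loops an ICMPv6 Packet Too Big back to the sender; the EMSGSIZE error and the path MTU are delivered on the socket's error queue instead. A userspace sketch of picking the MTU up, assuming IPV6_RECVERR has been enabled on the socket:

	int on = 1;

	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVERR, &on, sizeof(on));
	if (send(fd, buf, len, 0) < 0 && errno == EMSGSIZE) {
		/* recvmsg(fd, &msg, MSG_ERRQUEUE) then yields a
		 * sock_extended_err carrying the MTU in ee_info */
	}
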
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 351ce98e90d9..96bfb4e4b820 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1359,9 +1359,9 @@ static int __net_init ip6mr_net_init(struct net *net)
1359 1359
1360#ifdef CONFIG_PROC_FS 1360#ifdef CONFIG_PROC_FS
1361 err = -ENOMEM; 1361 err = -ENOMEM;
1362 if (!proc_net_fops_create(net, "ip6_mr_vif", 0, &ip6mr_vif_fops)) 1362 if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
1363 goto proc_vif_fail; 1363 goto proc_vif_fail;
1364 if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops)) 1364 if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
1365 goto proc_cache_fail; 1365 goto proc_cache_fail;
1366#endif 1366#endif
1367 1367
@@ -1369,7 +1369,7 @@ static int __net_init ip6mr_net_init(struct net *net)
1369 1369
1370#ifdef CONFIG_PROC_FS 1370#ifdef CONFIG_PROC_FS
1371proc_cache_fail: 1371proc_cache_fail:
1372 proc_net_remove(net, "ip6_mr_vif"); 1372 remove_proc_entry("ip6_mr_vif", net->proc_net);
1373proc_vif_fail: 1373proc_vif_fail:
1374 ip6mr_rules_exit(net); 1374 ip6mr_rules_exit(net);
1375#endif 1375#endif
@@ -1380,8 +1380,8 @@ fail:
1380static void __net_exit ip6mr_net_exit(struct net *net) 1380static void __net_exit ip6mr_net_exit(struct net *net)
1381{ 1381{
1382#ifdef CONFIG_PROC_FS 1382#ifdef CONFIG_PROC_FS
1383 proc_net_remove(net, "ip6_mr_cache"); 1383 remove_proc_entry("ip6_mr_cache", net->proc_net);
1384 proc_net_remove(net, "ip6_mr_vif"); 1384 remove_proc_entry("ip6_mr_vif", net->proc_net);
1385#endif 1385#endif
1386 ip6mr_rules_exit(net); 1386 ip6mr_rules_exit(net);
1387} 1387}
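
The proc_net_fops_create()/proc_net_remove() conversion seen here is purely mechanical and repeats in the igmp6, sockstat6/snmp6, raw6, ipv6_route and pfkey hunks below; the old wrappers map one-to-one onto the generic proc API:

	/* old */
	proc_net_fops_create(net, "name", mode, &fops);
	proc_net_remove(net, "name");

	/* new */
	proc_create("name", mode, net->proc_net, &fops);
	remove_proc_entry("name", net->proc_net);
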
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index ee94d31c9d4d..d1e2e8ef29c5 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -476,8 +476,8 @@ sticky_done:
476 msg.msg_controllen = optlen; 476 msg.msg_controllen = optlen;
477 msg.msg_control = (void*)(opt+1); 477 msg.msg_control = (void*)(opt+1);
478 478
479 retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, 479 retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk,
480 &junk); 480 &junk, &junk);
481 if (retv) 481 if (retv)
482 goto done; 482 goto done;
483update: 483update:
@@ -1002,7 +1002,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
1002 release_sock(sk); 1002 release_sock(sk);
1003 1003
1004 if (skb) { 1004 if (skb) {
1005 int err = datagram_recv_ctl(sk, &msg, skb); 1005 int err = ip6_datagram_recv_ctl(sk, &msg, skb);
1006 kfree_skb(skb); 1006 kfree_skb(skb);
1007 if (err) 1007 if (err)
1008 return err; 1008 return err;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index e5de48551864..bfa6cc36ef2a 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -661,6 +661,10 @@ static void igmp6_group_added(struct ifmcaddr6 *mc)
661 struct net_device *dev = mc->idev->dev; 661 struct net_device *dev = mc->idev->dev;
662 char buf[MAX_ADDR_LEN]; 662 char buf[MAX_ADDR_LEN];
663 663
664 if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
665 IPV6_ADDR_SCOPE_LINKLOCAL)
666 return;
667
664 spin_lock_bh(&mc->mca_lock); 668 spin_lock_bh(&mc->mca_lock);
665 if (!(mc->mca_flags&MAF_LOADED)) { 669 if (!(mc->mca_flags&MAF_LOADED)) {
666 mc->mca_flags |= MAF_LOADED; 670 mc->mca_flags |= MAF_LOADED;
@@ -687,6 +691,10 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
687 struct net_device *dev = mc->idev->dev; 691 struct net_device *dev = mc->idev->dev;
688 char buf[MAX_ADDR_LEN]; 692 char buf[MAX_ADDR_LEN];
689 693
694 if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
695 IPV6_ADDR_SCOPE_LINKLOCAL)
696 return;
697
690 spin_lock_bh(&mc->mca_lock); 698 spin_lock_bh(&mc->mca_lock);
691 if (mc->mca_flags&MAF_LOADED) { 699 if (mc->mca_flags&MAF_LOADED) {
692 mc->mca_flags &= ~MAF_LOADED; 700 mc->mca_flags &= ~MAF_LOADED;
@@ -2591,10 +2599,10 @@ static int __net_init igmp6_proc_init(struct net *net)
2591 int err; 2599 int err;
2592 2600
2593 err = -ENOMEM; 2601 err = -ENOMEM;
2594 if (!proc_net_fops_create(net, "igmp6", S_IRUGO, &igmp6_mc_seq_fops)) 2602 if (!proc_create("igmp6", S_IRUGO, net->proc_net, &igmp6_mc_seq_fops))
2595 goto out; 2603 goto out;
2596 if (!proc_net_fops_create(net, "mcfilter6", S_IRUGO, 2604 if (!proc_create("mcfilter6", S_IRUGO, net->proc_net,
2597 &igmp6_mcf_seq_fops)) 2605 &igmp6_mcf_seq_fops))
2598 goto out_proc_net_igmp6; 2606 goto out_proc_net_igmp6;
2599 2607
2600 err = 0; 2608 err = 0;
@@ -2602,14 +2610,14 @@ out:
2602 return err; 2610 return err;
2603 2611
2604out_proc_net_igmp6: 2612out_proc_net_igmp6:
2605 proc_net_remove(net, "igmp6"); 2613 remove_proc_entry("igmp6", net->proc_net);
2606 goto out; 2614 goto out;
2607} 2615}
2608 2616
2609static void __net_exit igmp6_proc_exit(struct net *net) 2617static void __net_exit igmp6_proc_exit(struct net *net)
2610{ 2618{
2611 proc_net_remove(net, "mcfilter6"); 2619 remove_proc_entry("mcfilter6", net->proc_net);
2612 proc_net_remove(net, "igmp6"); 2620 remove_proc_entry("igmp6", net->proc_net);
2613} 2621}
2614#else 2622#else
2615static inline int igmp6_proc_init(struct net *net) 2623static inline int igmp6_proc_init(struct net *net)
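
Multicast scopes below IPV6_ADDR_SCOPE_LINKLOCAL never appear on the wire, so for such groups there is no MLD report to send and no hardware filter entry to program; the early return in both hunks skips both. The relevant scope values, for reference:

	/* from include/net/ipv6.h */
	#define IPV6_ADDR_SCOPE_NODELOCAL	0x01	/* interface-local */
	#define IPV6_ADDR_SCOPE_LINKLOCAL	0x02
	/* scope 0x00 is reserved; 0x01 traffic never leaves the host */
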
diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
index 7302b0b7b642..83acc1405a18 100644
--- a/net/ipv6/netfilter/ip6t_NPT.c
+++ b/net/ipv6/netfilter/ip6t_NPT.c
@@ -9,6 +9,7 @@
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/skbuff.h> 10#include <linux/skbuff.h>
11#include <linux/ipv6.h> 11#include <linux/ipv6.h>
12#include <net/ipv6.h>
12#include <linux/netfilter.h> 13#include <linux/netfilter.h>
13#include <linux/netfilter_ipv6.h> 14#include <linux/netfilter_ipv6.h>
14#include <linux/netfilter_ipv6/ip6t_NPT.h> 15#include <linux/netfilter_ipv6/ip6t_NPT.h>
@@ -18,11 +19,20 @@ static int ip6t_npt_checkentry(const struct xt_tgchk_param *par)
18{ 19{
19 struct ip6t_npt_tginfo *npt = par->targinfo; 20 struct ip6t_npt_tginfo *npt = par->targinfo;
20 __wsum src_sum = 0, dst_sum = 0; 21 __wsum src_sum = 0, dst_sum = 0;
22 struct in6_addr pfx;
21 unsigned int i; 23 unsigned int i;
22 24
23 if (npt->src_pfx_len > 64 || npt->dst_pfx_len > 64) 25 if (npt->src_pfx_len > 64 || npt->dst_pfx_len > 64)
24 return -EINVAL; 26 return -EINVAL;
25 27
 28 /* Ensure that the host bits beyond the prefix length are zero */
29 ipv6_addr_prefix(&pfx, &npt->src_pfx.in6, npt->src_pfx_len);
30 if (!ipv6_addr_equal(&pfx, &npt->src_pfx.in6))
31 return -EINVAL;
32 ipv6_addr_prefix(&pfx, &npt->dst_pfx.in6, npt->dst_pfx_len);
33 if (!ipv6_addr_equal(&pfx, &npt->dst_pfx.in6))
34 return -EINVAL;
35
26 for (i = 0; i < ARRAY_SIZE(npt->src_pfx.in6.s6_addr16); i++) { 36 for (i = 0; i < ARRAY_SIZE(npt->src_pfx.in6.s6_addr16); i++) {
27 src_sum = csum_add(src_sum, 37 src_sum = csum_add(src_sum,
28 (__force __wsum)npt->src_pfx.in6.s6_addr16[i]); 38 (__force __wsum)npt->src_pfx.in6.s6_addr16[i]);
@@ -30,7 +40,7 @@ static int ip6t_npt_checkentry(const struct xt_tgchk_param *par)
30 (__force __wsum)npt->dst_pfx.in6.s6_addr16[i]); 40 (__force __wsum)npt->dst_pfx.in6.s6_addr16[i]);
31 } 41 }
32 42
33 npt->adjustment = (__force __sum16) csum_sub(src_sum, dst_sum); 43 npt->adjustment = ~csum_fold(csum_sub(src_sum, dst_sum));
34 return 0; 44 return 0;
35} 45}
36 46
@@ -51,7 +61,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
51 61
52 idx = i / 32; 62 idx = i / 32;
53 addr->s6_addr32[idx] &= mask; 63 addr->s6_addr32[idx] &= mask;
54 addr->s6_addr32[idx] |= npt->dst_pfx.in6.s6_addr32[idx]; 64 addr->s6_addr32[idx] |= ~mask & npt->dst_pfx.in6.s6_addr32[idx];
55 } 65 }
56 66
57 if (pfx_len <= 48) 67 if (pfx_len <= 48)
@@ -66,8 +76,8 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
66 return false; 76 return false;
67 } 77 }
68 78
69 sum = (__force __sum16) csum_add((__force __wsum)addr->s6_addr16[idx], 79 sum = ~csum_fold(csum_add(csum_unfold((__force __sum16)addr->s6_addr16[idx]),
70 npt->adjustment); 80 csum_unfold(npt->adjustment)));
71 if (sum == CSUM_MANGLED_0) 81 if (sum == CSUM_MANGLED_0)
72 sum = 0; 82 sum = 0;
73 *(__force __sum16 *)&addr->s6_addr16[idx] = sum; 83 *(__force __sum16 *)&addr->s6_addr16[idx] = sum;
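
The adjustment fix is standard one's-complement bookkeeping: csum_sub() operates on 32-bit __wsum values, while the stored per-rule adjustment is a folded 16-bit __sum16, so it must pass through csum_fold() on the way out and csum_unfold() on the way back in. A sketch of applying such an adjustment to a single 16-bit address word, mirroring the hunk above:

	static __sum16 npt_adjust_word(__sum16 word, __sum16 adjustment)
	{
		__wsum sum = csum_add(csum_unfold(word),
				      csum_unfold(adjustment));

		return ~csum_fold(sum);	/* refold carries into 16 bits */
	}
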
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index c674f158efa8..54087e96d7b8 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -97,9 +97,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
97 if (table == NULL) 97 if (table == NULL)
98 goto err_alloc; 98 goto err_alloc;
99 99
100 table[0].data = &net->ipv6.frags.high_thresh; 100 table[0].data = &net->nf_frag.frags.timeout;
101 table[1].data = &net->ipv6.frags.low_thresh; 101 table[1].data = &net->nf_frag.frags.low_thresh;
102 table[2].data = &net->ipv6.frags.timeout; 102 table[2].data = &net->nf_frag.frags.high_thresh;
103 } 103 }
104 104
105 hdr = register_net_sysctl(net, "net/netfilter", table); 105 hdr = register_net_sysctl(net, "net/netfilter", table);
@@ -368,7 +368,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
368 } 368 }
369 369
370 /* Head of list must not be cloned. */ 370 /* Head of list must not be cloned. */
371 if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) { 371 if (skb_unclone(head, GFP_ATOMIC)) {
372 pr_debug("skb is cloned but can't expand head"); 372 pr_debug("skb is cloned but can't expand head");
373 goto out_oom; 373 goto out_oom;
374 } 374 }
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 745a32042950..bbbe53a99b57 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -295,11 +295,11 @@ int snmp6_unregister_dev(struct inet6_dev *idev)
295 295
296static int __net_init ipv6_proc_init_net(struct net *net) 296static int __net_init ipv6_proc_init_net(struct net *net)
297{ 297{
298 if (!proc_net_fops_create(net, "sockstat6", S_IRUGO, 298 if (!proc_create("sockstat6", S_IRUGO, net->proc_net,
299 &sockstat6_seq_fops)) 299 &sockstat6_seq_fops))
300 return -ENOMEM; 300 return -ENOMEM;
301 301
302 if (!proc_net_fops_create(net, "snmp6", S_IRUGO, &snmp6_seq_fops)) 302 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
303 goto proc_snmp6_fail; 303 goto proc_snmp6_fail;
304 304
305 net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net); 305 net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
@@ -308,17 +308,17 @@ static int __net_init ipv6_proc_init_net(struct net *net)
308 return 0; 308 return 0;
309 309
310proc_dev_snmp6_fail: 310proc_dev_snmp6_fail:
311 proc_net_remove(net, "snmp6"); 311 remove_proc_entry("snmp6", net->proc_net);
312proc_snmp6_fail: 312proc_snmp6_fail:
313 proc_net_remove(net, "sockstat6"); 313 remove_proc_entry("sockstat6", net->proc_net);
314 return -ENOMEM; 314 return -ENOMEM;
315} 315}
316 316
317static void __net_exit ipv6_proc_exit_net(struct net *net) 317static void __net_exit ipv6_proc_exit_net(struct net *net)
318{ 318{
319 proc_net_remove(net, "sockstat6"); 319 remove_proc_entry("sockstat6", net->proc_net);
320 proc_net_remove(net, "dev_snmp6"); 320 remove_proc_entry("dev_snmp6", net->proc_net);
321 proc_net_remove(net, "snmp6"); 321 remove_proc_entry("snmp6", net->proc_net);
322} 322}
323 323
324static struct pernet_operations ipv6_proc_ops = { 324static struct pernet_operations ipv6_proc_ops = {
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 6cd29b1e8b92..c65907db8c44 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -507,7 +507,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
507 sock_recv_ts_and_drops(msg, sk, skb); 507 sock_recv_ts_and_drops(msg, sk, skb);
508 508
509 if (np->rxopt.all) 509 if (np->rxopt.all)
510 datagram_recv_ctl(sk, msg, skb); 510 ip6_datagram_recv_ctl(sk, msg, skb);
511 511
512 err = copied; 512 err = copied;
513 if (flags & MSG_TRUNC) 513 if (flags & MSG_TRUNC)
@@ -822,8 +822,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
822 memset(opt, 0, sizeof(struct ipv6_txoptions)); 822 memset(opt, 0, sizeof(struct ipv6_txoptions));
823 opt->tot_len = sizeof(struct ipv6_txoptions); 823 opt->tot_len = sizeof(struct ipv6_txoptions);
824 824
825 err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 825 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
826 &hlimit, &tclass, &dontfrag); 826 &hlimit, &tclass, &dontfrag);
827 if (err < 0) { 827 if (err < 0) {
828 fl6_sock_release(flowlabel); 828 fl6_sock_release(flowlabel);
829 return err; 829 return err;
@@ -1292,7 +1292,7 @@ static const struct file_operations raw6_seq_fops = {
1292 1292
1293static int __net_init raw6_init_net(struct net *net) 1293static int __net_init raw6_init_net(struct net *net)
1294{ 1294{
1295 if (!proc_net_fops_create(net, "raw6", S_IRUGO, &raw6_seq_fops)) 1295 if (!proc_create("raw6", S_IRUGO, net->proc_net, &raw6_seq_fops))
1296 return -ENOMEM; 1296 return -ENOMEM;
1297 1297
1298 return 0; 1298 return 0;
@@ -1300,7 +1300,7 @@ static int __net_init raw6_init_net(struct net *net)
1300 1300
1301static void __net_exit raw6_exit_net(struct net *net) 1301static void __net_exit raw6_exit_net(struct net *net)
1302{ 1302{
1303 proc_net_remove(net, "raw6"); 1303 remove_proc_entry("raw6", net->proc_net);
1304} 1304}
1305 1305
1306static struct pernet_operations raw6_net_ops = { 1306static struct pernet_operations raw6_net_ops = {
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index bab2c270f292..3c6a77290c6e 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -79,20 +79,8 @@ unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
79{ 79{
80 u32 c; 80 u32 c;
81 81
82 c = jhash_3words((__force u32)saddr->s6_addr32[0], 82 c = jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
83 (__force u32)saddr->s6_addr32[1], 83 (__force u32)id, rnd);
84 (__force u32)saddr->s6_addr32[2],
85 rnd);
86
87 c = jhash_3words((__force u32)saddr->s6_addr32[3],
88 (__force u32)daddr->s6_addr32[0],
89 (__force u32)daddr->s6_addr32[1],
90 c);
91
92 c = jhash_3words((__force u32)daddr->s6_addr32[2],
93 (__force u32)daddr->s6_addr32[3],
94 (__force u32)id,
95 c);
96 84
97 return c & (INETFRAGS_HASHSZ - 1); 85 return c & (INETFRAGS_HASHSZ - 1);
98} 86}
@@ -404,7 +392,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
404 goto out_oversize; 392 goto out_oversize;
405 393
406 /* Head of list must not be cloned. */ 394 /* Head of list must not be cloned. */
407 if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) 395 if (skb_unclone(head, GFP_ATOMIC))
408 goto out_oom; 396 goto out_oom;
409 397
410 /* If the first fragment is fragmented itself, we split 398 /* If the first fragment is fragmented itself, we split
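
The three chained jhash_3words() calls collapse into one because ipv6_addr_hash() pre-mixes each 128-bit address down to 32 bits. At the time of this series the helper was a plain XOR of the four address words:

	/* from include/net/ipv6.h */
	static inline u32 ipv6_addr_hash(const struct in6_addr *a)
	{
		return (__force u32)(a->s6_addr32[0] ^ a->s6_addr32[1] ^
				     a->s6_addr32[2] ^ a->s6_addr32[3]);
	}
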
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index f3328bc1174f..515bb51e05a8 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2995,8 +2995,8 @@ static void __net_exit ip6_route_net_exit(struct net *net)
2995static int __net_init ip6_route_net_init_late(struct net *net) 2995static int __net_init ip6_route_net_init_late(struct net *net)
2996{ 2996{
2997#ifdef CONFIG_PROC_FS 2997#ifdef CONFIG_PROC_FS
2998 proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops); 2998 proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
2999 proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops); 2999 proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
3000#endif 3000#endif
3001 return 0; 3001 return 0;
3002} 3002}
@@ -3004,8 +3004,8 @@ static int __net_init ip6_route_net_init_late(struct net *net)
3004static void __net_exit ip6_route_net_exit_late(struct net *net) 3004static void __net_exit ip6_route_net_exit_late(struct net *net)
3005{ 3005{
3006#ifdef CONFIG_PROC_FS 3006#ifdef CONFIG_PROC_FS
3007 proc_net_remove(net, "ipv6_route"); 3007 remove_proc_entry("ipv6_route", net->proc_net);
3008 proc_net_remove(net, "rt6_stats"); 3008 remove_proc_entry("rt6_stats", net->proc_net);
3009#endif 3009#endif
3010} 3010}
3011 3011
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 06087e58738a..9b6460055df5 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -423,6 +423,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
423 } 423 }
424 424
425 inet_csk_reqsk_queue_drop(sk, req, prev); 425 inet_csk_reqsk_queue_drop(sk, req, prev);
426 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
426 goto out; 427 goto out;
427 428
428 case TCP_SYN_SENT: 429 case TCP_SYN_SENT:
@@ -712,7 +713,8 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
712#endif 713#endif
713 714
714static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, 715static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
715 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass) 716 u32 tsval, u32 tsecr,
717 struct tcp_md5sig_key *key, int rst, u8 tclass)
716{ 718{
717 const struct tcphdr *th = tcp_hdr(skb); 719 const struct tcphdr *th = tcp_hdr(skb);
718 struct tcphdr *t1; 720 struct tcphdr *t1;
@@ -724,7 +726,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
724 struct dst_entry *dst; 726 struct dst_entry *dst;
725 __be32 *topt; 727 __be32 *topt;
726 728
727 if (ts) 729 if (tsecr)
728 tot_len += TCPOLEN_TSTAMP_ALIGNED; 730 tot_len += TCPOLEN_TSTAMP_ALIGNED;
729#ifdef CONFIG_TCP_MD5SIG 731#ifdef CONFIG_TCP_MD5SIG
730 if (key) 732 if (key)
@@ -754,11 +756,11 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
754 756
755 topt = (__be32 *)(t1 + 1); 757 topt = (__be32 *)(t1 + 1);
756 758
757 if (ts) { 759 if (tsecr) {
758 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | 760 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
759 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); 761 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
760 *topt++ = htonl(tcp_time_stamp); 762 *topt++ = htonl(tsval);
761 *topt++ = htonl(ts); 763 *topt++ = htonl(tsecr);
762 } 764 }
763 765
764#ifdef CONFIG_TCP_MD5SIG 766#ifdef CONFIG_TCP_MD5SIG
@@ -859,7 +861,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
859 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len - 861 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
860 (th->doff << 2); 862 (th->doff << 2);
861 863
862 tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0); 864 tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, key, 1, 0);
863 865
864#ifdef CONFIG_TCP_MD5SIG 866#ifdef CONFIG_TCP_MD5SIG
865release_sk1: 867release_sk1:
@@ -870,10 +872,11 @@ release_sk1:
870#endif 872#endif
871} 873}
872 874
873static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts, 875static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
876 u32 win, u32 tsval, u32 tsecr,
874 struct tcp_md5sig_key *key, u8 tclass) 877 struct tcp_md5sig_key *key, u8 tclass)
875{ 878{
876 tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass); 879 tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, key, 0, tclass);
877} 880}
878 881
879static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) 882static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
@@ -883,6 +886,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
883 886
884 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, 887 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
885 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, 888 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
889 tcp_time_stamp + tcptw->tw_ts_offset,
886 tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw), 890 tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
887 tw->tw_tclass); 891 tw->tw_tclass);
888 892
@@ -892,7 +896,8 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
892static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, 896static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
893 struct request_sock *req) 897 struct request_sock *req)
894{ 898{
895 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent, 899 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
900 req->rcv_wnd, tcp_time_stamp, req->ts_recent,
896 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0); 901 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
897} 902}
898 903
@@ -959,8 +964,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
959 goto drop; 964 goto drop;
960 } 965 }
961 966
962 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) 967 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
968 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
963 goto drop; 969 goto drop;
970 }
964 971
965 req = inet6_reqsk_alloc(&tcp6_request_sock_ops); 972 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
966 if (req == NULL) 973 if (req == NULL)
@@ -1109,6 +1116,7 @@ drop_and_release:
1109drop_and_free: 1116drop_and_free:
1110 reqsk_free(req); 1117 reqsk_free(req);
1111drop: 1118drop:
1119 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1112 return 0; /* don't send reset */ 1120 return 0; /* don't send reset */
1113} 1121}
1114 1122
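
Splitting the old ts parameter into tsval/tsecr lets callers send a timestamp value other than raw tcp_time_stamp. In the timewait ACK path the pair becomes:

	/* our clock, shifted by the per-connection offset */
	tsval = tcp_time_stamp + tcptw->tw_ts_offset;
	/* peer's last timestamp, echoed back unchanged */
	tsecr = tcptw->tw_ts_recent;

so a connection reincarnated out of TIME-WAIT keeps a monotonic timestamp clock even though the underlying jiffies base never changed.
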
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index cb5bf497c09c..599e1ba6d1ce 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -467,7 +467,7 @@ try_again:
467 ip_cmsg_recv(msg, skb); 467 ip_cmsg_recv(msg, skb);
468 } else { 468 } else {
469 if (np->rxopt.all) 469 if (np->rxopt.all)
470 datagram_recv_ctl(sk, msg, skb); 470 ip6_datagram_recv_ctl(sk, msg, skb);
471 } 471 }
472 472
473 err = copied; 473 err = copied;
@@ -1143,8 +1143,8 @@ do_udp_sendmsg:
1143 memset(opt, 0, sizeof(struct ipv6_txoptions)); 1143 memset(opt, 0, sizeof(struct ipv6_txoptions));
1144 opt->tot_len = sizeof(*opt); 1144 opt->tot_len = sizeof(*opt);
1145 1145
1146 err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 1146 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
1147 &hlimit, &tclass, &dontfrag); 1147 &hlimit, &tclass, &dontfrag);
1148 if (err < 0) { 1148 if (err < 0) {
1149 fl6_sock_release(flowlabel); 1149 fl6_sock_release(flowlabel);
1150 return err; 1150 return err;
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 0c8934a317c2..cf05cf073c51 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -56,7 +56,8 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
56 /* Packet is from an untrusted source, reset gso_segs. */ 56 /* Packet is from an untrusted source, reset gso_segs. */
57 int type = skb_shinfo(skb)->gso_type; 57 int type = skb_shinfo(skb)->gso_type;
58 58
59 if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) || 59 if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
60 SKB_GSO_GRE) ||
60 !(type & (SKB_GSO_UDP)))) 61 !(type & (SKB_GSO_UDP))))
61 goto out; 62 goto out;
62 63
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 9f2095b19ad0..9bf6a74a71d2 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -69,8 +69,8 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
69 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 69 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
70 goto out; 70 goto out;
71 71
72 if (skb_cloned(skb) && 72 err = skb_unclone(skb, GFP_ATOMIC);
73 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 73 if (err)
74 goto out; 74 goto out;
75 75
76 if (x->props.flags & XFRM_STATE_DECAP_DSCP) 76 if (x->props.flags & XFRM_STATE_DECAP_DSCP)
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 128273744332..4ef7bdb65440 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -320,7 +320,51 @@ static struct ctl_table xfrm6_policy_table[] = {
320 { } 320 { }
321}; 321};
322 322
323static struct ctl_table_header *sysctl_hdr; 323static int __net_init xfrm6_net_init(struct net *net)
324{
325 struct ctl_table *table;
326 struct ctl_table_header *hdr;
327
328 table = xfrm6_policy_table;
329 if (!net_eq(net, &init_net)) {
330 table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
331 if (!table)
332 goto err_alloc;
333
334 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
335 }
336
337 hdr = register_net_sysctl(net, "net/ipv6", table);
338 if (!hdr)
339 goto err_reg;
340
341 net->ipv6.sysctl.xfrm6_hdr = hdr;
342 return 0;
343
344err_reg:
345 if (!net_eq(net, &init_net))
346 kfree(table);
347err_alloc:
348 return -ENOMEM;
349}
350
351static void __net_exit xfrm6_net_exit(struct net *net)
352{
353 struct ctl_table *table;
354
355 if (net->ipv6.sysctl.xfrm6_hdr == NULL)
356 return;
357
358 table = net->ipv6.sysctl.xfrm6_hdr->ctl_table_arg;
359 unregister_net_sysctl_table(net->ipv6.sysctl.xfrm6_hdr);
360 if (!net_eq(net, &init_net))
361 kfree(table);
362}
363
364static struct pernet_operations xfrm6_net_ops = {
365 .init = xfrm6_net_init,
366 .exit = xfrm6_net_exit,
367};
324#endif 368#endif
325 369
326int __init xfrm6_init(void) 370int __init xfrm6_init(void)
@@ -339,8 +383,7 @@ int __init xfrm6_init(void)
339 goto out_policy; 383 goto out_policy;
340 384
341#ifdef CONFIG_SYSCTL 385#ifdef CONFIG_SYSCTL
342 sysctl_hdr = register_net_sysctl(&init_net, "net/ipv6", 386 register_pernet_subsys(&xfrm6_net_ops);
343 xfrm6_policy_table);
344#endif 387#endif
345out: 388out:
346 return ret; 389 return ret;
@@ -352,8 +395,7 @@ out_policy:
352void xfrm6_fini(void) 395void xfrm6_fini(void)
353{ 396{
354#ifdef CONFIG_SYSCTL 397#ifdef CONFIG_SYSCTL
355 if (sysctl_hdr) 398 unregister_pernet_subsys(&xfrm6_net_ops);
356 unregister_net_sysctl_table(sysctl_hdr);
357#endif 399#endif
358 xfrm6_policy_fini(); 400 xfrm6_policy_fini();
359 xfrm6_state_fini(); 401 xfrm6_state_fini();
diff --git a/net/key/af_key.c b/net/key/af_key.c
index cc2630ac8061..9ef79851f297 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -203,7 +203,6 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
203 } 203 }
204 if (*skb2 != NULL) { 204 if (*skb2 != NULL) {
205 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { 205 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
206 skb_orphan(*skb2);
207 skb_set_owner_r(*skb2, sk); 206 skb_set_owner_r(*skb2, sk);
208 skb_queue_tail(&sk->sk_receive_queue, *skb2); 207 skb_queue_tail(&sk->sk_receive_queue, *skb2);
209 sk->sk_data_ready(sk, (*skb2)->len); 208 sk->sk_data_ready(sk, (*skb2)->len);
@@ -816,18 +815,21 @@ static struct sk_buff *__pfkey_xfrm_state2msg(const struct xfrm_state *x,
816 sa->sadb_sa_auth = 0; 815 sa->sadb_sa_auth = 0;
817 if (x->aalg) { 816 if (x->aalg) {
818 struct xfrm_algo_desc *a = xfrm_aalg_get_byname(x->aalg->alg_name, 0); 817 struct xfrm_algo_desc *a = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
819 sa->sadb_sa_auth = a ? a->desc.sadb_alg_id : 0; 818 sa->sadb_sa_auth = (a && a->pfkey_supported) ?
819 a->desc.sadb_alg_id : 0;
820 } 820 }
821 sa->sadb_sa_encrypt = 0; 821 sa->sadb_sa_encrypt = 0;
822 BUG_ON(x->ealg && x->calg); 822 BUG_ON(x->ealg && x->calg);
823 if (x->ealg) { 823 if (x->ealg) {
824 struct xfrm_algo_desc *a = xfrm_ealg_get_byname(x->ealg->alg_name, 0); 824 struct xfrm_algo_desc *a = xfrm_ealg_get_byname(x->ealg->alg_name, 0);
825 sa->sadb_sa_encrypt = a ? a->desc.sadb_alg_id : 0; 825 sa->sadb_sa_encrypt = (a && a->pfkey_supported) ?
826 a->desc.sadb_alg_id : 0;
826 } 827 }
827 /* KAME compatible: sadb_sa_encrypt is overloaded with calg id */ 828 /* KAME compatible: sadb_sa_encrypt is overloaded with calg id */
828 if (x->calg) { 829 if (x->calg) {
829 struct xfrm_algo_desc *a = xfrm_calg_get_byname(x->calg->alg_name, 0); 830 struct xfrm_algo_desc *a = xfrm_calg_get_byname(x->calg->alg_name, 0);
830 sa->sadb_sa_encrypt = a ? a->desc.sadb_alg_id : 0; 831 sa->sadb_sa_encrypt = (a && a->pfkey_supported) ?
832 a->desc.sadb_alg_id : 0;
831 } 833 }
832 834
833 sa->sadb_sa_flags = 0; 835 sa->sadb_sa_flags = 0;
@@ -1138,7 +1140,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
1138 if (sa->sadb_sa_auth) { 1140 if (sa->sadb_sa_auth) {
1139 int keysize = 0; 1141 int keysize = 0;
1140 struct xfrm_algo_desc *a = xfrm_aalg_get_byid(sa->sadb_sa_auth); 1142 struct xfrm_algo_desc *a = xfrm_aalg_get_byid(sa->sadb_sa_auth);
1141 if (!a) { 1143 if (!a || !a->pfkey_supported) {
1142 err = -ENOSYS; 1144 err = -ENOSYS;
1143 goto out; 1145 goto out;
1144 } 1146 }
@@ -1160,7 +1162,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
1160 if (sa->sadb_sa_encrypt) { 1162 if (sa->sadb_sa_encrypt) {
1161 if (hdr->sadb_msg_satype == SADB_X_SATYPE_IPCOMP) { 1163 if (hdr->sadb_msg_satype == SADB_X_SATYPE_IPCOMP) {
1162 struct xfrm_algo_desc *a = xfrm_calg_get_byid(sa->sadb_sa_encrypt); 1164 struct xfrm_algo_desc *a = xfrm_calg_get_byid(sa->sadb_sa_encrypt);
1163 if (!a) { 1165 if (!a || !a->pfkey_supported) {
1164 err = -ENOSYS; 1166 err = -ENOSYS;
1165 goto out; 1167 goto out;
1166 } 1168 }
@@ -1172,7 +1174,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
1172 } else { 1174 } else {
1173 int keysize = 0; 1175 int keysize = 0;
1174 struct xfrm_algo_desc *a = xfrm_ealg_get_byid(sa->sadb_sa_encrypt); 1176 struct xfrm_algo_desc *a = xfrm_ealg_get_byid(sa->sadb_sa_encrypt);
1175 if (!a) { 1177 if (!a || !a->pfkey_supported) {
1176 err = -ENOSYS; 1178 err = -ENOSYS;
1177 goto out; 1179 goto out;
1178 } 1180 }
@@ -1578,13 +1580,13 @@ static struct sk_buff *compose_sadb_supported(const struct sadb_msg *orig,
1578 struct sadb_msg *hdr; 1580 struct sadb_msg *hdr;
1579 int len, auth_len, enc_len, i; 1581 int len, auth_len, enc_len, i;
1580 1582
1581 auth_len = xfrm_count_auth_supported(); 1583 auth_len = xfrm_count_pfkey_auth_supported();
1582 if (auth_len) { 1584 if (auth_len) {
1583 auth_len *= sizeof(struct sadb_alg); 1585 auth_len *= sizeof(struct sadb_alg);
1584 auth_len += sizeof(struct sadb_supported); 1586 auth_len += sizeof(struct sadb_supported);
1585 } 1587 }
1586 1588
1587 enc_len = xfrm_count_enc_supported(); 1589 enc_len = xfrm_count_pfkey_enc_supported();
1588 if (enc_len) { 1590 if (enc_len) {
1589 enc_len *= sizeof(struct sadb_alg); 1591 enc_len *= sizeof(struct sadb_alg);
1590 enc_len += sizeof(struct sadb_supported); 1592 enc_len += sizeof(struct sadb_supported);
@@ -1615,6 +1617,8 @@ static struct sk_buff *compose_sadb_supported(const struct sadb_msg *orig,
1615 struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i); 1617 struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
1616 if (!aalg) 1618 if (!aalg)
1617 break; 1619 break;
1620 if (!aalg->pfkey_supported)
1621 continue;
1618 if (aalg->available) 1622 if (aalg->available)
1619 *ap++ = aalg->desc; 1623 *ap++ = aalg->desc;
1620 } 1624 }
@@ -1634,6 +1638,8 @@ static struct sk_buff *compose_sadb_supported(const struct sadb_msg *orig,
1634 struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i); 1638 struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
1635 if (!ealg) 1639 if (!ealg)
1636 break; 1640 break;
1641 if (!ealg->pfkey_supported)
1642 continue;
1637 if (ealg->available) 1643 if (ealg->available)
1638 *ap++ = ealg->desc; 1644 *ap++ = ealg->desc;
1639 } 1645 }
@@ -2825,6 +2831,8 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
2825 const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i); 2831 const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
2826 if (!aalg) 2832 if (!aalg)
2827 break; 2833 break;
2834 if (!aalg->pfkey_supported)
2835 continue;
2828 if (aalg_tmpl_set(t, aalg) && aalg->available) 2836 if (aalg_tmpl_set(t, aalg) && aalg->available)
2829 sz += sizeof(struct sadb_comb); 2837 sz += sizeof(struct sadb_comb);
2830 } 2838 }
@@ -2840,6 +2848,9 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
2840 if (!ealg) 2848 if (!ealg)
2841 break; 2849 break;
2842 2850
2851 if (!ealg->pfkey_supported)
2852 continue;
2853
2843 if (!(ealg_tmpl_set(t, ealg) && ealg->available)) 2854 if (!(ealg_tmpl_set(t, ealg) && ealg->available))
2844 continue; 2855 continue;
2845 2856
@@ -2848,6 +2859,9 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
2848 if (!aalg) 2859 if (!aalg)
2849 break; 2860 break;
2850 2861
2862 if (!aalg->pfkey_supported)
2863 continue;
2864
2851 if (aalg_tmpl_set(t, aalg) && aalg->available) 2865 if (aalg_tmpl_set(t, aalg) && aalg->available)
2852 sz += sizeof(struct sadb_comb); 2866 sz += sizeof(struct sadb_comb);
2853 } 2867 }
@@ -2871,6 +2885,9 @@ static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
2871 if (!aalg) 2885 if (!aalg)
2872 break; 2886 break;
2873 2887
2888 if (!aalg->pfkey_supported)
2889 continue;
2890
2874 if (aalg_tmpl_set(t, aalg) && aalg->available) { 2891 if (aalg_tmpl_set(t, aalg) && aalg->available) {
2875 struct sadb_comb *c; 2892 struct sadb_comb *c;
2876 c = (struct sadb_comb*)skb_put(skb, sizeof(struct sadb_comb)); 2893 c = (struct sadb_comb*)skb_put(skb, sizeof(struct sadb_comb));
@@ -2903,6 +2920,9 @@ static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
2903 if (!ealg) 2920 if (!ealg)
2904 break; 2921 break;
2905 2922
2923 if (!ealg->pfkey_supported)
2924 continue;
2925
2906 if (!(ealg_tmpl_set(t, ealg) && ealg->available)) 2926 if (!(ealg_tmpl_set(t, ealg) && ealg->available))
2907 continue; 2927 continue;
2908 2928
@@ -2911,6 +2931,8 @@ static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
2911 const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k); 2931 const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
2912 if (!aalg) 2932 if (!aalg)
2913 break; 2933 break;
2934 if (!aalg->pfkey_supported)
2935 continue;
2914 if (!(aalg_tmpl_set(t, aalg) && aalg->available)) 2936 if (!(aalg_tmpl_set(t, aalg) && aalg->available))
2915 continue; 2937 continue;
2916 c = (struct sadb_comb*)skb_put(skb, sizeof(struct sadb_comb)); 2938 c = (struct sadb_comb*)skb_put(skb, sizeof(struct sadb_comb));
@@ -3718,7 +3740,7 @@ static int __net_init pfkey_init_proc(struct net *net)
3718{ 3740{
3719 struct proc_dir_entry *e; 3741 struct proc_dir_entry *e;
3720 3742
3721 e = proc_net_fops_create(net, "pfkey", 0, &pfkey_proc_ops); 3743 e = proc_create("pfkey", 0, net->proc_net, &pfkey_proc_ops);
3722 if (e == NULL) 3744 if (e == NULL)
3723 return -ENOMEM; 3745 return -ENOMEM;
3724 3746
@@ -3727,7 +3749,7 @@ static int __net_init pfkey_init_proc(struct net *net)
3727 3749
3728static void __net_exit pfkey_exit_proc(struct net *net) 3750static void __net_exit pfkey_exit_proc(struct net *net)
3729{ 3751{
3730 proc_net_remove(net, "pfkey"); 3752 remove_proc_entry("pfkey", net->proc_net);
3731} 3753}
3732#else 3754#else
3733static inline int pfkey_init_proc(struct net *net) 3755static inline int pfkey_init_proc(struct net *net)
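
The pfkey_supported flag (added to struct xfrm_algo_desc elsewhere in this series) marks algorithms that have a PF_KEY v2 algorithm ID; anything without one must be skipped wherever af_key enumerates or resolves algorithms, which is exactly what each hunk above adds. The recurring filter idiom:

	const struct xfrm_algo_desc *a = xfrm_aalg_get_byidx(i);

	if (!a)
		break;			/* end of the algorithm table */
	if (!a->pfkey_supported)
		continue;		/* not representable over PF_KEY */
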
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 1a9f3723c13c..dcfd64e83ab7 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -101,6 +101,7 @@ struct l2tp_skb_cb {
101 101
102static atomic_t l2tp_tunnel_count; 102static atomic_t l2tp_tunnel_count;
103static atomic_t l2tp_session_count; 103static atomic_t l2tp_session_count;
104static struct workqueue_struct *l2tp_wq;
104 105
105/* per-net private data for this module */ 106/* per-net private data for this module */
106static unsigned int l2tp_net_id; 107static unsigned int l2tp_net_id;
@@ -122,7 +123,6 @@ static inline struct l2tp_net *l2tp_pernet(struct net *net)
122 return net_generic(net, l2tp_net_id); 123 return net_generic(net, l2tp_net_id);
123} 124}
124 125
125
126/* Tunnel reference counts. Incremented per session that is added to 126/* Tunnel reference counts. Incremented per session that is added to
127 * the tunnel. 127 * the tunnel.
128 */ 128 */
@@ -168,6 +168,51 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
168 168
169} 169}
170 170
171/* Lookup the tunnel socket, possibly involving the fs code if the socket is
172 * owned by userspace. A struct sock returned from this function must be
173 * released using l2tp_tunnel_sock_put once you're done with it.
174 */
175struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
176{
177 int err = 0;
178 struct socket *sock = NULL;
179 struct sock *sk = NULL;
180
181 if (!tunnel)
182 goto out;
183
184 if (tunnel->fd >= 0) {
185 /* Socket is owned by userspace, who might be in the process
186 * of closing it. Look the socket up using the fd to ensure
187 * consistency.
188 */
189 sock = sockfd_lookup(tunnel->fd, &err);
190 if (sock)
191 sk = sock->sk;
192 } else {
193 /* Socket is owned by kernelspace */
194 sk = tunnel->sock;
195 }
196
197out:
198 return sk;
199}
200EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup);
201
 202/* Drop a reference to a tunnel socket obtained via l2tp_tunnel_sock_lookup() */
203void l2tp_tunnel_sock_put(struct sock *sk)
204{
205 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
206 if (tunnel) {
207 if (tunnel->fd >= 0) {
208 /* Socket is owned by userspace */
209 sockfd_put(sk->sk_socket);
210 }
211 sock_put(sk);
212 }
213}
214EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put);
215
171/* Lookup a session by id in the global session list 216/* Lookup a session by id in the global session list
172 */ 217 */
173static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) 218static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
@@ -1123,8 +1168,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1123 struct udphdr *uh; 1168 struct udphdr *uh;
1124 struct inet_sock *inet; 1169 struct inet_sock *inet;
1125 __wsum csum; 1170 __wsum csum;
1126 int old_headroom;
1127 int new_headroom;
1128 int headroom; 1171 int headroom;
1129 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; 1172 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
1130 int udp_len; 1173 int udp_len;
@@ -1136,16 +1179,12 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1136 */ 1179 */
1137 headroom = NET_SKB_PAD + sizeof(struct iphdr) + 1180 headroom = NET_SKB_PAD + sizeof(struct iphdr) +
1138 uhlen + hdr_len; 1181 uhlen + hdr_len;
1139 old_headroom = skb_headroom(skb);
1140 if (skb_cow_head(skb, headroom)) { 1182 if (skb_cow_head(skb, headroom)) {
1141 kfree_skb(skb); 1183 kfree_skb(skb);
1142 return NET_XMIT_DROP; 1184 return NET_XMIT_DROP;
1143 } 1185 }
1144 1186
1145 new_headroom = skb_headroom(skb);
1146 skb_orphan(skb); 1187 skb_orphan(skb);
1147 skb->truesize += new_headroom - old_headroom;
1148
1149 /* Setup L2TP header */ 1188 /* Setup L2TP header */
1150 session->build_header(session, __skb_push(skb, hdr_len)); 1189 session->build_header(session, __skb_push(skb, hdr_len));
1151 1190
@@ -1232,6 +1271,7 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1232static void l2tp_tunnel_destruct(struct sock *sk) 1271static void l2tp_tunnel_destruct(struct sock *sk)
1233{ 1272{
1234 struct l2tp_tunnel *tunnel; 1273 struct l2tp_tunnel *tunnel;
1274 struct l2tp_net *pn;
1235 1275
1236 tunnel = sk->sk_user_data; 1276 tunnel = sk->sk_user_data;
1237 if (tunnel == NULL) 1277 if (tunnel == NULL)
@@ -1239,9 +1279,8 @@ static void l2tp_tunnel_destruct(struct sock *sk)
1239 1279
1240 l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name); 1280 l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
1241 1281
1242 /* Close all sessions */
1243 l2tp_tunnel_closeall(tunnel);
1244 1282
1283 /* Disable udp encapsulation */
1245 switch (tunnel->encap) { 1284 switch (tunnel->encap) {
1246 case L2TP_ENCAPTYPE_UDP: 1285 case L2TP_ENCAPTYPE_UDP:
1247 /* No longer an encapsulation socket. See net/ipv4/udp.c */ 1286 /* No longer an encapsulation socket. See net/ipv4/udp.c */
@@ -1253,17 +1292,23 @@ static void l2tp_tunnel_destruct(struct sock *sk)
1253 } 1292 }
1254 1293
1255 /* Remove hooks into tunnel socket */ 1294 /* Remove hooks into tunnel socket */
1256 tunnel->sock = NULL;
1257 sk->sk_destruct = tunnel->old_sk_destruct; 1295 sk->sk_destruct = tunnel->old_sk_destruct;
1258 sk->sk_user_data = NULL; 1296 sk->sk_user_data = NULL;
1297 tunnel->sock = NULL;
1259 1298
1260 /* Call the original destructor */ 1299 /* Remove the tunnel struct from the tunnel list */
1261 if (sk->sk_destruct) 1300 pn = l2tp_pernet(tunnel->l2tp_net);
1262 (*sk->sk_destruct)(sk); 1301 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1302 list_del_rcu(&tunnel->list);
1303 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1304 atomic_dec(&l2tp_tunnel_count);
1263 1305
1264 /* We're finished with the socket */ 1306 l2tp_tunnel_closeall(tunnel);
1265 l2tp_tunnel_dec_refcount(tunnel); 1307 l2tp_tunnel_dec_refcount(tunnel);
1266 1308
1309 /* Call the original destructor */
1310 if (sk->sk_destruct)
1311 (*sk->sk_destruct)(sk);
1267end: 1312end:
1268 return; 1313 return;
1269} 1314}
@@ -1337,48 +1382,77 @@ again:
1337 */ 1382 */
1338static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) 1383static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
1339{ 1384{
1340 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1341
1342 BUG_ON(atomic_read(&tunnel->ref_count) != 0); 1385 BUG_ON(atomic_read(&tunnel->ref_count) != 0);
1343 BUG_ON(tunnel->sock != NULL); 1386 BUG_ON(tunnel->sock != NULL);
1344
1345 l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name); 1387 l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
1346
1347 /* Remove from tunnel list */
1348 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1349 list_del_rcu(&tunnel->list);
1350 kfree_rcu(tunnel, rcu); 1388 kfree_rcu(tunnel, rcu);
1351 spin_unlock_bh(&pn->l2tp_tunnel_list_lock); 1389}
1352 1390
1353 atomic_dec(&l2tp_tunnel_count); 1391/* Workqueue tunnel deletion function */
1392static void l2tp_tunnel_del_work(struct work_struct *work)
1393{
1394 struct l2tp_tunnel *tunnel = NULL;
1395 struct socket *sock = NULL;
1396 struct sock *sk = NULL;
1397
1398 tunnel = container_of(work, struct l2tp_tunnel, del_work);
1399 sk = l2tp_tunnel_sock_lookup(tunnel);
1400 if (!sk)
1401 return;
1402
1403 sock = sk->sk_socket;
1404 BUG_ON(!sock);
1405
1406 /* If the tunnel socket was created directly by the kernel, use the
1407 * sk_* API to release the socket now. Otherwise go through the
1408 * inet_* layer to shut the socket down, and let userspace close it.
1409 * In either case the tunnel resources are freed in the socket
1410 * destructor when the tunnel socket goes away.
1411 */
1412 if (sock->file == NULL) {
1413 kernel_sock_shutdown(sock, SHUT_RDWR);
1414 sk_release_kernel(sk);
1415 } else {
1416 inet_shutdown(sock, 2);
1417 }
1418
1419 l2tp_tunnel_sock_put(sk);
1354} 1420}
1355 1421
1356/* Create a socket for the tunnel, if one isn't set up by 1422/* Create a socket for the tunnel, if one isn't set up by
1357 * userspace. This is used for static tunnels where there is no 1423 * userspace. This is used for static tunnels where there is no
1358 * managing L2TP daemon. 1424 * managing L2TP daemon.
1425 *
1426 * Since we don't want these sockets to keep a namespace alive by
1427 * themselves, we drop the socket's namespace refcount after creation.
1428 * These sockets are freed when the namespace exits using the pernet
1429 * exit hook.
1359 */ 1430 */
1360static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct socket **sockp) 1431static int l2tp_tunnel_sock_create(struct net *net,
1432 u32 tunnel_id,
1433 u32 peer_tunnel_id,
1434 struct l2tp_tunnel_cfg *cfg,
1435 struct socket **sockp)
1361{ 1436{
1362 int err = -EINVAL; 1437 int err = -EINVAL;
1363 struct sockaddr_in udp_addr; 1438 struct socket *sock = NULL;
1439 struct sockaddr_in udp_addr = {0};
1440 struct sockaddr_l2tpip ip_addr = {0};
1364#if IS_ENABLED(CONFIG_IPV6) 1441#if IS_ENABLED(CONFIG_IPV6)
1365 struct sockaddr_in6 udp6_addr; 1442 struct sockaddr_in6 udp6_addr = {0};
1366 struct sockaddr_l2tpip6 ip6_addr; 1443 struct sockaddr_l2tpip6 ip6_addr = {0};
1367#endif 1444#endif
1368 struct sockaddr_l2tpip ip_addr;
1369 struct socket *sock = NULL;
1370 1445
1371 switch (cfg->encap) { 1446 switch (cfg->encap) {
1372 case L2TP_ENCAPTYPE_UDP: 1447 case L2TP_ENCAPTYPE_UDP:
1373#if IS_ENABLED(CONFIG_IPV6) 1448#if IS_ENABLED(CONFIG_IPV6)
1374 if (cfg->local_ip6 && cfg->peer_ip6) { 1449 if (cfg->local_ip6 && cfg->peer_ip6) {
1375 err = sock_create(AF_INET6, SOCK_DGRAM, 0, sockp); 1450 err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock);
1376 if (err < 0) 1451 if (err < 0)
1377 goto out; 1452 goto out;
1378 1453
1379 sock = *sockp; 1454 sk_change_net(sock->sk, net);
1380 1455
1381 memset(&udp6_addr, 0, sizeof(udp6_addr));
1382 udp6_addr.sin6_family = AF_INET6; 1456 udp6_addr.sin6_family = AF_INET6;
1383 memcpy(&udp6_addr.sin6_addr, cfg->local_ip6, 1457 memcpy(&udp6_addr.sin6_addr, cfg->local_ip6,
1384 sizeof(udp6_addr.sin6_addr)); 1458 sizeof(udp6_addr.sin6_addr));
@@ -1400,13 +1474,12 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t
1400 } else 1474 } else
1401#endif 1475#endif
1402 { 1476 {
1403 err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp); 1477 err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
1404 if (err < 0) 1478 if (err < 0)
1405 goto out; 1479 goto out;
1406 1480
1407 sock = *sockp; 1481 sk_change_net(sock->sk, net);
1408 1482
1409 memset(&udp_addr, 0, sizeof(udp_addr));
1410 udp_addr.sin_family = AF_INET; 1483 udp_addr.sin_family = AF_INET;
1411 udp_addr.sin_addr = cfg->local_ip; 1484 udp_addr.sin_addr = cfg->local_ip;
1412 udp_addr.sin_port = htons(cfg->local_udp_port); 1485 udp_addr.sin_port = htons(cfg->local_udp_port);
@@ -1433,14 +1506,13 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t
1433 case L2TP_ENCAPTYPE_IP: 1506 case L2TP_ENCAPTYPE_IP:
1434#if IS_ENABLED(CONFIG_IPV6) 1507#if IS_ENABLED(CONFIG_IPV6)
1435 if (cfg->local_ip6 && cfg->peer_ip6) { 1508 if (cfg->local_ip6 && cfg->peer_ip6) {
1436 err = sock_create(AF_INET6, SOCK_DGRAM, IPPROTO_L2TP, 1509 err = sock_create_kern(AF_INET6, SOCK_DGRAM,
1437 sockp); 1510 IPPROTO_L2TP, &sock);
1438 if (err < 0) 1511 if (err < 0)
1439 goto out; 1512 goto out;
1440 1513
1441 sock = *sockp; 1514 sk_change_net(sock->sk, net);
1442 1515
1443 memset(&ip6_addr, 0, sizeof(ip6_addr));
1444 ip6_addr.l2tp_family = AF_INET6; 1516 ip6_addr.l2tp_family = AF_INET6;
1445 memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6, 1517 memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
1446 sizeof(ip6_addr.l2tp_addr)); 1518 sizeof(ip6_addr.l2tp_addr));
@@ -1462,14 +1534,13 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t
1462 } else 1534 } else
1463#endif 1535#endif
1464 { 1536 {
1465 err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP, 1537 err = sock_create_kern(AF_INET, SOCK_DGRAM,
1466 sockp); 1538 IPPROTO_L2TP, &sock);
1467 if (err < 0) 1539 if (err < 0)
1468 goto out; 1540 goto out;
1469 1541
1470 sock = *sockp; 1542 sk_change_net(sock->sk, net);
1471 1543
1472 memset(&ip_addr, 0, sizeof(ip_addr));
1473 ip_addr.l2tp_family = AF_INET; 1544 ip_addr.l2tp_family = AF_INET;
1474 ip_addr.l2tp_addr = cfg->local_ip; 1545 ip_addr.l2tp_addr = cfg->local_ip;
1475 ip_addr.l2tp_conn_id = tunnel_id; 1546 ip_addr.l2tp_conn_id = tunnel_id;
@@ -1493,8 +1564,10 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t
1493 } 1564 }
1494 1565
1495out: 1566out:
1567 *sockp = sock;
1496 if ((err < 0) && sock) { 1568 if ((err < 0) && sock) {
1497 sock_release(sock); 1569 kernel_sock_shutdown(sock, SHUT_RDWR);
1570 sk_release_kernel(sock->sk);
1498 *sockp = NULL; 1571 *sockp = NULL;
1499 } 1572 }
1500 1573
@@ -1517,15 +1590,23 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1517 * kernel socket. 1590 * kernel socket.
1518 */ 1591 */
1519 if (fd < 0) { 1592 if (fd < 0) {
1520 err = l2tp_tunnel_sock_create(tunnel_id, peer_tunnel_id, cfg, &sock); 1593 err = l2tp_tunnel_sock_create(net, tunnel_id, peer_tunnel_id,
1594 cfg, &sock);
1521 if (err < 0) 1595 if (err < 0)
1522 goto err; 1596 goto err;
1523 } else { 1597 } else {
1524 err = -EBADF;
1525 sock = sockfd_lookup(fd, &err); 1598 sock = sockfd_lookup(fd, &err);
1526 if (!sock) { 1599 if (!sock) {
1527 pr_err("tunl %hu: sockfd_lookup(fd=%d) returned %d\n", 1600 pr_err("tunl %u: sockfd_lookup(fd=%d) returned %d\n",
1528 tunnel_id, fd, err); 1601 tunnel_id, fd, err);
1602 err = -EBADF;
1603 goto err;
1604 }
1605
1606 /* Reject namespace mismatches */
1607 if (!net_eq(sock_net(sock->sk), net)) {
1608 pr_err("tunl %u: netns mismatch\n", tunnel_id);
1609 err = -EINVAL;
1529 goto err; 1610 goto err;
1530 } 1611 }
1531 } 1612 }
@@ -1607,10 +1688,14 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1607 tunnel->old_sk_destruct = sk->sk_destruct; 1688 tunnel->old_sk_destruct = sk->sk_destruct;
1608 sk->sk_destruct = &l2tp_tunnel_destruct; 1689 sk->sk_destruct = &l2tp_tunnel_destruct;
1609 tunnel->sock = sk; 1690 tunnel->sock = sk;
1691 tunnel->fd = fd;
1610 lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock"); 1692 lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
1611 1693
1612 sk->sk_allocation = GFP_ATOMIC; 1694 sk->sk_allocation = GFP_ATOMIC;
1613 1695
1696 /* Init delete workqueue struct */
1697 INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
1698
1614 /* Add tunnel to our list */ 1699 /* Add tunnel to our list */
1615 INIT_LIST_HEAD(&tunnel->list); 1700 INIT_LIST_HEAD(&tunnel->list);
1616 atomic_inc(&l2tp_tunnel_count); 1701 atomic_inc(&l2tp_tunnel_count);
@@ -1642,25 +1727,7 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1642 */ 1727 */
1643int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) 1728int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1644{ 1729{
1645 int err = 0; 1730 return (false == queue_work(l2tp_wq, &tunnel->del_work));
1646 struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL;
1647
1648 /* Force the tunnel socket to close. This will eventually
1649 * cause the tunnel to be deleted via the normal socket close
1650 * mechanisms when userspace closes the tunnel socket.
1651 */
1652 if (sock != NULL) {
1653 err = inet_shutdown(sock, 2);
1654
1655 /* If the tunnel's socket was created by the kernel,
1656 * close the socket here since the socket was not
1657 * created by userspace.
1658 */
1659 if (sock->file == NULL)
1660 err = inet_release(sock);
1661 }
1662
1663 return err;
1664} 1731}
1665EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); 1732EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1666 1733
@@ -1844,8 +1911,21 @@ static __net_init int l2tp_init_net(struct net *net)
1844 return 0; 1911 return 0;
1845} 1912}
1846 1913
1914static __net_exit void l2tp_exit_net(struct net *net)
1915{
1916 struct l2tp_net *pn = l2tp_pernet(net);
1917 struct l2tp_tunnel *tunnel = NULL;
1918
1919 rcu_read_lock_bh();
1920 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
1921 (void)l2tp_tunnel_delete(tunnel);
1922 }
1923 rcu_read_unlock_bh();
1924}
1925
1847static struct pernet_operations l2tp_net_ops = { 1926static struct pernet_operations l2tp_net_ops = {
1848 .init = l2tp_init_net, 1927 .init = l2tp_init_net,
1928 .exit = l2tp_exit_net,
1849 .id = &l2tp_net_id, 1929 .id = &l2tp_net_id,
1850 .size = sizeof(struct l2tp_net), 1930 .size = sizeof(struct l2tp_net),
1851}; 1931};
@@ -1858,6 +1938,13 @@ static int __init l2tp_init(void)
1858 if (rc) 1938 if (rc)
1859 goto out; 1939 goto out;
1860 1940
1941 l2tp_wq = alloc_workqueue("l2tp", WQ_NON_REENTRANT | WQ_UNBOUND, 0);
1942 if (!l2tp_wq) {
1943 pr_err("alloc_workqueue failed\n");
1944 rc = -ENOMEM;
1945 goto out;
1946 }
1947
1861 pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION); 1948 pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
1862 1949
1863out: 1950out:
@@ -1867,6 +1954,10 @@ out:
1867static void __exit l2tp_exit(void) 1954static void __exit l2tp_exit(void)
1868{ 1955{
1869 unregister_pernet_device(&l2tp_net_ops); 1956 unregister_pernet_device(&l2tp_net_ops);
1957 if (l2tp_wq) {
1958 destroy_workqueue(l2tp_wq);
1959 l2tp_wq = NULL;
1960 }
1870} 1961}
1871 1962
1872module_init(l2tp_init); 1963module_init(l2tp_init);
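
Tunnel deletion now only queues work and returns; the actual socket shutdown runs later in process context on the dedicated l2tp workqueue, where kernel_sock_shutdown() and sk_release_kernel() are free to sleep. Reduced to its bones, the pattern is:

	/* at tunnel creation */
	INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);

	/* at deletion: safe to call from any context */
	queue_work(l2tp_wq, &tunnel->del_work);
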
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 56d583e083a7..8eb8f1d47f3a 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -188,7 +188,10 @@ struct l2tp_tunnel {
188 int (*recv_payload_hook)(struct sk_buff *skb); 188 int (*recv_payload_hook)(struct sk_buff *skb);
189 void (*old_sk_destruct)(struct sock *); 189 void (*old_sk_destruct)(struct sock *);
190 struct sock *sock; /* Parent socket */ 190 struct sock *sock; /* Parent socket */
191 int fd; 191 int fd; /* Parent fd, if tunnel socket
192 * was created by userspace */
193
194 struct work_struct del_work;
192 195
193 uint8_t priv[0]; /* private data */ 196 uint8_t priv[0]; /* private data */
194}; 197};
@@ -228,6 +231,8 @@ out:
228 return tunnel; 231 return tunnel;
229} 232}
230 233
234extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel);
235extern void l2tp_tunnel_sock_put(struct sock *sk);
231extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id); 236extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);
232extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); 237extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
233extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); 238extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 61d8b75d2686..f7ac8f42fee2 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -115,6 +115,7 @@ static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, in
115 */ 115 */
116static int l2tp_ip_recv(struct sk_buff *skb) 116static int l2tp_ip_recv(struct sk_buff *skb)
117{ 117{
118 struct net *net = dev_net(skb->dev);
118 struct sock *sk; 119 struct sock *sk;
119 u32 session_id; 120 u32 session_id;
120 u32 tunnel_id; 121 u32 tunnel_id;
@@ -142,7 +143,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
142 } 143 }
143 144
144 /* Ok, this is a data packet. Lookup the session. */ 145 /* Ok, this is a data packet. Lookup the session. */
145 session = l2tp_session_find(&init_net, NULL, session_id); 146 session = l2tp_session_find(net, NULL, session_id);
146 if (session == NULL) 147 if (session == NULL)
147 goto discard; 148 goto discard;
148 149
@@ -173,14 +174,14 @@ pass_up:
 		goto discard;
 
 	tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
-	tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
+	tunnel = l2tp_tunnel_find(net, tunnel_id);
 	if (tunnel != NULL)
 		sk = tunnel->sock;
 	else {
 		struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
 
 		read_lock_bh(&l2tp_ip_lock);
-		sk = __l2tp_ip_bind_lookup(&init_net, iph->daddr, 0, tunnel_id);
+		sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id);
 		read_unlock_bh(&l2tp_ip_lock);
 	}
 
@@ -239,6 +240,7 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
+	struct net *net = sock_net(sk);
 	int ret;
 	int chk_addr_ret;
 
@@ -251,7 +253,8 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
 	ret = -EADDRINUSE;
 	read_lock_bh(&l2tp_ip_lock);
-	if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
+	if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
+				  sk->sk_bound_dev_if, addr->l2tp_conn_id))
 		goto out_in_use;
 
 	read_unlock_bh(&l2tp_ip_lock);
@@ -260,7 +263,7 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
 		goto out;
 
-	chk_addr_ret = inet_addr_type(&init_net, addr->l2tp_addr.s_addr);
+	chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr);
 	ret = -EADDRNOTAVAIL;
 	if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
 	    chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
@@ -369,7 +372,7 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
 	return 0;
 
 drop:
-	IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
+	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
 	kfree_skb(skb);
 	return -1;
 }
@@ -605,6 +608,7 @@ static struct inet_protosw l2tp_ip_protosw = {
 
 static struct net_protocol l2tp_ip_protocol __read_mostly = {
 	.handler	= l2tp_ip_recv,
+	.netns_ok	= 1,
 };
 
 static int __init l2tp_ip_init(void)
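The conversions in this file are mechanical: every hard-coded &init_net becomes the namespace the packet or socket actually belongs to, and .netns_ok = 1 tells the IPv4 stack it may deliver to this protocol in non-initial namespaces. The two lookups, as a self-contained sketch (the helper names are invented):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>

/* Receive path: the namespace comes from the ingress device. */
static struct net *example_rx_net(const struct sk_buff *skb)
{
	return dev_net(skb->dev);	/* instead of &init_net */
}

/* Socket operations: the namespace comes from the socket itself. */
static struct net *example_sk_net(const struct sock *sk)
{
	return sock_net(sk);		/* instead of &init_net */
}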
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 927547171bc7..8ee4a86ae996 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -554,8 +554,8 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
 		memset(opt, 0, sizeof(struct ipv6_txoptions));
 		opt->tot_len = sizeof(struct ipv6_txoptions);
 
-		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-					&hlimit, &tclass, &dontfrag);
+		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+					    &hlimit, &tclass, &dontfrag);
 		if (err < 0) {
 			fl6_sock_release(flowlabel);
 			return err;
@@ -646,7 +646,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
 			     struct msghdr *msg, size_t len, int noblock,
 			     int flags, int *addr_len)
 {
-	struct inet_sock *inet = inet_sk(sk);
+	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name;
 	size_t copied = 0;
 	int err = -EOPNOTSUPP;
@@ -688,8 +688,8 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
 		lsa->l2tp_scope_id = IP6CB(skb)->iif;
 	}
 
-	if (inet->cmsg_flags)
-		ip_cmsg_recv(msg, skb);
+	if (np->rxopt.all)
+		ip6_datagram_recv_ctl(sk, msg, skb);
 
 	if (flags & MSG_TRUNC)
 		copied = skb->len;
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index bbba3a19e944..c1bab22db85e 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -37,6 +37,7 @@ static struct genl_family l2tp_nl_family = {
 	.version	= L2TP_GENL_VERSION,
 	.hdrsize	= 0,
 	.maxattr	= L2TP_ATTR_MAX,
+	.netnsok	= true,
 };
 
 /* Accessed under genl lock */
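Setting .netnsok = true is the generic-netlink counterpart of the .netns_ok change above: without it, requests from outside the initial namespace are rejected. A sketch of a namespace-aware family (the family and handler names below are made up):

#include <linux/printk.h>
#include <net/genetlink.h>

static struct genl_family example_genl_family = {
	.name		= "EXAMPLE",
	.version	= 1,
	.hdrsize	= 0,
	.maxattr	= 0,
	.netnsok	= true,		/* usable outside init_net */
};

static int example_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);	/* sender's namespace */

	pr_debug("request in netns %p\n", net);
	return 0;
}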
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 286366ef8930..3f4e3afc191a 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -388,8 +388,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	struct l2tp_session *session;
 	struct l2tp_tunnel *tunnel;
 	struct pppol2tp_session *ps;
-	int old_headroom;
-	int new_headroom;
 	int uhlen, headroom;
 
 	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
@@ -408,7 +406,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	if (tunnel == NULL)
 		goto abort_put_sess;
 
-	old_headroom = skb_headroom(skb);
 	uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
 	headroom = NET_SKB_PAD +
 		   sizeof(struct iphdr) + /* IP header */
@@ -418,9 +415,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	if (skb_cow_head(skb, headroom))
 		goto abort_put_sess_tun;
 
-	new_headroom = skb_headroom(skb);
-	skb->truesize += new_headroom - old_headroom;
-
 	/* Setup PPP header */
 	__skb_push(skb, sizeof(ppph));
 	skb->data[0] = ppph[0];
@@ -1789,7 +1783,8 @@ static __net_init int pppol2tp_init_net(struct net *net)
 	struct proc_dir_entry *pde;
 	int err = 0;
 
-	pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops);
+	pde = proc_create("pppol2tp", S_IRUGO, net->proc_net,
+			  &pppol2tp_proc_fops);
 	if (!pde) {
 		err = -ENOMEM;
 		goto out;
@@ -1801,7 +1796,7 @@ out:
 
 static __net_exit void pppol2tp_exit_net(struct net *net)
 {
-	proc_net_remove(net, "pppol2tp");
+	remove_proc_entry("pppol2tp", net->proc_net);
 }
 
 static struct pernet_operations pppol2tp_net_ops = {
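The proc_net_fops_create()/proc_net_remove() helpers were on their way out of the kernel around this time; entries are now created directly under net->proc_net, as the hunks above show. The same per-namespace pairing as a sketch (the entry name and ops are placeholders):

#include <linux/proc_fs.h>
#include <net/net_namespace.h>

static const struct file_operations example_proc_fops;	/* elided */

static __net_init int example_init_net(struct net *net)
{
	if (!proc_create("example", S_IRUGO, net->proc_net,
			 &example_proc_fops))
		return -ENOMEM;
	return 0;
}

static __net_exit void example_exit_net(struct net *net)
{
	remove_proc_entry("example", net->proc_net);
}

static struct pernet_operations example_net_ops = {
	.init	= example_init_net,
	.exit	= example_exit_net,
};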
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index b4ecf267a34b..0ecf947ad378 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -258,6 +258,17 @@ config MAC80211_MESH_SYNC_DEBUG
 
 	  Do not select this option.
 
+config MAC80211_MESH_PS_DEBUG
+	bool "Verbose mesh powersave debugging"
+	depends on MAC80211_DEBUG_MENU
+	depends on MAC80211_MESH
+	---help---
+	  Selecting this option causes mac80211 to print out very verbose mesh
+	  powersave debugging messages (when mac80211 is taking part in a
+	  mesh network).
+
+	  Do not select this option.
+
 config MAC80211_TDLS_DEBUG
 	bool "Verbose TDLS debugging"
 	depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 4911202334d9..9d7d840aac6d 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -39,7 +39,8 @@ mac80211-$(CONFIG_MAC80211_MESH) += \
 	mesh_pathtbl.o \
 	mesh_plink.o \
 	mesh_hwmp.o \
-	mesh_sync.o
+	mesh_sync.o \
+	mesh_ps.o
 
 mac80211-$(CONFIG_PM) += pm.o
 
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 808338a1bce5..31bf2586fb84 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -83,8 +83,8 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 	if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
 			     &sta->sta, tid, NULL, 0))
 		sdata_info(sta->sdata,
-			   "HW problem - can not stop rx aggregation for tid %d\n",
-			   tid);
+			   "HW problem - can not stop rx aggregation for %pM tid %d\n",
+			   sta->sta.addr, tid);
 
 	/* check if this is a self generated aggregation halt */
 	if (initiator == WLAN_BACK_RECIPIENT && tx)
@@ -159,7 +159,8 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
 	}
 	rcu_read_unlock();
 
-	ht_dbg(sta->sdata, "rx session timer expired on tid %d\n", (u16)*ptid);
+	ht_dbg(sta->sdata, "RX session timer expired on %pM tid %d\n",
+	       sta->sta.addr, (u16)*ptid);
 
 	set_bit(*ptid, sta->ampdu_mlme.tid_rx_timer_expired);
 	ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
@@ -247,7 +248,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
 	status = WLAN_STATUS_REQUEST_DECLINED;
 
 	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
-		ht_dbg(sta->sdata, "Suspend in progress - Denying ADDBA request\n");
+		ht_dbg(sta->sdata,
+		       "Suspend in progress - Denying ADDBA request (%pM tid %d)\n",
+		       sta->sta.addr, tid);
 		goto end_no_lock;
 	}
 
@@ -317,7 +320,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
 
 	ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
 			       &sta->sta, tid, &start_seq_num, 0);
-	ht_dbg(sta->sdata, "Rx A-MPDU request on tid %d result %d\n", tid, ret);
+	ht_dbg(sta->sdata, "Rx A-MPDU request on %pM tid %d result %d\n",
+	       sta->sta.addr, tid, ret);
 	if (ret) {
 		kfree(tid_agg_rx->reorder_buf);
 		kfree(tid_agg_rx->reorder_time);
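Every message change in this file (and in agg-tx.c below) follows the same recipe: pass sta->sta.addr and let the kernel's %pM printf extension render the 6-byte MAC as a colon-separated address. In isolation:

#include <linux/printk.h>
#include <linux/types.h>

static void example_log_expiry(const u8 *addr, int tid)
{
	/* %pM consumes a pointer to a 6-byte MAC address */
	pr_debug("RX session timer expired on %pM tid %d\n", addr, tid);
}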
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 2f0ccbc5f13e..13b7683de5a4 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -296,7 +296,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 				       IEEE80211_AMPDU_TX_STOP_FLUSH_CONT,
 				       &sta->sta, tid, NULL, 0);
 		WARN_ON_ONCE(ret);
-		goto remove_tid_tx;
+		return 0;
 	}
 
 	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
@@ -354,12 +354,15 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 	 */
 	}
 
-	if (reason == AGG_STOP_DESTROY_STA) {
- remove_tid_tx:
-		spin_lock_bh(&sta->lock);
-		ieee80211_remove_tid_tx(sta, tid);
-		spin_unlock_bh(&sta->lock);
-	}
+	/*
+	 * In the case of AGG_STOP_DESTROY_STA, the driver won't
+	 * necessarily call ieee80211_stop_tx_ba_cb(), so this may
+	 * seem like we can leave the tid_tx data pending forever.
+	 * This is true, in a way, but "forever" is only until the
+	 * station struct is actually destroyed. In the meantime,
+	 * leaving it around ensures that we don't transmit packets
+	 * to the driver on this TID which might confuse it.
+	 */
 
 	return 0;
 }
@@ -387,12 +390,13 @@ static void sta_addba_resp_timer_expired(unsigned long data)
 	    test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
 		rcu_read_unlock();
 		ht_dbg(sta->sdata,
-		       "timer expired on tid %d but we are not (or no longer) expecting addBA response there\n",
-		       tid);
+		       "timer expired on %pM tid %d but we are not (or no longer) expecting addBA response there\n",
+		       sta->sta.addr, tid);
 		return;
 	}
 
-	ht_dbg(sta->sdata, "addBA response timer expired on tid %d\n", tid);
+	ht_dbg(sta->sdata, "addBA response timer expired on %pM tid %d\n",
+	       sta->sta.addr, tid);
 
 	ieee80211_stop_tx_ba_session(&sta->sta, tid);
 	rcu_read_unlock();
@@ -429,7 +433,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 			       &sta->sta, tid, &start_seq_num, 0);
 	if (ret) {
 		ht_dbg(sdata,
-		       "BA request denied - HW unavailable for tid %d\n", tid);
+		       "BA request denied - HW unavailable for %pM tid %d\n",
+		       sta->sta.addr, tid);
 		spin_lock_bh(&sta->lock);
 		ieee80211_agg_splice_packets(sdata, tid_tx, tid);
 		ieee80211_assign_tid_tx(sta, tid, NULL);
@@ -442,7 +447,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 
 	/* activate the timer for the recipient's addBA response */
 	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
-	ht_dbg(sdata, "activated addBA response timer on tid %d\n", tid);
+	ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n",
+	       sta->sta.addr, tid);
 
 	spin_lock_bh(&sta->lock);
 	sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
@@ -489,7 +495,8 @@ static void sta_tx_agg_session_timer_expired(unsigned long data)
 
 	rcu_read_unlock();
 
-	ht_dbg(sta->sdata, "tx session timer expired on tid %d\n", (u16)*ptid);
+	ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
+	       sta->sta.addr, (u16)*ptid);
 
 	ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
 }
@@ -525,7 +532,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 
 	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
 		ht_dbg(sdata,
-		       "BA sessions blocked - Denying BA session request\n");
+		       "BA sessions blocked - Denying BA session request %pM tid %d\n",
+		       sta->sta.addr, tid);
 		return -EINVAL;
 	}
 
@@ -566,8 +574,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 	    time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
 			HT_AGG_RETRIES_PERIOD)) {
 		ht_dbg(sdata,
-		       "BA request denied - waiting a grace period after %d failed requests on tid %u\n",
-		       sta->ampdu_mlme.addba_req_num[tid], tid);
+		       "BA request denied - waiting a grace period after %d failed requests on %pM tid %u\n",
+		       sta->ampdu_mlme.addba_req_num[tid], sta->sta.addr, tid);
 		ret = -EBUSY;
 		goto err_unlock_sta;
 	}
@@ -576,8 +584,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 	/* check if the TID is not in aggregation flow already */
 	if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
 		ht_dbg(sdata,
-		       "BA request denied - session is not idle on tid %u\n",
-		       tid);
+		       "BA request denied - session is not idle on %pM tid %u\n",
+		       sta->sta.addr, tid);
 		ret = -EAGAIN;
 		goto err_unlock_sta;
 	}
@@ -632,7 +640,8 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
 
 	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
-	ht_dbg(sta->sdata, "Aggregation is on for tid %d\n", tid);
+	ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
+	       sta->sta.addr, tid);
 
 	drv_ampdu_action(local, sta->sdata,
 			 IEEE80211_AMPDU_TX_OPERATIONAL,
@@ -802,7 +811,9 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
 	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
 	if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
-		ht_dbg(sdata, "unexpected callback to A-MPDU stop\n");
+		ht_dbg(sdata,
+		       "unexpected callback to A-MPDU stop for %pM tid %d\n",
+		       sta->sta.addr, tid);
 		goto unlock_sta;
 	}
 
@@ -861,13 +872,15 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 		goto out;
 
 	if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
-		ht_dbg(sta->sdata, "wrong addBA response token, tid %d\n", tid);
+		ht_dbg(sta->sdata, "wrong addBA response token, %pM tid %d\n",
+		       sta->sta.addr, tid);
 		goto out;
 	}
 
 	del_timer_sync(&tid_tx->addba_resp_timer);
 
-	ht_dbg(sta->sdata, "switched off addBA timer for tid %d\n", tid);
+	ht_dbg(sta->sdata, "switched off addBA timer for %pM tid %d\n",
+	       sta->sta.addr, tid);
 
 	/*
 	 * addba_resp_timer may have fired before we got here, and
@@ -877,8 +890,8 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 	if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
 	    test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
 		ht_dbg(sta->sdata,
-		       "got addBA resp for tid %d but we already gave up\n",
-		       tid);
+		       "got addBA resp for %pM tid %d but we already gave up\n",
+		       sta->sta.addr, tid);
 		goto out;
 	}
 
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 661b878bd19c..179dcbd8be1c 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -492,7 +492,10 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 #ifdef CONFIG_MAC80211_MESH
 		sinfo->filled |= STATION_INFO_LLID |
 				 STATION_INFO_PLID |
-				 STATION_INFO_PLINK_STATE;
+				 STATION_INFO_PLINK_STATE |
+				 STATION_INFO_LOCAL_PM |
+				 STATION_INFO_PEER_PM |
+				 STATION_INFO_NONPEER_PM;
 
 		sinfo->llid = le16_to_cpu(sta->llid);
 		sinfo->plid = le16_to_cpu(sta->plid);
@@ -501,6 +504,9 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 			sinfo->filled |= STATION_INFO_T_OFFSET;
 			sinfo->t_offset = sta->t_offset;
 		}
+		sinfo->local_pm = sta->local_pm;
+		sinfo->peer_pm = sta->peer_pm;
+		sinfo->nonpeer_pm = sta->nonpeer_pm;
 #endif
 	}
 
@@ -922,11 +928,13 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
 	/* TODO: make hostapd tell us what it wants */
 	sdata->smps_mode = IEEE80211_SMPS_OFF;
 	sdata->needed_rx_chains = sdata->local->rx_chains;
+	sdata->radar_required = params->radar_required;
 
 	err = ieee80211_vif_use_channel(sdata, &params->chandef,
 					IEEE80211_CHANCTX_SHARED);
 	if (err)
 		return err;
+	ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
 
 	/*
 	 * Apply control port protocol, this allows us to
@@ -1041,6 +1049,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
 	local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf);
 	skb_queue_purge(&sdata->u.ap.ps.bc_buf);
 
+	ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
 	ieee80211_vif_release_channel(sdata);
 
 	return 0;
@@ -1243,25 +1252,26 @@ static int sta_apply_parameters(struct ieee80211_local *local,
 
 	if (params->ht_capa)
 		ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
-						  params->ht_capa,
-						  &sta->sta.ht_cap);
+						  params->ht_capa, sta);
 
 	if (params->vht_capa)
 		ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
-						    params->vht_capa,
-						    &sta->sta.vht_cap);
+						    params->vht_capa, sta);
 
 	if (ieee80211_vif_is_mesh(&sdata->vif)) {
 #ifdef CONFIG_MAC80211_MESH
+		u32 changed = 0;
 		if (sdata->u.mesh.security & IEEE80211_MESH_SEC_SECURED) {
-			u32 changed = 0;
-
 			switch (params->plink_state) {
 			case NL80211_PLINK_ESTAB:
 				if (sta->plink_state != NL80211_PLINK_ESTAB)
 					changed = mesh_plink_inc_estab_count(
 							sdata);
 				sta->plink_state = params->plink_state;
+
+				ieee80211_mps_sta_status_update(sta);
+				changed |= ieee80211_mps_set_sta_local_pm(sta,
+						sdata->u.mesh.mshcfg.power_mode);
 				break;
 			case NL80211_PLINK_LISTEN:
 			case NL80211_PLINK_BLOCKED:
@@ -1273,22 +1283,31 @@ static int sta_apply_parameters(struct ieee80211_local *local,
 					changed = mesh_plink_dec_estab_count(
 							sdata);
 				sta->plink_state = params->plink_state;
+
+				ieee80211_mps_sta_status_update(sta);
+				changed |=
+				      ieee80211_mps_local_status_update(sdata);
 				break;
 			default:
 				/* nothing */
 				break;
 			}
-			ieee80211_bss_info_change_notify(sdata, changed);
 		} else {
 			switch (params->plink_action) {
 			case PLINK_ACTION_OPEN:
-				mesh_plink_open(sta);
+				changed |= mesh_plink_open(sta);
 				break;
 			case PLINK_ACTION_BLOCK:
-				mesh_plink_block(sta);
+				changed |= mesh_plink_block(sta);
 				break;
 			}
 		}
+
+		if (params->local_pm)
+			changed |=
+			      ieee80211_mps_set_sta_local_pm(sta,
+							     params->local_pm);
+		ieee80211_bss_info_change_notify(sdata, changed);
 #endif
 	}
 
@@ -1393,9 +1412,11 @@ static int ieee80211_change_station(struct wiphy *wiphy,
 		return -ENOENT;
 	}
 
-	/* in station mode, supported rates are only valid with TDLS */
+	/* in station mode, some updates are only valid with TDLS */
 	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
-	    params->supported_rates &&
+	    (params->supported_rates || params->ht_capa || params->vht_capa ||
+	     params->sta_modify_mask ||
+	     (params->sta_flags_mask & BIT(NL80211_STA_FLAG_WME))) &&
 	    !test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
 		mutex_unlock(&local->sta_mtx);
 		return -EINVAL;
@@ -1777,6 +1798,14 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
 	if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, mask))
 		conf->dot11MeshHWMPconfirmationInterval =
 			nconf->dot11MeshHWMPconfirmationInterval;
+	if (_chg_mesh_attr(NL80211_MESHCONF_POWER_MODE, mask)) {
+		conf->power_mode = nconf->power_mode;
+		ieee80211_mps_local_status_update(sdata);
+	}
+	if (_chg_mesh_attr(NL80211_MESHCONF_AWAKE_WINDOW, mask))
+		conf->dot11MeshAwakeWindowDuration =
+			nconf->dot11MeshAwakeWindowDuration;
+	ieee80211_mbss_info_change_notify(sdata, BSS_CHANGED_BEACON);
 	return 0;
 }
 
@@ -1802,9 +1831,7 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
 	if (err)
 		return err;
 
-	ieee80211_start_mesh(sdata);
-
-	return 0;
+	return ieee80211_start_mesh(sdata);
 }
 
 static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev)
@@ -2058,7 +2085,8 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	memcpy(sdata->vif.bss_conf.mcast_rate, rate, sizeof(rate));
+	memcpy(sdata->vif.bss_conf.mcast_rate, rate,
+	       sizeof(int) * IEEE80211_NUM_BANDS);
 
 	return 0;
 }
@@ -2368,7 +2396,8 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
 	INIT_LIST_HEAD(&roc->dependents);
 
 	/* if there's one pending or we're scanning, queue this one */
-	if (!list_empty(&local->roc_list) || local->scanning)
+	if (!list_empty(&local->roc_list) ||
+	    local->scanning || local->radar_detect_enabled)
 		goto out_check_combine;
 
 	/* if not HW assist, just queue & schedule work */
@@ -2618,6 +2647,37 @@ static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
 	return ieee80211_cancel_roc(local, cookie, false);
 }
 
+static int ieee80211_start_radar_detection(struct wiphy *wiphy,
+					   struct net_device *dev,
+					   struct cfg80211_chan_def *chandef)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
+	unsigned long timeout;
+	int err;
+
+	if (!list_empty(&local->roc_list) || local->scanning)
+		return -EBUSY;
+
+	/* whatever, but channel contexts should not complain about that one */
+	sdata->smps_mode = IEEE80211_SMPS_OFF;
+	sdata->needed_rx_chains = local->rx_chains;
+	sdata->radar_required = true;
+
+	mutex_lock(&local->iflist_mtx);
+	err = ieee80211_vif_use_channel(sdata, chandef,
+					IEEE80211_CHANCTX_SHARED);
+	mutex_unlock(&local->iflist_mtx);
+	if (err)
+		return err;
+
+	timeout = msecs_to_jiffies(IEEE80211_DFS_MIN_CAC_TIME_MS);
+	ieee80211_queue_delayed_work(&sdata->local->hw,
+				     &sdata->dfs_cac_timer_work, timeout);
+
+	return 0;
+}
+
 static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 			     struct ieee80211_channel *chan, bool offchan,
 			     unsigned int wait, const u8 *buf, size_t len,
@@ -2722,7 +2782,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 		goto out_unlock;
 	}
 
-	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
+	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN |
+					IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
 	if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
 		IEEE80211_SKB_CB(skb)->hw_queue =
 			local->hw.offchannel_tx_hw_queue;
@@ -3322,4 +3383,5 @@ struct cfg80211_ops mac80211_config_ops = {
 	.get_et_stats = ieee80211_get_et_stats,
 	.get_et_strings = ieee80211_get_et_strings,
 	.get_channel = ieee80211_cfg_get_channel,
+	.start_radar_detection = ieee80211_start_radar_detection,
 };
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 1bfe0a8b19d2..78c0d90dd641 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -9,7 +9,7 @@
 #include "ieee80211_i.h"
 #include "driver-ops.h"
 
-static void ieee80211_change_chandef(struct ieee80211_local *local,
+static void ieee80211_change_chanctx(struct ieee80211_local *local,
 				     struct ieee80211_chanctx *ctx,
 				     const struct cfg80211_chan_def *chandef)
 {
@@ -49,7 +49,7 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
 		if (!compat)
 			continue;
 
-		ieee80211_change_chandef(local, ctx, compat);
+		ieee80211_change_chanctx(local, ctx, compat);
 
 		return ctx;
 	}
@@ -91,6 +91,10 @@ ieee80211_new_chanctx(struct ieee80211_local *local,
 
 	list_add_rcu(&ctx->list, &local->chanctx_list);
 
+	mutex_lock(&local->mtx);
+	ieee80211_recalc_idle(local);
+	mutex_unlock(&local->mtx);
+
 	return ctx;
 }
 
@@ -110,6 +114,10 @@ static void ieee80211_free_chanctx(struct ieee80211_local *local,
 
 	list_del_rcu(&ctx->list);
 	kfree_rcu(ctx, rcu_head);
+
+	mutex_lock(&local->mtx);
+	ieee80211_recalc_idle(local);
+	mutex_unlock(&local->mtx);
 }
 
 static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
@@ -128,6 +136,11 @@ static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
 	ctx->refcount++;
 
 	ieee80211_recalc_txpower(sdata);
+	sdata->vif.bss_conf.idle = false;
+
+	if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+	    sdata->vif.type != NL80211_IFTYPE_MONITOR)
+		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
 
 	return 0;
 }
@@ -162,7 +175,7 @@ static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
 	if (WARN_ON_ONCE(!compat))
 		return;
 
-	ieee80211_change_chandef(local, ctx, compat);
+	ieee80211_change_chanctx(local, ctx, compat);
 }
 
 static void ieee80211_unassign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
@@ -175,11 +188,18 @@ static void ieee80211_unassign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
 	ctx->refcount--;
 	rcu_assign_pointer(sdata->vif.chanctx_conf, NULL);
 
+	sdata->vif.bss_conf.idle = true;
+
+	if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+	    sdata->vif.type != NL80211_IFTYPE_MONITOR)
+		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
+
 	drv_unassign_vif_chanctx(local, sdata, ctx);
 
 	if (ctx->refcount > 0) {
 		ieee80211_recalc_chanctx_chantype(sdata->local, ctx);
 		ieee80211_recalc_smps_chanctx(local, ctx);
+		ieee80211_recalc_radar_chanctx(local, ctx);
 	}
 }
 
@@ -198,20 +218,42 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
 
 	ctx = container_of(conf, struct ieee80211_chanctx, conf);
 
-	if (sdata->vif.type == NL80211_IFTYPE_AP) {
-		struct ieee80211_sub_if_data *vlan;
-
-		/* for the VLAN list */
-		ASSERT_RTNL();
-		list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
-			rcu_assign_pointer(vlan->vif.chanctx_conf, NULL);
-	}
-
 	ieee80211_unassign_vif_chanctx(sdata, ctx);
 	if (ctx->refcount == 0)
 		ieee80211_free_chanctx(local, ctx);
 }
 
+void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
+				    struct ieee80211_chanctx *chanctx)
+{
+	struct ieee80211_sub_if_data *sdata;
+	bool radar_enabled = false;
+
+	lockdep_assert_held(&local->chanctx_mtx);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+		if (sdata->radar_required) {
+			radar_enabled = true;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	if (radar_enabled == chanctx->conf.radar_enabled)
+		return;
+
+	chanctx->conf.radar_enabled = radar_enabled;
+	local->radar_detect_enabled = chanctx->conf.radar_enabled;
+
+	if (!local->use_chanctx) {
+		local->hw.conf.radar_enabled = chanctx->conf.radar_enabled;
+		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+	}
+
+	drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR);
+}
+
 void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
 				   struct ieee80211_chanctx *chanctx)
 {
@@ -326,16 +368,57 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
 		goto out;
 	}
 
-	if (sdata->vif.type == NL80211_IFTYPE_AP) {
-		struct ieee80211_sub_if_data *vlan;
+	ieee80211_recalc_smps_chanctx(local, ctx);
+	ieee80211_recalc_radar_chanctx(local, ctx);
+ out:
+	mutex_unlock(&local->chanctx_mtx);
+	return ret;
+}
+
+int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
+				   const struct cfg80211_chan_def *chandef,
+				   u32 *changed)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_chanctx_conf *conf;
+	struct ieee80211_chanctx *ctx;
+	int ret;
+
+	if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
+				     IEEE80211_CHAN_DISABLED))
+		return -EINVAL;
+
+	mutex_lock(&local->chanctx_mtx);
+	if (cfg80211_chandef_identical(chandef, &sdata->vif.bss_conf.chandef)) {
+		ret = 0;
+		goto out;
+	}
+
+	if (chandef->width == NL80211_CHAN_WIDTH_20_NOHT ||
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) {
+		ret = -EINVAL;
+		goto out;
+	}
 
-		/* for the VLAN list */
-		ASSERT_RTNL();
-		list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
-			rcu_assign_pointer(vlan->vif.chanctx_conf, &ctx->conf);
+	conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+					 lockdep_is_held(&local->chanctx_mtx));
+	if (!conf) {
+		ret = -EINVAL;
+		goto out;
 	}
 
-	ieee80211_recalc_smps_chanctx(local, ctx);
+	ctx = container_of(conf, struct ieee80211_chanctx, conf);
+	if (!cfg80211_chandef_compatible(&conf->def, chandef)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	sdata->vif.bss_conf.chandef = *chandef;
+
+	ieee80211_recalc_chanctx_chantype(local, ctx);
+
+	*changed |= BSS_CHANGED_BANDWIDTH;
+	ret = 0;
  out:
 	mutex_unlock(&local->chanctx_mtx);
 	return ret;
@@ -369,6 +452,40 @@ void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata)
 	mutex_unlock(&local->chanctx_mtx);
 }
 
+void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
+					 bool clear)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_sub_if_data *vlan;
+	struct ieee80211_chanctx_conf *conf;
+
+	ASSERT_RTNL();
+
+	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
+		return;
+
+	mutex_lock(&local->chanctx_mtx);
+
+	/*
+	 * Check that conf exists, even when clearing this function
+	 * must be called with the AP's channel context still there
+	 * as it would otherwise cause VLANs to have an invalid
+	 * channel context pointer for a while, possibly pointing
+	 * to a channel context that has already been freed.
+	 */
+	conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+					 lockdep_is_held(&local->chanctx_mtx));
+	WARN_ON(!conf);
+
+	if (clear)
+		conf = NULL;
+
+	list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+		rcu_assign_pointer(vlan->vif.chanctx_conf, conf);
+
+	mutex_unlock(&local->chanctx_mtx);
+}
+
 void ieee80211_iter_chan_contexts_atomic(
 	struct ieee80211_hw *hw,
 	void (*iter)(struct ieee80211_hw *hw,
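ieee80211_recalc_radar_chanctx() above uses a common mac80211 idiom: fold per-interface state into a single boolean under rcu_read_lock(), then push the result down only when it actually changed. The fold on its own, with simplified types (all names below are hypothetical):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct example_iface {
	struct list_head list;
	bool radar_required;
};

static bool example_any_radar_required(struct list_head *interfaces)
{
	struct example_iface *iface;
	bool radar = false;

	rcu_read_lock();
	list_for_each_entry_rcu(iface, interfaces, list) {
		if (iface->radar_required) {
			radar = true;
			break;
		}
	}
	rcu_read_unlock();

	return radar;
}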
diff --git a/net/mac80211/debug.h b/net/mac80211/debug.h
index 8f383a576016..4ccc5ed6237d 100644
--- a/net/mac80211/debug.h
+++ b/net/mac80211/debug.h
@@ -44,6 +44,12 @@
 #define MAC80211_MESH_SYNC_DEBUG 0
 #endif
 
+#ifdef CONFIG_MAC80211_MESH_PS_DEBUG
+#define MAC80211_MESH_PS_DEBUG 1
+#else
+#define MAC80211_MESH_PS_DEBUG 0
+#endif
+
 #ifdef CONFIG_MAC80211_TDLS_DEBUG
 #define MAC80211_TDLS_DEBUG 1
 #else
@@ -151,6 +157,10 @@ do { \
 	_sdata_dbg(MAC80211_MESH_SYNC_DEBUG, \
 		   sdata, fmt, ##__VA_ARGS__)
 
+#define mps_dbg(sdata, fmt, ...)			\
+	_sdata_dbg(MAC80211_MESH_PS_DEBUG,		\
+		   sdata, fmt, ##__VA_ARGS__)
+
 #define tdls_dbg(sdata, fmt, ...)			\
 	_sdata_dbg(MAC80211_TDLS_DEBUG,			\
 		   sdata, fmt, ##__VA_ARGS__)
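The 1/0 constant dance above is what lets mps_dbg() compile its arguments even when CONFIG_MAC80211_MESH_PS_DEBUG is off: the call is type-checked, then the dead branch is discarded by the compiler, with no #ifdef at the call sites. The same idiom reduced to userspace C (names invented; ##__VA_ARGS__ is the GNU extension the kernel itself relies on):

#include <stdio.h>

#ifdef EXAMPLE_DEBUG
#define EXAMPLE_DEBUG_ENABLED 1
#else
#define EXAMPLE_DEBUG_ENABLED 0
#endif

/* Arguments are always compiled and type-checked; when the constant
 * is 0 the compiler removes the call entirely. */
#define example_dbg(fmt, ...)					\
	do {							\
		if (EXAMPLE_DEBUG_ENABLED)			\
			fprintf(stderr, fmt, ##__VA_ARGS__);	\
	} while (0)

int main(void)
{
	example_dbg("value = %d\n", 42);
	return 0;
}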
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 466f4b45dd94..b0e32d628114 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -121,8 +121,8 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
 		sf += snprintf(buf + sf, mxln - sf, "SIGNAL_UNSPEC\n");
 	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
 		sf += snprintf(buf + sf, mxln - sf, "SIGNAL_DBM\n");
-	if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD)
-		sf += snprintf(buf + sf, mxln - sf, "NEED_DTIM_PERIOD\n");
+	if (local->hw.flags & IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC)
+		sf += snprintf(buf + sf, mxln - sf, "NEED_DTIM_BEFORE_ASSOC\n");
 	if (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT)
 		sf += snprintf(buf + sf, mxln - sf, "SPECTRUM_MGMT\n");
 	if (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)
@@ -151,8 +151,6 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
 		sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
 	if (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)
 		sf += snprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n");
-	if (local->hw.flags & IEEE80211_HW_SCAN_WHILE_IDLE)
-		sf += snprintf(buf + sf, mxln - sf, "SCAN_WHILE_IDLE\n");
 
 	rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
 	kfree(buf);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index cbde5cc49a40..059bbb82e84f 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -515,6 +515,9 @@ IEEE80211_IF_FILE(dot11MeshHWMProotInterval,
 		  u.mesh.mshcfg.dot11MeshHWMProotInterval, DEC);
 IEEE80211_IF_FILE(dot11MeshHWMPconfirmationInterval,
 		  u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval, DEC);
+IEEE80211_IF_FILE(power_mode, u.mesh.mshcfg.power_mode, DEC);
+IEEE80211_IF_FILE(dot11MeshAwakeWindowDuration,
+		  u.mesh.mshcfg.dot11MeshAwakeWindowDuration, DEC);
 #endif
 
 #define DEBUGFS_ADD_MODE(name, mode) \
@@ -620,6 +623,8 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
 	MESHPARAMS_ADD(dot11MeshHWMPactivePathToRootTimeout);
 	MESHPARAMS_ADD(dot11MeshHWMProotInterval);
 	MESHPARAMS_ADD(dot11MeshHWMPconfirmationInterval);
+	MESHPARAMS_ADD(power_mode);
+	MESHPARAMS_ADD(dot11MeshAwakeWindowDuration);
 #undef MESHPARAMS_ADD
 }
 #endif
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 6fb1168b9f16..c7591f73dbc3 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -65,7 +65,7 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
 		test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
 
 	int res = scnprintf(buf, sizeof(buf),
-			    "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+			    "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
 			    TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
 			    TEST(PS_DRIVER), TEST(AUTHORIZED),
 			    TEST(SHORT_PREAMBLE),
@@ -74,7 +74,8 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
 			    TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
 			    TEST(TDLS_PEER_AUTH), TEST(4ADDR_EVENT),
 			    TEST(INSERTED), TEST(RATE_CONTROL),
-			    TEST(TOFFSET_KNOWN));
+			    TEST(TOFFSET_KNOWN), TEST(MPSP_OWNER),
+			    TEST(MPSP_RECIPIENT));
 #undef TEST
 	return simple_read_from_buffer(userbuf, count, ppos, buf, res);
 }
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 0c07f94c5378..ee56d0779d8b 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -207,13 +207,16 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
 {
 	might_sleep();
 
-	WARN_ON_ONCE(changed & (BSS_CHANGED_BEACON |
-				BSS_CHANGED_BEACON_ENABLED) &&
-		     sdata->vif.type != NL80211_IFTYPE_AP &&
-		     sdata->vif.type != NL80211_IFTYPE_ADHOC &&
-		     sdata->vif.type != NL80211_IFTYPE_MESH_POINT);
-	WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE &&
-		     changed & ~BSS_CHANGED_IDLE);
+	if (WARN_ON_ONCE(changed & (BSS_CHANGED_BEACON |
+				    BSS_CHANGED_BEACON_ENABLED) &&
+			 sdata->vif.type != NL80211_IFTYPE_AP &&
+			 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+			 sdata->vif.type != NL80211_IFTYPE_MESH_POINT))
+		return;
+
+	if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
+			 sdata->vif.type == NL80211_IFTYPE_MONITOR))
+		return;
 
 	check_sdata_in_driver(sdata);
 
@@ -528,6 +531,43 @@ static inline void drv_sta_remove_debugfs(struct ieee80211_local *local,
 	local->ops->sta_remove_debugfs(&local->hw, &sdata->vif,
 				       sta, dir);
 }
+
+static inline
+void drv_add_interface_debugfs(struct ieee80211_local *local,
+			       struct ieee80211_sub_if_data *sdata)
+{
+	might_sleep();
+
+	check_sdata_in_driver(sdata);
+
+	if (!local->ops->add_interface_debugfs)
+		return;
+
+	local->ops->add_interface_debugfs(&local->hw, &sdata->vif,
+					  sdata->debugfs.dir);
+}
+
+static inline
+void drv_remove_interface_debugfs(struct ieee80211_local *local,
+				  struct ieee80211_sub_if_data *sdata)
+{
+	might_sleep();
+
+	check_sdata_in_driver(sdata);
+
+	if (!local->ops->remove_interface_debugfs)
+		return;
+
+	local->ops->remove_interface_debugfs(&local->hw, &sdata->vif,
+					     sdata->debugfs.dir);
+}
+#else
+static inline
+void drv_add_interface_debugfs(struct ieee80211_local *local,
+			       struct ieee80211_sub_if_data *sdata) {}
+static inline
+void drv_remove_interface_debugfs(struct ieee80211_local *local,
+				  struct ieee80211_sub_if_data *sdata) {}
 #endif
 
 static inline __must_check
@@ -569,7 +609,8 @@ static inline void drv_sta_rc_update(struct ieee80211_local *local,
 	check_sdata_in_driver(sdata);
 
 	WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED &&
-		sdata->vif.type != NL80211_IFTYPE_ADHOC);
+		(sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+		 sdata->vif.type != NL80211_IFTYPE_MESH_POINT));
 
 	trace_drv_sta_rc_update(local, sdata, sta, changed);
 	if (local->ops->sta_rc_update)
@@ -845,11 +886,12 @@ static inline void drv_set_rekey_data(struct ieee80211_local *local,
 }
 
 static inline void drv_rssi_callback(struct ieee80211_local *local,
+				     struct ieee80211_sub_if_data *sdata,
 				     const enum ieee80211_rssi_event event)
 {
-	trace_drv_rssi_callback(local, event);
+	trace_drv_rssi_callback(local, sdata, event);
 	if (local->ops->rssi_callback)
-		local->ops->rssi_callback(&local->hw, event);
+		local->ops->rssi_callback(&local->hw, &sdata->vif, event);
 	trace_drv_return_void(local);
 }
 
@@ -1020,4 +1062,32 @@ static inline void drv_restart_complete(struct ieee80211_local *local)
 	trace_drv_return_void(local);
 }
 
+static inline void
+drv_set_default_unicast_key(struct ieee80211_local *local,
+			    struct ieee80211_sub_if_data *sdata,
+			    int key_idx)
+{
+	check_sdata_in_driver(sdata);
+
+	WARN_ON_ONCE(key_idx < -1 || key_idx > 3);
+
+	trace_drv_set_default_unicast_key(local, sdata, key_idx);
+	if (local->ops->set_default_unicast_key)
+		local->ops->set_default_unicast_key(&local->hw, &sdata->vif,
+						    key_idx);
+	trace_drv_return_void(local);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline void drv_ipv6_addr_change(struct ieee80211_local *local,
+					struct ieee80211_sub_if_data *sdata,
+					struct inet6_dev *idev)
+{
+	trace_drv_ipv6_addr_change(local, sdata);
+	if (local->ops->ipv6_addr_change)
+		local->ops->ipv6_addr_change(&local->hw, &sdata->vif, idev);
+	trace_drv_return_void(local);
+}
+#endif
+
 #endif /* __MAC80211_DRIVER_OPS */
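All of the drv_*() wrappers added to this header share one skeleton: validate the sdata, trace entry, call the driver hook only if it is implemented, trace return. Reduced to a self-contained analogue (every name below is a placeholder, not mac80211 API):

#include <stdio.h>

struct example_ops {
	void (*example_op)(int arg);	/* optional driver hook */
};

struct example_local {
	const struct example_ops *ops;
};

static inline void drv_example_op(struct example_local *local, int arg)
{
	fprintf(stderr, "drv_example_op(%d)\n", arg);	/* stand-in trace */
	if (local->ops->example_op)
		local->ops->example_op(arg);
	fprintf(stderr, "drv_example_op: return\n");	/* stand-in trace */
}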
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 61ac7c48ac0c..0db25d4bb223 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -37,6 +37,9 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
 	u8 *smask = (u8 *)(&sdata->u.mgd.ht_capa_mask.mcs.rx_mask);
 	int i;
 
+	if (!ht_cap->ht_supported)
+		return;
+
 	if (sdata->vif.type != NL80211_IFTYPE_STATION) {
 		/* AP interfaces call this code when adding new stations,
 		 * so just silently ignore non station interfaces.
@@ -89,22 +92,24 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
89} 92}
90 93
91 94
92void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, 95bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
93 struct ieee80211_supported_band *sband, 96 struct ieee80211_supported_band *sband,
94 struct ieee80211_ht_cap *ht_cap_ie, 97 const struct ieee80211_ht_cap *ht_cap_ie,
95 struct ieee80211_sta_ht_cap *ht_cap) 98 struct sta_info *sta)
96{ 99{
100 struct ieee80211_sta_ht_cap ht_cap;
97 u8 ampdu_info, tx_mcs_set_cap; 101 u8 ampdu_info, tx_mcs_set_cap;
98 int i, max_tx_streams; 102 int i, max_tx_streams;
103 bool changed;
104 enum ieee80211_sta_rx_bandwidth bw;
105 enum ieee80211_smps_mode smps_mode;
99 106
100 BUG_ON(!ht_cap); 107 memset(&ht_cap, 0, sizeof(ht_cap));
101
102 memset(ht_cap, 0, sizeof(*ht_cap));
103 108
104 if (!ht_cap_ie || !sband->ht_cap.ht_supported) 109 if (!ht_cap_ie || !sband->ht_cap.ht_supported)
105 return; 110 goto apply;
106 111
107 ht_cap->ht_supported = true; 112 ht_cap.ht_supported = true;
108 113
109 /* 114 /*
110 * The bits listed in this expression should be 115 * The bits listed in this expression should be
@@ -112,7 +117,7 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
112 * advertises more then we can't use those thus 117 * advertises more then we can't use those thus
113 * we mask them out. 118 * we mask them out.
114 */ 119 */
115 ht_cap->cap = le16_to_cpu(ht_cap_ie->cap_info) & 120 ht_cap.cap = le16_to_cpu(ht_cap_ie->cap_info) &
116 (sband->ht_cap.cap | 121 (sband->ht_cap.cap |
117 ~(IEEE80211_HT_CAP_LDPC_CODING | 122 ~(IEEE80211_HT_CAP_LDPC_CODING |
118 IEEE80211_HT_CAP_SUP_WIDTH_20_40 | 123 IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
@@ -121,44 +126,30 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
121 IEEE80211_HT_CAP_SGI_40 | 126 IEEE80211_HT_CAP_SGI_40 |
122 IEEE80211_HT_CAP_DSSSCCK40)); 127 IEEE80211_HT_CAP_DSSSCCK40));
123 128
124 /* Unset 40 MHz if we're not using a 40 MHz channel */
125 switch (sdata->vif.bss_conf.chandef.width) {
126 case NL80211_CHAN_WIDTH_20_NOHT:
127 case NL80211_CHAN_WIDTH_20:
128 ht_cap->cap &= ~IEEE80211_HT_CAP_SGI_40;
129 ht_cap->cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
130 break;
131 case NL80211_CHAN_WIDTH_40:
132 case NL80211_CHAN_WIDTH_80:
133 case NL80211_CHAN_WIDTH_80P80:
134 case NL80211_CHAN_WIDTH_160:
135 break;
136 }
137
138 /* 129 /*
139 * The STBC bits are asymmetric -- if we don't have 130 * The STBC bits are asymmetric -- if we don't have
140 * TX then mask out the peer's RX and vice versa. 131 * TX then mask out the peer's RX and vice versa.
141 */ 132 */
142 if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC)) 133 if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC))
143 ht_cap->cap &= ~IEEE80211_HT_CAP_RX_STBC; 134 ht_cap.cap &= ~IEEE80211_HT_CAP_RX_STBC;
144 if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)) 135 if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC))
145 ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC; 136 ht_cap.cap &= ~IEEE80211_HT_CAP_TX_STBC;
146 137
147 ampdu_info = ht_cap_ie->ampdu_params_info; 138 ampdu_info = ht_cap_ie->ampdu_params_info;
148 ht_cap->ampdu_factor = 139 ht_cap.ampdu_factor =
149 ampdu_info & IEEE80211_HT_AMPDU_PARM_FACTOR; 140 ampdu_info & IEEE80211_HT_AMPDU_PARM_FACTOR;
150 ht_cap->ampdu_density = 141 ht_cap.ampdu_density =
151 (ampdu_info & IEEE80211_HT_AMPDU_PARM_DENSITY) >> 2; 142 (ampdu_info & IEEE80211_HT_AMPDU_PARM_DENSITY) >> 2;
152 143
153 /* own MCS TX capabilities */ 144 /* own MCS TX capabilities */
154 tx_mcs_set_cap = sband->ht_cap.mcs.tx_params; 145 tx_mcs_set_cap = sband->ht_cap.mcs.tx_params;
155 146
156 /* Copy peer MCS TX capabilities, the driver might need them. */ 147 /* Copy peer MCS TX capabilities, the driver might need them. */
157 ht_cap->mcs.tx_params = ht_cap_ie->mcs.tx_params; 148 ht_cap.mcs.tx_params = ht_cap_ie->mcs.tx_params;
158 149
159 /* can we TX with MCS rates? */ 150 /* can we TX with MCS rates? */
160 if (!(tx_mcs_set_cap & IEEE80211_HT_MCS_TX_DEFINED)) 151 if (!(tx_mcs_set_cap & IEEE80211_HT_MCS_TX_DEFINED))
161 return; 152 goto apply;
162 153
163 /* Counting from 0, therefore +1 */ 154 /* Counting from 0, therefore +1 */
164 if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_RX_DIFF) 155 if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_RX_DIFF)
@@ -176,25 +167,75 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
176 * - remainder are multiple spatial streams using unequal modulation 167 * - remainder are multiple spatial streams using unequal modulation
177 */ 168 */
178 for (i = 0; i < max_tx_streams; i++) 169 for (i = 0; i < max_tx_streams; i++)
179 ht_cap->mcs.rx_mask[i] = 170 ht_cap.mcs.rx_mask[i] =
180 sband->ht_cap.mcs.rx_mask[i] & ht_cap_ie->mcs.rx_mask[i]; 171 sband->ht_cap.mcs.rx_mask[i] & ht_cap_ie->mcs.rx_mask[i];
181 172
182 if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION) 173 if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION)
183 for (i = IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE; 174 for (i = IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE;
184 i < IEEE80211_HT_MCS_MASK_LEN; i++) 175 i < IEEE80211_HT_MCS_MASK_LEN; i++)
185 ht_cap->mcs.rx_mask[i] = 176 ht_cap.mcs.rx_mask[i] =
186 sband->ht_cap.mcs.rx_mask[i] & 177 sband->ht_cap.mcs.rx_mask[i] &
187 ht_cap_ie->mcs.rx_mask[i]; 178 ht_cap_ie->mcs.rx_mask[i];
188 179
189 /* handle MCS rate 32 too */ 180 /* handle MCS rate 32 too */
190 if (sband->ht_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1) 181 if (sband->ht_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1)
191 ht_cap->mcs.rx_mask[32/8] |= 1; 182 ht_cap.mcs.rx_mask[32/8] |= 1;
192 183
184 apply:
193 /* 185 /*
194 * If user has specified capability over-rides, take care 186 * If user has specified capability over-rides, take care
195 * of that here. 187 * of that here.
196 */ 188 */
197 ieee80211_apply_htcap_overrides(sdata, ht_cap); 189 ieee80211_apply_htcap_overrides(sdata, &ht_cap);
190
191 changed = memcmp(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap));
192
193 memcpy(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap));
194
195 switch (sdata->vif.bss_conf.chandef.width) {
196 default:
197 WARN_ON_ONCE(1);
198 /* fall through */
199 case NL80211_CHAN_WIDTH_20_NOHT:
200 case NL80211_CHAN_WIDTH_20:
201 bw = IEEE80211_STA_RX_BW_20;
202 break;
203 case NL80211_CHAN_WIDTH_40:
204 case NL80211_CHAN_WIDTH_80:
205 case NL80211_CHAN_WIDTH_80P80:
206 case NL80211_CHAN_WIDTH_160:
207 bw = ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
208 IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
209 break;
210 }
211
212 if (bw != sta->sta.bandwidth)
213 changed = true;
214 sta->sta.bandwidth = bw;
215
216 sta->cur_max_bandwidth =
217 ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
218 IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
219
220 switch ((ht_cap.cap & IEEE80211_HT_CAP_SM_PS)
221 >> IEEE80211_HT_CAP_SM_PS_SHIFT) {
222 case WLAN_HT_CAP_SM_PS_INVALID:
223 case WLAN_HT_CAP_SM_PS_STATIC:
224 smps_mode = IEEE80211_SMPS_STATIC;
225 break;
226 case WLAN_HT_CAP_SM_PS_DYNAMIC:
227 smps_mode = IEEE80211_SMPS_DYNAMIC;
228 break;
229 case WLAN_HT_CAP_SM_PS_DISABLED:
230 smps_mode = IEEE80211_SMPS_OFF;
231 break;
232 }
233
234 if (smps_mode != sta->sta.smps_mode)
235 changed = true;
236 sta->sta.smps_mode = smps_mode;
237
238 return changed;
198} 239}
199 240
200void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, 241void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
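Editor's note: the reworked ieee80211_ht_cap_ie_to_sta_ht_cap() above intersects our own HT capabilities with the peer's and crosses over the asymmetric STBC bits, since our TX STBC pairs with the peer's RX STBC and vice versa. A minimal userspace sketch of that masking, using illustrative flag values rather than the kernel's real IEEE80211_HT_CAP_* constants:

#include <stdint.h>
#include <stdio.h>

/* stand-ins for the kernel's IEEE80211_HT_CAP_* flags (illustrative values) */
#define HT_CAP_RX_STBC 0x0100
#define HT_CAP_TX_STBC 0x0080

/* Cross-mask the asymmetric STBC bits: if we cannot TX STBC, the peer's
 * RX STBC capability is useless to us, and vice versa. */
static uint16_t effective_ht_cap(uint16_t own, uint16_t peer)
{
    uint16_t cap = peer;

    if (!(own & HT_CAP_TX_STBC))
        cap &= ~HT_CAP_RX_STBC;
    if (!(own & HT_CAP_RX_STBC))
        cap &= ~HT_CAP_TX_STBC;
    return cap;
}

int main(void)
{
    /* we TX STBC but do not RX it; the peer claims both */
    printf("0x%04x\n", (unsigned)effective_ht_cap(HT_CAP_TX_STBC,
                                  HT_CAP_RX_STBC | HT_CAP_TX_STBC));
    return 0;
}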
@@ -406,6 +447,9 @@ void ieee80211_request_smps(struct ieee80211_vif *vif,
406 if (WARN_ON(smps_mode == IEEE80211_SMPS_OFF)) 447 if (WARN_ON(smps_mode == IEEE80211_SMPS_OFF))
407 smps_mode = IEEE80211_SMPS_AUTOMATIC; 448 smps_mode = IEEE80211_SMPS_AUTOMATIC;
408 449
450 if (sdata->u.mgd.driver_smps_mode == smps_mode)
451 return;
452
409 sdata->u.mgd.driver_smps_mode = smps_mode; 453 sdata->u.mgd.driver_smps_mode = smps_mode;
410 454
411 ieee80211_queue_work(&sdata->local->hw, 455 ieee80211_queue_work(&sdata->local->hw,
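Editor's note: the function now also derives the station's SMPS mode from the two-bit SM Power Save subfield of the HT capability info. A hedged sketch of that decode; the mask and shift mirror the 802.11n field layout, and the mapping follows the switch in the hunk above, with the reserved/invalid encoding treated as static:

#include <stdint.h>
#include <stdio.h>

#define HT_CAP_SM_PS        0x000c  /* bits 2-3 of cap_info (802.11n) */
#define HT_CAP_SM_PS_SHIFT  2

enum smps_mode { SMPS_STATIC, SMPS_DYNAMIC, SMPS_OFF };

/* Decode the SM Power Save subfield the way the patched function does:
 * the invalid encoding falls back to static, the safest choice. */
static enum smps_mode decode_sm_ps(uint16_t cap_info)
{
    switch ((cap_info & HT_CAP_SM_PS) >> HT_CAP_SM_PS_SHIFT) {
    case 1:  return SMPS_DYNAMIC;   /* WLAN_HT_CAP_SM_PS_DYNAMIC */
    case 3:  return SMPS_OFF;       /* WLAN_HT_CAP_SM_PS_DISABLED */
    default: return SMPS_STATIC;    /* static or invalid encoding */
    }
}

int main(void)
{
    printf("%d\n", decode_sm_ps(0x0004)); /* dynamic -> prints 1 */
    return 0;
}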
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index b4b866f41919..40b71dfcc79d 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -228,7 +228,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
228 228
229 bss = cfg80211_inform_bss_frame(local->hw.wiphy, chan, 229 bss = cfg80211_inform_bss_frame(local->hw.wiphy, chan,
230 mgmt, skb->len, 0, GFP_KERNEL); 230 mgmt, skb->len, 0, GFP_KERNEL);
231 cfg80211_put_bss(bss); 231 cfg80211_put_bss(local->hw.wiphy, bss);
232 netif_carrier_on(sdata->dev); 232 netif_carrier_on(sdata->dev);
233 cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL); 233 cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
234} 234}
@@ -242,6 +242,8 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
242 u32 basic_rates; 242 u32 basic_rates;
243 int i, j; 243 int i, j;
244 u16 beacon_int = cbss->beacon_interval; 244 u16 beacon_int = cbss->beacon_interval;
245 const struct cfg80211_bss_ies *ies;
246 u64 tsf;
245 247
246 lockdep_assert_held(&sdata->u.ibss.mtx); 248 lockdep_assert_held(&sdata->u.ibss.mtx);
247 249
@@ -265,13 +267,17 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
265 } 267 }
266 } 268 }
267 269
270 rcu_read_lock();
271 ies = rcu_dereference(cbss->ies);
272 tsf = ies->tsf;
273 rcu_read_unlock();
274
268 __ieee80211_sta_join_ibss(sdata, cbss->bssid, 275 __ieee80211_sta_join_ibss(sdata, cbss->bssid,
269 beacon_int, 276 beacon_int,
270 cbss->channel, 277 cbss->channel,
271 basic_rates, 278 basic_rates,
272 cbss->capability, 279 cbss->capability,
273 cbss->tsf, 280 tsf, false);
274 false);
275} 281}
276 282
277static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta, 283static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
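Editor's note: ieee80211_sta_join_ibss() now snapshots the TSF from the RCU-protected cbss->ies instead of the removed cbss->tsf field. The sketch below shows the shape of that access; the rcu_* macros are no-op stand-ins so it compiles standalone, not the kernel's real primitives:

#include <stdint.h>
#include <stdio.h>

/* no-op stand-ins for the kernel RCU API, illustration only */
#define rcu_read_lock()      do { } while (0)
#define rcu_read_unlock()    do { } while (0)
#define rcu_dereference(p)   (p)

struct bss_ies { uint64_t tsf; };
struct bss     { struct bss_ies *ies; };

/* Copy the value out while the read-side critical section pins the
 * ies object; only the snapshot is used after rcu_read_unlock(). */
static uint64_t bss_tsf(struct bss *cbss)
{
    const struct bss_ies *ies;
    uint64_t tsf;

    rcu_read_lock();
    ies = rcu_dereference(cbss->ies);
    tsf = ies->tsf;
    rcu_read_unlock();
    return tsf;
}

int main(void)
{
    struct bss_ies ies = { .tsf = 12345 };
    struct bss cbss = { .ies = &ies };
    printf("%llu\n", (unsigned long long)bss_tsf(&cbss));
    return 0;
}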
@@ -302,7 +308,7 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
302 "TX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=1)\n", 308 "TX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=1)\n",
303 sdata->vif.addr, addr, sdata->u.ibss.bssid); 309 sdata->vif.addr, addr, sdata->u.ibss.bssid);
304 ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, 0, NULL, 0, 310 ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, 0, NULL, 0,
305 addr, sdata->u.ibss.bssid, NULL, 0, 0); 311 addr, sdata->u.ibss.bssid, NULL, 0, 0, 0);
306 } 312 }
307 return sta; 313 return sta;
308} 314}
@@ -422,7 +428,7 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
422 * has actually implemented this. 428 * has actually implemented this.
423 */ 429 */
424 ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, 0, NULL, 0, 430 ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, 0, NULL, 0,
425 mgmt->sa, sdata->u.ibss.bssid, NULL, 0, 0); 431 mgmt->sa, sdata->u.ibss.bssid, NULL, 0, 0, 0);
426} 432}
427 433
428static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, 434static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
@@ -490,33 +496,26 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
490 if (sta && elems->ht_operation && elems->ht_cap_elem && 496 if (sta && elems->ht_operation && elems->ht_cap_elem &&
491 sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) { 497 sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) {
492 /* we both use HT */ 498 /* we both use HT */
493 struct ieee80211_sta_ht_cap sta_ht_cap_new; 499 struct ieee80211_ht_cap htcap_ie;
494 struct cfg80211_chan_def chandef; 500 struct cfg80211_chan_def chandef;
495 501
496 ieee80211_ht_oper_to_chandef(channel, 502 ieee80211_ht_oper_to_chandef(channel,
497 elems->ht_operation, 503 elems->ht_operation,
498 &chandef); 504 &chandef);
499 505
500 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 506 memcpy(&htcap_ie, elems->ht_cap_elem, sizeof(htcap_ie));
501 elems->ht_cap_elem,
502 &sta_ht_cap_new);
503 507
504 /* 508 /*
505 * fall back to HT20 if we don't use or use 509 * fall back to HT20 if we don't use or use
506 * the other extension channel 510 * the other extension channel
507 */ 511 */
508 if (chandef.width != NL80211_CHAN_WIDTH_40 || 512 if (cfg80211_get_chandef_type(&chandef) !=
509 cfg80211_get_chandef_type(&chandef) !=
510 sdata->u.ibss.channel_type) 513 sdata->u.ibss.channel_type)
511 sta_ht_cap_new.cap &= 514 htcap_ie.cap_info &=
512 ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; 515 cpu_to_le16(~IEEE80211_HT_CAP_SUP_WIDTH_20_40);
513 516
514 if (memcmp(&sta->sta.ht_cap, &sta_ht_cap_new, 517 rates_updated |= ieee80211_ht_cap_ie_to_sta_ht_cap(
515 sizeof(sta_ht_cap_new))) { 518 sdata, sband, &htcap_ie, sta);
516 memcpy(&sta->sta.ht_cap, &sta_ht_cap_new,
517 sizeof(sta_ht_cap_new));
518 rates_updated = true;
519 }
520 } 519 }
521 520
522 if (sta && rates_updated) { 521 if (sta && rates_updated) {
@@ -535,8 +534,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
535 534
536 cbss = container_of((void *)bss, struct cfg80211_bss, priv); 535 cbss = container_of((void *)bss, struct cfg80211_bss, priv);
537 536
538 /* was just updated in ieee80211_bss_info_update */ 537 /* same for beacon and probe response */
539 beacon_timestamp = cbss->tsf; 538 beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp);
540 539
541 /* check if we need to merge IBSS */ 540 /* check if we need to merge IBSS */
542 541
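Editor's note: with the cached cbss->tsf gone, the beacon timestamp is now taken straight off the frame, which carries it little-endian; the kernel converts with le64_to_cpu(). A portable equivalent that assembles the host value byte by byte:

#include <stdint.h>
#include <stdio.h>

/* portable equivalent of le64_to_cpu() applied to a raw 8-byte
 * frame field: byte 0 is least significant on the air */
static uint64_t le64_to_host(const uint8_t b[8])
{
    uint64_t v = 0;
    for (int i = 7; i >= 0; i--)
        v = (v << 8) | b[i];
    return v;
}

int main(void)
{
    const uint8_t ts[8] = { 0x01, 0x02, 0, 0, 0, 0, 0, 0 };
    printf("0x%llx\n", (unsigned long long)le64_to_host(ts)); /* 0x201 */
    return 0;
}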
@@ -1102,10 +1101,6 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
1102 1101
1103 mutex_unlock(&sdata->u.ibss.mtx); 1102 mutex_unlock(&sdata->u.ibss.mtx);
1104 1103
1105 mutex_lock(&sdata->local->mtx);
1106 ieee80211_recalc_idle(sdata->local);
1107 mutex_unlock(&sdata->local->mtx);
1108
1109 /* 1104 /*
1110 * 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is 1105 * 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is
1111 * reserved, but an HT STA shall protect HT transmissions as though 1106 * reserved, but an HT STA shall protect HT transmissions as though
@@ -1159,7 +1154,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
1159 1154
1160 if (cbss) { 1155 if (cbss) {
1161 cfg80211_unlink_bss(local->hw.wiphy, cbss); 1156 cfg80211_unlink_bss(local->hw.wiphy, cbss);
1162 cfg80211_put_bss(cbss); 1157 cfg80211_put_bss(local->hw.wiphy, cbss);
1163 } 1158 }
1164 } 1159 }
1165 1160
@@ -1203,9 +1198,5 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
1203 1198
1204 mutex_unlock(&sdata->u.ibss.mtx); 1199 mutex_unlock(&sdata->u.ibss.mtx);
1205 1200
1206 mutex_lock(&local->mtx);
1207 ieee80211_recalc_idle(sdata->local);
1208 mutex_unlock(&local->mtx);
1209
1210 return 0; 1201 return 0;
1211} 1202}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 63f0430c131e..388580a1bada 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -86,23 +86,11 @@ struct ieee80211_fragment_entry {
86 86
87 87
88struct ieee80211_bss { 88struct ieee80211_bss {
89 /* don't want to look up all the time */ 89 u32 device_ts_beacon, device_ts_presp;
90 size_t ssid_len;
91 u8 ssid[IEEE80211_MAX_SSID_LEN];
92
93 u32 device_ts;
94 90
95 bool wmm_used; 91 bool wmm_used;
96 bool uapsd_supported; 92 bool uapsd_supported;
97 93
98 unsigned long last_probe_resp;
99
100#ifdef CONFIG_MAC80211_MESH
101 u8 *mesh_id;
102 size_t mesh_id_len;
103 u8 *mesh_cfg;
104#endif
105
106#define IEEE80211_MAX_SUPP_RATES 32 94#define IEEE80211_MAX_SUPP_RATES 32
107 u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; 95 u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
108 size_t supp_rates_len; 96 size_t supp_rates_len;
@@ -153,31 +141,6 @@ enum ieee80211_bss_valid_data_flags {
153 IEEE80211_BSS_VALID_ERP = BIT(3) 141 IEEE80211_BSS_VALID_ERP = BIT(3)
154}; 142};
155 143
156static inline u8 *bss_mesh_cfg(struct ieee80211_bss *bss)
157{
158#ifdef CONFIG_MAC80211_MESH
159 return bss->mesh_cfg;
160#endif
161 return NULL;
162}
163
164static inline u8 *bss_mesh_id(struct ieee80211_bss *bss)
165{
166#ifdef CONFIG_MAC80211_MESH
167 return bss->mesh_id;
168#endif
169 return NULL;
170}
171
172static inline u8 bss_mesh_id_len(struct ieee80211_bss *bss)
173{
174#ifdef CONFIG_MAC80211_MESH
175 return bss->mesh_id_len;
176#endif
177 return 0;
178}
179
180
181typedef unsigned __bitwise__ ieee80211_tx_result; 144typedef unsigned __bitwise__ ieee80211_tx_result;
182#define TX_CONTINUE ((__force ieee80211_tx_result) 0u) 145#define TX_CONTINUE ((__force ieee80211_tx_result) 0u)
183#define TX_DROP ((__force ieee80211_tx_result) 1u) 146#define TX_DROP ((__force ieee80211_tx_result) 1u)
@@ -380,6 +343,7 @@ struct ieee80211_mgd_auth_data {
380 u8 key[WLAN_KEY_LEN_WEP104]; 343 u8 key[WLAN_KEY_LEN_WEP104];
381 u8 key_len, key_idx; 344 u8 key_len, key_idx;
382 bool done; 345 bool done;
346 bool timeout_started;
383 347
384 u16 sae_trans, sae_status; 348 u16 sae_trans, sae_status;
385 size_t data_len; 349 size_t data_len;
@@ -399,9 +363,9 @@ struct ieee80211_mgd_assoc_data {
399 u8 ssid_len; 363 u8 ssid_len;
400 u8 supp_rates_len; 364 u8 supp_rates_len;
401 bool wmm, uapsd; 365 bool wmm, uapsd;
402 bool have_beacon; 366 bool have_beacon, need_beacon;
403 bool sent_assoc;
404 bool synced; 367 bool synced;
368 bool timeout_started;
405 369
406 u8 ap_ht_param; 370 u8 ap_ht_param;
407 371
@@ -425,6 +389,7 @@ struct ieee80211_if_managed {
425 unsigned long probe_timeout; 389 unsigned long probe_timeout;
426 int probe_send_count; 390 int probe_send_count;
427 bool nullfunc_failed; 391 bool nullfunc_failed;
392 bool connection_loss;
428 393
429 struct mutex mtx; 394 struct mutex mtx;
430 struct cfg80211_bss *associated; 395 struct cfg80211_bss *associated;
@@ -449,6 +414,10 @@ struct ieee80211_if_managed {
449 bool beacon_crc_valid; 414 bool beacon_crc_valid;
450 u32 beacon_crc; 415 u32 beacon_crc;
451 416
417 bool status_acked;
418 bool status_received;
419 __le16 status_fc;
420
452 enum { 421 enum {
453 IEEE80211_MFP_DISABLED, 422 IEEE80211_MFP_DISABLED,
454 IEEE80211_MFP_OPTIONAL, 423 IEEE80211_MFP_OPTIONAL,
@@ -611,6 +580,9 @@ struct ieee80211_if_mesh {
611 u32 mesh_seqnum; 580 u32 mesh_seqnum;
612 bool accepting_plinks; 581 bool accepting_plinks;
613 int num_gates; 582 int num_gates;
583 struct beacon_data __rcu *beacon;
584 /* just protects beacon updates for now */
585 struct mutex mtx;
614 const u8 *ie; 586 const u8 *ie;
615 u8 ie_len; 587 u8 ie_len;
616 enum { 588 enum {
@@ -623,6 +595,11 @@ struct ieee80211_if_mesh {
623 s64 sync_offset_clockdrift_max; 595 s64 sync_offset_clockdrift_max;
624 spinlock_t sync_offset_lock; 596 spinlock_t sync_offset_lock;
625 bool adjusting_tbtt; 597 bool adjusting_tbtt;
598 /* mesh power save */
599 enum nl80211_mesh_power_mode nonpeer_pm;
600 int ps_peers_light_sleep;
601 int ps_peers_deep_sleep;
602 struct ps_data ps;
626}; 603};
627 604
628#ifdef CONFIG_MAC80211_MESH 605#ifdef CONFIG_MAC80211_MESH
@@ -717,9 +694,6 @@ struct ieee80211_sub_if_data {
717 694
718 char name[IFNAMSIZ]; 695 char name[IFNAMSIZ];
719 696
720 /* to detect idle changes */
721 bool old_idle;
722
723 /* Fragment table for host-based reassembly */ 697 /* Fragment table for host-based reassembly */
724 struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX]; 698 struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
725 unsigned int fragment_next; 699 unsigned int fragment_next;
@@ -747,14 +721,15 @@ struct ieee80211_sub_if_data {
747 struct work_struct work; 721 struct work_struct work;
748 struct sk_buff_head skb_queue; 722 struct sk_buff_head skb_queue;
749 723
750 bool arp_filter_state;
751
752 u8 needed_rx_chains; 724 u8 needed_rx_chains;
753 enum ieee80211_smps_mode smps_mode; 725 enum ieee80211_smps_mode smps_mode;
754 726
755 int user_power_level; /* in dBm */ 727 int user_power_level; /* in dBm */
756 int ap_power_level; /* in dBm */ 728 int ap_power_level; /* in dBm */
757 729
730 bool radar_required;
731 struct delayed_work dfs_cac_timer_work;
732
758 /* 733 /*
759 * AP this belongs to: self in AP mode and 734 * AP this belongs to: self in AP mode and
760 * corresponding AP in VLAN mode, NULL for 735 * corresponding AP in VLAN mode, NULL for
@@ -842,6 +817,7 @@ enum queue_stop_reason {
842 IEEE80211_QUEUE_STOP_REASON_AGGREGATION, 817 IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
843 IEEE80211_QUEUE_STOP_REASON_SUSPEND, 818 IEEE80211_QUEUE_STOP_REASON_SUSPEND,
844 IEEE80211_QUEUE_STOP_REASON_SKB_ADD, 819 IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
820 IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
845}; 821};
846 822
847#ifdef CONFIG_MAC80211_LEDS 823#ifdef CONFIG_MAC80211_LEDS
@@ -974,6 +950,10 @@ struct ieee80211_local {
974 /* wowlan is enabled -- don't reconfig on resume */ 950 /* wowlan is enabled -- don't reconfig on resume */
975 bool wowlan; 951 bool wowlan;
976 952
953 /* DFS/radar detection is enabled */
954 bool radar_detect_enabled;
955 struct work_struct radar_detected_work;
956
977 /* number of RX chains the hardware has */ 957 /* number of RX chains the hardware has */
978 u8 rx_chains; 958 u8 rx_chains;
979 959
@@ -988,14 +968,7 @@ struct ieee80211_local {
988 struct sk_buff_head skb_queue; 968 struct sk_buff_head skb_queue;
989 struct sk_buff_head skb_queue_unreliable; 969 struct sk_buff_head skb_queue_unreliable;
990 970
991 /* 971 spinlock_t rx_path_lock;
992 * Internal FIFO queue which is shared between multiple rx path
993 * stages. Its main task is to provide a serialization mechanism,
994 * so all rx handlers can enjoy having exclusive access to their
995 * private data structures.
996 */
997 struct sk_buff_head rx_skb_queue;
998 bool running_rx_handler; /* protected by rx_skb_queue.lock */
999 972
1000 /* Station data */ 973 /* Station data */
1001 /* 974 /*
@@ -1129,14 +1102,13 @@ struct ieee80211_local {
1129 struct timer_list dynamic_ps_timer; 1102 struct timer_list dynamic_ps_timer;
1130 struct notifier_block network_latency_notifier; 1103 struct notifier_block network_latency_notifier;
1131 struct notifier_block ifa_notifier; 1104 struct notifier_block ifa_notifier;
1105 struct notifier_block ifa6_notifier;
1132 1106
1133 /* 1107 /*
1134 * The dynamic ps timeout configured from user space via WEXT - 1108 * The dynamic ps timeout configured from user space via WEXT -
1135 * this will override whatever chosen by mac80211 internally. 1109 * this will override whatever chosen by mac80211 internally.
1136 */ 1110 */
1137 int dynamic_ps_forced_timeout; 1111 int dynamic_ps_forced_timeout;
1138 int dynamic_ps_user_timeout;
1139 bool disable_dynamic_ps;
1140 1112
1141 int user_power_level; /* in dBm, for all interfaces */ 1113 int user_power_level; /* in dBm, for all interfaces */
1142 1114
@@ -1194,40 +1166,41 @@ struct ieee80211_ra_tid {
1194 1166
1195/* Parsed Information Elements */ 1167/* Parsed Information Elements */
1196struct ieee802_11_elems { 1168struct ieee802_11_elems {
1197 u8 *ie_start; 1169 const u8 *ie_start;
1198 size_t total_len; 1170 size_t total_len;
1199 1171
1200 /* pointers to IEs */ 1172 /* pointers to IEs */
1201 u8 *ssid; 1173 const u8 *ssid;
1202 u8 *supp_rates; 1174 const u8 *supp_rates;
1203 u8 *fh_params; 1175 const u8 *fh_params;
1204 u8 *ds_params; 1176 const u8 *ds_params;
1205 u8 *cf_params; 1177 const u8 *cf_params;
1206 struct ieee80211_tim_ie *tim; 1178 const struct ieee80211_tim_ie *tim;
1207 u8 *ibss_params; 1179 const u8 *ibss_params;
1208 u8 *challenge; 1180 const u8 *challenge;
1209 u8 *wpa; 1181 const u8 *rsn;
1210 u8 *rsn; 1182 const u8 *erp_info;
1211 u8 *erp_info; 1183 const u8 *ext_supp_rates;
1212 u8 *ext_supp_rates; 1184 const u8 *wmm_info;
1213 u8 *wmm_info; 1185 const u8 *wmm_param;
1214 u8 *wmm_param; 1186 const struct ieee80211_ht_cap *ht_cap_elem;
1215 struct ieee80211_ht_cap *ht_cap_elem; 1187 const struct ieee80211_ht_operation *ht_operation;
1216 struct ieee80211_ht_operation *ht_operation; 1188 const struct ieee80211_vht_cap *vht_cap_elem;
1217 struct ieee80211_vht_cap *vht_cap_elem; 1189 const struct ieee80211_vht_operation *vht_operation;
1218 struct ieee80211_vht_operation *vht_operation; 1190 const struct ieee80211_meshconf_ie *mesh_config;
1219 struct ieee80211_meshconf_ie *mesh_config; 1191 const u8 *mesh_id;
1220 u8 *mesh_id; 1192 const u8 *peering;
1221 u8 *peering; 1193 const __le16 *awake_window;
1222 u8 *preq; 1194 const u8 *preq;
1223 u8 *prep; 1195 const u8 *prep;
1224 u8 *perr; 1196 const u8 *perr;
1225 struct ieee80211_rann_ie *rann; 1197 const struct ieee80211_rann_ie *rann;
1226 struct ieee80211_channel_sw_ie *ch_switch_ie; 1198 const struct ieee80211_channel_sw_ie *ch_switch_ie;
1227 u8 *country_elem; 1199 const u8 *country_elem;
1228 u8 *pwr_constr_elem; 1200 const u8 *pwr_constr_elem;
1229 u8 *quiet_elem; /* first quiet element */ 1201 const u8 *quiet_elem; /* first quiet element */
1230 u8 *timeout_int; 1202 const u8 *timeout_int;
1203 const u8 *opmode_notif;
1231 1204
1232 /* length of them, respectively */ 1205 /* length of them, respectively */
1233 u8 ssid_len; 1206 u8 ssid_len;
@@ -1238,7 +1211,6 @@ struct ieee802_11_elems {
1238 u8 tim_len; 1211 u8 tim_len;
1239 u8 ibss_params_len; 1212 u8 ibss_params_len;
1240 u8 challenge_len; 1213 u8 challenge_len;
1241 u8 wpa_len;
1242 u8 rsn_len; 1214 u8 rsn_len;
1243 u8 erp_info_len; 1215 u8 erp_info_len;
1244 u8 ext_supp_rates_len; 1216 u8 ext_supp_rates_len;
@@ -1307,10 +1279,10 @@ void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata);
1307int ieee80211_max_network_latency(struct notifier_block *nb, 1279int ieee80211_max_network_latency(struct notifier_block *nb,
1308 unsigned long data, void *dummy); 1280 unsigned long data, void *dummy);
1309int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata); 1281int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata);
1310void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, 1282void
1311 struct ieee80211_channel_sw_ie *sw_elem, 1283ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1312 struct ieee80211_bss *bss, 1284 const struct ieee80211_channel_sw_ie *sw_elem,
1313 u64 timestamp); 1285 struct ieee80211_bss *bss, u64 timestamp);
1314void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata); 1286void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata);
1315void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata); 1287void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
1316void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata); 1288void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata);
@@ -1319,6 +1291,8 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1319void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata); 1291void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata);
1320void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata); 1292void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
1321void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata); 1293void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata);
1294void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata,
1295 __le16 fc, bool acked);
1322 1296
1323/* IBSS code */ 1297/* IBSS code */
1324void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); 1298void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
@@ -1414,10 +1388,10 @@ void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
1414/* HT */ 1388/* HT */
1415void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, 1389void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
1416 struct ieee80211_sta_ht_cap *ht_cap); 1390 struct ieee80211_sta_ht_cap *ht_cap);
1417void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, 1391bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
1418 struct ieee80211_supported_band *sband, 1392 struct ieee80211_supported_band *sband,
1419 struct ieee80211_ht_cap *ht_cap_ie, 1393 const struct ieee80211_ht_cap *ht_cap_ie,
1420 struct ieee80211_sta_ht_cap *ht_cap); 1394 struct sta_info *sta);
1421void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, 1395void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
1422 const u8 *da, u16 tid, 1396 const u8 *da, u16 tid,
1423 u16 initiator, u16 reason_code); 1397 u16 initiator, u16 reason_code);
@@ -1457,10 +1431,17 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
1457u8 ieee80211_mcs_to_chains(const struct ieee80211_mcs_info *mcs); 1431u8 ieee80211_mcs_to_chains(const struct ieee80211_mcs_info *mcs);
1458 1432
1459/* VHT */ 1433/* VHT */
1460void ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, 1434void
1461 struct ieee80211_supported_band *sband, 1435ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
1462 struct ieee80211_vht_cap *vht_cap_ie, 1436 struct ieee80211_supported_band *sband,
1463 struct ieee80211_sta_vht_cap *vht_cap); 1437 const struct ieee80211_vht_cap *vht_cap_ie,
1438 struct sta_info *sta);
1439enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
1440void ieee80211_sta_set_rx_nss(struct sta_info *sta);
1441void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
1442 struct sta_info *sta, u8 opmode,
1443 enum ieee80211_band band, bool nss_only);
1444
1464/* Spectrum management */ 1445/* Spectrum management */
1465void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, 1446void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
1466 struct ieee80211_mgmt *mgmt, 1447 struct ieee80211_mgmt *mgmt,
@@ -1578,8 +1559,9 @@ static inline void ieee80211_add_pending_skbs(struct ieee80211_local *local,
1578 1559
1579void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, 1560void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1580 u16 transaction, u16 auth_alg, u16 status, 1561 u16 transaction, u16 auth_alg, u16 status,
1581 u8 *extra, size_t extra_len, const u8 *bssid, 1562 const u8 *extra, size_t extra_len, const u8 *bssid,
1582 const u8 *da, const u8 *key, u8 key_len, u8 key_idx); 1563 const u8 *da, const u8 *key, u8 key_len, u8 key_idx,
1564 u32 tx_flags);
1583void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, 1565void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
1584 const u8 *bssid, u16 stype, u16 reason, 1566 const u8 *bssid, u16 stype, u16 reason,
1585 bool send_frame, u8 *frame_buf); 1567 bool send_frame, u8 *frame_buf);
@@ -1596,7 +1578,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1596void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, 1578void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1597 const u8 *ssid, size_t ssid_len, 1579 const u8 *ssid, size_t ssid_len,
1598 const u8 *ie, size_t ie_len, 1580 const u8 *ie, size_t ie_len,
1599 u32 ratemask, bool directed, bool no_cck, 1581 u32 ratemask, bool directed, u32 tx_flags,
1600 struct ieee80211_channel *channel, bool scan); 1582 struct ieee80211_channel *channel, bool scan);
1601 1583
1602void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, 1584void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
@@ -1628,18 +1610,31 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
1628 1610
1629/* channel management */ 1611/* channel management */
1630void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan, 1612void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
1631 struct ieee80211_ht_operation *ht_oper, 1613 const struct ieee80211_ht_operation *ht_oper,
1632 struct cfg80211_chan_def *chandef); 1614 struct cfg80211_chan_def *chandef);
1633 1615
1634int __must_check 1616int __must_check
1635ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata, 1617ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
1636 const struct cfg80211_chan_def *chandef, 1618 const struct cfg80211_chan_def *chandef,
1637 enum ieee80211_chanctx_mode mode); 1619 enum ieee80211_chanctx_mode mode);
1620int __must_check
1621ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
1622 const struct cfg80211_chan_def *chandef,
1623 u32 *changed);
1638void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata); 1624void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata);
1639void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata); 1625void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata);
1626void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
1627 bool clear);
1640 1628
1641void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, 1629void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
1642 struct ieee80211_chanctx *chanctx); 1630 struct ieee80211_chanctx *chanctx);
1631void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
1632 struct ieee80211_chanctx *chanctx);
1633
1634void ieee80211_dfs_cac_timer(unsigned long data);
1635void ieee80211_dfs_cac_timer_work(struct work_struct *work);
1636void ieee80211_dfs_cac_cancel(struct ieee80211_local *local);
1637void ieee80211_dfs_radar_detected_work(struct work_struct *work);
1643 1638
1644#ifdef CONFIG_MAC80211_NOINLINE 1639#ifdef CONFIG_MAC80211_NOINLINE
1645#define debug_noinline noinline 1640#define debug_noinline noinline
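Editor's note: a recurring theme in the ieee80211_i.h hunk above is const-ifying the parsed-IE pointers, since they point into received frame data that must never be written through them. A tiny sketch of what the qualifier buys:

#include <stdio.h>

struct elems { const unsigned char *ssid; unsigned char ssid_len; };

/* Parsing just records pointers into the frame; const makes any
 * write through them a compile-time error. Layout is illustrative. */
static void parse(const unsigned char *frame, struct elems *e)
{
    e->ssid = frame + 2;   /* skip EID and length bytes */
    e->ssid_len = frame[1];
}

int main(void)
{
    const unsigned char frame[] = { 0x00, 4, 'm', 'e', 's', 'h' };
    struct elems e;
    parse(frame, &e);
    /* e.ssid[0] = 'X'; would now fail to compile */
    printf("%.*s\n", e.ssid_len, e.ssid);
    return 0;
}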
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 06fac2991d40..86c83084542a 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -78,8 +78,7 @@ void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
78 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER); 78 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER);
79} 79}
80 80
81static u32 ieee80211_idle_off(struct ieee80211_local *local, 81static u32 ieee80211_idle_off(struct ieee80211_local *local)
82 const char *reason)
83{ 82{
84 if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) 83 if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE))
85 return 0; 84 return 0;
@@ -99,110 +98,45 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local)
99 return IEEE80211_CONF_CHANGE_IDLE; 98 return IEEE80211_CONF_CHANGE_IDLE;
100} 99}
101 100
102static u32 __ieee80211_recalc_idle(struct ieee80211_local *local) 101void ieee80211_recalc_idle(struct ieee80211_local *local)
103{ 102{
104 struct ieee80211_sub_if_data *sdata; 103 bool working = false, scanning, active;
105 int count = 0;
106 bool working = false, scanning = false;
107 unsigned int led_trig_start = 0, led_trig_stop = 0; 104 unsigned int led_trig_start = 0, led_trig_stop = 0;
108 struct ieee80211_roc_work *roc; 105 struct ieee80211_roc_work *roc;
106 u32 change;
109 107
110#ifdef CONFIG_PROVE_LOCKING
111 WARN_ON(debug_locks && !lockdep_rtnl_is_held() &&
112 !lockdep_is_held(&local->iflist_mtx));
113#endif
114 lockdep_assert_held(&local->mtx); 108 lockdep_assert_held(&local->mtx);
115 109
116 list_for_each_entry(sdata, &local->interfaces, list) { 110 active = !list_empty(&local->chanctx_list);
117 if (!ieee80211_sdata_running(sdata)) {
118 sdata->vif.bss_conf.idle = true;
119 continue;
120 }
121
122 sdata->old_idle = sdata->vif.bss_conf.idle;
123
124 /* do not count disabled managed interfaces */
125 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
126 !sdata->u.mgd.associated &&
127 !sdata->u.mgd.auth_data &&
128 !sdata->u.mgd.assoc_data) {
129 sdata->vif.bss_conf.idle = true;
130 continue;
131 }
132 /* do not count unused IBSS interfaces */
133 if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
134 !sdata->u.ibss.ssid_len) {
135 sdata->vif.bss_conf.idle = true;
136 continue;
137 }
138
139 if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
140 continue;
141
142 /* count everything else */
143 sdata->vif.bss_conf.idle = false;
144 count++;
145 }
146 111
147 if (!local->ops->remain_on_channel) { 112 if (!local->ops->remain_on_channel) {
148 list_for_each_entry(roc, &local->roc_list, list) { 113 list_for_each_entry(roc, &local->roc_list, list) {
149 working = true; 114 working = true;
150 roc->sdata->vif.bss_conf.idle = false; 115 break;
151 } 116 }
152 } 117 }
153 118
154 sdata = rcu_dereference_protected(local->scan_sdata, 119 scanning = test_bit(SCAN_SW_SCANNING, &local->scanning) ||
155 lockdep_is_held(&local->mtx)); 120 test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning);
156 if (sdata && !(local->hw.flags & IEEE80211_HW_SCAN_WHILE_IDLE)) {
157 scanning = true;
158 sdata->vif.bss_conf.idle = false;
159 }
160
161 list_for_each_entry(sdata, &local->interfaces, list) {
162 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
163 sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
164 sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
165 continue;
166 if (sdata->old_idle == sdata->vif.bss_conf.idle)
167 continue;
168 if (!ieee80211_sdata_running(sdata))
169 continue;
170 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
171 }
172 121
173 if (working || scanning) 122 if (working || scanning)
174 led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_WORK; 123 led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_WORK;
175 else 124 else
176 led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_WORK; 125 led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_WORK;
177 126
178 if (count) 127 if (active)
179 led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED; 128 led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
180 else 129 else
181 led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED; 130 led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
182 131
183 ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop); 132 ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop);
184 133
185 if (working) 134 if (working || scanning || active)
186 return ieee80211_idle_off(local, "working"); 135 change = ieee80211_idle_off(local);
187 if (scanning)
188 return ieee80211_idle_off(local, "scanning");
189 if (!count)
190 return ieee80211_idle_on(local);
191 else 136 else
192 return ieee80211_idle_off(local, "in use"); 137 change = ieee80211_idle_on(local);
193 138 if (change)
194 return 0; 139 ieee80211_hw_config(local, change);
195}
196
197void ieee80211_recalc_idle(struct ieee80211_local *local)
198{
199 u32 chg;
200
201 mutex_lock(&local->iflist_mtx);
202 chg = __ieee80211_recalc_idle(local);
203 mutex_unlock(&local->iflist_mtx);
204 if (chg)
205 ieee80211_hw_config(local, chg);
206} 140}
207 141
208static int ieee80211_change_mtu(struct net_device *dev, int new_mtu) 142static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
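Editor's note: the rewritten ieee80211_recalc_idle() above no longer inspects every interface; with channel contexts, the radio is busy exactly when a context is active, remain-on-channel work is pending, or a software scan runs. The whole decision reduces to one predicate:

#include <stdbool.h>
#include <stdio.h>

/* the rewritten recalc boils down to this: idle unless a channel
 * context is active, ROC work is queued, or a scan is in progress */
static bool hw_is_idle(bool chanctx_active, bool roc_pending, bool scanning)
{
    return !(chanctx_active || roc_pending || scanning);
}

int main(void)
{
    printf("%d\n", hw_is_idle(false, false, false)); /* 1: go idle */
    printf("%d\n", hw_is_idle(true,  false, false)); /* 0: stay on */
    return 0;
}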
@@ -621,6 +555,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
621 goto err_del_interface; 555 goto err_del_interface;
622 } 556 }
623 557
558 drv_add_interface_debugfs(local, sdata);
559
624 if (sdata->vif.type == NL80211_IFTYPE_AP) { 560 if (sdata->vif.type == NL80211_IFTYPE_AP) {
625 local->fif_pspoll++; 561 local->fif_pspoll++;
626 local->fif_probe_req++; 562 local->fif_probe_req++;
@@ -694,10 +630,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
694 if (sdata->flags & IEEE80211_SDATA_PROMISC) 630 if (sdata->flags & IEEE80211_SDATA_PROMISC)
695 atomic_inc(&local->iff_promiscs); 631 atomic_inc(&local->iff_promiscs);
696 632
697 mutex_lock(&local->mtx);
698 hw_reconf_flags |= __ieee80211_recalc_idle(local);
699 mutex_unlock(&local->mtx);
700
701 if (coming_up) 633 if (coming_up)
702 local->open_count++; 634 local->open_count++;
703 635
@@ -748,6 +680,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
748 struct sk_buff *skb, *tmp; 680 struct sk_buff *skb, *tmp;
749 u32 hw_reconf_flags = 0; 681 u32 hw_reconf_flags = 0;
750 int i, flushed; 682 int i, flushed;
683 struct ps_data *ps;
751 684
752 clear_bit(SDATA_STATE_RUNNING, &sdata->state); 685 clear_bit(SDATA_STATE_RUNNING, &sdata->state);
753 686
@@ -817,6 +750,16 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
817 750
818 cancel_work_sync(&sdata->recalc_smps); 751 cancel_work_sync(&sdata->recalc_smps);
819 752
753 cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
754
755 if (sdata->wdev.cac_started) {
756 mutex_lock(&local->iflist_mtx);
757 ieee80211_vif_release_channel(sdata);
758 mutex_unlock(&local->iflist_mtx);
759 cfg80211_cac_event(sdata->dev, NL80211_RADAR_CAC_ABORTED,
760 GFP_KERNEL);
761 }
762
820 /* APs need special treatment */ 763 /* APs need special treatment */
821 if (sdata->vif.type == NL80211_IFTYPE_AP) { 764 if (sdata->vif.type == NL80211_IFTYPE_AP) {
822 struct ieee80211_sub_if_data *vlan, *tmpsdata; 765 struct ieee80211_sub_if_data *vlan, *tmpsdata;
@@ -826,6 +769,19 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
826 u.vlan.list) 769 u.vlan.list)
827 dev_close(vlan->dev); 770 dev_close(vlan->dev);
828 WARN_ON(!list_empty(&sdata->u.ap.vlans)); 771 WARN_ON(!list_empty(&sdata->u.ap.vlans));
772 } else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
773 /* remove all packets in parent bc_buf pointing to this dev */
774 ps = &sdata->bss->ps;
775
776 spin_lock_irqsave(&ps->bc_buf.lock, flags);
777 skb_queue_walk_safe(&ps->bc_buf, skb, tmp) {
778 if (skb->dev == sdata->dev) {
779 __skb_unlink(skb, &ps->bc_buf);
780 local->total_ps_buffered--;
781 ieee80211_free_txskb(&local->hw, skb);
782 }
783 }
784 spin_unlock_irqrestore(&ps->bc_buf.lock, flags);
829 } else if (sdata->vif.type == NL80211_IFTYPE_STATION) { 785 } else if (sdata->vif.type == NL80211_IFTYPE_STATION) {
830 ieee80211_mgd_stop(sdata); 786 ieee80211_mgd_stop(sdata);
831 } 787 }
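Editor's note: the new AP_VLAN branch walks the parent's broadcast buffer with skb_queue_walk_safe(), which stays valid when the current entry is unlinked mid-walk. The same delete-while-iterating idiom, sketched on a plain singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct node { int dev; struct node *next; };

/* delete-while-iterating: keep a pointer to the link that points at
 * the current node, so unlinking is one assignment and walking is safe */
static void drop_dev(struct node **head, int dev)
{
    struct node **pp = head;

    while (*pp) {
        struct node *cur = *pp;
        if (cur->dev == dev) {
            *pp = cur->next;   /* unlink before freeing */
            free(cur);
        } else {
            pp = &cur->next;
        }
    }
}

int main(void)
{
    struct node *head = NULL;
    for (int i = 0; i < 4; i++) {
        struct node *n = malloc(sizeof(*n));
        n->dev = i % 2;
        n->next = head;
        head = n;
    }
    drop_dev(&head, 1);
    for (struct node *n = head; n; n = n->next)
        printf("%d ", n->dev);   /* prints: 0 0 */
    printf("\n");
    return 0;
}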
@@ -882,16 +838,14 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
882 */ 838 */
883 ieee80211_free_keys(sdata); 839 ieee80211_free_keys(sdata);
884 840
841 drv_remove_interface_debugfs(local, sdata);
842
885 if (going_down) 843 if (going_down)
886 drv_remove_interface(local, sdata); 844 drv_remove_interface(local, sdata);
887 } 845 }
888 846
889 sdata->bss = NULL; 847 sdata->bss = NULL;
890 848
891 mutex_lock(&local->mtx);
892 hw_reconf_flags |= __ieee80211_recalc_idle(local);
893 mutex_unlock(&local->mtx);
894
895 ieee80211_recalc_ps(local, -1); 849 ieee80211_recalc_ps(local, -1);
896 850
897 if (local->open_count == 0) { 851 if (local->open_count == 0) {
@@ -1574,9 +1528,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1574 /* initialise type-independent data */ 1528 /* initialise type-independent data */
1575 sdata->wdev.wiphy = local->hw.wiphy; 1529 sdata->wdev.wiphy = local->hw.wiphy;
1576 sdata->local = local; 1530 sdata->local = local;
1577#ifdef CONFIG_INET
1578 sdata->arp_filter_state = true;
1579#endif
1580 1531
1581 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) 1532 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
1582 skb_queue_head_init(&sdata->fragments[i].skb_list); 1533 skb_queue_head_init(&sdata->fragments[i].skb_list);
@@ -1586,6 +1537,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1586 spin_lock_init(&sdata->cleanup_stations_lock); 1537 spin_lock_init(&sdata->cleanup_stations_lock);
1587 INIT_LIST_HEAD(&sdata->cleanup_stations); 1538 INIT_LIST_HEAD(&sdata->cleanup_stations);
1588 INIT_WORK(&sdata->cleanup_stations_wk, ieee80211_cleanup_sdata_stas_wk); 1539 INIT_WORK(&sdata->cleanup_stations_wk, ieee80211_cleanup_sdata_stas_wk);
1540 INIT_DELAYED_WORK(&sdata->dfs_cac_timer_work,
1541 ieee80211_dfs_cac_timer_work);
1589 1542
1590 for (i = 0; i < IEEE80211_NUM_BANDS; i++) { 1543 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
1591 struct ieee80211_supported_band *sband; 1544 struct ieee80211_supported_band *sband;
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 619c5d697999..ef252eb58c36 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -204,8 +204,11 @@ static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
204 if (idx >= 0 && idx < NUM_DEFAULT_KEYS) 204 if (idx >= 0 && idx < NUM_DEFAULT_KEYS)
205 key = key_mtx_dereference(sdata->local, sdata->keys[idx]); 205 key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
206 206
207 if (uni) 207 if (uni) {
208 rcu_assign_pointer(sdata->default_unicast_key, key); 208 rcu_assign_pointer(sdata->default_unicast_key, key);
209 drv_set_default_unicast_key(sdata->local, sdata, idx);
210 }
211
209 if (multi) 212 if (multi)
210 rcu_assign_pointer(sdata->default_multicast_key, key); 213 rcu_assign_pointer(sdata->default_multicast_key, key);
211 214
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 39cfe8f10ad2..f9747689d604 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -23,6 +23,7 @@
23#include <linux/inetdevice.h> 23#include <linux/inetdevice.h>
24#include <net/net_namespace.h> 24#include <net/net_namespace.h>
25#include <net/cfg80211.h> 25#include <net/cfg80211.h>
26#include <net/addrconf.h>
26 27
27#include "ieee80211_i.h" 28#include "ieee80211_i.h"
28#include "driver-ops.h" 29#include "driver-ops.h"
@@ -33,8 +34,6 @@
33#include "cfg.h" 34#include "cfg.h"
34#include "debugfs.h" 35#include "debugfs.h"
35 36
36static struct lock_class_key ieee80211_rx_skb_queue_class;
37
38void ieee80211_configure_filter(struct ieee80211_local *local) 37void ieee80211_configure_filter(struct ieee80211_local *local)
39{ 38{
40 u64 mc; 39 u64 mc;
@@ -349,27 +348,19 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
349 348
350 /* Copy the addresses to the bss_conf list */ 349 /* Copy the addresses to the bss_conf list */
351 ifa = idev->ifa_list; 350 ifa = idev->ifa_list;
352 while (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN && ifa) { 351 while (ifa) {
353 bss_conf->arp_addr_list[c] = ifa->ifa_address; 352 if (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN)
353 bss_conf->arp_addr_list[c] = ifa->ifa_address;
354 ifa = ifa->ifa_next; 354 ifa = ifa->ifa_next;
355 c++; 355 c++;
356 } 356 }
357 357
358 /* If not all addresses fit the list, disable filtering */
359 if (ifa) {
360 sdata->arp_filter_state = false;
361 c = 0;
362 } else {
363 sdata->arp_filter_state = true;
364 }
365 bss_conf->arp_addr_cnt = c; 358 bss_conf->arp_addr_cnt = c;
366 359
367 /* Configure driver only if associated (which also implies it is up) */ 360 /* Configure driver only if associated (which also implies it is up) */
368 if (ifmgd->associated) { 361 if (ifmgd->associated)
369 bss_conf->arp_filter_enabled = sdata->arp_filter_state;
370 ieee80211_bss_info_change_notify(sdata, 362 ieee80211_bss_info_change_notify(sdata,
371 BSS_CHANGED_ARP_FILTER); 363 BSS_CHANGED_ARP_FILTER);
372 }
373 364
374 mutex_unlock(&ifmgd->mtx); 365 mutex_unlock(&ifmgd->mtx);
375 366
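Editor's note: the reworked IPv4 notifier keeps counting addresses past the list's capacity while copying only those that fit, so an arp_addr_cnt larger than the array length tells the driver the list was truncated. A sketch of that count-all, copy-bounded pattern, with a local constant standing in for IEEE80211_BSS_ARP_ADDR_LIST_LEN:

#include <stdio.h>

#define ADDR_LIST_LEN 4

/* copy at most ADDR_LIST_LEN entries but report the true total, so a
 * caller detects truncation by comparing the count to the capacity */
static int fill_addr_list(unsigned int dst[ADDR_LIST_LEN],
                          const unsigned int *src, int n)
{
    int c = 0;

    for (int i = 0; i < n; i++, c++)
        if (c < ADDR_LIST_LEN)
            dst[c] = src[i];
    return c;
}

int main(void)
{
    unsigned int have[6] = { 1, 2, 3, 4, 5, 6 }, list[ADDR_LIST_LEN];
    int cnt = fill_addr_list(list, have, 6);
    printf("count=%d truncated=%d\n", cnt, cnt > ADDR_LIST_LEN);
    return 0;
}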
@@ -377,6 +368,37 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
377} 368}
378#endif 369#endif
379 370
371#if IS_ENABLED(CONFIG_IPV6)
372static int ieee80211_ifa6_changed(struct notifier_block *nb,
373 unsigned long data, void *arg)
374{
375 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)arg;
376 struct inet6_dev *idev = ifa->idev;
377 struct net_device *ndev = ifa->idev->dev;
378 struct ieee80211_local *local =
379 container_of(nb, struct ieee80211_local, ifa6_notifier);
380 struct wireless_dev *wdev = ndev->ieee80211_ptr;
381 struct ieee80211_sub_if_data *sdata;
382
383 /* Make sure it's our interface that got changed */
384 if (!wdev || wdev->wiphy != local->hw.wiphy)
385 return NOTIFY_DONE;
386
387 sdata = IEEE80211_DEV_TO_SUB_IF(ndev);
388
389 /*
390 * For now only support station mode. This is mostly because
391 * doing AP would have to handle AP_VLAN in some way ...
392 */
393 if (sdata->vif.type != NL80211_IFTYPE_STATION)
394 return NOTIFY_DONE;
395
396 drv_ipv6_addr_change(local, sdata, idev);
397
398 return NOTIFY_DONE;
399}
400#endif
401
380static int ieee80211_napi_poll(struct napi_struct *napi, int budget) 402static int ieee80211_napi_poll(struct napi_struct *napi, int budget)
381{ 403{
382 struct ieee80211_local *local = 404 struct ieee80211_local *local =
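Editor's note: the new IPv6 notifier above recovers its ieee80211_local from the embedded notifier_block via container_of(), the usual way a callback handed only a member pointer finds the enclosing object. A self-contained version of the macro and its use:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier_block { int priority; };

struct local {
    int id;
    struct notifier_block ifa6_notifier;  /* embedded member */
};

/* a callback handed only &local->ifa6_notifier steps back to local */
static int callback(struct notifier_block *nb)
{
    struct local *l = container_of(nb, struct local, ifa6_notifier);
    return l->id;
}

int main(void)
{
    struct local l = { .id = 42 };
    printf("%d\n", callback(&l.ifa6_notifier));
    return 0;
}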
@@ -479,6 +501,11 @@ static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
479 }, 501 },
480}; 502};
481 503
504static const u8 extended_capabilities[] = {
505 0, 0, 0, 0, 0, 0, 0,
506 WLAN_EXT_CAPA8_OPMODE_NOTIF,
507};
508
482struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, 509struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
483 const struct ieee80211_ops *ops) 510 const struct ieee80211_ops *ops)
484{ 511{
@@ -535,14 +562,17 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
535 WIPHY_FLAG_REPORTS_OBSS | 562 WIPHY_FLAG_REPORTS_OBSS |
536 WIPHY_FLAG_OFFCHAN_TX; 563 WIPHY_FLAG_OFFCHAN_TX;
537 564
565 wiphy->extended_capabilities = extended_capabilities;
566 wiphy->extended_capabilities_mask = extended_capabilities;
567 wiphy->extended_capabilities_len = ARRAY_SIZE(extended_capabilities);
568
538 if (ops->remain_on_channel) 569 if (ops->remain_on_channel)
539 wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; 570 wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
540 571
541 wiphy->features |= NL80211_FEATURE_SK_TX_STATUS | 572 wiphy->features |= NL80211_FEATURE_SK_TX_STATUS |
542 NL80211_FEATURE_SAE | 573 NL80211_FEATURE_SAE |
543 NL80211_FEATURE_HT_IBSS | 574 NL80211_FEATURE_HT_IBSS |
544 NL80211_FEATURE_VIF_TXPOWER | 575 NL80211_FEATURE_VIF_TXPOWER;
545 NL80211_FEATURE_FULL_AP_CLIENT_STATE;
546 576
547 if (!ops->hw_scan) 577 if (!ops->hw_scan)
548 wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN | 578 wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN |
@@ -589,25 +619,19 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
589 619
590 mutex_init(&local->key_mtx); 620 mutex_init(&local->key_mtx);
591 spin_lock_init(&local->filter_lock); 621 spin_lock_init(&local->filter_lock);
622 spin_lock_init(&local->rx_path_lock);
592 spin_lock_init(&local->queue_stop_reason_lock); 623 spin_lock_init(&local->queue_stop_reason_lock);
593 624
594 INIT_LIST_HEAD(&local->chanctx_list); 625 INIT_LIST_HEAD(&local->chanctx_list);
595 mutex_init(&local->chanctx_mtx); 626 mutex_init(&local->chanctx_mtx);
596 627
597 /*
598 * The rx_skb_queue is only accessed from tasklets,
599 * but other SKB queues are used from within IRQ
600 * context. Therefore, this one needs a different
601 * locking class so our direct, non-irq-safe use of
602 * the queue's lock doesn't throw lockdep warnings.
603 */
604 skb_queue_head_init_class(&local->rx_skb_queue,
605 &ieee80211_rx_skb_queue_class);
606
607 INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work); 628 INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
608 629
609 INIT_WORK(&local->restart_work, ieee80211_restart_work); 630 INIT_WORK(&local->restart_work, ieee80211_restart_work);
610 631
632 INIT_WORK(&local->radar_detected_work,
633 ieee80211_dfs_radar_detected_work);
634
611 INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter); 635 INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
612 local->smps_mode = IEEE80211_SMPS_OFF; 636 local->smps_mode = IEEE80211_SMPS_OFF;
613 637
@@ -683,9 +707,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
683 return -EINVAL; 707 return -EINVAL;
684#endif 708#endif
685 709
686 if ((hw->flags & IEEE80211_HW_SCAN_WHILE_IDLE) && !local->ops->hw_scan)
687 return -EINVAL;
688
689 if (!local->use_chanctx) { 710 if (!local->use_chanctx) {
690 for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) { 711 for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) {
691 const struct ieee80211_iface_combination *comb; 712 const struct ieee80211_iface_combination *comb;
@@ -703,6 +724,16 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
703 */ 724 */
704 if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_WDS)) 725 if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_WDS))
705 return -EINVAL; 726 return -EINVAL;
727
728 /* DFS currently not supported with channel context drivers */
729 for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) {
730 const struct ieee80211_iface_combination *comb;
731
732 comb = &local->hw.wiphy->iface_combinations[i];
733
734 if (comb->radar_detect_widths)
735 return -EINVAL;
736 }
706 } 737 }
707 738
708 /* Only HW csum features are currently compatible with mac80211 */ 739 /* Only HW csum features are currently compatible with mac80211 */
@@ -985,12 +1016,25 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
985 goto fail_ifa; 1016 goto fail_ifa;
986#endif 1017#endif
987 1018
1019#if IS_ENABLED(CONFIG_IPV6)
1020 local->ifa6_notifier.notifier_call = ieee80211_ifa6_changed;
1021 result = register_inet6addr_notifier(&local->ifa6_notifier);
1022 if (result)
1023 goto fail_ifa6;
1024#endif
1025
988 netif_napi_add(&local->napi_dev, &local->napi, ieee80211_napi_poll, 1026 netif_napi_add(&local->napi_dev, &local->napi, ieee80211_napi_poll,
989 local->hw.napi_weight); 1027 local->hw.napi_weight);
990 1028
991 return 0; 1029 return 0;
992 1030
1031#if IS_ENABLED(CONFIG_IPV6)
1032 fail_ifa6:
993#ifdef CONFIG_INET 1033#ifdef CONFIG_INET
1034 unregister_inetaddr_notifier(&local->ifa_notifier);
1035#endif
1036#endif
1037#if defined(CONFIG_INET) || defined(CONFIG_IPV6)
994 fail_ifa: 1038 fail_ifa:
995 pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY, 1039 pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
996 &local->network_latency_notifier); 1040 &local->network_latency_notifier);
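Editor's note: the added fail_ifa6 label extends the function's goto-based unwinding; each failure point jumps past its own cleanup and falls through the teardown of everything registered before it, in reverse order. The canonical shape, sketched with stub register functions:

#include <stdio.h>

static int reg_a(void) { return 0; }
static int reg_b(void) { return -1; }       /* pretend this fails */
static void unreg_a(void) { puts("unreg a"); }

/* register in order; on failure fall through the labels so each
 * step undoes only what was set up before it, in reverse order */
static int setup(void)
{
    int err;

    err = reg_a();
    if (err)
        goto fail_a;
    err = reg_b();
    if (err)
        goto fail_b;
    return 0;

fail_b:
    unreg_a();
fail_a:
    return err;
}

int main(void)
{
    printf("setup: %d\n", setup());
    return 0;
}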
@@ -1026,6 +1070,9 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1026#ifdef CONFIG_INET 1070#ifdef CONFIG_INET
1027 unregister_inetaddr_notifier(&local->ifa_notifier); 1071 unregister_inetaddr_notifier(&local->ifa_notifier);
1028#endif 1072#endif
1073#if IS_ENABLED(CONFIG_IPV6)
1074 unregister_inet6addr_notifier(&local->ifa6_notifier);
1075#endif
1029 1076
1030 rtnl_lock(); 1077 rtnl_lock();
1031 1078
@@ -1049,7 +1096,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1049 wiphy_warn(local->hw.wiphy, "skb_queue not empty\n"); 1096 wiphy_warn(local->hw.wiphy, "skb_queue not empty\n");
1050 skb_queue_purge(&local->skb_queue); 1097 skb_queue_purge(&local->skb_queue);
1051 skb_queue_purge(&local->skb_queue_unreliable); 1098 skb_queue_purge(&local->skb_queue_unreliable);
1052 skb_queue_purge(&local->rx_skb_queue);
1053 1099
1054 destroy_workqueue(local->workqueue); 1100 destroy_workqueue(local->workqueue);
1055 wiphy_unregister(local->hw.wiphy); 1101 wiphy_unregister(local->hw.wiphy);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 694e27376afa..a77d40ed4e61 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -149,6 +149,31 @@ u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
149 return changed; 149 return changed;
150} 150}
151 151
152/*
153 * mesh_sta_cleanup - clean up any mesh sta state
154 *
155 * @sta: mesh sta to clean up.
156 */
157void mesh_sta_cleanup(struct sta_info *sta)
158{
159 struct ieee80211_sub_if_data *sdata = sta->sdata;
160 u32 changed;
161
162 /*
163 * maybe userspace handles peer allocation and peering, but in either
164 * case the beacon is still generated by the kernel and we might need
165 * an update.
166 */
167 changed = mesh_accept_plinks_update(sdata);
168 if (sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) {
169 changed |= mesh_plink_deactivate(sta);
170 del_timer_sync(&sta->plink_timer);
171 }
172
173 if (changed)
174 ieee80211_mbss_info_change_notify(sdata, changed);
175}
176
152int mesh_rmc_init(struct ieee80211_sub_if_data *sdata) 177int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
153{ 178{
154 int i; 179 int i;
@@ -261,6 +286,9 @@ mesh_add_meshconf_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
261 *pos = IEEE80211_MESHCONF_CAPAB_FORWARDING; 286 *pos = IEEE80211_MESHCONF_CAPAB_FORWARDING;
262 *pos |= ifmsh->accepting_plinks ? 287 *pos |= ifmsh->accepting_plinks ?
263 IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; 288 IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
289 /* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */
290 *pos |= ifmsh->ps_peers_deep_sleep ?
291 IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00;
264 *pos++ |= ifmsh->adjusting_tbtt ? 292 *pos++ |= ifmsh->adjusting_tbtt ?
265 IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00; 293 IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00;
266 *pos++ = 0x00; 294 *pos++ = 0x00;
@@ -286,6 +314,29 @@ mesh_add_meshid_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
286 return 0; 314 return 0;
287} 315}
288 316
317int mesh_add_awake_window_ie(struct sk_buff *skb,
318 struct ieee80211_sub_if_data *sdata)
319{
320 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
321 u8 *pos;
322
323 /* see IEEE802.11-2012 13.14.6 */
324 if (ifmsh->ps_peers_light_sleep == 0 &&
325 ifmsh->ps_peers_deep_sleep == 0 &&
326 ifmsh->nonpeer_pm == NL80211_MESH_POWER_ACTIVE)
327 return 0;
328
329 if (skb_tailroom(skb) < 4)
330 return -ENOMEM;
331
332 pos = skb_put(skb, 2 + 2);
333 *pos++ = WLAN_EID_MESH_AWAKE_WINDOW;
334 *pos++ = 2;
335 put_unaligned_le16(ifmsh->mshcfg.dot11MeshAwakeWindowDuration, pos);
336
337 return 0;
338}
339
289int 340int
290mesh_add_vendor_ies(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata) 341mesh_add_vendor_ies(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
291{ 342{
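Editor's note: mesh_add_awake_window_ie() above emits a standard EID/length/value element, checking tailroom first and writing the 16-bit window with put_unaligned_le16() because the payload offset carries no alignment guarantee. The same emission into a plain buffer; the element ID here is illustrative:

#include <stdint.h>
#include <stdio.h>

#define EID_MESH_AWAKE_WINDOW 0x77   /* illustrative element ID */

/* write one EID/len/value element; the u16 payload goes out
 * little-endian one byte at a time, so alignment never matters */
static int put_awake_window(uint8_t *buf, int room, uint16_t duration)
{
    if (room < 4)
        return -1;                    /* mirrors the -ENOMEM check */
    buf[0] = EID_MESH_AWAKE_WINDOW;
    buf[1] = 2;                       /* payload length */
    buf[2] = duration & 0xff;
    buf[3] = duration >> 8;
    return 4;
}

int main(void)
{
    uint8_t ie[4];
    put_awake_window(ie, sizeof(ie), 0x0102);
    printf("%02x %02x %02x %02x\n", ie[0], ie[1], ie[2], ie[3]);
    return 0;
}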
@@ -342,8 +393,6 @@ mesh_add_rsn_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
342int mesh_add_ds_params_ie(struct sk_buff *skb, 393int mesh_add_ds_params_ie(struct sk_buff *skb,
343 struct ieee80211_sub_if_data *sdata) 394 struct ieee80211_sub_if_data *sdata)
344{ 395{
345 struct ieee80211_local *local = sdata->local;
346 struct ieee80211_supported_band *sband;
347 struct ieee80211_chanctx_conf *chanctx_conf; 396 struct ieee80211_chanctx_conf *chanctx_conf;
348 struct ieee80211_channel *chan; 397 struct ieee80211_channel *chan;
349 u8 *pos; 398 u8 *pos;
@@ -360,13 +409,10 @@ int mesh_add_ds_params_ie(struct sk_buff *skb,
360 chan = chanctx_conf->def.chan; 409 chan = chanctx_conf->def.chan;
361 rcu_read_unlock(); 410 rcu_read_unlock();
362 411
363 sband = local->hw.wiphy->bands[chan->band]; 412 pos = skb_put(skb, 2 + 1);
364 if (sband->band == IEEE80211_BAND_2GHZ) { 413 *pos++ = WLAN_EID_DS_PARAMS;
365 pos = skb_put(skb, 2 + 1); 414 *pos++ = 1;
366 *pos++ = WLAN_EID_DS_PARAMS; 415 *pos++ = ieee80211_frequency_to_channel(chan->center_freq);
367 *pos++ = 1;
368 *pos++ = ieee80211_frequency_to_channel(chan->center_freq);
369 }
370 416
371 return 0; 417 return 0;
372} 418}
@@ -547,7 +593,7 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
547 mesh_path_expire(sdata); 593 mesh_path_expire(sdata);
548 594
549 changed = mesh_accept_plinks_update(sdata); 595 changed = mesh_accept_plinks_update(sdata);
550 ieee80211_bss_info_change_notify(sdata, changed); 596 ieee80211_mbss_info_change_notify(sdata, changed);
551 597
552 mod_timer(&ifmsh->housekeeping_timer, 598 mod_timer(&ifmsh->housekeeping_timer,
553 round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL)); 599 round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
@@ -598,7 +644,140 @@ void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata)
598} 644}
599#endif 645#endif
600 646
601void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) 647static int
648ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
649{
650 struct beacon_data *bcn;
651 int head_len, tail_len;
652 struct sk_buff *skb;
653 struct ieee80211_mgmt *mgmt;
654 struct ieee80211_chanctx_conf *chanctx_conf;
655 enum ieee80211_band band;
656 u8 *pos;
657 struct ieee80211_sub_if_data *sdata;
658 int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) +
659 sizeof(mgmt->u.beacon);
660
661 sdata = container_of(ifmsh, struct ieee80211_sub_if_data, u.mesh);
662 rcu_read_lock();
663 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
664 band = chanctx_conf->def.chan->band;
665 rcu_read_unlock();
666
667 head_len = hdr_len +
668 2 + /* NULL SSID */
669 2 + 8 + /* supported rates */
670 2 + 3; /* DS params */
671 tail_len = 2 + (IEEE80211_MAX_SUPP_RATES - 8) +
672 2 + sizeof(struct ieee80211_ht_cap) +
673 2 + sizeof(struct ieee80211_ht_operation) +
674 2 + ifmsh->mesh_id_len +
675 2 + sizeof(struct ieee80211_meshconf_ie) +
676 2 + sizeof(__le16) + /* awake window */
677 ifmsh->ie_len;
678
679 bcn = kzalloc(sizeof(*bcn) + head_len + tail_len, GFP_KERNEL);
680 /* need an skb for IE builders to operate on */
681 skb = dev_alloc_skb(max(head_len, tail_len));
682
683 if (!bcn || !skb)
684 goto out_free;
685
686 /*
687 * pointers go into the block we allocated,
688 * memory is | beacon_data | head | tail |
689 */
690 bcn->head = ((u8 *) bcn) + sizeof(*bcn);
691
692 /* fill in the head */
693 mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
694 memset(mgmt, 0, hdr_len);
695 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
696 IEEE80211_STYPE_BEACON);
697 eth_broadcast_addr(mgmt->da);
698 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
699 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
700 ieee80211_mps_set_frame_flags(sdata, NULL, (void *) mgmt);
701 mgmt->u.beacon.beacon_int =
702 cpu_to_le16(sdata->vif.bss_conf.beacon_int);
703 mgmt->u.beacon.capab_info |= cpu_to_le16(
704 sdata->u.mesh.security ? WLAN_CAPABILITY_PRIVACY : 0);
705
706 pos = skb_put(skb, 2);
707 *pos++ = WLAN_EID_SSID;
708 *pos++ = 0x0;
709
710 if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
711 mesh_add_ds_params_ie(skb, sdata))
712 goto out_free;
713
714 bcn->head_len = skb->len;
715 memcpy(bcn->head, skb->data, bcn->head_len);
716
717 /* now the tail */
718 skb_trim(skb, 0);
719 bcn->tail = bcn->head + bcn->head_len;
720
721 if (ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
722 mesh_add_rsn_ie(skb, sdata) ||
723 mesh_add_ht_cap_ie(skb, sdata) ||
724 mesh_add_ht_oper_ie(skb, sdata) ||
725 mesh_add_meshid_ie(skb, sdata) ||
726 mesh_add_meshconf_ie(skb, sdata) ||
727 mesh_add_awake_window_ie(skb, sdata) ||
728 mesh_add_vendor_ies(skb, sdata))
729 goto out_free;
730
731 bcn->tail_len = skb->len;
732 memcpy(bcn->tail, skb->data, bcn->tail_len);
733
734 dev_kfree_skb(skb);
735 rcu_assign_pointer(ifmsh->beacon, bcn);
736 return 0;
737out_free:
738 kfree(bcn);
739 dev_kfree_skb(skb);
740 return -ENOMEM;
741}
742
743static int
744ieee80211_mesh_rebuild_beacon(struct ieee80211_if_mesh *ifmsh)
745{
746 struct ieee80211_sub_if_data *sdata;
747 struct beacon_data *old_bcn;
748 int ret;
749 sdata = container_of(ifmsh, struct ieee80211_sub_if_data, u.mesh);
750
751 mutex_lock(&ifmsh->mtx);
752
753 old_bcn = rcu_dereference_protected(ifmsh->beacon,
754 lockdep_is_held(&ifmsh->mtx));
755 ret = ieee80211_mesh_build_beacon(ifmsh);
756 if (ret)
757 /* just reuse old beacon */
758 goto out;
759
760 if (old_bcn)
761 kfree_rcu(old_bcn, rcu_head);
762out:
763 mutex_unlock(&ifmsh->mtx);
764 return ret;
765}
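ieee80211_mesh_rebuild_beacon() is the usual RCU publish-then-reclaim pattern: build the replacement off to the side, publish it with rcu_assign_pointer() under the mutex, and hand the old beacon to kfree_rcu() so it is freed only after all readers have left their read-side sections. A rough model of the swap, where atomic_exchange() stands in for the assignment and the immediate free() stands in for kfree_rcu() (safe here only because the sketch is single-threaded):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct beacon_data { int head_len, tail_len; };

    static _Atomic(struct beacon_data *) beacon; /* the published pointer */

    static int rebuild_beacon(void)
    {
        struct beacon_data *new_bcn = calloc(1, sizeof(*new_bcn));
        if (!new_bcn)
            return -1; /* keep using the old beacon, as the patch does */
        struct beacon_data *old = atomic_exchange(&beacon, new_bcn);
        free(old); /* kfree_rcu() in the real code: deferred past readers */
        return 0;
    }

    int main(void)
    {
        rebuild_beacon();
        rebuild_beacon();
        free(atomic_exchange(&beacon, NULL));
        return 0;
    }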
766
767void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata,
768 u32 changed)
769{
770 if (sdata->vif.bss_conf.enable_beacon &&
771 (changed & (BSS_CHANGED_BEACON |
772 BSS_CHANGED_HT |
773 BSS_CHANGED_BASIC_RATES |
774 BSS_CHANGED_BEACON_INT)))
775 if (ieee80211_mesh_rebuild_beacon(&sdata->u.mesh))
776 return;
777 ieee80211_bss_info_change_notify(sdata, changed);
778}
779
780int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
602{ 781{
603 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 782 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
604 struct ieee80211_local *local = sdata->local; 783 struct ieee80211_local *local = sdata->local;
@@ -629,20 +808,24 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
629 sdata->vif.bss_conf.basic_rates = 808 sdata->vif.bss_conf.basic_rates =
630 ieee80211_mandatory_rates(local, band); 809 ieee80211_mandatory_rates(local, band);
631 810
632 if (band == IEEE80211_BAND_5GHZ) { 811 changed |= ieee80211_mps_local_status_update(sdata);
633 sdata->vif.bss_conf.use_short_slot = true; 812
634 changed |= BSS_CHANGED_ERP_SLOT; 813 if (ieee80211_mesh_build_beacon(ifmsh)) {
814 ieee80211_stop_mesh(sdata);
815 return -ENOMEM;
635 } 816 }
636 817
637 ieee80211_bss_info_change_notify(sdata, changed); 818 ieee80211_bss_info_change_notify(sdata, changed);
638 819
639 netif_carrier_on(sdata->dev); 820 netif_carrier_on(sdata->dev);
821 return 0;
640} 822}
641 823
642void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) 824void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
643{ 825{
644 struct ieee80211_local *local = sdata->local; 826 struct ieee80211_local *local = sdata->local;
645 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 827 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
828 struct beacon_data *bcn;
646 829
647 netif_carrier_off(sdata->dev); 830 netif_carrier_off(sdata->dev);
648 831
@@ -651,11 +834,21 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
651 sdata->vif.bss_conf.enable_beacon = false; 834 sdata->vif.bss_conf.enable_beacon = false;
652 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); 835 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
653 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); 836 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
837 mutex_lock(&ifmsh->mtx);
838 bcn = rcu_dereference_protected(ifmsh->beacon,
839 lockdep_is_held(&ifmsh->mtx));
840 rcu_assign_pointer(ifmsh->beacon, NULL);
841 kfree_rcu(bcn, rcu_head);
842 mutex_unlock(&ifmsh->mtx);
654 843
655 /* flush STAs and mpaths on this iface */ 844 /* flush STAs and mpaths on this iface */
656 sta_info_flush(sdata); 845 sta_info_flush(sdata);
657 mesh_path_flush_by_iface(sdata); 846 mesh_path_flush_by_iface(sdata);
658 847
848 /* free all potentially still buffered group-addressed frames */
849 local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf);
850 skb_queue_purge(&ifmsh->ps.bc_buf);
851
659 del_timer_sync(&sdata->u.mesh.housekeeping_timer); 852 del_timer_sync(&sdata->u.mesh.housekeeping_timer);
660 del_timer_sync(&sdata->u.mesh.mesh_path_root_timer); 853 del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
661 del_timer_sync(&sdata->u.mesh.mesh_path_timer); 854 del_timer_sync(&sdata->u.mesh.mesh_path_timer);
@@ -675,6 +868,63 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
675 sdata->u.mesh.timers_running = 0; 868 sdata->u.mesh.timers_running = 0;
676} 869}
677 870
871static void
872ieee80211_mesh_rx_probe_req(struct ieee80211_sub_if_data *sdata,
873 struct ieee80211_mgmt *mgmt, size_t len)
874{
875 struct ieee80211_local *local = sdata->local;
876 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
877 struct sk_buff *presp;
878 struct beacon_data *bcn;
879 struct ieee80211_mgmt *hdr;
880 struct ieee802_11_elems elems;
881 size_t baselen;
882 u8 *pos, *end;
883
884 end = ((u8 *) mgmt) + len;
885 pos = mgmt->u.probe_req.variable;
886 baselen = (u8 *) pos - (u8 *) mgmt;
887 if (baselen > len)
888 return;
889
890 ieee802_11_parse_elems(pos, len - baselen, &elems);
891
892 /* 802.11-2012 10.1.4.3.2 */
893 if ((!ether_addr_equal(mgmt->da, sdata->vif.addr) &&
894 !is_broadcast_ether_addr(mgmt->da)) ||
895 elems.ssid_len != 0)
896 return;
897
898 if (elems.mesh_id_len != 0 &&
899 (elems.mesh_id_len != ifmsh->mesh_id_len ||
900 memcmp(elems.mesh_id, ifmsh->mesh_id, ifmsh->mesh_id_len)))
901 return;
902
903 rcu_read_lock();
904 bcn = rcu_dereference(ifmsh->beacon);
905
906 if (!bcn)
907 goto out;
908
909 presp = dev_alloc_skb(local->tx_headroom +
910 bcn->head_len + bcn->tail_len);
911 if (!presp)
912 goto out;
913
914 skb_reserve(presp, local->tx_headroom);
915 memcpy(skb_put(presp, bcn->head_len), bcn->head, bcn->head_len);
916 memcpy(skb_put(presp, bcn->tail_len), bcn->tail, bcn->tail_len);
917 hdr = (struct ieee80211_mgmt *) presp->data;
918 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
919 IEEE80211_STYPE_PROBE_RESP);
920 memcpy(hdr->da, mgmt->sa, ETH_ALEN);
921 mpl_dbg(sdata, "sending probe resp. to %pM\n", hdr->da);
922 IEEE80211_SKB_CB(presp)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
923 ieee80211_tx_skb(sdata, presp);
924out:
925 rcu_read_unlock();
926}
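The checks above encode the probe-request acceptance rules cited from 802.11-2012 10.1.4.3.2: the DA must be our address or broadcast, only the wildcard (zero-length) SSID is answered, and a Mesh ID element, when present, must match ours exactly. The same predicate in isolation, with a hypothetical parsed-elements struct standing in for ieee802_11_elems:

    #include <stdbool.h>
    #include <string.h>

    struct probe_elems {              /* hypothetical parsed view */
        size_t ssid_len;
        const unsigned char *mesh_id;
        size_t mesh_id_len;
    };

    static bool mesh_probe_req_matches(const unsigned char da[6],
                                       const unsigned char our_addr[6],
                                       const struct probe_elems *e,
                                       const unsigned char *our_mesh_id,
                                       size_t our_mesh_id_len)
    {
        static const unsigned char bcast[6] = {0xff,0xff,0xff,0xff,0xff,0xff};

        if (memcmp(da, our_addr, 6) && memcmp(da, bcast, 6))
            return false;             /* not for us, not broadcast */
        if (e->ssid_len != 0)
            return false;             /* only the wildcard SSID is answered */
        if (e->mesh_id_len != 0 &&
            (e->mesh_id_len != our_mesh_id_len ||
             memcmp(e->mesh_id, our_mesh_id, our_mesh_id_len)))
            return false;             /* a present Mesh ID must match ours */
        return true;
    }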
927
678static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, 928static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
679 u16 stype, 929 u16 stype,
680 struct ieee80211_mgmt *mgmt, 930 struct ieee80211_mgmt *mgmt,
@@ -764,6 +1014,9 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
764 ieee80211_mesh_rx_bcn_presp(sdata, stype, mgmt, skb->len, 1014 ieee80211_mesh_rx_bcn_presp(sdata, stype, mgmt, skb->len,
765 rx_status); 1015 rx_status);
766 break; 1016 break;
1017 case IEEE80211_STYPE_PROBE_REQ:
1018 ieee80211_mesh_rx_probe_req(sdata, mgmt, skb->len);
1019 break;
767 case IEEE80211_STYPE_ACTION: 1020 case IEEE80211_STYPE_ACTION:
768 ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status); 1021 ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status);
769 break; 1022 break;
@@ -833,8 +1086,11 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
833 ieee80211_mesh_path_root_timer, 1086 ieee80211_mesh_path_root_timer,
834 (unsigned long) sdata); 1087 (unsigned long) sdata);
835 INIT_LIST_HEAD(&ifmsh->preq_queue.list); 1088 INIT_LIST_HEAD(&ifmsh->preq_queue.list);
1089 skb_queue_head_init(&ifmsh->ps.bc_buf);
836 spin_lock_init(&ifmsh->mesh_preq_queue_lock); 1090 spin_lock_init(&ifmsh->mesh_preq_queue_lock);
837 spin_lock_init(&ifmsh->sync_offset_lock); 1091 spin_lock_init(&ifmsh->sync_offset_lock);
1092 RCU_INIT_POINTER(ifmsh->beacon, NULL);
1093 mutex_init(&ifmsh->mtx);
838 1094
839 sdata->vif.bss_conf.bssid = zero_addr; 1095 sdata->vif.bss_conf.bssid = zero_addr;
840} 1096}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index aff301544c7f..1a1da877b1d2 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -222,6 +222,8 @@ int mesh_add_meshid_ie(struct sk_buff *skb,
222 struct ieee80211_sub_if_data *sdata); 222 struct ieee80211_sub_if_data *sdata);
223int mesh_add_rsn_ie(struct sk_buff *skb, 223int mesh_add_rsn_ie(struct sk_buff *skb,
224 struct ieee80211_sub_if_data *sdata); 224 struct ieee80211_sub_if_data *sdata);
225int mesh_add_awake_window_ie(struct sk_buff *skb,
226 struct ieee80211_sub_if_data *sdata);
225int mesh_add_vendor_ies(struct sk_buff *skb, 227int mesh_add_vendor_ies(struct sk_buff *skb,
226 struct ieee80211_sub_if_data *sdata); 228 struct ieee80211_sub_if_data *sdata);
227int mesh_add_ds_params_ie(struct sk_buff *skb, 229int mesh_add_ds_params_ie(struct sk_buff *skb,
@@ -237,10 +239,28 @@ void ieee80211s_update_metric(struct ieee80211_local *local,
237 struct sta_info *sta, struct sk_buff *skb); 239 struct sta_info *sta, struct sk_buff *skb);
238void ieee80211s_stop(void); 240void ieee80211s_stop(void);
239void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); 241void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
240void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); 242int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
241void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); 243void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
242void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); 244void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);
243const struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method); 245const struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method);
246/* wrapper for ieee80211_bss_info_change_notify() */
247void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata,
248 u32 changed);
249
250/* mesh power save */
251u32 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata);
252u32 ieee80211_mps_set_sta_local_pm(struct sta_info *sta,
253 enum nl80211_mesh_power_mode pm);
254void ieee80211_mps_set_frame_flags(struct ieee80211_sub_if_data *sdata,
255 struct sta_info *sta,
256 struct ieee80211_hdr *hdr);
257void ieee80211_mps_sta_status_update(struct sta_info *sta);
258void ieee80211_mps_rx_h_sta_process(struct sta_info *sta,
259 struct ieee80211_hdr *hdr);
260void ieee80211_mpsp_trigger_process(u8 *qc, struct sta_info *sta,
261 bool tx, bool acked);
262void ieee80211_mps_frame_release(struct sta_info *sta,
263 struct ieee802_11_elems *elems);
244 264
245/* Mesh paths */ 265/* Mesh paths */
246int mesh_nexthop_lookup(struct sk_buff *skb, 266int mesh_nexthop_lookup(struct sk_buff *skb,
@@ -248,8 +268,8 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
248int mesh_nexthop_resolve(struct sk_buff *skb, 268int mesh_nexthop_resolve(struct sk_buff *skb,
249 struct ieee80211_sub_if_data *sdata); 269 struct ieee80211_sub_if_data *sdata);
250void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata); 270void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata);
251struct mesh_path *mesh_path_lookup(u8 *dst, 271struct mesh_path *mesh_path_lookup(const u8 *dst,
252 struct ieee80211_sub_if_data *sdata); 272 struct ieee80211_sub_if_data *sdata);
253struct mesh_path *mpp_path_lookup(u8 *dst, 273struct mesh_path *mpp_path_lookup(u8 *dst,
254 struct ieee80211_sub_if_data *sdata); 274 struct ieee80211_sub_if_data *sdata);
255int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata); 275int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata);
@@ -259,7 +279,7 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop);
259void mesh_path_expire(struct ieee80211_sub_if_data *sdata); 279void mesh_path_expire(struct ieee80211_sub_if_data *sdata);
260void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, 280void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
261 struct ieee80211_mgmt *mgmt, size_t len); 281 struct ieee80211_mgmt *mgmt, size_t len);
262int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata); 282int mesh_path_add(const u8 *dst, struct ieee80211_sub_if_data *sdata);
263 283
264int mesh_path_add_gate(struct mesh_path *mpath); 284int mesh_path_add_gate(struct mesh_path *mpath);
265int mesh_path_send_to_gates(struct mesh_path *mpath); 285int mesh_path_send_to_gates(struct mesh_path *mpath);
@@ -271,20 +291,22 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
271bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie); 291bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
272u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); 292u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
273void mesh_plink_broken(struct sta_info *sta); 293void mesh_plink_broken(struct sta_info *sta);
274void mesh_plink_deactivate(struct sta_info *sta); 294u32 mesh_plink_deactivate(struct sta_info *sta);
275int mesh_plink_open(struct sta_info *sta); 295u32 mesh_plink_open(struct sta_info *sta);
276void mesh_plink_block(struct sta_info *sta); 296u32 mesh_plink_block(struct sta_info *sta);
277void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, 297void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
278 struct ieee80211_mgmt *mgmt, size_t len, 298 struct ieee80211_mgmt *mgmt, size_t len,
279 struct ieee80211_rx_status *rx_status); 299 struct ieee80211_rx_status *rx_status);
300void mesh_sta_cleanup(struct sta_info *sta);
280 301
281/* Private interfaces */ 302/* Private interfaces */
282/* Mesh tables */ 303/* Mesh tables */
283void mesh_mpath_table_grow(void); 304void mesh_mpath_table_grow(void);
284void mesh_mpp_table_grow(void); 305void mesh_mpp_table_grow(void);
285/* Mesh paths */ 306/* Mesh paths */
286int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, __le16 target_rcode, 307int mesh_path_error_tx(u8 ttl, const u8 *target, __le32 target_sn,
287 const u8 *ra, struct ieee80211_sub_if_data *sdata); 308 __le16 target_rcode, const u8 *ra,
309 struct ieee80211_sub_if_data *sdata);
288void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); 310void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
289void mesh_path_flush_pending(struct mesh_path *mpath); 311void mesh_path_flush_pending(struct mesh_path *mpath);
290void mesh_path_tx_pending(struct mesh_path *mpath); 312void mesh_path_tx_pending(struct mesh_path *mpath);
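Much of the remaining header churn is const-correctness: the path lookups and mesh_path_error_tx() now take const u8 * addresses, so callers holding pointers into received (read-only) frame data no longer need casts. In miniature, with a toy linear lookup standing in for the real hash table:

    #include <stddef.h>
    #include <string.h>

    struct mesh_path { unsigned char dst[6]; };

    /* const key: a pointer into an RX frame can be passed as-is */
    static struct mesh_path *mesh_path_lookup(const unsigned char *dst,
                                              struct mesh_path *tbl, int n)
    {
        for (int i = 0; i < n; i++)
            if (!memcmp(tbl[i].dst, dst, 6))
                return &tbl[i];
        return NULL;
    }

    int main(void)
    {
        const unsigned char da[6] = {0}; /* e.g. hdr->addr3 of an RX frame */
        struct mesh_path tbl[1] = {{{0}}};
        return mesh_path_lookup(da, tbl, 1) ? 0 : 1;
    }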
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 6b4603a90031..585c1e26cca8 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -30,14 +30,14 @@
30 30
31static void mesh_queue_preq(struct mesh_path *, u8); 31static void mesh_queue_preq(struct mesh_path *, u8);
32 32
33static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae) 33static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae)
34{ 34{
35 if (ae) 35 if (ae)
36 offset += 6; 36 offset += 6;
37 return get_unaligned_le32(preq_elem + offset); 37 return get_unaligned_le32(preq_elem + offset);
38} 38}
39 39
40static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae) 40static inline u32 u16_field_get(const u8 *preq_elem, int offset, bool ae)
41{ 41{
42 if (ae) 42 if (ae)
43 offset += 6; 43 offset += 6;
@@ -102,10 +102,13 @@ enum mpath_frame_type {
102static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 102static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
103 103
104static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, 104static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
105 u8 *orig_addr, __le32 orig_sn, u8 target_flags, u8 *target, 105 const u8 *orig_addr, __le32 orig_sn,
106 __le32 target_sn, const u8 *da, u8 hop_count, u8 ttl, 106 u8 target_flags, const u8 *target,
107 __le32 lifetime, __le32 metric, __le32 preq_id, 107 __le32 target_sn, const u8 *da,
108 struct ieee80211_sub_if_data *sdata) 108 u8 hop_count, u8 ttl,
109 __le32 lifetime, __le32 metric,
110 __le32 preq_id,
111 struct ieee80211_sub_if_data *sdata)
109{ 112{
110 struct ieee80211_local *local = sdata->local; 113 struct ieee80211_local *local = sdata->local;
111 struct sk_buff *skb; 114 struct sk_buff *skb;
@@ -205,6 +208,7 @@ static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
205 struct sk_buff *skb) 208 struct sk_buff *skb)
206{ 209{
207 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 210 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
211 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
208 212
209 skb_set_mac_header(skb, 0); 213 skb_set_mac_header(skb, 0);
210 skb_set_network_header(skb, 0); 214 skb_set_network_header(skb, 0);
@@ -217,6 +221,7 @@ static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
217 info->control.vif = &sdata->vif; 221 info->control.vif = &sdata->vif;
218 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 222 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
219 ieee80211_set_qos_hdr(sdata, skb); 223 ieee80211_set_qos_hdr(sdata, skb);
224 ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
220} 225}
221 226
222/** 227/**
@@ -233,7 +238,7 @@ static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
233 * also acquires in the TX path. To avoid a deadlock we don't transmit the 238 * also acquires in the TX path. To avoid a deadlock we don't transmit the
234 * frame directly but add it to the pending queue instead. 239 * frame directly but add it to the pending queue instead.
235 */ 240 */
236int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, 241int mesh_path_error_tx(u8 ttl, const u8 *target, __le32 target_sn,
237 __le16 target_rcode, const u8 *ra, 242 __le16 target_rcode, const u8 *ra,
238 struct ieee80211_sub_if_data *sdata) 243 struct ieee80211_sub_if_data *sdata)
239{ 244{
@@ -367,14 +372,14 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
367 * path routing information is updated. 372 * path routing information is updated.
368 */ 373 */
369static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, 374static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
370 struct ieee80211_mgmt *mgmt, 375 struct ieee80211_mgmt *mgmt,
371 u8 *hwmp_ie, enum mpath_frame_type action) 376 const u8 *hwmp_ie, enum mpath_frame_type action)
372{ 377{
373 struct ieee80211_local *local = sdata->local; 378 struct ieee80211_local *local = sdata->local;
374 struct mesh_path *mpath; 379 struct mesh_path *mpath;
375 struct sta_info *sta; 380 struct sta_info *sta;
376 bool fresh_info; 381 bool fresh_info;
377 u8 *orig_addr, *ta; 382 const u8 *orig_addr, *ta;
378 u32 orig_sn, orig_metric; 383 u32 orig_sn, orig_metric;
379 unsigned long orig_lifetime, exp_time; 384 unsigned long orig_lifetime, exp_time;
380 u32 last_hop_metric, new_metric; 385 u32 last_hop_metric, new_metric;
@@ -509,11 +514,11 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
509 514
510static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, 515static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
511 struct ieee80211_mgmt *mgmt, 516 struct ieee80211_mgmt *mgmt,
512 u8 *preq_elem, u32 metric) 517 const u8 *preq_elem, u32 metric)
513{ 518{
514 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 519 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
515 struct mesh_path *mpath = NULL; 520 struct mesh_path *mpath = NULL;
516 u8 *target_addr, *orig_addr; 521 const u8 *target_addr, *orig_addr;
517 const u8 *da; 522 const u8 *da;
518 u8 target_flags, ttl, flags; 523 u8 target_flags, ttl, flags;
519 u32 orig_sn, target_sn, lifetime, orig_metric; 524 u32 orig_sn, target_sn, lifetime, orig_metric;
@@ -646,11 +651,11 @@ next_hop_deref_protected(struct mesh_path *mpath)
646 651
647static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, 652static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
648 struct ieee80211_mgmt *mgmt, 653 struct ieee80211_mgmt *mgmt,
649 u8 *prep_elem, u32 metric) 654 const u8 *prep_elem, u32 metric)
650{ 655{
651 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 656 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
652 struct mesh_path *mpath; 657 struct mesh_path *mpath;
653 u8 *target_addr, *orig_addr; 658 const u8 *target_addr, *orig_addr;
654 u8 ttl, hopcount, flags; 659 u8 ttl, hopcount, flags;
655 u8 next_hop[ETH_ALEN]; 660 u8 next_hop[ETH_ALEN];
656 u32 target_sn, orig_sn, lifetime; 661 u32 target_sn, orig_sn, lifetime;
@@ -709,12 +714,13 @@ fail:
709} 714}
710 715
711static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata, 716static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
712 struct ieee80211_mgmt *mgmt, u8 *perr_elem) 717 struct ieee80211_mgmt *mgmt,
718 const u8 *perr_elem)
713{ 719{
714 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 720 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
715 struct mesh_path *mpath; 721 struct mesh_path *mpath;
716 u8 ttl; 722 u8 ttl;
717 u8 *ta, *target_addr; 723 const u8 *ta, *target_addr;
718 u32 target_sn; 724 u32 target_sn;
719 u16 target_rcode; 725 u16 target_rcode;
720 726
@@ -756,15 +762,15 @@ endperr:
756} 762}
757 763
758static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, 764static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
759 struct ieee80211_mgmt *mgmt, 765 struct ieee80211_mgmt *mgmt,
760 struct ieee80211_rann_ie *rann) 766 const struct ieee80211_rann_ie *rann)
761{ 767{
762 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 768 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
763 struct ieee80211_local *local = sdata->local; 769 struct ieee80211_local *local = sdata->local;
764 struct sta_info *sta; 770 struct sta_info *sta;
765 struct mesh_path *mpath; 771 struct mesh_path *mpath;
766 u8 ttl, flags, hopcount; 772 u8 ttl, flags, hopcount;
767 u8 *orig_addr; 773 const u8 *orig_addr;
768 u32 orig_sn, metric, metric_txsta, interval; 774 u32 orig_sn, metric, metric_txsta, interval;
769 bool root_is_gate; 775 bool root_is_gate;
770 776
@@ -1080,6 +1086,10 @@ int mesh_nexthop_resolve(struct sk_buff *skb,
1080 u8 *target_addr = hdr->addr3; 1086 u8 *target_addr = hdr->addr3;
1081 int err = 0; 1087 int err = 0;
1082 1088
1089 /* Nulls are only sent to peers for PS and should be pre-addressed */
1090 if (ieee80211_is_qos_nullfunc(hdr->frame_control))
1091 return 0;
1092
1083 rcu_read_lock(); 1093 rcu_read_lock();
1084 err = mesh_nexthop_lookup(skb, sdata); 1094 err = mesh_nexthop_lookup(skb, sdata);
1085 if (!err) 1095 if (!err)
@@ -1151,6 +1161,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
1151 if (next_hop) { 1161 if (next_hop) {
1152 memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN); 1162 memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
1153 memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); 1163 memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
1164 ieee80211_mps_set_frame_flags(sdata, next_hop, hdr);
1154 err = 0; 1165 err = 0;
1155 } 1166 }
1156 1167
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index aa749818860e..2ce4c4023a97 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -181,7 +181,7 @@ errcopy:
181 return -ENOMEM; 181 return -ENOMEM;
182} 182}
183 183
184static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, 184static u32 mesh_table_hash(const u8 *addr, struct ieee80211_sub_if_data *sdata,
185 struct mesh_table *tbl) 185 struct mesh_table *tbl)
186{ 186{
187 /* Use last four bytes of hw addr and interface index as hash index */ 187 /* Use last four bytes of hw addr and interface index as hash index */
@@ -212,6 +212,7 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
212 hdr = (struct ieee80211_hdr *) skb->data; 212 hdr = (struct ieee80211_hdr *) skb->data;
213 memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); 213 memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
214 memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN); 214 memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
215 ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
215 } 216 }
216 217
217 spin_unlock_irqrestore(&mpath->frame_queue.lock, flags); 218 spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
@@ -325,8 +326,8 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
325} 326}
326 327
327 328
328static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst, 329static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
329 struct ieee80211_sub_if_data *sdata) 330 struct ieee80211_sub_if_data *sdata)
330{ 331{
331 struct mesh_path *mpath; 332 struct mesh_path *mpath;
332 struct hlist_node *n; 333 struct hlist_node *n;
@@ -358,7 +359,8 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
358 * 359 *
359 * Locking: must be called within a read rcu section. 360 * Locking: must be called within a read rcu section.
360 */ 361 */
361struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) 362struct mesh_path *mesh_path_lookup(const u8 *dst,
363 struct ieee80211_sub_if_data *sdata)
362{ 364{
363 return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata); 365 return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
364} 366}
@@ -493,7 +495,7 @@ int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
493 * 495 *
494 * State: the initial state of the new path is set to 0 496 * State: the initial state of the new path is set to 0
495 */ 497 */
496int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) 498int mesh_path_add(const u8 *dst, struct ieee80211_sub_if_data *sdata)
497{ 499{
498 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 500 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
499 struct ieee80211_local *local = sdata->local; 501 struct ieee80211_local *local = sdata->local;
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 9e0416696a83..f7526e509aa8 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -56,27 +56,63 @@ static inline void mesh_plink_fsm_restart(struct sta_info *sta)
56} 56}
57 57
58/* 58/*
59 * Allocate mesh sta entry and insert into station table 59 * mesh_set_short_slot_time - enable / disable ERP short slot time.
60 *
61 * The standard indirectly mandates mesh STAs to turn off short slot time by
62 * disallowing advertising this (802.11-2012 8.4.1.4), but that doesn't mean we
63 * can't be sneaky about it. Enable short slot time if all mesh STAs in the
64 * MBSS support ERP rates.
65 *
66 * Returns BSS_CHANGED_ERP_SLOT or 0 for no change.
60 */ 67 */
61static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, 68static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata)
62 u8 *hw_addr)
63{ 69{
70 struct ieee80211_local *local = sdata->local;
71 enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
72 struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
64 struct sta_info *sta; 73 struct sta_info *sta;
74 u32 erp_rates = 0, changed = 0;
75 int i;
76 bool short_slot = false;
65 77
66 if (sdata->local->num_sta >= MESH_MAX_PLINKS) 78 if (band == IEEE80211_BAND_5GHZ) {
67 return NULL; 79 /* (IEEE 802.11-2012 19.4.5) */
80 short_slot = true;
81 goto out;
82 } else if (band != IEEE80211_BAND_2GHZ ||
83 (band == IEEE80211_BAND_2GHZ &&
84 local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
85 goto out;
68 86
69 sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL); 87 for (i = 0; i < sband->n_bitrates; i++)
70 if (!sta) 88 if (sband->bitrates[i].flags & IEEE80211_RATE_ERP_G)
71 return NULL; 89 erp_rates |= BIT(i);
72 90
73 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH); 91 if (!erp_rates)
74 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC); 92 goto out;
75 sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
76 93
77 set_sta_flag(sta, WLAN_STA_WME); 94 rcu_read_lock();
95 list_for_each_entry_rcu(sta, &local->sta_list, list) {
96 if (sdata != sta->sdata ||
97 sta->plink_state != NL80211_PLINK_ESTAB)
98 continue;
78 99
79 return sta; 100 short_slot = false;
101 if (erp_rates & sta->sta.supp_rates[band])
102 short_slot = true;
103 else
104 break;
105 }
106 rcu_read_unlock();
107
108out:
109 if (sdata->vif.bss_conf.use_short_slot != short_slot) {
110 sdata->vif.bss_conf.use_short_slot = short_slot;
111 changed = BSS_CHANGED_ERP_SLOT;
112 mpl_dbg(sdata, "mesh_plink %pM: ERP short slot time %d\n",
113 sdata->vif.addr, short_slot);
114 }
115 return changed;
80} 116}
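The loop above grants short slot time on 2.4 GHz only if there is at least one established peer and every established peer supports at least one ERP rate; on 5 GHz it is unconditional. The decision, reduced to a pure function over hypothetical peer records:

    #include <stdbool.h>

    struct peer { unsigned supp_rates; bool estab; }; /* illustrative */

    static bool short_slot_ok(bool band_5ghz, unsigned erp_rates,
                              const struct peer *peers, int n)
    {
        if (band_5ghz)
            return true;       /* IEEE 802.11-2012 19.4.5 */
        if (!erp_rates)
            return false;      /* band offers no ERP rates at all */
        bool ok = false;
        for (int i = 0; i < n; i++) {
            if (!peers[i].estab)
                continue;
            if (!(erp_rates & peers[i].supp_rates))
                return false;  /* one non-ERP peer disables it */
            ok = true;
        }
        return ok;             /* false if there were no established peers */
    }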
81 117
82/** 118/**
@@ -165,6 +201,9 @@ static u32 __mesh_plink_deactivate(struct sta_info *sta)
165 sta->plink_state = NL80211_PLINK_BLOCKED; 201 sta->plink_state = NL80211_PLINK_BLOCKED;
166 mesh_path_flush_by_nexthop(sta); 202 mesh_path_flush_by_nexthop(sta);
167 203
204 ieee80211_mps_sta_status_update(sta);
205 changed |= ieee80211_mps_local_status_update(sdata);
206
168 return changed; 207 return changed;
169} 208}
170 209
@@ -175,7 +214,7 @@ static u32 __mesh_plink_deactivate(struct sta_info *sta)
175 * 214 *
176 * All mesh paths with this peer as next hop will be flushed 215 * All mesh paths with this peer as next hop will be flushed
177 */ 216 */
178void mesh_plink_deactivate(struct sta_info *sta) 217u32 mesh_plink_deactivate(struct sta_info *sta)
179{ 218{
180 struct ieee80211_sub_if_data *sdata = sta->sdata; 219 struct ieee80211_sub_if_data *sdata = sta->sdata;
181 u32 changed; 220 u32 changed;
@@ -188,7 +227,7 @@ void mesh_plink_deactivate(struct sta_info *sta)
188 sta->reason); 227 sta->reason);
189 spin_unlock_bh(&sta->lock); 228 spin_unlock_bh(&sta->lock);
190 229
191 ieee80211_bss_info_change_notify(sdata, changed); 230 return changed;
192} 231}
193 232
194static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, 233static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
@@ -309,59 +348,32 @@ free:
309 return err; 348 return err;
310} 349}
311 350
312/** 351static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
313 * mesh_peer_init - initialize new mesh peer and return resulting sta_info 352 struct sta_info *sta,
314 * 353 struct ieee802_11_elems *elems, bool insert)
315 * @sdata: local meshif
316 * @addr: peer's address
317 * @elems: IEs from beacon or mesh peering frame
318 *
319 * call under RCU
320 */
321static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
322 u8 *addr,
323 struct ieee802_11_elems *elems)
324{ 354{
325 struct ieee80211_local *local = sdata->local; 355 struct ieee80211_local *local = sdata->local;
326 enum ieee80211_band band = ieee80211_get_sdata_band(sdata); 356 enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
327 struct ieee80211_supported_band *sband; 357 struct ieee80211_supported_band *sband;
328 u32 rates, basic_rates = 0; 358 u32 rates, basic_rates = 0, changed = 0;
329 struct sta_info *sta;
330 bool insert = false;
331 359
332 sband = local->hw.wiphy->bands[band]; 360 sband = local->hw.wiphy->bands[band];
333 rates = ieee80211_sta_get_rates(local, elems, band, &basic_rates); 361 rates = ieee80211_sta_get_rates(local, elems, band, &basic_rates);
334 362
335 sta = sta_info_get(sdata, addr);
336 if (!sta) {
337 /* Userspace handles peer allocation when security is enabled */
338 if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
339 cfg80211_notify_new_peer_candidate(sdata->dev, addr,
340 elems->ie_start,
341 elems->total_len,
342 GFP_ATOMIC);
343 return NULL;
344 }
345
346 sta = mesh_plink_alloc(sdata, addr);
347 if (!sta)
348 return NULL;
349 insert = true;
350 }
351
352 spin_lock_bh(&sta->lock); 363 spin_lock_bh(&sta->lock);
353 sta->last_rx = jiffies; 364 sta->last_rx = jiffies;
354 if (sta->plink_state == NL80211_PLINK_ESTAB) {
355 spin_unlock_bh(&sta->lock);
356 return sta;
357 }
358 365
366 /* rates and capabilities don't change during peering */
367 if (sta->plink_state == NL80211_PLINK_ESTAB)
368 goto out;
369
370 if (sta->sta.supp_rates[band] != rates)
371 changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
359 sta->sta.supp_rates[band] = rates; 372 sta->sta.supp_rates[band] = rates;
360 if (elems->ht_cap_elem && 373 if (elems->ht_cap_elem &&
361 sdata->vif.bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) 374 sdata->vif.bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
362 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 375 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
363 elems->ht_cap_elem, 376 elems->ht_cap_elem, sta);
364 &sta->sta.ht_cap);
365 else 377 else
366 memset(&sta->sta.ht_cap, 0, sizeof(sta->sta.ht_cap)); 378 memset(&sta->sta.ht_cap, 0, sizeof(sta->sta.ht_cap));
367 379
@@ -370,31 +382,119 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
370 382
371 if (!(elems->ht_operation->ht_param & 383 if (!(elems->ht_operation->ht_param &
372 IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) 384 IEEE80211_HT_PARAM_CHAN_WIDTH_ANY))
373 sta->sta.ht_cap.cap &= 385 sta->sta.bandwidth = IEEE80211_STA_RX_BW_20;
374 ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
375 ieee80211_ht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan, 386 ieee80211_ht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan,
376 elems->ht_operation, &chandef); 387 elems->ht_operation, &chandef);
388 if (sta->ch_width != chandef.width)
389 changed |= IEEE80211_RC_BW_CHANGED;
377 sta->ch_width = chandef.width; 390 sta->ch_width = chandef.width;
378 } 391 }
379 392
380 if (insert) 393 if (insert)
381 rate_control_rate_init(sta); 394 rate_control_rate_init(sta);
395 else
396 rate_control_rate_update(local, sband, sta, changed);
397out:
382 spin_unlock_bh(&sta->lock); 398 spin_unlock_bh(&sta->lock);
399}
383 400
384 if (insert && sta_info_insert(sta)) 401static struct sta_info *
402__mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr)
403{
404 struct sta_info *sta;
405
406 if (sdata->local->num_sta >= MESH_MAX_PLINKS)
385 return NULL; 407 return NULL;
386 408
409 sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
410 if (!sta)
411 return NULL;
412
413 sta->plink_state = NL80211_PLINK_LISTEN;
414 init_timer(&sta->plink_timer);
415
416 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
417 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
418 sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
419
420 set_sta_flag(sta, WLAN_STA_WME);
421
422 return sta;
423}
424
425static struct sta_info *
426mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr,
427 struct ieee802_11_elems *elems)
428{
429 struct sta_info *sta = NULL;
430
431 /* Userspace handles peer allocation when security is enabled */
432 if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED)
433 cfg80211_notify_new_peer_candidate(sdata->dev, addr,
434 elems->ie_start,
435 elems->total_len,
436 GFP_KERNEL);
437 else
438 sta = __mesh_sta_info_alloc(sdata, addr);
439
440 return sta;
441}
442
443/*
444 * mesh_sta_info_get - return mesh sta info entry for @addr.
445 *
446 * @sdata: local meshif
447 * @addr: peer's address
448 * @elems: IEs from beacon or mesh peering frame.
449 *
450 * Return existing or newly allocated sta_info under RCU read lock.
451 * (re)initialize with given IEs.
452 */
453static struct sta_info *
454mesh_sta_info_get(struct ieee80211_sub_if_data *sdata,
455 u8 *addr, struct ieee802_11_elems *elems) __acquires(RCU)
456{
457 struct sta_info *sta = NULL;
458
459 rcu_read_lock();
460 sta = sta_info_get(sdata, addr);
461 if (sta) {
462 mesh_sta_info_init(sdata, sta, elems, false);
463 } else {
464 rcu_read_unlock();
465 /* can't run atomic */
466 sta = mesh_sta_info_alloc(sdata, addr, elems);
467 if (!sta) {
468 rcu_read_lock();
469 return NULL;
470 }
471
472 mesh_sta_info_init(sdata, sta, elems, true);
473
474 if (sta_info_insert_rcu(sta))
475 return NULL;
476 }
477
387 return sta; 478 return sta;
388} 479}
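mesh_sta_info_get() has to juggle locking because sta_info_alloc() may sleep: on a lookup miss it drops the RCU read lock, allocates with GFP_KERNEL, then re-acquires the lock, so the caller sees the same locking state on every path. A toy model of that dance, with a pthread rwlock standing in for RCU and placeholders for the lookup and allocation:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_rwlock_t tbl_lock = PTHREAD_RWLOCK_INITIALIZER;

    static void *lookup(void) { return NULL; } /* stand-in: always misses */

    /* returns with the read lock held, like mesh_sta_info_get() */
    static void *get_or_alloc(void)
    {
        pthread_rwlock_rdlock(&tbl_lock);
        void *sta = lookup();
        if (sta)
            return sta;
        pthread_rwlock_unlock(&tbl_lock); /* can't sleep under the lock */
        sta = malloc(64);                 /* the GFP_KERNEL allocation */
        pthread_rwlock_rdlock(&tbl_lock);
        return sta;                       /* NULL is also returned locked */
    }

    int main(void)
    {
        void *sta = get_or_alloc();
        pthread_rwlock_unlock(&tbl_lock);
        free(sta);
        return 0;
    }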
389 480
481/*
482 * mesh_neighbour_update - update or initialize new mesh neighbor.
483 *
484 * @sdata: local meshif
485 * @addr: peer's address
486 * @elems: IEs from beacon or mesh peering frame
487 *
488 * Initiates peering if appropriate.
489 */
390void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata, 490void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
391 u8 *hw_addr, 491 u8 *hw_addr,
392 struct ieee802_11_elems *elems) 492 struct ieee802_11_elems *elems)
393{ 493{
394 struct sta_info *sta; 494 struct sta_info *sta;
495 u32 changed = 0;
395 496
396 rcu_read_lock(); 497 sta = mesh_sta_info_get(sdata, hw_addr, elems);
397 sta = mesh_peer_init(sdata, hw_addr, elems);
398 if (!sta) 498 if (!sta)
399 goto out; 499 goto out;
400 500
@@ -403,10 +503,12 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
403 sdata->u.mesh.accepting_plinks && 503 sdata->u.mesh.accepting_plinks &&
404 sdata->u.mesh.mshcfg.auto_open_plinks && 504 sdata->u.mesh.mshcfg.auto_open_plinks &&
405 rssi_threshold_check(sta, sdata)) 505 rssi_threshold_check(sta, sdata))
406 mesh_plink_open(sta); 506 changed = mesh_plink_open(sta);
407 507
508 ieee80211_mps_frame_release(sta, elems);
408out: 509out:
409 rcu_read_unlock(); 510 rcu_read_unlock();
511 ieee80211_mbss_info_change_notify(sdata, changed);
410} 512}
411 513
412static void mesh_plink_timer(unsigned long data) 514static void mesh_plink_timer(unsigned long data)
@@ -490,6 +592,13 @@ static void mesh_plink_timer(unsigned long data)
490#ifdef CONFIG_PM 592#ifdef CONFIG_PM
491void mesh_plink_quiesce(struct sta_info *sta) 593void mesh_plink_quiesce(struct sta_info *sta)
492{ 594{
595 if (!ieee80211_vif_is_mesh(&sta->sdata->vif))
596 return;
597
598 /* no kernel mesh sta timers have been initialized */
599 if (sta->sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)
600 return;
601
493 if (del_timer_sync(&sta->plink_timer)) 602 if (del_timer_sync(&sta->plink_timer))
494 sta->plink_timer_was_running = true; 603 sta->plink_timer_was_running = true;
495} 604}
@@ -512,13 +621,14 @@ static inline void mesh_plink_timer_set(struct sta_info *sta, int timeout)
512 add_timer(&sta->plink_timer); 621 add_timer(&sta->plink_timer);
513} 622}
514 623
515int mesh_plink_open(struct sta_info *sta) 624u32 mesh_plink_open(struct sta_info *sta)
516{ 625{
517 __le16 llid; 626 __le16 llid;
518 struct ieee80211_sub_if_data *sdata = sta->sdata; 627 struct ieee80211_sub_if_data *sdata = sta->sdata;
628 u32 changed;
519 629
520 if (!test_sta_flag(sta, WLAN_STA_AUTH)) 630 if (!test_sta_flag(sta, WLAN_STA_AUTH))
521 return -EPERM; 631 return 0;
522 632
523 spin_lock_bh(&sta->lock); 633 spin_lock_bh(&sta->lock);
524 get_random_bytes(&llid, 2); 634 get_random_bytes(&llid, 2);
@@ -526,7 +636,7 @@ int mesh_plink_open(struct sta_info *sta)
526 if (sta->plink_state != NL80211_PLINK_LISTEN && 636 if (sta->plink_state != NL80211_PLINK_LISTEN &&
527 sta->plink_state != NL80211_PLINK_BLOCKED) { 637 sta->plink_state != NL80211_PLINK_BLOCKED) {
528 spin_unlock_bh(&sta->lock); 638 spin_unlock_bh(&sta->lock);
529 return -EBUSY; 639 return 0;
530 } 640 }
531 sta->plink_state = NL80211_PLINK_OPN_SNT; 641 sta->plink_state = NL80211_PLINK_OPN_SNT;
532 mesh_plink_timer_set(sta, sdata->u.mesh.mshcfg.dot11MeshRetryTimeout); 642 mesh_plink_timer_set(sta, sdata->u.mesh.mshcfg.dot11MeshRetryTimeout);
@@ -535,13 +645,16 @@ int mesh_plink_open(struct sta_info *sta)
535 "Mesh plink: starting establishment with %pM\n", 645 "Mesh plink: starting establishment with %pM\n",
536 sta->sta.addr); 646 sta->sta.addr);
537 647
538 return mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN, 648 /* set the non-peer mode to active during peering */
539 sta->sta.addr, llid, 0, 0); 649 changed = ieee80211_mps_local_status_update(sdata);
650
651 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
652 sta->sta.addr, llid, 0, 0);
653 return changed;
540} 654}
541 655
542void mesh_plink_block(struct sta_info *sta) 656u32 mesh_plink_block(struct sta_info *sta)
543{ 657{
544 struct ieee80211_sub_if_data *sdata = sta->sdata;
545 u32 changed; 658 u32 changed;
546 659
547 spin_lock_bh(&sta->lock); 660 spin_lock_bh(&sta->lock);
@@ -549,7 +662,7 @@ void mesh_plink_block(struct sta_info *sta)
549 sta->plink_state = NL80211_PLINK_BLOCKED; 662 sta->plink_state = NL80211_PLINK_BLOCKED;
550 spin_unlock_bh(&sta->lock); 663 spin_unlock_bh(&sta->lock);
551 664
552 ieee80211_bss_info_change_notify(sdata, changed); 665 return changed;
553} 666}
554 667
555 668
@@ -632,6 +745,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
632 (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len == 8)) 745 (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len == 8))
633 memcpy(&llid, PLINK_GET_PLID(elems.peering), 2); 746 memcpy(&llid, PLINK_GET_PLID(elems.peering), 2);
634 747
 748 /* WARNING: the RCU read lock protects only the sta pointer; it is dropped & re-acquired below */
635 rcu_read_lock(); 749 rcu_read_lock();
636 750
637 sta = sta_info_get(sdata, mgmt->sa); 751 sta = sta_info_get(sdata, mgmt->sa);
@@ -735,8 +849,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
735 } 849 }
736 850
737 if (event == OPN_ACPT) { 851 if (event == OPN_ACPT) {
852 rcu_read_unlock();
738 /* allocate sta entry if necessary and update info */ 853 /* allocate sta entry if necessary and update info */
739 sta = mesh_peer_init(sdata, mgmt->sa, &elems); 854 sta = mesh_sta_info_get(sdata, mgmt->sa, &elems);
740 if (!sta) { 855 if (!sta) {
741 mpl_dbg(sdata, "Mesh plink: failed to init peer!\n"); 856 mpl_dbg(sdata, "Mesh plink: failed to init peer!\n");
742 rcu_read_unlock(); 857 rcu_read_unlock();
@@ -766,6 +881,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
766 sta->llid = llid; 881 sta->llid = llid;
767 mesh_plink_timer_set(sta, 882 mesh_plink_timer_set(sta,
768 mshcfg->dot11MeshRetryTimeout); 883 mshcfg->dot11MeshRetryTimeout);
884
885 /* set the non-peer mode to active during peering */
886 changed |= ieee80211_mps_local_status_update(sdata);
887
769 spin_unlock_bh(&sta->lock); 888 spin_unlock_bh(&sta->lock);
770 mesh_plink_frame_tx(sdata, 889 mesh_plink_frame_tx(sdata,
771 WLAN_SP_MESH_PEERING_OPEN, 890 WLAN_SP_MESH_PEERING_OPEN,
@@ -856,8 +975,12 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
856 spin_unlock_bh(&sta->lock); 975 spin_unlock_bh(&sta->lock);
857 changed |= mesh_plink_inc_estab_count(sdata); 976 changed |= mesh_plink_inc_estab_count(sdata);
858 changed |= mesh_set_ht_prot_mode(sdata); 977 changed |= mesh_set_ht_prot_mode(sdata);
978 changed |= mesh_set_short_slot_time(sdata);
859 mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n", 979 mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
860 sta->sta.addr); 980 sta->sta.addr);
981 ieee80211_mps_sta_status_update(sta);
982 changed |= ieee80211_mps_set_sta_local_pm(sta,
983 mshcfg->power_mode);
861 break; 984 break;
862 default: 985 default:
863 spin_unlock_bh(&sta->lock); 986 spin_unlock_bh(&sta->lock);
@@ -891,11 +1014,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
891 spin_unlock_bh(&sta->lock); 1014 spin_unlock_bh(&sta->lock);
892 changed |= mesh_plink_inc_estab_count(sdata); 1015 changed |= mesh_plink_inc_estab_count(sdata);
893 changed |= mesh_set_ht_prot_mode(sdata); 1016 changed |= mesh_set_ht_prot_mode(sdata);
1017 changed |= mesh_set_short_slot_time(sdata);
894 mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n", 1018 mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
895 sta->sta.addr); 1019 sta->sta.addr);
896 mesh_plink_frame_tx(sdata, 1020 mesh_plink_frame_tx(sdata,
897 WLAN_SP_MESH_PEERING_CONFIRM, 1021 WLAN_SP_MESH_PEERING_CONFIRM,
898 sta->sta.addr, llid, plid, 0); 1022 sta->sta.addr, llid, plid, 0);
1023 ieee80211_mps_sta_status_update(sta);
1024 changed |= ieee80211_mps_set_sta_local_pm(sta,
1025 mshcfg->power_mode);
899 break; 1026 break;
900 default: 1027 default:
901 spin_unlock_bh(&sta->lock); 1028 spin_unlock_bh(&sta->lock);
@@ -914,6 +1041,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
914 mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout); 1041 mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
915 spin_unlock_bh(&sta->lock); 1042 spin_unlock_bh(&sta->lock);
916 changed |= mesh_set_ht_prot_mode(sdata); 1043 changed |= mesh_set_ht_prot_mode(sdata);
1044 changed |= mesh_set_short_slot_time(sdata);
917 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE, 1045 mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
918 sta->sta.addr, llid, plid, reason); 1046 sta->sta.addr, llid, plid, reason);
919 break; 1047 break;
@@ -962,5 +1090,5 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
962 rcu_read_unlock(); 1090 rcu_read_unlock();
963 1091
964 if (changed) 1092 if (changed)
965 ieee80211_bss_info_change_notify(sdata, changed); 1093 ieee80211_mbss_info_change_notify(sdata, changed);
966} 1094}
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
new file mode 100644
index 000000000000..3b7bfc01ee36
--- /dev/null
+++ b/net/mac80211/mesh_ps.c
@@ -0,0 +1,598 @@
1/*
2 * Copyright 2012-2013, Marco Porsch <marco.porsch@s2005.tu-chemnitz.de>
3 * Copyright 2012-2013, cozybit Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include "mesh.h"
11#include "wme.h"
12
13
14/* mesh PS management */
15
16/**
17 * mps_qos_null_get - create pre-addressed QoS Null frame for mesh powersave
18 */
19static struct sk_buff *mps_qos_null_get(struct sta_info *sta)
20{
21 struct ieee80211_sub_if_data *sdata = sta->sdata;
22 struct ieee80211_local *local = sdata->local;
23 struct ieee80211_hdr *nullfunc; /* use 4addr header */
24 struct sk_buff *skb;
25 int size = sizeof(*nullfunc);
26 __le16 fc;
27
28 skb = dev_alloc_skb(local->hw.extra_tx_headroom + size + 2);
29 if (!skb)
30 return NULL;
31 skb_reserve(skb, local->hw.extra_tx_headroom);
32
33 nullfunc = (struct ieee80211_hdr *) skb_put(skb, size);
34 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
35 ieee80211_fill_mesh_addresses(nullfunc, &fc, sta->sta.addr,
36 sdata->vif.addr);
37 nullfunc->frame_control = fc;
38 nullfunc->duration_id = 0;
39 /* no address resolution for this frame -> set addr 1 immediately */
40 memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
41 memset(skb_put(skb, 2), 0, 2); /* append QoS control field */
42 ieee80211_mps_set_frame_flags(sdata, sta, nullfunc);
43
44 return skb;
45}
46
47/**
48 * mps_qos_null_tx - send a QoS Null to indicate link-specific power mode
49 */
50static void mps_qos_null_tx(struct sta_info *sta)
51{
52 struct sk_buff *skb;
53
54 skb = mps_qos_null_get(sta);
55 if (!skb)
56 return;
57
58 mps_dbg(sta->sdata, "announcing peer-specific power mode to %pM\n",
59 sta->sta.addr);
60
61 /* don't unintentionally start a MPSP */
62 if (!test_sta_flag(sta, WLAN_STA_PS_STA)) {
63 u8 *qc = ieee80211_get_qos_ctl((void *) skb->data);
64
65 qc[0] |= IEEE80211_QOS_CTL_EOSP;
66 }
67
68 ieee80211_tx_skb(sta->sdata, skb);
69}
70
71/**
72 * ieee80211_mps_local_status_update - track status of local link-specific PMs
73 *
74 * @sdata: local mesh subif
75 *
76 * sets the non-peer power mode and triggers the driver PS (re-)configuration
77 * Return BSS_CHANGED_BEACON if a beacon update is necessary.
78 */
79u32 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata)
80{
81 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
82 struct sta_info *sta;
83 bool peering = false;
84 int light_sleep_cnt = 0;
85 int deep_sleep_cnt = 0;
86 u32 changed = 0;
87 enum nl80211_mesh_power_mode nonpeer_pm;
88
89 rcu_read_lock();
90 list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
91 if (sdata != sta->sdata)
92 continue;
93
94 switch (sta->plink_state) {
95 case NL80211_PLINK_OPN_SNT:
96 case NL80211_PLINK_OPN_RCVD:
97 case NL80211_PLINK_CNF_RCVD:
98 peering = true;
99 break;
100 case NL80211_PLINK_ESTAB:
101 if (sta->local_pm == NL80211_MESH_POWER_LIGHT_SLEEP)
102 light_sleep_cnt++;
103 else if (sta->local_pm == NL80211_MESH_POWER_DEEP_SLEEP)
104 deep_sleep_cnt++;
105 break;
106 default:
107 break;
108 }
109 }
110 rcu_read_unlock();
111
112 /*
113 * Set non-peer mode to active during peering/scanning/authentication
114 * (see IEEE802.11-2012 13.14.8.3). The non-peer mesh power mode is
115 * deep sleep if the local STA is in light or deep sleep towards at
116 * least one mesh peer (see 13.14.3.1). Otherwise, set it to the
117 * user-configured default value.
118 */
119 if (peering) {
120 mps_dbg(sdata, "setting non-peer PM to active for peering\n");
121 nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
122 } else if (light_sleep_cnt || deep_sleep_cnt) {
123 mps_dbg(sdata, "setting non-peer PM to deep sleep\n");
124 nonpeer_pm = NL80211_MESH_POWER_DEEP_SLEEP;
125 } else {
126 mps_dbg(sdata, "setting non-peer PM to user value\n");
127 nonpeer_pm = ifmsh->mshcfg.power_mode;
128 }
129
130 /* need update if sleep counts move between 0 and non-zero */
131 if (ifmsh->nonpeer_pm != nonpeer_pm ||
132 !ifmsh->ps_peers_light_sleep != !light_sleep_cnt ||
133 !ifmsh->ps_peers_deep_sleep != !deep_sleep_cnt)
134 changed = BSS_CHANGED_BEACON;
135
136 ifmsh->nonpeer_pm = nonpeer_pm;
137 ifmsh->ps_peers_light_sleep = light_sleep_cnt;
138 ifmsh->ps_peers_deep_sleep = deep_sleep_cnt;
139
140 return changed;
141}
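The comment block above fixes the non-peer power mode per IEEE 802.11-2012 13.14.8.3 and 13.14.3.1: active while any peering is in progress, deep sleep as soon as the local STA sleeps towards at least one peer, and the configured default otherwise. As a pure function:

    enum pm { PM_ACTIVE, PM_LIGHT, PM_DEEP, PM_USER_DEFAULT };

    /* peering trumps everything; any sleeping peer link forces deep sleep */
    enum pm nonpeer_pm(int peering, int light_cnt, int deep_cnt)
    {
        if (peering)
            return PM_ACTIVE;
        if (light_cnt || deep_cnt)
            return PM_DEEP;
        return PM_USER_DEFAULT;
    }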
142
143/**
144 * ieee80211_mps_set_sta_local_pm - set local PM towards a mesh STA
145 *
146 * @sta: mesh STA
147 * @pm: the power mode to set
148 * Return BSS_CHANGED_BEACON if a beacon update is in order.
149 */
150u32 ieee80211_mps_set_sta_local_pm(struct sta_info *sta,
151 enum nl80211_mesh_power_mode pm)
152{
153 struct ieee80211_sub_if_data *sdata = sta->sdata;
154
155 mps_dbg(sdata, "local STA operates in mode %d with %pM\n",
156 pm, sta->sta.addr);
157
158 sta->local_pm = pm;
159
160 /*
161 * announce peer-specific power mode transition
162 * (see IEEE802.11-2012 13.14.3.2 and 13.14.3.3)
163 */
164 if (sta->plink_state == NL80211_PLINK_ESTAB)
165 mps_qos_null_tx(sta);
166
167 return ieee80211_mps_local_status_update(sdata);
168}
169
170/**
171 * ieee80211_mps_set_frame_flags - set mesh PS flags in FC (and QoS Control)
172 *
173 * @sdata: local mesh subif
174 * @sta: mesh STA
175 * @hdr: 802.11 frame header
176 *
177 * see IEEE802.11-2012 8.2.4.1.7 and 8.2.4.5.11
178 *
179 * NOTE: sta must be given when an individually-addressed QoS frame header
 180 * is handled; for group-addressed and management frames it is not used
181 */
182void ieee80211_mps_set_frame_flags(struct ieee80211_sub_if_data *sdata,
183 struct sta_info *sta,
184 struct ieee80211_hdr *hdr)
185{
186 enum nl80211_mesh_power_mode pm;
187 u8 *qc;
188
189 if (WARN_ON(is_unicast_ether_addr(hdr->addr1) &&
190 ieee80211_is_data_qos(hdr->frame_control) &&
191 !sta))
192 return;
193
194 if (is_unicast_ether_addr(hdr->addr1) &&
195 ieee80211_is_data_qos(hdr->frame_control) &&
196 sta->plink_state == NL80211_PLINK_ESTAB)
197 pm = sta->local_pm;
198 else
199 pm = sdata->u.mesh.nonpeer_pm;
200
201 if (pm == NL80211_MESH_POWER_ACTIVE)
202 hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_PM);
203 else
204 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
205
206 if (!ieee80211_is_data_qos(hdr->frame_control))
207 return;
208
209 qc = ieee80211_get_qos_ctl(hdr);
210
211 if ((is_unicast_ether_addr(hdr->addr1) &&
212 pm == NL80211_MESH_POWER_DEEP_SLEEP) ||
213 (is_multicast_ether_addr(hdr->addr1) &&
214 sdata->u.mesh.ps_peers_deep_sleep > 0))
215 qc[1] |= (IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8);
216 else
217 qc[1] &= ~(IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8);
218}
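This is the encode direction of the PM/PSL table documented below in mps_set_sta_peer_pm(): active clears the PM bit, both sleep modes set it, and deep sleep additionally sets the Mesh PS Level bit in QoS control. A simplified sketch (0x1000 and 0x0200 are the IEEE80211_FCTL_PM and IEEE80211_QOS_CTL_MESH_PS_LEVEL bit values; the kernel additionally special-cases group-addressed frames):

    #include <stdint.h>

    enum pm { PM_ACTIVE, PM_LIGHT, PM_DEEP };

    void encode_mesh_pm(enum pm pm, uint16_t *fc, uint8_t qc[2])
    {
        if (pm == PM_ACTIVE)
            *fc &= ~0x1000;        /* PM bit in frame control */
        else
            *fc |= 0x1000;
        if (pm == PM_DEEP)
            qc[1] |= 0x02;         /* Mesh PS Level, 0x0200 >> 8 */
        else
            qc[1] &= ~0x02;
    }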
219
220/**
221 * ieee80211_mps_sta_status_update - update buffering status of neighbor STA
222 *
223 * @sta: mesh STA
224 *
225 * called after change of peering status or non-peer/peer-specific power mode
226 */
227void ieee80211_mps_sta_status_update(struct sta_info *sta)
228{
229 enum nl80211_mesh_power_mode pm;
230 bool do_buffer;
231
232 /*
233 * use peer-specific power mode if peering is established and the
234 * peer's power mode is known
235 */
236 if (sta->plink_state == NL80211_PLINK_ESTAB &&
237 sta->peer_pm != NL80211_MESH_POWER_UNKNOWN)
238 pm = sta->peer_pm;
239 else
240 pm = sta->nonpeer_pm;
241
242 do_buffer = (pm != NL80211_MESH_POWER_ACTIVE);
243
244 /* Don't let the same PS state be set twice */
245 if (test_sta_flag(sta, WLAN_STA_PS_STA) == do_buffer)
246 return;
247
248 if (do_buffer) {
249 set_sta_flag(sta, WLAN_STA_PS_STA);
250 atomic_inc(&sta->sdata->u.mesh.ps.num_sta_ps);
251 mps_dbg(sta->sdata, "start PS buffering frames towards %pM\n",
252 sta->sta.addr);
253 } else {
254 ieee80211_sta_ps_deliver_wakeup(sta);
255 }
256
257 /* clear the MPSP flags for non-peers or active STA */
258 if (sta->plink_state != NL80211_PLINK_ESTAB) {
259 clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
260 clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
261 } else if (!do_buffer) {
262 clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
263 }
264}
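The update above reduces to a small predicate: buffer towards the neighbor iff its effective power mode is not active, where the effective mode is the peer-specific one once the link is established and the peer's mode is known. Sketched:

    #include <stdbool.h>

    enum pm { PM_ACTIVE, PM_LIGHT, PM_DEEP, PM_UNKNOWN };

    bool mps_do_buffer(bool estab, enum pm peer_pm, enum pm nonpeer_pm)
    {
        enum pm eff = (estab && peer_pm != PM_UNKNOWN) ? peer_pm : nonpeer_pm;
        return eff != PM_ACTIVE;
    }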
265
266static void mps_set_sta_peer_pm(struct sta_info *sta,
267 struct ieee80211_hdr *hdr)
268{
269 enum nl80211_mesh_power_mode pm;
270 u8 *qc = ieee80211_get_qos_ctl(hdr);
271
272 /*
273 * Test Power Management field of frame control (PW) and
274 * mesh power save level subfield of QoS control field (PSL)
275 *
276 * | PM | PSL| Mesh PM |
277 * +----+----+---------+
278 * | 0 |Rsrv| Active |
279 * | 1 | 0 | Light |
280 * | 1 | 1 | Deep |
281 */
282 if (ieee80211_has_pm(hdr->frame_control)) {
283 if (qc[1] & (IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8))
284 pm = NL80211_MESH_POWER_DEEP_SLEEP;
285 else
286 pm = NL80211_MESH_POWER_LIGHT_SLEEP;
287 } else {
288 pm = NL80211_MESH_POWER_ACTIVE;
289 }
290
291 if (sta->peer_pm == pm)
292 return;
293
294 mps_dbg(sta->sdata, "STA %pM enters mode %d\n",
295 sta->sta.addr, pm);
296
297 sta->peer_pm = pm;
298
299 ieee80211_mps_sta_status_update(sta);
300}
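The PM/PSL table above, decoded as a pure function (same bit values as on the transmit side):

    #include <stdint.h>

    enum pm { PM_ACTIVE, PM_LIGHT, PM_DEEP };

    enum pm decode_mesh_pm(uint16_t fc, const uint8_t qc[2])
    {
        if (!(fc & 0x1000))        /* PM bit clear -> active (PSL reserved) */
            return PM_ACTIVE;
        return (qc[1] & 0x02) ? PM_DEEP : PM_LIGHT;
    }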
301
302static void mps_set_sta_nonpeer_pm(struct sta_info *sta,
303 struct ieee80211_hdr *hdr)
304{
305 enum nl80211_mesh_power_mode pm;
306
307 if (ieee80211_has_pm(hdr->frame_control))
308 pm = NL80211_MESH_POWER_DEEP_SLEEP;
309 else
310 pm = NL80211_MESH_POWER_ACTIVE;
311
312 if (sta->nonpeer_pm == pm)
313 return;
314
315 mps_dbg(sta->sdata, "STA %pM sets non-peer mode to %d\n",
316 sta->sta.addr, pm);
317
318 sta->nonpeer_pm = pm;
319
320 ieee80211_mps_sta_status_update(sta);
321}
322
323/**
324 * ieee80211_mps_rx_h_sta_process - frame receive handler for mesh powersave
325 *
326 * @sta: STA info that transmitted the frame
327 * @hdr: IEEE 802.11 (QoS) Header
328 */
329void ieee80211_mps_rx_h_sta_process(struct sta_info *sta,
330 struct ieee80211_hdr *hdr)
331{
332 if (is_unicast_ether_addr(hdr->addr1) &&
333 ieee80211_is_data_qos(hdr->frame_control)) {
334 /*
335 * individually addressed QoS Data/Null frames contain
336 * peer link-specific PS mode towards the local STA
337 */
338 mps_set_sta_peer_pm(sta, hdr);
339
340 /* check for mesh Peer Service Period trigger frames */
341 ieee80211_mpsp_trigger_process(ieee80211_get_qos_ctl(hdr),
342 sta, false, false);
343 } else {
344 /*
345 * can only determine non-peer PS mode
346 * (see IEEE802.11-2012 8.2.4.1.7)
347 */
348 mps_set_sta_nonpeer_pm(sta, hdr);
349 }
350}
351
352
353/* mesh PS frame release */
354
355static void mpsp_trigger_send(struct sta_info *sta, bool rspi, bool eosp)
356{
357 struct ieee80211_sub_if_data *sdata = sta->sdata;
358 struct sk_buff *skb;
359 struct ieee80211_hdr *nullfunc;
360 struct ieee80211_tx_info *info;
361 u8 *qc;
362
363 skb = mps_qos_null_get(sta);
364 if (!skb)
365 return;
366
367 nullfunc = (struct ieee80211_hdr *) skb->data;
368 if (!eosp)
369 nullfunc->frame_control |=
370 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
371 /*
372 * | RSPI | EOSP | MPSP triggering |
373 * +------+------+--------------------+
374 * | 0 | 0 | local STA is owner |
375 * | 0 | 1 | no MPSP (MPSP end) |
376 * | 1 | 0 | both STA are owner |
377 * | 1 | 1 | peer STA is owner | see IEEE802.11-2012 13.14.9.2
378 */
379 qc = ieee80211_get_qos_ctl(nullfunc);
380 if (rspi)
381 qc[1] |= (IEEE80211_QOS_CTL_RSPI >> 8);
382 if (eosp)
383 qc[0] |= IEEE80211_QOS_CTL_EOSP;
384
385 info = IEEE80211_SKB_CB(skb);
386
387 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER |
388 IEEE80211_TX_CTL_REQ_TX_STATUS;
389
390 mps_dbg(sdata, "sending MPSP trigger%s%s to %pM\n",
391 rspi ? " RSPI" : "", eosp ? " EOSP" : "", sta->sta.addr);
392
393 ieee80211_tx_skb(sdata, skb);
394}
395
396/**
397 * mpsp_qos_null_append - append QoS Null frame to MPSP skb queue if needed
398 *
399 * To properly end a mesh MPSP the last transmitted frame has to set the EOSP
400 * flag in the QoS Control field. In case the current tailing frame is not a
401 * QoS Data frame, append a QoS Null to carry the flag.
402 */
static void mpsp_qos_null_append(struct sta_info *sta,
				 struct sk_buff_head *frames)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct sk_buff *new_skb, *skb = skb_peek_tail(frames);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info;

	if (ieee80211_is_data_qos(hdr->frame_control))
		return;

	new_skb = mps_qos_null_get(sta);
	if (!new_skb)
		return;

	mps_dbg(sdata, "appending QoS Null in MPSP towards %pM\n",
		sta->sta.addr);
	/*
	 * This frame has to be transmitted last. Assign lowest priority to
	 * make sure it cannot pass other frames when releasing multiple ACs.
	 */
	new_skb->priority = 1;
	skb_set_queue_mapping(new_skb, IEEE80211_AC_BK);
	ieee80211_set_qos_hdr(sdata, new_skb);

	info = IEEE80211_SKB_CB(new_skb);
	info->control.vif = &sdata->vif;
	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;

	__skb_queue_tail(frames, new_skb);
}

/**
 * mps_frame_deliver - transmit frames during mesh powersave
 *
 * @sta: STA info to transmit to
 * @n_frames: number of frames to transmit. -1 for all
 */
static void mps_frame_deliver(struct sta_info *sta, int n_frames)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	int ac;
	struct sk_buff_head frames;
	struct sk_buff *skb;
	bool more_data = false;

	skb_queue_head_init(&frames);

	/* collect frame(s) from buffers */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		while (n_frames != 0) {
			skb = skb_dequeue(&sta->tx_filtered[ac]);
			if (!skb) {
				skb = skb_dequeue(&sta->ps_tx_buf[ac]);
				if (skb)
					local->total_ps_buffered--;
			}
			if (!skb)
				break;
			n_frames--;
			__skb_queue_tail(&frames, skb);
		}

		if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
		    !skb_queue_empty(&sta->ps_tx_buf[ac]))
			more_data = true;
	}

	/* nothing to send? -> EOSP */
	if (skb_queue_empty(&frames)) {
		mpsp_trigger_send(sta, false, true);
		return;
	}

	/* in an MPSP make sure the last skb is a QoS Data frame */
	if (test_sta_flag(sta, WLAN_STA_MPSP_OWNER))
		mpsp_qos_null_append(sta, &frames);

	mps_dbg(sta->sdata, "sending %d frames to PS STA %pM\n",
		skb_queue_len(&frames), sta->sta.addr);

	/* prepare collected frames for transmission */
	skb_queue_walk(&frames, skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ieee80211_hdr *hdr = (void *) skb->data;

		/*
		 * Tell TX path to send this frame even though the
		 * STA may still remain in PS mode after this frame
		 * exchange.
		 */
		info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;

		if (more_data || !skb_queue_is_last(&frames, skb))
			hdr->frame_control |=
				cpu_to_le16(IEEE80211_FCTL_MOREDATA);
		else
			hdr->frame_control &=
				cpu_to_le16(~IEEE80211_FCTL_MOREDATA);

		if (skb_queue_is_last(&frames, skb) &&
		    ieee80211_is_data_qos(hdr->frame_control)) {
			u8 *qoshdr = ieee80211_get_qos_ctl(hdr);

			/* MPSP trigger frame ends service period */
			*qoshdr |= IEEE80211_QOS_CTL_EOSP;
			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
		}
	}

	ieee80211_add_pending_skbs(local, &frames);
	sta_info_recalc_tim(sta);
}

/**
 * ieee80211_mpsp_trigger_process - track status of mesh Peer Service Periods
 *
 * @qc: QoS Control field
 * @sta: peer to start an MPSP with
 * @tx: frame was transmitted by the local STA
 * @acked: frame has been transmitted successfully
 *
 * NOTE: active mode STA may only serve as MPSP owner
 */
void ieee80211_mpsp_trigger_process(u8 *qc, struct sta_info *sta,
				    bool tx, bool acked)
{
	u8 rspi = qc[1] & (IEEE80211_QOS_CTL_RSPI >> 8);
	u8 eosp = qc[0] & IEEE80211_QOS_CTL_EOSP;

	if (tx) {
		if (rspi && acked)
			set_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);

		if (eosp)
			clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
		else if (acked &&
			 test_sta_flag(sta, WLAN_STA_PS_STA) &&
			 !test_and_set_sta_flag(sta, WLAN_STA_MPSP_OWNER))
			mps_frame_deliver(sta, -1);
	} else {
		if (eosp)
			clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
		else if (sta->local_pm != NL80211_MESH_POWER_ACTIVE)
			set_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);

		if (rspi && !test_and_set_sta_flag(sta, WLAN_STA_MPSP_OWNER))
			mps_frame_deliver(sta, -1);
	}
}

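Since the QoS Control field is transmitted little-endian, qc[0] carries bits 0-7 and qc[1] bits 8-15; assuming the mainline flag values (IEEE80211_QOS_CTL_EOSP = 0x0010, IEEE80211_QOS_CTL_RSPI = 0x0400 -- an assumption, see ieee80211.h), the extraction above works out as in this sketch:

/* Worked example (flag values as assumed above): a received QoS Control
 * field of 0x0410 arrives as qc[0] = 0x10, qc[1] = 0x04, so
 *   rspi = qc[1] & (0x0400 >> 8) = 0x04  (set)
 *   eosp = qc[0] & 0x0010        = 0x10  (set)
 * i.e. an RSPI+EOSP trigger, which per the ownership table in
 * mpsp_trigger_send() marks the receiving STA as MPSP owner.
 */
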
/**
 * ieee80211_mps_frame_release - release buffered frames in response to beacon
 *
 * @sta: mesh STA
 * @elems: beacon IEs
 *
 * For peers, if we have individually-addressed frames buffered or the peer
 * indicates buffered frames, send a corresponding MPSP trigger frame. Since
 * we do not evaluate the awake window duration, QoS Nulls are used as MPSP
 * trigger frames. If the neighbour STA is not a peer, only send single frames.
 */
void ieee80211_mps_frame_release(struct sta_info *sta,
				 struct ieee802_11_elems *elems)
{
	int ac, buffer_local = 0;
	bool has_buffered = false;

	/* TIM map only for LLID <= IEEE80211_MAX_AID */
	if (sta->plink_state == NL80211_PLINK_ESTAB)
		has_buffered = ieee80211_check_tim(elems->tim, elems->tim_len,
				le16_to_cpu(sta->llid) % IEEE80211_MAX_AID);

	if (has_buffered)
		mps_dbg(sta->sdata, "%pM indicates buffered frames\n",
			sta->sta.addr);

	/* only transmit to PS STA with announced, non-zero awake window */
	if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
	    (!elems->awake_window || !le16_to_cpu(*elems->awake_window)))
		return;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
		buffer_local += skb_queue_len(&sta->ps_tx_buf[ac]) +
				skb_queue_len(&sta->tx_filtered[ac]);

	if (!has_buffered && !buffer_local)
		return;

	if (sta->plink_state == NL80211_PLINK_ESTAB)
		mpsp_trigger_send(sta, has_buffered, !buffer_local);
	else
		mps_frame_deliver(sta, 1);
}
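
The TIM lookup above treats the peer's link ID like an AID; a small sketch of that check in isolation (the helper name is the editor's, not mac80211's):

/* Illustration only: a peer with llid = cpu_to_le16(5) has buffered
 * frames pending iff bit 5 of the TIM partial virtual bitmap is set,
 * exactly as for AID 5 in infrastructure mode.
 */
static bool __maybe_unused
mps_peer_has_buffered(struct sta_info *sta, struct ieee802_11_elems *elems)
{
	return ieee80211_check_tim(elems->tim, elems->tim_len,
				   le16_to_cpu(sta->llid) %
				   IEEE80211_MAX_AID);
}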
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index e930175771ff..9f6464f3e05f 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -30,11 +30,13 @@
30#include "rate.h" 30#include "rate.h"
31#include "led.h" 31#include "led.h"
32 32
33#define IEEE80211_AUTH_TIMEOUT (HZ / 5) 33#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
34#define IEEE80211_AUTH_MAX_TRIES 3 34#define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10)
35#define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5) 35#define IEEE80211_AUTH_MAX_TRIES 3
36#define IEEE80211_ASSOC_TIMEOUT (HZ / 5) 36#define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5)
37#define IEEE80211_ASSOC_MAX_TRIES 3 37#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
38#define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10)
39#define IEEE80211_ASSOC_MAX_TRIES 3
38 40
39static int max_nullfunc_tries = 2; 41static int max_nullfunc_tries = 2;
40module_param(max_nullfunc_tries, int, 0644); 42module_param(max_nullfunc_tries, int, 0644);
@@ -112,6 +114,9 @@ enum rx_mgmt_action {
112 114
113 /* caller must call cfg80211_send_assoc_timeout() */ 115 /* caller must call cfg80211_send_assoc_timeout() */
114 RX_MGMT_CFG80211_ASSOC_TIMEOUT, 116 RX_MGMT_CFG80211_ASSOC_TIMEOUT,
117
118 /* used when a processed beacon causes a deauth */
119 RX_MGMT_CFG80211_TX_DEAUTH,
115}; 120};
116 121
117/* utils */ 122/* utils */
@@ -172,79 +177,331 @@ static int ecw2cw(int ecw)
172 return (1 << ecw) - 1; 177 return (1 << ecw) - 1;
173} 178}
174 179
175static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata, 180static u32 chandef_downgrade(struct cfg80211_chan_def *c)
176 struct ieee80211_ht_operation *ht_oper, 181{
177 const u8 *bssid, bool reconfig) 182 u32 ret;
183 int tmp;
184
185 switch (c->width) {
186 case NL80211_CHAN_WIDTH_20:
187 c->width = NL80211_CHAN_WIDTH_20_NOHT;
188 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
189 break;
190 case NL80211_CHAN_WIDTH_40:
191 c->width = NL80211_CHAN_WIDTH_20;
192 c->center_freq1 = c->chan->center_freq;
193 ret = IEEE80211_STA_DISABLE_40MHZ |
194 IEEE80211_STA_DISABLE_VHT;
195 break;
196 case NL80211_CHAN_WIDTH_80:
197 tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
198 /* n_P40 */
199 tmp /= 2;
200 /* freq_P40 */
201 c->center_freq1 = c->center_freq1 - 20 + 40 * tmp;
202 c->width = NL80211_CHAN_WIDTH_40;
203 ret = IEEE80211_STA_DISABLE_VHT;
204 break;
205 case NL80211_CHAN_WIDTH_80P80:
206 c->center_freq2 = 0;
207 c->width = NL80211_CHAN_WIDTH_80;
208 ret = IEEE80211_STA_DISABLE_80P80MHZ |
209 IEEE80211_STA_DISABLE_160MHZ;
210 break;
211 case NL80211_CHAN_WIDTH_160:
212 /* n_P20 */
213 tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
214 /* n_P80 */
215 tmp /= 4;
216 c->center_freq1 = c->center_freq1 - 40 + 80 * tmp;
217 c->width = NL80211_CHAN_WIDTH_80;
218 ret = IEEE80211_STA_DISABLE_80P80MHZ |
219 IEEE80211_STA_DISABLE_160MHZ;
220 break;
221 default:
222 case NL80211_CHAN_WIDTH_20_NOHT:
223 WARN_ON_ONCE(1);
224 c->width = NL80211_CHAN_WIDTH_20_NOHT;
225 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
226 break;
227 }
228
229 WARN_ON_ONCE(!cfg80211_chandef_valid(c));
230
231 return ret;
232}
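To make the center-frequency arithmetic in chandef_downgrade() concrete, a worked example for the 80 MHz case (the frequencies are chosen for illustration):

/* Worked example, NL80211_CHAN_WIDTH_80 branch:
 *   chan->center_freq = 5180 (channel 36), center_freq1 = 5210
 *   tmp = (30 + 5180 - 5210) / 20 = 0   n_P20: index of the primary
 *                                        20 MHz channel in the block
 *   tmp /= 2                      = 0   n_P40: index of its 40 MHz half
 *   center_freq1 = 5210 - 20 + 40 * 0 = 5190
 * The downgraded 40 MHz channel (5170-5210) still contains the primary
 * channel at 5180. With a primary of 5240 (channel 48) instead:
 *   tmp = (30 + 5240 - 5210) / 20 = 3, tmp /= 2 -> 1,
 *   center_freq1 = 5210 - 20 + 40 = 5230 (40 MHz spanning 5210-5250).
 */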
233
234static u32
235ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
236 struct ieee80211_supported_band *sband,
237 struct ieee80211_channel *channel,
238 const struct ieee80211_ht_operation *ht_oper,
239 const struct ieee80211_vht_operation *vht_oper,
240 struct cfg80211_chan_def *chandef, bool verbose)
241{
242 struct cfg80211_chan_def vht_chandef;
243 u32 ht_cfreq, ret;
244
245 chandef->chan = channel;
246 chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
247 chandef->center_freq1 = channel->center_freq;
248 chandef->center_freq2 = 0;
249
250 if (!ht_oper || !sband->ht_cap.ht_supported) {
251 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
252 goto out;
253 }
254
255 chandef->width = NL80211_CHAN_WIDTH_20;
256
257 ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
258 channel->band);
259 /* check that channel matches the right operating channel */
260 if (channel->center_freq != ht_cfreq) {
261 /*
262 * It's possible that some APs are confused here;
263 * Netgear WNDR3700 sometimes reports a channel 4 higher
264 * than the actual one in association responses, but
265 * since we look at probe response/beacon data here
266 * it should be OK.
267 */
268 if (verbose)
269 sdata_info(sdata,
270 "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
271 channel->center_freq, ht_cfreq,
272 ht_oper->primary_chan, channel->band);
273 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
274 goto out;
275 }
276
277 /* check 40 MHz support, if we have it */
278 if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
279 switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
280 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
281 chandef->width = NL80211_CHAN_WIDTH_40;
282 chandef->center_freq1 += 10;
283 break;
284 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
285 chandef->width = NL80211_CHAN_WIDTH_40;
286 chandef->center_freq1 -= 10;
287 break;
288 }
289 } else {
290 /* 40 MHz (and 80 MHz) must be supported for VHT */
291 ret = IEEE80211_STA_DISABLE_VHT;
292 goto out;
293 }
294
295 if (!vht_oper || !sband->vht_cap.vht_supported) {
296 ret = IEEE80211_STA_DISABLE_VHT;
297 goto out;
298 }
299
300 vht_chandef.chan = channel;
301 vht_chandef.center_freq1 =
302 ieee80211_channel_to_frequency(vht_oper->center_freq_seg1_idx,
303 channel->band);
304 vht_chandef.center_freq2 = 0;
305
306 if (vht_oper->center_freq_seg2_idx)
307 vht_chandef.center_freq2 =
308 ieee80211_channel_to_frequency(
309 vht_oper->center_freq_seg2_idx,
310 channel->band);
311
312 switch (vht_oper->chan_width) {
313 case IEEE80211_VHT_CHANWIDTH_USE_HT:
314 vht_chandef.width = chandef->width;
315 break;
316 case IEEE80211_VHT_CHANWIDTH_80MHZ:
317 vht_chandef.width = NL80211_CHAN_WIDTH_80;
318 break;
319 case IEEE80211_VHT_CHANWIDTH_160MHZ:
320 vht_chandef.width = NL80211_CHAN_WIDTH_160;
321 break;
322 case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
323 vht_chandef.width = NL80211_CHAN_WIDTH_80P80;
324 break;
325 default:
326 if (verbose)
327 sdata_info(sdata,
328 "AP VHT operation IE has invalid channel width (%d), disable VHT\n",
329 vht_oper->chan_width);
330 ret = IEEE80211_STA_DISABLE_VHT;
331 goto out;
332 }
333
334 if (!cfg80211_chandef_valid(&vht_chandef)) {
335 if (verbose)
336 sdata_info(sdata,
337 "AP VHT information is invalid, disable VHT\n");
338 ret = IEEE80211_STA_DISABLE_VHT;
339 goto out;
340 }
341
342 if (cfg80211_chandef_identical(chandef, &vht_chandef)) {
343 ret = 0;
344 goto out;
345 }
346
347 if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
348 if (verbose)
349 sdata_info(sdata,
350 "AP VHT information doesn't match HT, disable VHT\n");
351 ret = IEEE80211_STA_DISABLE_VHT;
352 goto out;
353 }
354
355 *chandef = vht_chandef;
356
357 ret = 0;
358
359out:
360 /* don't print the message below for VHT mismatch if VHT is disabled */
361 if (ret & IEEE80211_STA_DISABLE_VHT)
362 vht_chandef = *chandef;
363
364 while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
365 IEEE80211_CHAN_DISABLED)) {
366 if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
367 ret = IEEE80211_STA_DISABLE_HT |
368 IEEE80211_STA_DISABLE_VHT;
369 goto out;
370 }
371
372 ret |= chandef_downgrade(chandef);
373 }
374
375 if (chandef->width != vht_chandef.width && verbose)
376 sdata_info(sdata,
377 "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
378
379 WARN_ON_ONCE(!cfg80211_chandef_valid(chandef));
380 return ret;
381}
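Similarly, for the HT 40 MHz branch of ieee80211_determine_chantype() above, the secondary channel offset just shifts center_freq1 by half a channel width:

/* Worked example: primary channel 36 (5180 MHz).
 *   SEC_ABOVE: center_freq1 = 5180 + 10 = 5190 (40 MHz over 5170-5210)
 *   SEC_BELOW: center_freq1 = 5180 - 10 = 5170 (40 MHz over 5150-5190)
 * If the resulting chandef is not usable (e.g. part of it carries a
 * regulatory IEEE80211_CHAN_DISABLED flag), the loop calling
 * chandef_downgrade() at the end of the function narrows it again.
 */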
382
383static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
384 struct sta_info *sta,
385 const struct ieee80211_ht_operation *ht_oper,
386 const struct ieee80211_vht_operation *vht_oper,
387 const u8 *bssid, u32 *changed)
178{ 388{
179 struct ieee80211_local *local = sdata->local; 389 struct ieee80211_local *local = sdata->local;
390 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
180 struct ieee80211_supported_band *sband; 391 struct ieee80211_supported_band *sband;
181 struct ieee80211_chanctx_conf *chanctx_conf;
182 struct ieee80211_channel *chan; 392 struct ieee80211_channel *chan;
183 struct sta_info *sta; 393 struct cfg80211_chan_def chandef;
184 u32 changed = 0;
185 u16 ht_opmode; 394 u16 ht_opmode;
186 bool disable_40 = false; 395 u32 flags;
396 enum ieee80211_sta_rx_bandwidth new_sta_bw;
397 int ret;
187 398
188 rcu_read_lock(); 399 /* if HT was/is disabled, don't track any bandwidth changes */
189 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); 400 if (ifmgd->flags & IEEE80211_STA_DISABLE_HT || !ht_oper)
190 if (WARN_ON(!chanctx_conf)) {
191 rcu_read_unlock();
192 return 0; 401 return 0;
193 } 402
194 chan = chanctx_conf->def.chan; 403 /* don't check VHT if we associated as non-VHT station */
195 rcu_read_unlock(); 404 if (ifmgd->flags & IEEE80211_STA_DISABLE_VHT)
405 vht_oper = NULL;
406
407 if (WARN_ON_ONCE(!sta))
408 return -EINVAL;
409
410 chan = sdata->vif.bss_conf.chandef.chan;
196 sband = local->hw.wiphy->bands[chan->band]; 411 sband = local->hw.wiphy->bands[chan->band];
197 412
198 switch (sdata->vif.bss_conf.chandef.width) { 413 /* calculate new channel (type) based on HT/VHT operation IEs */
414 flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper,
415 vht_oper, &chandef, false);
416
417 /*
418 * Downgrade the new channel if we associated with restricted
419 * capabilities. For example, if we associated as a 20 MHz STA
420 * to a 40 MHz AP (due to regulatory, capabilities or config
421 * reasons) then switching to a 40 MHz channel now won't do us
422 * any good -- we couldn't use it with the AP.
423 */
424 if (ifmgd->flags & IEEE80211_STA_DISABLE_80P80MHZ &&
425 chandef.width == NL80211_CHAN_WIDTH_80P80)
426 flags |= chandef_downgrade(&chandef);
427 if (ifmgd->flags & IEEE80211_STA_DISABLE_160MHZ &&
428 chandef.width == NL80211_CHAN_WIDTH_160)
429 flags |= chandef_downgrade(&chandef);
430 if (ifmgd->flags & IEEE80211_STA_DISABLE_40MHZ &&
431 chandef.width > NL80211_CHAN_WIDTH_20)
432 flags |= chandef_downgrade(&chandef);
433
434 if (cfg80211_chandef_identical(&chandef, &sdata->vif.bss_conf.chandef))
435 return 0;
436
437 sdata_info(sdata,
438 "AP %pM changed bandwidth, new config is %d MHz, width %d (%d/%d MHz)\n",
439 ifmgd->bssid, chandef.chan->center_freq, chandef.width,
440 chandef.center_freq1, chandef.center_freq2);
441
442 if (flags != (ifmgd->flags & (IEEE80211_STA_DISABLE_HT |
443 IEEE80211_STA_DISABLE_VHT |
444 IEEE80211_STA_DISABLE_40MHZ |
445 IEEE80211_STA_DISABLE_80P80MHZ |
446 IEEE80211_STA_DISABLE_160MHZ)) ||
447 !cfg80211_chandef_valid(&chandef)) {
448 sdata_info(sdata,
449 "AP %pM changed bandwidth in a way we can't support - disconnect\n",
450 ifmgd->bssid);
451 return -EINVAL;
452 }
453
454 switch (chandef.width) {
455 case NL80211_CHAN_WIDTH_20_NOHT:
456 case NL80211_CHAN_WIDTH_20:
457 new_sta_bw = IEEE80211_STA_RX_BW_20;
458 break;
199 case NL80211_CHAN_WIDTH_40: 459 case NL80211_CHAN_WIDTH_40:
200 if (sdata->vif.bss_conf.chandef.chan->center_freq > 460 new_sta_bw = IEEE80211_STA_RX_BW_40;
201 sdata->vif.bss_conf.chandef.center_freq1 &&
202 chan->flags & IEEE80211_CHAN_NO_HT40MINUS)
203 disable_40 = true;
204 if (sdata->vif.bss_conf.chandef.chan->center_freq <
205 sdata->vif.bss_conf.chandef.center_freq1 &&
206 chan->flags & IEEE80211_CHAN_NO_HT40PLUS)
207 disable_40 = true;
208 break; 461 break;
209 default: 462 case NL80211_CHAN_WIDTH_80:
463 new_sta_bw = IEEE80211_STA_RX_BW_80;
210 break; 464 break;
465 case NL80211_CHAN_WIDTH_80P80:
466 case NL80211_CHAN_WIDTH_160:
467 new_sta_bw = IEEE80211_STA_RX_BW_160;
468 break;
469 default:
470 return -EINVAL;
211 } 471 }
212 472
213 /* This can change during the lifetime of the BSS */ 473 if (new_sta_bw > sta->cur_max_bandwidth)
214 if (!(ht_oper->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) 474 new_sta_bw = sta->cur_max_bandwidth;
215 disable_40 = true;
216
217 mutex_lock(&local->sta_mtx);
218 sta = sta_info_get(sdata, bssid);
219
220 WARN_ON_ONCE(!sta);
221
222 if (sta && !sta->supports_40mhz)
223 disable_40 = true;
224 475
225 if (sta && (!reconfig || 476 if (new_sta_bw < sta->sta.bandwidth) {
226 (disable_40 != !(sta->sta.ht_cap.cap & 477 sta->sta.bandwidth = new_sta_bw;
227 IEEE80211_HT_CAP_SUP_WIDTH_20_40)))) { 478 rate_control_rate_update(local, sband, sta,
479 IEEE80211_RC_BW_CHANGED);
480 }
228 481
229 if (disable_40) 482 ret = ieee80211_vif_change_bandwidth(sdata, &chandef, changed);
230 sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; 483 if (ret) {
231 else 484 sdata_info(sdata,
232 sta->sta.ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 485 "AP %pM changed bandwidth to incompatible one - disconnect\n",
486 ifmgd->bssid);
487 return ret;
488 }
233 489
490 if (new_sta_bw > sta->sta.bandwidth) {
491 sta->sta.bandwidth = new_sta_bw;
234 rate_control_rate_update(local, sband, sta, 492 rate_control_rate_update(local, sband, sta,
235 IEEE80211_RC_BW_CHANGED); 493 IEEE80211_RC_BW_CHANGED);
236 } 494 }
237 mutex_unlock(&local->sta_mtx);
238 495
239 ht_opmode = le16_to_cpu(ht_oper->operation_mode); 496 ht_opmode = le16_to_cpu(ht_oper->operation_mode);
240 497
241 /* if bss configuration changed store the new one */ 498 /* if bss configuration changed store the new one */
242 if (!reconfig || (sdata->vif.bss_conf.ht_operation_mode != ht_opmode)) { 499 if (sdata->vif.bss_conf.ht_operation_mode != ht_opmode) {
243 changed |= BSS_CHANGED_HT; 500 *changed |= BSS_CHANGED_HT;
244 sdata->vif.bss_conf.ht_operation_mode = ht_opmode; 501 sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
245 } 502 }
246 503
247 return changed; 504 return 0;
248} 505}
249 506
250/* frame sending functions */ 507/* frame sending functions */
@@ -644,6 +901,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
644 drv_mgd_prepare_tx(local, sdata); 901 drv_mgd_prepare_tx(local, sdata);
645 902
646 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 903 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
904 if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
905 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS |
906 IEEE80211_TX_INTFL_MLME_CONN_TX;
647 ieee80211_tx_skb(sdata, skb); 907 ieee80211_tx_skb(sdata, skb);
648} 908}
649 909
@@ -680,7 +940,8 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
680 if (powersave) 940 if (powersave)
681 nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); 941 nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
682 942
683 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 943 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
944 IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
684 if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL | 945 if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
685 IEEE80211_STA_CONNECTION_POLL)) 946 IEEE80211_STA_CONNECTION_POLL))
686 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE; 947 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE;
@@ -784,10 +1045,10 @@ static void ieee80211_chswitch_timer(unsigned long data)
784 ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work); 1045 ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
785} 1046}
786 1047
787void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, 1048void
788 struct ieee80211_channel_sw_ie *sw_elem, 1049ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
789 struct ieee80211_bss *bss, 1050 const struct ieee80211_channel_sw_ie *sw_elem,
790 u64 timestamp) 1051 struct ieee80211_bss *bss, u64 timestamp)
791{ 1052{
792 struct cfg80211_bss *cbss = 1053 struct cfg80211_bss *cbss =
793 container_of((void *)bss, struct cfg80211_bss, priv); 1054 container_of((void *)bss, struct cfg80211_bss, priv);
@@ -946,39 +1207,6 @@ static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
946 return 0; 1207 return 0;
947} 1208}
948 1209
949void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif)
950{
951 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
952 struct ieee80211_local *local = sdata->local;
953 struct ieee80211_conf *conf = &local->hw.conf;
954
955 WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION ||
956 !(local->hw.flags & IEEE80211_HW_SUPPORTS_PS) ||
957 (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS));
958
959 local->disable_dynamic_ps = false;
960 conf->dynamic_ps_timeout = local->dynamic_ps_user_timeout;
961}
962EXPORT_SYMBOL(ieee80211_enable_dyn_ps);
963
964void ieee80211_disable_dyn_ps(struct ieee80211_vif *vif)
965{
966 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
967 struct ieee80211_local *local = sdata->local;
968 struct ieee80211_conf *conf = &local->hw.conf;
969
970 WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION ||
971 !(local->hw.flags & IEEE80211_HW_SUPPORTS_PS) ||
972 (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS));
973
974 local->disable_dynamic_ps = true;
975 conf->dynamic_ps_timeout = 0;
976 del_timer_sync(&local->dynamic_ps_timer);
977 ieee80211_queue_work(&local->hw,
978 &local->dynamic_ps_enable_work);
979}
980EXPORT_SYMBOL(ieee80211_disable_dyn_ps);
981
982/* powersave */ 1210/* powersave */
983static void ieee80211_enable_ps(struct ieee80211_local *local, 1211static void ieee80211_enable_ps(struct ieee80211_local *local,
984 struct ieee80211_sub_if_data *sdata) 1212 struct ieee80211_sub_if_data *sdata)
@@ -1081,7 +1309,6 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
1081 } 1309 }
1082 1310
1083 if (count == 1 && ieee80211_powersave_allowed(found)) { 1311 if (count == 1 && ieee80211_powersave_allowed(found)) {
1084 struct ieee80211_conf *conf = &local->hw.conf;
1085 s32 beaconint_us; 1312 s32 beaconint_us;
1086 1313
1087 if (latency < 0) 1314 if (latency < 0)
@@ -1105,10 +1332,7 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
1105 else 1332 else
1106 timeout = 100; 1333 timeout = 100;
1107 } 1334 }
1108 local->dynamic_ps_user_timeout = timeout; 1335 local->hw.conf.dynamic_ps_timeout = timeout;
1109 if (!local->disable_dynamic_ps)
1110 conf->dynamic_ps_timeout =
1111 local->dynamic_ps_user_timeout;
1112 1336
1113 if (beaconint_us > latency) { 1337 if (beaconint_us > latency) {
1114 local->ps_sdata = NULL; 1338 local->ps_sdata = NULL;
@@ -1178,8 +1402,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
1178 if (local->hw.conf.flags & IEEE80211_CONF_PS) 1402 if (local->hw.conf.flags & IEEE80211_CONF_PS)
1179 return; 1403 return;
1180 1404
1181 if (!local->disable_dynamic_ps && 1405 if (local->hw.conf.dynamic_ps_timeout > 0) {
1182 local->hw.conf.dynamic_ps_timeout > 0) {
1183 /* don't enter PS if TX frames are pending */ 1406 /* don't enter PS if TX frames are pending */
1184 if (drv_tx_frames_pending(local)) { 1407 if (drv_tx_frames_pending(local)) {
1185 mod_timer(&local->dynamic_ps_timer, jiffies + 1408 mod_timer(&local->dynamic_ps_timer, jiffies +
@@ -1244,16 +1467,30 @@ void ieee80211_dynamic_ps_timer(unsigned long data)
1244 ieee80211_queue_work(&local->hw, &local->dynamic_ps_enable_work); 1467 ieee80211_queue_work(&local->hw, &local->dynamic_ps_enable_work);
1245} 1468}
1246 1469
1470void ieee80211_dfs_cac_timer_work(struct work_struct *work)
1471{
1472 struct delayed_work *delayed_work =
1473 container_of(work, struct delayed_work, work);
1474 struct ieee80211_sub_if_data *sdata =
1475 container_of(delayed_work, struct ieee80211_sub_if_data,
1476 dfs_cac_timer_work);
1477
1478 ieee80211_vif_release_channel(sdata);
1479
1480 cfg80211_cac_event(sdata->dev, NL80211_RADAR_CAC_FINISHED, GFP_KERNEL);
1481}
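The two container_of() steps above are the usual pattern for recovering the embedding object from a delayed work callback; the arming side would look roughly like this (the 60 * HZ CAC duration is an illustrative placeholder, not necessarily the value mac80211 uses):

/* Sketch only: arm the CAC timer when radar detection starts. */
ieee80211_queue_delayed_work(&sdata->local->hw,
			     &sdata->dfs_cac_timer_work,
			     60 * HZ);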
1482
1247/* MLME */ 1483/* MLME */
1248static bool ieee80211_sta_wmm_params(struct ieee80211_local *local, 1484static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
1249 struct ieee80211_sub_if_data *sdata, 1485 struct ieee80211_sub_if_data *sdata,
1250 u8 *wmm_param, size_t wmm_param_len) 1486 const u8 *wmm_param, size_t wmm_param_len)
1251{ 1487{
1252 struct ieee80211_tx_queue_params params; 1488 struct ieee80211_tx_queue_params params;
1253 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1489 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1254 size_t left; 1490 size_t left;
1255 int count; 1491 int count;
1256 u8 *pos, uapsd_queues = 0; 1492 const u8 *pos;
1493 u8 uapsd_queues = 0;
1257 1494
1258 if (!local->ops->conf_tx) 1495 if (!local->ops->conf_tx)
1259 return false; 1496 return false;
@@ -1445,7 +1682,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
1445 1682
1446 ieee80211_led_assoc(local, 1); 1683 ieee80211_led_assoc(local, 1);
1447 1684
1448 if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) { 1685 if (sdata->u.mgd.assoc_data->have_beacon) {
1449 /* 1686 /*
1450 * If the AP is buggy we may get here with no DTIM period 1687 * If the AP is buggy we may get here with no DTIM period
1451 * known, so assume it's 1 which is the only safe assumption 1688 * known, so assume it's 1 which is the only safe assumption
@@ -1453,6 +1690,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
1453 * probably just won't work at all. 1690 * probably just won't work at all.
1454 */ 1691 */
1455 bss_conf->dtim_period = sdata->u.mgd.dtim_period ?: 1; 1692 bss_conf->dtim_period = sdata->u.mgd.dtim_period ?: 1;
1693 bss_info_changed |= BSS_CHANGED_DTIM_PERIOD;
1456 } else { 1694 } else {
1457 bss_conf->dtim_period = 0; 1695 bss_conf->dtim_period = 0;
1458 } 1696 }
@@ -1465,10 +1703,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
1465 bss_info_changed |= BSS_CHANGED_CQM; 1703 bss_info_changed |= BSS_CHANGED_CQM;
1466 1704
1467 /* Enable ARP filtering */ 1705 /* Enable ARP filtering */
1468 if (bss_conf->arp_filter_enabled != sdata->arp_filter_state) { 1706 if (bss_conf->arp_addr_cnt)
1469 bss_conf->arp_filter_enabled = sdata->arp_filter_state;
1470 bss_info_changed |= BSS_CHANGED_ARP_FILTER; 1707 bss_info_changed |= BSS_CHANGED_ARP_FILTER;
1471 }
1472 1708
1473 ieee80211_bss_info_change_notify(sdata, bss_info_changed); 1709 ieee80211_bss_info_change_notify(sdata, bss_info_changed);
1474 1710
@@ -1489,7 +1725,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1489{ 1725{
1490 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1726 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1491 struct ieee80211_local *local = sdata->local; 1727 struct ieee80211_local *local = sdata->local;
1492 struct sta_info *sta;
1493 u32 changed = 0; 1728 u32 changed = 0;
1494 1729
1495 ASSERT_MGD_MTX(ifmgd); 1730 ASSERT_MGD_MTX(ifmgd);
@@ -1521,14 +1756,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1521 netif_tx_stop_all_queues(sdata->dev); 1756 netif_tx_stop_all_queues(sdata->dev);
1522 netif_carrier_off(sdata->dev); 1757 netif_carrier_off(sdata->dev);
1523 1758
1524 mutex_lock(&local->sta_mtx);
1525 sta = sta_info_get(sdata, ifmgd->bssid);
1526 if (sta) {
1527 set_sta_flag(sta, WLAN_STA_BLOCK_BA);
1528 ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA);
1529 }
1530 mutex_unlock(&local->sta_mtx);
1531
1532 /* 1759 /*
1533 * if we want to get out of ps before disassoc (why?) we have 1760 * if we want to get out of ps before disassoc (why?) we have
1534 * to do it before sending disassoc, as otherwise the null-packet 1761 * to do it before sending disassoc, as otherwise the null-packet
@@ -1582,10 +1809,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1582 cancel_work_sync(&local->dynamic_ps_enable_work); 1809 cancel_work_sync(&local->dynamic_ps_enable_work);
1583 1810
1584 /* Disable ARP filtering */ 1811 /* Disable ARP filtering */
1585 if (sdata->vif.bss_conf.arp_filter_enabled) { 1812 if (sdata->vif.bss_conf.arp_addr_cnt)
1586 sdata->vif.bss_conf.arp_filter_enabled = false;
1587 changed |= BSS_CHANGED_ARP_FILTER; 1813 changed |= BSS_CHANGED_ARP_FILTER;
1588 }
1589 1814
1590 sdata->vif.bss_conf.qos = false; 1815 sdata->vif.bss_conf.qos = false;
1591 changed |= BSS_CHANGED_QOS; 1816 changed |= BSS_CHANGED_QOS;
@@ -1668,17 +1893,18 @@ void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
1668 if (!ieee80211_is_data(hdr->frame_control)) 1893 if (!ieee80211_is_data(hdr->frame_control))
1669 return; 1894 return;
1670 1895
1671 if (ack)
1672 ieee80211_sta_reset_conn_monitor(sdata);
1673
1674 if (ieee80211_is_nullfunc(hdr->frame_control) && 1896 if (ieee80211_is_nullfunc(hdr->frame_control) &&
1675 sdata->u.mgd.probe_send_count > 0) { 1897 sdata->u.mgd.probe_send_count > 0) {
1676 if (ack) 1898 if (ack)
1677 sdata->u.mgd.probe_send_count = 0; 1899 ieee80211_sta_reset_conn_monitor(sdata);
1678 else 1900 else
1679 sdata->u.mgd.nullfunc_failed = true; 1901 sdata->u.mgd.nullfunc_failed = true;
1680 ieee80211_queue_work(&sdata->local->hw, &sdata->work); 1902 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
1903 return;
1681 } 1904 }
1905
1906 if (ack)
1907 ieee80211_sta_reset_conn_monitor(sdata);
1682} 1908}
1683 1909
1684static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) 1910static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
@@ -1719,7 +1945,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
1719 ssid_len = ssid[1]; 1945 ssid_len = ssid[1];
1720 1946
1721 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL, 1947 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL,
1722 0, (u32) -1, true, false, 1948 0, (u32) -1, true, 0,
1723 ifmgd->associated->channel, false); 1949 ifmgd->associated->channel, false);
1724 rcu_read_unlock(); 1950 rcu_read_unlock();
1725 } 1951 }
@@ -1753,7 +1979,7 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
1753 1979
1754 if (beacon) 1980 if (beacon)
1755 mlme_dbg_ratelimited(sdata, 1981 mlme_dbg_ratelimited(sdata,
1756 "detected beacon loss from AP - sending probe request\n"); 1982 "detected beacon loss from AP - probing\n");
1757 1983
1758 ieee80211_cqm_rssi_notify(&sdata->vif, 1984 ieee80211_cqm_rssi_notify(&sdata->vif,
1759 NL80211_CQM_RSSI_BEACON_LOSS_EVENT, GFP_KERNEL); 1985 NL80211_CQM_RSSI_BEACON_LOSS_EVENT, GFP_KERNEL);
@@ -1834,11 +2060,9 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
1834} 2060}
1835EXPORT_SYMBOL(ieee80211_ap_probereq_get); 2061EXPORT_SYMBOL(ieee80211_ap_probereq_get);
1836 2062
1837static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata, 2063static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
1838 bool transmit_frame)
1839{ 2064{
1840 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2065 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1841 struct ieee80211_local *local = sdata->local;
1842 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; 2066 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
1843 2067
1844 mutex_lock(&ifmgd->mtx); 2068 mutex_lock(&ifmgd->mtx);
@@ -1849,8 +2073,10 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata,
1849 2073
1850 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, 2074 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
1851 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, 2075 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
1852 transmit_frame, frame_buf); 2076 true, frame_buf);
1853 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; 2077 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
2078 ieee80211_wake_queues_by_reason(&sdata->local->hw,
2079 IEEE80211_QUEUE_STOP_REASON_CSA);
1854 mutex_unlock(&ifmgd->mtx); 2080 mutex_unlock(&ifmgd->mtx);
1855 2081
1856 /* 2082 /*
@@ -1858,10 +2084,6 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata,
1858 * but that's not a problem. 2084 * but that's not a problem.
1859 */ 2085 */
1860 cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN); 2086 cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN);
1861
1862 mutex_lock(&local->mtx);
1863 ieee80211_recalc_idle(local);
1864 mutex_unlock(&local->mtx);
1865} 2087}
1866 2088
1867static void ieee80211_beacon_connection_loss_work(struct work_struct *work) 2089static void ieee80211_beacon_connection_loss_work(struct work_struct *work)
@@ -1880,10 +2102,10 @@ static void ieee80211_beacon_connection_loss_work(struct work_struct *work)
1880 rcu_read_unlock(); 2102 rcu_read_unlock();
1881 } 2103 }
1882 2104
1883 if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) { 2105 if (ifmgd->connection_loss) {
1884 sdata_info(sdata, "Connection to AP %pM lost\n", 2106 sdata_info(sdata, "Connection to AP %pM lost\n",
1885 ifmgd->bssid); 2107 ifmgd->bssid);
1886 __ieee80211_disconnect(sdata, false); 2108 __ieee80211_disconnect(sdata);
1887 } else { 2109 } else {
1888 ieee80211_mgd_probe_ap(sdata, true); 2110 ieee80211_mgd_probe_ap(sdata, true);
1889 } 2111 }
@@ -1895,9 +2117,7 @@ static void ieee80211_csa_connection_drop_work(struct work_struct *work)
1895 container_of(work, struct ieee80211_sub_if_data, 2117 container_of(work, struct ieee80211_sub_if_data,
1896 u.mgd.csa_connection_drop_work); 2118 u.mgd.csa_connection_drop_work);
1897 2119
1898 ieee80211_wake_queues_by_reason(&sdata->local->hw, 2120 __ieee80211_disconnect(sdata);
1899 IEEE80211_QUEUE_STOP_REASON_CSA);
1900 __ieee80211_disconnect(sdata, true);
1901} 2121}
1902 2122
1903void ieee80211_beacon_loss(struct ieee80211_vif *vif) 2123void ieee80211_beacon_loss(struct ieee80211_vif *vif)
@@ -1908,6 +2128,7 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif)
1908 trace_api_beacon_loss(sdata); 2128 trace_api_beacon_loss(sdata);
1909 2129
1910 WARN_ON(hw->flags & IEEE80211_HW_CONNECTION_MONITOR); 2130 WARN_ON(hw->flags & IEEE80211_HW_CONNECTION_MONITOR);
2131 sdata->u.mgd.connection_loss = false;
1911 ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work); 2132 ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
1912} 2133}
1913EXPORT_SYMBOL(ieee80211_beacon_loss); 2134EXPORT_SYMBOL(ieee80211_beacon_loss);
@@ -1919,7 +2140,7 @@ void ieee80211_connection_loss(struct ieee80211_vif *vif)
1919 2140
1920 trace_api_connection_loss(sdata); 2141 trace_api_connection_loss(sdata);
1921 2142
1922 WARN_ON(!(hw->flags & IEEE80211_HW_CONNECTION_MONITOR)); 2143 sdata->u.mgd.connection_loss = true;
1923 ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work); 2144 ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
1924} 2145}
1925EXPORT_SYMBOL(ieee80211_connection_loss); 2146EXPORT_SYMBOL(ieee80211_connection_loss);
@@ -1941,7 +2162,7 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
1941 ieee80211_vif_release_channel(sdata); 2162 ieee80211_vif_release_channel(sdata);
1942 } 2163 }
1943 2164
1944 cfg80211_put_bss(auth_data->bss); 2165 cfg80211_put_bss(sdata->local->hw.wiphy, auth_data->bss);
1945 kfree(auth_data); 2166 kfree(auth_data);
1946 sdata->u.mgd.auth_data = NULL; 2167 sdata->u.mgd.auth_data = NULL;
1947} 2168}
@@ -1949,9 +2170,11 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
1949static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata, 2170static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
1950 struct ieee80211_mgmt *mgmt, size_t len) 2171 struct ieee80211_mgmt *mgmt, size_t len)
1951{ 2172{
2173 struct ieee80211_local *local = sdata->local;
1952 struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data; 2174 struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data;
1953 u8 *pos; 2175 u8 *pos;
1954 struct ieee802_11_elems elems; 2176 struct ieee802_11_elems elems;
2177 u32 tx_flags = 0;
1955 2178
1956 pos = mgmt->u.auth.variable; 2179 pos = mgmt->u.auth.variable;
1957 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); 2180 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
@@ -1959,11 +2182,14 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
1959 return; 2182 return;
1960 auth_data->expected_transaction = 4; 2183 auth_data->expected_transaction = 4;
1961 drv_mgd_prepare_tx(sdata->local, sdata); 2184 drv_mgd_prepare_tx(sdata->local, sdata);
2185 if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
2186 tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
2187 IEEE80211_TX_INTFL_MLME_CONN_TX;
1962 ieee80211_send_auth(sdata, 3, auth_data->algorithm, 0, 2188 ieee80211_send_auth(sdata, 3, auth_data->algorithm, 0,
1963 elems.challenge - 2, elems.challenge_len + 2, 2189 elems.challenge - 2, elems.challenge_len + 2,
1964 auth_data->bss->bssid, auth_data->bss->bssid, 2190 auth_data->bss->bssid, auth_data->bss->bssid,
1965 auth_data->key, auth_data->key_len, 2191 auth_data->key, auth_data->key_len,
1966 auth_data->key_idx); 2192 auth_data->key_idx, tx_flags);
1967} 2193}
1968 2194
1969static enum rx_mgmt_action __must_check 2195static enum rx_mgmt_action __must_check
@@ -2030,6 +2256,7 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
2030 sdata_info(sdata, "authenticated\n"); 2256 sdata_info(sdata, "authenticated\n");
2031 ifmgd->auth_data->done = true; 2257 ifmgd->auth_data->done = true;
2032 ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC; 2258 ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
2259 ifmgd->auth_data->timeout_started = true;
2033 run_again(ifmgd, ifmgd->auth_data->timeout); 2260 run_again(ifmgd, ifmgd->auth_data->timeout);
2034 2261
2035 if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE && 2262 if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE &&
@@ -2088,10 +2315,6 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
2088 2315
2089 ieee80211_set_disassoc(sdata, 0, 0, false, NULL); 2316 ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
2090 2317
2091 mutex_lock(&sdata->local->mtx);
2092 ieee80211_recalc_idle(sdata->local);
2093 mutex_unlock(&sdata->local->mtx);
2094
2095 return RX_MGMT_CFG80211_DEAUTH; 2318 return RX_MGMT_CFG80211_DEAUTH;
2096} 2319}
2097 2320
@@ -2119,10 +2342,6 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
2119 2342
2120 ieee80211_set_disassoc(sdata, 0, 0, false, NULL); 2343 ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
2121 2344
2122 mutex_lock(&sdata->local->mtx);
2123 ieee80211_recalc_idle(sdata->local);
2124 mutex_unlock(&sdata->local->mtx);
2125
2126 return RX_MGMT_CFG80211_DISASSOC; 2345 return RX_MGMT_CFG80211_DISASSOC;
2127} 2346}
2128 2347
@@ -2232,6 +2451,24 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2232 2451
2233 ifmgd->aid = aid; 2452 ifmgd->aid = aid;
2234 2453
2454 /*
2455 * We previously checked these in the beacon/probe response, so
2456 * they should be present here. This is just a safety net.
2457 */
2458 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
2459 (!elems.wmm_param || !elems.ht_cap_elem || !elems.ht_operation)) {
2460 sdata_info(sdata,
2461 "HT AP is missing WMM params or HT capability/operation in AssocResp\n");
2462 return false;
2463 }
2464
2465 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
2466 (!elems.vht_cap_elem || !elems.vht_operation)) {
2467 sdata_info(sdata,
2468 "VHT AP is missing VHT capability/operation in AssocResp\n");
2469 return false;
2470 }
2471
2235 mutex_lock(&sdata->local->sta_mtx); 2472 mutex_lock(&sdata->local->sta_mtx);
2236 /* 2473 /*
2237 * station info was already allocated and inserted before 2474 * station info was already allocated and inserted before
@@ -2245,17 +2482,36 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2245 2482
2246 sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)]; 2483 sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)];
2247 2484
2485 /* Set up internal HT/VHT capabilities */
2248 if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) 2486 if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
2249 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 2487 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
2250 elems.ht_cap_elem, &sta->sta.ht_cap); 2488 elems.ht_cap_elem, sta);
2251
2252 sta->supports_40mhz =
2253 sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
2254 2489
2255 if (elems.vht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) 2490 if (elems.vht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
2256 ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, 2491 ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
2257 elems.vht_cap_elem, 2492 elems.vht_cap_elem, sta);
2258 &sta->sta.vht_cap); 2493
2494 /*
2495 * Some APs, e.g. Netgear WNDR3700, report invalid HT operation data
2496 * in their association response, so ignore that data for our own
2497 * configuration. If it changed since the last beacon, we'll get the
2498 * next beacon and update then.
2499 */
2500
2501 /*
2502 * If an operating mode notification IE is present, override the
2503 * NSS calculation (that would be done in rate_control_rate_init())
2504 * and use the # of streams from that element.
2505 */
2506 if (elems.opmode_notif &&
2507 !(*elems.opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)) {
2508 u8 nss;
2509
2510 nss = *elems.opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK;
2511 nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
2512 nss += 1;
2513 sta->sta.rx_nss = nss;
2514 }
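Assuming the mainline bit layout of the operating mode notification byte (RX_NSS in bits 4-6, the 0x80 bit flagging an RX-NSS-type beamforming value -- assumptions to verify against ieee80211.h), the extraction above decodes for example:

/* Worked example: *elems.opmode_notif == 0x10
 *   nss = (0x10 & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK) >> 4 = 1
 *   nss += 1  ->  sta->sta.rx_nss = 2 (AP limits us to 2 spatial streams)
 * A value such as 0x90 carries the RX_NSS_TYPE_BF bit and is skipped
 * by the check above.
 */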
2259 2515
2260 rate_control_rate_init(sta); 2516 rate_control_rate_init(sta);
2261 2517
@@ -2265,9 +2521,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2265 if (elems.wmm_param) 2521 if (elems.wmm_param)
2266 set_sta_flag(sta, WLAN_STA_WME); 2522 set_sta_flag(sta, WLAN_STA_WME);
2267 2523
2268 err = sta_info_move_state(sta, IEEE80211_STA_AUTH); 2524 err = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
2269 if (!err)
2270 err = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
2271 if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT)) 2525 if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
2272 err = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); 2526 err = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
2273 if (err) { 2527 if (err) {
@@ -2296,11 +2550,6 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2296 ieee80211_set_wmm_default(sdata, false); 2550 ieee80211_set_wmm_default(sdata, false);
2297 changed |= BSS_CHANGED_QOS; 2551 changed |= BSS_CHANGED_QOS;
2298 2552
2299 if (elems.ht_operation && elems.wmm_param &&
2300 !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
2301 changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
2302 cbss->bssid, false);
2303
2304 /* set AID and assoc capability, 2553 /* set AID and assoc capability,
2305 * ieee80211_set_associated() will tell the driver */ 2554 * ieee80211_set_associated() will tell the driver */
2306 bss_conf->aid = aid; 2555 bss_conf->aid = aid;
@@ -2374,6 +2623,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2374 "%pM rejected association temporarily; comeback duration %u TU (%u ms)\n", 2623 "%pM rejected association temporarily; comeback duration %u TU (%u ms)\n",
2375 mgmt->sa, tu, ms); 2624 mgmt->sa, tu, ms);
2376 assoc_data->timeout = jiffies + msecs_to_jiffies(ms); 2625 assoc_data->timeout = jiffies + msecs_to_jiffies(ms);
2626 assoc_data->timeout_started = true;
2377 if (ms > IEEE80211_ASSOC_TIMEOUT) 2627 if (ms > IEEE80211_ASSOC_TIMEOUT)
2378 run_again(ifmgd, assoc_data->timeout); 2628 run_again(ifmgd, assoc_data->timeout);
2379 return RX_MGMT_NONE; 2629 return RX_MGMT_NONE;
@@ -2389,7 +2639,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2389 if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) { 2639 if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
2390 /* oops -- internal error -- send timeout for now */ 2640 /* oops -- internal error -- send timeout for now */
2391 ieee80211_destroy_assoc_data(sdata, false); 2641 ieee80211_destroy_assoc_data(sdata, false);
2392 cfg80211_put_bss(*bss); 2642 cfg80211_put_bss(sdata->local->hw.wiphy, *bss);
2393 return RX_MGMT_CFG80211_ASSOC_TIMEOUT; 2643 return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
2394 } 2644 }
2395 sdata_info(sdata, "associated\n"); 2645 sdata_info(sdata, "associated\n");
@@ -2425,7 +2675,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
2425 need_ps = sdata->u.mgd.associated && !sdata->u.mgd.dtim_period; 2675 need_ps = sdata->u.mgd.associated && !sdata->u.mgd.dtim_period;
2426 2676
2427 if (elems->tim && !elems->parse_error) { 2677 if (elems->tim && !elems->parse_error) {
2428 struct ieee80211_tim_ie *tim_ie = elems->tim; 2678 const struct ieee80211_tim_ie *tim_ie = elems->tim;
2429 sdata->u.mgd.dtim_period = tim_ie->dtim_period; 2679 sdata->u.mgd.dtim_period = tim_ie->dtim_period;
2430 } 2680 }
2431 } 2681 }
@@ -2497,6 +2747,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
2497 sdata_info(sdata, "direct probe responded\n"); 2747 sdata_info(sdata, "direct probe responded\n");
2498 ifmgd->auth_data->tries = 0; 2748 ifmgd->auth_data->tries = 0;
2499 ifmgd->auth_data->timeout = jiffies; 2749 ifmgd->auth_data->timeout = jiffies;
2750 ifmgd->auth_data->timeout_started = true;
2500 run_again(ifmgd, ifmgd->auth_data->timeout); 2751 run_again(ifmgd, ifmgd->auth_data->timeout);
2501 } 2752 }
2502} 2753}
@@ -2522,10 +2773,10 @@ static const u64 care_about_ies =
2522 (1ULL << WLAN_EID_HT_CAPABILITY) | 2773 (1ULL << WLAN_EID_HT_CAPABILITY) |
2523 (1ULL << WLAN_EID_HT_OPERATION); 2774 (1ULL << WLAN_EID_HT_OPERATION);
2524 2775
2525static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, 2776static enum rx_mgmt_action
2526 struct ieee80211_mgmt *mgmt, 2777ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2527 size_t len, 2778 struct ieee80211_mgmt *mgmt, size_t len,
2528 struct ieee80211_rx_status *rx_status) 2779 u8 *deauth_buf, struct ieee80211_rx_status *rx_status)
2529{ 2780{
2530 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2781 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2531 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; 2782 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
@@ -2534,6 +2785,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2534 struct ieee80211_local *local = sdata->local; 2785 struct ieee80211_local *local = sdata->local;
2535 struct ieee80211_chanctx_conf *chanctx_conf; 2786 struct ieee80211_chanctx_conf *chanctx_conf;
2536 struct ieee80211_channel *chan; 2787 struct ieee80211_channel *chan;
2788 struct sta_info *sta;
2537 u32 changed = 0; 2789 u32 changed = 0;
2538 bool erp_valid; 2790 bool erp_valid;
2539 u8 erp_value = 0; 2791 u8 erp_value = 0;
@@ -2545,39 +2797,51 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2545 /* Process beacon from the current BSS */ 2797 /* Process beacon from the current BSS */
2546 baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; 2798 baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt;
2547 if (baselen > len) 2799 if (baselen > len)
2548 return; 2800 return RX_MGMT_NONE;
2549 2801
2550 rcu_read_lock(); 2802 rcu_read_lock();
2551 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); 2803 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
2552 if (!chanctx_conf) { 2804 if (!chanctx_conf) {
2553 rcu_read_unlock(); 2805 rcu_read_unlock();
2554 return; 2806 return RX_MGMT_NONE;
2555 } 2807 }
2556 2808
2557 if (rx_status->freq != chanctx_conf->def.chan->center_freq) { 2809 if (rx_status->freq != chanctx_conf->def.chan->center_freq) {
2558 rcu_read_unlock(); 2810 rcu_read_unlock();
2559 return; 2811 return RX_MGMT_NONE;
2560 } 2812 }
2561 chan = chanctx_conf->def.chan; 2813 chan = chanctx_conf->def.chan;
2562 rcu_read_unlock(); 2814 rcu_read_unlock();
2563 2815
2564 if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon && 2816 if (ifmgd->assoc_data && ifmgd->assoc_data->need_beacon &&
2565 ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) { 2817 ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) {
2566 ieee802_11_parse_elems(mgmt->u.beacon.variable, 2818 ieee802_11_parse_elems(mgmt->u.beacon.variable,
2567 len - baselen, &elems); 2819 len - baselen, &elems);
2568 2820
2569 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems); 2821 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems);
2570 ifmgd->assoc_data->have_beacon = true; 2822 ifmgd->assoc_data->have_beacon = true;
2571 ifmgd->assoc_data->sent_assoc = false; 2823 ifmgd->assoc_data->need_beacon = false;
2824 if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) {
2825 sdata->vif.bss_conf.sync_tsf =
2826 le64_to_cpu(mgmt->u.beacon.timestamp);
2827 sdata->vif.bss_conf.sync_device_ts =
2828 rx_status->device_timestamp;
2829 if (elems.tim)
2830 sdata->vif.bss_conf.sync_dtim_count =
2831 elems.tim->dtim_count;
2832 else
2833 sdata->vif.bss_conf.sync_dtim_count = 0;
2834 }
2572 /* continue assoc process */ 2835 /* continue assoc process */
2573 ifmgd->assoc_data->timeout = jiffies; 2836 ifmgd->assoc_data->timeout = jiffies;
2837 ifmgd->assoc_data->timeout_started = true;
2574 run_again(ifmgd, ifmgd->assoc_data->timeout); 2838 run_again(ifmgd, ifmgd->assoc_data->timeout);
2575 return; 2839 return RX_MGMT_NONE;
2576 } 2840 }
2577 2841
2578 if (!ifmgd->associated || 2842 if (!ifmgd->associated ||
2579 !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) 2843 !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
2580 return; 2844 return RX_MGMT_NONE;
2581 bssid = ifmgd->associated->bssid; 2845 bssid = ifmgd->associated->bssid;
2582 2846
2583 /* Track average RSSI from the Beacon frames of the current AP */ 2847 /* Track average RSSI from the Beacon frames of the current AP */
@@ -2608,12 +2872,12 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2608 if (sig > ifmgd->rssi_max_thold && 2872 if (sig > ifmgd->rssi_max_thold &&
2609 (last_sig <= ifmgd->rssi_min_thold || last_sig == 0)) { 2873 (last_sig <= ifmgd->rssi_min_thold || last_sig == 0)) {
2610 ifmgd->last_ave_beacon_signal = sig; 2874 ifmgd->last_ave_beacon_signal = sig;
2611 drv_rssi_callback(local, RSSI_EVENT_HIGH); 2875 drv_rssi_callback(local, sdata, RSSI_EVENT_HIGH);
2612 } else if (sig < ifmgd->rssi_min_thold && 2876 } else if (sig < ifmgd->rssi_min_thold &&
2613 (last_sig >= ifmgd->rssi_max_thold || 2877 (last_sig >= ifmgd->rssi_max_thold ||
2614 last_sig == 0)) { 2878 last_sig == 0)) {
2615 ifmgd->last_ave_beacon_signal = sig; 2879 ifmgd->last_ave_beacon_signal = sig;
2616 drv_rssi_callback(local, RSSI_EVENT_LOW); 2880 drv_rssi_callback(local, sdata, RSSI_EVENT_LOW);
2617 } 2881 }
2618 } 2882 }
2619 2883
@@ -2643,7 +2907,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2643 2907
2644 if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) { 2908 if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) {
2645 mlme_dbg_ratelimited(sdata, 2909 mlme_dbg_ratelimited(sdata,
2646 "cancelling probereq poll due to a received beacon\n"); 2910 "cancelling AP probe due to a received beacon\n");
2647 mutex_lock(&local->mtx); 2911 mutex_lock(&local->mtx);
2648 ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL; 2912 ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
2649 ieee80211_run_deferred_scan(local); 2913 ieee80211_run_deferred_scan(local);
@@ -2715,7 +2979,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2715 } 2979 }
2716 2980
2717 if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid) 2981 if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid)
2718 return; 2982 return RX_MGMT_NONE;
2719 ifmgd->beacon_crc = ncrc; 2983 ifmgd->beacon_crc = ncrc;
2720 ifmgd->beacon_crc_valid = true; 2984 ifmgd->beacon_crc_valid = true;
2721 2985
@@ -2725,6 +2989,32 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2725 elems.wmm_param_len)) 2989 elems.wmm_param_len))
2726 changed |= BSS_CHANGED_QOS; 2990 changed |= BSS_CHANGED_QOS;
2727 2991
2992 /*
2993 * If we haven't had a beacon before, tell the driver about the
2994 * DTIM period (and beacon timing if desired) now.
2995 */
2996 if (!bss_conf->dtim_period) {
2997 /* a few bogus APs send dtim_period = 0 or no TIM IE */
2998 if (elems.tim)
2999 bss_conf->dtim_period = elems.tim->dtim_period ?: 1;
3000 else
3001 bss_conf->dtim_period = 1;
3002
3003 if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) {
3004 sdata->vif.bss_conf.sync_tsf =
3005 le64_to_cpu(mgmt->u.beacon.timestamp);
3006 sdata->vif.bss_conf.sync_device_ts =
3007 rx_status->device_timestamp;
3008 if (elems.tim)
3009 sdata->vif.bss_conf.sync_dtim_count =
3010 elems.tim->dtim_count;
3011 else
3012 sdata->vif.bss_conf.sync_dtim_count = 0;
3013 }
3014
3015 changed |= BSS_CHANGED_DTIM_PERIOD;
3016 }
3017
2728 if (elems.erp_info && elems.erp_info_len >= 1) { 3018 if (elems.erp_info && elems.erp_info_len >= 1) {
2729 erp_valid = true; 3019 erp_valid = true;
2730 erp_value = elems.erp_info[0]; 3020 erp_value = elems.erp_info[0];
@@ -2735,11 +3025,22 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2735 le16_to_cpu(mgmt->u.beacon.capab_info), 3025 le16_to_cpu(mgmt->u.beacon.capab_info),
2736 erp_valid, erp_value); 3026 erp_valid, erp_value);
2737 3027
3028 mutex_lock(&local->sta_mtx);
3029 sta = sta_info_get(sdata, bssid);
2738 3030
2739 if (elems.ht_cap_elem && elems.ht_operation && elems.wmm_param && 3031 if (ieee80211_config_bw(sdata, sta, elems.ht_operation,
2740 !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) 3032 elems.vht_operation, bssid, &changed)) {
2741 changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation, 3033 mutex_unlock(&local->sta_mtx);
2742 bssid, true); 3034 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
3035 WLAN_REASON_DEAUTH_LEAVING,
3036 true, deauth_buf);
3037 return RX_MGMT_CFG80211_TX_DEAUTH;
3038 }
3039
3040 if (sta && elems.opmode_notif)
3041 ieee80211_vht_handle_opmode(sdata, sta, *elems.opmode_notif,
3042 rx_status->band, true);
3043 mutex_unlock(&local->sta_mtx);
2743 3044
2744 if (elems.country_elem && elems.pwr_constr_elem && 3045 if (elems.country_elem && elems.pwr_constr_elem &&
2745 mgmt->u.probe_resp.capab_info & 3046 mgmt->u.probe_resp.capab_info &
@@ -2750,6 +3051,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2750 elems.pwr_constr_elem); 3051 elems.pwr_constr_elem);
2751 3052
2752 ieee80211_bss_info_change_notify(sdata, changed); 3053 ieee80211_bss_info_change_notify(sdata, changed);
3054
3055 return RX_MGMT_NONE;
2753} 3056}
2754 3057
2755void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, 3058void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
@@ -2760,6 +3063,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
2760 struct ieee80211_mgmt *mgmt; 3063 struct ieee80211_mgmt *mgmt;
2761 struct cfg80211_bss *bss = NULL; 3064 struct cfg80211_bss *bss = NULL;
2762 enum rx_mgmt_action rma = RX_MGMT_NONE; 3065 enum rx_mgmt_action rma = RX_MGMT_NONE;
3066 u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN];
2763 u16 fc; 3067 u16 fc;
2764 3068
2765 rx_status = (struct ieee80211_rx_status *) skb->cb; 3069 rx_status = (struct ieee80211_rx_status *) skb->cb;
@@ -2770,7 +3074,8 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
2770 3074
2771 switch (fc & IEEE80211_FCTL_STYPE) { 3075 switch (fc & IEEE80211_FCTL_STYPE) {
2772 case IEEE80211_STYPE_BEACON: 3076 case IEEE80211_STYPE_BEACON:
2773 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status); 3077 rma = ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len,
3078 deauth_buf, rx_status);
2774 break; 3079 break;
2775 case IEEE80211_STYPE_PROBE_RESP: 3080 case IEEE80211_STYPE_PROBE_RESP:
2776 ieee80211_rx_mgmt_probe_resp(sdata, skb); 3081 ieee80211_rx_mgmt_probe_resp(sdata, skb);
@@ -2819,6 +3124,10 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
2819 case RX_MGMT_CFG80211_ASSOC_TIMEOUT: 3124 case RX_MGMT_CFG80211_ASSOC_TIMEOUT:
2820 cfg80211_send_assoc_timeout(sdata->dev, mgmt->bssid); 3125 cfg80211_send_assoc_timeout(sdata->dev, mgmt->bssid);
2821 break; 3126 break;
3127 case RX_MGMT_CFG80211_TX_DEAUTH:
3128 cfg80211_send_deauth(sdata->dev, deauth_buf,
3129 sizeof(deauth_buf));
3130 break;
2822 default: 3131 default:
2823 WARN(1, "unexpected: %d", rma); 3132 WARN(1, "unexpected: %d", rma);
2824 } 3133 }
@@ -2840,14 +3149,13 @@ static void ieee80211_sta_timer(unsigned long data)
2840} 3149}
2841 3150
2842static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, 3151static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
2843 u8 *bssid, u8 reason) 3152 u8 *bssid, u8 reason, bool tx)
2844{ 3153{
2845 struct ieee80211_local *local = sdata->local;
2846 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3154 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2847 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; 3155 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
2848 3156
2849 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, 3157 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
2850 false, frame_buf); 3158 tx, frame_buf);
2851 mutex_unlock(&ifmgd->mtx); 3159 mutex_unlock(&ifmgd->mtx);
2852 3160
2853 /* 3161 /*
@@ -2856,10 +3164,6 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
2856 */ 3164 */
2857 cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN); 3165 cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN);
2858 3166
2859 mutex_lock(&local->mtx);
2860 ieee80211_recalc_idle(local);
2861 mutex_unlock(&local->mtx);
2862
2863 mutex_lock(&ifmgd->mtx); 3167 mutex_lock(&ifmgd->mtx);
2864} 3168}
2865 3169
@@ -2868,12 +3172,17 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
2868 struct ieee80211_local *local = sdata->local; 3172 struct ieee80211_local *local = sdata->local;
2869 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3173 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2870 struct ieee80211_mgd_auth_data *auth_data = ifmgd->auth_data; 3174 struct ieee80211_mgd_auth_data *auth_data = ifmgd->auth_data;
3175 u32 tx_flags = 0;
2871 3176
2872 lockdep_assert_held(&ifmgd->mtx); 3177 lockdep_assert_held(&ifmgd->mtx);
2873 3178
2874 if (WARN_ON_ONCE(!auth_data)) 3179 if (WARN_ON_ONCE(!auth_data))
2875 return -EINVAL; 3180 return -EINVAL;
2876 3181
3182 if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
3183 tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
3184 IEEE80211_TX_INTFL_MLME_CONN_TX;
3185
2877 auth_data->tries++; 3186 auth_data->tries++;
2878 3187
2879 if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) { 3188 if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) {
@@ -2910,7 +3219,8 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
2910 ieee80211_send_auth(sdata, trans, auth_data->algorithm, status, 3219 ieee80211_send_auth(sdata, trans, auth_data->algorithm, status,
2911 auth_data->data, auth_data->data_len, 3220 auth_data->data, auth_data->data_len,
2912 auth_data->bss->bssid, 3221 auth_data->bss->bssid,
2913 auth_data->bss->bssid, NULL, 0, 0); 3222 auth_data->bss->bssid, NULL, 0, 0,
3223 tx_flags);
2914 } else { 3224 } else {
2915 const u8 *ssidie; 3225 const u8 *ssidie;
2916 3226
@@ -2929,13 +3239,18 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
2929 * will not answer to direct packets in unassociated state. 3239 * will not answer to direct packets in unassociated state.
2930 */ 3240 */
2931 ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1], 3241 ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1],
2932 NULL, 0, (u32) -1, true, false, 3242 NULL, 0, (u32) -1, true, tx_flags,
2933 auth_data->bss->channel, false); 3243 auth_data->bss->channel, false);
2934 rcu_read_unlock(); 3244 rcu_read_unlock();
2935 } 3245 }
2936 3246
2937 auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; 3247 if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
2938 run_again(ifmgd, auth_data->timeout); 3248 auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
3249 ifmgd->auth_data->timeout_started = true;
3250 run_again(ifmgd, auth_data->timeout);
3251 } else {
3252 auth_data->timeout_started = false;
3253 }
2939 3254
2940 return 0; 3255 return 0;
2941} 3256}
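[editor's note] The pattern here: when the hardware reports TX ACK status, the auth timeout is not armed immediately, it gets armed later from the TX-status callback. A standalone model of just that arming decision (timeout values and names are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct auth_state {
	bool hw_reports_ack;	/* IEEE80211_HW_REPORTS_TX_ACK_STATUS */
	bool timeout_started;
	unsigned long timeout;	/* stand-in for a jiffies deadline */
};

static void arm_auth_timeout(struct auth_state *st, unsigned long now,
			     unsigned long auth_timeout)
{
	if (!st->hw_reports_ack) {
		st->timeout = now + auth_timeout;
		st->timeout_started = true;	/* run_again() would fire here */
	} else {
		st->timeout_started = false;	/* armed later, on TX status */
	}
}

int main(void)
{
	struct auth_state a = { .hw_reports_ack = false };
	struct auth_state b = { .hw_reports_ack = true };

	arm_auth_timeout(&a, 1000, 200);
	arm_auth_timeout(&b, 1000, 200);
	printf("a: started=%d deadline=%lu\n", a.timeout_started, a.timeout);
	printf("b: started=%d\n", b.timeout_started);
	return 0;
}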
@@ -2966,12 +3281,29 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
2966 IEEE80211_ASSOC_MAX_TRIES); 3281 IEEE80211_ASSOC_MAX_TRIES);
2967 ieee80211_send_assoc(sdata); 3282 ieee80211_send_assoc(sdata);
2968 3283
2969 assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT; 3284 if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
2970 run_again(&sdata->u.mgd, assoc_data->timeout); 3285 assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
3286 assoc_data->timeout_started = true;
3287 run_again(&sdata->u.mgd, assoc_data->timeout);
3288 } else {
3289 assoc_data->timeout_started = false;
3290 }
2971 3291
2972 return 0; 3292 return 0;
2973} 3293}
2974 3294
3295void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata,
3296 __le16 fc, bool acked)
3297{
3298 struct ieee80211_local *local = sdata->local;
3299
3300 sdata->u.mgd.status_fc = fc;
3301 sdata->u.mgd.status_acked = acked;
3302 sdata->u.mgd.status_received = true;
3303
3304 ieee80211_queue_work(&local->hw, &sdata->work);
3305}
3306
2975void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) 3307void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
2976{ 3308{
2977 struct ieee80211_local *local = sdata->local; 3309 struct ieee80211_local *local = sdata->local;
@@ -2979,7 +3311,36 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
2979 3311
2980 mutex_lock(&ifmgd->mtx); 3312 mutex_lock(&ifmgd->mtx);
2981 3313
2982 if (ifmgd->auth_data && 3314 if (ifmgd->status_received) {
3315 __le16 fc = ifmgd->status_fc;
3316 bool status_acked = ifmgd->status_acked;
3317
3318 ifmgd->status_received = false;
3319 if (ifmgd->auth_data &&
3320 (ieee80211_is_probe_req(fc) || ieee80211_is_auth(fc))) {
3321 if (status_acked) {
3322 ifmgd->auth_data->timeout =
3323 jiffies + IEEE80211_AUTH_TIMEOUT_SHORT;
3324 run_again(ifmgd, ifmgd->auth_data->timeout);
3325 } else {
3326 ifmgd->auth_data->timeout = jiffies - 1;
3327 }
3328 ifmgd->auth_data->timeout_started = true;
3329 } else if (ifmgd->assoc_data &&
3330 (ieee80211_is_assoc_req(fc) ||
3331 ieee80211_is_reassoc_req(fc))) {
3332 if (status_acked) {
3333 ifmgd->assoc_data->timeout =
3334 jiffies + IEEE80211_ASSOC_TIMEOUT_SHORT;
3335 run_again(ifmgd, ifmgd->assoc_data->timeout);
3336 } else {
3337 ifmgd->assoc_data->timeout = jiffies - 1;
3338 }
3339 ifmgd->assoc_data->timeout_started = true;
3340 }
3341 }
3342
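[editor's note] Taken together with ieee80211_mgd_conn_tx_status() above, this forms a producer/consumer pair: the status path only records (fc, acked) and kicks the work, and the work re-arms a short timeout on ACK or an already-expired one on loss so the next attempt happens immediately. A standalone model of that handoff, with an illustrative timeout constant:

#include <stdbool.h>
#include <stdio.h>

#define AUTH_TIMEOUT_SHORT 10	/* stand-in for IEEE80211_AUTH_TIMEOUT_SHORT */

struct mgd_state {
	bool status_received, status_acked;
	bool timeout_started;
	long timeout;
};

static void conn_tx_status(struct mgd_state *st, bool acked)
{
	st->status_acked = acked;
	st->status_received = true;
	/* the real code also stores the frame control and queues sdata->work */
}

static void sta_work(struct mgd_state *st, long now)
{
	if (!st->status_received)
		return;
	st->status_received = false;

	/* ACKed: short re-arm; lost: deadline already in the past */
	st->timeout = st->status_acked ? now + AUTH_TIMEOUT_SHORT : now - 1;
	st->timeout_started = true;

	if (now >= st->timeout)	/* time_after(jiffies, timeout) */
		printf("timeout already expired -> retry now\n");
	else
		printf("re-armed, expires at %ld\n", st->timeout);
}

int main(void)
{
	struct mgd_state st = { 0 };

	conn_tx_status(&st, true);
	sta_work(&st, 100);	/* re-armed, expires at 110 */

	conn_tx_status(&st, false);
	sta_work(&st, 100);	/* timeout already expired -> retry now */
	return 0;
}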
3343 if (ifmgd->auth_data && ifmgd->auth_data->timeout_started &&
2983 time_after(jiffies, ifmgd->auth_data->timeout)) { 3344 time_after(jiffies, ifmgd->auth_data->timeout)) {
2984 if (ifmgd->auth_data->done) { 3345 if (ifmgd->auth_data->done) {
2985 /* 3346 /*
@@ -2998,12 +3359,13 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
2998 cfg80211_send_auth_timeout(sdata->dev, bssid); 3359 cfg80211_send_auth_timeout(sdata->dev, bssid);
2999 mutex_lock(&ifmgd->mtx); 3360 mutex_lock(&ifmgd->mtx);
3000 } 3361 }
3001 } else if (ifmgd->auth_data) 3362 } else if (ifmgd->auth_data && ifmgd->auth_data->timeout_started)
3002 run_again(ifmgd, ifmgd->auth_data->timeout); 3363 run_again(ifmgd, ifmgd->auth_data->timeout);
3003 3364
3004 if (ifmgd->assoc_data && 3365 if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started &&
3005 time_after(jiffies, ifmgd->assoc_data->timeout)) { 3366 time_after(jiffies, ifmgd->assoc_data->timeout)) {
3006 if (!ifmgd->assoc_data->have_beacon || 3367 if ((ifmgd->assoc_data->need_beacon &&
3368 !ifmgd->assoc_data->have_beacon) ||
3007 ieee80211_do_assoc(sdata)) { 3369 ieee80211_do_assoc(sdata)) {
3008 u8 bssid[ETH_ALEN]; 3370 u8 bssid[ETH_ALEN];
3009 3371
@@ -3015,7 +3377,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
3015 cfg80211_send_assoc_timeout(sdata->dev, bssid); 3377 cfg80211_send_assoc_timeout(sdata->dev, bssid);
3016 mutex_lock(&ifmgd->mtx); 3378 mutex_lock(&ifmgd->mtx);
3017 } 3379 }
3018 } else if (ifmgd->assoc_data) 3380 } else if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started)
3019 run_again(ifmgd, ifmgd->assoc_data->timeout); 3381 run_again(ifmgd, ifmgd->assoc_data->timeout);
3020 3382
3021 if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL | 3383 if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
@@ -3046,7 +3408,8 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
3046 "No ack for nullfunc frame to AP %pM, disconnecting.\n", 3408 "No ack for nullfunc frame to AP %pM, disconnecting.\n",
3047 bssid); 3409 bssid);
3048 ieee80211_sta_connection_lost(sdata, bssid, 3410 ieee80211_sta_connection_lost(sdata, bssid,
3049 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY); 3411 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
3412 false);
3050 } 3413 }
3051 } else if (time_is_after_jiffies(ifmgd->probe_timeout)) 3414 } else if (time_is_after_jiffies(ifmgd->probe_timeout))
3052 run_again(ifmgd, ifmgd->probe_timeout); 3415 run_again(ifmgd, ifmgd->probe_timeout);
@@ -3055,7 +3418,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
3055 "Failed to send nullfunc to AP %pM after %dms, disconnecting\n", 3418 "Failed to send nullfunc to AP %pM after %dms, disconnecting\n",
3056 bssid, probe_wait_ms); 3419 bssid, probe_wait_ms);
3057 ieee80211_sta_connection_lost(sdata, bssid, 3420 ieee80211_sta_connection_lost(sdata, bssid,
3058 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY); 3421 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false);
3059 } else if (ifmgd->probe_send_count < max_tries) { 3422 } else if (ifmgd->probe_send_count < max_tries) {
3060 mlme_dbg(sdata, 3423 mlme_dbg(sdata,
3061 "No probe response from AP %pM after %dms, try %d/%i\n", 3424 "No probe response from AP %pM after %dms, try %d/%i\n",
@@ -3074,15 +3437,11 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
3074 bssid, probe_wait_ms); 3437 bssid, probe_wait_ms);
3075 3438
3076 ieee80211_sta_connection_lost(sdata, bssid, 3439 ieee80211_sta_connection_lost(sdata, bssid,
3077 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY); 3440 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false);
3078 } 3441 }
3079 } 3442 }
3080 3443
3081 mutex_unlock(&ifmgd->mtx); 3444 mutex_unlock(&ifmgd->mtx);
3082
3083 mutex_lock(&local->mtx);
3084 ieee80211_recalc_idle(local);
3085 mutex_unlock(&local->mtx);
3086} 3445}
3087 3446
3088static void ieee80211_sta_bcn_mon_timer(unsigned long data) 3447static void ieee80211_sta_bcn_mon_timer(unsigned long data)
@@ -3094,6 +3453,7 @@ static void ieee80211_sta_bcn_mon_timer(unsigned long data)
3094 if (local->quiescing) 3453 if (local->quiescing)
3095 return; 3454 return;
3096 3455
3456 sdata->u.mgd.connection_loss = false;
3097 ieee80211_queue_work(&sdata->local->hw, 3457 ieee80211_queue_work(&sdata->local->hw,
3098 &sdata->u.mgd.beacon_connection_loss_work); 3458 &sdata->u.mgd.beacon_connection_loss_work);
3099} 3459}
@@ -3169,23 +3529,23 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
3169{ 3529{
3170 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3530 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3171 3531
3172 if (!ifmgd->associated) 3532 mutex_lock(&ifmgd->mtx);
3533 if (!ifmgd->associated) {
3534 mutex_unlock(&ifmgd->mtx);
3173 return; 3535 return;
3536 }
3174 3537
3175 if (sdata->flags & IEEE80211_SDATA_DISCONNECT_RESUME) { 3538 if (sdata->flags & IEEE80211_SDATA_DISCONNECT_RESUME) {
3176 sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME; 3539 sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME;
3177 mutex_lock(&ifmgd->mtx); 3540 mlme_dbg(sdata, "driver requested disconnect after resume\n");
3178 if (ifmgd->associated) { 3541 ieee80211_sta_connection_lost(sdata,
3179 mlme_dbg(sdata, 3542 ifmgd->associated->bssid,
3180 "driver requested disconnect after resume\n"); 3543 WLAN_REASON_UNSPECIFIED,
3181 ieee80211_sta_connection_lost(sdata, 3544 true);
3182 ifmgd->associated->bssid,
3183 WLAN_REASON_UNSPECIFIED);
3184 mutex_unlock(&ifmgd->mtx);
3185 return;
3186 }
3187 mutex_unlock(&ifmgd->mtx); 3545 mutex_unlock(&ifmgd->mtx);
3546 return;
3188 } 3547 }
3548 mutex_unlock(&ifmgd->mtx);
3189 3549
3190 if (test_and_clear_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running)) 3550 if (test_and_clear_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running))
3191 add_timer(&ifmgd->timer); 3551 add_timer(&ifmgd->timer);
@@ -3261,201 +3621,6 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
3261 return 0; 3621 return 0;
3262} 3622}
3263 3623
3264static u32 chandef_downgrade(struct cfg80211_chan_def *c)
3265{
3266 u32 ret;
3267 int tmp;
3268
3269 switch (c->width) {
3270 case NL80211_CHAN_WIDTH_20:
3271 c->width = NL80211_CHAN_WIDTH_20_NOHT;
3272 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
3273 break;
3274 case NL80211_CHAN_WIDTH_40:
3275 c->width = NL80211_CHAN_WIDTH_20;
3276 c->center_freq1 = c->chan->center_freq;
3277 ret = IEEE80211_STA_DISABLE_40MHZ |
3278 IEEE80211_STA_DISABLE_VHT;
3279 break;
3280 case NL80211_CHAN_WIDTH_80:
3281 tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
3282 /* n_P40 */
3283 tmp /= 2;
3284 /* freq_P40 */
3285 c->center_freq1 = c->center_freq1 - 20 + 40 * tmp;
3286 c->width = NL80211_CHAN_WIDTH_40;
3287 ret = IEEE80211_STA_DISABLE_VHT;
3288 break;
3289 case NL80211_CHAN_WIDTH_80P80:
3290 c->center_freq2 = 0;
3291 c->width = NL80211_CHAN_WIDTH_80;
3292 ret = IEEE80211_STA_DISABLE_80P80MHZ |
3293 IEEE80211_STA_DISABLE_160MHZ;
3294 break;
3295 case NL80211_CHAN_WIDTH_160:
3296 /* n_P20 */
3297 tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
3298 /* n_P80 */
3299 tmp /= 4;
3300 c->center_freq1 = c->center_freq1 - 40 + 80 * tmp;
3301 c->width = NL80211_CHAN_WIDTH_80;
3302 ret = IEEE80211_STA_DISABLE_80P80MHZ |
3303 IEEE80211_STA_DISABLE_160MHZ;
3304 break;
3305 default:
3306 case NL80211_CHAN_WIDTH_20_NOHT:
3307 WARN_ON_ONCE(1);
3308 c->width = NL80211_CHAN_WIDTH_20_NOHT;
3309 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
3310 break;
3311 }
3312
3313 WARN_ON_ONCE(!cfg80211_chandef_valid(c));
3314
3315 return ret;
3316}
3317
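[editor's note] The NL80211_CHAN_WIDTH_80 case is the least obvious arithmetic in chandef_downgrade(): index the primary 20 MHz channel within the 80 MHz block, halve to get the 40 MHz pair, then recompute the centre. A standalone check with the UNII-1 block (channels 36/40/44/48, 80 MHz centre 5210 MHz):

#include <stdio.h>

static int downgrade_80_to_40(int primary_freq, int cf1_80)
{
	int n_p20 = (30 + primary_freq - cf1_80) / 20; /* 20 MHz index, 0..3 */
	int n_p40 = n_p20 / 2;                         /* 40 MHz pair, 0..1 */
	return cf1_80 - 20 + 40 * n_p40;               /* new centre freq */
}

int main(void)
{
	printf("%d\n", downgrade_80_to_40(5180, 5210)); /* 5190: pair 36+40 */
	printf("%d\n", downgrade_80_to_40(5240, 5210)); /* 5230: pair 44+48 */
	return 0;
}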
3318static u32
3319ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
3320 struct ieee80211_supported_band *sband,
3321 struct ieee80211_channel *channel,
3322 const struct ieee80211_ht_operation *ht_oper,
3323 const struct ieee80211_vht_operation *vht_oper,
3324 struct cfg80211_chan_def *chandef)
3325{
3326 struct cfg80211_chan_def vht_chandef;
3327 u32 ht_cfreq, ret;
3328
3329 chandef->chan = channel;
3330 chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
3331 chandef->center_freq1 = channel->center_freq;
3332 chandef->center_freq2 = 0;
3333
3334 if (!ht_oper || !sband->ht_cap.ht_supported) {
3335 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
3336 goto out;
3337 }
3338
3339 chandef->width = NL80211_CHAN_WIDTH_20;
3340
3341 ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
3342 channel->band);
3343 /* check that channel matches the right operating channel */
3344 if (channel->center_freq != ht_cfreq) {
3345 /*
3346 * It's possible that some APs are confused here;
3347 * Netgear WNDR3700 sometimes reports 4 higher than
3348 * the actual channel in association responses, but
3349 * since we look at probe response/beacon data here
3350 * it should be OK.
3351 */
3352 sdata_info(sdata,
3353 "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
3354 channel->center_freq, ht_cfreq,
3355 ht_oper->primary_chan, channel->band);
3356 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
3357 goto out;
3358 }
3359
3360 /* check 40 MHz support, if we have it */
3361 if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
3362 switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
3363 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
3364 chandef->width = NL80211_CHAN_WIDTH_40;
3365 chandef->center_freq1 += 10;
3366 break;
3367 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
3368 chandef->width = NL80211_CHAN_WIDTH_40;
3369 chandef->center_freq1 -= 10;
3370 break;
3371 }
3372 } else {
3373 /* 40 MHz (and 80 MHz) must be supported for VHT */
3374 ret = IEEE80211_STA_DISABLE_VHT;
3375 goto out;
3376 }
3377
3378 if (!vht_oper || !sband->vht_cap.vht_supported) {
3379 ret = IEEE80211_STA_DISABLE_VHT;
3380 goto out;
3381 }
3382
3383 vht_chandef.chan = channel;
3384 vht_chandef.center_freq1 =
3385 ieee80211_channel_to_frequency(vht_oper->center_freq_seg1_idx,
3386 channel->band);
3387 vht_chandef.center_freq2 = 0;
3388
3389 if (vht_oper->center_freq_seg2_idx)
3390 vht_chandef.center_freq2 =
3391 ieee80211_channel_to_frequency(
3392 vht_oper->center_freq_seg2_idx,
3393 channel->band);
3394
3395 switch (vht_oper->chan_width) {
3396 case IEEE80211_VHT_CHANWIDTH_USE_HT:
3397 vht_chandef.width = chandef->width;
3398 break;
3399 case IEEE80211_VHT_CHANWIDTH_80MHZ:
3400 vht_chandef.width = NL80211_CHAN_WIDTH_80;
3401 break;
3402 case IEEE80211_VHT_CHANWIDTH_160MHZ:
3403 vht_chandef.width = NL80211_CHAN_WIDTH_160;
3404 break;
3405 case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
3406 vht_chandef.width = NL80211_CHAN_WIDTH_80P80;
3407 break;
3408 default:
3409 sdata_info(sdata,
3410 "AP VHT operation IE has invalid channel width (%d), disable VHT\n",
3411 vht_oper->chan_width);
3412 ret = IEEE80211_STA_DISABLE_VHT;
3413 goto out;
3414 }
3415
3416 if (!cfg80211_chandef_valid(&vht_chandef)) {
3417 sdata_info(sdata,
3418 "AP VHT information is invalid, disable VHT\n");
3419 ret = IEEE80211_STA_DISABLE_VHT;
3420 goto out;
3421 }
3422
3423 if (cfg80211_chandef_identical(chandef, &vht_chandef)) {
3424 ret = 0;
3425 goto out;
3426 }
3427
3428 if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
3429 sdata_info(sdata,
3430 "AP VHT information doesn't match HT, disable VHT\n");
3431 ret = IEEE80211_STA_DISABLE_VHT;
3432 goto out;
3433 }
3434
3435 *chandef = vht_chandef;
3436
3437 ret = 0;
3438
3439 while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
3440 IEEE80211_CHAN_DISABLED)) {
3441 if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
3442 ret = IEEE80211_STA_DISABLE_HT |
3443 IEEE80211_STA_DISABLE_VHT;
3444 goto out;
3445 }
3446
3447 ret = chandef_downgrade(chandef);
3448 }
3449
3450 if (chandef->width != vht_chandef.width)
3451 sdata_info(sdata,
3452 "local regulatory prevented using AP HT/VHT configuration, downgraded\n");
3453
3454out:
3455 WARN_ON_ONCE(!cfg80211_chandef_valid(chandef));
3456 return ret;
3457}
3458
3459static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata, 3624static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,
3460 struct cfg80211_bss *cbss) 3625 struct cfg80211_bss *cbss)
3461{ 3626{
@@ -3521,16 +3686,22 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
3521 3686
3522 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) && 3687 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
3523 sband->ht_cap.ht_supported) { 3688 sband->ht_cap.ht_supported) {
3524 const u8 *ht_oper_ie; 3689 const u8 *ht_oper_ie, *ht_cap;
3525 3690
3526 ht_oper_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_OPERATION); 3691 ht_oper_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_OPERATION);
3527 if (ht_oper_ie && ht_oper_ie[1] >= sizeof(*ht_oper)) 3692 if (ht_oper_ie && ht_oper_ie[1] >= sizeof(*ht_oper))
3528 ht_oper = (void *)(ht_oper_ie + 2); 3693 ht_oper = (void *)(ht_oper_ie + 2);
3694
3695 ht_cap = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_CAPABILITY);
3696 if (!ht_cap || ht_cap[1] < sizeof(struct ieee80211_ht_cap)) {
3697 ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
3698 ht_oper = NULL;
3699 }
3529 } 3700 }
3530 3701
3531 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) && 3702 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
3532 sband->vht_cap.vht_supported) { 3703 sband->vht_cap.vht_supported) {
3533 const u8 *vht_oper_ie; 3704 const u8 *vht_oper_ie, *vht_cap;
3534 3705
3535 vht_oper_ie = ieee80211_bss_get_ie(cbss, 3706 vht_oper_ie = ieee80211_bss_get_ie(cbss,
3536 WLAN_EID_VHT_OPERATION); 3707 WLAN_EID_VHT_OPERATION);
@@ -3540,15 +3711,21 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
3540 vht_oper = NULL; 3711 vht_oper = NULL;
3541 sdata_info(sdata, 3712 sdata_info(sdata,
3542 "AP advertised VHT without HT, disabling both\n"); 3713 "AP advertised VHT without HT, disabling both\n");
3543 sdata->flags |= IEEE80211_STA_DISABLE_HT; 3714 ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
3544 sdata->flags |= IEEE80211_STA_DISABLE_VHT; 3715 ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
3716 }
3717
3718 vht_cap = ieee80211_bss_get_ie(cbss, WLAN_EID_VHT_CAPABILITY);
3719 if (!vht_cap || vht_cap[1] < sizeof(struct ieee80211_vht_cap)) {
3720 ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
3721 vht_oper = NULL;
3545 } 3722 }
3546 } 3723 }
3547 3724
3548 ifmgd->flags |= ieee80211_determine_chantype(sdata, sband, 3725 ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
3549 cbss->channel, 3726 cbss->channel,
3550 ht_oper, vht_oper, 3727 ht_oper, vht_oper,
3551 &chandef); 3728 &chandef, true);
3552 3729
3553 sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss), 3730 sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss),
3554 local->rx_chains); 3731 local->rx_chains);
@@ -3565,8 +3742,11 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
3565 */ 3742 */
3566 ret = ieee80211_vif_use_channel(sdata, &chandef, 3743 ret = ieee80211_vif_use_channel(sdata, &chandef,
3567 IEEE80211_CHANCTX_SHARED); 3744 IEEE80211_CHANCTX_SHARED);
3568 while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) 3745 while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
3569 ifmgd->flags |= chandef_downgrade(&chandef); 3746 ifmgd->flags |= chandef_downgrade(&chandef);
3747 ret = ieee80211_vif_use_channel(sdata, &chandef,
3748 IEEE80211_CHANCTX_SHARED);
3749 }
3570 return ret; 3750 return ret;
3571} 3751}
3572 3752
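[editor's note] The fix in this hunk is that the retry loop now actually retries: downgrade the width, then ask for the channel context again, until it is accepted or 20 MHz non-HT is reached. A sketch of the loop shape, with an illustrative "accepts" predicate standing in for ieee80211_vif_use_channel():

#include <stdio.h>

enum width { W20_NOHT, W20, W40, W80, W160 };

static int accepts(enum width w)
{
	return w <= W40;	/* pretend the driver rejects 80/160 MHz */
}

static enum width downgrade(enum width w)
{
	return w > W20_NOHT ? w - 1 : W20_NOHT;
}

int main(void)
{
	enum width w = W160;
	int ret = !accepts(w);

	while (ret && w != W20_NOHT) {
		w = downgrade(w);	/* would also accumulate DISABLE_* flags */
		ret = !accepts(w);	/* the re-try the old code was missing */
	}
	printf("settled on width %d, ret=%d\n", w, ret);	/* width 2 (40 MHz), ret 0 */
	return 0;
}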
@@ -3595,15 +3775,12 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3595 return -ENOMEM; 3775 return -ENOMEM;
3596 } 3776 }
3597 3777
3598 mutex_lock(&local->mtx);
3599 ieee80211_recalc_idle(sdata->local);
3600 mutex_unlock(&local->mtx);
3601
3602 if (new_sta) { 3778 if (new_sta) {
3603 u32 rates = 0, basic_rates = 0; 3779 u32 rates = 0, basic_rates = 0;
3604 bool have_higher_than_11mbit; 3780 bool have_higher_than_11mbit;
3605 int min_rate = INT_MAX, min_rate_index = -1; 3781 int min_rate = INT_MAX, min_rate_index = -1;
3606 struct ieee80211_supported_band *sband; 3782 struct ieee80211_supported_band *sband;
3783 const struct cfg80211_bss_ies *ies;
3607 3784
3608 sband = local->hw.wiphy->bands[cbss->channel->band]; 3785 sband = local->hw.wiphy->bands[cbss->channel->band];
3609 3786
@@ -3647,8 +3824,34 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3647 3824
3648 /* set timing information */ 3825 /* set timing information */
3649 sdata->vif.bss_conf.beacon_int = cbss->beacon_interval; 3826 sdata->vif.bss_conf.beacon_int = cbss->beacon_interval;
3650 sdata->vif.bss_conf.sync_tsf = cbss->tsf; 3827 rcu_read_lock();
3651 sdata->vif.bss_conf.sync_device_ts = bss->device_ts; 3828 ies = rcu_dereference(cbss->beacon_ies);
3829 if (ies) {
3830 const u8 *tim_ie;
3831
3832 sdata->vif.bss_conf.sync_tsf = ies->tsf;
3833 sdata->vif.bss_conf.sync_device_ts =
3834 bss->device_ts_beacon;
3835 tim_ie = cfg80211_find_ie(WLAN_EID_TIM,
3836 ies->data, ies->len);
3837 if (tim_ie && tim_ie[1] >= 2)
3838 sdata->vif.bss_conf.sync_dtim_count = tim_ie[2];
3839 else
3840 sdata->vif.bss_conf.sync_dtim_count = 0;
3841 } else if (!(local->hw.flags &
3842 IEEE80211_HW_TIMING_BEACON_ONLY)) {
3843 ies = rcu_dereference(cbss->proberesp_ies);
3844 /* must be non-NULL since beacon IEs were NULL */
3845 sdata->vif.bss_conf.sync_tsf = ies->tsf;
3846 sdata->vif.bss_conf.sync_device_ts =
3847 bss->device_ts_presp;
3848 sdata->vif.bss_conf.sync_dtim_count = 0;
3849 } else {
3850 sdata->vif.bss_conf.sync_tsf = 0;
3851 sdata->vif.bss_conf.sync_device_ts = 0;
3852 sdata->vif.bss_conf.sync_dtim_count = 0;
3853 }
3854 rcu_read_unlock();
3652 3855
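[editor's note] The selection order in the block above: beacon IEs win; probe-response IEs are acceptable only when the hardware is not restricted to beacon timestamps; otherwise the sync state is zeroed. A standalone model of just that preference chain (RCU protection omitted; only the TSF is modeled):

#include <stdbool.h>
#include <stdio.h>

struct ies { unsigned long long tsf; };

static void pick_sync(const struct ies *beacon, const struct ies *presp,
		      bool beacon_only, unsigned long long *sync_tsf)
{
	if (beacon)
		*sync_tsf = beacon->tsf;
	else if (!beacon_only)
		*sync_tsf = presp->tsf;	/* non-NULL since beacon IEs were NULL */
	else
		*sync_tsf = 0;
}

int main(void)
{
	struct ies b = { 1111 }, p = { 2222 };
	unsigned long long tsf;

	pick_sync(&b, &p, false, &tsf); printf("%llu\n", tsf);  /* 1111 */
	pick_sync(NULL, &p, false, &tsf); printf("%llu\n", tsf); /* 2222 */
	pick_sync(NULL, &p, true, &tsf); printf("%llu\n", tsf);  /* 0 */
	return 0;
}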
3653 /* tell driver about BSSID, basic rates and timing */ 3856 /* tell driver about BSSID, basic rates and timing */
3654 ieee80211_bss_info_change_notify(sdata, 3857 ieee80211_bss_info_change_notify(sdata,
@@ -3768,7 +3971,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
3768 } 3971 }
3769 3972
3770 /* hold our own reference */ 3973 /* hold our own reference */
3771 cfg80211_ref_bss(auth_data->bss); 3974 cfg80211_ref_bss(local->hw.wiphy, auth_data->bss);
3772 err = 0; 3975 err = 0;
3773 goto out_unlock; 3976 goto out_unlock;
3774 3977
@@ -3791,6 +3994,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3791 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 3994 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3792 struct ieee80211_bss *bss = (void *)req->bss->priv; 3995 struct ieee80211_bss *bss = (void *)req->bss->priv;
3793 struct ieee80211_mgd_assoc_data *assoc_data; 3996 struct ieee80211_mgd_assoc_data *assoc_data;
3997 const struct cfg80211_bss_ies *beacon_ies;
3794 struct ieee80211_supported_band *sband; 3998 struct ieee80211_supported_band *sband;
3795 const u8 *ssidie, *ht_ie, *vht_ie; 3999 const u8 *ssidie, *ht_ie, *vht_ie;
3796 int i, err; 4000 int i, err;
@@ -3956,40 +4160,48 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3956 if (err) 4160 if (err)
3957 goto err_clear; 4161 goto err_clear;
3958 4162
3959 if (sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) { 4163 rcu_read_lock();
3960 const struct cfg80211_bss_ies *beacon_ies; 4164 beacon_ies = rcu_dereference(req->bss->beacon_ies);
3961 4165
3962 rcu_read_lock(); 4166 if (sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC &&
3963 beacon_ies = rcu_dereference(req->bss->beacon_ies); 4167 !beacon_ies) {
3964 if (!beacon_ies) { 4168 /*
3965 /* 4169 * Wait up to one beacon interval ...
3966 * Wait up to one beacon interval ... 4170 * should this be more if we miss one?
3967 * should this be more if we miss one? 4171 */
3968 */ 4172 sdata_info(sdata, "waiting for beacon from %pM\n",
3969 sdata_info(sdata, "waiting for beacon from %pM\n", 4173 ifmgd->bssid);
3970 ifmgd->bssid); 4174 assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval);
3971 assoc_data->timeout = 4175 assoc_data->timeout_started = true;
3972 TU_TO_EXP_TIME(req->bss->beacon_interval); 4176 assoc_data->need_beacon = true;
3973 } else { 4177 } else if (beacon_ies) {
3974 const u8 *tim_ie = cfg80211_find_ie(WLAN_EID_TIM, 4178 const u8 *tim_ie = cfg80211_find_ie(WLAN_EID_TIM,
3975 beacon_ies->data, 4179 beacon_ies->data,
3976 beacon_ies->len); 4180 beacon_ies->len);
3977 if (tim_ie && tim_ie[1] >= 4181 u8 dtim_count = 0;
3978 sizeof(struct ieee80211_tim_ie)) { 4182
3979 const struct ieee80211_tim_ie *tim; 4183 if (tim_ie && tim_ie[1] >= sizeof(struct ieee80211_tim_ie)) {
3980 tim = (void *)(tim_ie + 2); 4184 const struct ieee80211_tim_ie *tim;
3981 ifmgd->dtim_period = tim->dtim_period; 4185 tim = (void *)(tim_ie + 2);
3982 } 4186 ifmgd->dtim_period = tim->dtim_period;
3983 assoc_data->have_beacon = true; 4187 dtim_count = tim->dtim_count;
3984 assoc_data->sent_assoc = false;
3985 assoc_data->timeout = jiffies;
3986 } 4188 }
3987 rcu_read_unlock();
3988 } else {
3989 assoc_data->have_beacon = true; 4189 assoc_data->have_beacon = true;
3990 assoc_data->sent_assoc = false;
3991 assoc_data->timeout = jiffies; 4190 assoc_data->timeout = jiffies;
4191 assoc_data->timeout_started = true;
4192
4193 if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) {
4194 sdata->vif.bss_conf.sync_tsf = beacon_ies->tsf;
4195 sdata->vif.bss_conf.sync_device_ts =
4196 bss->device_ts_beacon;
4197 sdata->vif.bss_conf.sync_dtim_count = dtim_count;
4198 }
4199 } else {
4200 assoc_data->timeout = jiffies;
4201 assoc_data->timeout_started = true;
3992 } 4202 }
4203 rcu_read_unlock();
4204
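[editor's note] A worked example of the "wait up to one beacon interval" deadline: a TU is 1024 us, so TU_TO_EXP_TIME(100) lands roughly 102.4 ms from now. The HZ value below is an assumption for illustration; the rounding mimics usecs_to_jiffies():

#include <stdio.h>

#define HZ 100	/* assumed tick rate */

static unsigned long tu_to_jiffies(unsigned int tu)
{
	unsigned long usecs = tu * 1024UL;

	return (usecs * HZ + 999999) / 1000000;	/* round up */
}

int main(void)
{
	printf("100 TU = %lu us = %lu jiffies @ HZ=%d\n",
	       100 * 1024UL, tu_to_jiffies(100), HZ);	/* 102400 us, 11 jiffies */
	return 0;
}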
3993 run_again(ifmgd, assoc_data->timeout); 4205 run_again(ifmgd, assoc_data->timeout);
3994 4206
3995 if (bss->corrupt_data) { 4207 if (bss->corrupt_data) {
@@ -4056,10 +4268,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
4056 mutex_unlock(&ifmgd->mtx); 4268 mutex_unlock(&ifmgd->mtx);
4057 4269
4058 out: 4270 out:
4059 mutex_lock(&sdata->local->mtx);
4060 ieee80211_recalc_idle(sdata->local);
4061 mutex_unlock(&sdata->local->mtx);
4062
4063 if (sent_frame) 4271 if (sent_frame)
4064 __cfg80211_send_deauth(sdata->dev, frame_buf, 4272 __cfg80211_send_deauth(sdata->dev, frame_buf,
4065 IEEE80211_DEAUTH_FRAME_LEN); 4273 IEEE80211_DEAUTH_FRAME_LEN);
@@ -4100,10 +4308,6 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
4100 __cfg80211_send_disassoc(sdata->dev, frame_buf, 4308 __cfg80211_send_disassoc(sdata->dev, frame_buf,
4101 IEEE80211_DEAUTH_FRAME_LEN); 4309 IEEE80211_DEAUTH_FRAME_LEN);
4102 4310
4103 mutex_lock(&sdata->local->mtx);
4104 ieee80211_recalc_idle(sdata->local);
4105 mutex_unlock(&sdata->local->mtx);
4106
4107 return 0; 4311 return 0;
4108} 4312}
4109 4313
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 82baf5b6ecf4..cc79b4a2e821 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -113,6 +113,15 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
113 * notify the AP about us leaving the channel and stop all 113 * notify the AP about us leaving the channel and stop all
114 * STA interfaces. 114 * STA interfaces.
115 */ 115 */
116
117 /*
118 * Stop queues and transmit all frames queued by the driver
119 * before sending nullfunc to enable powersave at the AP.
120 */
121 ieee80211_stop_queues_by_reason(&local->hw,
122 IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL);
123 drv_flush(local, false);
124
116 mutex_lock(&local->iflist_mtx); 125 mutex_lock(&local->iflist_mtx);
117 list_for_each_entry(sdata, &local->interfaces, list) { 126 list_for_each_entry(sdata, &local->interfaces, list) {
118 if (!ieee80211_sdata_running(sdata)) 127 if (!ieee80211_sdata_running(sdata))
@@ -133,12 +142,9 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
133 sdata, BSS_CHANGED_BEACON_ENABLED); 142 sdata, BSS_CHANGED_BEACON_ENABLED);
134 } 143 }
135 144
136 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) { 145 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
137 netif_tx_stop_all_queues(sdata->dev); 146 sdata->u.mgd.associated)
138 if (sdata->vif.type == NL80211_IFTYPE_STATION && 147 ieee80211_offchannel_ps_enable(sdata);
139 sdata->u.mgd.associated)
140 ieee80211_offchannel_ps_enable(sdata);
141 }
142 } 148 }
143 mutex_unlock(&local->iflist_mtx); 149 mutex_unlock(&local->iflist_mtx);
144} 150}
@@ -166,20 +172,6 @@ void ieee80211_offchannel_return(struct ieee80211_local *local)
166 sdata->u.mgd.associated) 172 sdata->u.mgd.associated)
167 ieee80211_offchannel_ps_disable(sdata); 173 ieee80211_offchannel_ps_disable(sdata);
168 174
169 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
170 /*
171 * This may wake up queues even though the driver
172 * currently has them stopped. This is not very
173 * likely, since the driver won't have gotten any
174 * (or hardly any) new packets while we weren't
175 * on the right channel, and even if it happens
176 * it will at most lead to queueing up one more
177 * packet per queue in mac80211 rather than on
178 * the interface qdisc.
179 */
180 netif_tx_wake_all_queues(sdata->dev);
181 }
182
183 if (test_and_clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, 175 if (test_and_clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED,
184 &sdata->state)) { 176 &sdata->state)) {
185 sdata->vif.bss_conf.enable_beacon = true; 177 sdata->vif.bss_conf.enable_beacon = true;
@@ -188,6 +180,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local)
188 } 180 }
189 } 181 }
190 mutex_unlock(&local->iflist_mtx); 182 mutex_unlock(&local->iflist_mtx);
183
184 ieee80211_wake_queues_by_reason(&local->hw,
185 IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL);
191} 186}
192 187
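[editor's note] The per-netdev netif_tx_stop/wake calls are replaced by reason-scoped queue stops. The point of "by reason": each queue keeps a bitmask of stop reasons and only runs while the mask is empty, so the off-channel stop cannot be undone by an unrelated wake. A minimal model of that refcount-by-bitmask scheme:

#include <stdio.h>

enum { REASON_OFFCHANNEL = 1 << 0, REASON_PS = 1 << 1 };

struct queue { unsigned int stop_reasons; };

static void stop_queue(struct queue *q, unsigned int r) { q->stop_reasons |= r; }

static void wake_queue(struct queue *q, unsigned int r)
{
	q->stop_reasons &= ~r;
	if (!q->stop_reasons)
		printf("queue running again\n");
}

int main(void)
{
	struct queue q = { 0 };

	stop_queue(&q, REASON_OFFCHANNEL);
	stop_queue(&q, REASON_PS);
	wake_queue(&q, REASON_OFFCHANNEL);	/* still stopped: PS holds it */
	wake_queue(&q, REASON_PS);		/* "queue running again" */
	return 0;
}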
193void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc) 188void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc)
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index e45b83610e85..d0275f34bf70 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -38,6 +38,8 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
38 38
39 ieee80211_scan_cancel(local); 39 ieee80211_scan_cancel(local);
40 40
41 ieee80211_dfs_cac_cancel(local);
42
41 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { 43 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
42 mutex_lock(&local->sta_mtx); 44 mutex_lock(&local->sta_mtx);
43 list_for_each_entry(sta, &local->sta_list, list) { 45 list_for_each_entry(sta, &local->sta_list, list) {
@@ -228,3 +230,13 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
228 * ieee80211_reconfig(), which is also needed for hardware 230 * ieee80211_reconfig(), which is also needed for hardware
229 * hang/firmware failure/etc. recovery. 231 * hang/firmware failure/etc. recovery.
230 */ 232 */
233
234void ieee80211_report_wowlan_wakeup(struct ieee80211_vif *vif,
235 struct cfg80211_wowlan_wakeup *wakeup,
236 gfp_t gfp)
237{
238 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
239
240 cfg80211_report_wowlan_wakeup(&sdata->wdev, wakeup, gfp);
241}
242EXPORT_SYMBOL(ieee80211_report_wowlan_wakeup);
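[editor's note] A driver would call the new export from its resume/wakeup path. A hypothetical sketch only; the driver name is invented, and only the .disconnect and .pattern_idx members of struct cfg80211_wowlan_wakeup are assumed here (with -1 taken to mean "no pattern matched"):

/* sketch: report to userspace why the device woke the host */
static void mydrv_report_wakeup(struct ieee80211_vif *vif, bool assoc_lost)
{
	struct cfg80211_wowlan_wakeup wakeup = {
		.disconnect = assoc_lost,
		.pattern_idx = -1,	/* assumption: -1 == no pattern match */
	};

	ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);
}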
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 301386dabf88..d35a5dd3fb13 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -68,6 +68,8 @@ static inline void rate_control_rate_init(struct sta_info *sta)
68 sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band]; 68 sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
69 rcu_read_unlock(); 69 rcu_read_unlock();
70 70
71 ieee80211_sta_set_rx_nss(sta);
72
71 ref->ops->rate_init(ref->priv, sband, ista, priv_sta); 73 ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
72 set_sta_flag(sta, WLAN_STA_RATE_CONTROL); 74 set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
73} 75}
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 8c5acdc06226..eea45a2c7c35 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -494,6 +494,33 @@ minstrel_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
494 kfree(mi); 494 kfree(mi);
495} 495}
496 496
497static void
498minstrel_init_cck_rates(struct minstrel_priv *mp)
499{
500 static const int bitrates[4] = { 10, 20, 55, 110 };
501 struct ieee80211_supported_band *sband;
502 int i, j;
503
504 sband = mp->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
505 if (!sband)
506 return;
507
508 for (i = 0, j = 0; i < sband->n_bitrates; i++) {
509 struct ieee80211_rate *rate = &sband->bitrates[i];
510
511 if (rate->flags & IEEE80211_RATE_ERP_G)
512 continue;
513
514 for (j = 0; j < ARRAY_SIZE(bitrates); j++) {
515 if (rate->bitrate != bitrates[j])
516 continue;
517
518 mp->cck_rates[j] = i;
519 break;
520 }
521 }
522}
523
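[editor's note] minstrel_init_cck_rates() maps the four CCK rates (stored in 100 kbit/s units) to their indexes in the band's bitrate table, skipping OFDM (ERP_G) entries. A standalone model with a table that mimics a typical 2.4 GHz layout (the layout itself is illustrative):

#include <stdio.h>

#define ERP_G 1

struct rate { int bitrate; int flags; };

int main(void)
{
	static const struct rate band[] = {
		{ 10, 0 }, { 20, 0 }, { 55, 0 }, { 110, 0 },	/* CCK */
		{ 60, ERP_G }, { 90, ERP_G }, { 120, ERP_G },	/* OFDM */
	};
	static const int cck[4] = { 10, 20, 55, 110 };
	unsigned char cck_rates[4] = { 0 };
	unsigned int i, j;

	for (i = 0; i < sizeof(band) / sizeof(band[0]); i++) {
		if (band[i].flags & ERP_G)
			continue;	/* OFDM rates are never CCK */
		for (j = 0; j < 4; j++)
			if (band[i].bitrate == cck[j])
				cck_rates[j] = i;
	}
	for (j = 0; j < 4; j++)
		printf("cck_rates[%u] = %u\n", j, cck_rates[j]); /* 0 1 2 3 */
	return 0;
}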
497static void * 524static void *
498minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 525minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
499{ 526{
@@ -539,6 +566,8 @@ minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
539 S_IRUGO | S_IWUGO, debugfsdir, &mp->fixed_rate_idx); 566 S_IRUGO | S_IWUGO, debugfsdir, &mp->fixed_rate_idx);
540#endif 567#endif
541 568
569 minstrel_init_cck_rates(mp);
570
542 return mp; 571 return mp;
543} 572}
544 573
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index 5d278eccaef0..5ecf757817f2 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -79,6 +79,8 @@ struct minstrel_priv {
79 unsigned int lookaround_rate; 79 unsigned int lookaround_rate;
80 unsigned int lookaround_rate_mrr; 80 unsigned int lookaround_rate_mrr;
81 81
82 u8 cck_rates[4];
83
82#ifdef CONFIG_MAC80211_DEBUGFS 84#ifdef CONFIG_MAC80211_DEBUGFS
83 /* 85 /*
84 * enable fixed rate processing per RC 86 * enable fixed rate processing per RC
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 9f9c453bc45d..3af141c69712 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org> 2 * Copyright (C) 2010-2013 Felix Fietkau <nbd@openwrt.org>
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
@@ -63,6 +63,30 @@
63 } \ 63 } \
64} 64}
65 65
66#define CCK_DURATION(_bitrate, _short, _len) \
67 (10 /* SIFS */ + \
68 (_short ? 72 + 24 : 144 + 48 ) + \
69 (8 * (_len + 4) * 10) / (_bitrate))
70
71#define CCK_ACK_DURATION(_bitrate, _short) \
72 (CCK_DURATION((_bitrate > 10 ? 20 : 10), false, 60) + \
73 CCK_DURATION(_bitrate, _short, AVG_PKT_SIZE))
74
75#define CCK_DURATION_LIST(_short) \
76 CCK_ACK_DURATION(10, _short), \
77 CCK_ACK_DURATION(20, _short), \
78 CCK_ACK_DURATION(55, _short), \
79 CCK_ACK_DURATION(110, _short)
80
81#define CCK_GROUP \
82 [MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS] = { \
83 .streams = 0, \
84 .duration = { \
85 CCK_DURATION_LIST(false), \
86 CCK_DURATION_LIST(true) \
87 } \
88 }
89
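[editor's note] Worked numbers for the macros above, as plain C. AVG_PKT_SIZE = 1200 is an assumption here (it should match minstrel_ht's constant); the preamble+PLCP constants (192/96 us) and SIFS come straight from the macro:

#include <stdio.h>

#define AVG_PKT_SIZE 1200	/* assumption: minstrel_ht's value */

static unsigned int cck_duration(unsigned int bitrate /* 100 kbit/s */,
				 int short_pre, unsigned int len)
{
	return 10	/* SIFS */
	       + (short_pre ? 72 + 24 : 144 + 48)	/* preamble + PLCP */
	       + (8 * (len + 4) * 10) / bitrate;	/* payload + FCS airtime */
}

static unsigned int cck_ack_duration(unsigned int bitrate, int short_pre)
{
	/* ACK goes out at 1 Mbit/s, or 2 Mbit/s for the faster rates */
	return cck_duration(bitrate > 10 ? 20 : 10, 0, 60) +
	       cck_duration(bitrate, short_pre, AVG_PKT_SIZE);
}

int main(void)
{
	printf("1M  long:  %u us\n", cck_ack_duration(10, 0));	/* 10548 */
	printf("11M short: %u us\n", cck_ack_duration(110, 1));	/* 1439 */
	return 0;
}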
66/* 90/*
67 * To enable sufficiently targeted rate sampling, MCS rates are divided into 91 * To enable sufficiently targeted rate sampling, MCS rates are divided into
68 * groups, based on the number of streams and flags (HT40, SGI) that they 92 * groups, based on the number of streams and flags (HT40, SGI) that they
@@ -95,8 +119,13 @@ const struct mcs_group minstrel_mcs_groups[] = {
95#if MINSTREL_MAX_STREAMS >= 3 119#if MINSTREL_MAX_STREAMS >= 3
96 MCS_GROUP(3, 1, 1), 120 MCS_GROUP(3, 1, 1),
97#endif 121#endif
122
123 /* must be last */
124 CCK_GROUP
98}; 125};
99 126
127#define MINSTREL_CCK_GROUP (ARRAY_SIZE(minstrel_mcs_groups) - 1)
128
100static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES]; 129static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES];
101 130
102/* 131/*
@@ -119,6 +148,29 @@ minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
119 !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)); 148 !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
120} 149}
121 150
151static struct minstrel_rate_stats *
152minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
153 struct ieee80211_tx_rate *rate)
154{
155 int group, idx;
156
157 if (rate->flags & IEEE80211_TX_RC_MCS) {
158 group = minstrel_ht_get_group_idx(rate);
159 idx = rate->idx % MCS_GROUP_RATES;
160 } else {
161 group = MINSTREL_CCK_GROUP;
162
163 for (idx = 0; idx < ARRAY_SIZE(mp->cck_rates); idx++)
164 if (rate->idx == mp->cck_rates[idx])
165 break;
166
167 /* short preamble */
168 if (!(mi->groups[group].supported & BIT(idx)))
169 idx += 4;
170 }
171 return &mi->groups[group].rates[idx];
172}
173
122static inline struct minstrel_rate_stats * 174static inline struct minstrel_rate_stats *
123minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index) 175minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
124{ 176{
@@ -159,7 +211,7 @@ static void
159minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate) 211minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
160{ 212{
161 struct minstrel_rate_stats *mr; 213 struct minstrel_rate_stats *mr;
162 unsigned int usecs; 214 unsigned int usecs = 0;
163 215
164 mr = &mi->groups[group].rates[rate]; 216 mr = &mi->groups[group].rates[rate];
165 217
@@ -168,7 +220,9 @@ minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
168 return; 220 return;
169 } 221 }
170 222
171 usecs = mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len); 223 if (group != MINSTREL_CCK_GROUP)
224 usecs = mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);
225
172 usecs += minstrel_mcs_groups[group].duration[rate]; 226 usecs += minstrel_mcs_groups[group].duration[rate];
173 mr->cur_tp = MINSTREL_TRUNC((1000000 / usecs) * mr->probability); 227 mr->cur_tp = MINSTREL_TRUNC((1000000 / usecs) * mr->probability);
174} 228}
@@ -231,10 +285,6 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
231 if (!mr->cur_tp) 285 if (!mr->cur_tp)
232 continue; 286 continue;
233 287
234 /* ignore the lowest rate of each single-stream group */
235 if (!i && minstrel_mcs_groups[group].streams == 1)
236 continue;
237
238 if ((mr->cur_tp > cur_prob_tp && mr->probability > 288 if ((mr->cur_tp > cur_prob_tp && mr->probability >
239 MINSTREL_FRAC(3, 4)) || mr->probability > cur_prob) { 289 MINSTREL_FRAC(3, 4)) || mr->probability > cur_prob) {
240 mg->max_prob_rate = index; 290 mg->max_prob_rate = index;
@@ -297,7 +347,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
297} 347}
298 348
299static bool 349static bool
300minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate) 350minstrel_ht_txstat_valid(struct minstrel_priv *mp, struct ieee80211_tx_rate *rate)
301{ 351{
302 if (rate->idx < 0) 352 if (rate->idx < 0)
303 return false; 353 return false;
@@ -305,7 +355,13 @@ minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate)
305 if (!rate->count) 355 if (!rate->count)
306 return false; 356 return false;
307 357
308 return !!(rate->flags & IEEE80211_TX_RC_MCS); 358 if (rate->flags & IEEE80211_TX_RC_MCS)
359 return true;
360
361 return rate->idx == mp->cck_rates[0] ||
362 rate->idx == mp->cck_rates[1] ||
363 rate->idx == mp->cck_rates[2] ||
364 rate->idx == mp->cck_rates[3];
309} 365}
310 366
311static void 367static void
@@ -390,7 +446,6 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
390 struct minstrel_rate_stats *rate, *rate2; 446 struct minstrel_rate_stats *rate, *rate2;
391 struct minstrel_priv *mp = priv; 447 struct minstrel_priv *mp = priv;
392 bool last; 448 bool last;
393 int group;
394 int i; 449 int i;
395 450
396 if (!msp->is_ht) 451 if (!msp->is_ht)
@@ -419,13 +474,12 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
419 if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) 474 if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
420 mi->sample_packets += info->status.ampdu_len; 475 mi->sample_packets += info->status.ampdu_len;
421 476
422 last = !minstrel_ht_txstat_valid(&ar[0]); 477 last = !minstrel_ht_txstat_valid(mp, &ar[0]);
423 for (i = 0; !last; i++) { 478 for (i = 0; !last; i++) {
424 last = (i == IEEE80211_TX_MAX_RATES - 1) || 479 last = (i == IEEE80211_TX_MAX_RATES - 1) ||
425 !minstrel_ht_txstat_valid(&ar[i + 1]); 480 !minstrel_ht_txstat_valid(mp, &ar[i + 1]);
426 481
427 group = minstrel_ht_get_group_idx(&ar[i]); 482 rate = minstrel_ht_get_stats(mp, mi, &ar[i]);
428 rate = &mi->groups[group].rates[ar[i].idx % 8];
429 483
430 if (last) 484 if (last)
431 rate->success += info->status.ampdu_ack_len; 485 rate->success += info->status.ampdu_ack_len;
@@ -451,7 +505,8 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
451 505
452 if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) { 506 if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
453 minstrel_ht_update_stats(mp, mi); 507 minstrel_ht_update_stats(mp, mi);
454 if (!(info->flags & IEEE80211_TX_CTL_AMPDU)) 508 if (!(info->flags & IEEE80211_TX_CTL_AMPDU) &&
509 mi->max_prob_rate / MCS_GROUP_RATES != MINSTREL_CCK_GROUP)
455 minstrel_aggr_check(sta, skb); 510 minstrel_aggr_check(sta, skb);
456 } 511 }
457} 512}
@@ -467,6 +522,7 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
467 unsigned int ctime = 0; 522 unsigned int ctime = 0;
468 unsigned int t_slot = 9; /* FIXME */ 523 unsigned int t_slot = 9; /* FIXME */
469 unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len); 524 unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len);
525 unsigned int overhead = 0, overhead_rtscts = 0;
470 526
471 mr = minstrel_get_ratestats(mi, index); 527 mr = minstrel_get_ratestats(mi, index);
472 if (mr->probability < MINSTREL_FRAC(1, 10)) { 528 if (mr->probability < MINSTREL_FRAC(1, 10)) {
@@ -488,9 +544,14 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
488 ctime += (t_slot * cw) >> 1; 544 ctime += (t_slot * cw) >> 1;
489 cw = min((cw << 1) | 1, mp->cw_max); 545 cw = min((cw << 1) | 1, mp->cw_max);
490 546
547 if (index / MCS_GROUP_RATES != MINSTREL_CCK_GROUP) {
548 overhead = mi->overhead;
549 overhead_rtscts = mi->overhead_rtscts;
550 }
551
491 /* Total TX time for data and Contention after first 2 tries */ 552 /* Total TX time for data and Contention after first 2 tries */
492 tx_time = ctime + 2 * (mi->overhead + tx_time_data); 553 tx_time = ctime + 2 * (overhead + tx_time_data);
493 tx_time_rtscts = ctime + 2 * (mi->overhead_rtscts + tx_time_data); 554 tx_time_rtscts = ctime + 2 * (overhead_rtscts + tx_time_data);
494 555
495 /* See how many more tries we can fit inside segment size */ 556 /* See how many more tries we can fit inside segment size */
496 do { 557 do {
@@ -499,8 +560,8 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
499 cw = min((cw << 1) | 1, mp->cw_max); 560 cw = min((cw << 1) | 1, mp->cw_max);
500 561
501 /* Total TX time after this try */ 562 /* Total TX time after this try */
502 tx_time += ctime + mi->overhead + tx_time_data; 563 tx_time += ctime + overhead + tx_time_data;
503 tx_time_rtscts += ctime + mi->overhead_rtscts + tx_time_data; 564 tx_time_rtscts += ctime + overhead_rtscts + tx_time_data;
504 565
505 if (tx_time_rtscts < mp->segment_size) 566 if (tx_time_rtscts < mp->segment_size)
506 mr->retry_count_rtscts++; 567 mr->retry_count_rtscts++;
@@ -530,9 +591,16 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
530 else 591 else
531 rate->count = mr->retry_count; 592 rate->count = mr->retry_count;
532 593
533 rate->flags = IEEE80211_TX_RC_MCS | group->flags; 594 rate->flags = 0;
534 if (rtscts) 595 if (rtscts)
535 rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS; 596 rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
597
598 if (index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) {
599 rate->idx = mp->cck_rates[index % ARRAY_SIZE(mp->cck_rates)];
600 return;
601 }
602
603 rate->flags |= IEEE80211_TX_RC_MCS | group->flags;
536 rate->idx = index % MCS_GROUP_RATES + (group->streams - 1) * MCS_GROUP_RATES; 604 rate->idx = index % MCS_GROUP_RATES + (group->streams - 1) * MCS_GROUP_RATES;
537} 605}
538 606
@@ -596,6 +664,22 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
596} 664}
597 665
598static void 666static void
667minstrel_ht_check_cck_shortpreamble(struct minstrel_priv *mp,
668 struct minstrel_ht_sta *mi, bool val)
669{
670 u8 supported = mi->groups[MINSTREL_CCK_GROUP].supported;
671
672 if (!supported || !mi->cck_supported_short)
673 return;
674
675 if (supported & (mi->cck_supported_short << (val * 4)))
676 return;
677
678 supported ^= mi->cck_supported_short | (mi->cck_supported_short << 4);
679 mi->groups[MINSTREL_CCK_GROUP].supported = supported;
680}
681
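[editor's note] The XOR in minstrel_ht_check_cck_shortpreamble() is compact: bits 0..3 of "supported" are the long-preamble CCK rates, bits 4..7 the short-preamble ones, and XOR-ing with (mask | mask << 4) moves the short-capable rates between the two halves when the BSS preamble setting flips. A standalone check of the bit arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned char cck_supported_short = 0x0e;	/* 2/5.5/11M do short */
	unsigned char supported = 0x0f;			/* long preamble in use */

	/* BSS switches to short preamble: */
	supported ^= cck_supported_short | (cck_supported_short << 4);
	printf("0x%02x\n", supported);	/* 0xe1: 1M long + 2/5.5/11M short */

	/* and back again: */
	supported ^= cck_supported_short | (cck_supported_short << 4);
	printf("0x%02x\n", supported);	/* 0x0f */
	return 0;
}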
682static void
599minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, 683minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
600 struct ieee80211_tx_rate_control *txrc) 684 struct ieee80211_tx_rate_control *txrc)
601{ 685{
@@ -614,6 +698,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
614 return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc); 698 return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc);
615 699
616 info->flags |= mi->tx_flags; 700 info->flags |= mi->tx_flags;
701 minstrel_ht_check_cck_shortpreamble(mp, mi, txrc->short_preamble);
617 702
618 /* Don't use EAPOL frames for sampling on non-mrr hw */ 703 /* Don't use EAPOL frames for sampling on non-mrr hw */
619 if (mp->hw->max_rates == 1 && 704 if (mp->hw->max_rates == 1 &&
@@ -687,6 +772,30 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
687} 772}
688 773
689static void 774static void
775minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
776 struct ieee80211_supported_band *sband,
777 struct ieee80211_sta *sta)
778{
779 int i;
780
781 if (sband->band != IEEE80211_BAND_2GHZ)
782 return;
783
784 mi->cck_supported = 0;
785 mi->cck_supported_short = 0;
786 for (i = 0; i < 4; i++) {
787 if (!rate_supported(sta, sband->band, mp->cck_rates[i]))
788 continue;
789
790 mi->cck_supported |= BIT(i);
791 if (sband->bitrates[i].flags & IEEE80211_RATE_SHORT_PREAMBLE)
792 mi->cck_supported_short |= BIT(i);
793 }
794
795 mi->groups[MINSTREL_CCK_GROUP].supported = mi->cck_supported;
796}
797
798static void
690minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, 799minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
691 struct ieee80211_sta *sta, void *priv_sta) 800 struct ieee80211_sta *sta, void *priv_sta)
692{ 801{
@@ -699,14 +808,13 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
699 int ack_dur; 808 int ack_dur;
700 int stbc; 809 int stbc;
701 int i; 810 int i;
702 unsigned int smps;
703 811
704 /* fall back to the old minstrel for legacy stations */ 812 /* fall back to the old minstrel for legacy stations */
705 if (!sta->ht_cap.ht_supported) 813 if (!sta->ht_cap.ht_supported)
706 goto use_legacy; 814 goto use_legacy;
707 815
708 BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != 816 BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) !=
709 MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS); 817 MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS + 1);
710 818
711 msp->is_ht = true; 819 msp->is_ht = true;
712 memset(mi, 0, sizeof(*mi)); 820 memset(mi, 0, sizeof(*mi));
@@ -735,28 +843,29 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
735 if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING) 843 if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING)
736 mi->tx_flags |= IEEE80211_TX_CTL_LDPC; 844 mi->tx_flags |= IEEE80211_TX_CTL_LDPC;
737 845
738 smps = (sta_cap & IEEE80211_HT_CAP_SM_PS) >>
739 IEEE80211_HT_CAP_SM_PS_SHIFT;
740
741 for (i = 0; i < ARRAY_SIZE(mi->groups); i++) { 846 for (i = 0; i < ARRAY_SIZE(mi->groups); i++) {
742 u16 req = 0;
743
744 mi->groups[i].supported = 0; 847 mi->groups[i].supported = 0;
745 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI) { 848 if (i == MINSTREL_CCK_GROUP) {
746 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 849 minstrel_ht_update_cck(mp, mi, sband, sta);
747 req |= IEEE80211_HT_CAP_SGI_40; 850 continue;
748 else
749 req |= IEEE80211_HT_CAP_SGI_20;
750 } 851 }
751 852
752 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 853 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI) {
753 req |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 854 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
855 if (!(sta_cap & IEEE80211_HT_CAP_SGI_40))
856 continue;
857 } else {
858 if (!(sta_cap & IEEE80211_HT_CAP_SGI_20))
859 continue;
860 }
861 }
754 862
755 if ((sta_cap & req) != req) 863 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH &&
864 sta->bandwidth < IEEE80211_STA_RX_BW_40)
756 continue; 865 continue;
757 866
758 /* Mark MCS > 7 as unsupported if STA is in static SMPS mode */ 867 /* Mark MCS > 7 as unsupported if STA is in static SMPS mode */
759 if (smps == WLAN_HT_CAP_SM_PS_STATIC && 868 if (sta->smps_mode == IEEE80211_SMPS_STATIC &&
760 minstrel_mcs_groups[i].streams > 1) 869 minstrel_mcs_groups[i].streams > 1)
761 continue; 870 continue;
762 871
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h
index 462d2b227ed5..302dbd52180d 100644
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -107,8 +107,11 @@ struct minstrel_ht_sta {
107 /* current MCS group to be sampled */ 107 /* current MCS group to be sampled */
108 u8 sample_group; 108 u8 sample_group;
109 109
110 u8 cck_supported;
111 u8 cck_supported_short;
112
110 /* MCS rate group info and statistics */ 113 /* MCS rate group info and statistics */
111 struct minstrel_mcs_group_data groups[MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS]; 114 struct minstrel_mcs_group_data groups[MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS + 1];
112}; 115};
113 116
114struct minstrel_ht_sta_priv { 117struct minstrel_ht_sta_priv {
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index e788f76a1dfe..df44a5ad8270 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -15,13 +15,76 @@
15#include "rc80211_minstrel.h" 15#include "rc80211_minstrel.h"
16#include "rc80211_minstrel_ht.h" 16#include "rc80211_minstrel_ht.h"
17 17
18static char *
19minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
20{
21 unsigned int max_mcs = MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS;
22 const struct mcs_group *mg;
23 unsigned int j, tp, prob, eprob;
24 char htmode = '2';
25 char gimode = 'L';
26
27 if (!mi->groups[i].supported)
28 return p;
29
30 mg = &minstrel_mcs_groups[i];
31 if (mg->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
32 htmode = '4';
33 if (mg->flags & IEEE80211_TX_RC_SHORT_GI)
34 gimode = 'S';
35
36 for (j = 0; j < MCS_GROUP_RATES; j++) {
37 struct minstrel_rate_stats *mr = &mi->groups[i].rates[j];
38 static const int bitrates[4] = { 10, 20, 55, 110 };
39 int idx = i * MCS_GROUP_RATES + j;
40
41 if (!(mi->groups[i].supported & BIT(j)))
42 continue;
43
44 if (i == max_mcs)
45 p += sprintf(p, "CCK/%cP ", j < 4 ? 'L' : 'S');
46 else
47 p += sprintf(p, "HT%c0/%cGI ", htmode, gimode);
48
49 *(p++) = (idx == mi->max_tp_rate) ? 'T' : ' ';
50 *(p++) = (idx == mi->max_tp_rate2) ? 't' : ' ';
51 *(p++) = (idx == mi->max_prob_rate) ? 'P' : ' ';
52
53 if (i == max_mcs) {
54 int r = bitrates[j % 4];
55 p += sprintf(p, " %2u.%1uM", r / 10, r % 10);
56 } else {
57 p += sprintf(p, " MCS%-2u", (mg->streams - 1) *
58 MCS_GROUP_RATES + j);
59 }
60
61 tp = mr->cur_tp / 10;
62 prob = MINSTREL_TRUNC(mr->cur_prob * 1000);
63 eprob = MINSTREL_TRUNC(mr->probability * 1000);
64
65 p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u "
66 "%3u %3u(%3u) %8llu %8llu\n",
67 tp / 10, tp % 10,
68 eprob / 10, eprob % 10,
69 prob / 10, prob % 10,
70 mr->retry_count,
71 mr->last_success,
72 mr->last_attempts,
73 (unsigned long long)mr->succ_hist,
74 (unsigned long long)mr->att_hist);
75 }
76
77 return p;
78}
79
18static int 80static int
19minstrel_ht_stats_open(struct inode *inode, struct file *file) 81minstrel_ht_stats_open(struct inode *inode, struct file *file)
20{ 82{
21 struct minstrel_ht_sta_priv *msp = inode->i_private; 83 struct minstrel_ht_sta_priv *msp = inode->i_private;
22 struct minstrel_ht_sta *mi = &msp->ht; 84 struct minstrel_ht_sta *mi = &msp->ht;
23 struct minstrel_debugfs_info *ms; 85 struct minstrel_debugfs_info *ms;
24 unsigned int i, j, tp, prob, eprob; 86 unsigned int i;
87 unsigned int max_mcs = MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS;
25 char *p; 88 char *p;
26 int ret; 89 int ret;
27 90
@@ -38,50 +101,13 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)
 
 	file->private_data = ms;
 	p = ms->buf;
 	p += sprintf(p, "type rate throughput ewma prob this prob "
-			"this succ/attempt success attempts\n");
+			"retry this succ/attempt success attempts\n");
-	for (i = 0; i < MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS; i++) {
-		char htmode = '2';
-		char gimode = 'L';
-
-		if (!mi->groups[i].supported)
-			continue;
-
-		if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
-			htmode = '4';
-		if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI)
-			gimode = 'S';
 
-		for (j = 0; j < MCS_GROUP_RATES; j++) {
-			struct minstrel_rate_stats *mr = &mi->groups[i].rates[j];
-			int idx = i * MCS_GROUP_RATES + j;
+	p = minstrel_ht_stats_dump(mi, max_mcs, p);
+	for (i = 0; i < max_mcs; i++)
+		p = minstrel_ht_stats_dump(mi, i, p);
 
-			if (!(mi->groups[i].supported & BIT(j)))
-				continue;
-
-			p += sprintf(p, "HT%c0/%cGI ", htmode, gimode);
-
-			*(p++) = (idx == mi->max_tp_rate) ? 'T' : ' ';
-			*(p++) = (idx == mi->max_tp_rate2) ? 't' : ' ';
-			*(p++) = (idx == mi->max_prob_rate) ? 'P' : ' ';
-			p += sprintf(p, "MCS%-2u", (minstrel_mcs_groups[i].streams - 1) *
-					MCS_GROUP_RATES + j);
-
-			tp = mr->cur_tp / 10;
-			prob = MINSTREL_TRUNC(mr->cur_prob * 1000);
-			eprob = MINSTREL_TRUNC(mr->probability * 1000);
-
-			p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u "
-					"%3u(%3u) %8llu %8llu\n",
-					tp / 10, tp % 10,
-					eprob / 10, eprob % 10,
-					prob / 10, prob % 10,
-					mr->last_success,
-					mr->last_attempts,
-					(unsigned long long)mr->succ_hist,
-					(unsigned long long)mr->att_hist);
-		}
-	}
 	p += sprintf(p, "\nTotal packet count:: ideal %d "
 		      "lookaround %d\n",
 		      max(0, (int) mi->total_packets - (int) mi->sample_packets),
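
The refactor above leans on the append-and-return-pointer idiom: sprintf() returns the number of characters written, so a dump helper can advance the write pointer and hand it back for the caller to chain. A minimal standalone sketch (helper name, formats, and the fixed buffer size are illustrative, not taken from the kernel):

#include <stdio.h>

/* append one group's line; return the advanced write pointer */
static char *dump_group(int group, char *p)
{
	p += sprintf(p, "group %d: ...\n", group);
	return p;
}

int main(void)
{
	char buf[256];
	char *p = buf;
	int i;

	p += sprintf(p, "type rate\n");	/* header row */
	for (i = 0; i < 3; i++)
		p = dump_group(i, p);	/* chain helpers, as above */

	fputs(buf, stdout);
	return 0;
}
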
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index a19089565c4b..3acb70b73e22 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -668,9 +668,9 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
 
 static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
 					    struct tid_ampdu_rx *tid_agg_rx,
-					    int index)
+					    int index,
+					    struct sk_buff_head *frames)
 {
-	struct ieee80211_local *local = sdata->local;
 	struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
 	struct ieee80211_rx_status *status;
 
@@ -684,7 +684,7 @@ static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
 	tid_agg_rx->reorder_buf[index] = NULL;
 	status = IEEE80211_SKB_RXCB(skb);
 	status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
-	skb_queue_tail(&local->rx_skb_queue, skb);
+	__skb_queue_tail(frames, skb);
 
 no_frame:
 	tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
@@ -692,7 +692,8 @@ no_frame:
 
 static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
 					     struct tid_ampdu_rx *tid_agg_rx,
-					     u16 head_seq_num)
+					     u16 head_seq_num,
+					     struct sk_buff_head *frames)
 {
 	int index;
 
@@ -701,7 +702,8 @@ static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata
 	while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
 		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
 							tid_agg_rx->buf_size;
-		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
+		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
+						frames);
 	}
 }
 
@@ -717,7 +719,8 @@ static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata
 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
 
 static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
-					  struct tid_ampdu_rx *tid_agg_rx)
+					  struct tid_ampdu_rx *tid_agg_rx,
+					  struct sk_buff_head *frames)
 {
 	int index, j;
 
@@ -746,7 +749,8 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
 
 			ht_dbg_ratelimited(sdata,
 					   "release an RX reorder frame due to timeout on earlier frames\n");
-			ieee80211_release_reorder_frame(sdata, tid_agg_rx, j);
+			ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
+							frames);
 
 			/*
 			 * Increment the head seq# also for the skipped slots.
@@ -756,7 +760,8 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
 			skipped = 0;
 		}
 	} else while (tid_agg_rx->reorder_buf[index]) {
-		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
+		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
+						frames);
 		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
 							tid_agg_rx->buf_size;
 	}
@@ -788,7 +793,8 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
  */
 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
 					     struct tid_ampdu_rx *tid_agg_rx,
-					     struct sk_buff *skb)
+					     struct sk_buff *skb,
+					     struct sk_buff_head *frames)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 	u16 sc = le16_to_cpu(hdr->seq_ctrl);
@@ -816,7 +822,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
 		head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
 		/* release stored frames up to new head to stack */
 		ieee80211_release_reorder_frames(sdata, tid_agg_rx,
-						 head_seq_num);
+						 head_seq_num, frames);
 	}
 
 	/* Now the new frame is always in the range of the reordering buffer */
@@ -846,7 +852,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
 	tid_agg_rx->reorder_buf[index] = skb;
 	tid_agg_rx->reorder_time[index] = jiffies;
 	tid_agg_rx->stored_mpdu_num++;
-	ieee80211_sta_reorder_release(sdata, tid_agg_rx);
+	ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
 
  out:
 	spin_unlock(&tid_agg_rx->reorder_lock);
@@ -857,7 +863,8 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
  * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
  * true if the MPDU was buffered, false if it should be processed.
  */
-static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
+static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
+				       struct sk_buff_head *frames)
 {
 	struct sk_buff *skb = rx->skb;
 	struct ieee80211_local *local = rx->local;
@@ -922,11 +929,12 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
 	 * sure that we cannot get to it any more before doing
 	 * anything with it.
 	 */
-	if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb))
+	if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
+					     frames))
 		return;
 
  dont_reorder:
-	skb_queue_tail(&local->rx_skb_queue, skb);
+	__skb_queue_tail(frames, skb);
 }
 
 static ieee80211_rx_result debug_noinline
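
The signature changes above all serve one pattern: instead of appending released frames to a queue shared across CPUs (local->rx_skb_queue), each call chain collects them on a caller-provided, on-stack sk_buff_head, where the unlocked __skb_* variants are safe. A schematic kernel-style sketch under that assumption; the helpers here are illustrative stand-ins, not mac80211 functions:

#include <linux/skbuff.h>

/* producer: append with the unlocked variant, the queue is private */
static void example_release_frame(struct sk_buff_head *frames,
				  struct sk_buff *skb)
{
	__skb_queue_tail(frames, skb);
}

/* consumer: drain the on-stack queue in one pass */
static void example_rx_pass(struct sk_buff *skb)
{
	struct sk_buff_head frames;

	__skb_queue_head_init(&frames);		/* no lock initialized */
	example_release_frame(&frames, skb);
	while ((skb = __skb_dequeue(&frames)))
		kfree_skb(skb);	/* stand-in for running the rx handlers */
}
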
@@ -1452,6 +1460,10 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
 		}
 	}
 
+	/* mesh power save support */
+	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
+		ieee80211_mps_rx_h_sta_process(sta, hdr);
+
 	/*
 	 * Drop (qos-)data::nullfunc frames silently, since they
 	 * are used only to control station power saving mode.
@@ -2090,7 +2102,10 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
 	if (is_multicast_ether_addr(fwd_hdr->addr1)) {
 		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
 		memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
+		/* update power mode indication when forwarding */
+		ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
 	} else if (!mesh_nexthop_lookup(fwd_skb, sdata)) {
+		/* mesh power mode flags updated in mesh_nexthop_lookup */
 		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
 	} else {
 		/* unable to resolve next hop */
@@ -2177,7 +2192,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
 }
 
 static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
+ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
 {
 	struct sk_buff *skb = rx->skb;
 	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
@@ -2216,7 +2231,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
 		spin_lock(&tid_agg_rx->reorder_lock);
 		/* release stored frames up to start of BAR */
 		ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
-						 start_seq_num);
+						 start_seq_num, frames);
 		spin_unlock(&tid_agg_rx->reorder_lock);
 
 		kfree_skb(skb);
@@ -2360,31 +2375,27 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
 		switch (mgmt->u.action.u.ht_smps.action) {
 		case WLAN_HT_ACTION_SMPS: {
 			struct ieee80211_supported_band *sband;
-			u8 smps;
+			enum ieee80211_smps_mode smps_mode;
 
 			/* convert to HT capability */
 			switch (mgmt->u.action.u.ht_smps.smps_control) {
 			case WLAN_HT_SMPS_CONTROL_DISABLED:
-				smps = WLAN_HT_CAP_SM_PS_DISABLED;
+				smps_mode = IEEE80211_SMPS_OFF;
 				break;
 			case WLAN_HT_SMPS_CONTROL_STATIC:
-				smps = WLAN_HT_CAP_SM_PS_STATIC;
+				smps_mode = IEEE80211_SMPS_STATIC;
 				break;
 			case WLAN_HT_SMPS_CONTROL_DYNAMIC:
-				smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
+				smps_mode = IEEE80211_SMPS_DYNAMIC;
 				break;
 			default:
 				goto invalid;
 			}
-			smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
 
 			/* if no change do nothing */
-			if ((rx->sta->sta.ht_cap.cap &
-			     IEEE80211_HT_CAP_SM_PS) == smps)
+			if (rx->sta->sta.smps_mode == smps_mode)
 				goto handled;
-
-			rx->sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SM_PS;
-			rx->sta->sta.ht_cap.cap |= smps;
+			rx->sta->sta.smps_mode = smps_mode;
 
 			sband = rx->local->hw.wiphy->bands[status->band];
 
@@ -2395,26 +2406,21 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
 		case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
 			struct ieee80211_supported_band *sband;
 			u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
-			bool old_40mhz, new_40mhz;
+			enum ieee80211_sta_rx_bandwidth new_bw;
 
 			/* If it doesn't support 40 MHz it can't change ... */
-			if (!rx->sta->supports_40mhz)
+			if (!(rx->sta->sta.ht_cap.cap &
+			      IEEE80211_HT_CAP_SUP_WIDTH_20_40))
 				goto handled;
 
-			old_40mhz = rx->sta->sta.ht_cap.cap &
-					IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-			new_40mhz = chanwidth == IEEE80211_HT_CHANWIDTH_ANY;
+			if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
+				new_bw = IEEE80211_STA_RX_BW_20;
+			else
+				new_bw = ieee80211_sta_cur_vht_bw(rx->sta);
 
-			if (old_40mhz == new_40mhz)
+			if (rx->sta->sta.bandwidth == new_bw)
 				goto handled;
 
-			if (new_40mhz)
-				rx->sta->sta.ht_cap.cap |=
-					IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-			else
-				rx->sta->sta.ht_cap.cap &=
-					~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-
 			sband = rx->local->hw.wiphy->bands[status->band];
 
 			rate_control_rate_update(local, sband, rx->sta,
@@ -2426,6 +2432,37 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
 		}
 
 		break;
+	case WLAN_CATEGORY_VHT:
+		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
+		    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
+		    sdata->vif.type != NL80211_IFTYPE_AP &&
+		    sdata->vif.type != NL80211_IFTYPE_ADHOC)
+			break;
+
+		/* verify action code is present */
+		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
+			goto invalid;
+
+		switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
+		case WLAN_VHT_ACTION_OPMODE_NOTIF: {
+			u8 opmode;
+
+			/* verify opmode is present */
+			if (len < IEEE80211_MIN_ACTION_SIZE + 2)
+				goto invalid;
+
+			opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
+
+			ieee80211_vht_handle_opmode(rx->sdata, rx->sta,
+						    opmode, status->band,
+						    false);
+			goto handled;
+		}
+		default:
+			break;
+		}
+		break;
 	case WLAN_CATEGORY_BACK:
 		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
 		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
@@ -2677,8 +2714,9 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
 			return RX_DROP_MONITOR;
 		break;
 	case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
-		/* process only for ibss */
-		if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
+		/* process only for ibss and mesh */
+		if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
 			return RX_DROP_MONITOR;
 		break;
 	default:
@@ -2801,7 +2839,8 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
 	}
 }
 
-static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
+static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
+				  struct sk_buff_head *frames)
 {
 	ieee80211_rx_result res = RX_DROP_MONITOR;
 	struct sk_buff *skb;
@@ -2813,15 +2852,9 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
 			goto rxh_next;  \
 	} while (0);
 
-	spin_lock(&rx->local->rx_skb_queue.lock);
-	if (rx->local->running_rx_handler)
-		goto unlock;
-
-	rx->local->running_rx_handler = true;
-
-	while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) {
-		spin_unlock(&rx->local->rx_skb_queue.lock);
+	spin_lock_bh(&rx->local->rx_path_lock);
 
+	while ((skb = __skb_dequeue(frames))) {
 		/*
 		 * all the other fields are valid across frames
 		 * that belong to an aMPDU since they are on the
@@ -2842,7 +2875,12 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
 #endif
 		CALL_RXH(ieee80211_rx_h_amsdu)
 		CALL_RXH(ieee80211_rx_h_data)
-		CALL_RXH(ieee80211_rx_h_ctrl);
+
+		/* special treatment -- needs the queue */
+		res = ieee80211_rx_h_ctrl(rx, frames);
+		if (res != RX_CONTINUE)
+			goto rxh_next;
+
 		CALL_RXH(ieee80211_rx_h_mgmt_check)
 		CALL_RXH(ieee80211_rx_h_action)
 		CALL_RXH(ieee80211_rx_h_userspace_mgmt)
@@ -2851,20 +2889,20 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
 
  rxh_next:
 		ieee80211_rx_handlers_result(rx, res);
-		spin_lock(&rx->local->rx_skb_queue.lock);
+
 #undef CALL_RXH
 	}
 
-	rx->local->running_rx_handler = false;
-
- unlock:
-	spin_unlock(&rx->local->rx_skb_queue.lock);
+	spin_unlock_bh(&rx->local->rx_path_lock);
 }
 
 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
 {
+	struct sk_buff_head reorder_release;
 	ieee80211_rx_result res = RX_DROP_MONITOR;
 
+	__skb_queue_head_init(&reorder_release);
+
 #define CALL_RXH(rxh)		\
 	do {			\
 		res = rxh(rx);	\
@@ -2874,9 +2912,9 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
 
 	CALL_RXH(ieee80211_rx_h_check)
 
-	ieee80211_rx_reorder_ampdu(rx);
+	ieee80211_rx_reorder_ampdu(rx, &reorder_release);
 
-	ieee80211_rx_handlers(rx);
+	ieee80211_rx_handlers(rx, &reorder_release);
 	return;
 
  rxh_next:
@@ -2891,6 +2929,7 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
  */
 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
 {
+	struct sk_buff_head frames;
 	struct ieee80211_rx_data rx = {
 		.sta = sta,
 		.sdata = sta->sdata,
@@ -2906,11 +2945,13 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
 	if (!tid_agg_rx)
 		return;
 
+	__skb_queue_head_init(&frames);
+
 	spin_lock(&tid_agg_rx->reorder_lock);
-	ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx);
+	ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
 	spin_unlock(&tid_agg_rx->reorder_lock);
 
-	ieee80211_rx_handlers(&rx);
+	ieee80211_rx_handlers(&rx, &frames);
 }
 
 /* main receive path */
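
Taken together, the timeout path above splits the work into two phases: frames are moved off the reorder buffer while tid_agg_rx->reorder_lock is held (cheap pointer operations only), and the heavyweight rx handlers then run with the lock dropped. A self-contained sketch of that shape, with an illustrative lock and kfree_skb() standing in for the handlers:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_reorder_lock);	/* illustrative */

static void example_timeout_release(struct sk_buff_head *reorder_buf)
{
	struct sk_buff_head frames;
	struct sk_buff *skb;

	__skb_queue_head_init(&frames);

	spin_lock(&example_reorder_lock);
	skb_queue_splice_init(reorder_buf, &frames);	/* collect only */
	spin_unlock(&example_reorder_lock);

	while ((skb = __skb_dequeue(&frames)))		/* lock dropped */
		kfree_skb(skb);
}
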
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 607684c47d55..43a45cf00e06 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -27,22 +27,15 @@
 
 #define IEEE80211_PROBE_DELAY (HZ / 33)
 #define IEEE80211_CHANNEL_TIME (HZ / 33)
-#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 8)
+#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 9)
 
-static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss)
-{
-	struct ieee80211_bss *bss = (void *)cbss->priv;
-
-	kfree(bss_mesh_id(bss));
-	kfree(bss_mesh_cfg(bss));
-}
-
 void ieee80211_rx_bss_put(struct ieee80211_local *local,
 			  struct ieee80211_bss *bss)
 {
 	if (!bss)
 		return;
-	cfg80211_put_bss(container_of((void *)bss, struct cfg80211_bss, priv));
+	cfg80211_put_bss(local->hw.wiphy,
+			 container_of((void *)bss, struct cfg80211_bss, priv));
 }
 
 static bool is_uapsd_supported(struct ieee802_11_elems *elems)
@@ -85,10 +78,12 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
 	if (!cbss)
 		return NULL;
 
-	cbss->free_priv = ieee80211_rx_bss_free;
 	bss = (void *)cbss->priv;
 
-	bss->device_ts = rx_status->device_timestamp;
+	if (beacon)
+		bss->device_ts_beacon = rx_status->device_timestamp;
+	else
+		bss->device_ts_presp = rx_status->device_timestamp;
 
 	if (elems->parse_error) {
 		if (beacon)
@@ -146,9 +141,6 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
 		bss->valid_data |= IEEE80211_BSS_VALID_WMM;
 	}
 
-	if (!beacon)
-		bss->last_probe_resp = jiffies;
-
 	return bss;
 }
 
@@ -342,6 +334,9 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
 
 	ieee80211_offchannel_stop_vifs(local);
 
+	/* ensure nullfunc is transmitted before leaving operating channel */
+	drv_flush(local, false);
+
 	ieee80211_configure_filter(local);
 
 	/* We need to set power level at maximum rate for scanning. */
@@ -356,6 +351,9 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
 static bool ieee80211_can_scan(struct ieee80211_local *local,
 			       struct ieee80211_sub_if_data *sdata)
 {
+	if (local->radar_detect_enabled)
+		return false;
+
 	if (!list_empty(&local->roc_list))
 		return false;
 
@@ -390,6 +388,11 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
 	int i;
 	struct ieee80211_sub_if_data *sdata;
 	enum ieee80211_band band = local->hw.conf.channel->band;
+	u32 tx_flags;
+
+	tx_flags = IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
+	if (local->scan_req->no_cck)
+		tx_flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
 
 	sdata = rcu_dereference_protected(local->scan_sdata,
 					  lockdep_is_held(&local->mtx));
@@ -401,8 +404,7 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
 			local->scan_req->ssids[i].ssid_len,
 			local->scan_req->ie, local->scan_req->ie_len,
 			local->scan_req->rates[band], false,
-			local->scan_req->no_cck,
-			local->hw.conf.channel, true);
+			tx_flags, local->hw.conf.channel, true);
 
 	/*
 	 * After sending probe requests, wait for probe responses
@@ -546,8 +548,6 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
 	bool associated = false;
 	bool tx_empty = true;
 	bool bad_latency;
-	bool listen_int_exceeded;
-	unsigned long min_beacon_int = 0;
 	struct ieee80211_sub_if_data *sdata;
 	struct ieee80211_channel *next_chan;
 	enum mac80211_scan_state next_scan_state;
@@ -566,11 +566,6 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
 		if (sdata->u.mgd.associated) {
 			associated = true;
 
-			if (sdata->vif.bss_conf.beacon_int <
-			    min_beacon_int || min_beacon_int == 0)
-				min_beacon_int =
-					sdata->vif.bss_conf.beacon_int;
-
 			if (!qdisc_all_tx_empty(sdata->dev)) {
 				tx_empty = false;
 				break;
@@ -587,34 +582,19 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
 	 * see if we can scan another channel without interfering
 	 * with the current traffic situation.
 	 *
-	 * Since we don't know if the AP has pending frames for us
-	 * we can only check for our tx queues and use the current
-	 * pm_qos requirements for rx. Hence, if no tx traffic occurs
-	 * at all we will scan as many channels in a row as the pm_qos
-	 * latency allows us to. Additionally we also check for the
-	 * currently negotiated listen interval to prevent losing
-	 * frames unnecessarily.
-	 *
-	 * Otherwise switch back to the operating channel.
+	 * Keep good latency, do not stay off-channel more than 125 ms.
 	 */
 
 	bad_latency = time_after(jiffies +
 				 ieee80211_scan_get_channel_time(next_chan),
-				 local->leave_oper_channel_time +
-				 usecs_to_jiffies(pm_qos_request(PM_QOS_NETWORK_LATENCY)));
-
-	listen_int_exceeded = time_after(jiffies +
-					 ieee80211_scan_get_channel_time(next_chan),
-					 local->leave_oper_channel_time +
-					 usecs_to_jiffies(min_beacon_int * 1024) *
-					 local->hw.conf.listen_interval);
+				 local->leave_oper_channel_time + HZ / 8);
 
 	if (associated && !tx_empty) {
 		if (local->scan_req->flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
 			next_scan_state = SCAN_ABORT;
 		else
 			next_scan_state = SCAN_SUSPEND;
-	} else if (associated && (bad_latency || listen_int_exceeded)) {
+	} else if (associated && bad_latency) {
 		next_scan_state = SCAN_SUSPEND;
 	} else {
 		next_scan_state = SCAN_SET_CHANNEL;
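
The simplified rule above bounds off-channel time with plain jiffies arithmetic: HZ / 8 is 125 ms, and time_after() is wraparound-safe. A minimal sketch (function name illustrative):

#include <linux/jiffies.h>

static bool example_bad_latency(unsigned long leave_time,
				unsigned long next_chan_time)
{
	/* true if scanning the next channel would keep us away > 125 ms */
	return time_after(jiffies + next_chan_time, leave_time + HZ / 8);
}
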
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 9d864ed5f3da..a79ce820cb50 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -120,6 +120,8 @@ static void cleanup_single_sta(struct sta_info *sta)
 	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
 	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
 		ps = &sdata->bss->ps;
+	else if (ieee80211_vif_is_mesh(&sdata->vif))
+		ps = &sdata->u.mesh.ps;
 	else
 		return;
 
@@ -135,13 +137,8 @@ static void cleanup_single_sta(struct sta_info *sta)
 		ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]);
 	}
 
-#ifdef CONFIG_MAC80211_MESH
-	if (ieee80211_vif_is_mesh(&sdata->vif)) {
-		mesh_accept_plinks_update(sdata);
-		mesh_plink_deactivate(sta);
-		del_timer_sync(&sta->plink_timer);
-	}
-#endif
+	if (ieee80211_vif_is_mesh(&sdata->vif))
+		mesh_sta_cleanup(sta);
 
 	cancel_work_sync(&sta->drv_unblock_wk);
 
@@ -378,12 +375,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 	for (i = 0; i < IEEE80211_NUM_TIDS; i++)
 		sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
 
-	sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
+	sta->sta.smps_mode = IEEE80211_SMPS_OFF;
 
-#ifdef CONFIG_MAC80211_MESH
-	sta->plink_state = NL80211_PLINK_LISTEN;
-	init_timer(&sta->plink_timer);
-#endif
+	sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
 
 	return sta;
 }
@@ -579,7 +573,6 @@ void sta_info_recalc_tim(struct sta_info *sta)
 {
 	struct ieee80211_local *local = sta->local;
 	struct ps_data *ps;
-	unsigned long flags;
 	bool indicate_tim = false;
 	u8 ignore_for_tim = sta->sta.uapsd_queues;
 	int ac;
@@ -592,6 +585,12 @@ void sta_info_recalc_tim(struct sta_info *sta)
 
 		ps = &sta->sdata->bss->ps;
 		id = sta->sta.aid;
+#ifdef CONFIG_MAC80211_MESH
+	} else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
+		ps = &sta->sdata->u.mesh.ps;
+		/* TIM map only for PLID <= IEEE80211_MAX_AID */
+		id = le16_to_cpu(sta->plid) % IEEE80211_MAX_AID;
+#endif
 	} else {
 		return;
 	}
@@ -630,7 +629,7 @@ void sta_info_recalc_tim(struct sta_info *sta)
 	}
 
  done:
-	spin_lock_irqsave(&local->tim_lock, flags);
+	spin_lock_bh(&local->tim_lock);
 
 	if (indicate_tim)
 		__bss_tim_set(ps->tim, id);
@@ -643,7 +642,7 @@ void sta_info_recalc_tim(struct sta_info *sta)
 		local->tim_in_locked_section = false;
 	}
 
-	spin_unlock_irqrestore(&local->tim_lock, flags);
+	spin_unlock_bh(&local->tim_lock);
 }
 
 static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb)
@@ -750,8 +749,9 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
 	bool have_buffered = false;
 	int ac;
 
-	/* This is only necessary for stations on BSS interfaces */
-	if (!sta->sdata->bss)
+	/* This is only necessary for stations on BSS/MBSS interfaces */
+	if (!sta->sdata->bss &&
+	    !ieee80211_vif_is_mesh(&sta->sdata->vif))
 		return false;
 
 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
@@ -939,6 +939,11 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
 		if (time_after(jiffies, sta->last_rx + exp_time)) {
 			sta_dbg(sta->sdata, "expiring inactive STA %pM\n",
 				sta->sta.addr);
+
+			if (ieee80211_vif_is_mesh(&sdata->vif) &&
+			    test_sta_flag(sta, WLAN_STA_PS_STA))
+				atomic_dec(&sdata->u.mesh.ps.num_sta_ps);
+
 			WARN_ON(__sta_info_destroy(sta));
 		}
 	}
@@ -997,6 +1002,8 @@ static void clear_sta_ps_flags(void *_sta)
 	if (sdata->vif.type == NL80211_IFTYPE_AP ||
 	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
 		ps = &sdata->bss->ps;
+	else if (ieee80211_vif_is_mesh(&sdata->vif))
+		ps = &sdata->u.mesh.ps;
 	else
 		return;
 
@@ -1114,6 +1121,8 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
 
 	drv_allow_buffered_frames(local, sta, BIT(tid), 1, reason, false);
 
+	skb->dev = sdata->dev;
+
 	rcu_read_lock();
 	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
 	if (WARN_ON(!chanctx_conf)) {
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index af7d78aa5523..63dfdb5e91da 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -56,6 +56,8 @@
  * @WLAN_STA_INSERTED: This station is inserted into the hash table.
  * @WLAN_STA_RATE_CONTROL: rate control was initialized for this station.
  * @WLAN_STA_TOFFSET_KNOWN: toffset calculated for this station is valid.
+ * @WLAN_STA_MPSP_OWNER: local STA is owner of a mesh Peer Service Period.
+ * @WLAN_STA_MPSP_RECIPIENT: local STA is recipient of a MPSP.
  */
 enum ieee80211_sta_info_flags {
 	WLAN_STA_AUTH,
@@ -78,6 +80,8 @@ enum ieee80211_sta_info_flags {
 	WLAN_STA_INSERTED,
 	WLAN_STA_RATE_CONTROL,
 	WLAN_STA_TOFFSET_KNOWN,
+	WLAN_STA_MPSP_OWNER,
+	WLAN_STA_MPSP_RECIPIENT,
 };
 
 #define ADDBA_RESP_INTERVAL HZ
@@ -282,6 +286,9 @@ struct sta_ampdu_mlme {
  * @t_offset_setpoint: reference timing offset of this sta to be used when
  *	calculating clockdrift
  * @ch_width: peer's channel width
+ * @local_pm: local link-specific power save mode
+ * @peer_pm: peer-specific power save mode towards local STA
+ * @nonpeer_pm: STA power save mode towards non-peer neighbors
  * @debugfs: debug filesystem info
  * @dead: set to true when sta is unlinked
  * @uploaded: set to true when sta is uploaded to the driver
@@ -289,8 +296,9 @@ struct sta_ampdu_mlme {
  * @sta: station information we share with the driver
  * @sta_state: duplicates information about station state (for debug)
  * @beacon_loss_count: number of times beacon loss has triggered
- * @supports_40mhz: tracks whether the station advertised 40 MHz support
- *	as we overwrite its HT parameters with the currently used value
+ * @rcu_head: RCU head used for freeing this station struct
+ * @cur_max_bandwidth: maximum bandwidth to use for TX to the station,
+ *	taken from HT/VHT capabilities or VHT operating mode notification
  */
 struct sta_info {
 	/* General information, mostly static */
@@ -379,6 +387,10 @@ struct sta_info {
 	s64 t_offset;
 	s64 t_offset_setpoint;
 	enum nl80211_chan_width ch_width;
+	/* mesh power save */
+	enum nl80211_mesh_power_mode local_pm;
+	enum nl80211_mesh_power_mode peer_pm;
+	enum nl80211_mesh_power_mode nonpeer_pm;
 #endif
 
 #ifdef CONFIG_MAC80211_DEBUGFS
@@ -388,11 +400,11 @@ struct sta_info {
 	} debugfs;
 #endif
 
+	enum ieee80211_sta_rx_bandwidth cur_max_bandwidth;
+
 	unsigned int lost_packets;
 	unsigned int beacon_loss_count;
 
-	bool supports_40mhz;
-
 	/* keep last! */
 	struct ieee80211_sta sta;
 };
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 07d99578a2b1..43439203f4e4 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -335,7 +335,8 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
 	if (dropped)
 		acked = false;
 
-	if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
+	if (info->flags & (IEEE80211_TX_INTFL_NL80211_FRAME_TX |
+			   IEEE80211_TX_INTFL_MLME_CONN_TX)) {
 		struct ieee80211_sub_if_data *sdata = NULL;
 		struct ieee80211_sub_if_data *iter_sdata;
 		u64 cookie = (unsigned long)skb;
@@ -357,10 +358,13 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
 			sdata = rcu_dereference(local->p2p_sdata);
 		}
 
-		if (!sdata)
+		if (!sdata) {
 			skb->dev = NULL;
-		else if (ieee80211_is_nullfunc(hdr->frame_control) ||
-			 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+		} else if (info->flags & IEEE80211_TX_INTFL_MLME_CONN_TX) {
+			ieee80211_mgd_conn_tx_status(sdata, hdr->frame_control,
+						     acked);
+		} else if (ieee80211_is_nullfunc(hdr->frame_control) ||
+			   ieee80211_is_qos_nullfunc(hdr->frame_control)) {
 			cfg80211_probe_status(sdata->dev, hdr->addr1,
 					      cookie, acked, GFP_ATOMIC);
 		} else {
@@ -468,6 +472,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
 			return;
 		}
 
+		/* mesh Peer Service Period support */
+		if (ieee80211_vif_is_mesh(&sta->sdata->vif) &&
+		    ieee80211_is_data_qos(fc))
+			ieee80211_mpsp_trigger_process(
+					ieee80211_get_qos_ctl(hdr),
+					sta, true, acked);
+
 		if ((local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) &&
 		    (rates_idx != -1))
 			sta->last_tx_rate = info->status.rates[rates_idx];
@@ -502,11 +513,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
 					IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
 					IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
 
-				if (local->hw.flags &
-				    IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL)
-					ieee80211_stop_tx_ba_session(&sta->sta, tid);
-				else
-					ieee80211_set_bar_pending(sta, tid, ssn);
+				ieee80211_set_bar_pending(sta, tid, ssn);
 			}
 		}
 
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 57e14d59e12f..3ed801d90f1e 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -177,12 +177,11 @@ void ieee80211_get_tkip_p1k_iv(struct ieee80211_key_conf *keyconf,
 	struct ieee80211_key *key = (struct ieee80211_key *)
 			container_of(keyconf, struct ieee80211_key, conf);
 	struct tkip_ctx *ctx = &key->u.tkip.tx;
-	unsigned long flags;
 
-	spin_lock_irqsave(&key->u.tkip.txlock, flags);
+	spin_lock_bh(&key->u.tkip.txlock);
 	ieee80211_compute_tkip_p1k(key, iv32);
 	memcpy(p1k, ctx->p1k, sizeof(ctx->p1k));
-	spin_unlock_irqrestore(&key->u.tkip.txlock, flags);
+	spin_unlock_bh(&key->u.tkip.txlock);
 }
 EXPORT_SYMBOL(ieee80211_get_tkip_p1k_iv);
 
@@ -208,12 +207,11 @@ void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
 	const u8 *data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
 	u32 iv32 = get_unaligned_le32(&data[4]);
 	u16 iv16 = data[2] | (data[0] << 8);
-	unsigned long flags;
 
-	spin_lock_irqsave(&key->u.tkip.txlock, flags);
+	spin_lock_bh(&key->u.tkip.txlock);
 	ieee80211_compute_tkip_p1k(key, iv32);
 	tkip_mixing_phase2(tk, ctx, iv16, p2k);
-	spin_unlock_irqrestore(&key->u.tkip.txlock, flags);
+	spin_unlock_bh(&key->u.tkip.txlock);
 }
 EXPORT_SYMBOL(ieee80211_get_tkip_p2k);
 
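
Both conversions above rely on the same assumption: the TKIP context is only touched from process and softirq (BH) context, never from a hardirq, so spin_lock_bh() suffices and no saved-flags word needs to be carried around. A generic sketch of the idiom with an illustrative lock and payload:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* illustrative */
static int example_state;

static void example_update(int v)
{
	spin_lock_bh(&example_lock);	/* disables local BHs only */
	example_state = v;
	spin_unlock_bh(&example_lock);
}
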
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 41861b91daa3..1183c4a4fee5 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -36,7 +36,7 @@
 			__entry->control_freq = (c)->chan->center_freq;	\
 			__entry->chan_width = (c)->width;		\
 			__entry->center_freq1 = (c)->center_freq1;	\
-			__entry->center_freq1 = (c)->center_freq2;
+			__entry->center_freq2 = (c)->center_freq2;
 #define CHANDEF_PR_FMT " control:%d MHz width:%d center: %d/%d MHz"
 #define CHANDEF_PR_ARG __entry->control_freq, __entry->chan_width,	\
 			__entry->center_freq1, __entry->center_freq2
@@ -340,6 +340,7 @@ TRACE_EVENT(drv_bss_info_changed,
 		__field(u16, assoc_cap)
 		__field(u64, sync_tsf)
 		__field(u32, sync_device_ts)
+		__field(u8, sync_dtim_count)
 		__field(u32, basic_rates)
 		__array(int, mcast_rate, IEEE80211_NUM_BANDS)
 		__field(u16, ht_operation_mode)
@@ -347,8 +348,11 @@ TRACE_EVENT(drv_bss_info_changed,
 		__field(s32, cqm_rssi_hyst);
 		__field(u32, channel_width);
 		__field(u32, channel_cfreq1);
-		__dynamic_array(u32, arp_addr_list, info->arp_addr_cnt);
-		__field(bool, arp_filter_enabled);
+		__dynamic_array(u32, arp_addr_list,
+				info->arp_addr_cnt > IEEE80211_BSS_ARP_ADDR_LIST_LEN ?
+					IEEE80211_BSS_ARP_ADDR_LIST_LEN :
+					info->arp_addr_cnt);
+		__field(int, arp_addr_cnt);
 		__field(bool, qos);
 		__field(bool, idle);
 		__field(bool, ps);
@@ -376,6 +380,7 @@ TRACE_EVENT(drv_bss_info_changed,
 		__entry->assoc_cap = info->assoc_capability;
 		__entry->sync_tsf = info->sync_tsf;
 		__entry->sync_device_ts = info->sync_device_ts;
+		__entry->sync_dtim_count = info->sync_dtim_count;
 		__entry->basic_rates = info->basic_rates;
 		memcpy(__entry->mcast_rate, info->mcast_rate,
 		       sizeof(__entry->mcast_rate));
@@ -384,9 +389,11 @@ TRACE_EVENT(drv_bss_info_changed,
 		__entry->cqm_rssi_hyst = info->cqm_rssi_hyst;
 		__entry->channel_width = info->chandef.width;
 		__entry->channel_cfreq1 = info->chandef.center_freq1;
+		__entry->arp_addr_cnt = info->arp_addr_cnt;
 		memcpy(__get_dynamic_array(arp_addr_list), info->arp_addr_list,
-		       sizeof(u32) * info->arp_addr_cnt);
-		__entry->arp_filter_enabled = info->arp_filter_enabled;
+		       sizeof(u32) * (info->arp_addr_cnt > IEEE80211_BSS_ARP_ADDR_LIST_LEN ?
+					IEEE80211_BSS_ARP_ADDR_LIST_LEN :
+					info->arp_addr_cnt));
 		__entry->qos = info->qos;
 		__entry->idle = info->idle;
 		__entry->ps = info->ps;
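
The clamping above copies at most the fixed capacity even when the reported count is larger, while still recording the raw count. The kernel's typed min_t() expresses the same bound more compactly; a sketch with an illustrative capacity:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

#define EXAMPLE_LIST_LEN 4	/* illustrative capacity */

static void example_copy_clamped(u32 *dst, const u32 *src, int cnt)
{
	memcpy(dst, src, sizeof(u32) * min_t(int, cnt, EXAMPLE_LIST_LEN));
}
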
@@ -1184,23 +1191,26 @@ TRACE_EVENT(drv_set_rekey_data,
 
 TRACE_EVENT(drv_rssi_callback,
 	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
 		 enum ieee80211_rssi_event rssi_event),
 
-	TP_ARGS(local, rssi_event),
+	TP_ARGS(local, sdata, rssi_event),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
+		VIF_ENTRY
 		__field(u32, rssi_event)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
+		VIF_ASSIGN;
 		__entry->rssi_event = rssi_event;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT " rssi_event:%d",
-		LOCAL_PR_ARG, __entry->rssi_event
+		LOCAL_PR_FMT VIF_PR_FMT " rssi_event:%d",
+		LOCAL_PR_ARG, VIF_PR_ARG, __entry->rssi_event
 	)
 );
 
@@ -1432,6 +1442,14 @@ DEFINE_EVENT(local_only_evt, drv_restart_complete,
 	TP_ARGS(local)
 );
 
+#if IS_ENABLED(CONFIG_IPV6)
+DEFINE_EVENT(local_sdata_evt, drv_ipv6_addr_change,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata),
+	TP_ARGS(local, sdata)
+);
+#endif
+
 /*
  * Tracing for API calls that drivers call.
  */
@@ -1821,6 +1839,48 @@ TRACE_EVENT(stop_queue,
 	)
 );
 
+TRACE_EVENT(drv_set_default_unicast_key,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
+		 int key_idx),
+
+	TP_ARGS(local, sdata, key_idx),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		VIF_ENTRY
+		__field(int, key_idx)
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		VIF_ASSIGN;
+		__entry->key_idx = key_idx;
+	),
+
+	TP_printk(LOCAL_PR_FMT VIF_PR_FMT " key_idx:%d",
+		  LOCAL_PR_ARG, VIF_PR_ARG, __entry->key_idx)
+);
+
+TRACE_EVENT(api_radar_detected,
+	TP_PROTO(struct ieee80211_local *local),
+
+	TP_ARGS(local),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT " radar detected",
+		LOCAL_PR_ARG
+	)
+);
+
 #ifdef CONFIG_MAC80211_MESSAGE_TRACING
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM mac80211_msg
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index f32d68186dbc..fe644f91ae05 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -329,6 +329,8 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
 
 		if (sdata->vif.type == NL80211_IFTYPE_AP)
 			ps = &sdata->u.ap.ps;
+		else if (ieee80211_vif_is_mesh(&sdata->vif))
+			ps = &sdata->u.mesh.ps;
 		else
 			continue;
 
@@ -372,18 +374,20 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
 	/*
 	 * broadcast/multicast frame
 	 *
-	 * If any of the associated stations is in power save mode,
+	 * If any of the associated/peer stations is in power save mode,
 	 * the frame is buffered to be sent after DTIM beacon frame.
 	 * This is done either by the hardware or us.
 	 */
 
-	/* powersaving STAs currently only in AP/VLAN mode */
+	/* powersaving STAs currently only in AP/VLAN/mesh mode */
 	if (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
 	    tx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
 		if (!tx->sdata->bss)
 			return TX_CONTINUE;
 
 		ps = &tx->sdata->bss->ps;
+	} else if (ieee80211_vif_is_mesh(&tx->sdata->vif)) {
+		ps = &tx->sdata->u.mesh.ps;
 	} else {
 		return TX_CONTINUE;
 	}
@@ -594,7 +598,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
 		break;
 	}
 
-	if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED))
+	if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED &&
+		     !ieee80211_is_deauth(hdr->frame_control)))
 		return TX_DROP;
 
 	if (!skip_hw && tx->key &&
@@ -1225,6 +1230,21 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
 		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
 		if (local->queue_stop_reasons[q] ||
 		    (!txpending && !skb_queue_empty(&local->pending[q]))) {
+			if (unlikely(info->flags &
+				     IEEE80211_TX_INTFL_OFFCHAN_TX_OK &&
+				     local->queue_stop_reasons[q] &
+					~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL))) {
+				/*
+				 * Drop off-channel frames if queues are stopped
+				 * for any reason other than off-channel
+				 * operation. Never queue them.
+				 */
+				spin_unlock_irqrestore(
+					&local->queue_stop_reason_lock, flags);
+				ieee80211_purge_tx_queue(&local->hw, skbs);
+				return true;
+			}
+
 			/*
 			 * Since queue is stopped, queue up frames for later
 			 * transmission from the tx-pending tasklet when the
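
The off-channel test above is a plain bitmask trick: with one bit per stop reason, masking out the off-channel bit reveals whether the queue is stopped for any other reason. A standalone sketch with an illustrative bit index:

#include <stdbool.h>

enum { EXAMPLE_STOP_OFFCHANNEL = 3 };	/* illustrative bit */

static bool stopped_for_other_reason(unsigned long reasons)
{
	return reasons & ~(1UL << EXAMPLE_STOP_OFFCHANNEL);
}
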
@@ -1472,12 +1492,14 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
 	hdr = (struct ieee80211_hdr *) skb->data;
 	info->control.vif = &sdata->vif;
 
-	if (ieee80211_vif_is_mesh(&sdata->vif) &&
-	    ieee80211_is_data(hdr->frame_control) &&
-	    !is_multicast_ether_addr(hdr->addr1) &&
-	    mesh_nexthop_resolve(skb, sdata)) {
-		/* skb queued: don't free */
-		return;
+	if (ieee80211_vif_is_mesh(&sdata->vif)) {
+		if (ieee80211_is_data(hdr->frame_control) &&
+		    is_unicast_ether_addr(hdr->addr1)) {
+			if (mesh_nexthop_resolve(skb, sdata))
+				return; /* skb queued: don't free */
+		} else {
+			ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
+		}
 	}
 
 	ieee80211_set_qos_hdr(sdata, skb);
@@ -1787,16 +1809,16 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 			break;
 		/* fall through */
 	case NL80211_IFTYPE_AP:
+		if (sdata->vif.type == NL80211_IFTYPE_AP)
+			chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+		if (!chanctx_conf)
+			goto fail_rcu;
 		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
 		/* DA BSSID SA */
 		memcpy(hdr.addr1, skb->data, ETH_ALEN);
 		memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
 		memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
 		hdrlen = 24;
-		if (sdata->vif.type == NL80211_IFTYPE_AP)
-			chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
-		if (!chanctx_conf)
-			goto fail_rcu;
 		band = chanctx_conf->def.chan->band;
 		break;
 	case NL80211_IFTYPE_WDS:
@@ -2342,11 +2364,9 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
 	if (local->tim_in_locked_section) {
 		__ieee80211_beacon_add_tim(sdata, ps, skb);
 	} else {
-		unsigned long flags;
-
-		spin_lock_irqsave(&local->tim_lock, flags);
+		spin_lock(&local->tim_lock);
 		__ieee80211_beacon_add_tim(sdata, ps, skb);
-		spin_unlock_irqrestore(&local->tim_lock, flags);
+		spin_unlock(&local->tim_lock);
 	}
 
 	return 0;
@@ -2424,66 +2444,26 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
 		hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
 						 IEEE80211_STYPE_BEACON);
 	} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
-		struct ieee80211_mgmt *mgmt;
 		struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-		u8 *pos;
-		int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) +
-			      sizeof(mgmt->u.beacon);
+		struct beacon_data *bcn = rcu_dereference(ifmsh->beacon);
 
-#ifdef CONFIG_MAC80211_MESH
-		if (!sdata->u.mesh.mesh_id_len)
+		if (!bcn)
 			goto out;
-#endif
 
 		if (ifmsh->sync_ops)
 			ifmsh->sync_ops->adjust_tbtt(
 						sdata);
 
 		skb = dev_alloc_skb(local->tx_headroom +
-				    hdr_len +
-				    2 + /* NULL SSID */
-				    2 + 8 + /* supported rates */
-				    2 + 3 + /* DS params */
-				    2 + (IEEE80211_MAX_SUPP_RATES - 8) +
-				    2 + sizeof(struct ieee80211_ht_cap) +
-				    2 + sizeof(struct ieee80211_ht_operation) +
-				    2 + sdata->u.mesh.mesh_id_len +
-				    2 + sizeof(struct ieee80211_meshconf_ie) +
-				    sdata->u.mesh.ie_len);
+				    bcn->head_len +
+				    256 + /* TIM IE */
+				    bcn->tail_len);
 		if (!skb)
 			goto out;
-
-		skb_reserve(skb, local->hw.extra_tx_headroom);
-		mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
-		memset(mgmt, 0, hdr_len);
-		mgmt->frame_control =
-			cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
-		eth_broadcast_addr(mgmt->da);
-		memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
-		memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
-		mgmt->u.beacon.beacon_int =
-			cpu_to_le16(sdata->vif.bss_conf.beacon_int);
-		mgmt->u.beacon.capab_info |= cpu_to_le16(
-			sdata->u.mesh.security ? WLAN_CAPABILITY_PRIVACY : 0);
-
-		pos = skb_put(skb, 2);
-		*pos++ = WLAN_EID_SSID;
-		*pos++ = 0x0;
-
-		band = chanctx_conf->def.chan->band;
-
-		if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
-		    mesh_add_ds_params_ie(skb, sdata) ||
-		    ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
-		    mesh_add_rsn_ie(skb, sdata) ||
-		    mesh_add_ht_cap_ie(skb, sdata) ||
-		    mesh_add_ht_oper_ie(skb, sdata) ||
-		    mesh_add_meshid_ie(skb, sdata) ||
-		    mesh_add_meshconf_ie(skb, sdata) ||
-		    mesh_add_vendor_ies(skb, sdata)) {
-			pr_err("o11s: couldn't add ies!\n");
-			goto out;
-		}
+		skb_reserve(skb, local->tx_headroom);
+		memcpy(skb_put(skb, bcn->head_len), bcn->head, bcn->head_len);
+		ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb);
+		memcpy(skb_put(skb, bcn->tail_len), bcn->tail, bcn->tail_len);
 	} else {
 		WARN_ON(1);
 		goto out;
@@ -2733,6 +2713,8 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2733 goto out; 2713 goto out;
2734 2714
2735 ps = &sdata->u.ap.ps; 2715 ps = &sdata->u.ap.ps;
2716 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
2717 ps = &sdata->u.mesh.ps;
2736 } else { 2718 } else {
2737 goto out; 2719 goto out;
2738 } 2720 }
@@ -2756,6 +2738,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2756 cpu_to_le16(IEEE80211_FCTL_MOREDATA); 2738 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
2757 } 2739 }
2758 2740
2741 sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
2759 if (!ieee80211_tx_prepare(sdata, &tx, skb)) 2742 if (!ieee80211_tx_prepare(sdata, &tx, skb))
2760 break; 2743 break;
2761 dev_kfree_skb_any(skb); 2744 dev_kfree_skb_any(skb);
@@ -2788,6 +2771,8 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
2788 skb_set_queue_mapping(skb, ac); 2771 skb_set_queue_mapping(skb, ac);
2789 skb->priority = tid; 2772 skb->priority = tid;
2790 2773
2774 skb->dev = sdata->dev;
2775
2791 /* 2776 /*
2792 * The other path calling ieee80211_xmit is from the tasklet, 2777 * The other path calling ieee80211_xmit is from the tasklet,
2793 * and while we can handle concurrent transmissions locking 2778 * and while we can handle concurrent transmissions locking
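The mesh beacon hunk above replaces per-beacon IE construction with a precomputed beacon split into a head (everything before the TIM element) and a tail (everything after), so only the TIM has to be generated at beacon time; the skb is sized as head_len + 256 + tail_len for that reason. A minimal userspace sketch of that assembly, assuming head/tail were built elsewhere and with an illustrative add_tim() body:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Sketch of the head + TIM + tail beacon assembly; buffer contents and
     * the add_tim() body are illustrative, not the mac80211 implementation. */
    struct beacon_data {
        unsigned char *head, *tail;
        size_t head_len, tail_len;
    };

    static size_t add_tim(unsigned char *pos)
    {
        /* TIM element: ID(5), len, DTIM count, DTIM period, bitmap ctrl, pvb */
        const unsigned char tim[] = { 5, 4, 0, 2, 0, 0 };

        memcpy(pos, tim, sizeof(tim));
        return sizeof(tim);
    }

    static unsigned char *build_beacon(const struct beacon_data *bcn, size_t *len)
    {
        /* 256 bytes reserved for the TIM, matching the skb allocation above */
        unsigned char *buf = malloc(bcn->head_len + 256 + bcn->tail_len);
        size_t off = 0;

        if (!buf)
            return NULL;
        memcpy(buf + off, bcn->head, bcn->head_len);  /* skb_put(head) */
        off += bcn->head_len;
        off += add_tim(buf + off);                    /* beacon_add_tim() */
        memcpy(buf + off, bcn->tail, bcn->tail_len);  /* skb_put(tail) */
        off += bcn->tail_len;
        *len = off;
        return buf;
    }

    int main(void)
    {
        unsigned char head[] = { 0x80, 0x00 }, tail[] = { 0xdd, 0x00 };
        struct beacon_data bcn = { head, tail, sizeof(head), sizeof(tail) };
        size_t len;
        unsigned char *b = build_beacon(&bcn, &len);

        if (b)
            printf("beacon length: %zu\n", len);
        free(b);
        return 0;
    }

The design point is that the head and tail never change between beacons, so recomputing them per TBTT (as the removed code did) was pure overhead.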
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 7519018ff71a..0f38f43ac62e 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -739,11 +739,7 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
739 if (calc_crc) 739 if (calc_crc)
740 crc = crc32_be(crc, pos - 2, elen + 2); 740 crc = crc32_be(crc, pos - 2, elen + 2);
741 741
742 if (pos[3] == 1) { 742 if (elen >= 5 && pos[3] == 2) {
743 /* OUI Type 1 - WPA IE */
744 elems->wpa = pos;
745 elems->wpa_len = elen;
746 } else if (elen >= 5 && pos[3] == 2) {
747 /* OUI Type 2 - WMM IE */ 743 /* OUI Type 2 - WMM IE */
748 if (pos[4] == 0) { 744 if (pos[4] == 0) {
749 elems->wmm_info = pos; 745 elems->wmm_info = pos;
@@ -791,6 +787,12 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
791 else 787 else
792 elem_parse_failed = true; 788 elem_parse_failed = true;
793 break; 789 break;
790 case WLAN_EID_OPMODE_NOTIF:
791 if (elen > 0)
792 elems->opmode_notif = pos;
793 else
794 elem_parse_failed = true;
795 break;
794 case WLAN_EID_MESH_ID: 796 case WLAN_EID_MESH_ID:
795 elems->mesh_id = pos; 797 elems->mesh_id = pos;
796 elems->mesh_id_len = elen; 798 elems->mesh_id_len = elen;
@@ -805,6 +807,10 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
805 elems->peering = pos; 807 elems->peering = pos;
806 elems->peering_len = elen; 808 elems->peering_len = elen;
807 break; 809 break;
810 case WLAN_EID_MESH_AWAKE_WINDOW:
811 if (elen >= 2)
812 elems->awake_window = (void *)pos;
813 break;
808 case WLAN_EID_PREQ: 814 case WLAN_EID_PREQ:
809 elems->preq = pos; 815 elems->preq = pos;
810 elems->preq_len = elen; 816 elems->preq_len = elen;
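The parser hunks above all walk the standard 802.11 information-element (TLV) layout and gate each handler on a minimum length, as the new WLAN_EID_MESH_AWAKE_WINDOW case does with elen >= 2. A standalone sketch of that walk, assuming a raw element buffer and an example element ID (the 119 used here for Mesh Awake Window is an assumption for illustration):

    #include <stdio.h>
    #include <stddef.h>

    /* Walk 802.11 information elements: 1-byte ID, 1-byte length, payload.
     * Mirrors the bounds discipline above: stop when a declared length
     * would run past the end of the buffer. */
    static void parse_elems(const unsigned char *pos, size_t len)
    {
        while (len >= 2) {
            unsigned char id = pos[0], elen = pos[1];

            pos += 2;
            len -= 2;
            if (elen > len) {
                printf("truncated element id %u\n", id);
                return;
            }
            if (id == 119 && elen >= 2)  /* e.g. Mesh Awake Window, 2-byte field */
                printf("awake window: %u\n", pos[0] | pos[1] << 8);
            else
                printf("element id %u, len %u\n", id, elen);
            pos += elen;
            len -= elen;
        }
    }

    int main(void)
    {
        /* SSID "ab", then a 2-byte element with the assumed ID 119 */
        const unsigned char buf[] = { 0, 2, 'a', 'b', 119, 2, 0x10, 0x00 };

        parse_elems(buf, sizeof(buf));
        return 0;
    }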
@@ -1029,8 +1035,9 @@ u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
1029 1035
1030void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, 1036void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1031 u16 transaction, u16 auth_alg, u16 status, 1037 u16 transaction, u16 auth_alg, u16 status,
1032 u8 *extra, size_t extra_len, const u8 *da, 1038 const u8 *extra, size_t extra_len, const u8 *da,
1033 const u8 *bssid, const u8 *key, u8 key_len, u8 key_idx) 1039 const u8 *bssid, const u8 *key, u8 key_len, u8 key_idx,
1040 u32 tx_flags)
1034{ 1041{
1035 struct ieee80211_local *local = sdata->local; 1042 struct ieee80211_local *local = sdata->local;
1036 struct sk_buff *skb; 1043 struct sk_buff *skb;
@@ -1063,7 +1070,8 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1063 WARN_ON(err); 1070 WARN_ON(err);
1064 } 1071 }
1065 1072
1066 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 1073 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
1074 tx_flags;
1067 ieee80211_tx_skb(sdata, skb); 1075 ieee80211_tx_skb(sdata, skb);
1068} 1076}
1069 1077
@@ -1277,7 +1285,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1277void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, 1285void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1278 const u8 *ssid, size_t ssid_len, 1286 const u8 *ssid, size_t ssid_len,
1279 const u8 *ie, size_t ie_len, 1287 const u8 *ie, size_t ie_len,
1280 u32 ratemask, bool directed, bool no_cck, 1288 u32 ratemask, bool directed, u32 tx_flags,
1281 struct ieee80211_channel *channel, bool scan) 1289 struct ieee80211_channel *channel, bool scan)
1282{ 1290{
1283 struct sk_buff *skb; 1291 struct sk_buff *skb;
@@ -1286,9 +1294,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1286 ssid, ssid_len, 1294 ssid, ssid_len,
1287 ie, ie_len, directed); 1295 ie, ie_len, directed);
1288 if (skb) { 1296 if (skb) {
1289 if (no_cck) 1297 IEEE80211_SKB_CB(skb)->flags |= tx_flags;
1290 IEEE80211_SKB_CB(skb)->flags |=
1291 IEEE80211_TX_CTL_NO_CCK_RATE;
1292 if (scan) 1298 if (scan)
1293 ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band); 1299 ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band);
1294 else 1300 else
@@ -1538,6 +1544,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1538 changed |= BSS_CHANGED_ASSOC | 1544 changed |= BSS_CHANGED_ASSOC |
1539 BSS_CHANGED_ARP_FILTER | 1545 BSS_CHANGED_ARP_FILTER |
1540 BSS_CHANGED_PS; 1546 BSS_CHANGED_PS;
1547
1548 if (sdata->u.mgd.dtim_period)
1549 changed |= BSS_CHANGED_DTIM_PERIOD;
1550
1541 mutex_lock(&sdata->u.mgd.mtx); 1551 mutex_lock(&sdata->u.mgd.mtx);
1542 ieee80211_bss_info_change_notify(sdata, changed); 1552 ieee80211_bss_info_change_notify(sdata, changed);
1543 mutex_unlock(&sdata->u.mgd.mtx); 1553 mutex_unlock(&sdata->u.mgd.mtx);
@@ -1937,7 +1947,7 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1937} 1947}
1938 1948
1939void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan, 1949void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
1940 struct ieee80211_ht_operation *ht_oper, 1950 const struct ieee80211_ht_operation *ht_oper,
1941 struct cfg80211_chan_def *chandef) 1951 struct cfg80211_chan_def *chandef)
1942{ 1952{
1943 enum nl80211_channel_type channel_type; 1953 enum nl80211_channel_type channel_type;
@@ -2125,3 +2135,49 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
2125 2135
2126 return ts; 2136 return ts;
2127} 2137}
2138
2139void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
2140{
2141 struct ieee80211_sub_if_data *sdata;
2142
2143 mutex_lock(&local->iflist_mtx);
2144 list_for_each_entry(sdata, &local->interfaces, list) {
2145 cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
2146
2147 if (sdata->wdev.cac_started) {
2148 ieee80211_vif_release_channel(sdata);
2149 cfg80211_cac_event(sdata->dev,
2150 NL80211_RADAR_CAC_ABORTED,
2151 GFP_KERNEL);
2152 }
2153 }
2154 mutex_unlock(&local->iflist_mtx);
2155}
2156
2157void ieee80211_dfs_radar_detected_work(struct work_struct *work)
2158{
2159 struct ieee80211_local *local =
2160 container_of(work, struct ieee80211_local, radar_detected_work);
2161 struct cfg80211_chan_def chandef;
2162
2163 ieee80211_dfs_cac_cancel(local);
2164
2165 if (local->use_chanctx)
2166 /* currently not handled */
2167 WARN_ON(1);
2168 else {
2169 cfg80211_chandef_create(&chandef, local->hw.conf.channel,
2170 local->hw.conf.channel_type);
2171 cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL);
2172 }
2173}
2174
2175void ieee80211_radar_detected(struct ieee80211_hw *hw)
2176{
2177 struct ieee80211_local *local = hw_to_local(hw);
2178
2179 trace_api_radar_detected(local);
2180
2181 ieee80211_queue_work(hw, &local->radar_detected_work);
2182}
2183EXPORT_SYMBOL(ieee80211_radar_detected);
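Note the split above: ieee80211_radar_detected() may be called from driver interrupt context, so it only queues work, while the CAC cancellation and the cfg80211 report run later from radar_detected_work in process context. A userspace analogy of that defer-to-worker pattern using pthreads, with a single pending flag standing in for the kernel work item:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool radar_pending;

    static void *worker(void *arg)        /* radar_detected_work() analog */
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!radar_pending)
            pthread_cond_wait(&cond, &lock);
        radar_pending = false;
        pthread_mutex_unlock(&lock);
        printf("worker: cancel CAC, report radar event\n");
        return NULL;
    }

    static void radar_detected(void)      /* ieee80211_radar_detected() analog */
    {
        pthread_mutex_lock(&lock);
        radar_pending = true;             /* cheap, safe from "irq" context */
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        radar_detected();
        pthread_join(t, NULL);
        return 0;
    }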
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index f311388aeedf..a2c2258bc84e 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -10,21 +10,29 @@
10#include <linux/export.h> 10#include <linux/export.h>
11#include <net/mac80211.h> 11#include <net/mac80211.h>
12#include "ieee80211_i.h" 12#include "ieee80211_i.h"
13#include "rate.h"
13 14
14 15
15void ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, 16void
16 struct ieee80211_supported_band *sband, 17ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
17 struct ieee80211_vht_cap *vht_cap_ie, 18 struct ieee80211_supported_band *sband,
18 struct ieee80211_sta_vht_cap *vht_cap) 19 const struct ieee80211_vht_cap *vht_cap_ie,
20 struct sta_info *sta)
19{ 21{
20 if (WARN_ON_ONCE(!vht_cap)) 22 struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
21 return;
22 23
23 memset(vht_cap, 0, sizeof(*vht_cap)); 24 memset(vht_cap, 0, sizeof(*vht_cap));
24 25
26 if (!sta->sta.ht_cap.ht_supported)
27 return;
28
25 if (!vht_cap_ie || !sband->vht_cap.vht_supported) 29 if (!vht_cap_ie || !sband->vht_cap.vht_supported)
26 return; 30 return;
27 31
32 /* A VHT STA must support 40 MHz */
33 if (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
34 return;
35
28 vht_cap->vht_supported = true; 36 vht_cap->vht_supported = true;
29 37
30 vht_cap->cap = le32_to_cpu(vht_cap_ie->vht_cap_info); 38 vht_cap->cap = le32_to_cpu(vht_cap_ie->vht_cap_info);
@@ -32,4 +40,156 @@ void ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
32 /* Copy peer MCS info, the driver might need them. */ 40 /* Copy peer MCS info, the driver might need them. */
33 memcpy(&vht_cap->vht_mcs, &vht_cap_ie->supp_mcs, 41 memcpy(&vht_cap->vht_mcs, &vht_cap_ie->supp_mcs,
34 sizeof(struct ieee80211_vht_mcs_info)); 42 sizeof(struct ieee80211_vht_mcs_info));
43
44 switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
45 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
46 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
47 sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
48 break;
49 default:
50 sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80;
51 }
52
53 sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta);
54}
55
56enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
57{
58 struct ieee80211_sub_if_data *sdata = sta->sdata;
59 u32 cap = sta->sta.vht_cap.cap;
60 enum ieee80211_sta_rx_bandwidth bw;
61
62 if (!sta->sta.vht_cap.vht_supported) {
63 bw = sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
64 IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
65 goto check_max;
66 }
67
68 switch (sdata->vif.bss_conf.chandef.width) {
69 default:
70 WARN_ON_ONCE(1);
71 /* fall through */
72 case NL80211_CHAN_WIDTH_20_NOHT:
73 case NL80211_CHAN_WIDTH_20:
74 case NL80211_CHAN_WIDTH_40:
75 bw = sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
76 IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
77 break;
78 case NL80211_CHAN_WIDTH_160:
79 if ((cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) ==
80 IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ) {
81 bw = IEEE80211_STA_RX_BW_160;
82 break;
83 }
84 /* fall through */
85 case NL80211_CHAN_WIDTH_80P80:
86 if ((cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) ==
87 IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) {
88 bw = IEEE80211_STA_RX_BW_160;
89 break;
90 }
91 /* fall through */
92 case NL80211_CHAN_WIDTH_80:
93 bw = IEEE80211_STA_RX_BW_80;
94 }
95
96 check_max:
97 if (bw > sta->cur_max_bandwidth)
98 bw = sta->cur_max_bandwidth;
99 return bw;
100}
101
102void ieee80211_sta_set_rx_nss(struct sta_info *sta)
103{
104 u8 ht_rx_nss = 0, vht_rx_nss = 0;
105
106 /* if we received a notification already don't overwrite it */
107 if (sta->sta.rx_nss)
108 return;
109
110 if (sta->sta.ht_cap.ht_supported) {
111 if (sta->sta.ht_cap.mcs.rx_mask[0])
112 ht_rx_nss++;
113 if (sta->sta.ht_cap.mcs.rx_mask[1])
114 ht_rx_nss++;
115 if (sta->sta.ht_cap.mcs.rx_mask[2])
116 ht_rx_nss++;
117 if (sta->sta.ht_cap.mcs.rx_mask[3])
118 ht_rx_nss++;
119 /* FIXME: consider rx_highest? */
120 }
121
122 if (sta->sta.vht_cap.vht_supported) {
123 int i;
124 u16 rx_mcs_map;
125
126 rx_mcs_map = le16_to_cpu(sta->sta.vht_cap.vht_mcs.rx_mcs_map);
127
128 for (i = 7; i >= 0; i--) {
129 u8 mcs = (rx_mcs_map >> (2 * i)) & 3;
130
131 if (mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
132 vht_rx_nss = i + 1;
133 break;
134 }
135 }
136 /* FIXME: consider rx_highest? */
137 }
138
139 ht_rx_nss = max(ht_rx_nss, vht_rx_nss);
140 sta->sta.rx_nss = max_t(u8, 1, ht_rx_nss);
141}
142
143void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
144 struct sta_info *sta, u8 opmode,
145 enum ieee80211_band band, bool nss_only)
146{
147 struct ieee80211_local *local = sdata->local;
148 struct ieee80211_supported_band *sband;
149 enum ieee80211_sta_rx_bandwidth new_bw;
150 u32 changed = 0;
151 u8 nss;
152
153 sband = local->hw.wiphy->bands[band];
154
155 /* ignore - no support for BF yet */
156 if (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)
157 return;
158
159 nss = opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK;
160 nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
161 nss += 1;
162
163 if (sta->sta.rx_nss != nss) {
164 sta->sta.rx_nss = nss;
165 changed |= IEEE80211_RC_NSS_CHANGED;
166 }
167
168 if (nss_only)
169 goto change;
170
171 switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) {
172 case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ:
173 sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_20;
174 break;
175 case IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ:
176 sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_40;
177 break;
178 case IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ:
179 sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80;
180 break;
181 case IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ:
182 sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
183 break;
184 }
185
186 new_bw = ieee80211_sta_cur_vht_bw(sta);
187 if (new_bw != sta->sta.bandwidth) {
188 sta->sta.bandwidth = new_bw;
189 changed |= IEEE80211_RC_NSS_CHANGED;
190 }
191
192 change:
193 if (changed)
194 rate_control_rate_update(local, sband, sta, changed);
35} 195}
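ieee80211_sta_set_rx_nss() above derives the stream count from the VHT RX MCS map, where each of the eight 2-bit fields reports per-stream MCS support and the value 3 means "not supported". The same arithmetic as standalone C, with a sample map assumed for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define VHT_MCS_NOT_SUPPORTED 3  /* 2-bit field value meaning "no MCS" */

    /* Highest stream with any supported MCS, scanning from NSS 8 down to 1 */
    static int vht_rx_nss(uint16_t rx_mcs_map)
    {
        int i;

        for (i = 7; i >= 0; i--) {
            unsigned int mcs = (rx_mcs_map >> (2 * i)) & 3;

            if (mcs != VHT_MCS_NOT_SUPPORTED)
                return i + 1;
        }
        return 0;
    }

    int main(void)
    {
        /* streams 1-2 advertise MCS 0-9 (field value 2), 3-8 unsupported */
        uint16_t map = 0xfffa;

        printf("rx_nss = %d\n", vht_rx_nss(map));  /* prints 2 */
        return 0;
    }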
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 906f00cd6d2f..afba19cb6f87 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -191,6 +191,15 @@ void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
191 191
192 /* qos header is 2 bytes */ 192 /* qos header is 2 bytes */
193 *p++ = ack_policy | tid; 193 *p++ = ack_policy | tid;
194 *p = ieee80211_vif_is_mesh(&sdata->vif) ? 194 if (ieee80211_vif_is_mesh(&sdata->vif)) {
195 (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8) : 0; 195 /* preserve RSPI and Mesh PS Level bit */
196 *p &= ((IEEE80211_QOS_CTL_RSPI |
197 IEEE80211_QOS_CTL_MESH_PS_LEVEL) >> 8);
198
199 /* Nulls don't have a mesh header (frame body) */
200 if (!ieee80211_is_qos_nullfunc(hdr->frame_control))
201 *p |= (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8);
202 } else {
203 *p = 0;
204 }
196} 205}
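The wme.c hunk builds the second byte of the 16-bit QoS Control field for mesh frames: it preserves any RSPI and Mesh PS Level bits already set, and sets Mesh Control Present only for frames that actually carry a body. A sketch of the same bit handling; the mask values are assumed here from the constants the code shifts by 8:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* QoS Control bits (16-bit field); masks assumed from ieee80211.h */
    #define QOS_CTL_MESH_CONTROL_PRESENT 0x0100
    #define QOS_CTL_MESH_PS_LEVEL        0x0200
    #define QOS_CTL_RSPI                 0x0400

    static uint8_t mesh_qos_high_byte(uint8_t cur, bool qos_nullfunc)
    {
        /* keep only RSPI and Mesh PS Level out of whatever was there */
        uint8_t p = cur & ((QOS_CTL_RSPI | QOS_CTL_MESH_PS_LEVEL) >> 8);

        /* QoS-Null frames have no body, hence no mesh control header */
        if (!qos_nullfunc)
            p |= QOS_CTL_MESH_CONTROL_PRESENT >> 8;
        return p;
    }

    int main(void)
    {
        printf("data frame: 0x%02x\n", mesh_qos_high_byte(0xff, false));
        printf("qos null:   0x%02x\n", mesh_qos_high_byte(0xff, true));
        return 0;
    }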
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index c175ee866ff4..c7c6d644486f 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -181,7 +181,6 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
181 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 181 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
182 struct ieee80211_key *key = tx->key; 182 struct ieee80211_key *key = tx->key;
183 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 183 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
184 unsigned long flags;
185 unsigned int hdrlen; 184 unsigned int hdrlen;
186 int len, tail; 185 int len, tail;
187 u8 *pos; 186 u8 *pos;
@@ -216,12 +215,12 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
216 return 0; 215 return 0;
217 216
218 /* Increase IV for the frame */ 217 /* Increase IV for the frame */
219 spin_lock_irqsave(&key->u.tkip.txlock, flags); 218 spin_lock(&key->u.tkip.txlock);
220 key->u.tkip.tx.iv16++; 219 key->u.tkip.tx.iv16++;
221 if (key->u.tkip.tx.iv16 == 0) 220 if (key->u.tkip.tx.iv16 == 0)
222 key->u.tkip.tx.iv32++; 221 key->u.tkip.tx.iv32++;
223 pos = ieee80211_tkip_add_iv(pos, key); 222 pos = ieee80211_tkip_add_iv(pos, key);
224 spin_unlock_irqrestore(&key->u.tkip.txlock, flags); 223 spin_unlock(&key->u.tkip.txlock);
225 224
226 /* hwaccel - with software IV */ 225 /* hwaccel - with software IV */
227 if (info->control.hw_key) 226 if (info->control.hw_key)
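The TKIP hunk increments a 48-bit IV kept as a 16-bit low part and a 32-bit high part, carrying iv16 overflow into iv32; dropping the irqsave variant is possible because this path no longer runs from hard-irq context. The carry logic on its own, as a runnable sketch:

    #include <stdio.h>
    #include <stdint.h>

    /* 48-bit TKIP IV as iv32:iv16; increment with carry as in the hunk */
    struct tkip_iv {
        uint16_t iv16;
        uint32_t iv32;
    };

    static void tkip_iv_inc(struct tkip_iv *iv)
    {
        iv->iv16++;
        if (iv->iv16 == 0)   /* wrapped: carry into the high 32 bits */
            iv->iv32++;
    }

    int main(void)
    {
        struct tkip_iv iv = { .iv16 = 0xffff, .iv32 = 0 };

        tkip_iv_inc(&iv);
        printf("iv32=%u iv16=%u\n", iv.iv32, iv.iv16);  /* 1, 0 */
        return 0;
    }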
diff --git a/net/mac802154/wpan.c b/net/mac802154/wpan.c
index 199b92261e94..d20c6d3c247d 100644
--- a/net/mac802154/wpan.c
+++ b/net/mac802154/wpan.c
@@ -41,7 +41,7 @@ static inline int mac802154_fetch_skb_u8(struct sk_buff *skb, u8 *val)
41 return -EINVAL; 41 return -EINVAL;
42 42
43 *val = skb->data[0]; 43 *val = skb->data[0];
44 skb_pull(skb, 1); 44 skb_pull(skb, 1);
45 45
46 return 0; 46 return 0;
47} 47}
@@ -137,16 +137,12 @@ static int mac802154_header_create(struct sk_buff *skb,
137 struct ieee802154_addr dev_addr; 137 struct ieee802154_addr dev_addr;
138 struct mac802154_sub_if_data *priv = netdev_priv(dev); 138 struct mac802154_sub_if_data *priv = netdev_priv(dev);
139 int pos = 2; 139 int pos = 2;
140 u8 *head; 140 u8 head[MAC802154_FRAME_HARD_HEADER_LEN];
141 u16 fc; 141 u16 fc;
142 142
143 if (!daddr) 143 if (!daddr)
144 return -EINVAL; 144 return -EINVAL;
145 145
146 head = kzalloc(MAC802154_FRAME_HARD_HEADER_LEN, GFP_KERNEL);
147 if (head == NULL)
148 return -ENOMEM;
149
150 head[pos++] = mac_cb(skb)->seq; /* DSN/BSN */ 146 head[pos++] = mac_cb(skb)->seq; /* DSN/BSN */
151 fc = mac_cb_type(skb); 147 fc = mac_cb_type(skb);
152 148
@@ -210,7 +206,6 @@ static int mac802154_header_create(struct sk_buff *skb,
210 head[1] = fc >> 8; 206 head[1] = fc >> 8;
211 207
212 memcpy(skb_push(skb, pos), head, pos); 208 memcpy(skb_push(skb, pos), head, pos);
213 kfree(head);
214 209
215 return pos; 210 return pos;
216} 211}
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 9713e6e86d47..0b779d7df881 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -605,12 +605,12 @@ int __net_init ip_vs_app_net_init(struct net *net)
605 struct netns_ipvs *ipvs = net_ipvs(net); 605 struct netns_ipvs *ipvs = net_ipvs(net);
606 606
607 INIT_LIST_HEAD(&ipvs->app_list); 607 INIT_LIST_HEAD(&ipvs->app_list);
608 proc_net_fops_create(net, "ip_vs_app", 0, &ip_vs_app_fops); 608 proc_create("ip_vs_app", 0, net->proc_net, &ip_vs_app_fops);
609 return 0; 609 return 0;
610} 610}
611 611
612void __net_exit ip_vs_app_net_cleanup(struct net *net) 612void __net_exit ip_vs_app_net_cleanup(struct net *net)
613{ 613{
614 unregister_ip_vs_app(net, NULL /* all */); 614 unregister_ip_vs_app(net, NULL /* all */);
615 proc_net_remove(net, "ip_vs_app"); 615 remove_proc_entry("ip_vs_app", net->proc_net);
616} 616}
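This file is the first of many below converted from the old proc_net_fops_create()/proc_net_remove() wrappers to plain proc_create()/remove_proc_entry() against net->proc_net. A minimal kernel-module sketch of that pairing for the init_net namespace (the entry name here is made up; namespace-aware code would register from pernet init/exit hooks instead):

    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>
    #include <net/net_namespace.h>

    /* One read-only file in /proc/net, created and removed with the
     * interfaces this conversion standardizes on. */
    static int demo_show(struct seq_file *s, void *v)
    {
        seq_printf(s, "hello from /proc/net\n");
        return 0;
    }

    static int demo_open(struct inode *inode, struct file *file)
    {
        return single_open(file, demo_show, NULL);
    }

    static const struct file_operations demo_fops = {
        .owner   = THIS_MODULE,
        .open    = demo_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
    };

    static int __init demo_init(void)
    {
        if (!proc_create("procnet_demo", 0444, init_net.proc_net, &demo_fops))
            return -ENOMEM;
        return 0;
    }

    static void __exit demo_exit(void)
    {
        remove_proc_entry("procnet_demo", init_net.proc_net);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");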
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 68e368a4beed..9f00db7e03f2 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -1291,8 +1291,8 @@ int __net_init ip_vs_conn_net_init(struct net *net)
1291 1291
1292 atomic_set(&ipvs->conn_count, 0); 1292 atomic_set(&ipvs->conn_count, 0);
1293 1293
1294 proc_net_fops_create(net, "ip_vs_conn", 0, &ip_vs_conn_fops); 1294 proc_create("ip_vs_conn", 0, net->proc_net, &ip_vs_conn_fops);
1295 proc_net_fops_create(net, "ip_vs_conn_sync", 0, &ip_vs_conn_sync_fops); 1295 proc_create("ip_vs_conn_sync", 0, net->proc_net, &ip_vs_conn_sync_fops);
1296 return 0; 1296 return 0;
1297} 1297}
1298 1298
@@ -1300,8 +1300,8 @@ void __net_exit ip_vs_conn_net_cleanup(struct net *net)
1300{ 1300{
1301 /* flush all the connection entries first */ 1301 /* flush all the connection entries first */
1302 ip_vs_conn_flush(net); 1302 ip_vs_conn_flush(net);
1303 proc_net_remove(net, "ip_vs_conn"); 1303 remove_proc_entry("ip_vs_conn", net->proc_net);
1304 proc_net_remove(net, "ip_vs_conn_sync"); 1304 remove_proc_entry("ip_vs_conn_sync", net->proc_net);
1305} 1305}
1306 1306
1307int __init ip_vs_conn_init(void) 1307int __init ip_vs_conn_init(void)
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index ec664cbb119f..c68198bf9128 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3800,10 +3800,10 @@ int __net_init ip_vs_control_net_init(struct net *net)
3800 3800
3801 spin_lock_init(&ipvs->tot_stats.lock); 3801 spin_lock_init(&ipvs->tot_stats.lock);
3802 3802
3803 proc_net_fops_create(net, "ip_vs", 0, &ip_vs_info_fops); 3803 proc_create("ip_vs", 0, net->proc_net, &ip_vs_info_fops);
3804 proc_net_fops_create(net, "ip_vs_stats", 0, &ip_vs_stats_fops); 3804 proc_create("ip_vs_stats", 0, net->proc_net, &ip_vs_stats_fops);
3805 proc_net_fops_create(net, "ip_vs_stats_percpu", 0, 3805 proc_create("ip_vs_stats_percpu", 0, net->proc_net,
3806 &ip_vs_stats_percpu_fops); 3806 &ip_vs_stats_percpu_fops);
3807 3807
3808 if (ip_vs_control_net_init_sysctl(net)) 3808 if (ip_vs_control_net_init_sysctl(net))
3809 goto err; 3809 goto err;
@@ -3822,9 +3822,9 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
3822 ip_vs_trash_cleanup(net); 3822 ip_vs_trash_cleanup(net);
3823 ip_vs_stop_estimator(net, &ipvs->tot_stats); 3823 ip_vs_stop_estimator(net, &ipvs->tot_stats);
3824 ip_vs_control_net_cleanup_sysctl(net); 3824 ip_vs_control_net_cleanup_sysctl(net);
3825 proc_net_remove(net, "ip_vs_stats_percpu"); 3825 remove_proc_entry("ip_vs_stats_percpu", net->proc_net);
3826 proc_net_remove(net, "ip_vs_stats"); 3826 remove_proc_entry("ip_vs_stats", net->proc_net);
3827 proc_net_remove(net, "ip_vs"); 3827 remove_proc_entry("ip_vs", net->proc_net);
3828 free_percpu(ipvs->tot_stats.cpustats); 3828 free_percpu(ipvs->tot_stats.cpustats);
3829} 3829}
3830 3830
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 746048b13ef3..ae8ec6f27688 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -61,14 +61,27 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
61 return 1; 61 return 1;
62} 62}
63 63
64static void sctp_nat_csum(struct sk_buff *skb, sctp_sctphdr_t *sctph,
65 unsigned int sctphoff)
66{
67 __u32 crc32;
68 struct sk_buff *iter;
69
70 crc32 = sctp_start_cksum((__u8 *)sctph, skb_headlen(skb) - sctphoff);
71 skb_walk_frags(skb, iter)
72 crc32 = sctp_update_cksum((u8 *) iter->data,
73 skb_headlen(iter), crc32);
74 sctph->checksum = sctp_end_cksum(crc32);
75
76 skb->ip_summed = CHECKSUM_UNNECESSARY;
77}
78
64static int 79static int
65sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, 80sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
66 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) 81 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
67{ 82{
68 sctp_sctphdr_t *sctph; 83 sctp_sctphdr_t *sctph;
69 unsigned int sctphoff = iph->len; 84 unsigned int sctphoff = iph->len;
70 struct sk_buff *iter;
71 __be32 crc32;
72 85
73#ifdef CONFIG_IP_VS_IPV6 86#ifdef CONFIG_IP_VS_IPV6
74 if (cp->af == AF_INET6 && iph->fragoffs) 87 if (cp->af == AF_INET6 && iph->fragoffs)
@@ -92,13 +105,7 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
92 sctph = (void *) skb_network_header(skb) + sctphoff; 105 sctph = (void *) skb_network_header(skb) + sctphoff;
93 sctph->source = cp->vport; 106 sctph->source = cp->vport;
94 107
95 /* Calculate the checksum */ 108 sctp_nat_csum(skb, sctph, sctphoff);
96 crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
97 skb_walk_frags(skb, iter)
98 crc32 = sctp_update_cksum((u8 *) iter->data, skb_headlen(iter),
99 crc32);
100 crc32 = sctp_end_cksum(crc32);
101 sctph->checksum = crc32;
102 109
103 return 1; 110 return 1;
104} 111}
@@ -109,8 +116,6 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
109{ 116{
110 sctp_sctphdr_t *sctph; 117 sctp_sctphdr_t *sctph;
111 unsigned int sctphoff = iph->len; 118 unsigned int sctphoff = iph->len;
112 struct sk_buff *iter;
113 __be32 crc32;
114 119
115#ifdef CONFIG_IP_VS_IPV6 120#ifdef CONFIG_IP_VS_IPV6
116 if (cp->af == AF_INET6 && iph->fragoffs) 121 if (cp->af == AF_INET6 && iph->fragoffs)
@@ -134,13 +139,7 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
134 sctph = (void *) skb_network_header(skb) + sctphoff; 139 sctph = (void *) skb_network_header(skb) + sctphoff;
135 sctph->dest = cp->dport; 140 sctph->dest = cp->dport;
136 141
137 /* Calculate the checksum */ 142 sctp_nat_csum(skb, sctph, sctphoff);
138 crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
139 skb_walk_frags(skb, iter)
140 crc32 = sctp_update_cksum((u8 *) iter->data, skb_headlen(iter),
141 crc32);
142 crc32 = sctp_end_cksum(crc32);
143 sctph->checksum = crc32;
144 143
145 return 1; 144 return 1;
146} 145}
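sctp_nat_csum() folds the start/update/end CRC32c sequence over the linear head and every fragment into one helper shared by the SNAT and DNAT paths. The same chaining pattern in userspace, with a bitwise CRC32c (reflected polynomial 0x82F63B78) standing in for the kernel's sctp_*_cksum helpers:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Bitwise CRC32c (Castagnoli), reflected polynomial 0x82F63B78 */
    static uint32_t crc32c_update(uint32_t crc, const uint8_t *buf, size_t len)
    {
        size_t i;
        int k;

        for (i = 0; i < len; i++) {
            crc ^= buf[i];
            for (k = 0; k < 8; k++)
                crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
        }
        return crc;
    }

    int main(void)
    {
        /* Chain the checksum over several buffers, like head + frags */
        const char *parts[] = { "header", "frag one", "frag two" };
        uint32_t crc = ~0u;                /* sctp_start_cksum() analog */
        int i;

        for (i = 0; i < 3; i++)            /* sctp_update_cksum() per frag */
            crc = crc32c_update(crc, (const uint8_t *)parts[i],
                                strlen(parts[i]));
        printf("crc32c = 0x%08x\n", (unsigned)~crc); /* final invert */
        return 0;
    }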
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index effa10c9e4e3..44fd10c539ac 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1795,6 +1795,8 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
1795 GFP_KERNEL); 1795 GFP_KERNEL);
1796 if (!tinfo->buf) 1796 if (!tinfo->buf)
1797 goto outtinfo; 1797 goto outtinfo;
1798 } else {
1799 tinfo->buf = NULL;
1798 } 1800 }
1799 tinfo->id = id; 1801 tinfo->id = id;
1800 1802
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index bdd341899ed3..3921e5bc1235 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -571,7 +571,8 @@ static int exp_proc_init(struct net *net)
571#ifdef CONFIG_NF_CONNTRACK_PROCFS 571#ifdef CONFIG_NF_CONNTRACK_PROCFS
572 struct proc_dir_entry *proc; 572 struct proc_dir_entry *proc;
573 573
574 proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops); 574 proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
575 &exp_file_ops);
575 if (!proc) 576 if (!proc)
576 return -ENOMEM; 577 return -ENOMEM;
577#endif /* CONFIG_NF_CONNTRACK_PROCFS */ 578#endif /* CONFIG_NF_CONNTRACK_PROCFS */
@@ -581,7 +582,7 @@ static int exp_proc_init(struct net *net)
581static void exp_proc_remove(struct net *net) 582static void exp_proc_remove(struct net *net)
582{ 583{
583#ifdef CONFIG_NF_CONNTRACK_PROCFS 584#ifdef CONFIG_NF_CONNTRACK_PROCFS
584 proc_net_remove(net, "nf_conntrack_expect"); 585 remove_proc_entry("nf_conntrack_expect", net->proc_net);
585#endif /* CONFIG_NF_CONNTRACK_PROCFS */ 586#endif /* CONFIG_NF_CONNTRACK_PROCFS */
586} 587}
587 588
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index c08768da7936..013cdf69fe29 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -237,7 +237,9 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
237 /* We only allow helper re-assignment of the same sort since 237 /* We only allow helper re-assignment of the same sort since
238 * we cannot reallocate the helper extension area. 238 * we cannot reallocate the helper extension area.
239 */ 239 */
240 if (help->helper != helper) { 240 struct nf_conntrack_helper *tmp = rcu_dereference(help->helper);
241
242 if (tmp && tmp->help != helper->help) {
241 RCU_INIT_POINTER(help->helper, NULL); 243 RCU_INIT_POINTER(help->helper, NULL);
242 goto out; 244 goto out;
243 } 245 }
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index d490a300ce2b..5d60e04f9679 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1782,6 +1782,9 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1782 if (nlh->nlmsg_flags & NLM_F_CREATE) { 1782 if (nlh->nlmsg_flags & NLM_F_CREATE) {
1783 enum ip_conntrack_events events; 1783 enum ip_conntrack_events events;
1784 1784
1785 if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
1786 return -EINVAL;
1787
1785 ct = ctnetlink_create_conntrack(net, zone, cda, &otuple, 1788 ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
1786 &rtuple, u3); 1789 &rtuple, u3);
1787 if (IS_ERR(ct)) 1790 if (IS_ERR(ct))
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 7936bf7f90ba..6bcce401fd1c 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -366,7 +366,7 @@ static int nf_conntrack_standalone_init_proc(struct net *net)
366{ 366{
367 struct proc_dir_entry *pde; 367 struct proc_dir_entry *pde;
368 368
369 pde = proc_net_fops_create(net, "nf_conntrack", 0440, &ct_file_ops); 369 pde = proc_create("nf_conntrack", 0440, net->proc_net, &ct_file_ops);
370 if (!pde) 370 if (!pde)
371 goto out_nf_conntrack; 371 goto out_nf_conntrack;
372 372
@@ -377,7 +377,7 @@ static int nf_conntrack_standalone_init_proc(struct net *net)
377 return 0; 377 return 0;
378 378
379out_stat_nf_conntrack: 379out_stat_nf_conntrack:
380 proc_net_remove(net, "nf_conntrack"); 380 remove_proc_entry("nf_conntrack", net->proc_net);
381out_nf_conntrack: 381out_nf_conntrack:
382 return -ENOMEM; 382 return -ENOMEM;
383} 383}
@@ -385,7 +385,7 @@ out_nf_conntrack:
385static void nf_conntrack_standalone_fini_proc(struct net *net) 385static void nf_conntrack_standalone_fini_proc(struct net *net)
386{ 386{
387 remove_proc_entry("nf_conntrack", net->proc_net_stat); 387 remove_proc_entry("nf_conntrack", net->proc_net_stat);
388 proc_net_remove(net, "nf_conntrack"); 388 remove_proc_entry("nf_conntrack", net->proc_net);
389} 389}
390#else 390#else
391static int nf_conntrack_standalone_init_proc(struct net *net) 391static int nf_conntrack_standalone_init_proc(struct net *net)
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 7b3a9e5999c0..686c7715d777 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1323,12 +1323,12 @@ int xt_proto_init(struct net *net, u_int8_t af)
1323out_remove_matches: 1323out_remove_matches:
1324 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1324 strlcpy(buf, xt_prefix[af], sizeof(buf));
1325 strlcat(buf, FORMAT_MATCHES, sizeof(buf)); 1325 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1326 proc_net_remove(net, buf); 1326 remove_proc_entry(buf, net->proc_net);
1327 1327
1328out_remove_tables: 1328out_remove_tables:
1329 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1329 strlcpy(buf, xt_prefix[af], sizeof(buf));
1330 strlcat(buf, FORMAT_TABLES, sizeof(buf)); 1330 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1331 proc_net_remove(net, buf); 1331 remove_proc_entry(buf, net->proc_net);
1332out: 1332out:
1333 return -1; 1333 return -1;
1334#endif 1334#endif
@@ -1342,15 +1342,15 @@ void xt_proto_fini(struct net *net, u_int8_t af)
1342 1342
1343 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1343 strlcpy(buf, xt_prefix[af], sizeof(buf));
1344 strlcat(buf, FORMAT_TABLES, sizeof(buf)); 1344 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1345 proc_net_remove(net, buf); 1345 remove_proc_entry(buf, net->proc_net);
1346 1346
1347 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1347 strlcpy(buf, xt_prefix[af], sizeof(buf));
1348 strlcat(buf, FORMAT_TARGETS, sizeof(buf)); 1348 strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1349 proc_net_remove(net, buf); 1349 remove_proc_entry(buf, net->proc_net);
1350 1350
1351 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1351 strlcpy(buf, xt_prefix[af], sizeof(buf));
1352 strlcat(buf, FORMAT_MATCHES, sizeof(buf)); 1352 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1353 proc_net_remove(net, buf); 1353 remove_proc_entry(buf, net->proc_net);
1354#endif /*CONFIG_PROC_FS*/ 1354#endif /*CONFIG_PROC_FS*/
1355} 1355}
1356EXPORT_SYMBOL_GPL(xt_proto_fini); 1356EXPORT_SYMBOL_GPL(xt_proto_fini);
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index a9d7af953ceb..98218c896d2e 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -867,7 +867,7 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
867#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) 867#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
868 hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net); 868 hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net);
869 if (!hashlimit_net->ip6t_hashlimit) { 869 if (!hashlimit_net->ip6t_hashlimit) {
870 proc_net_remove(net, "ipt_hashlimit"); 870 remove_proc_entry("ipt_hashlimit", net->proc_net);
871 return -ENOMEM; 871 return -ENOMEM;
872 } 872 }
873#endif 873#endif
@@ -897,9 +897,9 @@ static void __net_exit hashlimit_proc_net_exit(struct net *net)
897 hashlimit_net->ip6t_hashlimit = NULL; 897 hashlimit_net->ip6t_hashlimit = NULL;
898 mutex_unlock(&hashlimit_mutex); 898 mutex_unlock(&hashlimit_mutex);
899 899
900 proc_net_remove(net, "ipt_hashlimit"); 900 remove_proc_entry("ipt_hashlimit", net->proc_net);
901#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) 901#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
902 proc_net_remove(net, "ip6t_hashlimit"); 902 remove_proc_entry("ip6t_hashlimit", net->proc_net);
903#endif 903#endif
904} 904}
905 905
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 978efc9b555a..31bf233dae97 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -643,7 +643,7 @@ static void __net_exit recent_proc_net_exit(struct net *net)
643 recent_net->xt_recent = NULL; 643 recent_net->xt_recent = NULL;
644 spin_unlock_bh(&recent_lock); 644 spin_unlock_bh(&recent_lock);
645 645
646 proc_net_remove(net, "xt_recent"); 646 remove_proc_entry("xt_recent", net->proc_net);
647} 647}
648#else 648#else
649static inline int recent_proc_net_init(struct net *net) 649static inline int recent_proc_net_init(struct net *net)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 74827e3b26a1..3d55e0c713e2 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2145,7 +2145,7 @@ static const struct net_proto_family netlink_family_ops = {
2145static int __net_init netlink_net_init(struct net *net) 2145static int __net_init netlink_net_init(struct net *net)
2146{ 2146{
2147#ifdef CONFIG_PROC_FS 2147#ifdef CONFIG_PROC_FS
2148 if (!proc_net_fops_create(net, "netlink", 0, &netlink_seq_fops)) 2148 if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
2149 return -ENOMEM; 2149 return -ENOMEM;
2150#endif 2150#endif
2151 return 0; 2151 return 0;
@@ -2154,7 +2154,7 @@ static int __net_init netlink_net_init(struct net *net)
2154static void __net_exit netlink_net_exit(struct net *net) 2154static void __net_exit netlink_net_exit(struct net *net)
2155{ 2155{
2156#ifdef CONFIG_PROC_FS 2156#ifdef CONFIG_PROC_FS
2157 proc_net_remove(net, "netlink"); 2157 remove_proc_entry("netlink", net->proc_net);
2158#endif 2158#endif
2159} 2159}
2160 2160
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 7261eb81974f..297b07a029de 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1452,9 +1452,9 @@ static int __init nr_proto_init(void)
1452 1452
1453 nr_loopback_init(); 1453 nr_loopback_init();
1454 1454
1455 proc_net_fops_create(&init_net, "nr", S_IRUGO, &nr_info_fops); 1455 proc_create("nr", S_IRUGO, init_net.proc_net, &nr_info_fops);
1456 proc_net_fops_create(&init_net, "nr_neigh", S_IRUGO, &nr_neigh_fops); 1456 proc_create("nr_neigh", S_IRUGO, init_net.proc_net, &nr_neigh_fops);
1457 proc_net_fops_create(&init_net, "nr_nodes", S_IRUGO, &nr_nodes_fops); 1457 proc_create("nr_nodes", S_IRUGO, init_net.proc_net, &nr_nodes_fops);
1458out: 1458out:
1459 return rc; 1459 return rc;
1460fail: 1460fail:
@@ -1482,9 +1482,9 @@ static void __exit nr_exit(void)
1482{ 1482{
1483 int i; 1483 int i;
1484 1484
1485 proc_net_remove(&init_net, "nr"); 1485 remove_proc_entry("nr", init_net.proc_net);
1486 proc_net_remove(&init_net, "nr_neigh"); 1486 remove_proc_entry("nr_neigh", init_net.proc_net);
1487 proc_net_remove(&init_net, "nr_nodes"); 1487 remove_proc_entry("nr_nodes", init_net.proc_net);
1488 nr_loopback_clear(); 1488 nr_loopback_clear();
1489 1489
1490 nr_rt_free(); 1490 nr_rt_free();
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 85bc75c38dea..746f5a2f9804 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -549,14 +549,13 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
549 pr_err("No LLCP device\n"); 549 pr_err("No LLCP device\n");
550 return -ENODEV; 550 return -ENODEV;
551 } 551 }
552 if (gb_len < 3)
553 return -EINVAL;
552 554
553 memset(local->remote_gb, 0, NFC_MAX_GT_LEN); 555 memset(local->remote_gb, 0, NFC_MAX_GT_LEN);
554 memcpy(local->remote_gb, gb, gb_len); 556 memcpy(local->remote_gb, gb, gb_len);
555 local->remote_gb_len = gb_len; 557 local->remote_gb_len = gb_len;
556 558
557 if (local->remote_gb == NULL || local->remote_gb_len == 0)
558 return -ENODEV;
559
560 if (memcmp(local->remote_gb, llcp_magic, 3)) { 559 if (memcmp(local->remote_gb, llcp_magic, 3)) {
561 pr_err("MAC does not support LLCP\n"); 560 pr_err("MAC does not support LLCP\n");
562 return -EINVAL; 561 return -EINVAL;
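The llcp.c fix moves the length check before the copy: the old code memcpy'd a caller-supplied gb_len into a fixed NFC_MAX_GT_LEN buffer and only afterwards ran a test that could never fail. A tiny sketch of the validate-then-copy ordering, with the buffer size and magic bytes assumed for illustration:

    #include <stdio.h>
    #include <string.h>

    #define MAX_GB 48                          /* assumed buffer size */
    static const unsigned char magic[3] = { 0x46, 0x66, 0x6d };

    static int set_remote_gb(unsigned char *dst, const unsigned char *gb,
                             size_t gb_len)
    {
        if (gb_len < 3 || gb_len > MAX_GB)     /* validate before copying */
            return -1;
        memset(dst, 0, MAX_GB);
        memcpy(dst, gb, gb_len);
        return memcmp(dst, magic, 3) ? -1 : 0; /* dst[0..2] is now valid */
    }

    int main(void)
    {
        unsigned char buf[MAX_GB];
        unsigned char good[] = { 0x46, 0x66, 0x6d, 0x01 };

        printf("short input: %d\n", set_remote_gb(buf, good, 2));
        printf("good input:  %d\n", set_remote_gb(buf, good, sizeof(good)));
        return 0;
    }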
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index d8c13a965459..9dc537df46c4 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -301,7 +301,7 @@ static int queue_gso_packets(struct net *net, int dp_ifindex,
301 struct sk_buff *segs, *nskb; 301 struct sk_buff *segs, *nskb;
302 int err; 302 int err;
303 303
304 segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM); 304 segs = __skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM, false);
305 if (IS_ERR(segs)) 305 if (IS_ERR(segs))
306 return PTR_ERR(segs); 306 return PTR_ERR(segs);
307 307
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index a9327e2e48ce..670cbc3518de 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -35,10 +35,11 @@
35/* Must be called with rcu_read_lock. */ 35/* Must be called with rcu_read_lock. */
36static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) 36static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
37{ 37{
38 if (unlikely(!vport)) { 38 if (unlikely(!vport))
39 kfree_skb(skb); 39 goto error;
40 return; 40
41 } 41 if (unlikely(skb_warn_if_lro(skb)))
42 goto error;
42 43
43 /* Make our own copy of the packet. Otherwise we will mangle the 44 /* Make our own copy of the packet. Otherwise we will mangle the
44 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). 45 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
@@ -50,6 +51,10 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
50 51
51 skb_push(skb, ETH_HLEN); 52 skb_push(skb, ETH_HLEN);
52 ovs_vport_receive(vport, skb); 53 ovs_vport_receive(vport, skb);
54 return;
55
56error:
57 kfree_skb(skb);
53} 58}
54 59
55/* Called with rcu_read_lock and bottom-halves disabled. */ 60/* Called with rcu_read_lock and bottom-halves disabled. */
@@ -169,9 +174,6 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb)
169 goto error; 174 goto error;
170 } 175 }
171 176
172 if (unlikely(skb_warn_if_lro(skb)))
173 goto error;
174
175 skb->dev = netdev_vport->dev; 177 skb->dev = netdev_vport->dev;
176 len = skb->len; 178 len = skb->len;
177 dev_queue_xmit(skb); 179 dev_queue_xmit(skb);
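The vport-netdev.c change consolidates two early-exit paths onto a single error label that owns the kfree_skb(), the usual kernel-C idiom for keeping cleanup in one place; it also moves the LRO check from the send path to receive. A compact sketch of that goto structure:

    #include <stdio.h>
    #include <stdlib.h>

    /* Single cleanup label owning the free, as in netdev_port_receive() */
    static int port_receive(void *port, char *pkt)
    {
        if (!port)          /* bad port: fall to shared cleanup */
            goto error;
        if (!pkt[0])        /* second check reuses the same path */
            goto error;

        printf("delivered: %s\n", pkt);
        free(pkt);          /* consumed on success */
        return 0;

    error:
        free(pkt);          /* one place frees on every failure */
        return -1;
    }

    int main(void)
    {
        char *p = malloc(8);

        if (!p)
            return 1;
        snprintf(p, 8, "hi");
        return port_receive((void *)1, p) ? 1 : 0;
    }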
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e639645e8fec..c7bfeff10767 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2361,13 +2361,15 @@ static int packet_release(struct socket *sock)
2361 2361
2362 packet_flush_mclist(sk); 2362 packet_flush_mclist(sk);
2363 2363
2364 memset(&req_u, 0, sizeof(req_u)); 2364 if (po->rx_ring.pg_vec) {
2365 2365 memset(&req_u, 0, sizeof(req_u));
2366 if (po->rx_ring.pg_vec)
2367 packet_set_ring(sk, &req_u, 1, 0); 2366 packet_set_ring(sk, &req_u, 1, 0);
2367 }
2368 2368
2369 if (po->tx_ring.pg_vec) 2369 if (po->tx_ring.pg_vec) {
2370 memset(&req_u, 0, sizeof(req_u));
2370 packet_set_ring(sk, &req_u, 1, 1); 2371 packet_set_ring(sk, &req_u, 1, 1);
2372 }
2371 2373
2372 fanout_release(sk); 2374 fanout_release(sk);
2373 2375
@@ -3826,7 +3828,7 @@ static int __net_init packet_net_init(struct net *net)
3826 mutex_init(&net->packet.sklist_lock); 3828 mutex_init(&net->packet.sklist_lock);
3827 INIT_HLIST_HEAD(&net->packet.sklist); 3829 INIT_HLIST_HEAD(&net->packet.sklist);
3828 3830
3829 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops)) 3831 if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
3830 return -ENOMEM; 3832 return -ENOMEM;
3831 3833
3832 return 0; 3834 return 0;
@@ -3834,7 +3836,7 @@ static int __net_init packet_net_init(struct net *net)
3834 3836
3835static void __net_exit packet_net_exit(struct net *net) 3837static void __net_exit packet_net_exit(struct net *net)
3836{ 3838{
3837 proc_net_remove(net, "packet"); 3839 remove_proc_entry("packet", net->proc_net);
3838} 3840}
3839 3841
3840static struct pernet_operations packet_net_ops = { 3842static struct pernet_operations packet_net_ops = {
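The packet_release() fix re-zeroes the tpacket_req union before each packet_set_ring() call rather than once up front, since the first call may scribble on the shared scratch area before the second runs. A sketch of why a reused union needs per-use initialization (the union layout here is assumed):

    #include <stdio.h>
    #include <string.h>

    /* Scratch union reused for two teardown calls, as in packet_release() */
    union ring_req {
        struct { unsigned block_nr; } v1;
        struct { unsigned block_nr, timeout; } v3;
    };

    static void set_ring(union ring_req *req, const char *which)
    {
        /* a zeroed request means "tear the ring down" */
        printf("%s ring: block_nr=%u\n", which, req->v1.block_nr);
        req->v1.block_nr = 42;   /* callee may clobber the scratch area */
    }

    int main(void)
    {
        union ring_req req;

        memset(&req, 0, sizeof(req));  /* zero before the rx call */
        set_ring(&req, "rx");
        memset(&req, 0, sizeof(req));  /* and again before tx, per the fix */
        set_ring(&req, "tx");
        return 0;
    }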
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 5bf6341e2dd4..45a7df6575de 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -320,7 +320,7 @@ static int __net_init phonet_init_net(struct net *net)
320{ 320{
321 struct phonet_net *pnn = phonet_pernet(net); 321 struct phonet_net *pnn = phonet_pernet(net);
322 322
323 if (!proc_net_fops_create(net, "phonet", 0, &pn_sock_seq_fops)) 323 if (!proc_create("phonet", 0, net->proc_net, &pn_sock_seq_fops))
324 return -ENOMEM; 324 return -ENOMEM;
325 325
326 INIT_LIST_HEAD(&pnn->pndevs.list); 326 INIT_LIST_HEAD(&pnn->pndevs.list);
@@ -331,7 +331,7 @@ static int __net_init phonet_init_net(struct net *net)
331 331
332static void __net_exit phonet_exit_net(struct net *net) 332static void __net_exit phonet_exit_net(struct net *net)
333{ 333{
334 proc_net_remove(net, "phonet"); 334 remove_proc_entry("phonet", net->proc_net);
335} 335}
336 336
337static struct pernet_operations phonet_net_ops = { 337static struct pernet_operations phonet_net_ops = {
@@ -348,7 +348,7 @@ int __init phonet_device_init(void)
348 if (err) 348 if (err)
349 return err; 349 return err;
350 350
351 proc_net_fops_create(&init_net, "pnresource", 0, &pn_res_seq_fops); 351 proc_create("pnresource", 0, init_net.proc_net, &pn_res_seq_fops);
352 register_netdevice_notifier(&phonet_device_notifier); 352 register_netdevice_notifier(&phonet_device_notifier);
353 err = phonet_netlink_register(); 353 err = phonet_netlink_register();
354 if (err) 354 if (err)
@@ -361,7 +361,7 @@ void phonet_device_exit(void)
361 rtnl_unregister_all(PF_PHONET); 361 rtnl_unregister_all(PF_PHONET);
362 unregister_netdevice_notifier(&phonet_device_notifier); 362 unregister_netdevice_notifier(&phonet_device_notifier);
363 unregister_pernet_subsys(&phonet_net_ops); 363 unregister_pernet_subsys(&phonet_net_ops);
364 proc_net_remove(&init_net, "pnresource"); 364 remove_proc_entry("pnresource", init_net.proc_net);
365} 365}
366 366
367int phonet_route_add(struct net_device *dev, u8 daddr) 367int phonet_route_add(struct net_device *dev, u8 daddr)
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index c4719ce604c2..b768fe9d5e7a 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1575,10 +1575,13 @@ static int __init rose_proto_init(void)
1575 1575
1576 rose_add_loopback_neigh(); 1576 rose_add_loopback_neigh();
1577 1577
1578 proc_net_fops_create(&init_net, "rose", S_IRUGO, &rose_info_fops); 1578 proc_create("rose", S_IRUGO, init_net.proc_net, &rose_info_fops);
1579 proc_net_fops_create(&init_net, "rose_neigh", S_IRUGO, &rose_neigh_fops); 1579 proc_create("rose_neigh", S_IRUGO, init_net.proc_net,
1580 proc_net_fops_create(&init_net, "rose_nodes", S_IRUGO, &rose_nodes_fops); 1580 &rose_neigh_fops);
1581 proc_net_fops_create(&init_net, "rose_routes", S_IRUGO, &rose_routes_fops); 1581 proc_create("rose_nodes", S_IRUGO, init_net.proc_net,
1582 &rose_nodes_fops);
1583 proc_create("rose_routes", S_IRUGO, init_net.proc_net,
1584 &rose_routes_fops);
1582out: 1585out:
1583 return rc; 1586 return rc;
1584fail: 1587fail:
@@ -1605,10 +1608,10 @@ static void __exit rose_exit(void)
1605{ 1608{
1606 int i; 1609 int i;
1607 1610
1608 proc_net_remove(&init_net, "rose"); 1611 remove_proc_entry("rose", init_net.proc_net);
1609 proc_net_remove(&init_net, "rose_neigh"); 1612 remove_proc_entry("rose_neigh", init_net.proc_net);
1610 proc_net_remove(&init_net, "rose_nodes"); 1613 remove_proc_entry("rose_nodes", init_net.proc_net);
1611 proc_net_remove(&init_net, "rose_routes"); 1614 remove_proc_entry("rose_routes", init_net.proc_net);
1612 rose_loopback_clear(); 1615 rose_loopback_clear();
1613 1616
1614 rose_rt_free(); 1617 rose_rt_free();
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 5b0fd291babb..e61aa6001c65 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -839,8 +839,9 @@ static int __init af_rxrpc_init(void)
839 } 839 }
840 840
841#ifdef CONFIG_PROC_FS 841#ifdef CONFIG_PROC_FS
842 proc_net_fops_create(&init_net, "rxrpc_calls", 0, &rxrpc_call_seq_fops); 842 proc_create("rxrpc_calls", 0, init_net.proc_net, &rxrpc_call_seq_fops);
843 proc_net_fops_create(&init_net, "rxrpc_conns", 0, &rxrpc_connection_seq_fops); 843 proc_create("rxrpc_conns", 0, init_net.proc_net,
844 &rxrpc_connection_seq_fops);
844#endif 845#endif
845 return 0; 846 return 0;
846 847
@@ -878,8 +879,8 @@ static void __exit af_rxrpc_exit(void)
878 879
879 _debug("flush scheduled work"); 880 _debug("flush scheduled work");
880 flush_workqueue(rxrpc_workqueue); 881 flush_workqueue(rxrpc_workqueue);
881 proc_net_remove(&init_net, "rxrpc_conns"); 882 remove_proc_entry("rxrpc_conns", init_net.proc_net);
882 proc_net_remove(&init_net, "rxrpc_calls"); 883 remove_proc_entry("rxrpc_calls", init_net.proc_net);
883 destroy_workqueue(rxrpc_workqueue); 884 destroy_workqueue(rxrpc_workqueue);
884 kmem_cache_destroy(rxrpc_call_jar); 885 kmem_cache_destroy(rxrpc_call_jar);
885 _leave(""); 886 _leave("");
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 0fb9e3f567e6..e0f6de64afec 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -207,10 +207,8 @@ static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
207 struct tcf_ipt *ipt = a->priv; 207 struct tcf_ipt *ipt = a->priv;
208 struct xt_action_param par; 208 struct xt_action_param par;
209 209
210 if (skb_cloned(skb)) { 210 if (skb_unclone(skb, GFP_ATOMIC))
211 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 211 return TC_ACT_UNSPEC;
212 return TC_ACT_UNSPEC;
213 }
214 212
215 spin_lock(&ipt->tcf_lock); 213 spin_lock(&ipt->tcf_lock);
216 214
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 0c3faddf3f2c..7ed78c9e505c 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -131,8 +131,7 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
131 int i, munged = 0; 131 int i, munged = 0;
132 unsigned int off; 132 unsigned int off;
133 133
134 if (skb_cloned(skb) && 134 if (skb_unclone(skb, GFP_ATOMIC))
135 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
136 return p->tcf_action; 135 return p->tcf_action;
137 136
138 off = skb_network_offset(skb); 137 off = skb_network_offset(skb);
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 8dbd695c160b..823463adbd21 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -22,8 +22,23 @@
22#include <net/act_api.h> 22#include <net/act_api.h>
23#include <net/netlink.h> 23#include <net/netlink.h>
24 24
25#define L2T(p, L) qdisc_l2t((p)->tcfp_R_tab, L) 25struct tcf_police {
26#define L2T_P(p, L) qdisc_l2t((p)->tcfp_P_tab, L) 26 struct tcf_common common;
27 int tcfp_result;
28 u32 tcfp_ewma_rate;
29 s64 tcfp_burst;
30 u32 tcfp_mtu;
31 s64 tcfp_toks;
32 s64 tcfp_ptoks;
33 s64 tcfp_mtu_ptoks;
34 s64 tcfp_t_c;
35 struct psched_ratecfg rate;
36 bool rate_present;
37 struct psched_ratecfg peak;
38 bool peak_present;
39};
40#define to_police(pc) \
41 container_of(pc, struct tcf_police, common)
27 42
28#define POL_TAB_MASK 15 43#define POL_TAB_MASK 15
29static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1]; 44static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
@@ -108,10 +123,6 @@ static void tcf_police_destroy(struct tcf_police *p)
108 write_unlock_bh(&police_lock); 123 write_unlock_bh(&police_lock);
109 gen_kill_estimator(&p->tcf_bstats, 124 gen_kill_estimator(&p->tcf_bstats,
110 &p->tcf_rate_est); 125 &p->tcf_rate_est);
111 if (p->tcfp_R_tab)
112 qdisc_put_rtab(p->tcfp_R_tab);
113 if (p->tcfp_P_tab)
114 qdisc_put_rtab(p->tcfp_P_tab);
115 /* 126 /*
116 * gen_estimator est_timer() might access p->tcf_lock 127 * gen_estimator est_timer() might access p->tcf_lock
117 * or bstats, wait a RCU grace period before freeing p 128 * or bstats, wait a RCU grace period before freeing p
@@ -212,26 +223,36 @@ override:
212 } 223 }
213 224
214 /* No failure allowed after this point */ 225 /* No failure allowed after this point */
215 if (R_tab != NULL) { 226 police->tcfp_mtu = parm->mtu;
216 qdisc_put_rtab(police->tcfp_R_tab); 227 if (police->tcfp_mtu == 0) {
217 police->tcfp_R_tab = R_tab; 228 police->tcfp_mtu = ~0;
229 if (R_tab)
230 police->tcfp_mtu = 255 << R_tab->rate.cell_log;
231 }
232 if (R_tab) {
233 police->rate_present = true;
234 psched_ratecfg_precompute(&police->rate, R_tab->rate.rate);
235 qdisc_put_rtab(R_tab);
236 } else {
237 police->rate_present = false;
218 } 238 }
219 if (P_tab != NULL) { 239 if (P_tab) {
220 qdisc_put_rtab(police->tcfp_P_tab); 240 police->peak_present = true;
221 police->tcfp_P_tab = P_tab; 241 psched_ratecfg_precompute(&police->peak, P_tab->rate.rate);
242 qdisc_put_rtab(P_tab);
243 } else {
244 police->peak_present = false;
222 } 245 }
223 246
224 if (tb[TCA_POLICE_RESULT]) 247 if (tb[TCA_POLICE_RESULT])
225 police->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]); 248 police->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
226 police->tcfp_toks = police->tcfp_burst = parm->burst; 249 police->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
227 police->tcfp_mtu = parm->mtu; 250 police->tcfp_toks = police->tcfp_burst;
228 if (police->tcfp_mtu == 0) { 251 if (police->peak_present) {
229 police->tcfp_mtu = ~0; 252 police->tcfp_mtu_ptoks = (s64) psched_l2t_ns(&police->peak,
230 if (police->tcfp_R_tab) 253 police->tcfp_mtu);
231 police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log; 254 police->tcfp_ptoks = police->tcfp_mtu_ptoks;
232 } 255 }
233 if (police->tcfp_P_tab)
234 police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
235 police->tcf_action = parm->action; 256 police->tcf_action = parm->action;
236 257
237 if (tb[TCA_POLICE_AVRATE]) 258 if (tb[TCA_POLICE_AVRATE])
@@ -241,7 +262,7 @@ override:
241 if (ret != ACT_P_CREATED) 262 if (ret != ACT_P_CREATED)
242 return ret; 263 return ret;
243 264
244 police->tcfp_t_c = psched_get_time(); 265 police->tcfp_t_c = ktime_to_ns(ktime_get());
245 police->tcf_index = parm->index ? parm->index : 266 police->tcf_index = parm->index ? parm->index :
246 tcf_hash_new_index(&police_idx_gen, &police_hash_info); 267 tcf_hash_new_index(&police_idx_gen, &police_hash_info);
247 h = tcf_hash(police->tcf_index, POL_TAB_MASK); 268 h = tcf_hash(police->tcf_index, POL_TAB_MASK);
@@ -287,9 +308,9 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
287 struct tcf_result *res) 308 struct tcf_result *res)
288{ 309{
289 struct tcf_police *police = a->priv; 310 struct tcf_police *police = a->priv;
290 psched_time_t now; 311 s64 now;
291 long toks; 312 s64 toks;
292 long ptoks = 0; 313 s64 ptoks = 0;
293 314
294 spin_lock(&police->tcf_lock); 315 spin_lock(&police->tcf_lock);
295 316
@@ -305,24 +326,25 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
305 } 326 }
306 327
307 if (qdisc_pkt_len(skb) <= police->tcfp_mtu) { 328 if (qdisc_pkt_len(skb) <= police->tcfp_mtu) {
308 if (police->tcfp_R_tab == NULL) { 329 if (!police->rate_present) {
309 spin_unlock(&police->tcf_lock); 330 spin_unlock(&police->tcf_lock);
310 return police->tcfp_result; 331 return police->tcfp_result;
311 } 332 }
312 333
313 now = psched_get_time(); 334 now = ktime_to_ns(ktime_get());
314 toks = psched_tdiff_bounded(now, police->tcfp_t_c, 335 toks = min_t(s64, now - police->tcfp_t_c,
315 police->tcfp_burst); 336 police->tcfp_burst);
316 if (police->tcfp_P_tab) { 337 if (police->peak_present) {
317 ptoks = toks + police->tcfp_ptoks; 338 ptoks = toks + police->tcfp_ptoks;
318 if (ptoks > (long)L2T_P(police, police->tcfp_mtu)) 339 if (ptoks > police->tcfp_mtu_ptoks)
319 ptoks = (long)L2T_P(police, police->tcfp_mtu); 340 ptoks = police->tcfp_mtu_ptoks;
320 ptoks -= L2T_P(police, qdisc_pkt_len(skb)); 341 ptoks -= (s64) psched_l2t_ns(&police->peak,
342 qdisc_pkt_len(skb));
321 } 343 }
322 toks += police->tcfp_toks; 344 toks += police->tcfp_toks;
323 if (toks > (long)police->tcfp_burst) 345 if (toks > police->tcfp_burst)
324 toks = police->tcfp_burst; 346 toks = police->tcfp_burst;
325 toks -= L2T(police, qdisc_pkt_len(skb)); 347 toks -= (s64) psched_l2t_ns(&police->rate, qdisc_pkt_len(skb));
326 if ((toks|ptoks) >= 0) { 348 if ((toks|ptoks) >= 0) {
327 police->tcfp_t_c = now; 349 police->tcfp_t_c = now;
328 police->tcfp_toks = toks; 350 police->tcfp_toks = toks;
@@ -348,15 +370,15 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
348 .index = police->tcf_index, 370 .index = police->tcf_index,
349 .action = police->tcf_action, 371 .action = police->tcf_action,
350 .mtu = police->tcfp_mtu, 372 .mtu = police->tcfp_mtu,
351 .burst = police->tcfp_burst, 373 .burst = PSCHED_NS2TICKS(police->tcfp_burst),
352 .refcnt = police->tcf_refcnt - ref, 374 .refcnt = police->tcf_refcnt - ref,
353 .bindcnt = police->tcf_bindcnt - bind, 375 .bindcnt = police->tcf_bindcnt - bind,
354 }; 376 };
355 377
356 if (police->tcfp_R_tab) 378 if (police->rate_present)
357 opt.rate = police->tcfp_R_tab->rate; 379 opt.rate.rate = psched_ratecfg_getrate(&police->rate);
358 if (police->tcfp_P_tab) 380 if (police->peak_present)
359 opt.peakrate = police->tcfp_P_tab->rate; 381 opt.peakrate.rate = psched_ratecfg_getrate(&police->peak);
360 if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt)) 382 if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
361 goto nla_put_failure; 383 goto nla_put_failure;
362 if (police->tcfp_result && 384 if (police->tcfp_result &&
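act_police now keeps its token bucket in signed 64-bit nanoseconds: tokens are replenished by the elapsed time capped at the burst, and a packet conforms when the bucket stays non-negative after being charged the packet's transmission time. The core arithmetic as a runnable sketch (rate and sizes are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000LL

    struct police {
        int64_t burst;       /* bucket depth, ns */
        int64_t toks;        /* current tokens, ns */
        int64_t t_c;         /* last update, ns */
        uint64_t rate_bps;
    };

    /* Time to transmit len bytes at rate_bps, in ns (exact form of l2t) */
    static int64_t l2t_ns(uint64_t rate_bps, unsigned int len)
    {
        return (int64_t)(len * 8ULL * NSEC_PER_SEC / rate_bps);
    }

    static int police_conforms(struct police *p, int64_t now, unsigned int len)
    {
        /* replenish by elapsed time, but never beyond the burst */
        int64_t toks = now - p->t_c;

        if (toks > p->burst)
            toks = p->burst;
        toks += p->toks;
        if (toks > p->burst)
            toks = p->burst;
        toks -= l2t_ns(p->rate_bps, len);

        if (toks >= 0) {     /* conforming: commit the new state */
            p->t_c = now;
            p->toks = toks;
            return 1;
        }
        return 0;            /* exceeding: state unchanged */
    }

    int main(void)
    {
        /* 8 Mbit/s with a 1500-byte burst: one packet fits, a second doesn't */
        struct police p = { l2t_ns(8000000, 1500), 0, 0, 8000000 };

        p.toks = p.burst;
        printf("pkt1: %d\n", police_conforms(&p, 0, 1500));  /* 1 */
        printf("pkt2: %d\n", police_conforms(&p, 0, 1500));  /* 0 */
        return 0;
    }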
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index d84f7e734cd7..a181b484812a 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -493,7 +493,7 @@ void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
493} 493}
494EXPORT_SYMBOL(qdisc_watchdog_init); 494EXPORT_SYMBOL(qdisc_watchdog_init);
495 495
496void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires) 496void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
497{ 497{
498 if (test_bit(__QDISC_STATE_DEACTIVATED, 498 if (test_bit(__QDISC_STATE_DEACTIVATED,
499 &qdisc_root_sleeping(wd->qdisc)->state)) 499 &qdisc_root_sleeping(wd->qdisc)->state))
@@ -502,10 +502,10 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
502 qdisc_throttled(wd->qdisc); 502 qdisc_throttled(wd->qdisc);
503 503
504 hrtimer_start(&wd->timer, 504 hrtimer_start(&wd->timer,
505 ns_to_ktime(PSCHED_TICKS2NS(expires)), 505 ns_to_ktime(expires),
506 HRTIMER_MODE_ABS); 506 HRTIMER_MODE_ABS);
507} 507}
508EXPORT_SYMBOL(qdisc_watchdog_schedule); 508EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
509 509
510void qdisc_watchdog_cancel(struct qdisc_watchdog *wd) 510void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
511{ 511{
@@ -1768,7 +1768,7 @@ static int __net_init psched_net_init(struct net *net)
1768{ 1768{
1769 struct proc_dir_entry *e; 1769 struct proc_dir_entry *e;
1770 1770
1771 e = proc_net_fops_create(net, "psched", 0, &psched_fops); 1771 e = proc_create("psched", 0, net->proc_net, &psched_fops);
1772 if (e == NULL) 1772 if (e == NULL)
1773 return -ENOMEM; 1773 return -ENOMEM;
1774 1774
@@ -1777,7 +1777,7 @@ static int __net_init psched_net_init(struct net *net)
1777 1777
1778static void __net_exit psched_net_exit(struct net *net) 1778static void __net_exit psched_net_exit(struct net *net)
1779{ 1779{
1780 proc_net_remove(net, "psched"); 1780 remove_proc_entry("psched", net->proc_net);
1781} 1781}
1782#else 1782#else
1783static int __net_init psched_net_init(struct net *net) 1783static int __net_init psched_net_init(struct net *net)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5d81a4478514..ffad48109a22 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -25,6 +25,7 @@
25#include <linux/rcupdate.h> 25#include <linux/rcupdate.h>
26#include <linux/list.h> 26#include <linux/list.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <net/sch_generic.h>
28#include <net/pkt_sched.h> 29#include <net/pkt_sched.h>
29#include <net/dst.h> 30#include <net/dst.h>
30 31
@@ -896,3 +897,39 @@ void dev_shutdown(struct net_device *dev)
896 897
897 WARN_ON(timer_pending(&dev->watchdog_timer)); 898 WARN_ON(timer_pending(&dev->watchdog_timer));
898} 899}
900
901void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate)
902{
903 u64 factor;
904 u64 mult;
905 int shift;
906
907 r->rate_bps = rate << 3;
908 r->shift = 0;
909 r->mult = 1;
910 /*
911 * Calibrate mult, shift so that token counting is accurate
912 * for smallest packet size (64 bytes). Token (time in ns) is
913 * computed as (bytes * 8) * NSEC_PER_SEC / rate_bps. It will
914 * work as long as the smallest packet transfer time can be
915 * accurately represented in nanosec.
916 */
917 if (r->rate_bps > 0) {
918 /*
919 * Higher shift gives better accuracy. Find the largest
920 * shift such that mult fits in 32 bits.
921 */
922 for (shift = 0; shift < 16; shift++) {
923 r->shift = shift;
924 factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
925 mult = div64_u64(factor, r->rate_bps);
926 if (mult > UINT_MAX)
927 break;
928 }
929
930 r->shift = shift - 1;
931 factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
932 r->mult = div64_u64(factor, r->rate_bps);
933 }
934}
935EXPORT_SYMBOL(psched_ratecfg_precompute);
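To see what the calibration loop produces, take a 100 Mbit/s class: the rate is 12,500,000 bytes/s, so rate_bps is 10^8. The loop keeps growing the shift while mult still fits in 32 bits. A standalone model with the arithmetic spelled out:

        #include <stdio.h>
        #include <stdint.h>

        #define NSEC_PER_SEC 1000000000ULL

        int main(void)
        {
                uint64_t rate_bps = 100000000ULL;       /* 100 Mbit/s */
                uint64_t mult;
                int shift;

                for (shift = 0; shift < 16; shift++) {
                        uint64_t factor = 8ULL * NSEC_PER_SEC * (1ULL << shift);
                        if (factor / rate_bps > UINT32_MAX)
                                break;
                }
                shift--;        /* largest shift whose mult still fit */
                mult = 8ULL * NSEC_PER_SEC * (1ULL << shift) / rate_bps;

                /* 64 bytes = 512 bits; at 100 Mbit/s that is 5120 ns */
                printf("shift=%d mult=%llu l2t(64)=%llu ns\n",
                       shift, (unsigned long long)mult,
                       (unsigned long long)((64ULL * mult) >> shift));
                return 0;
        }

For this rate the loop never overflows, so shift ends at 15 and mult at 2,621,440; (64 * mult) >> 15 gives exactly 5120 ns, the true wire time of a 64-byte packet.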
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 51561eafcb72..03c2692ca01e 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -38,6 +38,7 @@
38#include <linux/workqueue.h> 38#include <linux/workqueue.h>
39#include <linux/slab.h> 39#include <linux/slab.h>
40#include <net/netlink.h> 40#include <net/netlink.h>
41#include <net/sch_generic.h>
41#include <net/pkt_sched.h> 42#include <net/pkt_sched.h>
42 43
43/* HTB algorithm. 44/* HTB algorithm.
@@ -71,12 +72,6 @@ enum htb_cmode {
71 HTB_CAN_SEND /* class can send */ 72 HTB_CAN_SEND /* class can send */
72}; 73};
73 74
74struct htb_rate_cfg {
75 u64 rate_bps;
76 u32 mult;
77 u32 shift;
78};
79
80/* interior & leaf nodes; props specific to leaves are marked L: */ 75/* interior & leaf nodes; props specific to leaves are marked L: */
81struct htb_class { 76struct htb_class {
82 struct Qdisc_class_common common; 77 struct Qdisc_class_common common;
@@ -124,8 +119,8 @@ struct htb_class {
124 int filter_cnt; 119 int filter_cnt;
125 120
126 /* token bucket parameters */ 121 /* token bucket parameters */
127 struct htb_rate_cfg rate; 122 struct psched_ratecfg rate;
128 struct htb_rate_cfg ceil; 123 struct psched_ratecfg ceil;
129 s64 buffer, cbuffer; /* token bucket depth/rate */ 124 s64 buffer, cbuffer; /* token bucket depth/rate */
130 psched_tdiff_t mbuffer; /* max wait time */ 125 psched_tdiff_t mbuffer; /* max wait time */
131 s64 tokens, ctokens; /* current number of tokens */ 126 s64 tokens, ctokens; /* current number of tokens */
@@ -168,45 +163,6 @@ struct htb_sched {
168 struct work_struct work; 163 struct work_struct work;
169}; 164};
170 165
171static u64 l2t_ns(struct htb_rate_cfg *r, unsigned int len)
172{
173 return ((u64)len * r->mult) >> r->shift;
174}
175
176static void htb_precompute_ratedata(struct htb_rate_cfg *r)
177{
178 u64 factor;
179 u64 mult;
180 int shift;
181
182 r->shift = 0;
183 r->mult = 1;
184 /*
185 * Calibrate mult, shift so that token counting is accurate
186 * for smallest packet size (64 bytes). Token (time in ns) is
187 * computed as (bytes * 8) * NSEC_PER_SEC / rate_bps. It will
188 * work as long as the smallest packet transfer time can be
189 * accurately represented in nanosec.
190 */
191 if (r->rate_bps > 0) {
192 /*
193 * Higher shift gives better accuracy. Find the largest
194 * shift such that mult fits in 32 bits.
195 */
196 for (shift = 0; shift < 16; shift++) {
197 r->shift = shift;
198 factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
199 mult = div64_u64(factor, r->rate_bps);
200 if (mult > UINT_MAX)
201 break;
202 }
203
204 r->shift = shift - 1;
205 factor = 8LLU * NSEC_PER_SEC * (1 << r->shift);
206 r->mult = div64_u64(factor, r->rate_bps);
207 }
208}
209
210/* find class in global hash table using given handle */ 166/* find class in global hash table using given handle */
211static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch) 167static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
212{ 168{
@@ -632,7 +588,7 @@ static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
632 588
633 if (toks > cl->buffer) 589 if (toks > cl->buffer)
634 toks = cl->buffer; 590 toks = cl->buffer;
635 toks -= (s64) l2t_ns(&cl->rate, bytes); 591 toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
636 if (toks <= -cl->mbuffer) 592 if (toks <= -cl->mbuffer)
637 toks = 1 - cl->mbuffer; 593 toks = 1 - cl->mbuffer;
638 594
@@ -645,7 +601,7 @@ static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
645 601
646 if (toks > cl->cbuffer) 602 if (toks > cl->cbuffer)
647 toks = cl->cbuffer; 603 toks = cl->cbuffer;
648 toks -= (s64) l2t_ns(&cl->ceil, bytes); 604 toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
649 if (toks <= -cl->mbuffer) 605 if (toks <= -cl->mbuffer)
650 toks = 1 - cl->mbuffer; 606 toks = 1 - cl->mbuffer;
651 607
@@ -1134,10 +1090,10 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1134 1090
1135 memset(&opt, 0, sizeof(opt)); 1091 memset(&opt, 0, sizeof(opt));
1136 1092
1137 opt.rate.rate = cl->rate.rate_bps >> 3; 1093 opt.rate.rate = psched_ratecfg_getrate(&cl->rate);
1138 opt.buffer = cl->buffer; 1094 opt.buffer = PSCHED_NS2TICKS(cl->buffer);
1139 opt.ceil.rate = cl->ceil.rate_bps >> 3; 1095 opt.ceil.rate = psched_ratecfg_getrate(&cl->ceil);
1140 opt.cbuffer = cl->cbuffer; 1096 opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
1141 opt.quantum = cl->quantum; 1097 opt.quantum = cl->quantum;
1142 opt.prio = cl->prio; 1098 opt.prio = cl->prio;
1143 opt.level = cl->level; 1099 opt.level = cl->level;
@@ -1459,8 +1415,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1459 cl->parent = parent; 1415 cl->parent = parent;
1460 1416
1461 /* set class to be in HTB_CAN_SEND state */ 1417 /* set class to be in HTB_CAN_SEND state */
1462 cl->tokens = hopt->buffer; 1418 cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
1463 cl->ctokens = hopt->cbuffer; 1419 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
1464 cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC; /* 1min */ 1420 cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC; /* 1min */
1465 cl->t_c = psched_get_time(); 1421 cl->t_c = psched_get_time();
1466 cl->cmode = HTB_CAN_SEND; 1422 cl->cmode = HTB_CAN_SEND;
@@ -1503,17 +1459,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1503 cl->prio = TC_HTB_NUMPRIO - 1; 1459 cl->prio = TC_HTB_NUMPRIO - 1;
1504 } 1460 }
1505 1461
1506 cl->buffer = hopt->buffer; 1462 psched_ratecfg_precompute(&cl->rate, hopt->rate.rate);
1507 cl->cbuffer = hopt->cbuffer; 1463 psched_ratecfg_precompute(&cl->ceil, hopt->ceil.rate);
1508
1509 cl->rate.rate_bps = (u64)hopt->rate.rate << 3;
1510 cl->ceil.rate_bps = (u64)hopt->ceil.rate << 3;
1511
1512 htb_precompute_ratedata(&cl->rate);
1513 htb_precompute_ratedata(&cl->ceil);
1514 1464
1515 cl->buffer = hopt->buffer << PSCHED_SHIFT; 1465 cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
 1516 cl->cbuffer = hopt->buffer << PSCHED_SHIFT; 1466 cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
1517 1467
1518 sch_tree_unlock(sch); 1468 sch_tree_unlock(sch);
1519 1469
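With the shared helper in place, htb's token accounting runs entirely in nanoseconds: the userspace buffer (in ticks) is widened once at configuration time, and each dequeue charges psched_l2t_ns() per packet. A userspace model of the htb_accnt_tokens() update, reusing the 80 ns/byte figure from the 100 Mbit/s example above:

        #include <stdio.h>
        #include <stdint.h>

        /* model: time to transmit len bytes at 100 Mbit/s, in ns */
        static int64_t l2t_ns(unsigned int len) { return len * 80LL; }

        int main(void)
        {
                int64_t buffer = 1000000;       /* 1 ms worth of tokens */
                int64_t tokens = buffer;
                int64_t diff   = 200000;        /* 200 us since the last update */

                /* mirror htb_accnt_tokens(): credit elapsed time, cap at
                 * the bucket depth, then charge the departing packet */
                tokens += diff;
                if (tokens > buffer)
                        tokens = buffer;
                tokens -= l2t_ns(1500);

                printf("tokens after a 1500-byte packet: %lld ns\n",
                       (long long)tokens);      /* 1000000 - 120000 = 880000 */
                return 0;
        }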
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 298c0ddfb57e..3d2acc7a9c80 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -438,18 +438,18 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
438 if (q->rate) { 438 if (q->rate) {
439 struct sk_buff_head *list = &sch->q; 439 struct sk_buff_head *list = &sch->q;
440 440
441 delay += packet_len_2_sched_time(skb->len, q);
442
443 if (!skb_queue_empty(list)) { 441 if (!skb_queue_empty(list)) {
444 /* 442 /*
445 * Last packet in queue is reference point (now). 443 * Last packet in queue is reference point (now),
446 * First packet in queue is already in flight, 444 * calculate this time bonus and subtract
447 * calculate this time bonus and substract
448 * from delay. 445 * from delay.
449 */ 446 */
450 delay -= now - netem_skb_cb(skb_peek(list))->time_to_send; 447 delay -= netem_skb_cb(skb_peek_tail(list))->time_to_send - now;
448 delay = max_t(psched_tdiff_t, 0, delay);
451 now = netem_skb_cb(skb_peek_tail(list))->time_to_send; 449 now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
452 } 450 }
451
452 delay += packet_len_2_sched_time(skb->len, q);
453 } 453 }
454 454
455 cb->time_to_send = now + delay; 455 cb->time_to_send = now + delay;
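The reordering matters: the tail packet's departure time becomes the reference point before this packet's own transmission time is added, and the bonus is clamped so an idle gap can no longer drive the delay negative. A worked trace of the new-side arithmetic, with made-up numbers:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                int64_t now   = 1000;   /* current time, arbitrary units */
                int64_t tail  = 1300;   /* time_to_send of the queue tail */
                int64_t delay = 200;    /* random delay drawn for this packet */
                int64_t xmit  = 120;    /* packet_len_2_sched_time() result */

                /* new ordering: subtract the in-flight bonus, clamp,
                 * then rebase on the tail and add the transmit time */
                delay -= tail - now;            /* 200 - 300 = -100 */
                if (delay < 0)
                        delay = 0;              /* clamp, as max_t() does */
                now = tail;
                delay += xmit;

                printf("time_to_send = %lld\n",
                       (long long)(now + delay));       /* 1420 */
                return 0;
        }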
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 4b056c15e90c..c8388f3c3426 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -19,6 +19,7 @@
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/skbuff.h> 20#include <linux/skbuff.h>
21#include <net/netlink.h> 21#include <net/netlink.h>
22#include <net/sch_generic.h>
22#include <net/pkt_sched.h> 23#include <net/pkt_sched.h>
23 24
24 25
@@ -100,23 +101,21 @@
100struct tbf_sched_data { 101struct tbf_sched_data {
101/* Parameters */ 102/* Parameters */
102 u32 limit; /* Maximal length of backlog: bytes */ 103 u32 limit; /* Maximal length of backlog: bytes */
103 u32 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */ 104 s64 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */
104 u32 mtu; 105 s64 mtu;
105 u32 max_size; 106 u32 max_size;
106 struct qdisc_rate_table *R_tab; 107 struct psched_ratecfg rate;
107 struct qdisc_rate_table *P_tab; 108 struct psched_ratecfg peak;
109 bool peak_present;
108 110
109/* Variables */ 111/* Variables */
110 long tokens; /* Current number of B tokens */ 112 s64 tokens; /* Current number of B tokens */
111 long ptokens; /* Current number of P tokens */ 113 s64 ptokens; /* Current number of P tokens */
112 psched_time_t t_c; /* Time check-point */ 114 s64 t_c; /* Time check-point */
113 struct Qdisc *qdisc; /* Inner qdisc, default - bfifo queue */ 115 struct Qdisc *qdisc; /* Inner qdisc, default - bfifo queue */
114 struct qdisc_watchdog watchdog; /* Watchdog timer */ 116 struct qdisc_watchdog watchdog; /* Watchdog timer */
115}; 117};
116 118
117#define L2T(q, L) qdisc_l2t((q)->R_tab, L)
118#define L2T_P(q, L) qdisc_l2t((q)->P_tab, L)
119
120static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch) 119static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
121{ 120{
122 struct tbf_sched_data *q = qdisc_priv(sch); 121 struct tbf_sched_data *q = qdisc_priv(sch);
@@ -156,24 +155,24 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
156 skb = q->qdisc->ops->peek(q->qdisc); 155 skb = q->qdisc->ops->peek(q->qdisc);
157 156
158 if (skb) { 157 if (skb) {
159 psched_time_t now; 158 s64 now;
160 long toks; 159 s64 toks;
161 long ptoks = 0; 160 s64 ptoks = 0;
162 unsigned int len = qdisc_pkt_len(skb); 161 unsigned int len = qdisc_pkt_len(skb);
163 162
164 now = psched_get_time(); 163 now = ktime_to_ns(ktime_get());
165 toks = psched_tdiff_bounded(now, q->t_c, q->buffer); 164 toks = min_t(s64, now - q->t_c, q->buffer);
166 165
167 if (q->P_tab) { 166 if (q->peak_present) {
168 ptoks = toks + q->ptokens; 167 ptoks = toks + q->ptokens;
169 if (ptoks > (long)q->mtu) 168 if (ptoks > q->mtu)
170 ptoks = q->mtu; 169 ptoks = q->mtu;
171 ptoks -= L2T_P(q, len); 170 ptoks -= (s64) psched_l2t_ns(&q->peak, len);
172 } 171 }
173 toks += q->tokens; 172 toks += q->tokens;
174 if (toks > (long)q->buffer) 173 if (toks > q->buffer)
175 toks = q->buffer; 174 toks = q->buffer;
176 toks -= L2T(q, len); 175 toks -= (s64) psched_l2t_ns(&q->rate, len);
177 176
178 if ((toks|ptoks) >= 0) { 177 if ((toks|ptoks) >= 0) {
179 skb = qdisc_dequeue_peeked(q->qdisc); 178 skb = qdisc_dequeue_peeked(q->qdisc);
@@ -189,8 +188,8 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
189 return skb; 188 return skb;
190 } 189 }
191 190
192 qdisc_watchdog_schedule(&q->watchdog, 191 qdisc_watchdog_schedule_ns(&q->watchdog,
193 now + max_t(long, -toks, -ptoks)); 192 now + max_t(long, -toks, -ptoks));
194 193
195 /* Maybe we have a shorter packet in the queue, 194 /* Maybe we have a shorter packet in the queue,
196 which can be sent now. It sounds cool, 195 which can be sent now. It sounds cool,
@@ -214,7 +213,7 @@ static void tbf_reset(struct Qdisc *sch)
214 213
215 qdisc_reset(q->qdisc); 214 qdisc_reset(q->qdisc);
216 sch->q.qlen = 0; 215 sch->q.qlen = 0;
217 q->t_c = psched_get_time(); 216 q->t_c = ktime_to_ns(ktime_get());
218 q->tokens = q->buffer; 217 q->tokens = q->buffer;
219 q->ptokens = q->mtu; 218 q->ptokens = q->mtu;
220 qdisc_watchdog_cancel(&q->watchdog); 219 qdisc_watchdog_cancel(&q->watchdog);
@@ -293,14 +292,19 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
293 q->qdisc = child; 292 q->qdisc = child;
294 } 293 }
295 q->limit = qopt->limit; 294 q->limit = qopt->limit;
296 q->mtu = qopt->mtu; 295 q->mtu = PSCHED_TICKS2NS(qopt->mtu);
297 q->max_size = max_size; 296 q->max_size = max_size;
298 q->buffer = qopt->buffer; 297 q->buffer = PSCHED_TICKS2NS(qopt->buffer);
299 q->tokens = q->buffer; 298 q->tokens = q->buffer;
300 q->ptokens = q->mtu; 299 q->ptokens = q->mtu;
301 300
302 swap(q->R_tab, rtab); 301 psched_ratecfg_precompute(&q->rate, rtab->rate.rate);
303 swap(q->P_tab, ptab); 302 if (ptab) {
303 psched_ratecfg_precompute(&q->peak, ptab->rate.rate);
304 q->peak_present = true;
305 } else {
306 q->peak_present = false;
307 }
304 308
305 sch_tree_unlock(sch); 309 sch_tree_unlock(sch);
306 err = 0; 310 err = 0;
@@ -319,7 +323,7 @@ static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
319 if (opt == NULL) 323 if (opt == NULL)
320 return -EINVAL; 324 return -EINVAL;
321 325
322 q->t_c = psched_get_time(); 326 q->t_c = ktime_to_ns(ktime_get());
323 qdisc_watchdog_init(&q->watchdog, sch); 327 qdisc_watchdog_init(&q->watchdog, sch);
324 q->qdisc = &noop_qdisc; 328 q->qdisc = &noop_qdisc;
325 329
@@ -331,12 +335,6 @@ static void tbf_destroy(struct Qdisc *sch)
331 struct tbf_sched_data *q = qdisc_priv(sch); 335 struct tbf_sched_data *q = qdisc_priv(sch);
332 336
333 qdisc_watchdog_cancel(&q->watchdog); 337 qdisc_watchdog_cancel(&q->watchdog);
334
335 if (q->P_tab)
336 qdisc_put_rtab(q->P_tab);
337 if (q->R_tab)
338 qdisc_put_rtab(q->R_tab);
339
340 qdisc_destroy(q->qdisc); 338 qdisc_destroy(q->qdisc);
341} 339}
342 340
@@ -352,13 +350,13 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
352 goto nla_put_failure; 350 goto nla_put_failure;
353 351
354 opt.limit = q->limit; 352 opt.limit = q->limit;
355 opt.rate = q->R_tab->rate; 353 opt.rate.rate = psched_ratecfg_getrate(&q->rate);
356 if (q->P_tab) 354 if (q->peak_present)
357 opt.peakrate = q->P_tab->rate; 355 opt.peakrate.rate = psched_ratecfg_getrate(&q->peak);
358 else 356 else
359 memset(&opt.peakrate, 0, sizeof(opt.peakrate)); 357 memset(&opt.peakrate, 0, sizeof(opt.peakrate));
360 opt.mtu = q->mtu; 358 opt.mtu = PSCHED_NS2TICKS(q->mtu);
361 opt.buffer = q->buffer; 359 opt.buffer = PSCHED_NS2TICKS(q->buffer);
362 if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt)) 360 if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
363 goto nla_put_failure; 361 goto nla_put_failure;
364 362
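The dequeue path keeps the long-standing (toks | ptoks) >= 0 test, which still works after the widening to s64: the OR of two signed values is non-negative exactly when both sign bits are clear. A quick demonstration:

        #include <assert.h>
        #include <stdint.h>

        static int both_nonneg(int64_t a, int64_t b)
        {
                /* the OR keeps the sign bit iff either operand had it set */
                return (a | b) >= 0;
        }

        int main(void)
        {
                assert(both_nonneg(5, 0));
                assert(!both_nonneg(5, -1));
                assert(!both_nonneg(-5, 7));
                assert(!both_nonneg(-5, -7));
                return 0;
        }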
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
index 7521d944c0fb..cf4852814e0c 100644
--- a/net/sctp/Kconfig
+++ b/net/sctp/Kconfig
@@ -3,8 +3,8 @@
3# 3#
4 4
5menuconfig IP_SCTP 5menuconfig IP_SCTP
6 tristate "The SCTP Protocol (EXPERIMENTAL)" 6 tristate "The SCTP Protocol"
7 depends on INET && EXPERIMENTAL 7 depends on INET
8 depends on IPV6 || IPV6=n 8 depends on IPV6 || IPV6=n
9 select CRYPTO 9 select CRYPTO
10 select CRYPTO_HMAC 10 select CRYPTO_HMAC
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 159b9bc5d633..ba1dfc3f8def 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -71,7 +71,7 @@ void sctp_auth_key_put(struct sctp_auth_bytes *key)
71 return; 71 return;
72 72
73 if (atomic_dec_and_test(&key->refcnt)) { 73 if (atomic_dec_and_test(&key->refcnt)) {
74 kfree(key); 74 kzfree(key);
75 SCTP_DBG_OBJCNT_DEC(keys); 75 SCTP_DBG_OBJCNT_DEC(keys);
76 } 76 }
77} 77}
@@ -200,27 +200,28 @@ static struct sctp_auth_bytes *sctp_auth_make_key_vector(
200 struct sctp_auth_bytes *new; 200 struct sctp_auth_bytes *new;
201 __u32 len; 201 __u32 len;
202 __u32 offset = 0; 202 __u32 offset = 0;
203 __u16 random_len, hmacs_len, chunks_len = 0;
203 204
204 len = ntohs(random->param_hdr.length) + ntohs(hmacs->param_hdr.length); 205 random_len = ntohs(random->param_hdr.length);
205 if (chunks) 206 hmacs_len = ntohs(hmacs->param_hdr.length);
206 len += ntohs(chunks->param_hdr.length); 207 if (chunks)
208 chunks_len = ntohs(chunks->param_hdr.length);
207 209
208 new = kmalloc(sizeof(struct sctp_auth_bytes) + len, gfp); 210 len = random_len + hmacs_len + chunks_len;
211
212 new = sctp_auth_create_key(len, gfp);
209 if (!new) 213 if (!new)
210 return NULL; 214 return NULL;
211 215
212 new->len = len; 216 memcpy(new->data, random, random_len);
213 217 offset += random_len;
214 memcpy(new->data, random, ntohs(random->param_hdr.length));
215 offset += ntohs(random->param_hdr.length);
216 218
217 if (chunks) { 219 if (chunks) {
218 memcpy(new->data + offset, chunks, 220 memcpy(new->data + offset, chunks, chunks_len);
219 ntohs(chunks->param_hdr.length)); 221 offset += chunks_len;
220 offset += ntohs(chunks->param_hdr.length);
221 } 222 }
222 223
223 memcpy(new->data + offset, hmacs, ntohs(hmacs->param_hdr.length)); 224 memcpy(new->data + offset, hmacs, hmacs_len);
224 225
225 return new; 226 return new;
226} 227}
@@ -350,8 +351,8 @@ static struct sctp_auth_bytes *sctp_auth_asoc_create_secret(
350 secret = sctp_auth_asoc_set_secret(ep_key, first_vector, last_vector, 351 secret = sctp_auth_asoc_set_secret(ep_key, first_vector, last_vector,
351 gfp); 352 gfp);
352out: 353out:
353 kfree(local_key_vector); 354 sctp_auth_key_put(local_key_vector);
354 kfree(peer_key_vector); 355 sctp_auth_key_put(peer_key_vector);
355 356
356 return secret; 357 return secret;
357} 358}
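These hunks route key material through refcounted puts and kzfree() instead of plain kfree(), so secrets are scrubbed before the memory is recycled. A userspace sketch of the scrubbing idea, assuming the kernel kzfree() behavior of zeroing the whole allocation before freeing it (the kernel derives the size from ksize(); here it is passed explicitly):

        #include <stdlib.h>
        #include <string.h>

        /* model of kzfree(): scrub, then free */
        static void kzfree_model(void *p, size_t len)
        {
                if (!p)
                        return;
                memset(p, 0, len);
                free(p);
        }

        int main(void)
        {
                char *key = malloc(32);
                if (!key)
                        return 1;
                /* ... key material lives here ... */
                kzfree_model(key, 32);
                return 0;
        }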
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 17a001bac2cc..73aad3d16a45 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -151,9 +151,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
151 ep->rcvbuf_policy = net->sctp.rcvbuf_policy; 151 ep->rcvbuf_policy = net->sctp.rcvbuf_policy;
152 152
153 /* Initialize the secret key used with cookie. */ 153 /* Initialize the secret key used with cookie. */
154 get_random_bytes(&ep->secret_key[0], SCTP_SECRET_SIZE); 154 get_random_bytes(ep->secret_key, sizeof(ep->secret_key));
155 ep->last_key = ep->current_key = 0;
156 ep->key_changed_at = jiffies;
157 155
158 /* SCTP-AUTH extensions*/ 156 /* SCTP-AUTH extensions*/
159 INIT_LIST_HEAD(&ep->endpoint_shared_keys); 157 INIT_LIST_HEAD(&ep->endpoint_shared_keys);
@@ -271,6 +269,8 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
271 sctp_inq_free(&ep->base.inqueue); 269 sctp_inq_free(&ep->base.inqueue);
272 sctp_bind_addr_free(&ep->base.bind_addr); 270 sctp_bind_addr_free(&ep->base.bind_addr);
273 271
272 memset(ep->secret_key, 0, sizeof(ep->secret_key));
273
274 /* Remove and free the port */ 274 /* Remove and free the port */
275 if (sctp_sk(ep->base.sk)->bind_hash) 275 if (sctp_sk(ep->base.sk)->bind_hash)
276 sctp_put_port(ep->base.sk); 276 sctp_put_port(ep->base.sk);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index f3f0f4dc31dd..391a245d5203 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -326,9 +326,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
326 */ 326 */
327 rcu_read_lock(); 327 rcu_read_lock();
328 list_for_each_entry_rcu(laddr, &bp->address_list, list) { 328 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
329 if (!laddr->valid && laddr->state != SCTP_ADDR_SRC) 329 if (!laddr->valid)
330 continue; 330 continue;
331 if ((laddr->a.sa.sa_family == AF_INET6) && 331 if ((laddr->state == SCTP_ADDR_SRC) &&
332 (laddr->a.sa.sa_family == AF_INET6) &&
332 (scope <= sctp_scope(&laddr->a))) { 333 (scope <= sctp_scope(&laddr->a))) {
333 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); 334 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
334 if (!baddr || (matchlen < bmatchlen)) { 335 if (!baddr || (matchlen < bmatchlen)) {
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
index 5f7518de2fd1..ad0dba870341 100644
--- a/net/sctp/probe.c
+++ b/net/sctp/probe.c
@@ -122,12 +122,12 @@ static const struct file_operations sctpprobe_fops = {
122 .llseek = noop_llseek, 122 .llseek = noop_llseek,
123}; 123};
124 124
125sctp_disposition_t jsctp_sf_eat_sack(struct net *net, 125static sctp_disposition_t jsctp_sf_eat_sack(struct net *net,
126 const struct sctp_endpoint *ep, 126 const struct sctp_endpoint *ep,
127 const struct sctp_association *asoc, 127 const struct sctp_association *asoc,
128 const sctp_subtype_t type, 128 const sctp_subtype_t type,
129 void *arg, 129 void *arg,
130 sctp_cmd_seq_t *commands) 130 sctp_cmd_seq_t *commands)
131{ 131{
132 struct sctp_transport *sp; 132 struct sctp_transport *sp;
133 static __u32 lcwnd = 0; 133 static __u32 lcwnd = 0;
@@ -183,13 +183,20 @@ static __init int sctpprobe_init(void)
183{ 183{
184 int ret = -ENOMEM; 184 int ret = -ENOMEM;
185 185
 186 /* Warning: if the function signature of sctp_sf_eat_sack_6_2
187 * has been changed, you also have to change the signature of
188 * jsctp_sf_eat_sack, otherwise you end up right here!
189 */
190 BUILD_BUG_ON(__same_type(sctp_sf_eat_sack_6_2,
191 jsctp_sf_eat_sack) == 0);
192
186 init_waitqueue_head(&sctpw.wait); 193 init_waitqueue_head(&sctpw.wait);
187 spin_lock_init(&sctpw.lock); 194 spin_lock_init(&sctpw.lock);
188 if (kfifo_alloc(&sctpw.fifo, bufsize, GFP_KERNEL)) 195 if (kfifo_alloc(&sctpw.fifo, bufsize, GFP_KERNEL))
189 return ret; 196 return ret;
190 197
191 if (!proc_net_fops_create(&init_net, procname, S_IRUSR, 198 if (!proc_create(procname, S_IRUSR, init_net.proc_net,
192 &sctpprobe_fops)) 199 &sctpprobe_fops))
193 goto free_kfifo; 200 goto free_kfifo;
194 201
195 ret = register_jprobe(&sctp_recv_probe); 202 ret = register_jprobe(&sctp_recv_probe);
@@ -201,7 +208,7 @@ static __init int sctpprobe_init(void)
201 return 0; 208 return 0;
202 209
203remove_proc: 210remove_proc:
204 proc_net_remove(&init_net, procname); 211 remove_proc_entry(procname, init_net.proc_net);
205free_kfifo: 212free_kfifo:
206 kfifo_free(&sctpw.fifo); 213 kfifo_free(&sctpw.fifo);
207 return ret; 214 return ret;
@@ -210,7 +217,7 @@ free_kfifo:
210static __exit void sctpprobe_exit(void) 217static __exit void sctpprobe_exit(void)
211{ 218{
212 kfifo_free(&sctpw.fifo); 219 kfifo_free(&sctpw.fifo);
213 proc_net_remove(&init_net, procname); 220 remove_proc_entry(procname, init_net.proc_net);
214 unregister_jprobe(&sctp_recv_probe); 221 unregister_jprobe(&sctp_recv_probe);
215} 222}
216 223
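The BUILD_BUG_ON() guard turns a silent jprobe signature mismatch into a compile error: __same_type() expands to __builtin_types_compatible_p(), which compares the two function types. A minimal standalone version of the trick:

        /* fails to compile if the two expressions differ in type */
        #define BUILD_BUG_ON_MISMATCH(a, b) \
                ((void)sizeof(char[__builtin_types_compatible_p(typeof(a), \
                                                typeof(b)) ? 1 : -1]))

        static int probed(int x)  { return x; }
        static int wrapper(int x) { return x + 1; }

        int main(void)
        {
                /* compiles: identical int (*)(int) signatures */
                BUILD_BUG_ON_MISMATCH(probed, wrapper);
                return 0;
        }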
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index e1c5fc2be6b8..a193f3bc8144 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1589,8 +1589,6 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
1589 struct sctp_signed_cookie *cookie; 1589 struct sctp_signed_cookie *cookie;
1590 struct scatterlist sg; 1590 struct scatterlist sg;
1591 int headersize, bodysize; 1591 int headersize, bodysize;
1592 unsigned int keylen;
1593 char *key;
1594 1592
1595 /* Header size is static data prior to the actual cookie, including 1593 /* Header size is static data prior to the actual cookie, including
1596 * any padding. 1594 * any padding.
@@ -1650,12 +1648,11 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
1650 1648
1651 /* Sign the message. */ 1649 /* Sign the message. */
1652 sg_init_one(&sg, &cookie->c, bodysize); 1650 sg_init_one(&sg, &cookie->c, bodysize);
1653 keylen = SCTP_SECRET_SIZE;
1654 key = (char *)ep->secret_key[ep->current_key];
1655 desc.tfm = sctp_sk(ep->base.sk)->hmac; 1651 desc.tfm = sctp_sk(ep->base.sk)->hmac;
1656 desc.flags = 0; 1652 desc.flags = 0;
1657 1653
1658 if (crypto_hash_setkey(desc.tfm, key, keylen) || 1654 if (crypto_hash_setkey(desc.tfm, ep->secret_key,
1655 sizeof(ep->secret_key)) ||
1659 crypto_hash_digest(&desc, &sg, bodysize, cookie->signature)) 1656 crypto_hash_digest(&desc, &sg, bodysize, cookie->signature))
1660 goto free_cookie; 1657 goto free_cookie;
1661 } 1658 }
@@ -1682,8 +1679,7 @@ struct sctp_association *sctp_unpack_cookie(
1682 int headersize, bodysize, fixed_size; 1679 int headersize, bodysize, fixed_size;
1683 __u8 *digest = ep->digest; 1680 __u8 *digest = ep->digest;
1684 struct scatterlist sg; 1681 struct scatterlist sg;
1685 unsigned int keylen, len; 1682 unsigned int len;
1686 char *key;
1687 sctp_scope_t scope; 1683 sctp_scope_t scope;
1688 struct sk_buff *skb = chunk->skb; 1684 struct sk_buff *skb = chunk->skb;
1689 struct timeval tv; 1685 struct timeval tv;
@@ -1718,34 +1714,21 @@ struct sctp_association *sctp_unpack_cookie(
1718 goto no_hmac; 1714 goto no_hmac;
1719 1715
1720 /* Check the signature. */ 1716 /* Check the signature. */
1721 keylen = SCTP_SECRET_SIZE;
1722 sg_init_one(&sg, bear_cookie, bodysize); 1717 sg_init_one(&sg, bear_cookie, bodysize);
1723 key = (char *)ep->secret_key[ep->current_key];
1724 desc.tfm = sctp_sk(ep->base.sk)->hmac; 1718 desc.tfm = sctp_sk(ep->base.sk)->hmac;
1725 desc.flags = 0; 1719 desc.flags = 0;
1726 1720
1727 memset(digest, 0x00, SCTP_SIGNATURE_SIZE); 1721 memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
1728 if (crypto_hash_setkey(desc.tfm, key, keylen) || 1722 if (crypto_hash_setkey(desc.tfm, ep->secret_key,
1723 sizeof(ep->secret_key)) ||
1729 crypto_hash_digest(&desc, &sg, bodysize, digest)) { 1724 crypto_hash_digest(&desc, &sg, bodysize, digest)) {
1730 *error = -SCTP_IERROR_NOMEM; 1725 *error = -SCTP_IERROR_NOMEM;
1731 goto fail; 1726 goto fail;
1732 } 1727 }
1733 1728
1734 if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) { 1729 if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
1735 /* Try the previous key. */ 1730 *error = -SCTP_IERROR_BAD_SIG;
1736 key = (char *)ep->secret_key[ep->last_key]; 1731 goto fail;
1737 memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
1738 if (crypto_hash_setkey(desc.tfm, key, keylen) ||
1739 crypto_hash_digest(&desc, &sg, bodysize, digest)) {
1740 *error = -SCTP_IERROR_NOMEM;
1741 goto fail;
1742 }
1743
1744 if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
1745 /* Yikes! Still bad signature! */
1746 *error = -SCTP_IERROR_BAD_SIG;
1747 goto fail;
1748 }
1749 } 1732 }
1750 1733
1751no_hmac: 1734no_hmac:
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9e65758cb038..cedd9bf67b8c 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3390,7 +3390,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
3390 3390
3391 ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey); 3391 ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
3392out: 3392out:
3393 kfree(authkey); 3393 kzfree(authkey);
3394 return ret; 3394 return ret;
3395} 3395}
3396 3396
diff --git a/net/socket.c b/net/socket.c
index 5c4d82c05293..ee0d029e5130 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2837,7 +2837,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
2837 } 2837 }
2838 2838
2839 ifr = compat_alloc_user_space(buf_size); 2839 ifr = compat_alloc_user_space(buf_size);
2840 rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8); 2840 rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
2841 2841
2842 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) 2842 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
2843 return -EFAULT; 2843 return -EFAULT;
@@ -2861,12 +2861,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
2861 offsetof(struct ethtool_rxnfc, fs.ring_cookie)); 2861 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
2862 2862
2863 if (copy_in_user(rxnfc, compat_rxnfc, 2863 if (copy_in_user(rxnfc, compat_rxnfc,
2864 (void *)(&rxnfc->fs.m_ext + 1) - 2864 (void __user *)(&rxnfc->fs.m_ext + 1) -
2865 (void *)rxnfc) || 2865 (void __user *)rxnfc) ||
2866 copy_in_user(&rxnfc->fs.ring_cookie, 2866 copy_in_user(&rxnfc->fs.ring_cookie,
2867 &compat_rxnfc->fs.ring_cookie, 2867 &compat_rxnfc->fs.ring_cookie,
2868 (void *)(&rxnfc->fs.location + 1) - 2868 (void __user *)(&rxnfc->fs.location + 1) -
2869 (void *)&rxnfc->fs.ring_cookie) || 2869 (void __user *)&rxnfc->fs.ring_cookie) ||
2870 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, 2870 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
2871 sizeof(rxnfc->rule_cnt))) 2871 sizeof(rxnfc->rule_cnt)))
2872 return -EFAULT; 2872 return -EFAULT;
@@ -2878,12 +2878,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
2878 2878
2879 if (convert_out) { 2879 if (convert_out) {
2880 if (copy_in_user(compat_rxnfc, rxnfc, 2880 if (copy_in_user(compat_rxnfc, rxnfc,
2881 (const void *)(&rxnfc->fs.m_ext + 1) - 2881 (const void __user *)(&rxnfc->fs.m_ext + 1) -
2882 (const void *)rxnfc) || 2882 (const void __user *)rxnfc) ||
2883 copy_in_user(&compat_rxnfc->fs.ring_cookie, 2883 copy_in_user(&compat_rxnfc->fs.ring_cookie,
2884 &rxnfc->fs.ring_cookie, 2884 &rxnfc->fs.ring_cookie,
2885 (const void *)(&rxnfc->fs.location + 1) - 2885 (const void __user *)(&rxnfc->fs.location + 1) -
2886 (const void *)&rxnfc->fs.ring_cookie) || 2886 (const void __user *)&rxnfc->fs.ring_cookie) ||
2887 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt, 2887 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
2888 sizeof(rxnfc->rule_cnt))) 2888 sizeof(rxnfc->rule_cnt)))
2889 return -EFAULT; 2889 return -EFAULT;
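The casts in these hunks only matter to sparse: __user tags an address space, and these subrange copies compute byte offsets inside a user buffer, so the intermediate pointers must stay annotated. A sketch of the pattern (outside the kernel, __user expands to nothing unless sparse's __CHECKER__ is defined; struct rec and span() are illustrative, not real symbols):

        #include <stdio.h>

        #ifdef __CHECKER__
        #define __user __attribute__((noderef, address_space(1)))
        #else
        #define __user
        #endif

        struct rec { int a; int b; int c; };

        /* byte span of the [a, b] prefix of a user-space struct rec;
         * the char __user * casts keep sparse from warning about
         * mixing kernel and user pointers in the arithmetic */
        static unsigned long span(struct rec __user *r)
        {
                return (char __user *)(&r->b + 1) - (char __user *)r;
        }

        int main(void)
        {
                struct rec r;
                printf("prefix bytes = %lu\n", span((struct rec __user *)&r));
                return 0;
        }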
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index bfa31714581f..fb20f25ddec9 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -98,9 +98,25 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
98 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); 98 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
99} 99}
100 100
101static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
102{
103 struct list_head *q = &queue->tasks[queue->priority];
104 struct rpc_task *task;
105
106 if (!list_empty(q)) {
107 task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
108 if (task->tk_owner == queue->owner)
109 list_move_tail(&task->u.tk_wait.list, q);
110 }
111}
112
101static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) 113static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
102{ 114{
103 queue->priority = priority; 115 if (queue->priority != priority) {
116 /* Fairness: rotate the list when changing priority */
117 rpc_rotate_queue_owner(queue);
118 queue->priority = priority;
119 }
104} 120}
105 121
106static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid) 122static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
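The rotation is a one-step round robin: when the priority is about to change and the task at the head of the current priority queue belongs to the owner that has been running, it is moved to the tail so another owner's task gets picked next. A toy model of the decision, with owner IDs standing in for tasks:

        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                /* task owners queued at the current priority, head first */
                int q[] = { 42, 7, 9 };
                int n = 3, owner = 42;

                /* mirror rpc_rotate_queue_owner(): if the head task
                 * belongs to the current owner, rotate it to the tail */
                if (n && q[0] == owner) {
                        int head = q[0];
                        memmove(q, q + 1, (n - 1) * sizeof(q[0]));
                        q[n - 1] = head;
                }

                printf("next owner served: %d\n", q[0]);        /* 7 */
                return 0;
        }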
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 0a148c9d2a5c..0f679df7d072 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -465,7 +465,7 @@ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,
465} 465}
466 466
467/* 467/*
468 * See net/ipv6/datagram.c : datagram_recv_ctl 468 * See net/ipv6/datagram.c : ip6_datagram_recv_ctl
469 */ 469 */
470static int svc_udp_get_dest_address6(struct svc_rqst *rqstp, 470static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
471 struct cmsghdr *cmh) 471 struct cmsghdr *cmh)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 54f89f90ac33..2655c9f4ecad 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -774,6 +774,7 @@ void tipc_bclink_init(void)
774 bcl->owner = &bclink->node; 774 bcl->owner = &bclink->node;
775 bcl->max_pkt = MAX_PKT_DEFAULT_MCAST; 775 bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
776 tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); 776 tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
777 spin_lock_init(&bcbearer->bearer.lock);
777 bcl->b_ptr = &bcbearer->bearer; 778 bcl->b_ptr = &bcbearer->bearer;
778 bcl->state = WORKING_WORKING; 779 bcl->state = WORKING_WORKING;
779 strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME); 780 strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 9b4e4833a484..a9622b6cd916 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -43,7 +43,8 @@
43#define SS_LISTENING -1 /* socket is listening */ 43#define SS_LISTENING -1 /* socket is listening */
44#define SS_READY -2 /* socket is connectionless */ 44#define SS_READY -2 /* socket is connectionless */
45 45
46#define OVERLOAD_LIMIT_BASE 10000 46#define CONN_OVERLOAD_LIMIT ((TIPC_FLOW_CONTROL_WIN * 2 + 1) * \
47 SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
47#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ 48#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
48 49
49struct tipc_sock { 50struct tipc_sock {
@@ -129,19 +130,6 @@ static void advance_rx_queue(struct sock *sk)
129} 130}
130 131
131/** 132/**
132 * discard_rx_queue - discard all buffers in socket receive queue
133 *
134 * Caller must hold socket lock
135 */
136static void discard_rx_queue(struct sock *sk)
137{
138 struct sk_buff *buf;
139
140 while ((buf = __skb_dequeue(&sk->sk_receive_queue)))
141 kfree_skb(buf);
142}
143
144/**
145 * reject_rx_queue - reject all buffers in socket receive queue 133 * reject_rx_queue - reject all buffers in socket receive queue
146 * 134 *
147 * Caller must hold socket lock 135 * Caller must hold socket lock
@@ -215,7 +203,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
215 203
216 sock_init_data(sock, sk); 204 sock_init_data(sock, sk);
217 sk->sk_backlog_rcv = backlog_rcv; 205 sk->sk_backlog_rcv = backlog_rcv;
218 sk->sk_rcvbuf = TIPC_FLOW_CONTROL_WIN * 2 * TIPC_MAX_USER_MSG_SIZE * 2;
219 sk->sk_data_ready = tipc_data_ready; 206 sk->sk_data_ready = tipc_data_ready;
220 sk->sk_write_space = tipc_write_space; 207 sk->sk_write_space = tipc_write_space;
221 tipc_sk(sk)->p = tp_ptr; 208 tipc_sk(sk)->p = tp_ptr;
@@ -292,7 +279,7 @@ static int release(struct socket *sock)
292 res = tipc_deleteport(tport->ref); 279 res = tipc_deleteport(tport->ref);
293 280
294 /* Discard any remaining (connection-based) messages in receive queue */ 281 /* Discard any remaining (connection-based) messages in receive queue */
295 discard_rx_queue(sk); 282 __skb_queue_purge(&sk->sk_receive_queue);
296 283
297 /* Reject any messages that accumulated in backlog queue */ 284 /* Reject any messages that accumulated in backlog queue */
298 sock->state = SS_DISCONNECTING; 285 sock->state = SS_DISCONNECTING;
@@ -516,8 +503,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
516 if (unlikely((m->msg_namelen < sizeof(*dest)) || 503 if (unlikely((m->msg_namelen < sizeof(*dest)) ||
517 (dest->family != AF_TIPC))) 504 (dest->family != AF_TIPC)))
518 return -EINVAL; 505 return -EINVAL;
519 if ((total_len > TIPC_MAX_USER_MSG_SIZE) || 506 if (total_len > TIPC_MAX_USER_MSG_SIZE)
520 (m->msg_iovlen > (unsigned int)INT_MAX))
521 return -EMSGSIZE; 507 return -EMSGSIZE;
522 508
523 if (iocb) 509 if (iocb)
@@ -625,8 +611,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
625 if (unlikely(dest)) 611 if (unlikely(dest))
626 return send_msg(iocb, sock, m, total_len); 612 return send_msg(iocb, sock, m, total_len);
627 613
628 if ((total_len > TIPC_MAX_USER_MSG_SIZE) || 614 if (total_len > TIPC_MAX_USER_MSG_SIZE)
629 (m->msg_iovlen > (unsigned int)INT_MAX))
630 return -EMSGSIZE; 615 return -EMSGSIZE;
631 616
632 if (iocb) 617 if (iocb)
@@ -711,8 +696,7 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
711 goto exit; 696 goto exit;
712 } 697 }
713 698
714 if ((total_len > (unsigned int)INT_MAX) || 699 if (total_len > (unsigned int)INT_MAX) {
715 (m->msg_iovlen > (unsigned int)INT_MAX)) {
716 res = -EMSGSIZE; 700 res = -EMSGSIZE;
717 goto exit; 701 goto exit;
718 } 702 }
@@ -1155,34 +1139,6 @@ static void tipc_data_ready(struct sock *sk, int len)
1155} 1139}
1156 1140
1157/** 1141/**
1158 * rx_queue_full - determine if receive queue can accept another message
1159 * @msg: message to be added to queue
1160 * @queue_size: current size of queue
1161 * @base: nominal maximum size of queue
1162 *
1163 * Returns 1 if queue is unable to accept message, 0 otherwise
1164 */
1165static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
1166{
1167 u32 threshold;
1168 u32 imp = msg_importance(msg);
1169
1170 if (imp == TIPC_LOW_IMPORTANCE)
1171 threshold = base;
1172 else if (imp == TIPC_MEDIUM_IMPORTANCE)
1173 threshold = base * 2;
1174 else if (imp == TIPC_HIGH_IMPORTANCE)
1175 threshold = base * 100;
1176 else
1177 return 0;
1178
1179 if (msg_connected(msg))
1180 threshold *= 4;
1181
1182 return queue_size >= threshold;
1183}
1184
1185/**
1186 * filter_connect - Handle all incoming messages for a connection-based socket 1142 * filter_connect - Handle all incoming messages for a connection-based socket
1187 * @tsock: TIPC socket 1143 * @tsock: TIPC socket
1188 * @msg: message 1144 * @msg: message
@@ -1260,6 +1216,36 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
1260} 1216}
1261 1217
1262/** 1218/**
1219 * rcvbuf_limit - get proper overload limit of socket receive queue
1220 * @sk: socket
1221 * @buf: message
1222 *
1223 * For all connection oriented messages, irrespective of importance,
1224 * the default overload value (i.e. 67MB) is set as limit.
1225 *
1226 * For all connectionless messages, by default new queue limits are
1227 * as belows:
1228 *
1229 * TIPC_LOW_IMPORTANCE (5MB)
1230 * TIPC_MEDIUM_IMPORTANCE (10MB)
1231 * TIPC_HIGH_IMPORTANCE (20MB)
1232 * TIPC_CRITICAL_IMPORTANCE (40MB)
1233 *
1234 * Returns overload limit according to corresponding message importance
1235 */
1236static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
1237{
1238 struct tipc_msg *msg = buf_msg(buf);
1239 unsigned int limit;
1240
1241 if (msg_connected(msg))
1242 limit = CONN_OVERLOAD_LIMIT;
1243 else
1244 limit = sk->sk_rcvbuf << (msg_importance(msg) + 5);
1245 return limit;
1246}
1247
1248/**
1263 * filter_rcv - validate incoming message 1249 * filter_rcv - validate incoming message
1264 * @sk: socket 1250 * @sk: socket
1265 * @buf: message 1251 * @buf: message
@@ -1275,7 +1261,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1275{ 1261{
1276 struct socket *sock = sk->sk_socket; 1262 struct socket *sock = sk->sk_socket;
1277 struct tipc_msg *msg = buf_msg(buf); 1263 struct tipc_msg *msg = buf_msg(buf);
1278 u32 recv_q_len; 1264 unsigned int limit = rcvbuf_limit(sk, buf);
1279 u32 res = TIPC_OK; 1265 u32 res = TIPC_OK;
1280 1266
1281 /* Reject message if it is wrong sort of message for socket */ 1267 /* Reject message if it is wrong sort of message for socket */
@@ -1292,15 +1278,13 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1292 } 1278 }
1293 1279
1294 /* Reject message if there isn't room to queue it */ 1280 /* Reject message if there isn't room to queue it */
1295 recv_q_len = skb_queue_len(&sk->sk_receive_queue); 1281 if (sk_rmem_alloc_get(sk) + buf->truesize >= limit)
1296 if (unlikely(recv_q_len >= (OVERLOAD_LIMIT_BASE / 2))) { 1282 return TIPC_ERR_OVERLOAD;
1297 if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE / 2))
1298 return TIPC_ERR_OVERLOAD;
1299 }
1300 1283
1301 /* Enqueue message (finally!) */ 1284 /* Enqueue message */
1302 TIPC_SKB_CB(buf)->handle = 0; 1285 TIPC_SKB_CB(buf)->handle = 0;
1303 __skb_queue_tail(&sk->sk_receive_queue, buf); 1286 __skb_queue_tail(&sk->sk_receive_queue, buf);
1287 skb_set_owner_r(buf, sk);
1304 1288
1305 sk->sk_data_ready(sk, 0); 1289 sk->sk_data_ready(sk, 0);
1306 return TIPC_OK; 1290 return TIPC_OK;
@@ -1349,7 +1333,7 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1349 if (!sock_owned_by_user(sk)) { 1333 if (!sock_owned_by_user(sk)) {
1350 res = filter_rcv(sk, buf); 1334 res = filter_rcv(sk, buf);
1351 } else { 1335 } else {
1352 if (sk_add_backlog(sk, buf, sk->sk_rcvbuf)) 1336 if (sk_add_backlog(sk, buf, rcvbuf_limit(sk, buf)))
1353 res = TIPC_ERR_OVERLOAD; 1337 res = TIPC_ERR_OVERLOAD;
1354 else 1338 else
1355 res = TIPC_OK; 1339 res = TIPC_OK;
@@ -1583,6 +1567,7 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
1583 } else { 1567 } else {
1584 __skb_dequeue(&sk->sk_receive_queue); 1568 __skb_dequeue(&sk->sk_receive_queue);
1585 __skb_queue_head(&new_sk->sk_receive_queue, buf); 1569 __skb_queue_head(&new_sk->sk_receive_queue, buf);
1570 skb_set_owner_r(buf, new_sk);
1586 } 1571 }
1587 release_sock(new_sk); 1572 release_sock(new_sk);
1588 1573
@@ -1637,7 +1622,7 @@ restart:
1637 case SS_DISCONNECTING: 1622 case SS_DISCONNECTING:
1638 1623
1639 /* Discard any unreceived messages */ 1624 /* Discard any unreceived messages */
1640 discard_rx_queue(sk); 1625 __skb_queue_purge(&sk->sk_receive_queue);
1641 1626
1642 /* Wake up anyone sleeping in poll */ 1627 /* Wake up anyone sleeping in poll */
1643 sk->sk_state_change(sk); 1628 sk->sk_state_change(sk);
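CONN_OVERLOAD_LIMIT replaces the old queue-length heuristic with a byte-based cap: the flow-control window counts messages, SKB_TRUESIZE() accounts for per-buffer overhead, and the product lands in the ~67 MB range quoted by the rcvbuf_limit() comment. A back-of-the-envelope check, assuming TIPC_FLOW_CONTROL_WIN is 512 and TIPC_MAX_USER_MSG_SIZE is 66000 (both values taken as assumptions here):

        #include <stdio.h>

        int main(void)
        {
                unsigned long win = 512, max_msg = 66000;
                /* SKB_TRUESIZE() adds struct and padding overhead; model
                 * it with a few hundred bytes, the real figure varies */
                unsigned long truesize = max_msg + 384;
                unsigned long limit = (2 * win + 1) * truesize;

                /* ~68 MB decimal, in the ballpark of the quoted 67 MB */
                printf("conn overload limit ~= %lu MB\n", limit / 1000000);
                return 0;
        }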
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0c612361c153..87d284289012 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2402,7 +2402,7 @@ static int __net_init unix_net_init(struct net *net)
2402 goto out; 2402 goto out;
2403 2403
2404#ifdef CONFIG_PROC_FS 2404#ifdef CONFIG_PROC_FS
2405 if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) { 2405 if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
2406 unix_sysctl_unregister(net); 2406 unix_sysctl_unregister(net);
2407 goto out; 2407 goto out;
2408 } 2408 }
@@ -2415,7 +2415,7 @@ out:
2415static void __net_exit unix_net_exit(struct net *net) 2415static void __net_exit unix_net_exit(struct net *net)
2416{ 2416{
2417 unix_sysctl_unregister(net); 2417 unix_sysctl_unregister(net);
2418 proc_net_remove(net, "unix"); 2418 remove_proc_entry("unix", net->proc_net);
2419} 2419}
2420 2420
2421static struct pernet_operations unix_net_ops = { 2421static struct pernet_operations unix_net_ops = {
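This is the same proc API migration repeated throughout the series: proc_net_fops_create(net, ...) becomes proc_create(name, mode, net->proc_net, fops) and proc_net_remove() becomes remove_proc_entry() on the same directory. The converted per-netns skeleton, sketched with placeholder names ("foo" and foo_seq_fops are not real symbols):

        static int __net_init foo_net_init(struct net *net)
        {
                if (!proc_create("foo", 0, net->proc_net, &foo_seq_fops))
                        return -ENOMEM;
                return 0;
        }

        static void __net_exit foo_net_exit(struct net *net)
        {
                remove_proc_entry("foo", net->proc_net);
        }

        static struct pernet_operations foo_net_ops = {
                .init = foo_net_init,
                .exit = foo_net_exit,
        };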
diff --git a/net/vmw_vsock/Kconfig b/net/vmw_vsock/Kconfig
new file mode 100644
index 000000000000..b5fa7e40cdcb
--- /dev/null
+++ b/net/vmw_vsock/Kconfig
@@ -0,0 +1,28 @@
1#
2# Vsock protocol
3#
4
5config VSOCKETS
6 tristate "Virtual Socket protocol"
7 help
8 Virtual Socket Protocol is a socket protocol similar to TCP/IP
 9 allowing communication between Virtual Machines and the hypervisor
10 or host.
11
12 You should also select one or more hypervisor-specific transports
13 below.
14
15 To compile this driver as a module, choose M here: the module
16 will be called vsock. If unsure, say N.
17
18config VMWARE_VMCI_VSOCKETS
19 tristate "VMware VMCI transport for Virtual Sockets"
20 depends on VSOCKETS && VMWARE_VMCI
21 help
22 This module implements a VMCI transport for Virtual Sockets.
23
24 Enable this transport if your Virtual Machine runs on a VMware
25 hypervisor.
26
27 To compile this driver as a module, choose M here: the module
28 will be called vmw_vsock_vmci_transport. If unsure, say N.
diff --git a/net/vmw_vsock/Makefile b/net/vmw_vsock/Makefile
new file mode 100644
index 000000000000..2ce52d70f224
--- /dev/null
+++ b/net/vmw_vsock/Makefile
@@ -0,0 +1,7 @@
1obj-$(CONFIG_VSOCKETS) += vsock.o
2obj-$(CONFIG_VMWARE_VMCI_VSOCKETS) += vmw_vsock_vmci_transport.o
3
4vsock-y += af_vsock.o vsock_addr.o
5
6vmw_vsock_vmci_transport-y += vmci_transport.o vmci_transport_notify.o \
7 vmci_transport_notify_qstate.o
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
new file mode 100644
index 000000000000..ca511c4f388a
--- /dev/null
+++ b/net/vmw_vsock/af_vsock.c
@@ -0,0 +1,2012 @@
1/*
2 * VMware vSockets Driver
3 *
4 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16/* Implementation notes:
17 *
18 * - There are two kinds of sockets: those created by user action (such as
19 * calling socket(2)) and those created by incoming connection request packets.
20 *
21 * - There are two "global" tables, one for bound sockets (sockets that have
22 * specified an address that they are responsible for) and one for connected
23 * sockets (sockets that have established a connection with another socket).
24 * These tables are "global" in that all sockets on the system are placed
25 * within them. - Note, though, that the bound table contains an extra entry
26 * for a list of unbound sockets and SOCK_DGRAM sockets will always remain in
27 * that list. The bound table is used solely for lookup of sockets when packets
28 * are received and that's not necessary for SOCK_DGRAM sockets since we create
29 * a datagram handle for each and need not perform a lookup. Keeping SOCK_DGRAM
30 * sockets out of the bound hash buckets will reduce the chance of collisions
31 * when looking for SOCK_STREAM sockets and prevents us from having to check the
32 * socket type in the hash table lookups.
33 *
34 * - Sockets created by user action will either be "client" sockets that
35 * initiate a connection or "server" sockets that listen for connections; we do
36 * not support simultaneous connects (two "client" sockets connecting).
37 *
38 * - "Server" sockets are referred to as listener sockets throughout this
39 * implementation because they are in the SS_LISTEN state. When a connection
40 * request is received (the second kind of socket mentioned above), we create a
41 * new socket and refer to it as a pending socket. These pending sockets are
42 * placed on the pending connection list of the listener socket. When future
43 * packets are received for the address the listener socket is bound to, we
44 * check if the source of the packet is from one that has an existing pending
45 * connection. If it does, we process the packet for the pending socket. When
46 * that socket reaches the connected state, it is removed from the listener
47 * socket's pending list and enqueued in the listener socket's accept queue.
48 * Callers of accept(2) will accept connected sockets from the listener socket's
49 * accept queue. If the socket cannot be accepted for some reason then it is
50 * marked rejected. Once the connection is accepted, it is owned by the user
51 * process and the responsibility for cleanup falls with that user process.
52 *
53 * - It is possible that these pending sockets will never reach the connected
54 * state; in fact, we may never receive another packet after the connection
55 * request. Because of this, we must schedule a cleanup function to run in the
56 * future, after some amount of time passes where a connection should have been
57 * established. This function ensures that the socket is off all lists so it
58 * cannot be retrieved, then drops all references to the socket so it is cleaned
59 * up (sock_put() -> sk_free() -> our sk_destruct implementation). Note this
60 * function will also cleanup rejected sockets, those that reach the connected
61 * state but leave it before they have been accepted.
62 *
63 * - Sockets created by user action will be cleaned up when the user process
64 * calls close(2), causing our release implementation to be called. Our release
65 * implementation will perform some cleanup then drop the last reference so our
66 * sk_destruct implementation is invoked. Our sk_destruct implementation will
67 * perform additional cleanup that's common for both types of sockets.
68 *
69 * - A socket's reference count is what ensures that the structure won't be
70 * freed. Each entry in a list (such as the "global" bound and connected tables
71 * and the listener socket's pending list and connected queue) ensures a
72 * reference. When we defer work until process context and pass a socket as our
73 * argument, we must ensure the reference count is increased to ensure the
74 * socket isn't freed before the function is run; the deferred function will
75 * then drop the reference.
76 */
77
78#include <linux/types.h>
79#include <linux/bitops.h>
80#include <linux/cred.h>
81#include <linux/init.h>
82#include <linux/io.h>
83#include <linux/kernel.h>
84#include <linux/kmod.h>
85#include <linux/list.h>
86#include <linux/miscdevice.h>
87#include <linux/module.h>
88#include <linux/mutex.h>
89#include <linux/net.h>
90#include <linux/poll.h>
91#include <linux/skbuff.h>
92#include <linux/smp.h>
93#include <linux/socket.h>
94#include <linux/stddef.h>
95#include <linux/unistd.h>
96#include <linux/wait.h>
97#include <linux/workqueue.h>
98#include <net/sock.h>
99
100#include "af_vsock.h"
101
102static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
103static void vsock_sk_destruct(struct sock *sk);
104static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
105
106/* Protocol family. */
107static struct proto vsock_proto = {
108 .name = "AF_VSOCK",
109 .owner = THIS_MODULE,
110 .obj_size = sizeof(struct vsock_sock),
111};
112
113/* The default peer timeout indicates how long we will wait for a peer response
114 * to a control message.
115 */
116#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)
117
118#define SS_LISTEN 255
119
120static const struct vsock_transport *transport;
121static DEFINE_MUTEX(vsock_register_mutex);
122
123/**** EXPORTS ****/
124
125/* Get the ID of the local context. This is transport dependent. */
126
127int vm_sockets_get_local_cid(void)
128{
129 return transport->get_local_cid();
130}
131EXPORT_SYMBOL_GPL(vm_sockets_get_local_cid);
132
133/**** UTILS ****/
134
135/* Each bound VSocket is stored in the bind hash table and each connected
136 * VSocket is stored in the connected hash table.
137 *
138 * Unbound sockets are all put on the same list attached to the end of the hash
139 * table (vsock_unbound_sockets). Bound sockets are added to the hash table in
140 * the bucket that their local address hashes to (vsock_bound_sockets(addr)
141 * represents the list that addr hashes to).
142 *
143 * Specifically, we initialize the vsock_bind_table array to a size of
144 * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
145 * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
146 * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets. The hash function
147 * mods with VSOCK_HASH_SIZE - 1 to ensure this.
148 */
149#define VSOCK_HASH_SIZE 251
150#define MAX_PORT_RETRIES 24
151
152#define VSOCK_HASH(addr) ((addr)->svm_port % (VSOCK_HASH_SIZE - 1))
153#define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
154#define vsock_unbound_sockets (&vsock_bind_table[VSOCK_HASH_SIZE])
155
156/* XXX This can probably be implemented in a better way. */
157#define VSOCK_CONN_HASH(src, dst) \
158 (((src)->svm_cid ^ (dst)->svm_port) % (VSOCK_HASH_SIZE - 1))
159#define vsock_connected_sockets(src, dst) \
160 (&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
161#define vsock_connected_sockets_vsk(vsk) \
162 vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)
163
164static struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
165static struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
166static DEFINE_SPINLOCK(vsock_table_lock);
167
168static __init void vsock_init_tables(void)
169{
170 int i;
171
172 for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++)
173 INIT_LIST_HEAD(&vsock_bind_table[i]);
174
175 for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
176 INIT_LIST_HEAD(&vsock_connected_table[i]);
177}
178
179static void __vsock_insert_bound(struct list_head *list,
180 struct vsock_sock *vsk)
181{
182 sock_hold(&vsk->sk);
183 list_add(&vsk->bound_table, list);
184}
185
186static void __vsock_insert_connected(struct list_head *list,
187 struct vsock_sock *vsk)
188{
189 sock_hold(&vsk->sk);
190 list_add(&vsk->connected_table, list);
191}
192
193static void __vsock_remove_bound(struct vsock_sock *vsk)
194{
195 list_del_init(&vsk->bound_table);
196 sock_put(&vsk->sk);
197}
198
199static void __vsock_remove_connected(struct vsock_sock *vsk)
200{
201 list_del_init(&vsk->connected_table);
202 sock_put(&vsk->sk);
203}
204
205static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
206{
207 struct vsock_sock *vsk;
208
209 list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table)
210 if (vsock_addr_equals_addr_any(addr, &vsk->local_addr))
211 return sk_vsock(vsk);
212
213 return NULL;
214}
215
216static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
217 struct sockaddr_vm *dst)
218{
219 struct vsock_sock *vsk;
220
221 list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
222 connected_table) {
223 if (vsock_addr_equals_addr(src, &vsk->remote_addr)
224 && vsock_addr_equals_addr(dst, &vsk->local_addr)) {
225 return sk_vsock(vsk);
226 }
227 }
228
229 return NULL;
230}
231
232static bool __vsock_in_bound_table(struct vsock_sock *vsk)
233{
234 return !list_empty(&vsk->bound_table);
235}
236
237static bool __vsock_in_connected_table(struct vsock_sock *vsk)
238{
239 return !list_empty(&vsk->connected_table);
240}
241
242static void vsock_insert_unbound(struct vsock_sock *vsk)
243{
244 spin_lock_bh(&vsock_table_lock);
245 __vsock_insert_bound(vsock_unbound_sockets, vsk);
246 spin_unlock_bh(&vsock_table_lock);
247}
248
249void vsock_insert_connected(struct vsock_sock *vsk)
250{
251 struct list_head *list = vsock_connected_sockets(
252 &vsk->remote_addr, &vsk->local_addr);
253
254 spin_lock_bh(&vsock_table_lock);
255 __vsock_insert_connected(list, vsk);
256 spin_unlock_bh(&vsock_table_lock);
257}
258EXPORT_SYMBOL_GPL(vsock_insert_connected);
259
260void vsock_remove_bound(struct vsock_sock *vsk)
261{
262 spin_lock_bh(&vsock_table_lock);
263 __vsock_remove_bound(vsk);
264 spin_unlock_bh(&vsock_table_lock);
265}
266EXPORT_SYMBOL_GPL(vsock_remove_bound);
267
268void vsock_remove_connected(struct vsock_sock *vsk)
269{
270 spin_lock_bh(&vsock_table_lock);
271 __vsock_remove_connected(vsk);
272 spin_unlock_bh(&vsock_table_lock);
273}
274EXPORT_SYMBOL_GPL(vsock_remove_connected);
275
276struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr)
277{
278 struct sock *sk;
279
280 spin_lock_bh(&vsock_table_lock);
281 sk = __vsock_find_bound_socket(addr);
282 if (sk)
283 sock_hold(sk);
284
285 spin_unlock_bh(&vsock_table_lock);
286
287 return sk;
288}
289EXPORT_SYMBOL_GPL(vsock_find_bound_socket);
290
291struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
292 struct sockaddr_vm *dst)
293{
294 struct sock *sk;
295
296 spin_lock_bh(&vsock_table_lock);
297 sk = __vsock_find_connected_socket(src, dst);
298 if (sk)
299 sock_hold(sk);
300
301 spin_unlock_bh(&vsock_table_lock);
302
303 return sk;
304}
305EXPORT_SYMBOL_GPL(vsock_find_connected_socket);
306
307static bool vsock_in_bound_table(struct vsock_sock *vsk)
308{
309 bool ret;
310
311 spin_lock_bh(&vsock_table_lock);
312 ret = __vsock_in_bound_table(vsk);
313 spin_unlock_bh(&vsock_table_lock);
314
315 return ret;
316}
317
318static bool vsock_in_connected_table(struct vsock_sock *vsk)
319{
320 bool ret;
321
322 spin_lock_bh(&vsock_table_lock);
323 ret = __vsock_in_connected_table(vsk);
324 spin_unlock_bh(&vsock_table_lock);
325
326 return ret;
327}
328
329void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
330{
331 int i;
332
333 spin_lock_bh(&vsock_table_lock);
334
335 for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
336 struct vsock_sock *vsk;
337 list_for_each_entry(vsk, &vsock_connected_table[i],
 338 connected_table)
 339 fn(sk_vsock(vsk));
340 }
341
342 spin_unlock_bh(&vsock_table_lock);
343}
344EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket);
345
346void vsock_add_pending(struct sock *listener, struct sock *pending)
347{
348 struct vsock_sock *vlistener;
349 struct vsock_sock *vpending;
350
351 vlistener = vsock_sk(listener);
352 vpending = vsock_sk(pending);
353
354 sock_hold(pending);
355 sock_hold(listener);
356 list_add_tail(&vpending->pending_links, &vlistener->pending_links);
357}
358EXPORT_SYMBOL_GPL(vsock_add_pending);
359
360void vsock_remove_pending(struct sock *listener, struct sock *pending)
361{
362 struct vsock_sock *vpending = vsock_sk(pending);
363
364 list_del_init(&vpending->pending_links);
365 sock_put(listener);
366 sock_put(pending);
367}
368EXPORT_SYMBOL_GPL(vsock_remove_pending);
369
370void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
371{
372 struct vsock_sock *vlistener;
373 struct vsock_sock *vconnected;
374
375 vlistener = vsock_sk(listener);
376 vconnected = vsock_sk(connected);
377
378 sock_hold(connected);
379 sock_hold(listener);
380 list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue);
381}
382EXPORT_SYMBOL_GPL(vsock_enqueue_accept);
383
384static struct sock *vsock_dequeue_accept(struct sock *listener)
385{
386 struct vsock_sock *vlistener;
387 struct vsock_sock *vconnected;
388
389 vlistener = vsock_sk(listener);
390
391 if (list_empty(&vlistener->accept_queue))
392 return NULL;
393
394 vconnected = list_entry(vlistener->accept_queue.next,
395 struct vsock_sock, accept_queue);
396
397 list_del_init(&vconnected->accept_queue);
398 sock_put(listener);
399 /* The caller will need a reference on the connected socket, so we
400 * leave ours in place and let the caller drop it with sock_put().
401 */
402
403 return sk_vsock(vconnected);
404}
405
406static bool vsock_is_accept_queue_empty(struct sock *sk)
407{
408 struct vsock_sock *vsk = vsock_sk(sk);
409 return list_empty(&vsk->accept_queue);
410}
411
412static bool vsock_is_pending(struct sock *sk)
413{
414 struct vsock_sock *vsk = vsock_sk(sk);
415 return !list_empty(&vsk->pending_links);
416}
417
418static int vsock_send_shutdown(struct sock *sk, int mode)
419{
420 return transport->shutdown(vsock_sk(sk), mode);
421}
422
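/* Deferred cleanup for sockets created in response to connection requests:
 * drop sockets that are still pending or that accept() rejected, and
 * release the references taken when the work was scheduled.
 */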
423void vsock_pending_work(struct work_struct *work)
424{
425 struct sock *sk;
426 struct sock *listener;
427 struct vsock_sock *vsk;
428 bool cleanup;
429
430 vsk = container_of(work, struct vsock_sock, dwork.work);
431 sk = sk_vsock(vsk);
432 listener = vsk->listener;
433 cleanup = true;
434
435 lock_sock(listener);
436 lock_sock(sk);
437
438 if (vsock_is_pending(sk)) {
439 vsock_remove_pending(listener, sk);
440 } else if (!vsk->rejected) {
441 /* We are not on the pending list and accept() did not reject
442 * us, so we must have been accepted by our user process. We
443 * just need to drop our references to the sockets and be on
444 * our way.
445 */
446 cleanup = false;
447 goto out;
448 }
449
450 listener->sk_ack_backlog--;
451
452 /* We need to remove ourselves from the global connected sockets list so
453 * incoming packets can't find this socket, and to reduce the reference
454 * count.
455 */
456 if (vsock_in_connected_table(vsk))
457 vsock_remove_connected(vsk);
458
459 sk->sk_state = SS_FREE;
460
461out:
462 release_sock(sk);
463 release_sock(listener);
464 if (cleanup)
465 sock_put(sk);
466
467 sock_put(sk);
468 sock_put(listener);
469}
470EXPORT_SYMBOL_GPL(vsock_pending_work);
471
472/**** SOCKET OPERATIONS ****/
473
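/* Binding a stream socket to VMADDR_PORT_ANY assigns an ephemeral port
 * above LAST_RESERVED_PORT, probing at most MAX_PORT_RETRIES candidates
 * before failing with -EADDRNOTAVAIL.
 */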
474static int __vsock_bind_stream(struct vsock_sock *vsk,
475 struct sockaddr_vm *addr)
476{
477 static u32 port = LAST_RESERVED_PORT + 1;
478 struct sockaddr_vm new_addr;
479
480 vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);
481
482 if (addr->svm_port == VMADDR_PORT_ANY) {
483 bool found = false;
484 unsigned int i;
485
486 for (i = 0; i < MAX_PORT_RETRIES; i++) {
487 if (port <= LAST_RESERVED_PORT)
488 port = LAST_RESERVED_PORT + 1;
489
490 new_addr.svm_port = port++;
491
492 if (!__vsock_find_bound_socket(&new_addr)) {
493 found = true;
494 break;
495 }
496 }
497
498 if (!found)
499 return -EADDRNOTAVAIL;
500 } else {
501 /* If port is in reserved range, ensure caller
502 * has necessary privileges.
503 */
504 if (addr->svm_port <= LAST_RESERVED_PORT &&
505 !capable(CAP_NET_BIND_SERVICE)) {
506 return -EACCES;
507 }
508
509 if (__vsock_find_bound_socket(&new_addr))
510 return -EADDRINUSE;
511 }
512
513 vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port);
514
515 /* Remove stream sockets from the unbound list and add them to the hash
516 * table for easy lookup by their address. The unbound list is simply an
517 * extra entry at the end of the hash table, a trick used by AF_UNIX.
518 */
519 __vsock_remove_bound(vsk);
520 __vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk);
521
522 return 0;
523}
524
525static int __vsock_bind_dgram(struct vsock_sock *vsk,
526 struct sockaddr_vm *addr)
527{
528 return transport->dgram_bind(vsk, addr);
529}
530
531static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
532{
533 struct vsock_sock *vsk = vsock_sk(sk);
534 u32 cid;
535 int retval;
536
537 /* First ensure this socket isn't already bound. */
538 if (vsock_addr_bound(&vsk->local_addr))
539 return -EINVAL;
540
541 /* Now bind to the provided address or select appropriate values if
542 * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY). Note that,
543 * just as AF_INET prevents binding to a non-local IP address (in most
544 * cases), we only allow binding to the local CID.
545 */
546 cid = transport->get_local_cid();
547 if (addr->svm_cid != cid && addr->svm_cid != VMADDR_CID_ANY)
548 return -EADDRNOTAVAIL;
549
550 switch (sk->sk_socket->type) {
551 case SOCK_STREAM:
552 spin_lock_bh(&vsock_table_lock);
553 retval = __vsock_bind_stream(vsk, addr);
554 spin_unlock_bh(&vsock_table_lock);
555 break;
556
557 case SOCK_DGRAM:
558 retval = __vsock_bind_dgram(vsk, addr);
559 break;
560
561 default:
562 retval = -EINVAL;
563 break;
564 }
565
566 return retval;
567}
568
569struct sock *__vsock_create(struct net *net,
570 struct socket *sock,
571 struct sock *parent,
572 gfp_t priority,
573 unsigned short type)
574{
575 struct sock *sk;
576 struct vsock_sock *psk;
577 struct vsock_sock *vsk;
578
579 sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto);
580 if (!sk)
581 return NULL;
582
583 sock_init_data(sock, sk);
584
585 /* sk->sk_type is normally set in sock_init_data, but only if sock is
586 * non-NULL. We make sure that our sockets always have a type by
587 * setting it here if needed.
588 */
589 if (!sock)
590 sk->sk_type = type;
591
592 vsk = vsock_sk(sk);
593 vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
594 vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
595
596 sk->sk_destruct = vsock_sk_destruct;
597 sk->sk_backlog_rcv = vsock_queue_rcv_skb;
598 sk->sk_state = 0;
599 sock_reset_flag(sk, SOCK_DONE);
600
601 INIT_LIST_HEAD(&vsk->bound_table);
602 INIT_LIST_HEAD(&vsk->connected_table);
603 vsk->listener = NULL;
604 INIT_LIST_HEAD(&vsk->pending_links);
605 INIT_LIST_HEAD(&vsk->accept_queue);
606 vsk->rejected = false;
607 vsk->sent_request = false;
608 vsk->ignore_connecting_rst = false;
609 vsk->peer_shutdown = 0;
610
611 psk = parent ? vsock_sk(parent) : NULL;
612 if (parent) {
613 vsk->trusted = psk->trusted;
614 vsk->owner = get_cred(psk->owner);
615 vsk->connect_timeout = psk->connect_timeout;
616 } else {
617 vsk->trusted = capable(CAP_NET_ADMIN);
618 vsk->owner = get_current_cred();
619 vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
620 }
621
622 if (transport->init(vsk, psk) < 0) {
623 sk_free(sk);
624 return NULL;
625 }
626
627 if (sock)
628 vsock_insert_unbound(vsk);
629
630 return sk;
631}
632EXPORT_SYMBOL_GPL(__vsock_create);
633
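/* Tear a socket down: unhook it from the global tables, let the transport
 * release its resources, flush the receive queue, and recursively release
 * any child sockets that were never accepted.
 */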
634static void __vsock_release(struct sock *sk)
635{
636 if (sk) {
637 struct sk_buff *skb;
638 struct sock *pending;
639 struct vsock_sock *vsk;
640
641 vsk = vsock_sk(sk);
642 pending = NULL; /* Compiler warning. */
643
644 if (vsock_in_bound_table(vsk))
645 vsock_remove_bound(vsk);
646
647 if (vsock_in_connected_table(vsk))
648 vsock_remove_connected(vsk);
649
650 transport->release(vsk);
651
652 lock_sock(sk);
653 sock_orphan(sk);
654 sk->sk_shutdown = SHUTDOWN_MASK;
655
656 while ((skb = skb_dequeue(&sk->sk_receive_queue)))
657 kfree_skb(skb);
658
659 /* Clean up any sockets that never were accepted. */
660 while ((pending = vsock_dequeue_accept(sk)) != NULL) {
661 __vsock_release(pending);
662 sock_put(pending);
663 }
664
665 release_sock(sk);
666 sock_put(sk);
667 }
668}
669
670static void vsock_sk_destruct(struct sock *sk)
671{
672 struct vsock_sock *vsk = vsock_sk(sk);
673
674 transport->destruct(vsk);
675
676 /* When clearing these addresses, there's no need to set the family and
677 * possibly register the address family with the kernel.
678 */
679 vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
680 vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
681
682 put_cred(vsk->owner);
683}
684
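/* Backlog receive handler: queue the skb on the socket's receive queue,
 * freeing it if queueing fails (e.g. the receive buffer is full).
 */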
685static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
686{
687 int err;
688
689 err = sock_queue_rcv_skb(sk, skb);
690 if (err)
691 kfree_skb(skb);
692
693 return err;
694}
695
696s64 vsock_stream_has_data(struct vsock_sock *vsk)
697{
698 return transport->stream_has_data(vsk);
699}
700EXPORT_SYMBOL_GPL(vsock_stream_has_data);
701
702s64 vsock_stream_has_space(struct vsock_sock *vsk)
703{
704 return transport->stream_has_space(vsk);
705}
706EXPORT_SYMBOL_GPL(vsock_stream_has_space);
707
708static int vsock_release(struct socket *sock)
709{
710 __vsock_release(sock->sk);
711 sock->sk = NULL;
712 sock->state = SS_FREE;
713
714 return 0;
715}
716
717static int
718vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
719{
720 int err;
721 struct sock *sk;
722 struct sockaddr_vm *vm_addr;
723
724 sk = sock->sk;
725
726 if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0)
727 return -EINVAL;
728
729 lock_sock(sk);
730 err = __vsock_bind(sk, vm_addr);
731 release_sock(sk);
732
733 return err;
734}
735
736static int vsock_getname(struct socket *sock,
737 struct sockaddr *addr, int *addr_len, int peer)
738{
739 int err;
740 struct sock *sk;
741 struct vsock_sock *vsk;
742 struct sockaddr_vm *vm_addr;
743
744 sk = sock->sk;
745 vsk = vsock_sk(sk);
746 err = 0;
747
748 lock_sock(sk);
749
750 if (peer) {
751 if (sock->state != SS_CONNECTED) {
752 err = -ENOTCONN;
753 goto out;
754 }
755 vm_addr = &vsk->remote_addr;
756 } else {
757 vm_addr = &vsk->local_addr;
758 }
759
760 if (!vm_addr) {
761 err = -EINVAL;
762 goto out;
763 }
764
765 /* sys_getsockname() and sys_getpeername() pass us a
766 * MAX_SOCK_ADDR-sized buffer and don't set addr_len. Unfortunately
767 * that macro is defined in socket.c instead of .h, so we hardcode its
768 * value here.
769 */
770 BUILD_BUG_ON(sizeof(*vm_addr) > 128);
771 memcpy(addr, vm_addr, sizeof(*vm_addr));
772 *addr_len = sizeof(*vm_addr);
773
774out:
775 release_sock(sk);
776 return err;
777}
778
779static int vsock_shutdown(struct socket *sock, int mode)
780{
781 int err;
782 struct sock *sk;
783
784 /* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
785 * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
786 * here like the other address families do. Note also that the
787 * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
788 * which is what we want.
789 */
790 mode++;
791
792 if ((mode & ~SHUTDOWN_MASK) || !mode)
793 return -EINVAL;
794
795 /* If this is a STREAM socket and it is not connected then bail out
796 * immediately. If it is a DGRAM socket then we must first kick the
797 * socket so that it wakes up from any sleeping calls, for example
798 * recv(), and then afterwards return the error.
799 */
800
801 sk = sock->sk;
802 if (sock->state == SS_UNCONNECTED) {
803 err = -ENOTCONN;
804 if (sk->sk_type == SOCK_STREAM)
805 return err;
806 } else {
807 sock->state = SS_DISCONNECTING;
808 err = 0;
809 }
810
811 /* Receive and send shutdowns are treated alike. */
812 mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
813 if (mode) {
814 lock_sock(sk);
815 sk->sk_shutdown |= mode;
816 sk->sk_state_change(sk);
817 release_sock(sk);
818
819 if (sk->sk_type == SOCK_STREAM) {
820 sock_reset_flag(sk, SOCK_DONE);
821 vsock_send_shutdown(sk, mode);
822 }
823 }
824
825 return err;
826}
827
828static unsigned int vsock_poll(struct file *file, struct socket *sock,
829 poll_table *wait)
830{
831 struct sock *sk;
832 unsigned int mask;
833 struct vsock_sock *vsk;
834
835 sk = sock->sk;
836 vsk = vsock_sk(sk);
837
838 poll_wait(file, sk_sleep(sk), wait);
839 mask = 0;
840
841 if (sk->sk_err)
842 /* Signify that there has been an error on this socket. */
843 mask |= POLLERR;
844
845 /* INET sockets treat local write shutdown and peer write shutdown as a
846 * case where POLLHUP is set.
847 */
848 if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
849 ((sk->sk_shutdown & SEND_SHUTDOWN) &&
850 (vsk->peer_shutdown & SEND_SHUTDOWN))) {
851 mask |= POLLHUP;
852 }
853
854 if (sk->sk_shutdown & RCV_SHUTDOWN ||
855 vsk->peer_shutdown & SEND_SHUTDOWN) {
856 mask |= POLLRDHUP;
857 }
858
859 if (sock->type == SOCK_DGRAM) {
860 /* For datagram sockets we can read if there is something in
861 * the queue and write as long as the socket isn't shutdown for
862 * sending.
863 */
864 if (!skb_queue_empty(&sk->sk_receive_queue) ||
865 (sk->sk_shutdown & RCV_SHUTDOWN)) {
866 mask |= POLLIN | POLLRDNORM;
867 }
868
869 if (!(sk->sk_shutdown & SEND_SHUTDOWN))
870 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
871
872 } else if (sock->type == SOCK_STREAM) {
873 lock_sock(sk);
874
875 /* Listening sockets that have connections in their accept
876 * queue can be read.
877 */
878 if (sk->sk_state == SS_LISTEN &&
879 !vsock_is_accept_queue_empty(sk))
880 mask |= POLLIN | POLLRDNORM;
881
882 /* If there is something in the queue then we can read. */
883 if (transport->stream_is_active(vsk) &&
884 !(sk->sk_shutdown & RCV_SHUTDOWN)) {
885 bool data_ready_now = false;
886 int ret = transport->notify_poll_in(
887 vsk, 1, &data_ready_now);
888 if (ret < 0) {
889 mask |= POLLERR;
890 } else {
891 if (data_ready_now)
892 mask |= POLLIN | POLLRDNORM;
893
894 }
895 }
896
897 /* Sockets whose connections have been closed, reset, or
898 * terminated should also be considered read, and we check the
899 * shutdown flag for that.
900 */
901 if (sk->sk_shutdown & RCV_SHUTDOWN ||
902 vsk->peer_shutdown & SEND_SHUTDOWN) {
903 mask |= POLLIN | POLLRDNORM;
904 }
905
906 /* Connected sockets that can produce data can be written. */
907 if (sk->sk_state == SS_CONNECTED) {
908 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
909 bool space_avail_now = false;
910 int ret = transport->notify_poll_out(
911 vsk, 1, &space_avail_now);
912 if (ret < 0) {
913 mask |= POLLERR;
914 } else {
915 if (space_avail_now)
916 /* Remove POLLWRBAND since INET
917 * sockets are not setting it.
918 */
919 mask |= POLLOUT | POLLWRNORM;
920
921 }
922 }
923 }
924
925 /* Simulate INET socket poll behavior, which sets
926 * POLLOUT|POLLWRNORM when the peer is closed and there is
927 * nothing to read, but local send is not shut down.
928 */
929 if (sk->sk_state == SS_UNCONNECTED) {
930 if (!(sk->sk_shutdown & SEND_SHUTDOWN))
931 mask |= POLLOUT | POLLWRNORM;
932
933 }
934
935 release_sock(sk);
936 }
937
938 return mask;
939}
940
941static int vsock_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
942 struct msghdr *msg, size_t len)
943{
944 int err;
945 struct sock *sk;
946 struct vsock_sock *vsk;
947 struct sockaddr_vm *remote_addr;
948
949 if (msg->msg_flags & MSG_OOB)
950 return -EOPNOTSUPP;
951
952 /* For now, MSG_DONTWAIT is always assumed... */
953 err = 0;
954 sk = sock->sk;
955 vsk = vsock_sk(sk);
956
957 lock_sock(sk);
958
959 if (!vsock_addr_bound(&vsk->local_addr)) {
960 struct sockaddr_vm local_addr;
961
962 vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
963 err = __vsock_bind(sk, &local_addr);
964 if (err != 0)
965 goto out;
966
967 }
968
969 /* If the provided message contains an address, use that. Otherwise
970 * fall back on the socket's remote handle (if it has been connected).
971 */
972 if (msg->msg_name &&
973 vsock_addr_cast(msg->msg_name, msg->msg_namelen,
974 &remote_addr) == 0) {
975 /* Ensure this address is of the right type and is a valid
976 * destination.
977 */
978
979 if (remote_addr->svm_cid == VMADDR_CID_ANY)
980 remote_addr->svm_cid = transport->get_local_cid();
981
982 if (!vsock_addr_bound(remote_addr)) {
983 err = -EINVAL;
984 goto out;
985 }
986 } else if (sock->state == SS_CONNECTED) {
987 remote_addr = &vsk->remote_addr;
988
989 if (remote_addr->svm_cid == VMADDR_CID_ANY)
990 remote_addr->svm_cid = transport->get_local_cid();
991
992 /* XXX Should connect() or this function ensure remote_addr is
993 * bound?
994 */
995 if (!vsock_addr_bound(&vsk->remote_addr)) {
996 err = -EINVAL;
997 goto out;
998 }
999 } else {
1000 err = -EINVAL;
1001 goto out;
1002 }
1003
1004 if (!transport->dgram_allow(remote_addr->svm_cid,
1005 remote_addr->svm_port)) {
1006 err = -EINVAL;
1007 goto out;
1008 }
1009
1010 err = transport->dgram_enqueue(vsk, remote_addr, msg->msg_iov, len);
1011
1012out:
1013 release_sock(sk);
1014 return err;
1015}
1016
1017static int vsock_dgram_connect(struct socket *sock,
1018 struct sockaddr *addr, int addr_len, int flags)
1019{
1020 int err;
1021 struct sock *sk;
1022 struct vsock_sock *vsk;
1023 struct sockaddr_vm *remote_addr;
1024
1025 sk = sock->sk;
1026 vsk = vsock_sk(sk);
1027
1028 err = vsock_addr_cast(addr, addr_len, &remote_addr);
1029 if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
1030 lock_sock(sk);
1031 vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY,
1032 VMADDR_PORT_ANY);
1033 sock->state = SS_UNCONNECTED;
1034 release_sock(sk);
1035 return 0;
1036 } else if (err != 0)
1037 return -EINVAL;
1038
1039 lock_sock(sk);
1040
1041 if (!vsock_addr_bound(&vsk->local_addr)) {
1042 struct sockaddr_vm local_addr;
1043
1044 vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
1045 err = __vsock_bind(sk, &local_addr);
1046 if (err != 0)
1047 goto out;
1048
1049 }
1050
1051 if (!transport->dgram_allow(remote_addr->svm_cid,
1052 remote_addr->svm_port)) {
1053 err = -EINVAL;
1054 goto out;
1055 }
1056
1057 memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
1058 sock->state = SS_CONNECTED;
1059
1060out:
1061 release_sock(sk);
1062 return err;
1063}
1064
1065static int vsock_dgram_recvmsg(struct kiocb *kiocb, struct socket *sock,
1066 struct msghdr *msg, size_t len, int flags)
1067{
1068 return transport->dgram_dequeue(kiocb, vsock_sk(sock->sk), msg, len,
1069 flags);
1070}
1071
1072static const struct proto_ops vsock_dgram_ops = {
1073 .family = PF_VSOCK,
1074 .owner = THIS_MODULE,
1075 .release = vsock_release,
1076 .bind = vsock_bind,
1077 .connect = vsock_dgram_connect,
1078 .socketpair = sock_no_socketpair,
1079 .accept = sock_no_accept,
1080 .getname = vsock_getname,
1081 .poll = vsock_poll,
1082 .ioctl = sock_no_ioctl,
1083 .listen = sock_no_listen,
1084 .shutdown = vsock_shutdown,
1085 .setsockopt = sock_no_setsockopt,
1086 .getsockopt = sock_no_getsockopt,
1087 .sendmsg = vsock_dgram_sendmsg,
1088 .recvmsg = vsock_dgram_recvmsg,
1089 .mmap = sock_no_mmap,
1090 .sendpage = sock_no_sendpage,
1091};
1092
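/* Delayed-work callback armed by a non-blocking connect(). It fails the
 * connection with ETIMEDOUT if it has not completed in time, then drops
 * the socket reference taken when the work was scheduled.
 */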
1093static void vsock_connect_timeout(struct work_struct *work)
1094{
1095 struct sock *sk;
1096 struct vsock_sock *vsk;
1097
1098 vsk = container_of(work, struct vsock_sock, dwork.work);
1099 sk = sk_vsock(vsk);
1100
1101 lock_sock(sk);
1102 if (sk->sk_state == SS_CONNECTING &&
1103 (sk->sk_shutdown != SHUTDOWN_MASK)) {
1104 sk->sk_state = SS_UNCONNECTED;
1105 sk->sk_err = ETIMEDOUT;
1106 sk->sk_error_report(sk);
1107 }
1108 release_sock(sk);
1109
1110 sock_put(sk);
1111}
1112
1113static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
1114 int addr_len, int flags)
1115{
1116 int err;
1117 struct sock *sk;
1118 struct vsock_sock *vsk;
1119 struct sockaddr_vm *remote_addr;
1120 long timeout;
1121 DEFINE_WAIT(wait);
1122
1123 err = 0;
1124 sk = sock->sk;
1125 vsk = vsock_sk(sk);
1126
1127 lock_sock(sk);
1128
1129 /* XXX AF_UNSPEC should make us disconnect like AF_INET. */
1130 switch (sock->state) {
1131 case SS_CONNECTED:
1132 err = -EISCONN;
1133 goto out;
1134 case SS_DISCONNECTING:
1135 err = -EINVAL;
1136 goto out;
1137 case SS_CONNECTING:
1138 /* This continues on so we can move sock into the SS_CONNECTED
1139 * state once the connection has completed (at which point err
1140 * will be set to zero also). Otherwise, we will either wait
1141 * for the connection or return -EALREADY should this be a
1142 * non-blocking call.
1143 */
1144 err = -EALREADY;
1145 break;
1146 default:
1147 if ((sk->sk_state == SS_LISTEN) ||
1148 vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
1149 err = -EINVAL;
1150 goto out;
1151 }
1152
1153 /* The hypervisor and well-known contexts do not have socket
1154 * endpoints.
1155 */
1156 if (!transport->stream_allow(remote_addr->svm_cid,
1157 remote_addr->svm_port)) {
1158 err = -ENETUNREACH;
1159 goto out;
1160 }
1161
1162 /* Set the remote address that we are connecting to. */
1163 memcpy(&vsk->remote_addr, remote_addr,
1164 sizeof(vsk->remote_addr));
1165
1166 /* Autobind this socket to the local address if necessary. */
1167 if (!vsock_addr_bound(&vsk->local_addr)) {
1168 struct sockaddr_vm local_addr;
1169
1170 vsock_addr_init(&local_addr, VMADDR_CID_ANY,
1171 VMADDR_PORT_ANY);
1172 err = __vsock_bind(sk, &local_addr);
1173 if (err != 0)
1174 goto out;
1175
1176 }
1177
1178 sk->sk_state = SS_CONNECTING;
1179
1180 err = transport->connect(vsk);
1181 if (err < 0)
1182 goto out;
1183
1184 /* Mark sock as connecting and set the error code to in
1185 * progress in case this is a non-blocking connect.
1186 */
1187 sock->state = SS_CONNECTING;
1188 err = -EINPROGRESS;
1189 }
1190
1191 /* The receive path will handle all communication until we are able to
1192 * enter the connected state. Here we wait for the connection to be
1193 * completed or a notification of an error.
1194 */
1195 timeout = vsk->connect_timeout;
1196 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1197
1198 while (sk->sk_state != SS_CONNECTED && sk->sk_err == 0) {
1199 if (flags & O_NONBLOCK) {
1200 /* If we're not going to block, we schedule a timeout
1201 * function to generate a timeout on the connection
1202 * attempt, in case the peer doesn't respond in a
1203 * timely manner. We hold on to the socket until the
1204 * timeout fires.
1205 */
1206 sock_hold(sk);
1207 INIT_DELAYED_WORK(&vsk->dwork,
1208 vsock_connect_timeout);
1209 schedule_delayed_work(&vsk->dwork, timeout);
1210
1211 /* Skip ahead to preserve error code set above. */
1212 goto out_wait;
1213 }
1214
1215 release_sock(sk);
1216 timeout = schedule_timeout(timeout);
1217 lock_sock(sk);
1218
1219 if (signal_pending(current)) {
1220 err = sock_intr_errno(timeout);
1221 goto out_wait_error;
1222 } else if (timeout == 0) {
1223 err = -ETIMEDOUT;
1224 goto out_wait_error;
1225 }
1226
1227 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1228 }
1229
1230 if (sk->sk_err) {
1231 err = -sk->sk_err;
1232 goto out_wait_error;
1233 } else
1234 err = 0;
1235
1236out_wait:
1237 finish_wait(sk_sleep(sk), &wait);
1238out:
1239 release_sock(sk);
1240 return err;
1241
1242out_wait_error:
1243 sk->sk_state = SS_UNCONNECTED;
1244 sock->state = SS_UNCONNECTED;
1245 goto out_wait;
1246}
1247
1248static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
1249{
1250 struct sock *listener;
1251 int err;
1252 struct sock *connected;
1253 struct vsock_sock *vconnected;
1254 long timeout;
1255 DEFINE_WAIT(wait);
1256
1257 err = 0;
1258 listener = sock->sk;
1259
1260 lock_sock(listener);
1261
1262 if (sock->type != SOCK_STREAM) {
1263 err = -EOPNOTSUPP;
1264 goto out;
1265 }
1266
1267 if (listener->sk_state != SS_LISTEN) {
1268 err = -EINVAL;
1269 goto out;
1270 }
1271
1272 /* Wait for child sockets to appear; these are the new sockets
1273 * created upon connection establishment.
1274 */
1275 timeout = sock_sndtimeo(listener, flags & O_NONBLOCK);
1276 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
1277
1278 while ((connected = vsock_dequeue_accept(listener)) == NULL &&
1279 listener->sk_err == 0) {
1280 release_sock(listener);
1281 timeout = schedule_timeout(timeout);
1282 lock_sock(listener);
1283
1284 if (signal_pending(current)) {
1285 err = sock_intr_errno(timeout);
1286 goto out_wait;
1287 } else if (timeout == 0) {
1288 err = -EAGAIN;
1289 goto out_wait;
1290 }
1291
1292 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
1293 }
1294
1295 if (listener->sk_err)
1296 err = -listener->sk_err;
1297
1298 if (connected) {
1299 listener->sk_ack_backlog--;
1300
1301 lock_sock(connected);
1302 vconnected = vsock_sk(connected);
1303
1304 /* If the listener socket has received an error, then we should
1305 * reject this socket and return. Note that we simply mark the
1306 * socket rejected, drop our reference, and let the cleanup
1307 * function handle the cleanup; the fact that we found it in
1308 * the listener's accept queue guarantees that the cleanup
1309 * function hasn't run yet.
1310 */
1311 if (err) {
1312 vconnected->rejected = true;
1313 release_sock(connected);
1314 sock_put(connected);
1315 goto out_wait;
1316 }
1317
1318 newsock->state = SS_CONNECTED;
1319 sock_graft(connected, newsock);
1320 release_sock(connected);
1321 sock_put(connected);
1322 }
1323
1324out_wait:
1325 finish_wait(sk_sleep(listener), &wait);
1326out:
1327 release_sock(listener);
1328 return err;
1329}
1330
1331static int vsock_listen(struct socket *sock, int backlog)
1332{
1333 int err;
1334 struct sock *sk;
1335 struct vsock_sock *vsk;
1336
1337 sk = sock->sk;
1338
1339 lock_sock(sk);
1340
1341 if (sock->type != SOCK_STREAM) {
1342 err = -EOPNOTSUPP;
1343 goto out;
1344 }
1345
1346 if (sock->state != SS_UNCONNECTED) {
1347 err = -EINVAL;
1348 goto out;
1349 }
1350
1351 vsk = vsock_sk(sk);
1352
1353 if (!vsock_addr_bound(&vsk->local_addr)) {
1354 err = -EINVAL;
1355 goto out;
1356 }
1357
1358 sk->sk_max_ack_backlog = backlog;
1359 sk->sk_state = SS_LISTEN;
1360
1361 err = 0;
1362
1363out:
1364 release_sock(sk);
1365 return err;
1366}
1367
1368static int vsock_stream_setsockopt(struct socket *sock,
1369 int level,
1370 int optname,
1371 char __user *optval,
1372 unsigned int optlen)
1373{
1374 int err;
1375 struct sock *sk;
1376 struct vsock_sock *vsk;
1377 u64 val;
1378
1379 if (level != AF_VSOCK)
1380 return -ENOPROTOOPT;
1381
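/* Copy an option value of the expected size in from user space, jumping
 * to the function's exit label on short input or fault.
 */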
1382#define COPY_IN(_v) \
1383 do { \
1384 if (optlen < sizeof(_v)) { \
1385 err = -EINVAL; \
1386 goto exit; \
1387 } \
1388 if (copy_from_user(&_v, optval, sizeof(_v)) != 0) { \
1389 err = -EFAULT; \
1390 goto exit; \
1391 } \
1392 } while (0)
1393
1394 err = 0;
1395 sk = sock->sk;
1396 vsk = vsock_sk(sk);
1397
1398 lock_sock(sk);
1399
1400 switch (optname) {
1401 case SO_VM_SOCKETS_BUFFER_SIZE:
1402 COPY_IN(val);
1403 transport->set_buffer_size(vsk, val);
1404 break;
1405
1406 case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
1407 COPY_IN(val);
1408 transport->set_max_buffer_size(vsk, val);
1409 break;
1410
1411 case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
1412 COPY_IN(val);
1413 transport->set_min_buffer_size(vsk, val);
1414 break;
1415
1416 case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
1417 struct timeval tv;
1418 COPY_IN(tv);
1419 if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC &&
1420 tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) {
1421 vsk->connect_timeout = tv.tv_sec * HZ +
1422 DIV_ROUND_UP(tv.tv_usec, (1000000 / HZ));
1423 if (vsk->connect_timeout == 0)
1424 vsk->connect_timeout =
1425 VSOCK_DEFAULT_CONNECT_TIMEOUT;
1426
1427 } else {
1428 err = -ERANGE;
1429 }
1430 break;
1431 }
1432
1433 default:
1434 err = -ENOPROTOOPT;
1435 break;
1436 }
1437
1438#undef COPY_IN
1439
1440exit:
1441 release_sock(sk);
1442 return err;
1443}
1444
1445static int vsock_stream_getsockopt(struct socket *sock,
1446 int level, int optname,
1447 char __user *optval,
1448 int __user *optlen)
1449{
1450 int err;
1451 int len;
1452 struct sock *sk;
1453 struct vsock_sock *vsk;
1454 u64 val;
1455
1456 if (level != AF_VSOCK)
1457 return -ENOPROTOOPT;
1458
1459 err = get_user(len, optlen);
1460 if (err != 0)
1461 return err;
1462
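/* Check that the user buffer can hold the value, then copy the value out
 * and shrink len to its actual size.
 */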
1463#define COPY_OUT(_v) \
1464 do { \
1465 if (len < sizeof(_v)) \
1466 return -EINVAL; \
1467 \
1468 len = sizeof(_v); \
1469 if (copy_to_user(optval, &_v, len) != 0) \
1470 return -EFAULT; \
1471 \
1472 } while (0)
1473
1474 err = 0;
1475 sk = sock->sk;
1476 vsk = vsock_sk(sk);
1477
1478 switch (optname) {
1479 case SO_VM_SOCKETS_BUFFER_SIZE:
1480 val = transport->get_buffer_size(vsk);
1481 COPY_OUT(val);
1482 break;
1483
1484 case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
1485 val = transport->get_max_buffer_size(vsk);
1486 COPY_OUT(val);
1487 break;
1488
1489 case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
1490 val = transport->get_min_buffer_size(vsk);
1491 COPY_OUT(val);
1492 break;
1493
1494 case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
1495 struct timeval tv;
1496 tv.tv_sec = vsk->connect_timeout / HZ;
1497 tv.tv_usec =
1498 (vsk->connect_timeout -
1499 tv.tv_sec * HZ) * (1000000 / HZ);
1500 COPY_OUT(tv);
1501 break;
1502 }
1503 default:
1504 return -ENOPROTOOPT;
1505 }
1506
1507 err = put_user(len, optlen);
1508 if (err != 0)
1509 return -EFAULT;
1510
1511#undef COPY_OUT
1512
1513 return 0;
1514}
1515
1516static int vsock_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1517 struct msghdr *msg, size_t len)
1518{
1519 struct sock *sk;
1520 struct vsock_sock *vsk;
1521 ssize_t total_written;
1522 long timeout;
1523 int err;
1524 struct vsock_transport_send_notify_data send_data;
1525
1526 DEFINE_WAIT(wait);
1527
1528 sk = sock->sk;
1529 vsk = vsock_sk(sk);
1530 total_written = 0;
1531 err = 0;
1532
1533 if (msg->msg_flags & MSG_OOB)
1534 return -EOPNOTSUPP;
1535
1536 lock_sock(sk);
1537
1538 /* Callers should not provide a destination with stream sockets. */
1539 if (msg->msg_namelen) {
1540 err = sk->sk_state == SS_CONNECTED ? -EISCONN : -EOPNOTSUPP;
1541 goto out;
1542 }
1543
1544 /* Send only if neither side has shut down in the sending direction. */
1545 if (sk->sk_shutdown & SEND_SHUTDOWN ||
1546 vsk->peer_shutdown & RCV_SHUTDOWN) {
1547 err = -EPIPE;
1548 goto out;
1549 }
1550
1551 if (sk->sk_state != SS_CONNECTED ||
1552 !vsock_addr_bound(&vsk->local_addr)) {
1553 err = -ENOTCONN;
1554 goto out;
1555 }
1556
1557 if (!vsock_addr_bound(&vsk->remote_addr)) {
1558 err = -EDESTADDRREQ;
1559 goto out;
1560 }
1561
1562 /* Wait for room in the produce queue to enqueue our user's data. */
1563 timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1564
1565 err = transport->notify_send_init(vsk, &send_data);
1566 if (err < 0)
1567 goto out;
1568
1569 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1570
1571 while (total_written < len) {
1572 ssize_t written;
1573
1574 while (vsock_stream_has_space(vsk) == 0 &&
1575 sk->sk_err == 0 &&
1576 !(sk->sk_shutdown & SEND_SHUTDOWN) &&
1577 !(vsk->peer_shutdown & RCV_SHUTDOWN)) {
1578
1579 /* Don't wait for non-blocking sockets. */
1580 if (timeout == 0) {
1581 err = -EAGAIN;
1582 goto out_wait;
1583 }
1584
1585 err = transport->notify_send_pre_block(vsk, &send_data);
1586 if (err < 0)
1587 goto out_wait;
1588
1589 release_sock(sk);
1590 timeout = schedule_timeout(timeout);
1591 lock_sock(sk);
1592 if (signal_pending(current)) {
1593 err = sock_intr_errno(timeout);
1594 goto out_wait;
1595 } else if (timeout == 0) {
1596 err = -EAGAIN;
1597 goto out_wait;
1598 }
1599
1600 prepare_to_wait(sk_sleep(sk), &wait,
1601 TASK_INTERRUPTIBLE);
1602 }
1603
1604 /* These checks occur both as part of and after the loop
1605 * conditional since we need to check before and after
1606 * sleeping.
1607 */
1608 if (sk->sk_err) {
1609 err = -sk->sk_err;
1610 goto out_wait;
1611 } else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
1612 (vsk->peer_shutdown & RCV_SHUTDOWN)) {
1613 err = -EPIPE;
1614 goto out_wait;
1615 }
1616
1617 err = transport->notify_send_pre_enqueue(vsk, &send_data);
1618 if (err < 0)
1619 goto out_wait;
1620
1621 /* Note that enqueue will only write as many bytes as are free
1622 * in the produce queue, so we don't need to ensure len is
1623 * smaller than the queue size. It is the caller's
1624 * responsibility to check how many bytes we were able to send.
1625 */
1626
1627 written = transport->stream_enqueue(
1628 vsk, msg->msg_iov,
1629 len - total_written);
1630 if (written < 0) {
1631 err = -ENOMEM;
1632 goto out_wait;
1633 }
1634
1635 total_written += written;
1636
1637 err = transport->notify_send_post_enqueue(
1638 vsk, written, &send_data);
1639 if (err < 0)
1640 goto out_wait;
1641
1642 }
1643
1644out_wait:
1645 if (total_written > 0)
1646 err = total_written;
1647 finish_wait(sk_sleep(sk), &wait);
1648out:
1649 release_sock(sk);
1650 return err;
1651}
1652
1653
1654static int
1655vsock_stream_recvmsg(struct kiocb *kiocb,
1656 struct socket *sock,
1657 struct msghdr *msg, size_t len, int flags)
1658{
1659 struct sock *sk;
1660 struct vsock_sock *vsk;
1661 int err;
1662 size_t target;
1663 ssize_t copied;
1664 long timeout;
1665 struct vsock_transport_recv_notify_data recv_data;
1666
1667 DEFINE_WAIT(wait);
1668
1669 sk = sock->sk;
1670 vsk = vsock_sk(sk);
1671 err = 0;
1672
1673 lock_sock(sk);
1674
1675 if (sk->sk_state != SS_CONNECTED) {
1676 /* Recvmsg is supposed to return 0 if a peer performs an
1677 * orderly shutdown. Differentiate between that case and when a
1678 * peer has not connected or a local shutdown occurred, using the
1679 * SOCK_DONE flag.
1680 */
1681 if (sock_flag(sk, SOCK_DONE))
1682 err = 0;
1683 else
1684 err = -ENOTCONN;
1685
1686 goto out;
1687 }
1688
1689 if (flags & MSG_OOB) {
1690 err = -EOPNOTSUPP;
1691 goto out;
1692 }
1693
1694 /* We don't check peer_shutdown flag here since peer may actually shut
1695 * down, but there can be data in the queue that a local socket can
1696 * receive.
1697 */
1698 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1699 err = 0;
1700 goto out;
1701 }
1702
1703 /* It is valid on Linux to pass in a zero-length receive buffer. This
1704 * is not an error. We may as well bail out now.
1705 */
1706 if (!len) {
1707 err = 0;
1708 goto out;
1709 }
1710
1711 /* We must not copy less than target bytes into the user's buffer
1712 * before returning successfully, so we wait for the consume queue to
1713 * have that much data to consume before dequeueing. Note that this
1714 * makes it impossible to handle cases where target is greater than the
1715 * queue size.
1716 */
1717 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1718 if (target >= transport->stream_rcvhiwat(vsk)) {
1719 err = -ENOMEM;
1720 goto out;
1721 }
1722 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1723 copied = 0;
1724
1725 err = transport->notify_recv_init(vsk, target, &recv_data);
1726 if (err < 0)
1727 goto out;
1728
1729 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1730
1731 while (1) {
1732 s64 ready = vsock_stream_has_data(vsk);
1733
1734 if (ready < 0) {
1735 /* Invalid queue pair content. XXX This should be
1736 * changed to a connection reset in a later change.
1737 */
1738
1739 err = -ENOMEM;
1740 goto out_wait;
1741 } else if (ready > 0) {
1742 ssize_t read;
1743
1744 err = transport->notify_recv_pre_dequeue(
1745 vsk, target, &recv_data);
1746 if (err < 0)
1747 break;
1748
1749 read = transport->stream_dequeue(
1750 vsk, msg->msg_iov,
1751 len - copied, flags);
1752 if (read < 0) {
1753 err = -ENOMEM;
1754 break;
1755 }
1756
1757 copied += read;
1758
1759 err = transport->notify_recv_post_dequeue(
1760 vsk, target, read,
1761 !(flags & MSG_PEEK), &recv_data);
1762 if (err < 0)
1763 goto out_wait;
1764
1765 if (read >= target || flags & MSG_PEEK)
1766 break;
1767
1768 target -= read;
1769 } else {
1770 if (sk->sk_err != 0 || (sk->sk_shutdown & RCV_SHUTDOWN)
1771 || (vsk->peer_shutdown & SEND_SHUTDOWN)) {
1772 break;
1773 }
1774 /* Don't wait for non-blocking sockets. */
1775 if (timeout == 0) {
1776 err = -EAGAIN;
1777 break;
1778 }
1779
1780 err = transport->notify_recv_pre_block(
1781 vsk, target, &recv_data);
1782 if (err < 0)
1783 break;
1784
1785 release_sock(sk);
1786 timeout = schedule_timeout(timeout);
1787 lock_sock(sk);
1788
1789 if (signal_pending(current)) {
1790 err = sock_intr_errno(timeout);
1791 break;
1792 } else if (timeout == 0) {
1793 err = -EAGAIN;
1794 break;
1795 }
1796
1797 prepare_to_wait(sk_sleep(sk), &wait,
1798 TASK_INTERRUPTIBLE);
1799 }
1800 }
1801
1802 if (sk->sk_err)
1803 err = -sk->sk_err;
1804 else if (sk->sk_shutdown & RCV_SHUTDOWN)
1805 err = 0;
1806
1807 if (copied > 0) {
1808 /* We only do these additional bookkeeping/notification steps
1809 * if we actually copied something out of the queue pair
1810 * instead of just peeking ahead.
1811 */
1812
1813 if (!(flags & MSG_PEEK)) {
1814 /* If the other side has shutdown for sending and there
1815 * is nothing more to read, then modify the socket
1816 * state.
1817 */
1818 if (vsk->peer_shutdown & SEND_SHUTDOWN) {
1819 if (vsock_stream_has_data(vsk) <= 0) {
1820 sk->sk_state = SS_UNCONNECTED;
1821 sock_set_flag(sk, SOCK_DONE);
1822 sk->sk_state_change(sk);
1823 }
1824 }
1825 }
1826 err = copied;
1827 }
1828
1829out_wait:
1830 finish_wait(sk_sleep(sk), &wait);
1831out:
1832 release_sock(sk);
1833 return err;
1834}
1835
1836static const struct proto_ops vsock_stream_ops = {
1837 .family = PF_VSOCK,
1838 .owner = THIS_MODULE,
1839 .release = vsock_release,
1840 .bind = vsock_bind,
1841 .connect = vsock_stream_connect,
1842 .socketpair = sock_no_socketpair,
1843 .accept = vsock_accept,
1844 .getname = vsock_getname,
1845 .poll = vsock_poll,
1846 .ioctl = sock_no_ioctl,
1847 .listen = vsock_listen,
1848 .shutdown = vsock_shutdown,
1849 .setsockopt = vsock_stream_setsockopt,
1850 .getsockopt = vsock_stream_getsockopt,
1851 .sendmsg = vsock_stream_sendmsg,
1852 .recvmsg = vsock_stream_recvmsg,
1853 .mmap = sock_no_mmap,
1854 .sendpage = sock_no_sendpage,
1855};
1856
1857static int vsock_create(struct net *net, struct socket *sock,
1858 int protocol, int kern)
1859{
1860 if (!sock)
1861 return -EINVAL;
1862
1863 if (protocol && protocol != PF_VSOCK)
1864 return -EPROTONOSUPPORT;
1865
1866 switch (sock->type) {
1867 case SOCK_DGRAM:
1868 sock->ops = &vsock_dgram_ops;
1869 break;
1870 case SOCK_STREAM:
1871 sock->ops = &vsock_stream_ops;
1872 break;
1873 default:
1874 return -ESOCKTNOSUPPORT;
1875 }
1876
1877 sock->state = SS_UNCONNECTED;
1878
1879 return __vsock_create(net, sock, NULL, GFP_KERNEL, 0) ? 0 : -ENOMEM;
1880}
1881
1882static const struct net_proto_family vsock_family_ops = {
1883 .family = AF_VSOCK,
1884 .create = vsock_create,
1885 .owner = THIS_MODULE,
1886};
1887
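/* ioctl handler for the vsock misc device; the only supported command
 * reports the local context ID.
 */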
1888static long vsock_dev_do_ioctl(struct file *filp,
1889 unsigned int cmd, void __user *ptr)
1890{
1891 u32 __user *p = ptr;
1892 int retval = 0;
1893
1894 switch (cmd) {
1895 case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
1896 if (put_user(transport->get_local_cid(), p) != 0)
1897 retval = -EFAULT;
1898 break;
1899
1900 default:
1901 pr_err("Unknown ioctl %d\n", cmd);
1902 retval = -EINVAL;
1903 }
1904
1905 return retval;
1906}
1907
1908static long vsock_dev_ioctl(struct file *filp,
1909 unsigned int cmd, unsigned long arg)
1910{
1911 return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg);
1912}
1913
1914#ifdef CONFIG_COMPAT
1915static long vsock_dev_compat_ioctl(struct file *filp,
1916 unsigned int cmd, unsigned long arg)
1917{
1918 return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg));
1919}
1920#endif
1921
1922static const struct file_operations vsock_device_ops = {
1923 .owner = THIS_MODULE,
1924 .unlocked_ioctl = vsock_dev_ioctl,
1925#ifdef CONFIG_COMPAT
1926 .compat_ioctl = vsock_dev_compat_ioctl,
1927#endif
1928 .open = nonseekable_open,
1929};
1930
1931static struct miscdevice vsock_device = {
1932 .name = "vsock",
1933 .minor = MISC_DYNAMIC_MINOR,
1934 .fops = &vsock_device_ops,
1935};
1936
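/* Register the misc device, the protocol, and the address family,
 * unwinding in reverse order if any step fails.
 */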
1937static int __vsock_core_init(void)
1938{
1939 int err;
1940
1941 vsock_init_tables();
1942
1943 err = misc_register(&vsock_device);
1944 if (err) {
1945 pr_err("Failed to register misc device\n");
1946 return -ENOENT;
1947 }
1948
1949 err = proto_register(&vsock_proto, 1); /* we want our slab */
1950 if (err) {
1951 pr_err("Cannot register vsock protocol\n");
1952 goto err_misc_deregister;
1953 }
1954
1955 err = sock_register(&vsock_family_ops);
1956 if (err) {
1957 pr_err("could not register af_vsock (%d) address family: %d\n",
1958 AF_VSOCK, err);
1959 goto err_unregister_proto;
1960 }
1961
1962 return 0;
1963
1964err_unregister_proto:
1965 proto_unregister(&vsock_proto);
1966err_misc_deregister:
1967 misc_deregister(&vsock_device);
1968 return err;
1969}
1970
1971int vsock_core_init(const struct vsock_transport *t)
1972{
1973 int retval = mutex_lock_interruptible(&vsock_register_mutex);
1974 if (retval)
1975 return retval;
1976
1977 if (transport) {
1978 retval = -EBUSY;
1979 goto out;
1980 }
1981
1982 transport = t;
1983 retval = __vsock_core_init();
1984 if (retval)
1985 transport = NULL;
1986
1987out:
1988 mutex_unlock(&vsock_register_mutex);
1989 return retval;
1990}
1991EXPORT_SYMBOL_GPL(vsock_core_init);
1992
1993void vsock_core_exit(void)
1994{
1995 mutex_lock(&vsock_register_mutex);
1996
1997 misc_deregister(&vsock_device);
1998 sock_unregister(AF_VSOCK);
1999 proto_unregister(&vsock_proto);
2000
2001 /* We do not want the assignment below re-ordered. */
2002 mb();
2003 transport = NULL;
2004
2005 mutex_unlock(&vsock_register_mutex);
2006}
2007EXPORT_SYMBOL_GPL(vsock_core_exit);
2008
2009MODULE_AUTHOR("VMware, Inc.");
2010MODULE_DESCRIPTION("VMware Virtual Socket Family");
2011MODULE_VERSION("1.0.0.0-k");
2012MODULE_LICENSE("GPL v2");
diff --git a/net/vmw_vsock/af_vsock.h b/net/vmw_vsock/af_vsock.h
new file mode 100644
index 000000000000..7d64d3609ec9
--- /dev/null
+++ b/net/vmw_vsock/af_vsock.h
@@ -0,0 +1,175 @@
1/*
2 * VMware vSockets Driver
3 *
4 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#ifndef __AF_VSOCK_H__
17#define __AF_VSOCK_H__
18
19#include <linux/kernel.h>
20#include <linux/workqueue.h>
21#include <linux/vm_sockets.h>
22
23#include "vsock_addr.h"
24
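/* Ports up to and including this value require CAP_NET_BIND_SERVICE to
 * bind, mirroring the privileged port range of the inet families.
 */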
25#define LAST_RESERVED_PORT 1023
26
27#define vsock_sk(__sk) ((struct vsock_sock *)__sk)
28#define sk_vsock(__vsk) (&(__vsk)->sk)
29
30struct vsock_sock {
31 /* sk must be the first member. */
32 struct sock sk;
33 struct sockaddr_vm local_addr;
34 struct sockaddr_vm remote_addr;
35 /* Links for the global tables of bound and connected sockets. */
36 struct list_head bound_table;
37 struct list_head connected_table;
38 /* Accessed without the socket lock held. This means it can never be
39 * modified outside of socket create or destruct.
40 */
41 bool trusted;
42 bool cached_peer_allow_dgram; /* Dgram communication allowed to
43 * cached peer?
44 */
45 u32 cached_peer; /* Context ID of last dgram destination check. */
46 const struct cred *owner;
47 /* Rest are SOCK_STREAM only. */
48 long connect_timeout;
49 /* Listening socket that this came from. */
50 struct sock *listener;
51 /* Used for pending list and accept queue during connection handshake.
52 * The listening socket is the head for both lists. Sockets created
53 * for connection requests are placed in the pending list until they
54 * are connected, at which point they are put in the accept queue list
55 * so they can be accepted in accept(). If accept() cannot accept the
56 * connection, it is marked as rejected so the cleanup function knows
57 * to clean up the socket.
58 */
59 struct list_head pending_links;
60 struct list_head accept_queue;
61 bool rejected;
62 struct delayed_work dwork;
63 u32 peer_shutdown;
64 bool sent_request;
65 bool ignore_connecting_rst;
66
67 /* Private to transport. */
68 void *trans;
69};
70
71s64 vsock_stream_has_data(struct vsock_sock *vsk);
72s64 vsock_stream_has_space(struct vsock_sock *vsk);
73void vsock_pending_work(struct work_struct *work);
74struct sock *__vsock_create(struct net *net,
75 struct socket *sock,
76 struct sock *parent,
77 gfp_t priority, unsigned short type);
78
79/**** TRANSPORT ****/
80
81struct vsock_transport_recv_notify_data {
82 u64 data1; /* Transport-defined. */
83 u64 data2; /* Transport-defined. */
84 bool notify_on_block;
85};
86
87struct vsock_transport_send_notify_data {
88 u64 data1; /* Transport-defined. */
89 u64 data2; /* Transport-defined. */
90};
91
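/* Operations a transport must implement; the address-family core
 * dispatches to these through the transport registered with
 * vsock_core_init().
 */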
92struct vsock_transport {
93 /* Initialize/tear-down socket. */
94 int (*init)(struct vsock_sock *, struct vsock_sock *);
95 void (*destruct)(struct vsock_sock *);
96 void (*release)(struct vsock_sock *);
97
98 /* Connections. */
99 int (*connect)(struct vsock_sock *);
100
101 /* DGRAM. */
102 int (*dgram_bind)(struct vsock_sock *, struct sockaddr_vm *);
103 int (*dgram_dequeue)(struct kiocb *kiocb, struct vsock_sock *vsk,
104 struct msghdr *msg, size_t len, int flags);
105 int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *,
106 struct iovec *, size_t len);
107 bool (*dgram_allow)(u32 cid, u32 port);
108
109 /* STREAM. */
110 /* TODO: stream_bind() */
111 ssize_t (*stream_dequeue)(struct vsock_sock *, struct iovec *,
112 size_t len, int flags);
113 ssize_t (*stream_enqueue)(struct vsock_sock *, struct iovec *,
114 size_t len);
115 s64 (*stream_has_data)(struct vsock_sock *);
116 s64 (*stream_has_space)(struct vsock_sock *);
117 u64 (*stream_rcvhiwat)(struct vsock_sock *);
118 bool (*stream_is_active)(struct vsock_sock *);
119 bool (*stream_allow)(u32 cid, u32 port);
120
121 /* Notification. */
122 int (*notify_poll_in)(struct vsock_sock *, size_t, bool *);
123 int (*notify_poll_out)(struct vsock_sock *, size_t, bool *);
124 int (*notify_recv_init)(struct vsock_sock *, size_t,
125 struct vsock_transport_recv_notify_data *);
126 int (*notify_recv_pre_block)(struct vsock_sock *, size_t,
127 struct vsock_transport_recv_notify_data *);
128 int (*notify_recv_pre_dequeue)(struct vsock_sock *, size_t,
129 struct vsock_transport_recv_notify_data *);
130 int (*notify_recv_post_dequeue)(struct vsock_sock *, size_t,
131 ssize_t, bool, struct vsock_transport_recv_notify_data *);
132 int (*notify_send_init)(struct vsock_sock *,
133 struct vsock_transport_send_notify_data *);
134 int (*notify_send_pre_block)(struct vsock_sock *,
135 struct vsock_transport_send_notify_data *);
136 int (*notify_send_pre_enqueue)(struct vsock_sock *,
137 struct vsock_transport_send_notify_data *);
138 int (*notify_send_post_enqueue)(struct vsock_sock *, ssize_t,
139 struct vsock_transport_send_notify_data *);
140
141 /* Shutdown. */
142 int (*shutdown)(struct vsock_sock *, int);
143
144 /* Buffer sizes. */
145 void (*set_buffer_size)(struct vsock_sock *, u64);
146 void (*set_min_buffer_size)(struct vsock_sock *, u64);
147 void (*set_max_buffer_size)(struct vsock_sock *, u64);
148 u64 (*get_buffer_size)(struct vsock_sock *);
149 u64 (*get_min_buffer_size)(struct vsock_sock *);
150 u64 (*get_max_buffer_size)(struct vsock_sock *);
151
152 /* Addressing. */
153 u32 (*get_local_cid)(void);
154};
155
156/**** CORE ****/
157
158int vsock_core_init(const struct vsock_transport *t);
159void vsock_core_exit(void);
160
161/**** UTILS ****/
162
163void vsock_release_pending(struct sock *pending);
164void vsock_add_pending(struct sock *listener, struct sock *pending);
165void vsock_remove_pending(struct sock *listener, struct sock *pending);
166void vsock_enqueue_accept(struct sock *listener, struct sock *connected);
167void vsock_insert_connected(struct vsock_sock *vsk);
168void vsock_remove_bound(struct vsock_sock *vsk);
169void vsock_remove_connected(struct vsock_sock *vsk);
170struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr);
171struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
172 struct sockaddr_vm *dst);
173void vsock_for_each_connected_socket(void (*fn)(struct sock *sk));
174
175#endif /* __AF_VSOCK_H__ */
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
new file mode 100644
index 000000000000..a70ace83a153
--- /dev/null
+++ b/net/vmw_vsock/vmci_transport.c
@@ -0,0 +1,2155 @@
1/*
2 * VMware vSockets Driver
3 *
4 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#include <linux/types.h>
17#include <linux/bitops.h>
18#include <linux/cred.h>
19#include <linux/init.h>
20#include <linux/io.h>
21#include <linux/kernel.h>
22#include <linux/kmod.h>
23#include <linux/list.h>
24#include <linux/miscdevice.h>
25#include <linux/module.h>
26#include <linux/mutex.h>
27#include <linux/net.h>
28#include <linux/poll.h>
29#include <linux/skbuff.h>
30#include <linux/smp.h>
31#include <linux/socket.h>
32#include <linux/stddef.h>
33#include <linux/unistd.h>
34#include <linux/wait.h>
35#include <linux/workqueue.h>
36#include <net/sock.h>
37
38#include "af_vsock.h"
39#include "vmci_transport_notify.h"
40
41static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg);
42static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg);
43static void vmci_transport_peer_attach_cb(u32 sub_id,
44 const struct vmci_event_data *ed,
45 void *client_data);
46static void vmci_transport_peer_detach_cb(u32 sub_id,
47 const struct vmci_event_data *ed,
48 void *client_data);
49static void vmci_transport_recv_pkt_work(struct work_struct *work);
50static int vmci_transport_recv_listen(struct sock *sk,
51 struct vmci_transport_packet *pkt);
52static int vmci_transport_recv_connecting_server(
53 struct sock *sk,
54 struct sock *pending,
55 struct vmci_transport_packet *pkt);
56static int vmci_transport_recv_connecting_client(
57 struct sock *sk,
58 struct vmci_transport_packet *pkt);
59static int vmci_transport_recv_connecting_client_negotiate(
60 struct sock *sk,
61 struct vmci_transport_packet *pkt);
62static int vmci_transport_recv_connecting_client_invalid(
63 struct sock *sk,
64 struct vmci_transport_packet *pkt);
65static int vmci_transport_recv_connected(struct sock *sk,
66 struct vmci_transport_packet *pkt);
67static bool vmci_transport_old_proto_override(bool *old_pkt_proto);
68static u16 vmci_transport_new_proto_supported_versions(void);
69static bool vmci_transport_proto_to_notify_struct(struct sock *sk, u16 *proto,
70 bool old_pkt_proto);
71
72struct vmci_transport_recv_pkt_info {
73 struct work_struct work;
74 struct sock *sk;
75 struct vmci_transport_packet pkt;
76};
77
78static struct vmci_handle vmci_transport_stream_handle = { VMCI_INVALID_ID,
79 VMCI_INVALID_ID };
80static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
81
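/* When non-negative, overrides the negotiated notify protocol version;
 * -1 (the default) leaves negotiation alone. Presumably a debugging knob.
 */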
82static int PROTOCOL_OVERRIDE = -1;
83
84#define VMCI_TRANSPORT_DEFAULT_QP_SIZE_MIN 128
85#define VMCI_TRANSPORT_DEFAULT_QP_SIZE 262144
86#define VMCI_TRANSPORT_DEFAULT_QP_SIZE_MAX 262144
87
88/* The default peer timeout indicates how long we will wait for a peer response
89 * to a control message.
90 */
91#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)
92
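/* Pseudo socket state used for listening sockets, chosen so it cannot
 * collide with any of the SS_* connection states.
 */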
93#define SS_LISTEN 255
94
95/* Helper function to convert from a VMCI error code to a VSock error code. */
96
97static s32 vmci_transport_error_to_vsock_error(s32 vmci_error)
98{
99 int err;
100
101 switch (vmci_error) {
102 case VMCI_ERROR_NO_MEM:
103 err = ENOMEM;
104 break;
105 case VMCI_ERROR_DUPLICATE_ENTRY:
106 case VMCI_ERROR_ALREADY_EXISTS:
107 err = EADDRINUSE;
108 break;
109 case VMCI_ERROR_NO_ACCESS:
110 err = EPERM;
111 break;
112 case VMCI_ERROR_NO_RESOURCES:
113 err = ENOBUFS;
114 break;
115 case VMCI_ERROR_INVALID_RESOURCE:
116 err = EHOSTUNREACH;
117 break;
118 case VMCI_ERROR_INVALID_ARGS:
119 default:
120 err = EINVAL;
121 }
122
123 return err > 0 ? -err : err;
124}
125
126static inline void
127vmci_transport_packet_init(struct vmci_transport_packet *pkt,
128 struct sockaddr_vm *src,
129 struct sockaddr_vm *dst,
130 u8 type,
131 u64 size,
132 u64 mode,
133 struct vmci_transport_waiting_info *wait,
134 u16 proto,
135 struct vmci_handle handle)
136{
137 /* We register the stream control handler as an any-CID handle, so we
138 * must always send from a source address of VMADDR_CID_ANY.
139 */
140 pkt->dg.src = vmci_make_handle(VMADDR_CID_ANY,
141 VMCI_TRANSPORT_PACKET_RID);
142 pkt->dg.dst = vmci_make_handle(dst->svm_cid,
143 VMCI_TRANSPORT_PACKET_RID);
144 pkt->dg.payload_size = sizeof(*pkt) - sizeof(pkt->dg);
145 pkt->version = VMCI_TRANSPORT_PACKET_VERSION;
146 pkt->type = type;
147 pkt->src_port = src->svm_port;
148 pkt->dst_port = dst->svm_port;
149 memset(&pkt->proto, 0, sizeof(pkt->proto));
150 memset(&pkt->_reserved2, 0, sizeof(pkt->_reserved2));
151
152 switch (pkt->type) {
153 case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
154 pkt->u.size = 0;
155 break;
156
157 case VMCI_TRANSPORT_PACKET_TYPE_REQUEST:
158 case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE:
159 pkt->u.size = size;
160 break;
161
162 case VMCI_TRANSPORT_PACKET_TYPE_OFFER:
163 case VMCI_TRANSPORT_PACKET_TYPE_ATTACH:
164 pkt->u.handle = handle;
165 break;
166
167 case VMCI_TRANSPORT_PACKET_TYPE_WROTE:
168 case VMCI_TRANSPORT_PACKET_TYPE_READ:
169 case VMCI_TRANSPORT_PACKET_TYPE_RST:
170 pkt->u.size = 0;
171 break;
172
173 case VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN:
174 pkt->u.mode = mode;
175 break;
176
177 case VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ:
178 case VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE:
179 memcpy(&pkt->u.wait, wait, sizeof(pkt->u.wait));
180 break;
181
182 case VMCI_TRANSPORT_PACKET_TYPE_REQUEST2:
183 case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2:
184 pkt->u.size = size;
185 pkt->proto = proto;
186 break;
187 }
188}
189
190static inline void
191vmci_transport_packet_get_addresses(struct vmci_transport_packet *pkt,
192 struct sockaddr_vm *local,
193 struct sockaddr_vm *remote)
194{
195 vsock_addr_init(local, pkt->dg.dst.context, pkt->dst_port);
196 vsock_addr_init(remote, pkt->dg.src.context, pkt->src_port);
197}
198
199static int
200__vmci_transport_send_control_pkt(struct vmci_transport_packet *pkt,
201 struct sockaddr_vm *src,
202 struct sockaddr_vm *dst,
203 enum vmci_transport_packet_type type,
204 u64 size,
205 u64 mode,
206 struct vmci_transport_waiting_info *wait,
207 u16 proto,
208 struct vmci_handle handle,
209 bool convert_error)
210{
211 int err;
212
213 vmci_transport_packet_init(pkt, src, dst, type, size, mode, wait,
214 proto, handle);
215 err = vmci_datagram_send(&pkt->dg);
216 if (convert_error && (err < 0))
217 return vmci_transport_error_to_vsock_error(err);
218
219 return err;
220}
221
222static int
223vmci_transport_reply_control_pkt_fast(struct vmci_transport_packet *pkt,
224 enum vmci_transport_packet_type type,
225 u64 size,
226 u64 mode,
227 struct vmci_transport_waiting_info *wait,
228 struct vmci_handle handle)
229{
230 struct vmci_transport_packet reply;
231 struct sockaddr_vm src, dst;
232
233 if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST) {
234 return 0;
235 } else {
236 vmci_transport_packet_get_addresses(pkt, &src, &dst);
237 return __vmci_transport_send_control_pkt(&reply, &src, &dst,
238 type,
239 size, mode, wait,
240 VSOCK_PROTO_INVALID,
241 handle, true);
242 }
243}
244
245static int
246vmci_transport_send_control_pkt_bh(struct sockaddr_vm *src,
247 struct sockaddr_vm *dst,
248 enum vmci_transport_packet_type type,
249 u64 size,
250 u64 mode,
251 struct vmci_transport_waiting_info *wait,
252 struct vmci_handle handle)
253{
254 /* Note that it is safe to use a single packet across all CPUs since
255 * two tasklets of the same type are guaranteed to not ever run
256 * simultaneously. If that ever changes, or VMCI stops using tasklets,
257 * we can use per-cpu packets.
258 */
259 static struct vmci_transport_packet pkt;
260
261 return __vmci_transport_send_control_pkt(&pkt, src, dst, type,
262 size, mode, wait,
263 VSOCK_PROTO_INVALID, handle,
264 false);
265}
266
267static int
268vmci_transport_send_control_pkt(struct sock *sk,
269 enum vmci_transport_packet_type type,
270 u64 size,
271 u64 mode,
272 struct vmci_transport_waiting_info *wait,
273 u16 proto,
274 struct vmci_handle handle)
275{
276 struct vmci_transport_packet *pkt;
277 struct vsock_sock *vsk;
278 int err;
279
280 vsk = vsock_sk(sk);
281
282 if (!vsock_addr_bound(&vsk->local_addr))
283 return -EINVAL;
284
285 if (!vsock_addr_bound(&vsk->remote_addr))
286 return -EINVAL;
287
288 pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
289 if (!pkt)
290 return -ENOMEM;
291
292 err = __vmci_transport_send_control_pkt(pkt, &vsk->local_addr,
293 &vsk->remote_addr, type, size,
294 mode, wait, proto, handle,
295 true);
296 kfree(pkt);
297
298 return err;
299}
300
301static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
302 struct sockaddr_vm *src,
303 struct vmci_transport_packet *pkt)
304{
305 if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
306 return 0;
307 return vmci_transport_send_control_pkt_bh(
308 dst, src,
309 VMCI_TRANSPORT_PACKET_TYPE_RST, 0,
310 0, NULL, VMCI_INVALID_HANDLE);
311}
312
313static int vmci_transport_send_reset(struct sock *sk,
314 struct vmci_transport_packet *pkt)
315{
316 if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
317 return 0;
318 return vmci_transport_send_control_pkt(sk,
319 VMCI_TRANSPORT_PACKET_TYPE_RST,
320 0, 0, NULL, VSOCK_PROTO_INVALID,
321 VMCI_INVALID_HANDLE);
322}
323
324static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
325{
326 return vmci_transport_send_control_pkt(
327 sk,
328 VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE,
329 size, 0, NULL,
330 VSOCK_PROTO_INVALID,
331 VMCI_INVALID_HANDLE);
332}
333
334static int vmci_transport_send_negotiate2(struct sock *sk, size_t size,
335 u16 version)
336{
337 return vmci_transport_send_control_pkt(
338 sk,
339 VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2,
340 size, 0, NULL, version,
341 VMCI_INVALID_HANDLE);
342}
343
344static int vmci_transport_send_qp_offer(struct sock *sk,
345 struct vmci_handle handle)
346{
347 return vmci_transport_send_control_pkt(
348 sk, VMCI_TRANSPORT_PACKET_TYPE_OFFER, 0,
349 0, NULL,
350 VSOCK_PROTO_INVALID, handle);
351}
352
353static int vmci_transport_send_attach(struct sock *sk,
354 struct vmci_handle handle)
355{
356 return vmci_transport_send_control_pkt(
357 sk, VMCI_TRANSPORT_PACKET_TYPE_ATTACH,
358 0, 0, NULL, VSOCK_PROTO_INVALID,
359 handle);
360}
361
362static int vmci_transport_reply_reset(struct vmci_transport_packet *pkt)
363{
364 return vmci_transport_reply_control_pkt_fast(
365 pkt,
366 VMCI_TRANSPORT_PACKET_TYPE_RST,
367 0, 0, NULL,
368 VMCI_INVALID_HANDLE);
369}
370
371static int vmci_transport_send_invalid_bh(struct sockaddr_vm *dst,
372 struct sockaddr_vm *src)
373{
374 return vmci_transport_send_control_pkt_bh(
375 dst, src,
376 VMCI_TRANSPORT_PACKET_TYPE_INVALID,
377 0, 0, NULL, VMCI_INVALID_HANDLE);
378}
379
380int vmci_transport_send_wrote_bh(struct sockaddr_vm *dst,
381 struct sockaddr_vm *src)
382{
383 return vmci_transport_send_control_pkt_bh(
384 dst, src,
385 VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0,
386 0, NULL, VMCI_INVALID_HANDLE);
387}
388
389int vmci_transport_send_read_bh(struct sockaddr_vm *dst,
390 struct sockaddr_vm *src)
391{
392 return vmci_transport_send_control_pkt_bh(
393 dst, src,
394 VMCI_TRANSPORT_PACKET_TYPE_READ, 0,
395 0, NULL, VMCI_INVALID_HANDLE);
396}
397
398int vmci_transport_send_wrote(struct sock *sk)
399{
400 return vmci_transport_send_control_pkt(
401 sk, VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0,
402 0, NULL, VSOCK_PROTO_INVALID,
403 VMCI_INVALID_HANDLE);
404}
405
406int vmci_transport_send_read(struct sock *sk)
407{
408 return vmci_transport_send_control_pkt(
409 sk, VMCI_TRANSPORT_PACKET_TYPE_READ, 0,
410 0, NULL, VSOCK_PROTO_INVALID,
411 VMCI_INVALID_HANDLE);
412}
413
414int vmci_transport_send_waiting_write(struct sock *sk,
415 struct vmci_transport_waiting_info *wait)
416{
417 return vmci_transport_send_control_pkt(
418 sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE,
419 0, 0, wait, VSOCK_PROTO_INVALID,
420 VMCI_INVALID_HANDLE);
421}
422
423int vmci_transport_send_waiting_read(struct sock *sk,
424 struct vmci_transport_waiting_info *wait)
425{
426 return vmci_transport_send_control_pkt(
427 sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ,
428 0, 0, wait, VSOCK_PROTO_INVALID,
429 VMCI_INVALID_HANDLE);
430}
431
432static int vmci_transport_shutdown(struct vsock_sock *vsk, int mode)
433{
434 return vmci_transport_send_control_pkt(
435 &vsk->sk,
436 VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN,
437 0, mode, NULL,
438 VSOCK_PROTO_INVALID,
439 VMCI_INVALID_HANDLE);
440}
441
442static int vmci_transport_send_conn_request(struct sock *sk, size_t size)
443{
444 return vmci_transport_send_control_pkt(sk,
445 VMCI_TRANSPORT_PACKET_TYPE_REQUEST,
446 size, 0, NULL,
447 VSOCK_PROTO_INVALID,
448 VMCI_INVALID_HANDLE);
449}
450
451static int vmci_transport_send_conn_request2(struct sock *sk, size_t size,
452 u16 version)
453{
454 return vmci_transport_send_control_pkt(
455 sk, VMCI_TRANSPORT_PACKET_TYPE_REQUEST2,
456 size, 0, NULL, version,
457 VMCI_INVALID_HANDLE);
458}
459
460static struct sock *vmci_transport_get_pending(
461 struct sock *listener,
462 struct vmci_transport_packet *pkt)
463{
464 struct vsock_sock *vlistener;
465 struct vsock_sock *vpending;
466 struct sock *pending;
467
468 vlistener = vsock_sk(listener);
469
470 list_for_each_entry(vpending, &vlistener->pending_links,
471 pending_links) {
472 struct sockaddr_vm src;
473 struct sockaddr_vm dst;
474
475 vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
476 vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port);
477
478 if (vsock_addr_equals_addr(&src, &vpending->remote_addr) &&
479 vsock_addr_equals_addr(&dst, &vpending->local_addr)) {
480 pending = sk_vsock(vpending);
481 sock_hold(pending);
482 goto found;
483 }
484 }
485
486 pending = NULL;
487found:
488 return pending;
489
490}
491
492static void vmci_transport_release_pending(struct sock *pending)
493{
494 sock_put(pending);
495}
496
497/* We allow two kinds of sockets to communicate with a restricted VM: 1)
498 * trusted sockets, and 2) sockets from applications running as the same user
499 * as the VM (the latter holds only on the host side and only with hosted products).
500 */
501
502static bool vmci_transport_is_trusted(struct vsock_sock *vsock, u32 peer_cid)
503{
504 return vsock->trusted ||
505 vmci_is_context_owner(peer_cid, vsock->owner->uid);
506}
507
508/* We allow sending datagrams to and receiving datagrams from a restricted VM
509 * only if it is trusted as described in vmci_transport_is_trusted.
510 */
511
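/* The verdict is cached per peer context, so the privilege-flag lookup is
 * redone only when the peer changes.
 */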
512static bool vmci_transport_allow_dgram(struct vsock_sock *vsock, u32 peer_cid)
513{
514 if (vsock->cached_peer != peer_cid) {
515 vsock->cached_peer = peer_cid;
516 if (!vmci_transport_is_trusted(vsock, peer_cid) &&
517 (vmci_context_get_priv_flags(peer_cid) &
518 VMCI_PRIVILEGE_FLAG_RESTRICTED)) {
519 vsock->cached_peer_allow_dgram = false;
520 } else {
521 vsock->cached_peer_allow_dgram = true;
522 }
523 }
524
525 return vsock->cached_peer_allow_dgram;
526}
527
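/* Allocate or attach to the queue pair backing a stream socket. When the
 * socket is trusted we first attempt a privileged allocation, falling back
 * to an unprivileged one only if VMCI reports VMCI_ERROR_NO_ACCESS.
 */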
528static int
529vmci_transport_queue_pair_alloc(struct vmci_qp **qpair,
530 struct vmci_handle *handle,
531 u64 produce_size,
532 u64 consume_size,
533 u32 peer, u32 flags, bool trusted)
534{
535 int err = 0;
536
537 if (trusted) {
538 /* Try to allocate our queue pair as trusted. This will only
539 * work if vsock is running in the host.
540 */
541
542 err = vmci_qpair_alloc(qpair, handle, produce_size,
543 consume_size,
544 peer, flags,
545 VMCI_PRIVILEGE_FLAG_TRUSTED);
546 if (err != VMCI_ERROR_NO_ACCESS)
547 goto out;
548
549 }
550
551 err = vmci_qpair_alloc(qpair, handle, produce_size, consume_size,
552 peer, flags, VMCI_NO_PRIVILEGE_FLAGS);
553out:
554 if (err < 0) {
555 pr_err("Could not attach to queue pair with %d\n",
556 err);
557 err = vmci_transport_error_to_vsock_error(err);
558 }
559
560 return err;
561}
562
563static int
564vmci_transport_datagram_create_hnd(u32 resource_id,
565 u32 flags,
566 vmci_datagram_recv_cb recv_cb,
567 void *client_data,
568 struct vmci_handle *out_handle)
569{
570 int err = 0;
571
572 /* Try to allocate our datagram handler as trusted. This will only work
573 * if vsock is running in the host.
574 */
575
576 err = vmci_datagram_create_handle_priv(resource_id, flags,
577 VMCI_PRIVILEGE_FLAG_TRUSTED,
578 recv_cb,
579 client_data, out_handle);
580
581 if (err == VMCI_ERROR_NO_ACCESS)
582 err = vmci_datagram_create_handle(resource_id, flags,
583 recv_cb, client_data,
584 out_handle);
585
586 return err;
587}
588
589/* This is invoked as part of a tasklet that's scheduled when the VMCI
590 * interrupt fires. This is run in bottom-half context and if it ever needs to
591 * sleep it should defer that work to a work queue.
592 */
593
594static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg)
595{
596 struct sock *sk;
597 size_t size;
598 struct sk_buff *skb;
599 struct vsock_sock *vsk;
600
601 sk = (struct sock *)data;
602
603 /* This handler is privileged when this module is running on the host.
604 * We will get datagrams from all endpoints (even VMs that are in a
605 * restricted context). If we get one from a restricted context then
606 * the destination socket must be trusted.
607 *
608 * NOTE: We access the socket struct without holding the lock here.
609	 * This is ok because the field we are interested in is never modified
610 * outside of the create and destruct socket functions.
611 */
612 vsk = vsock_sk(sk);
613 if (!vmci_transport_allow_dgram(vsk, dg->src.context))
614 return VMCI_ERROR_NO_ACCESS;
615
616 size = VMCI_DG_SIZE(dg);
617
618 /* Attach the packet to the socket's receive queue as an sk_buff. */
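	/* The whole datagram, VMCI header included, is copied into the
	 * skb; vmci_transport_dgram_dequeue() skips the header again when
	 * copying the payload out to user space.
	 */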
619 skb = alloc_skb(size, GFP_ATOMIC);
620 if (skb) {
621 /* sk_receive_skb() will do a sock_put(), so hold here. */
622 sock_hold(sk);
623 skb_put(skb, size);
624 memcpy(skb->data, dg, size);
625 sk_receive_skb(sk, skb, 0);
626 }
627
628 return VMCI_SUCCESS;
629}
630
631static bool vmci_transport_stream_allow(u32 cid, u32 port)
632{
633 static const u32 non_socket_contexts[] = {
634 VMADDR_CID_HYPERVISOR,
635 VMADDR_CID_RESERVED,
636 };
637 int i;
638
639 BUILD_BUG_ON(sizeof(cid) != sizeof(*non_socket_contexts));
640
641 for (i = 0; i < ARRAY_SIZE(non_socket_contexts); i++) {
642 if (cid == non_socket_contexts[i])
643 return false;
644 }
645
646 return true;
647}
648
649/* This is invoked as part of a tasklet that's scheduled when the VMCI
650 * interrupt fires. This is run in bottom-half context but it defers most of
651 * its work to the packet handling work queue.
652 */
653
654static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg)
655{
656 struct sock *sk;
657 struct sockaddr_vm dst;
658 struct sockaddr_vm src;
659 struct vmci_transport_packet *pkt;
660 struct vsock_sock *vsk;
661 bool bh_process_pkt;
662 int err;
663
664 sk = NULL;
665 err = VMCI_SUCCESS;
666 bh_process_pkt = false;
667
668 /* Ignore incoming packets from contexts without sockets, or resources
669 * that aren't vsock implementations.
670 */
671
672 if (!vmci_transport_stream_allow(dg->src.context, -1)
673 || VMCI_TRANSPORT_PACKET_RID != dg->src.resource)
674 return VMCI_ERROR_NO_ACCESS;
675
676 if (VMCI_DG_SIZE(dg) < sizeof(*pkt))
677 /* Drop datagrams that do not contain full VSock packets. */
678 return VMCI_ERROR_INVALID_ARGS;
679
680 pkt = (struct vmci_transport_packet *)dg;
681
682 /* Find the socket that should handle this packet. First we look for a
683 * connected socket and if there is none we look for a socket bound to
684	 * the destination address.
685 */
686 vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
687 vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port);
688
689 sk = vsock_find_connected_socket(&src, &dst);
690 if (!sk) {
691 sk = vsock_find_bound_socket(&dst);
692 if (!sk) {
693 /* We could not find a socket for this specified
694 * address. If this packet is a RST, we just drop it.
695 * If it is another packet, we send a RST. Note that
696 * we do not send a RST reply to RSTs so that we do not
697 * continually send RSTs between two endpoints.
698 *
699 * Note that since this is a reply, dst is src and src
700 * is dst.
701 */
702 if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
703 pr_err("unable to send reset\n");
704
705 err = VMCI_ERROR_NOT_FOUND;
706 goto out;
707 }
708 }
709
710 /* If the received packet type is beyond all types known to this
711 * implementation, reply with an invalid message. Hopefully this will
712 * help when implementing backwards compatibility in the future.
713 */
714 if (pkt->type >= VMCI_TRANSPORT_PACKET_TYPE_MAX) {
715 vmci_transport_send_invalid_bh(&dst, &src);
716 err = VMCI_ERROR_INVALID_ARGS;
717 goto out;
718 }
719
720 /* This handler is privileged when this module is running on the host.
721 * We will get datagram connect requests from all endpoints (even VMs
722 * that are in a restricted context). If we get one from a restricted
723 * context then the destination socket must be trusted.
724 *
725 * NOTE: We access the socket struct without holding the lock here.
726	 * This is ok because the field we are interested in is never modified
727 * outside of the create and destruct socket functions.
728 */
729 vsk = vsock_sk(sk);
730 if (!vmci_transport_allow_dgram(vsk, pkt->dg.src.context)) {
731 err = VMCI_ERROR_NO_ACCESS;
732 goto out;
733 }
734
735	/* We do almost everything in a work queue, but let's fast-path the
736 * notification of reads and writes to help data transfer performance.
737 * We can only do this if there is no process context code executing
738 * for this socket since that may change the state.
739 */
740 bh_lock_sock(sk);
741
742 if (!sock_owned_by_user(sk) && sk->sk_state == SS_CONNECTED)
743 vmci_trans(vsk)->notify_ops->handle_notify_pkt(
744 sk, pkt, true, &dst, &src,
745 &bh_process_pkt);
746
747 bh_unlock_sock(sk);
748
749 if (!bh_process_pkt) {
750 struct vmci_transport_recv_pkt_info *recv_pkt_info;
751
752 recv_pkt_info = kmalloc(sizeof(*recv_pkt_info), GFP_ATOMIC);
753 if (!recv_pkt_info) {
754 if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
755 pr_err("unable to send reset\n");
756
757 err = VMCI_ERROR_NO_MEM;
758 goto out;
759 }
760
761 recv_pkt_info->sk = sk;
762 memcpy(&recv_pkt_info->pkt, pkt, sizeof(recv_pkt_info->pkt));
763 INIT_WORK(&recv_pkt_info->work, vmci_transport_recv_pkt_work);
764
765 schedule_work(&recv_pkt_info->work);
766 /* Clear sk so that the reference count incremented by one of
767		 * the vsock_find_*() calls above is not decremented below. We need
768 * that reference count for the packet handler we've scheduled
769 * to run.
770 */
771 sk = NULL;
772 }
773
774out:
775 if (sk)
776 sock_put(sk);
777
778 return err;
779}
780
781static void vmci_transport_peer_attach_cb(u32 sub_id,
782 const struct vmci_event_data *e_data,
783 void *client_data)
784{
785 struct sock *sk = client_data;
786 const struct vmci_event_payload_qp *e_payload;
787 struct vsock_sock *vsk;
788
789 e_payload = vmci_event_data_const_payload(e_data);
790
791 vsk = vsock_sk(sk);
792
793 /* We don't ask for delayed CBs when we subscribe to this event (we
794 * pass 0 as flags to vmci_event_subscribe()). VMCI makes no
795 * guarantees in that case about what context we might be running in,
796 * so it could be BH or process, blockable or non-blockable. So we
797 * need to account for all possible contexts here.
798 */
799 local_bh_disable();
800 bh_lock_sock(sk);
801
802	/* XXX This is lame; we should provide a way to look up sockets by
803 * qp_handle.
804 */
805 if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
806 e_payload->handle)) {
807 /* XXX This doesn't do anything, but in the future we may want
808 * to set a flag here to verify the attach really did occur and
809 * we weren't just sent a datagram claiming it was.
810 */
811 goto out;
812 }
813
814out:
815 bh_unlock_sock(sk);
816 local_bh_enable();
817}
818
819static void vmci_transport_handle_detach(struct sock *sk)
820{
821 struct vsock_sock *vsk;
822
823 vsk = vsock_sk(sk);
824 if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) {
825 sock_set_flag(sk, SOCK_DONE);
826
827 /* On a detach the peer will not be sending or receiving
828 * anymore.
829 */
830 vsk->peer_shutdown = SHUTDOWN_MASK;
831
832 /* We should not be sending anymore since the peer won't be
833 * there to receive, but we can still receive if there is data
834 * left in our consume queue.
835 */
836 if (vsock_stream_has_data(vsk) <= 0) {
837 if (sk->sk_state == SS_CONNECTING) {
838 /* The peer may detach from a queue pair while
839 * we are still in the connecting state, i.e.,
840 * if the peer VM is killed after attaching to
841 * a queue pair, but before we complete the
842 * handshake. In that case, we treat the detach
843 * event like a reset.
844 */
845
846 sk->sk_state = SS_UNCONNECTED;
847 sk->sk_err = ECONNRESET;
848 sk->sk_error_report(sk);
849 return;
850 }
851 sk->sk_state = SS_UNCONNECTED;
852 }
853 sk->sk_state_change(sk);
854 }
855}
856
857static void vmci_transport_peer_detach_cb(u32 sub_id,
858 const struct vmci_event_data *e_data,
859 void *client_data)
860{
861 struct sock *sk = client_data;
862 const struct vmci_event_payload_qp *e_payload;
863 struct vsock_sock *vsk;
864
865 e_payload = vmci_event_data_const_payload(e_data);
866 vsk = vsock_sk(sk);
867 if (vmci_handle_is_invalid(e_payload->handle))
868 return;
869
870 /* Same rules for locking as for peer_attach_cb(). */
871 local_bh_disable();
872 bh_lock_sock(sk);
873
874	/* XXX This is lame; we should provide a way to look up sockets by
875 * qp_handle.
876 */
877 if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
878 e_payload->handle))
879 vmci_transport_handle_detach(sk);
880
881 bh_unlock_sock(sk);
882 local_bh_enable();
883}
884
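/* On a QP_RESUMED event every connected socket is run through the detach
 * handler, presumably because queue pairs from before the suspend cannot
 * be reused after a resume.
 */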
885static void vmci_transport_qp_resumed_cb(u32 sub_id,
886 const struct vmci_event_data *e_data,
887 void *client_data)
888{
889 vsock_for_each_connected_socket(vmci_transport_handle_detach);
890}
891
892static void vmci_transport_recv_pkt_work(struct work_struct *work)
893{
894 struct vmci_transport_recv_pkt_info *recv_pkt_info;
895 struct vmci_transport_packet *pkt;
896 struct sock *sk;
897
898 recv_pkt_info =
899 container_of(work, struct vmci_transport_recv_pkt_info, work);
900 sk = recv_pkt_info->sk;
901 pkt = &recv_pkt_info->pkt;
902
903 lock_sock(sk);
904
905 switch (sk->sk_state) {
906 case SS_LISTEN:
907 vmci_transport_recv_listen(sk, pkt);
908 break;
909 case SS_CONNECTING:
910 /* Processing of pending connections for servers goes through
911 * the listening socket, so see vmci_transport_recv_listen()
912 * for that path.
913 */
914 vmci_transport_recv_connecting_client(sk, pkt);
915 break;
916 case SS_CONNECTED:
917 vmci_transport_recv_connected(sk, pkt);
918 break;
919 default:
920 /* Because this function does not run in the same context as
921 * vmci_transport_recv_stream_cb it is possible that the
922 * socket has closed. We need to let the other side know or it
923 * could be sitting in a connect and hang forever. Send a
924 * reset to prevent that.
925 */
926 vmci_transport_send_reset(sk, pkt);
927 goto out;
928 }
929
930out:
931 release_sock(sk);
932 kfree(recv_pkt_info);
933 /* Release reference obtained in the stream callback when we fetched
934 * this socket out of the bound or connected list.
935 */
936 sock_put(sk);
937}
938
939static int vmci_transport_recv_listen(struct sock *sk,
940 struct vmci_transport_packet *pkt)
941{
942 struct sock *pending;
943 struct vsock_sock *vpending;
944 int err;
945 u64 qp_size;
946 bool old_request = false;
947 bool old_pkt_proto = false;
948
949 err = 0;
950
951 /* Because we are in the listen state, we could be receiving a packet
952	 * for ourselves or for any previous connection requests we received.
953 * If it's the latter, we try to find a socket in our list of pending
954 * connections and, if we do, call the appropriate handler for the
955	 * state that socket is in. Otherwise we try to service the
956 * connection request.
957 */
958 pending = vmci_transport_get_pending(sk, pkt);
959 if (pending) {
960 lock_sock(pending);
961 switch (pending->sk_state) {
962 case SS_CONNECTING:
963 err = vmci_transport_recv_connecting_server(sk,
964 pending,
965 pkt);
966 break;
967 default:
968 vmci_transport_send_reset(pending, pkt);
969 err = -EINVAL;
970 }
971
972 if (err < 0)
973 vsock_remove_pending(sk, pending);
974
975 release_sock(pending);
976 vmci_transport_release_pending(pending);
977
978 return err;
979 }
980
981 /* The listen state only accepts connection requests. Reply with a
982 * reset unless we received a reset.
983 */
984
985 if (!(pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST ||
986 pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)) {
987 vmci_transport_reply_reset(pkt);
988 return -EINVAL;
989 }
990
991 if (pkt->u.size == 0) {
992 vmci_transport_reply_reset(pkt);
993 return -EINVAL;
994 }
995
996 /* If this socket can't accommodate this connection request, we send a
997 * reset. Otherwise we create and initialize a child socket and reply
998 * with a connection negotiation.
999 */
1000 if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog) {
1001 vmci_transport_reply_reset(pkt);
1002 return -ECONNREFUSED;
1003 }
1004
1005 pending = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
1006 sk->sk_type);
1007 if (!pending) {
1008 vmci_transport_send_reset(sk, pkt);
1009 return -ENOMEM;
1010 }
1011
1012 vpending = vsock_sk(pending);
1013
1014 vsock_addr_init(&vpending->local_addr, pkt->dg.dst.context,
1015 pkt->dst_port);
1016 vsock_addr_init(&vpending->remote_addr, pkt->dg.src.context,
1017 pkt->src_port);
1018
1019 /* If the proposed size fits within our min/max, accept it. Otherwise
1020 * propose our own size.
1021 */
1022 if (pkt->u.size >= vmci_trans(vpending)->queue_pair_min_size &&
1023 pkt->u.size <= vmci_trans(vpending)->queue_pair_max_size) {
1024 qp_size = pkt->u.size;
1025 } else {
1026 qp_size = vmci_trans(vpending)->queue_pair_size;
1027 }
1028
1029	/* Figure out whether to use old or new requests based on any
1030	 * override in effect and on the packet type sent by our peer.
1031 */
1032 if (vmci_transport_old_proto_override(&old_pkt_proto)) {
1033 old_request = old_pkt_proto;
1034 } else {
1035 if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST)
1036 old_request = true;
1037 else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)
1038 old_request = false;
1039
1040 }
1041
1042 if (old_request) {
1043 /* Handle a REQUEST (or override) */
1044 u16 version = VSOCK_PROTO_INVALID;
1045 if (vmci_transport_proto_to_notify_struct(
1046 pending, &version, true))
1047 err = vmci_transport_send_negotiate(pending, qp_size);
1048 else
1049 err = -EINVAL;
1050
1051 } else {
1052 /* Handle a REQUEST2 (or override) */
1053 int proto_int = pkt->proto;
1054 int pos;
1055 u16 active_proto_version = 0;
1056
1057		/* The list of usable protocols is the intersection of the
1058		 * protocols the client supports and the protocols we
1059		 * support.
1060 */
1061 proto_int &= vmci_transport_new_proto_supported_versions();
1062
1063 /* We choose the highest possible protocol version and use that
1064 * one.
1065 */
1066 pos = fls(proto_int);
1067 if (pos) {
1068 active_proto_version = (1 << (pos - 1));
1069 if (vmci_transport_proto_to_notify_struct(
1070 pending, &active_proto_version, false))
1071 err = vmci_transport_send_negotiate2(pending,
1072 qp_size,
1073 active_proto_version);
1074 else
1075 err = -EINVAL;
1076
1077 } else {
1078 err = -EINVAL;
1079 }
1080 }
1081
1082 if (err < 0) {
1083 vmci_transport_send_reset(sk, pkt);
1084 sock_put(pending);
1085 err = vmci_transport_error_to_vsock_error(err);
1086 goto out;
1087 }
1088
1089 vsock_add_pending(sk, pending);
1090 sk->sk_ack_backlog++;
1091
1092 pending->sk_state = SS_CONNECTING;
1093 vmci_trans(vpending)->produce_size =
1094 vmci_trans(vpending)->consume_size = qp_size;
1095 vmci_trans(vpending)->queue_pair_size = qp_size;
1096
1097 vmci_trans(vpending)->notify_ops->process_request(pending);
1098
1099 /* We might never receive another message for this socket and it's not
1100 * connected to any process, so we have to ensure it gets cleaned up
1101	 * ourselves. Our delayed work function will take care of that. Note
1102 * that we do not ever cancel this function since we have few
1103 * guarantees about its state when calling cancel_delayed_work().
1104 * Instead we hold a reference on the socket for that function and make
1105 * it capable of handling cases where it needs to do nothing but
1106 * release that reference.
1107 */
1108 vpending->listener = sk;
1109 sock_hold(sk);
1110 sock_hold(pending);
1111 INIT_DELAYED_WORK(&vpending->dwork, vsock_pending_work);
1112 schedule_delayed_work(&vpending->dwork, HZ);
1113
1114out:
1115 return err;
1116}
1117
1118static int
1119vmci_transport_recv_connecting_server(struct sock *listener,
1120 struct sock *pending,
1121 struct vmci_transport_packet *pkt)
1122{
1123 struct vsock_sock *vpending;
1124 struct vmci_handle handle;
1125 struct vmci_qp *qpair;
1126 bool is_local;
1127 u32 flags;
1128 u32 detach_sub_id;
1129 int err;
1130 int skerr;
1131
1132 vpending = vsock_sk(pending);
1133 detach_sub_id = VMCI_INVALID_ID;
1134
1135 switch (pkt->type) {
1136 case VMCI_TRANSPORT_PACKET_TYPE_OFFER:
1137 if (vmci_handle_is_invalid(pkt->u.handle)) {
1138 vmci_transport_send_reset(pending, pkt);
1139 skerr = EPROTO;
1140 err = -EINVAL;
1141 goto destroy;
1142 }
1143 break;
1144 default:
1145 /* Close and cleanup the connection. */
1146 vmci_transport_send_reset(pending, pkt);
1147 skerr = EPROTO;
1148 err = pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST ? 0 : -EINVAL;
1149 goto destroy;
1150 }
1151
1152 /* In order to complete the connection we need to attach to the offered
1153 * queue pair and send an attach notification. We also subscribe to the
1154 * detach event so we know when our peer goes away, and we do that
1155 * before attaching so we don't miss an event. If all this succeeds,
1156 * we update our state and wakeup anything waiting in accept() for a
1157 * connection.
1158 */
1159
1160 /* We don't care about attach since we ensure the other side has
1161 * attached by specifying the ATTACH_ONLY flag below.
1162 */
1163 err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
1164 vmci_transport_peer_detach_cb,
1165 pending, &detach_sub_id);
1166 if (err < VMCI_SUCCESS) {
1167 vmci_transport_send_reset(pending, pkt);
1168 err = vmci_transport_error_to_vsock_error(err);
1169 skerr = -err;
1170 goto destroy;
1171 }
1172
1173 vmci_trans(vpending)->detach_sub_id = detach_sub_id;
1174
1175 /* Now attach to the queue pair the client created. */
1176 handle = pkt->u.handle;
1177
1178 /* vpending->local_addr always has a context id so we do not need to
1179 * worry about VMADDR_CID_ANY in this case.
1180 */
1181 is_local =
1182 vpending->remote_addr.svm_cid == vpending->local_addr.svm_cid;
1183 flags = VMCI_QPFLAG_ATTACH_ONLY;
1184 flags |= is_local ? VMCI_QPFLAG_LOCAL : 0;
1185
1186 err = vmci_transport_queue_pair_alloc(
1187 &qpair,
1188 &handle,
1189 vmci_trans(vpending)->produce_size,
1190 vmci_trans(vpending)->consume_size,
1191 pkt->dg.src.context,
1192 flags,
1193 vmci_transport_is_trusted(
1194 vpending,
1195 vpending->remote_addr.svm_cid));
1196 if (err < 0) {
1197 vmci_transport_send_reset(pending, pkt);
1198 skerr = -err;
1199 goto destroy;
1200 }
1201
1202 vmci_trans(vpending)->qp_handle = handle;
1203 vmci_trans(vpending)->qpair = qpair;
1204
1205 /* When we send the attach message, we must be ready to handle incoming
1206 * control messages on the newly connected socket. So we move the
1207 * pending socket to the connected state before sending the attach
1208 * message. Otherwise, an incoming packet triggered by the attach being
1209 * received by the peer may be processed concurrently with what happens
1210 * below after sending the attach message, and that incoming packet
1211 * will find the listening socket instead of the (currently) pending
1212 * socket. Note that enqueueing the socket increments the reference
1213 * count, so even if a reset comes before the connection is accepted,
1214 * the socket will be valid until it is removed from the queue.
1215 *
1216 * If we fail sending the attach below, we remove the socket from the
1217 * connected list and move the socket to SS_UNCONNECTED before
1218	 * releasing the lock, so any pending slow-path processing of an incoming
1219 * packet will not see the socket in the connected state in that case.
1220 */
1221 pending->sk_state = SS_CONNECTED;
1222
1223 vsock_insert_connected(vpending);
1224
1225 /* Notify our peer of our attach. */
1226 err = vmci_transport_send_attach(pending, handle);
1227 if (err < 0) {
1228 vsock_remove_connected(vpending);
1229 pr_err("Could not send attach\n");
1230 vmci_transport_send_reset(pending, pkt);
1231 err = vmci_transport_error_to_vsock_error(err);
1232 skerr = -err;
1233 goto destroy;
1234 }
1235
1236 /* We have a connection. Move the now connected socket from the
1237 * listener's pending list to the accept queue so callers of accept()
1238 * can find it.
1239 */
1240 vsock_remove_pending(listener, pending);
1241 vsock_enqueue_accept(listener, pending);
1242
1243	/* Callers of accept() will be waiting on the listening socket, not
1244 * the pending socket.
1245 */
1246 listener->sk_state_change(listener);
1247
1248 return 0;
1249
1250destroy:
1251 pending->sk_err = skerr;
1252 pending->sk_state = SS_UNCONNECTED;
1253	/* As long as we drop our reference, all necessary cleanup will happen
1254 * when the cleanup function drops its reference and our destruct
1255 * implementation is called. Note that since the listen handler will
1256 * remove pending from the pending list upon our failure, the cleanup
1257 * function won't drop the additional reference, which is why we do it
1258 * here.
1259 */
1260 sock_put(pending);
1261
1262 return err;
1263}
1264
1265static int
1266vmci_transport_recv_connecting_client(struct sock *sk,
1267 struct vmci_transport_packet *pkt)
1268{
1269 struct vsock_sock *vsk;
1270 int err;
1271 int skerr;
1272
1273 vsk = vsock_sk(sk);
1274
1275 switch (pkt->type) {
1276 case VMCI_TRANSPORT_PACKET_TYPE_ATTACH:
1277 if (vmci_handle_is_invalid(pkt->u.handle) ||
1278 !vmci_handle_is_equal(pkt->u.handle,
1279 vmci_trans(vsk)->qp_handle)) {
1280 skerr = EPROTO;
1281 err = -EINVAL;
1282 goto destroy;
1283 }
1284
1285 /* Signify the socket is connected and wakeup the waiter in
1286 * connect(). Also place the socket in the connected table for
1287 * accounting (it can already be found since it's in the bound
1288 * table).
1289 */
1290 sk->sk_state = SS_CONNECTED;
1291 sk->sk_socket->state = SS_CONNECTED;
1292 vsock_insert_connected(vsk);
1293 sk->sk_state_change(sk);
1294
1295 break;
1296 case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE:
1297 case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2:
1298 if (pkt->u.size == 0
1299 || pkt->dg.src.context != vsk->remote_addr.svm_cid
1300 || pkt->src_port != vsk->remote_addr.svm_port
1301 || !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)
1302 || vmci_trans(vsk)->qpair
1303 || vmci_trans(vsk)->produce_size != 0
1304 || vmci_trans(vsk)->consume_size != 0
1305 || vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID
1306 || vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
1307 skerr = EPROTO;
1308 err = -EINVAL;
1309
1310 goto destroy;
1311 }
1312
1313 err = vmci_transport_recv_connecting_client_negotiate(sk, pkt);
1314 if (err) {
1315 skerr = -err;
1316 goto destroy;
1317 }
1318
1319 break;
1320 case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
1321 err = vmci_transport_recv_connecting_client_invalid(sk, pkt);
1322 if (err) {
1323 skerr = -err;
1324 goto destroy;
1325 }
1326
1327 break;
1328 case VMCI_TRANSPORT_PACKET_TYPE_RST:
1329		/* Older versions of the Linux code (WS 6.5 / ESX 4.0) used to
1330 * continue processing here after they sent an INVALID packet.
1331 * This meant that we got a RST after the INVALID. We ignore a
1332 * RST after an INVALID. The common code doesn't send the RST
1333 * ... so we can hang if an old version of the common code
1334 * fails between getting a REQUEST and sending an OFFER back.
1335 * Not much we can do about it... except hope that it doesn't
1336 * happen.
1337 */
1338 if (vsk->ignore_connecting_rst) {
1339 vsk->ignore_connecting_rst = false;
1340 } else {
1341 skerr = ECONNRESET;
1342 err = 0;
1343 goto destroy;
1344 }
1345
1346 break;
1347 default:
1348 /* Close and cleanup the connection. */
1349 skerr = EPROTO;
1350 err = -EINVAL;
1351 goto destroy;
1352 }
1353
1354 return 0;
1355
1356destroy:
1357 vmci_transport_send_reset(sk, pkt);
1358
1359 sk->sk_state = SS_UNCONNECTED;
1360 sk->sk_err = skerr;
1361 sk->sk_error_report(sk);
1362 return err;
1363}
1364
1365static int vmci_transport_recv_connecting_client_negotiate(
1366 struct sock *sk,
1367 struct vmci_transport_packet *pkt)
1368{
1369 int err;
1370 struct vsock_sock *vsk;
1371 struct vmci_handle handle;
1372 struct vmci_qp *qpair;
1373 u32 attach_sub_id;
1374 u32 detach_sub_id;
1375 bool is_local;
1376 u32 flags;
1377 bool old_proto = true;
1378 bool old_pkt_proto;
1379 u16 version;
1380
1381 vsk = vsock_sk(sk);
1382 handle = VMCI_INVALID_HANDLE;
1383 attach_sub_id = VMCI_INVALID_ID;
1384 detach_sub_id = VMCI_INVALID_ID;
1385
1386 /* If we have gotten here then we should be past the point where old
1387	 * Linux vsock could have sent the bogus RST.
1388 */
1389 vsk->sent_request = false;
1390 vsk->ignore_connecting_rst = false;
1391
1392 /* Verify that we're OK with the proposed queue pair size */
1393 if (pkt->u.size < vmci_trans(vsk)->queue_pair_min_size ||
1394 pkt->u.size > vmci_trans(vsk)->queue_pair_max_size) {
1395 err = -EINVAL;
1396 goto destroy;
1397 }
1398
1399 /* At this point we know the CID the peer is using to talk to us. */
1400
1401 if (vsk->local_addr.svm_cid == VMADDR_CID_ANY)
1402 vsk->local_addr.svm_cid = pkt->dg.dst.context;
1403
1404 /* Setup the notify ops to be the highest supported version that both
1405 * the server and the client support.
1406 */
1407
1408 if (vmci_transport_old_proto_override(&old_pkt_proto)) {
1409 old_proto = old_pkt_proto;
1410 } else {
1411 if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE)
1412 old_proto = true;
1413 else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2)
1414 old_proto = false;
1415
1416 }
1417
1418 if (old_proto)
1419 version = VSOCK_PROTO_INVALID;
1420 else
1421 version = pkt->proto;
1422
1423 if (!vmci_transport_proto_to_notify_struct(sk, &version, old_proto)) {
1424 err = -EINVAL;
1425 goto destroy;
1426 }
1427
1428 /* Subscribe to attach and detach events first.
1429 *
1430 * XXX We attach once for each queue pair created for now so it is easy
1431 * to find the socket (it's provided), but later we should only
1432	 * subscribe once and add a way to look up sockets by queue pair handle.
1433 */
1434 err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_ATTACH,
1435 vmci_transport_peer_attach_cb,
1436 sk, &attach_sub_id);
1437 if (err < VMCI_SUCCESS) {
1438 err = vmci_transport_error_to_vsock_error(err);
1439 goto destroy;
1440 }
1441
1442 err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
1443 vmci_transport_peer_detach_cb,
1444 sk, &detach_sub_id);
1445 if (err < VMCI_SUCCESS) {
1446 err = vmci_transport_error_to_vsock_error(err);
1447 goto destroy;
1448 }
1449
1450 /* Make VMCI select the handle for us. */
1451 handle = VMCI_INVALID_HANDLE;
1452 is_local = vsk->remote_addr.svm_cid == vsk->local_addr.svm_cid;
1453 flags = is_local ? VMCI_QPFLAG_LOCAL : 0;
1454
1455 err = vmci_transport_queue_pair_alloc(&qpair,
1456 &handle,
1457 pkt->u.size,
1458 pkt->u.size,
1459 vsk->remote_addr.svm_cid,
1460 flags,
1461 vmci_transport_is_trusted(
1462 vsk,
1463					       vsk->remote_addr.svm_cid));
1465 if (err < 0)
1466 goto destroy;
1467
1468 err = vmci_transport_send_qp_offer(sk, handle);
1469 if (err < 0) {
1470 err = vmci_transport_error_to_vsock_error(err);
1471 goto destroy;
1472 }
1473
1474 vmci_trans(vsk)->qp_handle = handle;
1475 vmci_trans(vsk)->qpair = qpair;
1476
1477 vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size =
1478 pkt->u.size;
1479
1480 vmci_trans(vsk)->attach_sub_id = attach_sub_id;
1481 vmci_trans(vsk)->detach_sub_id = detach_sub_id;
1482
1483 vmci_trans(vsk)->notify_ops->process_negotiate(sk);
1484
1485 return 0;
1486
1487destroy:
1488 if (attach_sub_id != VMCI_INVALID_ID)
1489 vmci_event_unsubscribe(attach_sub_id);
1490
1491 if (detach_sub_id != VMCI_INVALID_ID)
1492 vmci_event_unsubscribe(detach_sub_id);
1493
1494 if (!vmci_handle_is_invalid(handle))
1495 vmci_qpair_detach(&qpair);
1496
1497 return err;
1498}
1499
1500static int
1501vmci_transport_recv_connecting_client_invalid(struct sock *sk,
1502 struct vmci_transport_packet *pkt)
1503{
1504 int err = 0;
1505 struct vsock_sock *vsk = vsock_sk(sk);
1506
1507 if (vsk->sent_request) {
1508 vsk->sent_request = false;
1509 vsk->ignore_connecting_rst = true;
1510
1511 err = vmci_transport_send_conn_request(
1512 sk, vmci_trans(vsk)->queue_pair_size);
1513 if (err < 0)
1514 err = vmci_transport_error_to_vsock_error(err);
1515 else
1516 err = 0;
1517
1518 }
1519
1520 return err;
1521}
1522
1523static int vmci_transport_recv_connected(struct sock *sk,
1524 struct vmci_transport_packet *pkt)
1525{
1526 struct vsock_sock *vsk;
1527 bool pkt_processed = false;
1528
1529 /* In cases where we are closing the connection, it's sufficient to
1530 * mark the state change (and maybe error) and wake up any waiting
1531 * threads. Since this is a connected socket, it's owned by a user
1532 * process and will be cleaned up when the failure is passed back on
1533 * the current or next system call. Our system call implementations
1534 * must therefore check for error and state changes on entry and when
1535 * being awoken.
1536 */
1537 switch (pkt->type) {
1538 case VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN:
1539 if (pkt->u.mode) {
1540 vsk = vsock_sk(sk);
1541
1542 vsk->peer_shutdown |= pkt->u.mode;
1543 sk->sk_state_change(sk);
1544 }
1545 break;
1546
1547 case VMCI_TRANSPORT_PACKET_TYPE_RST:
1548 vsk = vsock_sk(sk);
1549 /* It is possible that we sent our peer a message (e.g a
1550 * WAITING_READ) right before we got notified that the peer had
1551 * detached. If that happens then we can get a RST pkt back
1552 * from our peer even though there is data available for us to
1553 * read. In that case, don't shutdown the socket completely but
1554 * instead allow the local client to finish reading data off
1555 * the queuepair. Always treat a RST pkt in connected mode like
1556 * a clean shutdown.
1557 */
1558 sock_set_flag(sk, SOCK_DONE);
1559 vsk->peer_shutdown = SHUTDOWN_MASK;
1560 if (vsock_stream_has_data(vsk) <= 0)
1561 sk->sk_state = SS_DISCONNECTING;
1562
1563 sk->sk_state_change(sk);
1564 break;
1565
1566 default:
1567 vsk = vsock_sk(sk);
1568 vmci_trans(vsk)->notify_ops->handle_notify_pkt(
1569 sk, pkt, false, NULL, NULL,
1570 &pkt_processed);
1571 if (!pkt_processed)
1572 return -EINVAL;
1573
1574 break;
1575 }
1576
1577 return 0;
1578}
1579
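/* Allocate the transport-private state. A new socket inherits the queue
 * pair size limits from the parent socket when one is supplied (e.g. for
 * sockets spawned from a listener), and otherwise starts from the
 * compiled-in defaults.
 */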
1580static int vmci_transport_socket_init(struct vsock_sock *vsk,
1581 struct vsock_sock *psk)
1582{
1583 vsk->trans = kmalloc(sizeof(struct vmci_transport), GFP_KERNEL);
1584 if (!vsk->trans)
1585 return -ENOMEM;
1586
1587 vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE;
1588 vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
1589 vmci_trans(vsk)->qpair = NULL;
1590 vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size = 0;
1591 vmci_trans(vsk)->attach_sub_id = vmci_trans(vsk)->detach_sub_id =
1592 VMCI_INVALID_ID;
1593 vmci_trans(vsk)->notify_ops = NULL;
1594 if (psk) {
1595 vmci_trans(vsk)->queue_pair_size =
1596 vmci_trans(psk)->queue_pair_size;
1597 vmci_trans(vsk)->queue_pair_min_size =
1598 vmci_trans(psk)->queue_pair_min_size;
1599 vmci_trans(vsk)->queue_pair_max_size =
1600 vmci_trans(psk)->queue_pair_max_size;
1601 } else {
1602 vmci_trans(vsk)->queue_pair_size =
1603 VMCI_TRANSPORT_DEFAULT_QP_SIZE;
1604 vmci_trans(vsk)->queue_pair_min_size =
1605 VMCI_TRANSPORT_DEFAULT_QP_SIZE_MIN;
1606 vmci_trans(vsk)->queue_pair_max_size =
1607 VMCI_TRANSPORT_DEFAULT_QP_SIZE_MAX;
1608 }
1609
1610 return 0;
1611}
1612
1613static void vmci_transport_destruct(struct vsock_sock *vsk)
1614{
1615 if (vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID) {
1616 vmci_event_unsubscribe(vmci_trans(vsk)->attach_sub_id);
1617 vmci_trans(vsk)->attach_sub_id = VMCI_INVALID_ID;
1618 }
1619
1620 if (vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
1621 vmci_event_unsubscribe(vmci_trans(vsk)->detach_sub_id);
1622 vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
1623 }
1624
1625 if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) {
1626 vmci_qpair_detach(&vmci_trans(vsk)->qpair);
1627 vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
1628 vmci_trans(vsk)->produce_size = 0;
1629 vmci_trans(vsk)->consume_size = 0;
1630 }
1631
1632 if (vmci_trans(vsk)->notify_ops)
1633 vmci_trans(vsk)->notify_ops->socket_destruct(vsk);
1634
1635 kfree(vsk->trans);
1636 vsk->trans = NULL;
1637}
1638
1639static void vmci_transport_release(struct vsock_sock *vsk)
1640{
1641 if (!vmci_handle_is_invalid(vmci_trans(vsk)->dg_handle)) {
1642 vmci_datagram_destroy_handle(vmci_trans(vsk)->dg_handle);
1643 vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE;
1644 }
1645}
1646
1647static int vmci_transport_dgram_bind(struct vsock_sock *vsk,
1648 struct sockaddr_vm *addr)
1649{
1650 u32 port;
1651 u32 flags;
1652 int err;
1653
1654 /* VMCI will select a resource ID for us if we provide
1655 * VMCI_INVALID_ID.
1656 */
1657 port = addr->svm_port == VMADDR_PORT_ANY ?
1658 VMCI_INVALID_ID : addr->svm_port;
1659
1660 if (port <= LAST_RESERVED_PORT && !capable(CAP_NET_BIND_SERVICE))
1661 return -EACCES;
1662
1663 flags = addr->svm_cid == VMADDR_CID_ANY ?
1664 VMCI_FLAG_ANYCID_DG_HND : 0;
1665
1666 err = vmci_transport_datagram_create_hnd(port, flags,
1667 vmci_transport_recv_dgram_cb,
1668 &vsk->sk,
1669 &vmci_trans(vsk)->dg_handle);
1670 if (err < VMCI_SUCCESS)
1671 return vmci_transport_error_to_vsock_error(err);
1672 vsock_addr_init(&vsk->local_addr, addr->svm_cid,
1673 vmci_trans(vsk)->dg_handle.resource);
1674
1675 return 0;
1676}
1677
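/* Send a datagram by prepending a struct vmci_datagram header to the user
 * payload. On success the header size is subtracted from
 * vmci_datagram_send()'s result, so the caller sees only payload bytes.
 */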
1678static int vmci_transport_dgram_enqueue(
1679 struct vsock_sock *vsk,
1680 struct sockaddr_vm *remote_addr,
1681 struct iovec *iov,
1682 size_t len)
1683{
1684 int err;
1685 struct vmci_datagram *dg;
1686
1687 if (len > VMCI_MAX_DG_PAYLOAD_SIZE)
1688 return -EMSGSIZE;
1689
1690 if (!vmci_transport_allow_dgram(vsk, remote_addr->svm_cid))
1691 return -EPERM;
1692
1693 /* Allocate a buffer for the user's message and our packet header. */
1694 dg = kmalloc(len + sizeof(*dg), GFP_KERNEL);
1695 if (!dg)
1696 return -ENOMEM;
1697
1698 memcpy_fromiovec(VMCI_DG_PAYLOAD(dg), iov, len);
1699
1700 dg->dst = vmci_make_handle(remote_addr->svm_cid,
1701 remote_addr->svm_port);
1702 dg->src = vmci_make_handle(vsk->local_addr.svm_cid,
1703 vsk->local_addr.svm_port);
1704 dg->payload_size = len;
1705
1706 err = vmci_datagram_send(dg);
1707 kfree(dg);
1708 if (err < 0)
1709 return vmci_transport_error_to_vsock_error(err);
1710
1711 return err - sizeof(*dg);
1712}
1713
1714static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
1715 struct vsock_sock *vsk,
1716 struct msghdr *msg, size_t len,
1717 int flags)
1718{
1719 int err;
1720 int noblock;
1721 struct vmci_datagram *dg;
1722 size_t payload_len;
1723 struct sk_buff *skb;
1724
1725 noblock = flags & MSG_DONTWAIT;
1726
1727 if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
1728 return -EOPNOTSUPP;
1729
1730 /* Retrieve the head sk_buff from the socket's receive queue. */
1731 err = 0;
1732 skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
1733 if (err)
1734 return err;
1735
1736 if (!skb)
1737 return -EAGAIN;
1738
1739 dg = (struct vmci_datagram *)skb->data;
1740 if (!dg)
1741 /* err is 0, meaning we read zero bytes. */
1742 goto out;
1743
1744 payload_len = dg->payload_size;
1745 /* Ensure the sk_buff matches the payload size claimed in the packet. */
1746 if (payload_len != skb->len - sizeof(*dg)) {
1747 err = -EINVAL;
1748 goto out;
1749 }
1750
1751 if (payload_len > len) {
1752 payload_len = len;
1753 msg->msg_flags |= MSG_TRUNC;
1754 }
1755
1756 /* Place the datagram payload in the user's iovec. */
1757 err = skb_copy_datagram_iovec(skb, sizeof(*dg), msg->msg_iov,
1758 payload_len);
1759 if (err)
1760 goto out;
1761
1762 msg->msg_namelen = 0;
1763 if (msg->msg_name) {
1764 struct sockaddr_vm *vm_addr;
1765
1766 /* Provide the address of the sender. */
1767 vm_addr = (struct sockaddr_vm *)msg->msg_name;
1768 vsock_addr_init(vm_addr, dg->src.context, dg->src.resource);
1769 msg->msg_namelen = sizeof(*vm_addr);
1770 }
1771 err = payload_len;
1772
1773out:
1774 skb_free_datagram(&vsk->sk, skb);
1775 return err;
1776}
1777
1778static bool vmci_transport_dgram_allow(u32 cid, u32 port)
1779{
1780 if (cid == VMADDR_CID_HYPERVISOR) {
1781 /* Registrations of PBRPC Servers do not modify VMX/Hypervisor
1782 * state and are allowed.
1783 */
1784 return port == VMCI_UNITY_PBRPC_REGISTER;
1785 }
1786
1787 return true;
1788}
1789
1790static int vmci_transport_connect(struct vsock_sock *vsk)
1791{
1792 int err;
1793 bool old_pkt_proto = false;
1794 struct sock *sk = &vsk->sk;
1795
1796 if (vmci_transport_old_proto_override(&old_pkt_proto) &&
1797 old_pkt_proto) {
1798 err = vmci_transport_send_conn_request(
1799 sk, vmci_trans(vsk)->queue_pair_size);
1800 if (err < 0) {
1801 sk->sk_state = SS_UNCONNECTED;
1802 return err;
1803 }
1804 } else {
1805 int supported_proto_versions =
1806 vmci_transport_new_proto_supported_versions();
1807 err = vmci_transport_send_conn_request2(
1808 sk, vmci_trans(vsk)->queue_pair_size,
1809 supported_proto_versions);
1810 if (err < 0) {
1811 sk->sk_state = SS_UNCONNECTED;
1812 return err;
1813 }
1814
1815 vsk->sent_request = true;
1816 }
1817
1818 return err;
1819}
1820
1821static ssize_t vmci_transport_stream_dequeue(
1822 struct vsock_sock *vsk,
1823 struct iovec *iov,
1824 size_t len,
1825 int flags)
1826{
1827 if (flags & MSG_PEEK)
1828 return vmci_qpair_peekv(vmci_trans(vsk)->qpair, iov, len, 0);
1829 else
1830 return vmci_qpair_dequev(vmci_trans(vsk)->qpair, iov, len, 0);
1831}
1832
1833static ssize_t vmci_transport_stream_enqueue(
1834 struct vsock_sock *vsk,
1835 struct iovec *iov,
1836 size_t len)
1837{
1838 return vmci_qpair_enquev(vmci_trans(vsk)->qpair, iov, len, 0);
1839}
1840
1841static s64 vmci_transport_stream_has_data(struct vsock_sock *vsk)
1842{
1843 return vmci_qpair_consume_buf_ready(vmci_trans(vsk)->qpair);
1844}
1845
1846static s64 vmci_transport_stream_has_space(struct vsock_sock *vsk)
1847{
1848 return vmci_qpair_produce_free_space(vmci_trans(vsk)->qpair);
1849}
1850
1851static u64 vmci_transport_stream_rcvhiwat(struct vsock_sock *vsk)
1852{
1853 return vmci_trans(vsk)->consume_size;
1854}
1855
1856static bool vmci_transport_stream_is_active(struct vsock_sock *vsk)
1857{
1858 return !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle);
1859}
1860
1861static u64 vmci_transport_get_buffer_size(struct vsock_sock *vsk)
1862{
1863 return vmci_trans(vsk)->queue_pair_size;
1864}
1865
1866static u64 vmci_transport_get_min_buffer_size(struct vsock_sock *vsk)
1867{
1868 return vmci_trans(vsk)->queue_pair_min_size;
1869}
1870
1871static u64 vmci_transport_get_max_buffer_size(struct vsock_sock *vsk)
1872{
1873 return vmci_trans(vsk)->queue_pair_max_size;
1874}
1875
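/* The setters below maintain the invariant
 * queue_pair_min_size <= queue_pair_size <= queue_pair_max_size by pulling
 * the neighboring bounds along whenever a new value would cross them. For
 * example (hypothetical values), with min=64K, size=256K and max=1M,
 * setting the buffer size to 2M also raises the max to 2M.
 */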
1876static void vmci_transport_set_buffer_size(struct vsock_sock *vsk, u64 val)
1877{
1878 if (val < vmci_trans(vsk)->queue_pair_min_size)
1879 vmci_trans(vsk)->queue_pair_min_size = val;
1880 if (val > vmci_trans(vsk)->queue_pair_max_size)
1881 vmci_trans(vsk)->queue_pair_max_size = val;
1882 vmci_trans(vsk)->queue_pair_size = val;
1883}
1884
1885static void vmci_transport_set_min_buffer_size(struct vsock_sock *vsk,
1886 u64 val)
1887{
1888 if (val > vmci_trans(vsk)->queue_pair_size)
1889 vmci_trans(vsk)->queue_pair_size = val;
1890 vmci_trans(vsk)->queue_pair_min_size = val;
1891}
1892
1893static void vmci_transport_set_max_buffer_size(struct vsock_sock *vsk,
1894 u64 val)
1895{
1896 if (val < vmci_trans(vsk)->queue_pair_size)
1897 vmci_trans(vsk)->queue_pair_size = val;
1898 vmci_trans(vsk)->queue_pair_max_size = val;
1899}
1900
1901static int vmci_transport_notify_poll_in(
1902 struct vsock_sock *vsk,
1903 size_t target,
1904 bool *data_ready_now)
1905{
1906 return vmci_trans(vsk)->notify_ops->poll_in(
1907 &vsk->sk, target, data_ready_now);
1908}
1909
1910static int vmci_transport_notify_poll_out(
1911 struct vsock_sock *vsk,
1912 size_t target,
1913 bool *space_available_now)
1914{
1915 return vmci_trans(vsk)->notify_ops->poll_out(
1916 &vsk->sk, target, space_available_now);
1917}
1918
1919static int vmci_transport_notify_recv_init(
1920 struct vsock_sock *vsk,
1921 size_t target,
1922 struct vsock_transport_recv_notify_data *data)
1923{
1924 return vmci_trans(vsk)->notify_ops->recv_init(
1925 &vsk->sk, target,
1926 (struct vmci_transport_recv_notify_data *)data);
1927}
1928
1929static int vmci_transport_notify_recv_pre_block(
1930 struct vsock_sock *vsk,
1931 size_t target,
1932 struct vsock_transport_recv_notify_data *data)
1933{
1934 return vmci_trans(vsk)->notify_ops->recv_pre_block(
1935 &vsk->sk, target,
1936 (struct vmci_transport_recv_notify_data *)data);
1937}
1938
1939static int vmci_transport_notify_recv_pre_dequeue(
1940 struct vsock_sock *vsk,
1941 size_t target,
1942 struct vsock_transport_recv_notify_data *data)
1943{
1944 return vmci_trans(vsk)->notify_ops->recv_pre_dequeue(
1945 &vsk->sk, target,
1946 (struct vmci_transport_recv_notify_data *)data);
1947}
1948
1949static int vmci_transport_notify_recv_post_dequeue(
1950 struct vsock_sock *vsk,
1951 size_t target,
1952 ssize_t copied,
1953 bool data_read,
1954 struct vsock_transport_recv_notify_data *data)
1955{
1956 return vmci_trans(vsk)->notify_ops->recv_post_dequeue(
1957 &vsk->sk, target, copied, data_read,
1958 (struct vmci_transport_recv_notify_data *)data);
1959}
1960
1961static int vmci_transport_notify_send_init(
1962 struct vsock_sock *vsk,
1963 struct vsock_transport_send_notify_data *data)
1964{
1965 return vmci_trans(vsk)->notify_ops->send_init(
1966 &vsk->sk,
1967 (struct vmci_transport_send_notify_data *)data);
1968}
1969
1970static int vmci_transport_notify_send_pre_block(
1971 struct vsock_sock *vsk,
1972 struct vsock_transport_send_notify_data *data)
1973{
1974 return vmci_trans(vsk)->notify_ops->send_pre_block(
1975 &vsk->sk,
1976 (struct vmci_transport_send_notify_data *)data);
1977}
1978
1979static int vmci_transport_notify_send_pre_enqueue(
1980 struct vsock_sock *vsk,
1981 struct vsock_transport_send_notify_data *data)
1982{
1983 return vmci_trans(vsk)->notify_ops->send_pre_enqueue(
1984 &vsk->sk,
1985 (struct vmci_transport_send_notify_data *)data);
1986}
1987
1988static int vmci_transport_notify_send_post_enqueue(
1989 struct vsock_sock *vsk,
1990 ssize_t written,
1991 struct vsock_transport_send_notify_data *data)
1992{
1993 return vmci_trans(vsk)->notify_ops->send_post_enqueue(
1994 &vsk->sk, written,
1995 (struct vmci_transport_send_notify_data *)data);
1996}
1997
1998static bool vmci_transport_old_proto_override(bool *old_pkt_proto)
1999{
2000 if (PROTOCOL_OVERRIDE != -1) {
2001 if (PROTOCOL_OVERRIDE == 0)
2002 *old_pkt_proto = true;
2003 else
2004 *old_pkt_proto = false;
2005
2006 pr_info("Proto override in use\n");
2007 return true;
2008 }
2009
2010 return false;
2011}
2012
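/* Bind a socket to its notify ops: the original packet-based protocol when
 * the peer speaks the old wire format, otherwise the negotiated version
 * (currently only VSOCK_PROTO_PKT_ON_NOTIFY).
 */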
2013static bool vmci_transport_proto_to_notify_struct(struct sock *sk,
2014 u16 *proto,
2015 bool old_pkt_proto)
2016{
2017 struct vsock_sock *vsk = vsock_sk(sk);
2018
2019 if (old_pkt_proto) {
2020 if (*proto != VSOCK_PROTO_INVALID) {
2021 pr_err("Can't set both an old and new protocol\n");
2022 return false;
2023 }
2024 vmci_trans(vsk)->notify_ops = &vmci_transport_notify_pkt_ops;
2025 goto exit;
2026 }
2027
2028 switch (*proto) {
2029 case VSOCK_PROTO_PKT_ON_NOTIFY:
2030 vmci_trans(vsk)->notify_ops =
2031 &vmci_transport_notify_pkt_q_state_ops;
2032 break;
2033 default:
2034 pr_err("Unknown notify protocol version\n");
2035 return false;
2036 }
2037
2038exit:
2039 vmci_trans(vsk)->notify_ops->socket_init(sk);
2040 return true;
2041}
2042
2043static u16 vmci_transport_new_proto_supported_versions(void)
2044{
2045 if (PROTOCOL_OVERRIDE != -1)
2046 return PROTOCOL_OVERRIDE;
2047
2048 return VSOCK_PROTO_ALL_SUPPORTED;
2049}
2050
2051static u32 vmci_transport_get_local_cid(void)
2052{
2053 return vmci_get_context_id();
2054}
2055
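/* Entry points handed to the core af_vsock layer. Everything above is
 * reached through this table once vsock_core_init() registers it.
 */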
2056static struct vsock_transport vmci_transport = {
2057 .init = vmci_transport_socket_init,
2058 .destruct = vmci_transport_destruct,
2059 .release = vmci_transport_release,
2060 .connect = vmci_transport_connect,
2061 .dgram_bind = vmci_transport_dgram_bind,
2062 .dgram_dequeue = vmci_transport_dgram_dequeue,
2063 .dgram_enqueue = vmci_transport_dgram_enqueue,
2064 .dgram_allow = vmci_transport_dgram_allow,
2065 .stream_dequeue = vmci_transport_stream_dequeue,
2066 .stream_enqueue = vmci_transport_stream_enqueue,
2067 .stream_has_data = vmci_transport_stream_has_data,
2068 .stream_has_space = vmci_transport_stream_has_space,
2069 .stream_rcvhiwat = vmci_transport_stream_rcvhiwat,
2070 .stream_is_active = vmci_transport_stream_is_active,
2071 .stream_allow = vmci_transport_stream_allow,
2072 .notify_poll_in = vmci_transport_notify_poll_in,
2073 .notify_poll_out = vmci_transport_notify_poll_out,
2074 .notify_recv_init = vmci_transport_notify_recv_init,
2075 .notify_recv_pre_block = vmci_transport_notify_recv_pre_block,
2076 .notify_recv_pre_dequeue = vmci_transport_notify_recv_pre_dequeue,
2077 .notify_recv_post_dequeue = vmci_transport_notify_recv_post_dequeue,
2078 .notify_send_init = vmci_transport_notify_send_init,
2079 .notify_send_pre_block = vmci_transport_notify_send_pre_block,
2080 .notify_send_pre_enqueue = vmci_transport_notify_send_pre_enqueue,
2081 .notify_send_post_enqueue = vmci_transport_notify_send_post_enqueue,
2082 .shutdown = vmci_transport_shutdown,
2083 .set_buffer_size = vmci_transport_set_buffer_size,
2084 .set_min_buffer_size = vmci_transport_set_min_buffer_size,
2085 .set_max_buffer_size = vmci_transport_set_max_buffer_size,
2086 .get_buffer_size = vmci_transport_get_buffer_size,
2087 .get_min_buffer_size = vmci_transport_get_min_buffer_size,
2088 .get_max_buffer_size = vmci_transport_get_max_buffer_size,
2089 .get_local_cid = vmci_transport_get_local_cid,
2090};
2091
2092static int __init vmci_transport_init(void)
2093{
2094 int err;
2095
2096 /* Create the datagram handle that we will use to send and receive all
2097 * VSocket control messages for this context.
2098 */
2099 err = vmci_transport_datagram_create_hnd(VMCI_TRANSPORT_PACKET_RID,
2100 VMCI_FLAG_ANYCID_DG_HND,
2101 vmci_transport_recv_stream_cb,
2102 NULL,
2103 &vmci_transport_stream_handle);
2104 if (err < VMCI_SUCCESS) {
2105 pr_err("Unable to create datagram handle. (%d)\n", err);
2106 return vmci_transport_error_to_vsock_error(err);
2107 }
2108
2109 err = vmci_event_subscribe(VMCI_EVENT_QP_RESUMED,
2110 vmci_transport_qp_resumed_cb,
2111 NULL, &vmci_transport_qp_resumed_sub_id);
2112 if (err < VMCI_SUCCESS) {
2113 pr_err("Unable to subscribe to resumed event. (%d)\n", err);
2114 err = vmci_transport_error_to_vsock_error(err);
2115 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
2116 goto err_destroy_stream_handle;
2117 }
2118
2119 err = vsock_core_init(&vmci_transport);
2120 if (err < 0)
2121 goto err_unsubscribe;
2122
2123 return 0;
2124
2125err_unsubscribe:
2126 vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id);
2127err_destroy_stream_handle:
2128 vmci_datagram_destroy_handle(vmci_transport_stream_handle);
2129 return err;
2130}
2131module_init(vmci_transport_init);
2132
2133static void __exit vmci_transport_exit(void)
2134{
2135 if (!vmci_handle_is_invalid(vmci_transport_stream_handle)) {
2136 if (vmci_datagram_destroy_handle(
2137 vmci_transport_stream_handle) != VMCI_SUCCESS)
2138 pr_err("Couldn't destroy datagram handle\n");
2139 vmci_transport_stream_handle = VMCI_INVALID_HANDLE;
2140 }
2141
2142 if (vmci_transport_qp_resumed_sub_id != VMCI_INVALID_ID) {
2143 vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id);
2144 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
2145 }
2146
2147 vsock_core_exit();
2148}
2149module_exit(vmci_transport_exit);
2150
2151MODULE_AUTHOR("VMware, Inc.");
2152MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
2153MODULE_LICENSE("GPL v2");
2154MODULE_ALIAS("vmware_vsock");
2155MODULE_ALIAS_NETPROTO(PF_VSOCK);
diff --git a/net/vmw_vsock/vmci_transport.h b/net/vmw_vsock/vmci_transport.h
new file mode 100644
index 000000000000..1bf991803ec0
--- /dev/null
+++ b/net/vmw_vsock/vmci_transport.h
@@ -0,0 +1,139 @@
1/*
2 * VMware vSockets Driver
3 *
4 * Copyright (C) 2013 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#ifndef _VMCI_TRANSPORT_H_
17#define _VMCI_TRANSPORT_H_
18
19#include <linux/vmw_vmci_defs.h>
20#include <linux/vmw_vmci_api.h>
21
22#include "vsock_addr.h"
23#include "af_vsock.h"
24
25/* If the packet format changes in a release then this should change too. */
26#define VMCI_TRANSPORT_PACKET_VERSION 1
27
28/* The resource ID on which control packets are sent. */
29#define VMCI_TRANSPORT_PACKET_RID 1
30
31#define VSOCK_PROTO_INVALID 0
32#define VSOCK_PROTO_PKT_ON_NOTIFY (1 << 0)
33#define VSOCK_PROTO_ALL_SUPPORTED (VSOCK_PROTO_PKT_ON_NOTIFY)
34
35#define vmci_trans(_vsk) ((struct vmci_transport *)((_vsk)->trans))
36
37enum vmci_transport_packet_type {
38 VMCI_TRANSPORT_PACKET_TYPE_INVALID = 0,
39 VMCI_TRANSPORT_PACKET_TYPE_REQUEST,
40 VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE,
41 VMCI_TRANSPORT_PACKET_TYPE_OFFER,
42 VMCI_TRANSPORT_PACKET_TYPE_ATTACH,
43 VMCI_TRANSPORT_PACKET_TYPE_WROTE,
44 VMCI_TRANSPORT_PACKET_TYPE_READ,
45 VMCI_TRANSPORT_PACKET_TYPE_RST,
46 VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN,
47 VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE,
48 VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ,
49 VMCI_TRANSPORT_PACKET_TYPE_REQUEST2,
50 VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2,
51 VMCI_TRANSPORT_PACKET_TYPE_MAX
52};
53
54struct vmci_transport_waiting_info {
55 u64 generation;
56 u64 offset;
57};
58
59/* Control packet type for STREAM sockets. DGRAMs have no control packets or
60 * special packet headers for data packets; they are just raw VMCI DGRAM
61 * messages. For STREAMs, control packets are sent over the control channel
62 * while data is written and read directly from queue pairs with no packet
63 * format.
64 */
65struct vmci_transport_packet {
66 struct vmci_datagram dg;
67 u8 version;
68 u8 type;
69 u16 proto;
70 u32 src_port;
71 u32 dst_port;
72 u32 _reserved2;
73 union {
74 u64 size;
75 u64 mode;
76 struct vmci_handle handle;
77 struct vmci_transport_waiting_info wait;
78 } u;
79};
80
81struct vmci_transport_notify_pkt {
82 u64 write_notify_window;
83 u64 write_notify_min_window;
84 bool peer_waiting_read;
85 bool peer_waiting_write;
86 bool peer_waiting_write_detected;
87 bool sent_waiting_read;
88 bool sent_waiting_write;
89 struct vmci_transport_waiting_info peer_waiting_read_info;
90 struct vmci_transport_waiting_info peer_waiting_write_info;
91 u64 produce_q_generation;
92 u64 consume_q_generation;
93};
94
95struct vmci_transport_notify_pkt_q_state {
96 u64 write_notify_window;
97 u64 write_notify_min_window;
98 bool peer_waiting_write;
99 bool peer_waiting_write_detected;
100};
101
102union vmci_transport_notify {
103 struct vmci_transport_notify_pkt pkt;
104 struct vmci_transport_notify_pkt_q_state pkt_q_state;
105};
106
107/* Our transport-specific data. */
108struct vmci_transport {
109 /* For DGRAMs. */
110 struct vmci_handle dg_handle;
111 /* For STREAMs. */
112 struct vmci_handle qp_handle;
113 struct vmci_qp *qpair;
114 u64 produce_size;
115 u64 consume_size;
116 u64 queue_pair_size;
117 u64 queue_pair_min_size;
118 u64 queue_pair_max_size;
119 u32 attach_sub_id;
120 u32 detach_sub_id;
121 union vmci_transport_notify notify;
122 struct vmci_transport_notify_ops *notify_ops;
123};
124
125int vmci_transport_register(void);
126void vmci_transport_unregister(void);
127
128int vmci_transport_send_wrote_bh(struct sockaddr_vm *dst,
129 struct sockaddr_vm *src);
130int vmci_transport_send_read_bh(struct sockaddr_vm *dst,
131 struct sockaddr_vm *src);
132int vmci_transport_send_wrote(struct sock *sk);
133int vmci_transport_send_read(struct sock *sk);
134int vmci_transport_send_waiting_write(struct sock *sk,
135 struct vmci_transport_waiting_info *wait);
136int vmci_transport_send_waiting_read(struct sock *sk,
137 struct vmci_transport_waiting_info *wait);
138
139#endif
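Since struct vmci_transport_packet embeds the VMCI datagram header as its first member, a control packet is just a fixed-size datagram whose payload starts right after dg. A rough sketch of how a sender could fill one in, assuming the struct vmci_datagram fields (dst, src, payload_size) and vmci_make_handle() from <linux/vmw_vmci_defs.h>; the helper itself is illustrative:

static void example_fill_packet(struct vmci_transport_packet *pkt,
				struct sockaddr_vm *src,
				struct sockaddr_vm *dst,
				u8 type, u64 size)
{
	memset(pkt, 0, sizeof(*pkt));

	/* Address the datagram at the peer's (CID, control RID) pair;
	 * the payload is everything after the embedded header.
	 */
	pkt->dg.dst = vmci_make_handle(dst->svm_cid,
				       VMCI_TRANSPORT_PACKET_RID);
	pkt->dg.src = vmci_make_handle(src->svm_cid,
				       VMCI_TRANSPORT_PACKET_RID);
	pkt->dg.payload_size = sizeof(*pkt) - sizeof(pkt->dg);

	pkt->version = VMCI_TRANSPORT_PACKET_VERSION;
	pkt->type = type;
	pkt->src_port = src->svm_port;
	pkt->dst_port = dst->svm_port;
	pkt->u.size = size;	/* meaning depends on pkt->type */
}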
diff --git a/net/vmw_vsock/vmci_transport_notify.c b/net/vmw_vsock/vmci_transport_notify.c
new file mode 100644
index 000000000000..9a730744e7bc
--- /dev/null
+++ b/net/vmw_vsock/vmci_transport_notify.c
@@ -0,0 +1,680 @@
1/*
2 * VMware vSockets Driver
3 *
4 * Copyright (C) 2009-2013 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#include <linux/types.h>
17#include <linux/socket.h>
18#include <linux/stddef.h>
19#include <net/sock.h>
20
21#include "vmci_transport_notify.h"
22
23#define PKT_FIELD(vsk, field_name) (vmci_trans(vsk)->notify.pkt.field_name)
24
25static bool vmci_transport_notify_waiting_write(struct vsock_sock *vsk)
26{
27#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
28 bool retval;
29 u64 notify_limit;
30
31 if (!PKT_FIELD(vsk, peer_waiting_write))
32 return false;
33
34#ifdef VSOCK_OPTIMIZATION_FLOW_CONTROL
35 /* When the sender blocks, we take that as a sign that the sender is
36 * faster than the receiver. To reduce the transmit rate of the sender,
37 * we delay the sending of the read notification by decreasing the
38 * write_notify_window. The notification is delayed until the number of
39 * bytes used in the queue drops below the write_notify_window.
40 */
41
42 if (!PKT_FIELD(vsk, peer_waiting_write_detected)) {
43 PKT_FIELD(vsk, peer_waiting_write_detected) = true;
44 if (PKT_FIELD(vsk, write_notify_window) < PAGE_SIZE) {
45 PKT_FIELD(vsk, write_notify_window) =
46 PKT_FIELD(vsk, write_notify_min_window);
47 } else {
48 PKT_FIELD(vsk, write_notify_window) -= PAGE_SIZE;
49 if (PKT_FIELD(vsk, write_notify_window) <
50 PKT_FIELD(vsk, write_notify_min_window))
51 PKT_FIELD(vsk, write_notify_window) =
52 PKT_FIELD(vsk, write_notify_min_window);
53
54 }
55 }
56 notify_limit = vmci_trans(vsk)->consume_size -
57 PKT_FIELD(vsk, write_notify_window);
58#else
59 notify_limit = 0;
60#endif
61
62 /* For now we ignore the wait information and just see if the free
63 * space exceeds the notify limit. Note that improving this function
64 * to be more intelligent will not require a protocol change and will
65 * retain compatibility between endpoints with mixed versions of this
66 * function.
67 *
68 * The notify_limit is used to delay notifications in the case where
69 * flow control is enabled. Below the test is expressed in terms of
70 * free space in the queue: if free_space > ConsumeSize -
71 * write_notify_window then notify. An alternate way of expressing this
72 * is to rewrite the expression to use the data ready in the receive
73 * queue: if write_notify_window > bufferReady then notify, as
74 * free_space == ConsumeSize - bufferReady.
75 */
76 retval = vmci_qpair_consume_free_space(vmci_trans(vsk)->qpair) >
77 notify_limit;
78#ifdef VSOCK_OPTIMIZATION_FLOW_CONTROL
79 if (retval) {
80 /*
81 * Once we notify the peer, we reset the detected flag so the
82 * next wait will again cause a decrease in the window size.
83 */
84
85 PKT_FIELD(vsk, peer_waiting_write_detected) = false;
86 }
87#endif
88 return retval;
89#else
90 return true;
91#endif
92}
93
94static bool vmci_transport_notify_waiting_read(struct vsock_sock *vsk)
95{
96#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
97 if (!PKT_FIELD(vsk, peer_waiting_read))
98 return false;
99
100 /* For now we ignore the wait information and just see if there is any
101 * data for our peer to read. Note that improving this function to be
102 * more intelligent will not require a protocol change and will retain
103 * compatibility between endpoints with mixed versions of this
104 * function.
105 */
106 return vmci_qpair_produce_buf_ready(vmci_trans(vsk)->qpair) > 0;
107#else
108 return true;
109#endif
110}
111
112static void
113vmci_transport_handle_waiting_read(struct sock *sk,
114 struct vmci_transport_packet *pkt,
115 bool bottom_half,
116 struct sockaddr_vm *dst,
117 struct sockaddr_vm *src)
118{
119#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
120 struct vsock_sock *vsk;
121
122 vsk = vsock_sk(sk);
123
124 PKT_FIELD(vsk, peer_waiting_read) = true;
125 memcpy(&PKT_FIELD(vsk, peer_waiting_read_info), &pkt->u.wait,
126 sizeof(PKT_FIELD(vsk, peer_waiting_read_info)));
127
128 if (vmci_transport_notify_waiting_read(vsk)) {
129 bool sent;
130
131 if (bottom_half)
132 sent = vmci_transport_send_wrote_bh(dst, src) > 0;
133 else
134 sent = vmci_transport_send_wrote(sk) > 0;
135
136 if (sent)
137 PKT_FIELD(vsk, peer_waiting_read) = false;
138 }
139#endif
140}
141
142static void
143vmci_transport_handle_waiting_write(struct sock *sk,
144 struct vmci_transport_packet *pkt,
145 bool bottom_half,
146 struct sockaddr_vm *dst,
147 struct sockaddr_vm *src)
148{
149#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
150 struct vsock_sock *vsk;
151
152 vsk = vsock_sk(sk);
153
154 PKT_FIELD(vsk, peer_waiting_write) = true;
155 memcpy(&PKT_FIELD(vsk, peer_waiting_write_info), &pkt->u.wait,
156 sizeof(PKT_FIELD(vsk, peer_waiting_write_info)));
157
158 if (vmci_transport_notify_waiting_write(vsk)) {
159 bool sent;
160
161 if (bottom_half)
162 sent = vmci_transport_send_read_bh(dst, src) > 0;
163 else
164 sent = vmci_transport_send_read(sk) > 0;
165
166 if (sent)
167 PKT_FIELD(vsk, peer_waiting_write) = false;
168 }
169#endif
170}
171
172static void
173vmci_transport_handle_read(struct sock *sk,
174 struct vmci_transport_packet *pkt,
175 bool bottom_half,
176 struct sockaddr_vm *dst, struct sockaddr_vm *src)
177{
178#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
179 struct vsock_sock *vsk;
180
181 vsk = vsock_sk(sk);
182 PKT_FIELD(vsk, sent_waiting_write) = false;
183#endif
184
185 sk->sk_write_space(sk);
186}
187
188static bool send_waiting_read(struct sock *sk, u64 room_needed)
189{
190#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
191 struct vsock_sock *vsk;
192 struct vmci_transport_waiting_info waiting_info;
193 u64 tail;
194 u64 head;
195 u64 room_left;
196 bool ret;
197
198 vsk = vsock_sk(sk);
199
200 if (PKT_FIELD(vsk, sent_waiting_read))
201 return true;
202
203 if (PKT_FIELD(vsk, write_notify_window) <
204 vmci_trans(vsk)->consume_size)
205 PKT_FIELD(vsk, write_notify_window) =
206 min(PKT_FIELD(vsk, write_notify_window) + PAGE_SIZE,
207 vmci_trans(vsk)->consume_size);
208
209 vmci_qpair_get_consume_indexes(vmci_trans(vsk)->qpair, &tail, &head);
210 room_left = vmci_trans(vsk)->consume_size - head;
211 if (room_needed >= room_left) {
212 waiting_info.offset = room_needed - room_left;
213 waiting_info.generation =
214 PKT_FIELD(vsk, consume_q_generation) + 1;
215 } else {
216 waiting_info.offset = head + room_needed;
217 waiting_info.generation = PKT_FIELD(vsk, consume_q_generation);
218 }
219
220 ret = vmci_transport_send_waiting_read(sk, &waiting_info) > 0;
221 if (ret)
222 PKT_FIELD(vsk, sent_waiting_read) = true;
223
224 return ret;
225#else
226 return true;
227#endif
228}
229
230static bool send_waiting_write(struct sock *sk, u64 room_needed)
231{
232#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
233 struct vsock_sock *vsk;
234 struct vmci_transport_waiting_info waiting_info;
235 u64 tail;
236 u64 head;
237 u64 room_left;
238 bool ret;
239
240 vsk = vsock_sk(sk);
241
242 if (PKT_FIELD(vsk, sent_waiting_write))
243 return true;
244
245 vmci_qpair_get_produce_indexes(vmci_trans(vsk)->qpair, &tail, &head);
246 room_left = vmci_trans(vsk)->produce_size - tail;
247 if (room_needed + 1 >= room_left) {
248 /* Wraps around to current generation. */
249 waiting_info.offset = room_needed + 1 - room_left;
250 waiting_info.generation = PKT_FIELD(vsk, produce_q_generation);
251 } else {
252 waiting_info.offset = tail + room_needed + 1;
253 waiting_info.generation =
254 PKT_FIELD(vsk, produce_q_generation) - 1;
255 }
256
257 ret = vmci_transport_send_waiting_write(sk, &waiting_info) > 0;
258 if (ret)
259 PKT_FIELD(vsk, sent_waiting_write) = true;
260
261 return ret;
262#else
263 return true;
264#endif
265}
266
267static int vmci_transport_send_read_notification(struct sock *sk)
268{
269 struct vsock_sock *vsk;
270 bool sent_read;
271 unsigned int retries;
272 int err;
273
274 vsk = vsock_sk(sk);
275 sent_read = false;
276 retries = 0;
277 err = 0;
278
279 if (vmci_transport_notify_waiting_write(vsk)) {
280 /* Notify the peer that we have read, retrying the send on
281 * failure up to our maximum value. XXX For now we just log
282 * the failure, but later we should schedule a work item to
283 * handle the resend until it succeeds. That would require
284 * keeping track of work items in the vsk and cleaning them up
285 * upon socket close.
286 */
287 while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
288 !sent_read &&
289 retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
290 err = vmci_transport_send_read(sk);
291 if (err >= 0)
292 sent_read = true;
293
294 retries++;
295 }
296
297		if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS && !sent_read)
298 pr_err("%p unable to send read notify to peer\n", sk);
299 else
300#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
301 PKT_FIELD(vsk, peer_waiting_write) = false;
302#endif
303
304 }
305 return err;
306}
307
308static void
309vmci_transport_handle_wrote(struct sock *sk,
310 struct vmci_transport_packet *pkt,
311 bool bottom_half,
312 struct sockaddr_vm *dst, struct sockaddr_vm *src)
313{
314#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
315 struct vsock_sock *vsk = vsock_sk(sk);
316 PKT_FIELD(vsk, sent_waiting_read) = false;
317#endif
318 sk->sk_data_ready(sk, 0);
319}
320
321static void vmci_transport_notify_pkt_socket_init(struct sock *sk)
322{
323 struct vsock_sock *vsk = vsock_sk(sk);
324
325 PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE;
326 PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE;
327 PKT_FIELD(vsk, peer_waiting_read) = false;
328 PKT_FIELD(vsk, peer_waiting_write) = false;
329 PKT_FIELD(vsk, peer_waiting_write_detected) = false;
330 PKT_FIELD(vsk, sent_waiting_read) = false;
331 PKT_FIELD(vsk, sent_waiting_write) = false;
332 PKT_FIELD(vsk, produce_q_generation) = 0;
333 PKT_FIELD(vsk, consume_q_generation) = 0;
334
335 memset(&PKT_FIELD(vsk, peer_waiting_read_info), 0,
336 sizeof(PKT_FIELD(vsk, peer_waiting_read_info)));
337 memset(&PKT_FIELD(vsk, peer_waiting_write_info), 0,
338 sizeof(PKT_FIELD(vsk, peer_waiting_write_info)));
339}
340
341static void vmci_transport_notify_pkt_socket_destruct(struct vsock_sock *vsk)
342{
343}
344
345static int
346vmci_transport_notify_pkt_poll_in(struct sock *sk,
347 size_t target, bool *data_ready_now)
348{
349 struct vsock_sock *vsk = vsock_sk(sk);
350
351 if (vsock_stream_has_data(vsk)) {
352 *data_ready_now = true;
353 } else {
354 /* We can't read right now because there is nothing in the
355 * queue. Ask for notifications when there is something to
356 * read.
357 */
358 if (sk->sk_state == SS_CONNECTED) {
359 if (!send_waiting_read(sk, 1))
360 return -1;
361
362 }
363 *data_ready_now = false;
364 }
365
366 return 0;
367}
368
369static int
370vmci_transport_notify_pkt_poll_out(struct sock *sk,
371 size_t target, bool *space_avail_now)
372{
373 s64 produce_q_free_space;
374 struct vsock_sock *vsk = vsock_sk(sk);
375
376 produce_q_free_space = vsock_stream_has_space(vsk);
377 if (produce_q_free_space > 0) {
378 *space_avail_now = true;
379 return 0;
380 } else if (produce_q_free_space == 0) {
381 /* This is a connected socket but we can't currently send data.
382 * Notify the peer that we are waiting if the queue is full. We
383 * only send a waiting write if the queue is full because
384 * otherwise we end up in an infinite WAITING_WRITE, READ,
385 * WAITING_WRITE, READ, etc. loop. Treat failing to send the
386 * notification as a socket error, passing that back through
387 * the mask.
388 */
389 if (!send_waiting_write(sk, 1))
390 return -1;
391
392 *space_avail_now = false;
393 }
394
395 return 0;
396}
397
398static int
399vmci_transport_notify_pkt_recv_init(
400 struct sock *sk,
401 size_t target,
402 struct vmci_transport_recv_notify_data *data)
403{
404 struct vsock_sock *vsk = vsock_sk(sk);
405
406#ifdef VSOCK_OPTIMIZATION_WAITING_NOTIFY
407 data->consume_head = 0;
408 data->produce_tail = 0;
409#ifdef VSOCK_OPTIMIZATION_FLOW_CONTROL
410 data->notify_on_block = false;
411
412 if (PKT_FIELD(vsk, write_notify_min_window) < target + 1) {
413 PKT_FIELD(vsk, write_notify_min_window) = target + 1;
414 if (PKT_FIELD(vsk, write_notify_window) <
415 PKT_FIELD(vsk, write_notify_min_window)) {
416 /* If the current window is smaller than the new
417 * minimal window size, we need to reevaluate whether
418 * we need to notify the sender. If the number of ready
419 * bytes is smaller than the new window, we need to
420 * send a notification to the sender before we block.
421 */
422
423 PKT_FIELD(vsk, write_notify_window) =
424 PKT_FIELD(vsk, write_notify_min_window);
425 data->notify_on_block = true;
426 }
427 }
428#endif
429#endif
430
431 return 0;
432}
433
434static int
435vmci_transport_notify_pkt_recv_pre_block(
436 struct sock *sk,
437 size_t target,
438 struct vmci_transport_recv_notify_data *data)
439{
440 int err = 0;
441
442 /* Notify our peer that we are waiting for data to read. */
443 if (!send_waiting_read(sk, target)) {
444 err = -EHOSTUNREACH;
445 return err;
446 }
447#ifdef VSOCK_OPTIMIZATION_FLOW_CONTROL
448 if (data->notify_on_block) {
449 err = vmci_transport_send_read_notification(sk);
450 if (err < 0)
451 return err;
452
453 data->notify_on_block = false;
454 }
455#endif
456
457 return err;
458}
459
460static int
461vmci_transport_notify_pkt_recv_pre_dequeue(
462 struct sock *sk,
463 size_t target,
464 struct vmci_transport_recv_notify_data *data)
465{
466 struct vsock_sock *vsk = vsock_sk(sk);
467
468 /* Now consume up to len bytes from the queue. Note that since we have
469 * the socket locked, we can copy at least the number of ready bytes.
470 */
471#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
472 vmci_qpair_get_consume_indexes(vmci_trans(vsk)->qpair,
473 &data->produce_tail,
474 &data->consume_head);
475#endif
476
477 return 0;
478}
479
480static int
481vmci_transport_notify_pkt_recv_post_dequeue(
482 struct sock *sk,
483 size_t target,
484 ssize_t copied,
485 bool data_read,
486 struct vmci_transport_recv_notify_data *data)
487{
488 struct vsock_sock *vsk;
489 int err;
490
491 vsk = vsock_sk(sk);
492 err = 0;
493
494 if (data_read) {
495#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
496 /* Detect a wrap-around to maintain queue generation. Note
497 * that this is safe since we hold the socket lock across the
498 * two queue pair operations.
499 */
500 if (copied >=
501 vmci_trans(vsk)->consume_size - data->consume_head)
502 PKT_FIELD(vsk, consume_q_generation)++;
503#endif
504
505 err = vmci_transport_send_read_notification(sk);
506 if (err < 0)
507 return err;
508
509 }
510 return err;
511}
512
513static int
514vmci_transport_notify_pkt_send_init(
515 struct sock *sk,
516 struct vmci_transport_send_notify_data *data)
517{
518#ifdef VSOCK_OPTIMIZATION_WAITING_NOTIFY
519 data->consume_head = 0;
520 data->produce_tail = 0;
521#endif
522
523 return 0;
524}
525
526static int
527vmci_transport_notify_pkt_send_pre_block(
528 struct sock *sk,
529 struct vmci_transport_send_notify_data *data)
530{
531 /* Notify our peer that we are waiting for room to write. */
532 if (!send_waiting_write(sk, 1))
533 return -EHOSTUNREACH;
534
535 return 0;
536}
537
538static int
539vmci_transport_notify_pkt_send_pre_enqueue(
540 struct sock *sk,
541 struct vmci_transport_send_notify_data *data)
542{
543 struct vsock_sock *vsk = vsock_sk(sk);
544
545#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
546 vmci_qpair_get_produce_indexes(vmci_trans(vsk)->qpair,
547 &data->produce_tail,
548 &data->consume_head);
549#endif
550
551 return 0;
552}
553
554static int
555vmci_transport_notify_pkt_send_post_enqueue(
556 struct sock *sk,
557 ssize_t written,
558 struct vmci_transport_send_notify_data *data)
559{
560 int err = 0;
561 struct vsock_sock *vsk;
562 bool sent_wrote = false;
563 int retries = 0;
564
565 vsk = vsock_sk(sk);
566
567#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
568 /* Detect a wrap-around to maintain queue generation. Note that this
569 * is safe since we hold the socket lock across the two queue pair
570 * operations.
571 */
572 if (written >= vmci_trans(vsk)->produce_size - data->produce_tail)
573 PKT_FIELD(vsk, produce_q_generation)++;
574
575#endif
576
577 if (vmci_transport_notify_waiting_read(vsk)) {
578 /* Notify the peer that we have written, retrying the send on
579 * failure up to our maximum value. See the XXX comment in
580 * vmci_transport_send_read_notification() above for potential
581 * improvements.
582 */
583 while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
584 !sent_wrote &&
585 retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
586 err = vmci_transport_send_wrote(sk);
587 if (err >= 0)
588 sent_wrote = true;
589
590 retries++;
591 }
592
593		if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS && !sent_wrote) {
594 pr_err("%p unable to send wrote notify to peer\n", sk);
595 return err;
596 } else {
597#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
598 PKT_FIELD(vsk, peer_waiting_read) = false;
599#endif
600 }
601 }
602 return err;
603}
604
605static void
606vmci_transport_notify_pkt_handle_pkt(
607 struct sock *sk,
608 struct vmci_transport_packet *pkt,
609 bool bottom_half,
610 struct sockaddr_vm *dst,
611 struct sockaddr_vm *src, bool *pkt_processed)
612{
613 bool processed = false;
614
615 switch (pkt->type) {
616 case VMCI_TRANSPORT_PACKET_TYPE_WROTE:
617 vmci_transport_handle_wrote(sk, pkt, bottom_half, dst, src);
618 processed = true;
619 break;
620 case VMCI_TRANSPORT_PACKET_TYPE_READ:
621 vmci_transport_handle_read(sk, pkt, bottom_half, dst, src);
622 processed = true;
623 break;
624 case VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE:
625 vmci_transport_handle_waiting_write(sk, pkt, bottom_half,
626 dst, src);
627 processed = true;
628 break;
629
630 case VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ:
631 vmci_transport_handle_waiting_read(sk, pkt, bottom_half,
632 dst, src);
633 processed = true;
634 break;
635 }
636
637 if (pkt_processed)
638 *pkt_processed = processed;
639}
640
641static void vmci_transport_notify_pkt_process_request(struct sock *sk)
642{
643 struct vsock_sock *vsk = vsock_sk(sk);
644
645 PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
646 if (vmci_trans(vsk)->consume_size <
647 PKT_FIELD(vsk, write_notify_min_window))
648 PKT_FIELD(vsk, write_notify_min_window) =
649 vmci_trans(vsk)->consume_size;
650}
651
652static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk)
653{
654 struct vsock_sock *vsk = vsock_sk(sk);
655
656 PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
657 if (vmci_trans(vsk)->consume_size <
658 PKT_FIELD(vsk, write_notify_min_window))
659 PKT_FIELD(vsk, write_notify_min_window) =
660 vmci_trans(vsk)->consume_size;
661}
662
663/* Socket control packet based operations. */
664struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops = {
665 vmci_transport_notify_pkt_socket_init,
666 vmci_transport_notify_pkt_socket_destruct,
667 vmci_transport_notify_pkt_poll_in,
668 vmci_transport_notify_pkt_poll_out,
669 vmci_transport_notify_pkt_handle_pkt,
670 vmci_transport_notify_pkt_recv_init,
671 vmci_transport_notify_pkt_recv_pre_block,
672 vmci_transport_notify_pkt_recv_pre_dequeue,
673 vmci_transport_notify_pkt_recv_post_dequeue,
674 vmci_transport_notify_pkt_send_init,
675 vmci_transport_notify_pkt_send_pre_block,
676 vmci_transport_notify_pkt_send_pre_enqueue,
677 vmci_transport_notify_pkt_send_post_enqueue,
678 vmci_transport_notify_pkt_process_request,
679 vmci_transport_notify_pkt_process_negotiate,
680};
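To make the window decay in vmci_transport_notify_waiting_write() concrete, here is a worked sketch assuming PAGE_SIZE == 4096 and consume_size == 65536 (the window starts at consume_size after negotiation, per process_request/process_negotiate above):

/*
 * window    notify_limit = consume_size - window    READ sent when free
 *                                                   space exceeds
 * 65536     0                                       0 (immediately)
 * 61440     4096                                    4096
 * 57344     8192                                    8192
 *  ...       ...                                     ...
 *  4096     61440                                   61440 (nearly drained)
 *
 * Each time the peer reports blocking again, the window shrinks by one
 * page (never below write_notify_min_window), so the READ notification
 * is withheld until more of the consume queue has drained, throttling a
 * sender that keeps outrunning the receiver.
 */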
diff --git a/net/vmw_vsock/vmci_transport_notify.h b/net/vmw_vsock/vmci_transport_notify.h
new file mode 100644
index 000000000000..7df793249b6c
--- /dev/null
+++ b/net/vmw_vsock/vmci_transport_notify.h
@@ -0,0 +1,83 @@
1/*
2 * VMware vSockets Driver
3 *
4 * Copyright (C) 2009-2013 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#ifndef __VMCI_TRANSPORT_NOTIFY_H__
17#define __VMCI_TRANSPORT_NOTIFY_H__
18
19#include <linux/types.h>
20#include <linux/vmw_vmci_defs.h>
21#include <linux/vmw_vmci_api.h>
22#include <linux/vm_sockets.h>
23
24#include "vmci_transport.h"
25
26/* Comment this out to compare with old protocol. */
27#define VSOCK_OPTIMIZATION_WAITING_NOTIFY 1
28#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
29/* Comment this out to remove flow control for "new" protocol */
30#define VSOCK_OPTIMIZATION_FLOW_CONTROL 1
31#endif
32
33#define VMCI_TRANSPORT_MAX_DGRAM_RESENDS 10
34
35struct vmci_transport_recv_notify_data {
36 u64 consume_head;
37 u64 produce_tail;
38 bool notify_on_block;
39};
40
41struct vmci_transport_send_notify_data {
42 u64 consume_head;
43 u64 produce_tail;
44};
45
46/* Socket notification callbacks. */
47struct vmci_transport_notify_ops {
48 void (*socket_init) (struct sock *sk);
49 void (*socket_destruct) (struct vsock_sock *vsk);
50 int (*poll_in) (struct sock *sk, size_t target,
51 bool *data_ready_now);
52 int (*poll_out) (struct sock *sk, size_t target,
53 bool *space_avail_now);
54 void (*handle_notify_pkt) (struct sock *sk,
55 struct vmci_transport_packet *pkt,
56 bool bottom_half, struct sockaddr_vm *dst,
57 struct sockaddr_vm *src,
58 bool *pkt_processed);
59 int (*recv_init) (struct sock *sk, size_t target,
60 struct vmci_transport_recv_notify_data *data);
61 int (*recv_pre_block) (struct sock *sk, size_t target,
62 struct vmci_transport_recv_notify_data *data);
63 int (*recv_pre_dequeue) (struct sock *sk, size_t target,
64 struct vmci_transport_recv_notify_data *data);
65 int (*recv_post_dequeue) (struct sock *sk, size_t target,
66 ssize_t copied, bool data_read,
67 struct vmci_transport_recv_notify_data *data);
68 int (*send_init) (struct sock *sk,
69 struct vmci_transport_send_notify_data *data);
70 int (*send_pre_block) (struct sock *sk,
71 struct vmci_transport_send_notify_data *data);
72 int (*send_pre_enqueue) (struct sock *sk,
73 struct vmci_transport_send_notify_data *data);
74 int (*send_post_enqueue) (struct sock *sk, ssize_t written,
75 struct vmci_transport_send_notify_data *data);
76 void (*process_request) (struct sock *sk);
77 void (*process_negotiate) (struct sock *sk);
78};
79
80extern struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops;
81extern struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops;
82
83#endif /* __VMCI_TRANSPORT_NOTIFY_H__ */
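Callers do not invoke these handlers directly; struct vmci_transport (see vmci_transport.h) carries a notify_ops pointer that is set to one of the two tables declared above, and all notification work is dispatched through it. A minimal sketch of a call site (the wrapper name is illustrative):

static int example_notify_poll_in(struct sock *sk, size_t target,
				  bool *data_ready_now)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* Dispatch to whichever protocol variant was negotiated:
	 * vmci_transport_notify_pkt_ops (waiting-info protocol) or
	 * vmci_transport_notify_pkt_q_state_ops (queue-state protocol).
	 */
	return vmci_trans(vsk)->notify_ops->poll_in(sk, target,
						    data_ready_now);
}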
diff --git a/net/vmw_vsock/vmci_transport_notify_qstate.c b/net/vmw_vsock/vmci_transport_notify_qstate.c
new file mode 100644
index 000000000000..622bd7aa1016
--- /dev/null
+++ b/net/vmw_vsock/vmci_transport_notify_qstate.c
@@ -0,0 +1,438 @@
1/*
2 * VMware vSockets Driver
3 *
4 * Copyright (C) 2009-2013 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#include <linux/types.h>
17#include <linux/socket.h>
18#include <linux/stddef.h>
19#include <net/sock.h>
20
21#include "vmci_transport_notify.h"
22
23#define PKT_FIELD(vsk, field_name) \
24 (vmci_trans(vsk)->notify.pkt_q_state.field_name)
25
26static bool vmci_transport_notify_waiting_write(struct vsock_sock *vsk)
27{
28 bool retval;
29 u64 notify_limit;
30
31 if (!PKT_FIELD(vsk, peer_waiting_write))
32 return false;
33
34 /* When the sender blocks, we take that as a sign that the sender is
35 * faster than the receiver. To reduce the transmit rate of the sender,
36 * we delay the sending of the read notification by decreasing the
37 * write_notify_window. The notification is delayed until the number of
38 * bytes used in the queue drops below the write_notify_window.
39 */
40
41 if (!PKT_FIELD(vsk, peer_waiting_write_detected)) {
42 PKT_FIELD(vsk, peer_waiting_write_detected) = true;
43 if (PKT_FIELD(vsk, write_notify_window) < PAGE_SIZE) {
44 PKT_FIELD(vsk, write_notify_window) =
45 PKT_FIELD(vsk, write_notify_min_window);
46 } else {
47 PKT_FIELD(vsk, write_notify_window) -= PAGE_SIZE;
48 if (PKT_FIELD(vsk, write_notify_window) <
49 PKT_FIELD(vsk, write_notify_min_window))
50 PKT_FIELD(vsk, write_notify_window) =
51 PKT_FIELD(vsk, write_notify_min_window);
52
53 }
54 }
55 notify_limit = vmci_trans(vsk)->consume_size -
56 PKT_FIELD(vsk, write_notify_window);
57
58 /* The notify_limit is used to delay notifications in the case where
59 * flow control is enabled. Below the test is expressed in terms of
60 * free space in the queue: if free_space > ConsumeSize -
61 * write_notify_window then notify. An alternate way of expressing this
62 * is to rewrite the expression to use the data ready in the receive
63 * queue: if write_notify_window > bufferReady then notify, as
64 * free_space == ConsumeSize - bufferReady.
65 */
66
67 retval = vmci_qpair_consume_free_space(vmci_trans(vsk)->qpair) >
68 notify_limit;
69
70 if (retval) {
71 /* Once we notify the peer, we reset the detected flag so the
72 * next wait will again cause a decrease in the window size.
73 */
74
75 PKT_FIELD(vsk, peer_waiting_write_detected) = false;
76 }
77 return retval;
78}
79
80static void
81vmci_transport_handle_read(struct sock *sk,
82 struct vmci_transport_packet *pkt,
83 bool bottom_half,
84 struct sockaddr_vm *dst, struct sockaddr_vm *src)
85{
86 sk->sk_write_space(sk);
87}
88
89static void
90vmci_transport_handle_wrote(struct sock *sk,
91 struct vmci_transport_packet *pkt,
92 bool bottom_half,
93 struct sockaddr_vm *dst, struct sockaddr_vm *src)
94{
95 sk->sk_data_ready(sk, 0);
96}
97
98static void vsock_block_update_write_window(struct sock *sk)
99{
100 struct vsock_sock *vsk = vsock_sk(sk);
101
102 if (PKT_FIELD(vsk, write_notify_window) < vmci_trans(vsk)->consume_size)
103 PKT_FIELD(vsk, write_notify_window) =
104 min(PKT_FIELD(vsk, write_notify_window) + PAGE_SIZE,
105 vmci_trans(vsk)->consume_size);
106}
107
108static int vmci_transport_send_read_notification(struct sock *sk)
109{
110 struct vsock_sock *vsk;
111 bool sent_read;
112 unsigned int retries;
113 int err;
114
115 vsk = vsock_sk(sk);
116 sent_read = false;
117 retries = 0;
118 err = 0;
119
120 if (vmci_transport_notify_waiting_write(vsk)) {
121 /* Notify the peer that we have read, retrying the send on
122 * failure up to our maximum value. XXX For now we just log
123 * the failure, but later we should schedule a work item to
124 * handle the resend until it succeeds. That would require
125 * keeping track of work items in the vsk and cleaning them up
126 * upon socket close.
127 */
128 while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
129 !sent_read &&
130 retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
131 err = vmci_transport_send_read(sk);
132 if (err >= 0)
133 sent_read = true;
134
135 retries++;
136 }
137
138 if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS && !sent_read)
139 pr_err("%p unable to send read notification to peer\n",
140 sk);
141 else
142 PKT_FIELD(vsk, peer_waiting_write) = false;
143
144 }
145 return err;
146}
147
148static void vmci_transport_notify_pkt_socket_init(struct sock *sk)
149{
150 struct vsock_sock *vsk = vsock_sk(sk);
151
152 PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE;
153 PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE;
154 PKT_FIELD(vsk, peer_waiting_write) = false;
155 PKT_FIELD(vsk, peer_waiting_write_detected) = false;
156}
157
158static void vmci_transport_notify_pkt_socket_destruct(struct vsock_sock *vsk)
159{
160 PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE;
161 PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE;
162 PKT_FIELD(vsk, peer_waiting_write) = false;
163 PKT_FIELD(vsk, peer_waiting_write_detected) = false;
164}
165
166static int
167vmci_transport_notify_pkt_poll_in(struct sock *sk,
168 size_t target, bool *data_ready_now)
169{
170 struct vsock_sock *vsk = vsock_sk(sk);
171
172 if (vsock_stream_has_data(vsk)) {
173 *data_ready_now = true;
174 } else {
175 /* We can't read right now because there is nothing in the
176 * queue. Ask for notifications when there is something to
177 * read.
178 */
179 if (sk->sk_state == SS_CONNECTED)
180 vsock_block_update_write_window(sk);
181 *data_ready_now = false;
182 }
183
184 return 0;
185}
186
187static int
188vmci_transport_notify_pkt_poll_out(struct sock *sk,
189 size_t target, bool *space_avail_now)
190{
191 s64 produce_q_free_space;
192 struct vsock_sock *vsk = vsock_sk(sk);
193
194 produce_q_free_space = vsock_stream_has_space(vsk);
195 if (produce_q_free_space > 0) {
196 *space_avail_now = true;
197 return 0;
198 } else if (produce_q_free_space == 0) {
199 /* This is a connected socket but we can't currently send data.
200 * Nothing else to do.
201 */
202 *space_avail_now = false;
203 }
204
205 return 0;
206}
207
208static int
209vmci_transport_notify_pkt_recv_init(
210 struct sock *sk,
211 size_t target,
212 struct vmci_transport_recv_notify_data *data)
213{
214 struct vsock_sock *vsk = vsock_sk(sk);
215
216 data->consume_head = 0;
217 data->produce_tail = 0;
218 data->notify_on_block = false;
219
220 if (PKT_FIELD(vsk, write_notify_min_window) < target + 1) {
221 PKT_FIELD(vsk, write_notify_min_window) = target + 1;
222 if (PKT_FIELD(vsk, write_notify_window) <
223 PKT_FIELD(vsk, write_notify_min_window)) {
224 /* If the current window is smaller than the new
225 * minimal window size, we need to reevaluate whether
226 * we need to notify the sender. If the number of ready
227 * bytes is smaller than the new window, we need to
228 * send a notification to the sender before we block.
229 */
230
231 PKT_FIELD(vsk, write_notify_window) =
232 PKT_FIELD(vsk, write_notify_min_window);
233 data->notify_on_block = true;
234 }
235 }
236
237 return 0;
238}
239
240static int
241vmci_transport_notify_pkt_recv_pre_block(
242 struct sock *sk,
243 size_t target,
244 struct vmci_transport_recv_notify_data *data)
245{
246 int err = 0;
247
248 vsock_block_update_write_window(sk);
249
250 if (data->notify_on_block) {
251 err = vmci_transport_send_read_notification(sk);
252 if (err < 0)
253 return err;
254 data->notify_on_block = false;
255 }
256
257 return err;
258}
259
260static int
261vmci_transport_notify_pkt_recv_post_dequeue(
262 struct sock *sk,
263 size_t target,
264 ssize_t copied,
265 bool data_read,
266 struct vmci_transport_recv_notify_data *data)
267{
268 struct vsock_sock *vsk;
269 int err;
270 bool was_full = false;
271 u64 free_space;
272
273 vsk = vsock_sk(sk);
274 err = 0;
275
276 if (data_read) {
277 smp_mb();
278
279 free_space =
280 vmci_qpair_consume_free_space(vmci_trans(vsk)->qpair);
281 was_full = free_space == copied;
282
283 if (was_full)
284 PKT_FIELD(vsk, peer_waiting_write) = true;
285
286 err = vmci_transport_send_read_notification(sk);
287 if (err < 0)
288 return err;
289
290 /* See the comment in
291 * vmci_transport_notify_pkt_send_post_enqueue().
292 */
293 sk->sk_data_ready(sk, 0);
294 }
295
296 return err;
297}
298
299static int
300vmci_transport_notify_pkt_send_init(
301 struct sock *sk,
302 struct vmci_transport_send_notify_data *data)
303{
304 data->consume_head = 0;
305 data->produce_tail = 0;
306
307 return 0;
308}
309
310static int
311vmci_transport_notify_pkt_send_post_enqueue(
312 struct sock *sk,
313 ssize_t written,
314 struct vmci_transport_send_notify_data *data)
315{
316 int err = 0;
317 struct vsock_sock *vsk;
318 bool sent_wrote = false;
319 bool was_empty;
320 int retries = 0;
321
322 vsk = vsock_sk(sk);
323
324 smp_mb();
325
326 was_empty =
327 vmci_qpair_produce_buf_ready(vmci_trans(vsk)->qpair) == written;
328 if (was_empty) {
329 while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
330 !sent_wrote &&
331 retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
332 err = vmci_transport_send_wrote(sk);
333 if (err >= 0)
334 sent_wrote = true;
335
336 retries++;
337 }
338 }
339
340 if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS && !sent_wrote) {
341 pr_err("%p unable to send wrote notification to peer\n",
342 sk);
343 return err;
344 }
345
346 return err;
347}
348
349static void
350vmci_transport_notify_pkt_handle_pkt(
351 struct sock *sk,
352 struct vmci_transport_packet *pkt,
353 bool bottom_half,
354 struct sockaddr_vm *dst,
355 struct sockaddr_vm *src, bool *pkt_processed)
356{
357 bool processed = false;
358
359 switch (pkt->type) {
360 case VMCI_TRANSPORT_PACKET_TYPE_WROTE:
361 vmci_transport_handle_wrote(sk, pkt, bottom_half, dst, src);
362 processed = true;
363 break;
364 case VMCI_TRANSPORT_PACKET_TYPE_READ:
365 vmci_transport_handle_read(sk, pkt, bottom_half, dst, src);
366 processed = true;
367 break;
368 }
369
370 if (pkt_processed)
371 *pkt_processed = processed;
372}
373
374static void vmci_transport_notify_pkt_process_request(struct sock *sk)
375{
376 struct vsock_sock *vsk = vsock_sk(sk);
377
378 PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
379 if (vmci_trans(vsk)->consume_size <
380 PKT_FIELD(vsk, write_notify_min_window))
381 PKT_FIELD(vsk, write_notify_min_window) =
382 vmci_trans(vsk)->consume_size;
383}
384
385static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk)
386{
387 struct vsock_sock *vsk = vsock_sk(sk);
388
389 PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
390 if (vmci_trans(vsk)->consume_size <
391 PKT_FIELD(vsk, write_notify_min_window))
392 PKT_FIELD(vsk, write_notify_min_window) =
393 vmci_trans(vsk)->consume_size;
394}
395
396static int
397vmci_transport_notify_pkt_recv_pre_dequeue(
398 struct sock *sk,
399 size_t target,
400 struct vmci_transport_recv_notify_data *data)
401{
402 return 0; /* NOP for QState. */
403}
404
405static int
406vmci_transport_notify_pkt_send_pre_block(
407 struct sock *sk,
408 struct vmci_transport_send_notify_data *data)
409{
410 return 0; /* NOP for QState. */
411}
412
413static int
414vmci_transport_notify_pkt_send_pre_enqueue(
415 struct sock *sk,
416 struct vmci_transport_send_notify_data *data)
417{
418 return 0; /* NOP for QState. */
419}
420
421/* Socket always on control packet based operations. */
422struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops = {
423 vmci_transport_notify_pkt_socket_init,
424 vmci_transport_notify_pkt_socket_destruct,
425 vmci_transport_notify_pkt_poll_in,
426 vmci_transport_notify_pkt_poll_out,
427 vmci_transport_notify_pkt_handle_pkt,
428 vmci_transport_notify_pkt_recv_init,
429 vmci_transport_notify_pkt_recv_pre_block,
430 vmci_transport_notify_pkt_recv_pre_dequeue,
431 vmci_transport_notify_pkt_recv_post_dequeue,
432 vmci_transport_notify_pkt_send_init,
433 vmci_transport_notify_pkt_send_pre_block,
434 vmci_transport_notify_pkt_send_pre_enqueue,
435 vmci_transport_notify_pkt_send_post_enqueue,
436 vmci_transport_notify_pkt_process_request,
437 vmci_transport_notify_pkt_process_negotiate,
438};
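Unlike the waiting-info protocol, this queue-state variant is purely edge triggered: WROTE is sent only when an enqueue takes the produce queue from empty to non-empty (was_empty above), and the reader flags peer_waiting_write only when a dequeue takes the consume queue from full to non-full (was_full). A worked sketch of the writer-side edge test:

/*
 * Enqueue 100 bytes into an empty queue:
 *   produce_buf_ready == 100 == written   ->  was_empty, send WROTE
 * Enqueue 100 more bytes (queue now holds 200):
 *   produce_buf_ready == 200 != 100       ->  no WROTE; the reader
 *                                             already knows data is there
 */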
diff --git a/net/vmw_vsock/vsock_addr.c b/net/vmw_vsock/vsock_addr.c
new file mode 100644
index 000000000000..b7df1aea7c59
--- /dev/null
+++ b/net/vmw_vsock/vsock_addr.c
@@ -0,0 +1,86 @@
1/*
2 * VMware vSockets Driver
3 *
4 * Copyright (C) 2007-2012 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#include <linux/types.h>
17#include <linux/socket.h>
18#include <linux/stddef.h>
19#include <net/sock.h>
20
21#include "vsock_addr.h"
22
23void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port)
24{
25 memset(addr, 0, sizeof(*addr));
26 addr->svm_family = AF_VSOCK;
27 addr->svm_cid = cid;
28 addr->svm_port = port;
29}
30EXPORT_SYMBOL_GPL(vsock_addr_init);
31
32int vsock_addr_validate(const struct sockaddr_vm *addr)
33{
34 if (!addr)
35 return -EFAULT;
36
37 if (addr->svm_family != AF_VSOCK)
38 return -EAFNOSUPPORT;
39
40 if (addr->svm_zero[0] != 0)
41 return -EINVAL;
42
43 return 0;
44}
45EXPORT_SYMBOL_GPL(vsock_addr_validate);
46
47bool vsock_addr_bound(const struct sockaddr_vm *addr)
48{
49 return addr->svm_port != VMADDR_PORT_ANY;
50}
51EXPORT_SYMBOL_GPL(vsock_addr_bound);
52
53void vsock_addr_unbind(struct sockaddr_vm *addr)
54{
55 vsock_addr_init(addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
56}
57EXPORT_SYMBOL_GPL(vsock_addr_unbind);
58
59bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,
60 const struct sockaddr_vm *other)
61{
62 return addr->svm_cid == other->svm_cid &&
63 addr->svm_port == other->svm_port;
64}
65EXPORT_SYMBOL_GPL(vsock_addr_equals_addr);
66
67bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr,
68 const struct sockaddr_vm *other)
69{
70 return (addr->svm_cid == VMADDR_CID_ANY ||
71 other->svm_cid == VMADDR_CID_ANY ||
72 addr->svm_cid == other->svm_cid) &&
73 addr->svm_port == other->svm_port;
74}
75EXPORT_SYMBOL_GPL(vsock_addr_equals_addr_any);
76
77int vsock_addr_cast(const struct sockaddr *addr,
78 size_t len, struct sockaddr_vm **out_addr)
79{
80 if (len < sizeof(**out_addr))
81 return -EFAULT;
82
83 *out_addr = (struct sockaddr_vm *)addr;
84 return vsock_addr_validate(*out_addr);
85}
86EXPORT_SYMBOL_GPL(vsock_addr_cast);
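A short usage sketch of these helpers, roughly as a bind()-style entry point would combine them (the function itself is illustrative, not part of the patch):

static int example_bind(struct socket *sock, struct sockaddr *addr, int len)
{
	struct sockaddr_vm *vm_addr;

	/* Check the length and family, and reinterpret the generic
	 * sockaddr as a sockaddr_vm in one step.
	 */
	if (vsock_addr_cast(addr, len, &vm_addr) != 0)
		return -EINVAL;

	/* ... bind to vm_addr->svm_cid / vm_addr->svm_port here ... */
	return 0;
}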
diff --git a/net/vmw_vsock/vsock_addr.h b/net/vmw_vsock/vsock_addr.h
new file mode 100644
index 000000000000..cdfbcefdf843
--- /dev/null
+++ b/net/vmw_vsock/vsock_addr.h
@@ -0,0 +1,32 @@
1/*
2 * VMware vSockets Driver
3 *
4 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#ifndef _VSOCK_ADDR_H_
17#define _VSOCK_ADDR_H_
18
19#include <linux/vm_sockets.h>
20
21void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port);
22int vsock_addr_validate(const struct sockaddr_vm *addr);
23bool vsock_addr_bound(const struct sockaddr_vm *addr);
24void vsock_addr_unbind(struct sockaddr_vm *addr);
25bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,
26 const struct sockaddr_vm *other);
27bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr,
28 const struct sockaddr_vm *other);
29int vsock_addr_cast(const struct sockaddr *addr, size_t len,
30 struct sockaddr_vm **out_addr);
31
32#endif
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 396373f3ec26..fd556ac05fdb 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -147,6 +147,32 @@ static void chandef_primary_freqs(const struct cfg80211_chan_def *c,
147 } 147 }
148} 148}
149 149
150static int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c)
151{
152 int width;
153
154 switch (c->width) {
155 case NL80211_CHAN_WIDTH_20:
156 case NL80211_CHAN_WIDTH_20_NOHT:
157 width = 20;
158 break;
159 case NL80211_CHAN_WIDTH_40:
160 width = 40;
161 break;
162 case NL80211_CHAN_WIDTH_80P80:
163 case NL80211_CHAN_WIDTH_80:
164 width = 80;
165 break;
166 case NL80211_CHAN_WIDTH_160:
167 width = 160;
168 break;
169 default:
170 WARN_ON_ONCE(1);
171 return -1;
172 }
173 return width;
174}
175
150const struct cfg80211_chan_def * 176const struct cfg80211_chan_def *
151cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1, 177cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
152 const struct cfg80211_chan_def *c2) 178 const struct cfg80211_chan_def *c2)
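Note that the 80+80 MHz case maps to 80 here on purpose: each frequency segment is validated separately against center_freq1 and center_freq2, as the DFS helpers in the next hunk do. The mapping, at a glance:

/*
 * chandef->width                  cfg80211_chandef_get_width()
 * NL80211_CHAN_WIDTH_20(_NOHT)    20
 * NL80211_CHAN_WIDTH_40           40
 * NL80211_CHAN_WIDTH_80 / 80P80   80   (80+80: width of each segment)
 * NL80211_CHAN_WIDTH_160          160
 * anything else                   -1   (after WARN_ON_ONCE)
 */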
@@ -192,6 +218,93 @@ cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
192} 218}
193EXPORT_SYMBOL(cfg80211_chandef_compatible); 219EXPORT_SYMBOL(cfg80211_chandef_compatible);
194 220
221static void cfg80211_set_chans_dfs_state(struct wiphy *wiphy, u32 center_freq,
222 u32 bandwidth,
223 enum nl80211_dfs_state dfs_state)
224{
225 struct ieee80211_channel *c;
226 u32 freq;
227
228 for (freq = center_freq - bandwidth/2 + 10;
229 freq <= center_freq + bandwidth/2 - 10;
230 freq += 20) {
231 c = ieee80211_get_channel(wiphy, freq);
232 if (!c || !(c->flags & IEEE80211_CHAN_RADAR))
233 continue;
234
235 c->dfs_state = dfs_state;
236 c->dfs_state_entered = jiffies;
237 }
238}
239
240void cfg80211_set_dfs_state(struct wiphy *wiphy,
241 const struct cfg80211_chan_def *chandef,
242 enum nl80211_dfs_state dfs_state)
243{
244 int width;
245
246 if (WARN_ON(!cfg80211_chandef_valid(chandef)))
247 return;
248
249 width = cfg80211_chandef_get_width(chandef);
250 if (width < 0)
251 return;
252
253 cfg80211_set_chans_dfs_state(wiphy, chandef->center_freq1,
254 width, dfs_state);
255
256 if (!chandef->center_freq2)
257 return;
258 cfg80211_set_chans_dfs_state(wiphy, chandef->center_freq2,
259 width, dfs_state);
260}
261
262static int cfg80211_get_chans_dfs_required(struct wiphy *wiphy,
263 u32 center_freq,
264 u32 bandwidth)
265{
266 struct ieee80211_channel *c;
267 u32 freq;
268
269 for (freq = center_freq - bandwidth/2 + 10;
270 freq <= center_freq + bandwidth/2 - 10;
271 freq += 20) {
272 c = ieee80211_get_channel(wiphy, freq);
273 if (!c)
274 return -EINVAL;
275
276 if (c->flags & IEEE80211_CHAN_RADAR)
277 return 1;
278 }
279 return 0;
280}
281
282
283int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
284 const struct cfg80211_chan_def *chandef)
285{
286 int width;
287 int r;
288
289 if (WARN_ON(!cfg80211_chandef_valid(chandef)))
290 return -EINVAL;
291
292 width = cfg80211_chandef_get_width(chandef);
293 if (width < 0)
294 return -EINVAL;
295
296 r = cfg80211_get_chans_dfs_required(wiphy, chandef->center_freq1,
297 width);
298 if (r)
299 return r;
300
301 if (!chandef->center_freq2)
302 return 0;
303
304 return cfg80211_get_chans_dfs_required(wiphy, chandef->center_freq2,
305 width);
306}
307
195static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy, 308static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
196 u32 center_freq, u32 bandwidth, 309 u32 center_freq, u32 bandwidth,
197 u32 prohibited_flags) 310 u32 prohibited_flags)
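The loop bounds in cfg80211_set_chans_dfs_state() and cfg80211_get_chans_dfs_required() above walk the 20 MHz subchannel centers of the block: the first is center_freq - bandwidth/2 + 10 and each step adds 20 MHz. A worked example for an 80 MHz block centered at 5290 MHz:

/*
 * center_freq = 5290, bandwidth = 80:
 *   start = 5290 - 40 + 10 = 5260
 *   end   = 5290 + 40 - 10 = 5320
 *   subchannel centers visited: 5260, 5280, 5300, 5320
 */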
@@ -203,7 +316,16 @@ static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
203 freq <= center_freq + bandwidth/2 - 10; 316 freq <= center_freq + bandwidth/2 - 10;
204 freq += 20) { 317 freq += 20) {
205 c = ieee80211_get_channel(wiphy, freq); 318 c = ieee80211_get_channel(wiphy, freq);
206 if (!c || c->flags & prohibited_flags) 319 if (!c)
320 return false;
321
322 /* check for radar flags */
323 if ((prohibited_flags & c->flags & IEEE80211_CHAN_RADAR) &&
324 (c->dfs_state != NL80211_DFS_AVAILABLE))
325 return false;
326
327 /* check for the other flags */
328 if (c->flags & prohibited_flags & ~IEEE80211_CHAN_RADAR)
207 return false; 329 return false;
208 } 330 }
209 331
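The rewritten check splits prohibited_flags in two. In boolean terms, a channel in the span is now rejected iff:

/*
 *   (prohibited_flags & c->flags & IEEE80211_CHAN_RADAR) is set
 *       and c->dfs_state != NL80211_DFS_AVAILABLE
 *       (radar detection required but no successful CAC yet), or
 *   (c->flags & prohibited_flags & ~IEEE80211_CHAN_RADAR) is non-zero
 *       (any non-radar prohibited flag still rejects unconditionally).
 */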
@@ -253,6 +375,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
253 case NL80211_CHAN_WIDTH_80: 375 case NL80211_CHAN_WIDTH_80:
254 if (!vht_cap->vht_supported) 376 if (!vht_cap->vht_supported)
255 return false; 377 return false;
378 prohibited_flags |= IEEE80211_CHAN_NO_80MHZ;
256 width = 80; 379 width = 80;
257 break; 380 break;
258 case NL80211_CHAN_WIDTH_160: 381 case NL80211_CHAN_WIDTH_160:
@@ -260,6 +383,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
260 return false; 383 return false;
261 if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ)) 384 if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ))
262 return false; 385 return false;
386 prohibited_flags |= IEEE80211_CHAN_NO_160MHZ;
263 width = 160; 387 width = 160;
264 break; 388 break;
265 default: 389 default:
@@ -267,7 +391,16 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
267 return false; 391 return false;
268 } 392 }
269 393
270 /* TODO: missing regulatory check on 80/160 bandwidth */ 394 /*
395 * TODO: What if there are only certain 80/160/80+80 MHz channels
396 * allowed by the driver, or only certain combinations?
397 * For 40 MHz the driver can set the NO_HT40 flags, but for
398 * 80/160 MHz and in particular 80+80 MHz this isn't really
399 * feasible and we only have NO_80MHZ/NO_160MHZ so far but
400 * no way to cover 80+80 MHz or more complex restrictions.
401 * Note that such restrictions also need to be advertised to
402 * userspace, for example for P2P channel selection.
403 */
271 404
272 if (width > 20) 405 if (width > 20)
273 prohibited_flags |= IEEE80211_CHAN_NO_OFDM; 406 prohibited_flags |= IEEE80211_CHAN_NO_OFDM;
@@ -344,7 +477,10 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
344 break; 477 break;
345 case NL80211_IFTYPE_AP: 478 case NL80211_IFTYPE_AP:
346 case NL80211_IFTYPE_P2P_GO: 479 case NL80211_IFTYPE_P2P_GO:
347 if (wdev->beacon_interval) { 480 if (wdev->cac_started) {
481 *chan = wdev->channel;
482 *chanmode = CHAN_MODE_SHARED;
483 } else if (wdev->beacon_interval) {
348 *chan = wdev->channel; 484 *chan = wdev->channel;
349 *chanmode = CHAN_MODE_SHARED; 485 *chanmode = CHAN_MODE_SHARED;
350 } 486 }
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 9245729694d2..5ffff039b017 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -324,6 +324,8 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
324 INIT_LIST_HEAD(&rdev->bss_list); 324 INIT_LIST_HEAD(&rdev->bss_list);
325 INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done); 325 INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done);
326 INIT_WORK(&rdev->sched_scan_results_wk, __cfg80211_sched_scan_results); 326 INIT_WORK(&rdev->sched_scan_results_wk, __cfg80211_sched_scan_results);
327 INIT_DELAYED_WORK(&rdev->dfs_update_channels_wk,
328 cfg80211_dfs_channels_update_work);
327#ifdef CONFIG_CFG80211_WEXT 329#ifdef CONFIG_CFG80211_WEXT
328 rdev->wiphy.wext = &cfg80211_wext_handler; 330 rdev->wiphy.wext = &cfg80211_wext_handler;
329#endif 331#endif
@@ -365,7 +367,8 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
365 rdev->wiphy.rts_threshold = (u32) -1; 367 rdev->wiphy.rts_threshold = (u32) -1;
366 rdev->wiphy.coverage_class = 0; 368 rdev->wiphy.coverage_class = 0;
367 369
368 rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH; 370 rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH |
371 NL80211_FEATURE_ADVERTISE_CHAN_LIMITS;
369 372
370 return &rdev->wiphy; 373 return &rdev->wiphy;
371} 374}
@@ -478,6 +481,11 @@ int wiphy_register(struct wiphy *wiphy)
478 ETH_ALEN))) 481 ETH_ALEN)))
479 return -EINVAL; 482 return -EINVAL;
480 483
484 if (WARN_ON(wiphy->max_acl_mac_addrs &&
485 (!(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME) ||
486 !rdev->ops->set_mac_acl)))
487 return -EINVAL;
488
481 if (wiphy->addresses) 489 if (wiphy->addresses)
482 memcpy(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN); 490 memcpy(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN);
483 491
@@ -690,6 +698,7 @@ void wiphy_unregister(struct wiphy *wiphy)
690 flush_work(&rdev->scan_done_wk); 698 flush_work(&rdev->scan_done_wk);
691 cancel_work_sync(&rdev->conn_work); 699 cancel_work_sync(&rdev->conn_work);
692 flush_work(&rdev->event_work); 700 flush_work(&rdev->event_work);
701 cancel_delayed_work_sync(&rdev->dfs_update_channels_wk);
693 702
694 if (rdev->wowlan && rdev->ops->set_wakeup) 703 if (rdev->wowlan && rdev->ops->set_wakeup)
695 rdev_set_wakeup(rdev, false); 704 rdev_set_wakeup(rdev, false);
@@ -710,7 +719,7 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
710 kfree(reg); 719 kfree(reg);
711 } 720 }
712 list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list) 721 list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
713 cfg80211_put_bss(&scan->pub); 722 cfg80211_put_bss(&rdev->wiphy, &scan->pub);
714 kfree(rdev); 723 kfree(rdev);
715} 724}
716 725
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 8396f7671c8d..3aec0e429d8a 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -8,7 +8,6 @@
8#include <linux/mutex.h> 8#include <linux/mutex.h>
9#include <linux/list.h> 9#include <linux/list.h>
10#include <linux/netdevice.h> 10#include <linux/netdevice.h>
11#include <linux/kref.h>
12#include <linux/rbtree.h> 11#include <linux/rbtree.h>
13#include <linux/debugfs.h> 12#include <linux/debugfs.h>
14#include <linux/rfkill.h> 13#include <linux/rfkill.h>
@@ -87,6 +86,8 @@ struct cfg80211_registered_device {
87 86
88 struct cfg80211_wowlan *wowlan; 87 struct cfg80211_wowlan *wowlan;
89 88
89 struct delayed_work dfs_update_channels_wk;
90
90 /* must be last because of the way we do wiphy_priv(), 91 /* must be last because of the way we do wiphy_priv(),
91 * and it should at least be aligned to NETDEV_ALIGN */ 92 * and it should at least be aligned to NETDEV_ALIGN */
92 struct wiphy wiphy __aligned(NETDEV_ALIGN); 93 struct wiphy wiphy __aligned(NETDEV_ALIGN);
@@ -109,6 +110,9 @@ cfg80211_rdev_free_wowlan(struct cfg80211_registered_device *rdev)
109 for (i = 0; i < rdev->wowlan->n_patterns; i++) 110 for (i = 0; i < rdev->wowlan->n_patterns; i++)
110 kfree(rdev->wowlan->patterns[i].mask); 111 kfree(rdev->wowlan->patterns[i].mask);
111 kfree(rdev->wowlan->patterns); 112 kfree(rdev->wowlan->patterns);
113 if (rdev->wowlan->tcp && rdev->wowlan->tcp->sock)
114 sock_release(rdev->wowlan->tcp->sock);
115 kfree(rdev->wowlan->tcp);
112 kfree(rdev->wowlan); 116 kfree(rdev->wowlan);
113} 117}
114 118
@@ -124,9 +128,10 @@ static inline void assert_cfg80211_lock(void)
124 128
125struct cfg80211_internal_bss { 129struct cfg80211_internal_bss {
126 struct list_head list; 130 struct list_head list;
131 struct list_head hidden_list;
127 struct rb_node rbn; 132 struct rb_node rbn;
128 unsigned long ts; 133 unsigned long ts;
129 struct kref ref; 134 unsigned long refcount;
130 atomic_t hold; 135 atomic_t hold;
131 136
132 /* must be last because of priv member */ 137 /* must be last because of priv member */
@@ -428,6 +433,22 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
428 enum cfg80211_chan_mode chanmode, 433 enum cfg80211_chan_mode chanmode,
429 u8 radar_detect); 434 u8 radar_detect);
430 435
436/**
437 * cfg80211_chandef_dfs_required - checks if radar detection is required
438 * @wiphy: the wiphy to validate against
439 * @chandef: the channel definition to check
440 * Return: 1 if radar detection is required, 0 if it is not, < 0 on error
441 */
442int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
443 const struct cfg80211_chan_def *chandef);
444
445void cfg80211_set_dfs_state(struct wiphy *wiphy,
446 const struct cfg80211_chan_def *chandef,
447 enum nl80211_dfs_state dfs_state);
448
449void cfg80211_dfs_channels_update_work(struct work_struct *work);
450
451
431static inline int 452static inline int
432cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, 453cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
433 struct wireless_dev *wdev, 454 struct wireless_dev *wdev,
@@ -454,6 +475,16 @@ cfg80211_can_use_chan(struct cfg80211_registered_device *rdev,
454 chan, chanmode, 0); 475 chan, chanmode, 0);
455} 476}
456 477
478static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
479{
480 unsigned long end = jiffies;
481
482 if (end >= start)
483 return jiffies_to_msecs(end - start);
484
485 return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1);
486}
487
457void 488void
458cfg80211_get_chan_state(struct wireless_dev *wdev, 489cfg80211_get_chan_state(struct wireless_dev *wdev,
459 struct ieee80211_channel **chan, 490 struct ieee80211_channel **chan,
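elapsed_jiffies_msecs() above tolerates the jiffies counter wrapping between start and now. A worked sketch, taking MAX_JIFFY_OFFSET as the wrap point the helper assumes:

/*
 * start = MAX_JIFFY_OFFSET - 1, end = 2 (counter wrapped):
 *   elapsed = end + (MAX_JIFFY_OFFSET - start) + 1
 *           = 2   + 1                          + 1  = 4 ticks
 * which jiffies_to_msecs() then converts to milliseconds.
 */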
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 9b9551e4a6f9..d80e47194d49 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -37,7 +37,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
37 37
38 if (wdev->current_bss) { 38 if (wdev->current_bss) {
39 cfg80211_unhold_bss(wdev->current_bss); 39 cfg80211_unhold_bss(wdev->current_bss);
40 cfg80211_put_bss(&wdev->current_bss->pub); 40 cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
41 } 41 }
42 42
43 cfg80211_hold_bss(bss_from_pub(bss)); 43 cfg80211_hold_bss(bss_from_pub(bss));
@@ -182,7 +182,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
182 182
183 if (wdev->current_bss) { 183 if (wdev->current_bss) {
184 cfg80211_unhold_bss(wdev->current_bss); 184 cfg80211_unhold_bss(wdev->current_bss);
185 cfg80211_put_bss(&wdev->current_bss->pub); 185 cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
186 } 186 }
187 187
188 wdev->current_bss = NULL; 188 wdev->current_bss = NULL;
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 461e692cdfec..caddca35d686 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -58,7 +58,7 @@ void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss,
58 */ 58 */
59 if (status_code != WLAN_STATUS_SUCCESS && wdev->conn && 59 if (status_code != WLAN_STATUS_SUCCESS && wdev->conn &&
60 cfg80211_sme_failed_reassoc(wdev)) { 60 cfg80211_sme_failed_reassoc(wdev)) {
61 cfg80211_put_bss(bss); 61 cfg80211_put_bss(wiphy, bss);
62 goto out; 62 goto out;
63 } 63 }
64 64
@@ -70,7 +70,7 @@ void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss,
70 * do not call connect_result() now because the 70 * do not call connect_result() now because the
71 * sme will schedule work that does it later. 71 * sme will schedule work that does it later.
72 */ 72 */
73 cfg80211_put_bss(bss); 73 cfg80211_put_bss(wiphy, bss);
74 goto out; 74 goto out;
75 } 75 }
76 76
@@ -108,7 +108,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
108 if (wdev->current_bss && 108 if (wdev->current_bss &&
109 ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) { 109 ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
110 cfg80211_unhold_bss(wdev->current_bss); 110 cfg80211_unhold_bss(wdev->current_bss);
111 cfg80211_put_bss(&wdev->current_bss->pub); 111 cfg80211_put_bss(wiphy, &wdev->current_bss->pub);
112 wdev->current_bss = NULL; 112 wdev->current_bss = NULL;
113 was_current = true; 113 was_current = true;
114 } 114 }
@@ -164,7 +164,7 @@ void __cfg80211_send_disassoc(struct net_device *dev,
164 ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) { 164 ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
165 cfg80211_sme_disassoc(dev, wdev->current_bss); 165 cfg80211_sme_disassoc(dev, wdev->current_bss);
166 cfg80211_unhold_bss(wdev->current_bss); 166 cfg80211_unhold_bss(wdev->current_bss);
167 cfg80211_put_bss(&wdev->current_bss->pub); 167 cfg80211_put_bss(wiphy, &wdev->current_bss->pub);
168 wdev->current_bss = NULL; 168 wdev->current_bss = NULL;
169 } else 169 } else
170 WARN_ON(1); 170 WARN_ON(1);
@@ -324,7 +324,7 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
324 err = rdev_auth(rdev, dev, &req); 324 err = rdev_auth(rdev, dev, &req);
325 325
326out: 326out:
327 cfg80211_put_bss(req.bss); 327 cfg80211_put_bss(&rdev->wiphy, req.bss);
328 return err; 328 return err;
329} 329}
330 330
@@ -432,7 +432,7 @@ out:
432 if (err) { 432 if (err) {
433 if (was_connected) 433 if (was_connected)
434 wdev->sme_state = CFG80211_SME_CONNECTED; 434 wdev->sme_state = CFG80211_SME_CONNECTED;
435 cfg80211_put_bss(req.bss); 435 cfg80211_put_bss(&rdev->wiphy, req.bss);
436 } 436 }
437 437
438 return err; 438 return err;
@@ -514,7 +514,7 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
514 if (wdev->sme_state != CFG80211_SME_CONNECTED) 514 if (wdev->sme_state != CFG80211_SME_CONNECTED)
515 return -ENOTCONN; 515 return -ENOTCONN;
516 516
517 if (WARN_ON(!wdev->current_bss)) 517 if (WARN(!wdev->current_bss, "sme_state=%d\n", wdev->sme_state))
518 return -ENOTCONN; 518 return -ENOTCONN;
519 519
520 memset(&req, 0, sizeof(req)); 520 memset(&req, 0, sizeof(req));
@@ -572,7 +572,7 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
572 572
573 if (wdev->current_bss) { 573 if (wdev->current_bss) {
574 cfg80211_unhold_bss(wdev->current_bss); 574 cfg80211_unhold_bss(wdev->current_bss);
575 cfg80211_put_bss(&wdev->current_bss->pub); 575 cfg80211_put_bss(&rdev->wiphy, &wdev->current_bss->pub);
576 wdev->current_bss = NULL; 576 wdev->current_bss = NULL;
577 } 577 }
578} 578}
@@ -987,3 +987,123 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
987 nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp); 987 nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp);
988} 988}
989EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify); 989EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
990
991void cfg80211_dfs_channels_update_work(struct work_struct *work)
992{
993 struct delayed_work *delayed_work;
994 struct cfg80211_registered_device *rdev;
995 struct cfg80211_chan_def chandef;
996 struct ieee80211_supported_band *sband;
997 struct ieee80211_channel *c;
998 struct wiphy *wiphy;
999 bool check_again = false;
1000 unsigned long timeout, next_time = 0;
1001 int bandid, i;
1002
1003 delayed_work = container_of(work, struct delayed_work, work);
1004 rdev = container_of(delayed_work, struct cfg80211_registered_device,
1005 dfs_update_channels_wk);
1006 wiphy = &rdev->wiphy;
1007
1008 mutex_lock(&cfg80211_mutex);
1009 for (bandid = 0; bandid < IEEE80211_NUM_BANDS; bandid++) {
1010 sband = wiphy->bands[bandid];
1011 if (!sband)
1012 continue;
1013
1014 for (i = 0; i < sband->n_channels; i++) {
1015 c = &sband->channels[i];
1016
1017 if (c->dfs_state != NL80211_DFS_UNAVAILABLE)
1018 continue;
1019
1020 timeout = c->dfs_state_entered +
1021 IEEE80211_DFS_MIN_NOP_TIME_MS;
1022
1023 if (time_after_eq(jiffies, timeout)) {
1024 c->dfs_state = NL80211_DFS_USABLE;
1025 cfg80211_chandef_create(&chandef, c,
1026 NL80211_CHAN_NO_HT);
1027
1028 nl80211_radar_notify(rdev, &chandef,
1029 NL80211_RADAR_NOP_FINISHED,
1030 NULL, GFP_ATOMIC);
1031 continue;
1032 }
1033
1034 if (!check_again)
1035 next_time = timeout - jiffies;
1036 else
1037 next_time = min(next_time, timeout - jiffies);
1038 check_again = true;
1039 }
1040 }
1041 mutex_unlock(&cfg80211_mutex);
1042
1043 /* reschedule if other channels are still waiting for their NOP to expire */
1044 if (check_again)
1045 queue_delayed_work(cfg80211_wq, &rdev->dfs_update_channels_wk,
1046 next_time);
1047}
1048
1049
1050void cfg80211_radar_event(struct wiphy *wiphy,
1051 struct cfg80211_chan_def *chandef,
1052 gfp_t gfp)
1053{
1054 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
1055 unsigned long timeout;
1056
1057 trace_cfg80211_radar_event(wiphy, chandef);
1058
1059 /* only set the chandef supplied channel to unavailable, in
1060 * case the radar is detected on only one of multiple channels
1061 * spanned by the chandef.
1062 */
1063 cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_UNAVAILABLE);
1064
1065 timeout = msecs_to_jiffies(IEEE80211_DFS_MIN_NOP_TIME_MS);
1066 queue_delayed_work(cfg80211_wq, &rdev->dfs_update_channels_wk,
1067 timeout);
1068
1069 nl80211_radar_notify(rdev, chandef, NL80211_RADAR_DETECTED, NULL, gfp);
1070}
1071EXPORT_SYMBOL(cfg80211_radar_event);
1072
1073void cfg80211_cac_event(struct net_device *netdev,
1074 enum nl80211_radar_event event, gfp_t gfp)
1075{
1076 struct wireless_dev *wdev = netdev->ieee80211_ptr;
1077 struct wiphy *wiphy = wdev->wiphy;
1078 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
1079 struct cfg80211_chan_def chandef;
1080 unsigned long timeout;
1081
1082 trace_cfg80211_cac_event(netdev, event);
1083
1084 if (WARN_ON(!wdev->cac_started))
1085 return;
1086
1087 if (WARN_ON(!wdev->channel))
1088 return;
1089
1090 cfg80211_chandef_create(&chandef, wdev->channel, NL80211_CHAN_NO_HT);
1091
1092 switch (event) {
1093 case NL80211_RADAR_CAC_FINISHED:
1094 timeout = wdev->cac_start_time +
1095 msecs_to_jiffies(IEEE80211_DFS_MIN_CAC_TIME_MS);
1096 WARN_ON(!time_after_eq(jiffies, timeout));
1097 cfg80211_set_dfs_state(wiphy, &chandef, NL80211_DFS_AVAILABLE);
1098 break;
1099 case NL80211_RADAR_CAC_ABORTED:
1100 break;
1101 default:
1102 WARN_ON(1);
1103 return;
1104 }
1105 wdev->cac_started = false;
1106
1107 nl80211_radar_notify(rdev, &chandef, event, netdev, gfp);
1108}
1109EXPORT_SYMBOL(cfg80211_cac_event);
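Taken together, cfg80211_radar_event() and cfg80211_cac_event() form the driver-facing half of the DFS state machine: a radar report moves the affected channel to NL80211_DFS_UNAVAILABLE and arms dfs_update_channels_wk, which flips it back to NL80211_DFS_USABLE once the IEEE80211_DFS_MIN_NOP_TIME_MS non-occupancy period expires, while a finished CAC moves the channel to NL80211_DFS_AVAILABLE. A hedged driver-side sketch using the exported signatures above (the mydrv_* names and private struct are hypothetical, not a real driver):

	/* sketch only: mydrv_* names are illustrative */
	static void mydrv_radar_detected_work(struct work_struct *work)
	{
		struct mydrv_priv *priv =
			container_of(work, struct mydrv_priv, radar_wk);

		/* marks the chandef's channel unavailable, starts NOP timer */
		cfg80211_radar_event(priv->wiphy, &priv->oper_chandef,
				     GFP_KERNEL);
	}

	static void mydrv_cac_done(struct mydrv_priv *priv, bool aborted)
	{
		/* channel becomes NL80211_DFS_AVAILABLE unless aborted */
		cfg80211_cac_event(priv->netdev,
				   aborted ? NL80211_RADAR_CAC_ABORTED :
					     NL80211_RADAR_CAC_FINISHED,
				   GFP_KERNEL);
	}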
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 33de80364c5c..580ffeaef3d5 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -19,6 +19,7 @@
19#include <net/genetlink.h> 19#include <net/genetlink.h>
20#include <net/cfg80211.h> 20#include <net/cfg80211.h>
21#include <net/sock.h> 21#include <net/sock.h>
22#include <net/inet_connection_sock.h>
22#include "core.h" 23#include "core.h"
23#include "nl80211.h" 24#include "nl80211.h"
24#include "reg.h" 25#include "reg.h"
@@ -365,6 +366,10 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
365 [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 }, 366 [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
366 [NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 }, 367 [NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 },
367 [NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 }, 368 [NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 },
369 [NL80211_ATTR_ACL_POLICY] = { .type = NLA_U32 },
370 [NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED },
371 [NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 },
372 [NL80211_ATTR_STA_EXT_CAPABILITY] = { .type = NLA_BINARY, },
368}; 373};
369 374
370/* policy for the key attributes */ 375/* policy for the key attributes */
@@ -397,6 +402,26 @@ nl80211_wowlan_policy[NUM_NL80211_WOWLAN_TRIG] = {
397 [NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST] = { .type = NLA_FLAG }, 402 [NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST] = { .type = NLA_FLAG },
398 [NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE] = { .type = NLA_FLAG }, 403 [NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE] = { .type = NLA_FLAG },
399 [NL80211_WOWLAN_TRIG_RFKILL_RELEASE] = { .type = NLA_FLAG }, 404 [NL80211_WOWLAN_TRIG_RFKILL_RELEASE] = { .type = NLA_FLAG },
405 [NL80211_WOWLAN_TRIG_TCP_CONNECTION] = { .type = NLA_NESTED },
406};
407
408static const struct nla_policy
409nl80211_wowlan_tcp_policy[NUM_NL80211_WOWLAN_TCP] = {
410 [NL80211_WOWLAN_TCP_SRC_IPV4] = { .type = NLA_U32 },
411 [NL80211_WOWLAN_TCP_DST_IPV4] = { .type = NLA_U32 },
412 [NL80211_WOWLAN_TCP_DST_MAC] = { .len = ETH_ALEN },
413 [NL80211_WOWLAN_TCP_SRC_PORT] = { .type = NLA_U16 },
414 [NL80211_WOWLAN_TCP_DST_PORT] = { .type = NLA_U16 },
415 [NL80211_WOWLAN_TCP_DATA_PAYLOAD] = { .len = 1 },
416 [NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ] = {
417 .len = sizeof(struct nl80211_wowlan_tcp_data_seq)
418 },
419 [NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN] = {
420 .len = sizeof(struct nl80211_wowlan_tcp_data_token)
421 },
422 [NL80211_WOWLAN_TCP_DATA_INTERVAL] = { .type = NLA_U32 },
423 [NL80211_WOWLAN_TCP_WAKE_PAYLOAD] = { .len = 1 },
424 [NL80211_WOWLAN_TCP_WAKE_MASK] = { .len = 1 },
400}; 425};
401 426
402/* policy for GTK rekey offload attributes */ 427/* policy for GTK rekey offload attributes */
@@ -529,8 +554,27 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
529 if ((chan->flags & IEEE80211_CHAN_NO_IBSS) && 554 if ((chan->flags & IEEE80211_CHAN_NO_IBSS) &&
530 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS)) 555 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS))
531 goto nla_put_failure; 556 goto nla_put_failure;
532 if ((chan->flags & IEEE80211_CHAN_RADAR) && 557 if (chan->flags & IEEE80211_CHAN_RADAR) {
533 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR)) 558 u32 time = elapsed_jiffies_msecs(chan->dfs_state_entered);
559 if (nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
560 goto nla_put_failure;
561 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_STATE,
562 chan->dfs_state))
563 goto nla_put_failure;
564 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_TIME, time))
565 goto nla_put_failure;
566 }
567 if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) &&
568 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS))
569 goto nla_put_failure;
570 if ((chan->flags & IEEE80211_CHAN_NO_HT40PLUS) &&
571 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_PLUS))
572 goto nla_put_failure;
573 if ((chan->flags & IEEE80211_CHAN_NO_80MHZ) &&
574 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_80MHZ))
575 goto nla_put_failure;
576 if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) &&
577 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ))
534 goto nla_put_failure; 578 goto nla_put_failure;
535 579
536 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, 580 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
@@ -870,6 +914,48 @@ nla_put_failure:
870 return -ENOBUFS; 914 return -ENOBUFS;
871} 915}
872 916
917#ifdef CONFIG_PM
918static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev,
919 struct sk_buff *msg)
920{
921 const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan.tcp;
922 struct nlattr *nl_tcp;
923
924 if (!tcp)
925 return 0;
926
927 nl_tcp = nla_nest_start(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION);
928 if (!nl_tcp)
929 return -ENOBUFS;
930
931 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
932 tcp->data_payload_max))
933 return -ENOBUFS;
934
939 if (tcp->seq && nla_put_flag(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ))
940 return -ENOBUFS;
941
942 if (tcp->tok && nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN,
943 sizeof(*tcp->tok), tcp->tok))
944 return -ENOBUFS;
945
946 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL,
947 tcp->data_interval_max))
948 return -ENOBUFS;
949
950 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD,
951 tcp->wake_payload_max))
952 return -ENOBUFS;
953
954 nla_nest_end(msg, nl_tcp);
955 return 0;
956}
957#endif
958
873static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags, 959static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags,
874 struct cfg80211_registered_device *dev) 960 struct cfg80211_registered_device *dev)
875{ 961{
@@ -1236,12 +1322,17 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
1236 dev->wiphy.wowlan.pattern_min_len, 1322 dev->wiphy.wowlan.pattern_min_len,
1237 .max_pattern_len = 1323 .max_pattern_len =
1238 dev->wiphy.wowlan.pattern_max_len, 1324 dev->wiphy.wowlan.pattern_max_len,
1325 .max_pkt_offset =
1326 dev->wiphy.wowlan.max_pkt_offset,
1239 }; 1327 };
1240 if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, 1328 if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
1241 sizeof(pat), &pat)) 1329 sizeof(pat), &pat))
1242 goto nla_put_failure; 1330 goto nla_put_failure;
1243 } 1331 }
1244 1332
1333 if (nl80211_send_wowlan_tcp_caps(dev, msg))
1334 goto nla_put_failure;
1335
1245 nla_nest_end(msg, nl_wowlan); 1336 nla_nest_end(msg, nl_wowlan);
1246 } 1337 }
1247#endif 1338#endif
@@ -1268,6 +1359,21 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
1268 dev->wiphy.ht_capa_mod_mask)) 1359 dev->wiphy.ht_capa_mod_mask))
1269 goto nla_put_failure; 1360 goto nla_put_failure;
1270 1361
1362 if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME &&
1363 dev->wiphy.max_acl_mac_addrs &&
1364 nla_put_u32(msg, NL80211_ATTR_MAC_ACL_MAX,
1365 dev->wiphy.max_acl_mac_addrs))
1366 goto nla_put_failure;
1367
1368 if (dev->wiphy.extended_capabilities &&
1369 (nla_put(msg, NL80211_ATTR_EXT_CAPA,
1370 dev->wiphy.extended_capabilities_len,
1371 dev->wiphy.extended_capabilities) ||
1372 nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK,
1373 dev->wiphy.extended_capabilities_len,
1374 dev->wiphy.extended_capabilities_mask)))
1375 goto nla_put_failure;
1376
1271 return genlmsg_end(msg, hdr); 1377 return genlmsg_end(msg, hdr);
1272 1378
1273 nla_put_failure: 1379 nla_put_failure:
@@ -2491,6 +2597,97 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
2491 return err; 2597 return err;
2492} 2598}
2493 2599
2600/* This function returns an error or the number of nested attributes */
2601static int validate_acl_mac_addrs(struct nlattr *nl_attr)
2602{
2603 struct nlattr *attr;
2604 int n_entries = 0, tmp;
2605
2606 nla_for_each_nested(attr, nl_attr, tmp) {
2607 if (nla_len(attr) != ETH_ALEN)
2608 return -EINVAL;
2609
2610 n_entries++;
2611 }
2612
2613 return n_entries;
2614}
2615
2616/*
2617 * This function parses ACL information and allocates memory for ACL data.
2618 * On successful return, the calling function is responsible for freeing
2619 * the ACL buffer returned by this function.
2620 */
2621static struct cfg80211_acl_data *parse_acl_data(struct wiphy *wiphy,
2622 struct genl_info *info)
2623{
2624 enum nl80211_acl_policy acl_policy;
2625 struct nlattr *attr;
2626 struct cfg80211_acl_data *acl;
2627 int i = 0, n_entries, tmp;
2628
2629 if (!wiphy->max_acl_mac_addrs)
2630 return ERR_PTR(-EOPNOTSUPP);
2631
2632 if (!info->attrs[NL80211_ATTR_ACL_POLICY])
2633 return ERR_PTR(-EINVAL);
2634
2635 acl_policy = nla_get_u32(info->attrs[NL80211_ATTR_ACL_POLICY]);
2636 if (acl_policy != NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED &&
2637 acl_policy != NL80211_ACL_POLICY_DENY_UNLESS_LISTED)
2638 return ERR_PTR(-EINVAL);
2639
2640 if (!info->attrs[NL80211_ATTR_MAC_ADDRS])
2641 return ERR_PTR(-EINVAL);
2642
2643 n_entries = validate_acl_mac_addrs(info->attrs[NL80211_ATTR_MAC_ADDRS]);
2644 if (n_entries < 0)
2645 return ERR_PTR(n_entries);
2646
2647 if (n_entries > wiphy->max_acl_mac_addrs)
2648 return ERR_PTR(-ENOTSUPP);
2649
2650 acl = kzalloc(sizeof(*acl) + (sizeof(struct mac_address) * n_entries),
2651 GFP_KERNEL);
2652 if (!acl)
2653 return ERR_PTR(-ENOMEM);
2654
2655 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_MAC_ADDRS], tmp) {
2656 memcpy(acl->mac_addrs[i].addr, nla_data(attr), ETH_ALEN);
2657 i++;
2658 }
2659
2660 acl->n_acl_entries = n_entries;
2661 acl->acl_policy = acl_policy;
2662
2663 return acl;
2664}
2665
2666static int nl80211_set_mac_acl(struct sk_buff *skb, struct genl_info *info)
2667{
2668 struct cfg80211_registered_device *rdev = info->user_ptr[0];
2669 struct net_device *dev = info->user_ptr[1];
2670 struct cfg80211_acl_data *acl;
2671 int err;
2672
2673 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
2674 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
2675 return -EOPNOTSUPP;
2676
2677 if (!dev->ieee80211_ptr->beacon_interval)
2678 return -EINVAL;
2679
2680 acl = parse_acl_data(&rdev->wiphy, info);
2681 if (IS_ERR(acl))
2682 return PTR_ERR(acl);
2683
2684 err = rdev_set_mac_acl(rdev, dev, acl);
2685
2686 kfree(acl);
2687
2688 return err;
2689}
2690
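parse_acl_data() follows the common two-pass netlink idiom: one nla_for_each_nested() walk to validate and count the entries, a single allocation sized from the count, then a second walk to copy the addresses. From userspace, the matching NL80211_CMD_SET_MAC_ACL message can be built with libnl-3; a hedged sketch (error handling elided; the sock, nl80211_family_id and ifindex values are assumed to be set up already):

	/* needs <netlink/netlink.h>, <netlink/genl/genl.h>, <linux/nl80211.h> */
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *macs;
	static const uint8_t peer[ETH_ALEN] =
		{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, nl80211_family_id,
		    0, 0, NL80211_CMD_SET_MAC_ACL, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put_u32(msg, NL80211_ATTR_ACL_POLICY,
		    NL80211_ACL_POLICY_DENY_UNLESS_LISTED);
	macs = nla_nest_start(msg, NL80211_ATTR_MAC_ADDRS);
	nla_put(msg, 1, ETH_ALEN, peer);  /* only nla_len() is checked kernel-side */
	nla_nest_end(msg, macs);
	nl_send_auto(sock, msg);

Note that the nested attribute type (here 1) is arbitrary: validate_acl_mac_addrs() only checks each entry's length against ETH_ALEN.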
2494static int nl80211_parse_beacon(struct genl_info *info, 2691static int nl80211_parse_beacon(struct genl_info *info,
2495 struct cfg80211_beacon_data *bcn) 2692 struct cfg80211_beacon_data *bcn)
2496{ 2693{
@@ -2608,6 +2805,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
2608 struct wireless_dev *wdev = dev->ieee80211_ptr; 2805 struct wireless_dev *wdev = dev->ieee80211_ptr;
2609 struct cfg80211_ap_settings params; 2806 struct cfg80211_ap_settings params;
2610 int err; 2807 int err;
2808 u8 radar_detect_width = 0;
2611 2809
2612 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && 2810 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
2613 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) 2811 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
@@ -2726,14 +2924,30 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
2726 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef)) 2924 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef))
2727 return -EINVAL; 2925 return -EINVAL;
2728 2926
2927 err = cfg80211_chandef_dfs_required(wdev->wiphy, &params.chandef);
2928 if (err < 0)
2929 return err;
2930 if (err) {
2931 radar_detect_width = BIT(params.chandef.width);
2932 params.radar_required = true;
2933 }
2934
2729 mutex_lock(&rdev->devlist_mtx); 2935 mutex_lock(&rdev->devlist_mtx);
2730 err = cfg80211_can_use_chan(rdev, wdev, params.chandef.chan, 2936 err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
2731 CHAN_MODE_SHARED); 2937 params.chandef.chan,
2938 CHAN_MODE_SHARED,
2939 radar_detect_width);
2732 mutex_unlock(&rdev->devlist_mtx); 2940 mutex_unlock(&rdev->devlist_mtx);
2733 2941
2734 if (err) 2942 if (err)
2735 return err; 2943 return err;
2736 2944
2945 if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
2946 params.acl = parse_acl_data(&rdev->wiphy, info);
2947 if (IS_ERR(params.acl))
2948 return PTR_ERR(params.acl);
2949 }
2950
2737 err = rdev_start_ap(rdev, dev, &params); 2951 err = rdev_start_ap(rdev, dev, &params);
2738 if (!err) { 2952 if (!err) {
2739 wdev->preset_chandef = params.chandef; 2953 wdev->preset_chandef = params.chandef;
@@ -2742,6 +2956,9 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
2742 wdev->ssid_len = params.ssid_len; 2956 wdev->ssid_len = params.ssid_len;
2743 memcpy(wdev->ssid, params.ssid, wdev->ssid_len); 2957 memcpy(wdev->ssid, params.ssid, wdev->ssid_len);
2744 } 2958 }
2959
2960 kfree(params.acl);
2961
2745 return err; 2962 return err;
2746} 2963}
2747 2964
@@ -2949,12 +3166,22 @@ static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq,
2949 nla_put_u32(msg, NL80211_STA_INFO_INACTIVE_TIME, 3166 nla_put_u32(msg, NL80211_STA_INFO_INACTIVE_TIME,
2950 sinfo->inactive_time)) 3167 sinfo->inactive_time))
2951 goto nla_put_failure; 3168 goto nla_put_failure;
2952 if ((sinfo->filled & STATION_INFO_RX_BYTES) && 3169 if ((sinfo->filled & (STATION_INFO_RX_BYTES |
3170 STATION_INFO_RX_BYTES64)) &&
2953 nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES, 3171 nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES,
2954 sinfo->rx_bytes)) 3172 (u32)sinfo->rx_bytes))
2955 goto nla_put_failure; 3173 goto nla_put_failure;
2956 if ((sinfo->filled & STATION_INFO_TX_BYTES) && 3174 if ((sinfo->filled & (STATION_INFO_TX_BYTES |
3175 STATION_INFO_TX_BYTES64)) &&
2957 nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES, 3176 nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES,
3177 (u32)sinfo->tx_bytes))
3178 goto nla_put_failure;
3179 if ((sinfo->filled & STATION_INFO_RX_BYTES64) &&
3180 nla_put_u64(msg, NL80211_STA_INFO_RX_BYTES64,
3181 sinfo->rx_bytes))
3182 goto nla_put_failure;
3183 if ((sinfo->filled & STATION_INFO_TX_BYTES64) &&
3184 nla_put_u64(msg, NL80211_STA_INFO_TX_BYTES64,
2958 sinfo->tx_bytes)) 3185 sinfo->tx_bytes))
2959 goto nla_put_failure; 3186 goto nla_put_failure;
2960 if ((sinfo->filled & STATION_INFO_LLID) && 3187 if ((sinfo->filled & STATION_INFO_LLID) &&
@@ -3182,6 +3409,63 @@ static struct net_device *get_vlan(struct genl_info *info,
3182 return ERR_PTR(ret); 3409 return ERR_PTR(ret);
3183} 3410}
3184 3411
3412static struct nla_policy
3413nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] __read_mostly = {
3414 [NL80211_STA_WME_UAPSD_QUEUES] = { .type = NLA_U8 },
3415 [NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 },
3416};
3417
3418static int nl80211_set_station_tdls(struct genl_info *info,
3419 struct station_parameters *params)
3420{
3421 struct cfg80211_registered_device *rdev = info->user_ptr[0];
3422 struct nlattr *tb[NL80211_STA_WME_MAX + 1];
3423 struct nlattr *nla;
3424 int err;
3425
3426 /* Can only set if TDLS ... */
3427 if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS))
3428 return -EOPNOTSUPP;
3429
3430 /* ... with external setup is supported */
3431 if (!(rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP))
3432 return -EOPNOTSUPP;
3433
3434 /* Dummy STA entry gets updated once the peer capabilities are known */
3435 if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
3436 params->ht_capa =
3437 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
3438 if (info->attrs[NL80211_ATTR_VHT_CAPABILITY])
3439 params->vht_capa =
3440 nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]);
3441
3442 /* parse WME attributes if present */
3443 if (!info->attrs[NL80211_ATTR_STA_WME])
3444 return 0;
3445
3446 nla = info->attrs[NL80211_ATTR_STA_WME];
3447 err = nla_parse_nested(tb, NL80211_STA_WME_MAX, nla,
3448 nl80211_sta_wme_policy);
3449 if (err)
3450 return err;
3451
3452 if (tb[NL80211_STA_WME_UAPSD_QUEUES])
3453 params->uapsd_queues = nla_get_u8(
3454 tb[NL80211_STA_WME_UAPSD_QUEUES]);
3455 if (params->uapsd_queues & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
3456 return -EINVAL;
3457
3458 if (tb[NL80211_STA_WME_MAX_SP])
3459 params->max_sp = nla_get_u8(tb[NL80211_STA_WME_MAX_SP]);
3460
3461 if (params->max_sp & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
3462 return -EINVAL;
3463
3464 params->sta_modify_mask |= STATION_PARAM_APPLY_UAPSD;
3465
3466 return 0;
3467}
3468
3185static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) 3469static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
3186{ 3470{
3187 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 3471 struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -3210,8 +3494,20 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
3210 nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); 3494 nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]);
3211 } 3495 }
3212 3496
3213 if (info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL] || 3497 if (info->attrs[NL80211_ATTR_STA_CAPABILITY]) {
3214 info->attrs[NL80211_ATTR_HT_CAPABILITY]) 3498 params.capability =
3499 nla_get_u16(info->attrs[NL80211_ATTR_STA_CAPABILITY]);
3500 params.sta_modify_mask |= STATION_PARAM_APPLY_CAPABILITY;
3501 }
3502
3503 if (info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]) {
3504 params.ext_capab =
3505 nla_data(info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]);
3506 params.ext_capab_len =
3507 nla_len(info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]);
3508 }
3509
3510 if (info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL])
3215 return -EINVAL; 3511 return -EINVAL;
3216 3512
3217 if (!rdev->ops->change_station) 3513 if (!rdev->ops->change_station)
@@ -3280,6 +3576,13 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
3280 /* reject other things that can't change */ 3576 /* reject other things that can't change */
3281 if (params.supported_rates) 3577 if (params.supported_rates)
3282 return -EINVAL; 3578 return -EINVAL;
3579 if (info->attrs[NL80211_ATTR_STA_CAPABILITY])
3580 return -EINVAL;
3581 if (info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY])
3582 return -EINVAL;
3583 if (info->attrs[NL80211_ATTR_HT_CAPABILITY] ||
3584 info->attrs[NL80211_ATTR_VHT_CAPABILITY])
3585 return -EINVAL;
3283 3586
3284 /* must be last in here for error handling */ 3587 /* must be last in here for error handling */
3285 params.vlan = get_vlan(info, rdev); 3588 params.vlan = get_vlan(info, rdev);
@@ -3295,13 +3598,29 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
3295 * to change the flag. 3598 * to change the flag.
3296 */ 3599 */
3297 params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER); 3600 params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
3298 /* fall through */ 3601 /* Include parameters for TDLS peer (driver will check) */
3602 err = nl80211_set_station_tdls(info, &params);
3603 if (err)
3604 return err;
3605 /* disallow things sta doesn't support */
3606 if (params.plink_action)
3607 return -EINVAL;
3608 if (params.local_pm)
3609 return -EINVAL;
3610 /* reject any changes other than AUTHORIZED or WME (for TDLS) */
3611 if (params.sta_flags_mask & ~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
3612 BIT(NL80211_STA_FLAG_WME)))
3613 return -EINVAL;
3614 break;
3299 case NL80211_IFTYPE_ADHOC: 3615 case NL80211_IFTYPE_ADHOC:
3300 /* disallow things sta doesn't support */ 3616 /* disallow things sta doesn't support */
3301 if (params.plink_action) 3617 if (params.plink_action)
3302 return -EINVAL; 3618 return -EINVAL;
3303 if (params.local_pm) 3619 if (params.local_pm)
3304 return -EINVAL; 3620 return -EINVAL;
3621 if (info->attrs[NL80211_ATTR_HT_CAPABILITY] ||
3622 info->attrs[NL80211_ATTR_VHT_CAPABILITY])
3623 return -EINVAL;
3305 /* reject any changes other than AUTHORIZED */ 3624 /* reject any changes other than AUTHORIZED */
3306 if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED)) 3625 if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
3307 return -EINVAL; 3626 return -EINVAL;
@@ -3312,6 +3631,13 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
3312 return -EINVAL; 3631 return -EINVAL;
3313 if (params.supported_rates) 3632 if (params.supported_rates)
3314 return -EINVAL; 3633 return -EINVAL;
3634 if (info->attrs[NL80211_ATTR_STA_CAPABILITY])
3635 return -EINVAL;
3636 if (info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY])
3637 return -EINVAL;
3638 if (info->attrs[NL80211_ATTR_HT_CAPABILITY] ||
3639 info->attrs[NL80211_ATTR_VHT_CAPABILITY])
3640 return -EINVAL;
3315 /* 3641 /*
3316 * No special handling for TDLS here -- the userspace 3642 * No special handling for TDLS here -- the userspace
3317 * mesh code doesn't have this bug. 3643 * mesh code doesn't have this bug.
@@ -3336,12 +3662,6 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
3336 return err; 3662 return err;
3337} 3663}
3338 3664
3339static struct nla_policy
3340nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] __read_mostly = {
3341 [NL80211_STA_WME_UAPSD_QUEUES] = { .type = NLA_U8 },
3342 [NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 },
3343};
3344
3345static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) 3665static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
3346{ 3666{
3347 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 3667 struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -3376,6 +3696,19 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
3376 if (!params.aid || params.aid > IEEE80211_MAX_AID) 3696 if (!params.aid || params.aid > IEEE80211_MAX_AID)
3377 return -EINVAL; 3697 return -EINVAL;
3378 3698
3699 if (info->attrs[NL80211_ATTR_STA_CAPABILITY]) {
3700 params.capability =
3701 nla_get_u16(info->attrs[NL80211_ATTR_STA_CAPABILITY]);
3702 params.sta_modify_mask |= STATION_PARAM_APPLY_CAPABILITY;
3703 }
3704
3705 if (info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]) {
3706 params.ext_capab =
3707 nla_data(info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]);
3708 params.ext_capab_len =
3709 nla_len(info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]);
3710 }
3711
3379 if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) 3712 if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
3380 params.ht_capa = 3713 params.ht_capa =
3381 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); 3714 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
@@ -4869,6 +5202,54 @@ static int nl80211_stop_sched_scan(struct sk_buff *skb,
4869 return err; 5202 return err;
4870} 5203}
4871 5204
5205static int nl80211_start_radar_detection(struct sk_buff *skb,
5206 struct genl_info *info)
5207{
5208 struct cfg80211_registered_device *rdev = info->user_ptr[0];
5209 struct net_device *dev = info->user_ptr[1];
5210 struct wireless_dev *wdev = dev->ieee80211_ptr;
5211 struct cfg80211_chan_def chandef;
5212 int err;
5213
5214 err = nl80211_parse_chandef(rdev, info, &chandef);
5215 if (err)
5216 return err;
5217
5218 if (wdev->cac_started)
5219 return -EBUSY;
5220
5221 err = cfg80211_chandef_dfs_required(wdev->wiphy, &chandef);
5222 if (err < 0)
5223 return err;
5224
5225 if (err == 0)
5226 return -EINVAL;
5227
5228 if (chandef.chan->dfs_state != NL80211_DFS_USABLE)
5229 return -EINVAL;
5230
5231 if (!rdev->ops->start_radar_detection)
5232 return -EOPNOTSUPP;
5233
5234 mutex_lock(&rdev->devlist_mtx);
5235 err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
5236 chandef.chan, CHAN_MODE_SHARED,
5237 BIT(chandef.width));
5238 if (err)
5239 goto err_locked;
5240
5241 err = rdev->ops->start_radar_detection(&rdev->wiphy, dev, &chandef);
5242 if (!err) {
5243 wdev->channel = chandef.chan;
5244 wdev->cac_started = true;
5245 wdev->cac_start_time = jiffies;
5246 }
5247err_locked:
5248 mutex_unlock(&rdev->devlist_mtx);
5249
5250 return err;
5251}
5252
4872static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, 5253static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
4873 u32 seq, int flags, 5254 u32 seq, int flags,
4874 struct cfg80211_registered_device *rdev, 5255 struct cfg80211_registered_device *rdev,
@@ -4879,6 +5260,7 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
4879 const struct cfg80211_bss_ies *ies; 5260 const struct cfg80211_bss_ies *ies;
4880 void *hdr; 5261 void *hdr;
4881 struct nlattr *bss; 5262 struct nlattr *bss;
5263 bool tsf = false;
4882 5264
4883 ASSERT_WDEV_LOCK(wdev); 5265 ASSERT_WDEV_LOCK(wdev);
4884 5266
@@ -4902,22 +5284,24 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
4902 5284
4903 rcu_read_lock(); 5285 rcu_read_lock();
4904 ies = rcu_dereference(res->ies); 5286 ies = rcu_dereference(res->ies);
4905 if (ies && ies->len && nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS, 5287 if (ies) {
4906 ies->len, ies->data)) { 5288 if (nla_put_u64(msg, NL80211_BSS_TSF, ies->tsf))
4907 rcu_read_unlock(); 5289 goto fail_unlock_rcu;
4908 goto nla_put_failure; 5290 tsf = true;
5291 if (ies->len && nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS,
5292 ies->len, ies->data))
5293 goto fail_unlock_rcu;
4909 } 5294 }
4910 ies = rcu_dereference(res->beacon_ies); 5295 ies = rcu_dereference(res->beacon_ies);
4911 if (ies && ies->len && nla_put(msg, NL80211_BSS_BEACON_IES, 5296 if (ies) {
4912 ies->len, ies->data)) { 5297 if (!tsf && nla_put_u64(msg, NL80211_BSS_TSF, ies->tsf))
4913 rcu_read_unlock(); 5298 goto fail_unlock_rcu;
4914 goto nla_put_failure; 5299 if (ies->len && nla_put(msg, NL80211_BSS_BEACON_IES,
5300 ies->len, ies->data))
5301 goto fail_unlock_rcu;
4915 } 5302 }
4916 rcu_read_unlock(); 5303 rcu_read_unlock();
4917 5304
4918 if (res->tsf &&
4919 nla_put_u64(msg, NL80211_BSS_TSF, res->tsf))
4920 goto nla_put_failure;
4921 if (res->beacon_interval && 5305 if (res->beacon_interval &&
4922 nla_put_u16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval)) 5306 nla_put_u16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval))
4923 goto nla_put_failure; 5307 goto nla_put_failure;
@@ -4962,6 +5346,8 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
4962 5346
4963 return genlmsg_end(msg, hdr); 5347 return genlmsg_end(msg, hdr);
4964 5348
5349 fail_unlock_rcu:
5350 rcu_read_unlock();
4965 nla_put_failure: 5351 nla_put_failure:
4966 genlmsg_cancel(msg, hdr); 5352 genlmsg_cancel(msg, hdr);
4967 return -EMSGSIZE; 5353 return -EMSGSIZE;
@@ -6772,16 +7158,100 @@ static int nl80211_leave_mesh(struct sk_buff *skb, struct genl_info *info)
6772} 7158}
6773 7159
6774#ifdef CONFIG_PM 7160#ifdef CONFIG_PM
7161static int nl80211_send_wowlan_patterns(struct sk_buff *msg,
7162 struct cfg80211_registered_device *rdev)
7163{
7164 struct nlattr *nl_pats, *nl_pat;
7165 int i, pat_len;
7166
7167 if (!rdev->wowlan->n_patterns)
7168 return 0;
7169
7170 nl_pats = nla_nest_start(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN);
7171 if (!nl_pats)
7172 return -ENOBUFS;
7173
7174 for (i = 0; i < rdev->wowlan->n_patterns; i++) {
7175 nl_pat = nla_nest_start(msg, i + 1);
7176 if (!nl_pat)
7177 return -ENOBUFS;
7178 pat_len = rdev->wowlan->patterns[i].pattern_len;
7179 if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK,
7180 DIV_ROUND_UP(pat_len, 8),
7181 rdev->wowlan->patterns[i].mask) ||
7182 nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
7183 pat_len, rdev->wowlan->patterns[i].pattern) ||
7184 nla_put_u32(msg, NL80211_WOWLAN_PKTPAT_OFFSET,
7185 rdev->wowlan->patterns[i].pkt_offset))
7186 return -ENOBUFS;
7187 nla_nest_end(msg, nl_pat);
7188 }
7189 nla_nest_end(msg, nl_pats);
7190
7191 return 0;
7192}
7193
7194static int nl80211_send_wowlan_tcp(struct sk_buff *msg,
7195 struct cfg80211_wowlan_tcp *tcp)
7196{
7197 struct nlattr *nl_tcp;
7198
7199 if (!tcp)
7200 return 0;
7201
7202 nl_tcp = nla_nest_start(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION);
7203 if (!nl_tcp)
7204 return -ENOBUFS;
7205
7206 if (nla_put_be32(msg, NL80211_WOWLAN_TCP_SRC_IPV4, tcp->src) ||
7207 nla_put_be32(msg, NL80211_WOWLAN_TCP_DST_IPV4, tcp->dst) ||
7208 nla_put(msg, NL80211_WOWLAN_TCP_DST_MAC, ETH_ALEN, tcp->dst_mac) ||
7209 nla_put_u16(msg, NL80211_WOWLAN_TCP_SRC_PORT, tcp->src_port) ||
7210 nla_put_u16(msg, NL80211_WOWLAN_TCP_DST_PORT, tcp->dst_port) ||
7211 nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
7212 tcp->payload_len, tcp->payload) ||
7213 nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL,
7214 tcp->data_interval) ||
7215 nla_put(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD,
7216 tcp->wake_len, tcp->wake_data) ||
7217 nla_put(msg, NL80211_WOWLAN_TCP_WAKE_MASK,
7218 DIV_ROUND_UP(tcp->wake_len, 8), tcp->wake_mask))
7219 return -ENOBUFS;
7220
7221 if (tcp->payload_seq.len &&
7222 nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ,
7223 sizeof(tcp->payload_seq), &tcp->payload_seq))
7224 return -ENOBUFS;
7225
7226 if (tcp->payload_tok.len &&
7227 nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN,
7228 sizeof(tcp->payload_tok) + tcp->tokens_size,
7229 &tcp->payload_tok))
7230 return -ENOBUFS;
7231
7232 return 0;
7233}
7234
6775static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info) 7235static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
6776{ 7236{
6777 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 7237 struct cfg80211_registered_device *rdev = info->user_ptr[0];
6778 struct sk_buff *msg; 7238 struct sk_buff *msg;
6779 void *hdr; 7239 void *hdr;
7240 u32 size = NLMSG_DEFAULT_SIZE;
6780 7241
6781 if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns) 7242 if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns &&
7243 !rdev->wiphy.wowlan.tcp)
6782 return -EOPNOTSUPP; 7244 return -EOPNOTSUPP;
6783 7245
6784 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 7246 if (rdev->wowlan && rdev->wowlan->tcp) {
7247 /* adjust size to have room for all the data */
7248 size += rdev->wowlan->tcp->tokens_size +
7249 rdev->wowlan->tcp->payload_len +
7250 rdev->wowlan->tcp->wake_len +
7251 rdev->wowlan->tcp->wake_len / 8;
7252 }
7253
7254 msg = nlmsg_new(size, GFP_KERNEL);
6785 if (!msg) 7255 if (!msg)
6786 return -ENOMEM; 7256 return -ENOMEM;
6787 7257
@@ -6812,31 +7282,12 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
6812 (rdev->wowlan->rfkill_release && 7282 (rdev->wowlan->rfkill_release &&
6813 nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))) 7283 nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
6814 goto nla_put_failure; 7284 goto nla_put_failure;
6815 if (rdev->wowlan->n_patterns) {
6816 struct nlattr *nl_pats, *nl_pat;
6817 int i, pat_len;
6818 7285
6819 nl_pats = nla_nest_start(msg, 7286 if (nl80211_send_wowlan_patterns(msg, rdev))
6820 NL80211_WOWLAN_TRIG_PKT_PATTERN); 7287 goto nla_put_failure;
6821 if (!nl_pats)
6822 goto nla_put_failure;
6823 7288
6824 for (i = 0; i < rdev->wowlan->n_patterns; i++) { 7289 if (nl80211_send_wowlan_tcp(msg, rdev->wowlan->tcp))
6825 nl_pat = nla_nest_start(msg, i + 1); 7290 goto nla_put_failure;
6826 if (!nl_pat)
6827 goto nla_put_failure;
6828 pat_len = rdev->wowlan->patterns[i].pattern_len;
6829 if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK,
6830 DIV_ROUND_UP(pat_len, 8),
6831 rdev->wowlan->patterns[i].mask) ||
6832 nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
6833 pat_len,
6834 rdev->wowlan->patterns[i].pattern))
6835 goto nla_put_failure;
6836 nla_nest_end(msg, nl_pat);
6837 }
6838 nla_nest_end(msg, nl_pats);
6839 }
6840 7291
6841 nla_nest_end(msg, nl_wowlan); 7292 nla_nest_end(msg, nl_wowlan);
6842 } 7293 }
@@ -6849,6 +7300,150 @@ nla_put_failure:
6849 return -ENOBUFS; 7300 return -ENOBUFS;
6850} 7301}
6851 7302
7303static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev,
7304 struct nlattr *attr,
7305 struct cfg80211_wowlan *trig)
7306{
7307 struct nlattr *tb[NUM_NL80211_WOWLAN_TCP];
7308 struct cfg80211_wowlan_tcp *cfg;
7309 struct nl80211_wowlan_tcp_data_token *tok = NULL;
7310 struct nl80211_wowlan_tcp_data_seq *seq = NULL;
7311 u32 size;
7312 u32 data_size, wake_size, tokens_size = 0, wake_mask_size;
7313 int err, port;
7314
7315 if (!rdev->wiphy.wowlan.tcp)
7316 return -EINVAL;
7317
7318 err = nla_parse(tb, MAX_NL80211_WOWLAN_TCP,
7319 nla_data(attr), nla_len(attr),
7320 nl80211_wowlan_tcp_policy);
7321 if (err)
7322 return err;
7323
7324 if (!tb[NL80211_WOWLAN_TCP_SRC_IPV4] ||
7325 !tb[NL80211_WOWLAN_TCP_DST_IPV4] ||
7326 !tb[NL80211_WOWLAN_TCP_DST_MAC] ||
7327 !tb[NL80211_WOWLAN_TCP_DST_PORT] ||
7328 !tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD] ||
7329 !tb[NL80211_WOWLAN_TCP_DATA_INTERVAL] ||
7330 !tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD] ||
7331 !tb[NL80211_WOWLAN_TCP_WAKE_MASK])
7332 return -EINVAL;
7333
7334 data_size = nla_len(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD]);
7335 if (data_size > rdev->wiphy.wowlan.tcp->data_payload_max)
7336 return -EINVAL;
7337
7338 if (nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) >
7339 rdev->wiphy.wowlan.tcp->data_interval_max)
7340 return -EINVAL;
7341
7342 wake_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]);
7343 if (wake_size > rdev->wiphy.wowlan.tcp->wake_payload_max)
7344 return -EINVAL;
7345
7346 wake_mask_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_MASK]);
7347 if (wake_mask_size != DIV_ROUND_UP(wake_size, 8))
7348 return -EINVAL;
7349
7350 if (tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN]) {
7351 u32 tokln = nla_len(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN]);
7352
7353 tok = nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN]);
7354 tokens_size = tokln - sizeof(*tok);
7355
7356 if (!tok->len || tokens_size % tok->len)
7357 return -EINVAL;
7358 if (!rdev->wiphy.wowlan.tcp->tok)
7359 return -EINVAL;
7360 if (tok->len > rdev->wiphy.wowlan.tcp->tok->max_len)
7361 return -EINVAL;
7362 if (tok->len < rdev->wiphy.wowlan.tcp->tok->min_len)
7363 return -EINVAL;
7364 if (tokens_size > rdev->wiphy.wowlan.tcp->tok->bufsize)
7365 return -EINVAL;
7366 if (tok->offset + tok->len > data_size)
7367 return -EINVAL;
7368 }
7369
7370 if (tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]) {
7371 seq = nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]);
7372 if (!rdev->wiphy.wowlan.tcp->seq)
7373 return -EINVAL;
7374 if (seq->len == 0 || seq->len > 4)
7375 return -EINVAL;
7376 if (seq->len + seq->offset > data_size)
7377 return -EINVAL;
7378 }
7379
7380 size = sizeof(*cfg);
7381 size += data_size;
7382 size += wake_size + wake_mask_size;
7383 size += tokens_size;
7384
7385 cfg = kzalloc(size, GFP_KERNEL);
7386 if (!cfg)
7387 return -ENOMEM;
7388 cfg->src = nla_get_be32(tb[NL80211_WOWLAN_TCP_SRC_IPV4]);
7389 cfg->dst = nla_get_be32(tb[NL80211_WOWLAN_TCP_DST_IPV4]);
7390 memcpy(cfg->dst_mac, nla_data(tb[NL80211_WOWLAN_TCP_DST_MAC]),
7391 ETH_ALEN);
7392 if (tb[NL80211_WOWLAN_TCP_SRC_PORT])
7393 port = nla_get_u16(tb[NL80211_WOWLAN_TCP_SRC_PORT]);
7394 else
7395 port = 0;
7396#ifdef CONFIG_INET
7397 /* allocate a socket and port for it and use it */
7398 err = __sock_create(wiphy_net(&rdev->wiphy), PF_INET, SOCK_STREAM,
7399 IPPROTO_TCP, &cfg->sock, 1);
7400 if (err) {
7401 kfree(cfg);
7402 return err;
7403 }
7404 if (inet_csk_get_port(cfg->sock->sk, port)) {
7405 sock_release(cfg->sock);
7406 kfree(cfg);
7407 return -EADDRINUSE;
7408 }
7409 cfg->src_port = inet_sk(cfg->sock->sk)->inet_num;
7410#else
7411 if (!port) {
7412 kfree(cfg);
7413 return -EINVAL;
7414 }
7415 cfg->src_port = port;
7416#endif
7417
7418 cfg->dst_port = nla_get_u16(tb[NL80211_WOWLAN_TCP_DST_PORT]);
7419 cfg->payload_len = data_size;
7420 cfg->payload = (u8 *)cfg + sizeof(*cfg) + tokens_size;
7421 memcpy((void *)cfg->payload,
7422 nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD]),
7423 data_size);
7424 if (seq)
7425 cfg->payload_seq = *seq;
7426 cfg->data_interval = nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]);
7427 cfg->wake_len = wake_size;
7428 cfg->wake_data = (u8 *)cfg + sizeof(*cfg) + tokens_size + data_size;
7429 memcpy((void *)cfg->wake_data,
7430 nla_data(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]),
7431 wake_size);
7432 cfg->wake_mask = (u8 *)cfg + sizeof(*cfg) + tokens_size +
7433 data_size + wake_size;
7434 memcpy((void *)cfg->wake_mask,
7435 nla_data(tb[NL80211_WOWLAN_TCP_WAKE_MASK]),
7436 wake_mask_size);
7437 if (tok) {
7438 cfg->tokens_size = tokens_size;
7439 memcpy(&cfg->payload_tok, tok, sizeof(*tok) + tokens_size);
7440 }
7441
7442 trig->tcp = cfg;
7443
7444 return 0;
7445}
7446
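nl80211_parse_wowlan_tcp() deliberately makes a single kzalloc() of size bytes and carves it up, so that tearing the configuration down is one kfree() plus the sock_release() seen in the error paths. The resulting layout, with the offsets exactly as computed above:

	/*
	 * one allocation, laid out back to back:
	 *
	 *   (u8 *)cfg
	 *   +---------------------------------+
	 *   | struct cfg80211_wowlan_tcp      |
	 *   |   ... payload_tok token data    |  up to sizeof(*cfg) + tokens_size
	 *   +---------------------------------+
	 *   | payload      (data_size)        |  cfg->payload
	 *   +---------------------------------+
	 *   | wake_data    (wake_size)        |  cfg->wake_data
	 *   +---------------------------------+
	 *   | wake_mask    (wake_mask_size)   |  cfg->wake_mask
	 *   +---------------------------------+
	 */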
6852static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) 7447static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
6853{ 7448{
6854 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 7449 struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -6859,7 +7454,8 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
6859 int err, i; 7454 int err, i;
6860 bool prev_enabled = rdev->wowlan; 7455 bool prev_enabled = rdev->wowlan;
6861 7456
6862 if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns) 7457 if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns &&
7458 !rdev->wiphy.wowlan.tcp)
6863 return -EOPNOTSUPP; 7459 return -EOPNOTSUPP;
6864 7460
6865 if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]) { 7461 if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]) {
@@ -6923,7 +7519,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
6923 if (tb[NL80211_WOWLAN_TRIG_PKT_PATTERN]) { 7519 if (tb[NL80211_WOWLAN_TRIG_PKT_PATTERN]) {
6924 struct nlattr *pat; 7520 struct nlattr *pat;
6925 int n_patterns = 0; 7521 int n_patterns = 0;
6926 int rem, pat_len, mask_len; 7522 int rem, pat_len, mask_len, pkt_offset;
6927 struct nlattr *pat_tb[NUM_NL80211_WOWLAN_PKTPAT]; 7523 struct nlattr *pat_tb[NUM_NL80211_WOWLAN_PKTPAT];
6928 7524
6929 nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN], 7525 nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
@@ -6958,6 +7554,15 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
6958 pat_len < wowlan->pattern_min_len) 7554 pat_len < wowlan->pattern_min_len)
6959 goto error; 7555 goto error;
6960 7556
7557 if (!pat_tb[NL80211_WOWLAN_PKTPAT_OFFSET])
7558 pkt_offset = 0;
7559 else
7560 pkt_offset = nla_get_u32(
7561 pat_tb[NL80211_WOWLAN_PKTPAT_OFFSET]);
7562 if (pkt_offset > wowlan->max_pkt_offset)
7563 goto error;
7564 new_triggers.patterns[i].pkt_offset = pkt_offset;
7565
6961 new_triggers.patterns[i].mask = 7566 new_triggers.patterns[i].mask =
6962 kmalloc(mask_len + pat_len, GFP_KERNEL); 7567 kmalloc(mask_len + pat_len, GFP_KERNEL);
6963 if (!new_triggers.patterns[i].mask) { 7568 if (!new_triggers.patterns[i].mask) {
@@ -6977,6 +7582,14 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
6977 } 7582 }
6978 } 7583 }
6979 7584
7585 if (tb[NL80211_WOWLAN_TRIG_TCP_CONNECTION]) {
7586 err = nl80211_parse_wowlan_tcp(
7587 rdev, tb[NL80211_WOWLAN_TRIG_TCP_CONNECTION],
7588 &new_triggers);
7589 if (err)
7590 goto error;
7591 }
7592
6980 ntrig = kmemdup(&new_triggers, sizeof(new_triggers), GFP_KERNEL); 7593 ntrig = kmemdup(&new_triggers, sizeof(new_triggers), GFP_KERNEL);
6981 if (!ntrig) { 7594 if (!ntrig) {
6982 err = -ENOMEM; 7595 err = -ENOMEM;
@@ -6994,6 +7607,9 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
6994 for (i = 0; i < new_triggers.n_patterns; i++) 7607 for (i = 0; i < new_triggers.n_patterns; i++)
6995 kfree(new_triggers.patterns[i].mask); 7608 kfree(new_triggers.patterns[i].mask);
6996 kfree(new_triggers.patterns); 7609 kfree(new_triggers.patterns);
7610 if (new_triggers.tcp && new_triggers.tcp->sock)
7611 sock_release(new_triggers.tcp->sock);
7612 kfree(new_triggers.tcp);
6997 return err; 7613 return err;
6998} 7614}
6999#endif 7615#endif
@@ -7876,6 +8492,22 @@ static struct genl_ops nl80211_ops[] = {
7876 .internal_flags = NL80211_FLAG_NEED_NETDEV | 8492 .internal_flags = NL80211_FLAG_NEED_NETDEV |
7877 NL80211_FLAG_NEED_RTNL, 8493 NL80211_FLAG_NEED_RTNL,
7878 }, 8494 },
8495 {
8496 .cmd = NL80211_CMD_SET_MAC_ACL,
8497 .doit = nl80211_set_mac_acl,
8498 .policy = nl80211_policy,
8499 .flags = GENL_ADMIN_PERM,
8500 .internal_flags = NL80211_FLAG_NEED_NETDEV |
8501 NL80211_FLAG_NEED_RTNL,
8502 },
8503 {
8504 .cmd = NL80211_CMD_RADAR_DETECT,
8505 .doit = nl80211_start_radar_detection,
8506 .policy = nl80211_policy,
8507 .flags = GENL_ADMIN_PERM,
8508 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
8509 NL80211_FLAG_NEED_RTNL,
8510 },
7879}; 8511};
7880 8512
7881static struct genl_multicast_group nl80211_mlme_mcgrp = { 8513static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -9073,6 +9705,57 @@ nl80211_send_cqm_txe_notify(struct cfg80211_registered_device *rdev,
9073} 9705}
9074 9706
9075void 9707void
9708nl80211_radar_notify(struct cfg80211_registered_device *rdev,
9709 struct cfg80211_chan_def *chandef,
9710 enum nl80211_radar_event event,
9711 struct net_device *netdev, gfp_t gfp)
9712{
9713 struct sk_buff *msg;
9714 void *hdr;
9715
9716 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
9717 if (!msg)
9718 return;
9719
9720 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_RADAR_DETECT);
9721 if (!hdr) {
9722 nlmsg_free(msg);
9723 return;
9724 }
9725
9726 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx))
9727 goto nla_put_failure;
9728
9729 /* NOP and radar events don't need a netdev parameter */
9730 if (netdev) {
9731 struct wireless_dev *wdev = netdev->ieee80211_ptr;
9732
9733 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
9734 nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
9735 goto nla_put_failure;
9736 }
9737
9738 if (nla_put_u32(msg, NL80211_ATTR_RADAR_EVENT, event))
9739 goto nla_put_failure;
9740
9741 if (nl80211_send_chandef(msg, chandef))
9742 goto nla_put_failure;
9743
9744 if (genlmsg_end(msg, hdr) < 0) {
9745 nlmsg_free(msg);
9746 return;
9747 }
9748
9749 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
9750 nl80211_mlme_mcgrp.id, gfp);
9751 return;
9752
9753 nla_put_failure:
9754 genlmsg_cancel(msg, hdr);
9755 nlmsg_free(msg);
9756}
9757
9758void
9076nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev, 9759nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
9077 struct net_device *netdev, const u8 *peer, 9760 struct net_device *netdev, const u8 *peer,
9078 u32 num_packets, gfp_t gfp) 9761 u32 num_packets, gfp_t gfp)
@@ -9207,6 +9890,114 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
9207} 9890}
9208EXPORT_SYMBOL(cfg80211_report_obss_beacon); 9891EXPORT_SYMBOL(cfg80211_report_obss_beacon);
9209 9892
9893#ifdef CONFIG_PM
9894void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
9895 struct cfg80211_wowlan_wakeup *wakeup,
9896 gfp_t gfp)
9897{
9898 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
9899 struct sk_buff *msg;
9900 void *hdr;
9901 int err, size = 200;
9902
9903 trace_cfg80211_report_wowlan_wakeup(wdev->wiphy, wdev, wakeup);
9904
9905 if (wakeup)
9906 size += wakeup->packet_present_len;
9907
9908 msg = nlmsg_new(size, gfp);
9909 if (!msg)
9910 return;
9911
9912 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_SET_WOWLAN);
9913 if (!hdr)
9914 goto free_msg;
9915
9916 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
9917 nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
9918 goto free_msg;
9919
9920 if (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
9921 wdev->netdev->ifindex))
9922 goto free_msg;
9923
9924 if (wakeup) {
9925 struct nlattr *reasons;
9926
9927 reasons = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS);
9928
9929 if (wakeup->disconnect &&
9930 nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT))
9931 goto free_msg;
9932 if (wakeup->magic_pkt &&
9933 nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT))
9934 goto free_msg;
9935 if (wakeup->gtk_rekey_failure &&
9936 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE))
9937 goto free_msg;
9938 if (wakeup->eap_identity_req &&
9939 nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST))
9940 goto free_msg;
9941 if (wakeup->four_way_handshake &&
9942 nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE))
9943 goto free_msg;
9944 if (wakeup->rfkill_release &&
9945 nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))
9946 goto free_msg;
9947
9948 if (wakeup->pattern_idx >= 0 &&
9949 nla_put_u32(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
9950 wakeup->pattern_idx))
9951 goto free_msg;
9952
9953 if (wakeup->tcp_match)
9954 nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH);
9955
9956 if (wakeup->tcp_connlost)
9957 nla_put_flag(msg,
9958 NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST);
9959
9960 if (wakeup->tcp_nomoretokens)
9961 nla_put_flag(msg,
9962 NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS);
9963
9964 if (wakeup->packet) {
9965 u32 pkt_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211;
9966 u32 len_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN;
9967
9968 if (!wakeup->packet_80211) {
9969 pkt_attr =
9970 NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023;
9971 len_attr =
9972 NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023_LEN;
9973 }
9974
9975 if (wakeup->packet_len &&
9976 nla_put_u32(msg, len_attr, wakeup->packet_len))
9977 goto free_msg;
9978
9979 if (nla_put(msg, pkt_attr, wakeup->packet_present_len,
9980 wakeup->packet))
9981 goto free_msg;
9982 }
9983
9984 nla_nest_end(msg, reasons);
9985 }
9986
9987 err = genlmsg_end(msg, hdr);
9988 if (err < 0)
9989 goto free_msg;
9990
9991 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
9992 nl80211_mlme_mcgrp.id, gfp);
9993 return;
9994
9995 free_msg:
9996 nlmsg_free(msg);
9997}
9998EXPORT_SYMBOL(cfg80211_report_wowlan_wakeup);
9999#endif
10000
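A driver calls cfg80211_report_wowlan_wakeup() from its resume path; passing a NULL wakeup pointer is allowed and means the reason is unknown, while pattern_idx < 0 means no pattern matched. A hedged sketch (the mydrv_* name is hypothetical):

	static void mydrv_report_wakeup(struct wireless_dev *wdev,
					bool disconnected)
	{
		struct cfg80211_wowlan_wakeup wakeup = {
			.disconnect = disconnected,
			.pattern_idx = -1,	/* no pattern matched */
		};

		/* pass NULL instead of &wakeup if the cause is unknown */
		cfg80211_report_wowlan_wakeup(wdev, &wakeup, GFP_KERNEL);
	}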
9210void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer, 10001void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer,
9211 enum nl80211_tdls_operation oper, 10002 enum nl80211_tdls_operation oper,
9212 u16 reason_code, gfp_t gfp) 10003 u16 reason_code, gfp_t gfp)
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 2acba8477e9d..b061da4919e1 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -108,6 +108,13 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
108 struct net_device *netdev, 108 struct net_device *netdev,
109 enum nl80211_cqm_rssi_threshold_event rssi_event, 109 enum nl80211_cqm_rssi_threshold_event rssi_event,
110 gfp_t gfp); 110 gfp_t gfp);
111
112void
113nl80211_radar_notify(struct cfg80211_registered_device *rdev,
114 struct cfg80211_chan_def *chandef,
115 enum nl80211_radar_event event,
116 struct net_device *netdev, gfp_t gfp);
117
111void 118void
112nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev, 119nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
113 struct net_device *netdev, const u8 *peer, 120 struct net_device *netdev, const u8 *peer,
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 6c0c8191f837..422d38291d66 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -875,4 +875,16 @@ static inline void rdev_stop_p2p_device(struct cfg80211_registered_device *rdev,
875 rdev->ops->stop_p2p_device(&rdev->wiphy, wdev); 875 rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
876 trace_rdev_return_void(&rdev->wiphy); 876 trace_rdev_return_void(&rdev->wiphy);
877} 877}
878
879static inline int rdev_set_mac_acl(struct cfg80211_registered_device *rdev,
880 struct net_device *dev,
881 struct cfg80211_acl_data *params)
882{
883 int ret;
884
885 trace_rdev_set_mac_acl(&rdev->wiphy, dev, params);
886 ret = rdev->ops->set_mac_acl(&rdev->wiphy, dev, params);
887 trace_rdev_return_int(&rdev->wiphy, ret);
888 return ret;
889}
878#endif /* __CFG80211_RDEV_OPS */ 890#endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index de02d633c212..98532c00242d 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -866,6 +866,10 @@ static void handle_channel(struct wiphy *wiphy,
866 866
867 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40)) 867 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
868 bw_flags = IEEE80211_CHAN_NO_HT40; 868 bw_flags = IEEE80211_CHAN_NO_HT40;
869 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(80))
870 bw_flags |= IEEE80211_CHAN_NO_80MHZ;
871 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(160))
872 bw_flags |= IEEE80211_CHAN_NO_160MHZ;
869 873
870 if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER && 874 if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
871 request_wiphy && request_wiphy == wiphy && 875 request_wiphy && request_wiphy == wiphy &&
@@ -884,6 +888,9 @@ static void handle_channel(struct wiphy *wiphy,
884 return; 888 return;
885 } 889 }
886 890
891 chan->dfs_state = NL80211_DFS_USABLE;
892 chan->dfs_state_entered = jiffies;
893
887 chan->beacon_found = false; 894 chan->beacon_found = false;
888 chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags); 895 chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
889 chan->max_antenna_gain = 896 chan->max_antenna_gain =
@@ -1261,6 +1268,10 @@ static void handle_channel_custom(struct wiphy *wiphy,
1261 1268
1262 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40)) 1269 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
1263 bw_flags = IEEE80211_CHAN_NO_HT40; 1270 bw_flags = IEEE80211_CHAN_NO_HT40;
1271 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(80))
1272 bw_flags |= IEEE80211_CHAN_NO_80MHZ;
1273 if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(160))
1274 bw_flags |= IEEE80211_CHAN_NO_160MHZ;
1264 1275
1265 chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags; 1276 chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags;
1266 chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain); 1277 chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain);
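With these checks, a rule's maximum bandwidth now maps cumulatively onto channel flags. A worked instance, assuming a regulatory rule with max_bandwidth_khz = MHZ_TO_KHZ(40):

	bw_flags = 0;
	/* 40 < 40 is false: HT40 stays allowed                    */
	/* 40 < 80 is true:  bw_flags |= IEEE80211_CHAN_NO_80MHZ   */
	/* 40 < 160 is true: bw_flags |= IEEE80211_CHAN_NO_160MHZ  */

so such a channel still permits HT40 operation but rejects 80 and 160 MHz.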
@@ -2189,10 +2200,15 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2189 * However, if a driver requested this specific regulatory 2200 * However, if a driver requested this specific regulatory
2190 * domain, we keep it for its private use 2201 * domain, we keep it for its private use
2191 */ 2202 */
2192 if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER) 2203 if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER) {
2204 const struct ieee80211_regdomain *tmp;
2205
2206 tmp = get_wiphy_regdom(request_wiphy);
2193 rcu_assign_pointer(request_wiphy->regd, rd); 2207 rcu_assign_pointer(request_wiphy->regd, rd);
2194 else 2208 rcu_free_regdom(tmp);
2209 } else {
2195 kfree(rd); 2210 kfree(rd);
2211 }
2196 2212
2197 rd = NULL; 2213 rd = NULL;
2198 2214
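
The __set_regdom() hunk plugs a leak: the wiphy's previous private regdomain is fetched before the pointer is replaced, then released once no reader can still see it. The same publish-then-free idiom can be sketched in userspace with liburcu (an assumption for illustration — its API mirrors the kernel's, though the kernel defers the free to an RCU callback rather than blocking in synchronize_rcu()):

#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>		/* userspace RCU; link with -lurcu */

struct regdom { char alpha2[3]; };

static struct regdom *current_rd;

static void replace_regdom(struct regdom *new_rd)
{
	/* writer side: the update lock is assumed held, a plain read is safe */
	struct regdom *old = current_rd;

	rcu_assign_pointer(current_rd, new_rd);	/* publish the new domain */
	synchronize_rcu();			/* wait out existing readers */
	free(old);				/* now nobody can see it */
}

int main(void)
{
	struct regdom *a = calloc(1, sizeof(*a));
	struct regdom *b = calloc(1, sizeof(*b));

	rcu_register_thread();
	rcu_assign_pointer(current_rd, a);
	replace_regdom(b);			/* frees a, installs b */
	puts("old regdomain released");
	rcu_unregister_thread();
	free(b);
	return 0;
}
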
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 01592d7d4789..674aadca0079 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -19,55 +19,142 @@
19#include "wext-compat.h" 19#include "wext-compat.h"
20#include "rdev-ops.h" 20#include "rdev-ops.h"
21 21
22/**
23 * DOC: BSS tree/list structure
24 *
25 * At the top level, the BSS list is kept in both a list in each
26 * registered device (@bss_list) as well as an RB-tree for faster
27 * lookup. In the RB-tree, entries can be looked up using their
28 * channel, MESHID, MESHCONF (for MBSSes) or channel, BSSID, SSID
29 * for other BSSes.
30 *
31 * Due to the possibility of hidden SSIDs, there's a second level
32 * structure, the "hidden_list" and "hidden_beacon_bss" pointer.
33 * The hidden_list connects all BSSes belonging to a single AP
34 * that has a hidden SSID, and connects beacon and probe response
35 * entries. For a probe response entry for a hidden SSID, the
36 * hidden_beacon_bss pointer points to the BSS struct holding the
37 * beacon's information.
38 *
39 * Reference counting is done for all these references except for
40 * the hidden_list, so that a beacon BSS struct that is otherwise
41 * not referenced has one reference for being on the bss_list and
42 * one for each probe response entry that points to it using the
43 * hidden_beacon_bss pointer. When a BSS struct that has such a
44 * pointer is get/put, the refcount update is also propagated to
45 * the referenced struct, this ensure that it cannot get removed
46 * while somebody is using the probe response version.
47 *
48 * Note that the hidden_beacon_bss pointer never changes, due to
49 * the reference counting. Therefore, no locking is needed for
50 * it.
51 *
52 * Also note that the hidden_beacon_bss pointer is only relevant
53 * if the driver uses something other than the IEs, e.g. private
54 * data stored stored in the BSS struct, since the beacon IEs are
55 * also linked into the probe response struct.
56 */
57
22#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ) 58#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ)
23 59
24static void bss_release(struct kref *ref) 60static void bss_free(struct cfg80211_internal_bss *bss)
25{ 61{
26 struct cfg80211_bss_ies *ies; 62 struct cfg80211_bss_ies *ies;
27 struct cfg80211_internal_bss *bss;
28
29 bss = container_of(ref, struct cfg80211_internal_bss, ref);
30 63
31 if (WARN_ON(atomic_read(&bss->hold))) 64 if (WARN_ON(atomic_read(&bss->hold)))
32 return; 65 return;
33 66
34 if (bss->pub.free_priv)
35 bss->pub.free_priv(&bss->pub);
36
37 ies = (void *)rcu_access_pointer(bss->pub.beacon_ies); 67 ies = (void *)rcu_access_pointer(bss->pub.beacon_ies);
38 if (ies) 68 if (ies && !bss->pub.hidden_beacon_bss)
39 kfree_rcu(ies, rcu_head); 69 kfree_rcu(ies, rcu_head);
40 ies = (void *)rcu_access_pointer(bss->pub.proberesp_ies); 70 ies = (void *)rcu_access_pointer(bss->pub.proberesp_ies);
41 if (ies) 71 if (ies)
42 kfree_rcu(ies, rcu_head); 72 kfree_rcu(ies, rcu_head);
43 73
74 /*
 75 * This happens when the module is removed; it doesn't
76 * really matter any more save for completeness
77 */
78 if (!list_empty(&bss->hidden_list))
79 list_del(&bss->hidden_list);
80
44 kfree(bss); 81 kfree(bss);
45} 82}
46 83
47/* must hold dev->bss_lock! */ 84static inline void bss_ref_get(struct cfg80211_registered_device *dev,
48static void __cfg80211_unlink_bss(struct cfg80211_registered_device *dev, 85 struct cfg80211_internal_bss *bss)
86{
87 lockdep_assert_held(&dev->bss_lock);
88
89 bss->refcount++;
90 if (bss->pub.hidden_beacon_bss) {
91 bss = container_of(bss->pub.hidden_beacon_bss,
92 struct cfg80211_internal_bss,
93 pub);
94 bss->refcount++;
95 }
96}
97
98static inline void bss_ref_put(struct cfg80211_registered_device *dev,
99 struct cfg80211_internal_bss *bss)
100{
101 lockdep_assert_held(&dev->bss_lock);
102
103 if (bss->pub.hidden_beacon_bss) {
104 struct cfg80211_internal_bss *hbss;
105 hbss = container_of(bss->pub.hidden_beacon_bss,
106 struct cfg80211_internal_bss,
107 pub);
108 hbss->refcount--;
109 if (hbss->refcount == 0)
110 bss_free(hbss);
111 }
112 bss->refcount--;
113 if (bss->refcount == 0)
114 bss_free(bss);
115}
116
117static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *dev,
49 struct cfg80211_internal_bss *bss) 118 struct cfg80211_internal_bss *bss)
50{ 119{
120 lockdep_assert_held(&dev->bss_lock);
121
122 if (!list_empty(&bss->hidden_list)) {
123 /*
124 * don't remove the beacon entry if it has
125 * probe responses associated with it
126 */
127 if (!bss->pub.hidden_beacon_bss)
128 return false;
129 /*
130 * if it's a probe response entry break its
131 * link to the other entries in the group
132 */
133 list_del_init(&bss->hidden_list);
134 }
135
51 list_del_init(&bss->list); 136 list_del_init(&bss->list);
52 rb_erase(&bss->rbn, &dev->bss_tree); 137 rb_erase(&bss->rbn, &dev->bss_tree);
53 kref_put(&bss->ref, bss_release); 138 bss_ref_put(dev, bss);
139 return true;
54} 140}
55 141
56/* must hold dev->bss_lock! */
57static void __cfg80211_bss_expire(struct cfg80211_registered_device *dev, 142static void __cfg80211_bss_expire(struct cfg80211_registered_device *dev,
58 unsigned long expire_time) 143 unsigned long expire_time)
59{ 144{
60 struct cfg80211_internal_bss *bss, *tmp; 145 struct cfg80211_internal_bss *bss, *tmp;
61 bool expired = false; 146 bool expired = false;
62 147
148 lockdep_assert_held(&dev->bss_lock);
149
63 list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) { 150 list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) {
64 if (atomic_read(&bss->hold)) 151 if (atomic_read(&bss->hold))
65 continue; 152 continue;
66 if (!time_after(expire_time, bss->ts)) 153 if (!time_after(expire_time, bss->ts))
67 continue; 154 continue;
68 155
69 __cfg80211_unlink_bss(dev, bss); 156 if (__cfg80211_unlink_bss(dev, bss))
70 expired = true; 157 expired = true;
71 } 158 }
72 159
73 if (expired) 160 if (expired)
@@ -234,15 +321,16 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
234 return 0; 321 return 0;
235} 322}
236 323
237/* must hold dev->bss_lock! */
238void cfg80211_bss_age(struct cfg80211_registered_device *dev, 324void cfg80211_bss_age(struct cfg80211_registered_device *dev,
239 unsigned long age_secs) 325 unsigned long age_secs)
240{ 326{
241 struct cfg80211_internal_bss *bss; 327 struct cfg80211_internal_bss *bss;
242 unsigned long age_jiffies = msecs_to_jiffies(age_secs * MSEC_PER_SEC); 328 unsigned long age_jiffies = msecs_to_jiffies(age_secs * MSEC_PER_SEC);
243 329
330 spin_lock_bh(&dev->bss_lock);
244 list_for_each_entry(bss, &dev->bss_list, list) 331 list_for_each_entry(bss, &dev->bss_list, list)
245 bss->ts -= age_jiffies; 332 bss->ts -= age_jiffies;
333 spin_unlock_bh(&dev->bss_lock);
246} 334}
247 335
248void cfg80211_bss_expire(struct cfg80211_registered_device *dev) 336void cfg80211_bss_expire(struct cfg80211_registered_device *dev)
@@ -277,40 +365,24 @@ const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type,
277 if (!pos) 365 if (!pos)
278 return NULL; 366 return NULL;
279 367
280 if (end - pos < sizeof(*ie))
281 return NULL;
282
283 ie = (struct ieee80211_vendor_ie *)pos; 368 ie = (struct ieee80211_vendor_ie *)pos;
369
370 /* make sure we can access ie->len */
371 BUILD_BUG_ON(offsetof(struct ieee80211_vendor_ie, len) != 1);
372
373 if (ie->len < sizeof(*ie))
374 goto cont;
375
284 ie_oui = ie->oui[0] << 16 | ie->oui[1] << 8 | ie->oui[2]; 376 ie_oui = ie->oui[0] << 16 | ie->oui[1] << 8 | ie->oui[2];
285 if (ie_oui == oui && ie->oui_type == oui_type) 377 if (ie_oui == oui && ie->oui_type == oui_type)
286 return pos; 378 return pos;
287 379cont:
288 pos += 2 + ie->len; 380 pos += 2 + ie->len;
289 } 381 }
290 return NULL; 382 return NULL;
291} 383}
292EXPORT_SYMBOL(cfg80211_find_vendor_ie); 384EXPORT_SYMBOL(cfg80211_find_vendor_ie);
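
The rework replaces the old "room left for a header" check with a check on ie->len itself, so a truncated or lying vendor element is skipped rather than matched. The underlying TLV walk, as a standalone sketch:

#include <stdio.h>
#include <string.h>

/* walk 802.11-style elements: [id][len][len bytes of data] */
static const unsigned char *find_ie(unsigned char id,
				    const unsigned char *buf, size_t len)
{
	const unsigned char *pos = buf, *end = buf + len;

	while (end - pos >= 2) {		/* need the id and len bytes */
		unsigned char elen = pos[1];

		if (elen > end - pos - 2)	/* truncated element: stop */
			return NULL;
		if (pos[0] == id)
			return pos;
		pos += 2 + elen;
	}
	return NULL;
}

int main(void)
{
	/* SSID "ab" followed by a vendor element with a bogus length */
	unsigned char ies[] = { 0x00, 2, 'a', 'b', 0xdd, 200 };
	const unsigned char *ssid = find_ie(0x00, ies, sizeof(ies));

	printf("ssid len %d\n", ssid ? ssid[1] : -1);		/* 2 */
	printf("vendor %s\n", find_ie(0xdd, ies, sizeof(ies)) ?
	       "found" : "skipped");				/* skipped */
	return 0;
}
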
293 385
294static int cmp_ies(u8 num, const u8 *ies1, int len1, const u8 *ies2, int len2)
295{
296 const u8 *ie1 = cfg80211_find_ie(num, ies1, len1);
297 const u8 *ie2 = cfg80211_find_ie(num, ies2, len2);
298
299 /* equal if both missing */
300 if (!ie1 && !ie2)
301 return 0;
302 /* sort missing IE before (left of) present IE */
303 if (!ie1)
304 return -1;
305 if (!ie2)
306 return 1;
307
308 /* sort by length first, then by contents */
309 if (ie1[1] != ie2[1])
310 return ie2[1] - ie1[1];
311 return memcmp(ie1 + 2, ie2 + 2, ie1[1]);
312}
313
314static bool is_bss(struct cfg80211_bss *a, const u8 *bssid, 386static bool is_bss(struct cfg80211_bss *a, const u8 *bssid,
315 const u8 *ssid, size_t ssid_len) 387 const u8 *ssid, size_t ssid_len)
316{ 388{
@@ -334,109 +406,30 @@ static bool is_bss(struct cfg80211_bss *a, const u8 *bssid,
334 return memcmp(ssidie + 2, ssid, ssid_len) == 0; 406 return memcmp(ssidie + 2, ssid, ssid_len) == 0;
335} 407}
336 408
337static bool is_mesh_bss(struct cfg80211_bss *a) 409/**
338{ 410 * enum bss_compare_mode - BSS compare mode
339 const struct cfg80211_bss_ies *ies; 411 * @BSS_CMP_REGULAR: regular compare mode (for insertion and normal find)
340 const u8 *ie; 412 * @BSS_CMP_HIDE_ZLEN: find hidden SSID with zero-length mode
341 413 * @BSS_CMP_HIDE_NUL: find hidden SSID with NUL-ed out mode
342 if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability)) 414 */
343 return false; 415enum bss_compare_mode {
344 416 BSS_CMP_REGULAR,
345 ies = rcu_access_pointer(a->ies); 417 BSS_CMP_HIDE_ZLEN,
346 if (!ies) 418 BSS_CMP_HIDE_NUL,
347 return false; 419};
348
349 ie = cfg80211_find_ie(WLAN_EID_MESH_ID, ies->data, ies->len);
350 if (!ie)
351 return false;
352
353 ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG, ies->data, ies->len);
354 if (!ie)
355 return false;
356
357 return true;
358}
359
360static bool is_mesh(struct cfg80211_bss *a,
361 const u8 *meshid, size_t meshidlen,
362 const u8 *meshcfg)
363{
364 const struct cfg80211_bss_ies *ies;
365 const u8 *ie;
366
367 if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability))
368 return false;
369
370 ies = rcu_access_pointer(a->ies);
371 if (!ies)
372 return false;
373
374 ie = cfg80211_find_ie(WLAN_EID_MESH_ID, ies->data, ies->len);
375 if (!ie)
376 return false;
377 if (ie[1] != meshidlen)
378 return false;
379 if (memcmp(ie + 2, meshid, meshidlen))
380 return false;
381
382 ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG, ies->data, ies->len);
383 if (!ie)
384 return false;
385 if (ie[1] != sizeof(struct ieee80211_meshconf_ie))
386 return false;
387
388 /*
389 * Ignore mesh capability (last two bytes of the IE) when
390 * comparing since that may differ between stations taking
391 * part in the same mesh.
392 */
393 return memcmp(ie + 2, meshcfg,
394 sizeof(struct ieee80211_meshconf_ie) - 2) == 0;
395}
396 420
397static int cmp_bss_core(struct cfg80211_bss *a, struct cfg80211_bss *b) 421static int cmp_bss(struct cfg80211_bss *a,
422 struct cfg80211_bss *b,
423 enum bss_compare_mode mode)
398{ 424{
399 const struct cfg80211_bss_ies *a_ies, *b_ies; 425 const struct cfg80211_bss_ies *a_ies, *b_ies;
400 int r; 426 const u8 *ie1 = NULL;
427 const u8 *ie2 = NULL;
428 int i, r;
401 429
402 if (a->channel != b->channel) 430 if (a->channel != b->channel)
403 return b->channel->center_freq - a->channel->center_freq; 431 return b->channel->center_freq - a->channel->center_freq;
404 432
405 if (is_mesh_bss(a) && is_mesh_bss(b)) {
406 a_ies = rcu_access_pointer(a->ies);
407 if (!a_ies)
408 return -1;
409 b_ies = rcu_access_pointer(b->ies);
410 if (!b_ies)
411 return 1;
412
413 r = cmp_ies(WLAN_EID_MESH_ID,
414 a_ies->data, a_ies->len,
415 b_ies->data, b_ies->len);
416 if (r)
417 return r;
418 return cmp_ies(WLAN_EID_MESH_CONFIG,
419 a_ies->data, a_ies->len,
420 b_ies->data, b_ies->len);
421 }
422
423 /*
424 * we can't use compare_ether_addr here since we need a < > operator.
425 * The binary return value of compare_ether_addr isn't enough
426 */
427 return memcmp(a->bssid, b->bssid, sizeof(a->bssid));
428}
429
430static int cmp_bss(struct cfg80211_bss *a,
431 struct cfg80211_bss *b)
432{
433 const struct cfg80211_bss_ies *a_ies, *b_ies;
434 int r;
435
436 r = cmp_bss_core(a, b);
437 if (r)
438 return r;
439
440 a_ies = rcu_access_pointer(a->ies); 433 a_ies = rcu_access_pointer(a->ies);
441 if (!a_ies) 434 if (!a_ies)
442 return -1; 435 return -1;
@@ -444,42 +437,51 @@ static int cmp_bss(struct cfg80211_bss *a,
444 if (!b_ies) 437 if (!b_ies)
445 return 1; 438 return 1;
446 439
447 return cmp_ies(WLAN_EID_SSID, 440 if (WLAN_CAPABILITY_IS_STA_BSS(a->capability))
448 a_ies->data, a_ies->len, 441 ie1 = cfg80211_find_ie(WLAN_EID_MESH_ID,
449 b_ies->data, b_ies->len); 442 a_ies->data, a_ies->len);
450} 443 if (WLAN_CAPABILITY_IS_STA_BSS(b->capability))
451 444 ie2 = cfg80211_find_ie(WLAN_EID_MESH_ID,
452static int cmp_hidden_bss(struct cfg80211_bss *a, struct cfg80211_bss *b) 445 b_ies->data, b_ies->len);
453{ 446 if (ie1 && ie2) {
454 const struct cfg80211_bss_ies *a_ies, *b_ies; 447 int mesh_id_cmp;
455 const u8 *ie1; 448
456 const u8 *ie2; 449 if (ie1[1] == ie2[1])
457 int i; 450 mesh_id_cmp = memcmp(ie1 + 2, ie2 + 2, ie1[1]);
458 int r; 451 else
452 mesh_id_cmp = ie2[1] - ie1[1];
453
454 ie1 = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
455 a_ies->data, a_ies->len);
456 ie2 = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
457 b_ies->data, b_ies->len);
458 if (ie1 && ie2) {
459 if (mesh_id_cmp)
460 return mesh_id_cmp;
461 if (ie1[1] != ie2[1])
462 return ie2[1] - ie1[1];
463 return memcmp(ie1 + 2, ie2 + 2, ie1[1]);
464 }
465 }
459 466
460 r = cmp_bss_core(a, b); 467 /*
468 * we can't use compare_ether_addr here since we need a < > operator.
469 * The binary return value of compare_ether_addr isn't enough
470 */
471 r = memcmp(a->bssid, b->bssid, sizeof(a->bssid));
461 if (r) 472 if (r)
462 return r; 473 return r;
463 474
464 a_ies = rcu_access_pointer(a->ies);
465 if (!a_ies)
466 return -1;
467 b_ies = rcu_access_pointer(b->ies);
468 if (!b_ies)
469 return 1;
470
471 ie1 = cfg80211_find_ie(WLAN_EID_SSID, a_ies->data, a_ies->len); 475 ie1 = cfg80211_find_ie(WLAN_EID_SSID, a_ies->data, a_ies->len);
472 ie2 = cfg80211_find_ie(WLAN_EID_SSID, b_ies->data, b_ies->len); 476 ie2 = cfg80211_find_ie(WLAN_EID_SSID, b_ies->data, b_ies->len);
473 477
478 if (!ie1 && !ie2)
479 return 0;
480
474 /* 481 /*
475 * Key comparator must use same algorithm in any rb-tree 482 * Note that with "hide_ssid", the function returns a match if
476 * search function (order is important), otherwise ordering 483 * the already-present BSS ("b") is a hidden SSID beacon for
477 * of items in the tree is broken and search gives incorrect 484 * the new BSS ("a").
478 * results. This code uses same order as cmp_ies() does.
479 *
480 * Note that due to the differring behaviour with hidden SSIDs
481 * this function only works when "b" is the tree element and
482 * "a" is the key we're looking for.
483 */ 485 */
484 486
485 /* sort missing IE before (left of) present IE */ 487 /* sort missing IE before (left of) present IE */
@@ -488,24 +490,36 @@ static int cmp_hidden_bss(struct cfg80211_bss *a, struct cfg80211_bss *b)
488 if (!ie2) 490 if (!ie2)
489 return 1; 491 return 1;
490 492
491 /* zero-size SSID is used as an indication of the hidden bss */ 493 switch (mode) {
492 if (!ie2[1]) 494 case BSS_CMP_HIDE_ZLEN:
495 /*
496 * In ZLEN mode we assume the BSS entry we're
497 * looking for has a zero-length SSID. So if
498 * the one we're looking at right now has that,
499 * return 0. Otherwise, return the difference
500 * in length, but since we're looking for the
 501 * 0-length one, it's really equivalent to returning
502 * the length of the one we're looking at.
503 *
504 * No content comparison is needed as we assume
505 * the content length is zero.
506 */
507 return ie2[1];
508 case BSS_CMP_REGULAR:
509 default:
510 /* sort by length first, then by contents */
511 if (ie1[1] != ie2[1])
512 return ie2[1] - ie1[1];
513 return memcmp(ie1 + 2, ie2 + 2, ie1[1]);
514 case BSS_CMP_HIDE_NUL:
515 if (ie1[1] != ie2[1])
516 return ie2[1] - ie1[1];
517 /* this is equivalent to memcmp(zeroes, ie2 + 2, len) */
518 for (i = 0; i < ie2[1]; i++)
519 if (ie2[i + 2])
520 return -1;
493 return 0; 521 return 0;
494 522 }
495 /* sort by length first, then by contents */
496 if (ie1[1] != ie2[1])
497 return ie2[1] - ie1[1];
498
499 /*
500 * zeroed SSID ie is another indication of a hidden bss;
501 * if it isn't zeroed just return the regular sort value
502 * to find the next candidate
503 */
504 for (i = 0; i < ie2[1]; i++)
505 if (ie2[i + 2])
506 return memcmp(ie1 + 2, ie2 + 2, ie1[1]);
507
508 return 0;
509} 523}
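
The three modes differ only in how the SSID element is compared; two hidden modes are needed because some APs beacon a zero-length SSID while others beacon a correctly-sized SSID of NUL bytes. A toy demonstration, simplified to bare SSID elements:

#include <stdio.h>
#include <string.h>

enum cmp_mode { CMP_REGULAR, CMP_HIDE_ZLEN, CMP_HIDE_NUL };

/* compare two SSID elements: ie[0] = id, ie[1] = len, ie[2..] = data */
static int cmp_ssid(const unsigned char *ie1, const unsigned char *ie2,
		    enum cmp_mode mode)
{
	int i;

	switch (mode) {
	case CMP_HIDE_ZLEN:
		return ie2[1];			/* match only if b is empty */
	case CMP_HIDE_NUL:
		if (ie1[1] != ie2[1])
			return ie2[1] - ie1[1];
		for (i = 0; i < ie2[1]; i++)	/* match only if b is all NUL */
			if (ie2[i + 2])
				return -1;
		return 0;
	case CMP_REGULAR:
	default:
		if (ie1[1] != ie2[1])
			return ie2[1] - ie1[1];
		return memcmp(ie1 + 2, ie2 + 2, ie1[1]);
	}
}

int main(void)
{
	unsigned char real[] = { 0, 3, 'n', 'e', 't' };	/* probe response */
	unsigned char zlen[] = { 0, 0 };		/* hidden, 0-length */
	unsigned char nul[]  = { 0, 3, 0, 0, 0 };	/* hidden, NUL-ed out */

	printf("%d %d %d\n",
	       cmp_ssid(real, zlen, CMP_HIDE_ZLEN),	/* 0: match */
	       cmp_ssid(real, nul, CMP_HIDE_NUL),	/* 0: match */
	       cmp_ssid(real, nul, CMP_REGULAR));	/* non-zero */
	return 0;
}
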
510 524
511struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, 525struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
@@ -534,7 +548,7 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
534 continue; 548 continue;
535 if (is_bss(&bss->pub, bssid, ssid, ssid_len)) { 549 if (is_bss(&bss->pub, bssid, ssid, ssid_len)) {
536 res = bss; 550 res = bss;
537 kref_get(&res->ref); 551 bss_ref_get(dev, res);
538 break; 552 break;
539 } 553 }
540 } 554 }
@@ -547,34 +561,6 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
547} 561}
548EXPORT_SYMBOL(cfg80211_get_bss); 562EXPORT_SYMBOL(cfg80211_get_bss);
549 563
550struct cfg80211_bss *cfg80211_get_mesh(struct wiphy *wiphy,
551 struct ieee80211_channel *channel,
552 const u8 *meshid, size_t meshidlen,
553 const u8 *meshcfg)
554{
555 struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
556 struct cfg80211_internal_bss *bss, *res = NULL;
557
558 spin_lock_bh(&dev->bss_lock);
559
560 list_for_each_entry(bss, &dev->bss_list, list) {
561 if (channel && bss->pub.channel != channel)
562 continue;
563 if (is_mesh(&bss->pub, meshid, meshidlen, meshcfg)) {
564 res = bss;
565 kref_get(&res->ref);
566 break;
567 }
568 }
569
570 spin_unlock_bh(&dev->bss_lock);
571 if (!res)
572 return NULL;
573 return &res->pub;
574}
575EXPORT_SYMBOL(cfg80211_get_mesh);
576
577
578static void rb_insert_bss(struct cfg80211_registered_device *dev, 564static void rb_insert_bss(struct cfg80211_registered_device *dev,
579 struct cfg80211_internal_bss *bss) 565 struct cfg80211_internal_bss *bss)
580{ 566{
@@ -587,7 +573,7 @@ static void rb_insert_bss(struct cfg80211_registered_device *dev,
587 parent = *p; 573 parent = *p;
588 tbss = rb_entry(parent, struct cfg80211_internal_bss, rbn); 574 tbss = rb_entry(parent, struct cfg80211_internal_bss, rbn);
589 575
590 cmp = cmp_bss(&bss->pub, &tbss->pub); 576 cmp = cmp_bss(&bss->pub, &tbss->pub, BSS_CMP_REGULAR);
591 577
592 if (WARN_ON(!cmp)) { 578 if (WARN_ON(!cmp)) {
593 /* will sort of leak this BSS */ 579 /* will sort of leak this BSS */
@@ -606,7 +592,8 @@ static void rb_insert_bss(struct cfg80211_registered_device *dev,
606 592
607static struct cfg80211_internal_bss * 593static struct cfg80211_internal_bss *
608rb_find_bss(struct cfg80211_registered_device *dev, 594rb_find_bss(struct cfg80211_registered_device *dev,
609 struct cfg80211_internal_bss *res) 595 struct cfg80211_internal_bss *res,
596 enum bss_compare_mode mode)
610{ 597{
611 struct rb_node *n = dev->bss_tree.rb_node; 598 struct rb_node *n = dev->bss_tree.rb_node;
612 struct cfg80211_internal_bss *bss; 599 struct cfg80211_internal_bss *bss;
@@ -614,7 +601,7 @@ rb_find_bss(struct cfg80211_registered_device *dev,
614 601
615 while (n) { 602 while (n) {
616 bss = rb_entry(n, struct cfg80211_internal_bss, rbn); 603 bss = rb_entry(n, struct cfg80211_internal_bss, rbn);
617 r = cmp_bss(&res->pub, &bss->pub); 604 r = cmp_bss(&res->pub, &bss->pub, mode);
618 605
619 if (r == 0) 606 if (r == 0)
620 return bss; 607 return bss;
@@ -627,46 +614,67 @@ rb_find_bss(struct cfg80211_registered_device *dev,
627 return NULL; 614 return NULL;
628} 615}
629 616
630static struct cfg80211_internal_bss * 617static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
631rb_find_hidden_bss(struct cfg80211_registered_device *dev, 618 struct cfg80211_internal_bss *new)
632 struct cfg80211_internal_bss *res)
633{ 619{
634 struct rb_node *n = dev->bss_tree.rb_node; 620 const struct cfg80211_bss_ies *ies;
635 struct cfg80211_internal_bss *bss; 621 struct cfg80211_internal_bss *bss;
636 int r; 622 const u8 *ie;
623 int i, ssidlen;
624 u8 fold = 0;
637 625
638 while (n) { 626 ies = rcu_access_pointer(new->pub.beacon_ies);
639 bss = rb_entry(n, struct cfg80211_internal_bss, rbn); 627 if (WARN_ON(!ies))
640 r = cmp_hidden_bss(&res->pub, &bss->pub); 628 return false;
641 629
642 if (r == 0) 630 ie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
643 return bss; 631 if (!ie) {
644 else if (r < 0) 632 /* nothing to do */
645 n = n->rb_left; 633 return true;
646 else
647 n = n->rb_right;
648 } 634 }
649 635
650 return NULL; 636 ssidlen = ie[1];
651} 637 for (i = 0; i < ssidlen; i++)
638 fold |= ie[2 + i];
652 639
653static void 640 if (fold) {
654copy_hidden_ies(struct cfg80211_internal_bss *res, 641 /* not a hidden SSID */
655 struct cfg80211_internal_bss *hidden) 642 return true;
656{ 643 }
657 const struct cfg80211_bss_ies *ies;
658 644
659 if (rcu_access_pointer(res->pub.beacon_ies)) 645 /* This is the bad part ... */
660 return;
661 646
662 ies = rcu_access_pointer(hidden->pub.beacon_ies); 647 list_for_each_entry(bss, &dev->bss_list, list) {
663 if (WARN_ON(!ies)) 648 if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid))
664 return; 649 continue;
650 if (bss->pub.channel != new->pub.channel)
651 continue;
652 if (rcu_access_pointer(bss->pub.beacon_ies))
653 continue;
654 ies = rcu_access_pointer(bss->pub.ies);
655 if (!ies)
656 continue;
657 ie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
658 if (!ie)
659 continue;
660 if (ssidlen && ie[1] != ssidlen)
661 continue;
662 /* that would be odd ... */
663 if (bss->pub.beacon_ies)
664 continue;
665 if (WARN_ON_ONCE(bss->pub.hidden_beacon_bss))
666 continue;
667 if (WARN_ON_ONCE(!list_empty(&bss->hidden_list)))
668 list_del(&bss->hidden_list);
669 /* combine them */
670 list_add(&bss->hidden_list, &new->hidden_list);
671 bss->pub.hidden_beacon_bss = &new->pub;
672 new->refcount += bss->refcount;
673 rcu_assign_pointer(bss->pub.beacon_ies,
674 new->pub.beacon_ies);
675 }
665 676
666 ies = kmemdup(ies, sizeof(*ies) + ies->len, GFP_ATOMIC); 677 return true;
667 if (unlikely(!ies))
668 return;
669 rcu_assign_pointer(res->pub.beacon_ies, ies);
670} 678}
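
cfg80211_combine_bsses() first decides whether the new beacon advertises a hidden SSID at all by OR-ing the SSID bytes into a "fold": a zero fold means the length is zero or every byte is NUL. In isolation:

#include <stdio.h>

/* returns 1 if the SSID element advertises a hidden network */
static int ssid_is_hidden(const unsigned char *ie)
{
	unsigned char fold = 0;
	int i;

	for (i = 0; i < ie[1]; i++)
		fold |= ie[2 + i];
	return fold == 0;	/* zero length or all-NUL bytes */
}

int main(void)
{
	unsigned char visible[] = { 0, 3, 'n', 'e', 't' };
	unsigned char hidden[]  = { 0, 3, 0, 0, 0 };

	printf("%d %d\n", ssid_is_hidden(visible),	/* 0 */
	       ssid_is_hidden(hidden));			/* 1 */
	return 0;
}
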
671 679
672static struct cfg80211_internal_bss * 680static struct cfg80211_internal_bss *
@@ -687,11 +695,10 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
687 return NULL; 695 return NULL;
688 } 696 }
689 697
690 found = rb_find_bss(dev, tmp); 698 found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR);
691 699
692 if (found) { 700 if (found) {
693 found->pub.beacon_interval = tmp->pub.beacon_interval; 701 found->pub.beacon_interval = tmp->pub.beacon_interval;
694 found->pub.tsf = tmp->pub.tsf;
695 found->pub.signal = tmp->pub.signal; 702 found->pub.signal = tmp->pub.signal;
696 found->pub.capability = tmp->pub.capability; 703 found->pub.capability = tmp->pub.capability;
697 found->ts = tmp->ts; 704 found->ts = tmp->ts;
@@ -711,19 +718,45 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
711 kfree_rcu((struct cfg80211_bss_ies *)old, 718 kfree_rcu((struct cfg80211_bss_ies *)old,
712 rcu_head); 719 rcu_head);
713 } else if (rcu_access_pointer(tmp->pub.beacon_ies)) { 720 } else if (rcu_access_pointer(tmp->pub.beacon_ies)) {
714 const struct cfg80211_bss_ies *old, *ies; 721 const struct cfg80211_bss_ies *old;
722 struct cfg80211_internal_bss *bss;
723
724 if (found->pub.hidden_beacon_bss &&
725 !list_empty(&found->hidden_list)) {
726 /*
727 * The found BSS struct is one of the probe
728 * response members of a group, but we're
729 * receiving a beacon (beacon_ies in the tmp
730 * bss is used). This can only mean that the
731 * AP changed its beacon from not having an
732 * SSID to showing it, which is confusing so
733 * drop this information.
734 */
735 goto drop;
736 }
715 737
716 old = rcu_access_pointer(found->pub.beacon_ies); 738 old = rcu_access_pointer(found->pub.beacon_ies);
717 ies = rcu_access_pointer(found->pub.ies);
718 739
719 rcu_assign_pointer(found->pub.beacon_ies, 740 rcu_assign_pointer(found->pub.beacon_ies,
720 tmp->pub.beacon_ies); 741 tmp->pub.beacon_ies);
721 742
722 /* Override IEs if they were from a beacon before */ 743 /* Override IEs if they were from a beacon before */
723 if (old == ies) 744 if (old == rcu_access_pointer(found->pub.ies))
724 rcu_assign_pointer(found->pub.ies, 745 rcu_assign_pointer(found->pub.ies,
725 tmp->pub.beacon_ies); 746 tmp->pub.beacon_ies);
726 747
748 /* Assign beacon IEs to all sub entries */
749 list_for_each_entry(bss, &found->hidden_list,
750 hidden_list) {
751 const struct cfg80211_bss_ies *ies;
752
753 ies = rcu_access_pointer(bss->pub.beacon_ies);
754 WARN_ON(ies != old);
755
756 rcu_assign_pointer(bss->pub.beacon_ies,
757 tmp->pub.beacon_ies);
758 }
759
727 if (old) 760 if (old)
728 kfree_rcu((struct cfg80211_bss_ies *)old, 761 kfree_rcu((struct cfg80211_bss_ies *)old,
729 rcu_head); 762 rcu_head);
@@ -733,19 +766,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
733 struct cfg80211_internal_bss *hidden; 766 struct cfg80211_internal_bss *hidden;
734 struct cfg80211_bss_ies *ies; 767 struct cfg80211_bss_ies *ies;
735 768
736 /* First check if the beacon is a probe response from
737 * a hidden bss. If so, copy beacon ies (with nullified
738 * ssid) into the probe response bss entry (with real ssid).
739 * It is required basically for PSM implementation
740 * (probe responses do not contain tim ie) */
741
742 /* TODO: The code is not trying to update existing probe
743 * response bss entries when beacon ies are
744 * getting changed. */
745 hidden = rb_find_hidden_bss(dev, tmp);
746 if (hidden)
747 copy_hidden_ies(tmp, hidden);
748
749 /* 769 /*
750 * create a copy -- the "res" variable that is passed in 770 * create a copy -- the "res" variable that is passed in
751 * is allocated on the stack since it's not needed in the 771 * is allocated on the stack since it's not needed in the
@@ -760,21 +780,51 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
760 ies = (void *)rcu_dereference(tmp->pub.proberesp_ies); 780 ies = (void *)rcu_dereference(tmp->pub.proberesp_ies);
761 if (ies) 781 if (ies)
762 kfree_rcu(ies, rcu_head); 782 kfree_rcu(ies, rcu_head);
763 spin_unlock_bh(&dev->bss_lock); 783 goto drop;
764 return NULL;
765 } 784 }
766 memcpy(new, tmp, sizeof(*new)); 785 memcpy(new, tmp, sizeof(*new));
767 kref_init(&new->ref); 786 new->refcount = 1;
787 INIT_LIST_HEAD(&new->hidden_list);
788
789 if (rcu_access_pointer(tmp->pub.proberesp_ies)) {
790 hidden = rb_find_bss(dev, tmp, BSS_CMP_HIDE_ZLEN);
791 if (!hidden)
792 hidden = rb_find_bss(dev, tmp,
793 BSS_CMP_HIDE_NUL);
794 if (hidden) {
795 new->pub.hidden_beacon_bss = &hidden->pub;
796 list_add(&new->hidden_list,
797 &hidden->hidden_list);
798 hidden->refcount++;
799 rcu_assign_pointer(new->pub.beacon_ies,
800 hidden->pub.beacon_ies);
801 }
802 } else {
803 /*
804 * Ok so we found a beacon, and don't have an entry. If
805 * it's a beacon with hidden SSID, we might be in for an
806 * expensive search for any probe responses that should
807 * be grouped with this beacon for updates ...
808 */
809 if (!cfg80211_combine_bsses(dev, new)) {
810 kfree(new);
811 goto drop;
812 }
813 }
814
768 list_add_tail(&new->list, &dev->bss_list); 815 list_add_tail(&new->list, &dev->bss_list);
769 rb_insert_bss(dev, new); 816 rb_insert_bss(dev, new);
770 found = new; 817 found = new;
771 } 818 }
772 819
773 dev->bss_generation++; 820 dev->bss_generation++;
821 bss_ref_get(dev, found);
774 spin_unlock_bh(&dev->bss_lock); 822 spin_unlock_bh(&dev->bss_lock);
775 823
776 kref_get(&found->ref);
777 return found; 824 return found;
825 drop:
826 spin_unlock_bh(&dev->bss_lock);
827 return NULL;
778} 828}
779 829
780static struct ieee80211_channel * 830static struct ieee80211_channel *
@@ -833,7 +883,6 @@ cfg80211_inform_bss(struct wiphy *wiphy,
833 memcpy(tmp.pub.bssid, bssid, ETH_ALEN); 883 memcpy(tmp.pub.bssid, bssid, ETH_ALEN);
834 tmp.pub.channel = channel; 884 tmp.pub.channel = channel;
835 tmp.pub.signal = signal; 885 tmp.pub.signal = signal;
836 tmp.pub.tsf = tsf;
837 tmp.pub.beacon_interval = beacon_interval; 886 tmp.pub.beacon_interval = beacon_interval;
838 tmp.pub.capability = capability; 887 tmp.pub.capability = capability;
839 /* 888 /*
@@ -841,16 +890,14 @@ cfg80211_inform_bss(struct wiphy *wiphy,
841 * Response frame, we need to pick one of the options and only use it 890 * Response frame, we need to pick one of the options and only use it
842 * with the driver that does not provide the full Beacon/Probe Response 891 * with the driver that does not provide the full Beacon/Probe Response
843 * frame. Use Beacon frame pointer to avoid indicating that this should 892 * frame. Use Beacon frame pointer to avoid indicating that this should
844 * override the iies pointer should we have received an earlier 893 * override the IEs pointer should we have received an earlier
845 * indication of Probe Response data. 894 * indication of Probe Response data.
846 *
847 * The initial buffer for the IEs is allocated with the BSS entry and
848 * is located after the private area.
849 */ 895 */
850 ies = kmalloc(sizeof(*ies) + ielen, gfp); 896 ies = kmalloc(sizeof(*ies) + ielen, gfp);
851 if (!ies) 897 if (!ies)
852 return NULL; 898 return NULL;
853 ies->len = ielen; 899 ies->len = ielen;
900 ies->tsf = tsf;
854 memcpy(ies->data, ie, ielen); 901 memcpy(ies->data, ie, ielen);
855 902
856 rcu_assign_pointer(tmp.pub.beacon_ies, ies); 903 rcu_assign_pointer(tmp.pub.beacon_ies, ies);
@@ -907,6 +954,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
907 if (!ies) 954 if (!ies)
908 return NULL; 955 return NULL;
909 ies->len = ielen; 956 ies->len = ielen;
957 ies->tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
910 memcpy(ies->data, mgmt->u.probe_resp.variable, ielen); 958 memcpy(ies->data, mgmt->u.probe_resp.variable, ielen);
911 959
912 if (ieee80211_is_probe_resp(mgmt->frame_control)) 960 if (ieee80211_is_probe_resp(mgmt->frame_control))
@@ -918,7 +966,6 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
918 memcpy(tmp.pub.bssid, mgmt->bssid, ETH_ALEN); 966 memcpy(tmp.pub.bssid, mgmt->bssid, ETH_ALEN);
919 tmp.pub.channel = channel; 967 tmp.pub.channel = channel;
920 tmp.pub.signal = signal; 968 tmp.pub.signal = signal;
921 tmp.pub.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
922 tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int); 969 tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
923 tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info); 970 tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
924 971
@@ -935,27 +982,35 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
935} 982}
936EXPORT_SYMBOL(cfg80211_inform_bss_frame); 983EXPORT_SYMBOL(cfg80211_inform_bss_frame);
937 984
938void cfg80211_ref_bss(struct cfg80211_bss *pub) 985void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
939{ 986{
987 struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
940 struct cfg80211_internal_bss *bss; 988 struct cfg80211_internal_bss *bss;
941 989
942 if (!pub) 990 if (!pub)
943 return; 991 return;
944 992
945 bss = container_of(pub, struct cfg80211_internal_bss, pub); 993 bss = container_of(pub, struct cfg80211_internal_bss, pub);
946 kref_get(&bss->ref); 994
995 spin_lock_bh(&dev->bss_lock);
996 bss_ref_get(dev, bss);
997 spin_unlock_bh(&dev->bss_lock);
947} 998}
948EXPORT_SYMBOL(cfg80211_ref_bss); 999EXPORT_SYMBOL(cfg80211_ref_bss);
949 1000
950void cfg80211_put_bss(struct cfg80211_bss *pub) 1001void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
951{ 1002{
1003 struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
952 struct cfg80211_internal_bss *bss; 1004 struct cfg80211_internal_bss *bss;
953 1005
954 if (!pub) 1006 if (!pub)
955 return; 1007 return;
956 1008
957 bss = container_of(pub, struct cfg80211_internal_bss, pub); 1009 bss = container_of(pub, struct cfg80211_internal_bss, pub);
958 kref_put(&bss->ref, bss_release); 1010
1011 spin_lock_bh(&dev->bss_lock);
1012 bss_ref_put(dev, bss);
1013 spin_unlock_bh(&dev->bss_lock);
959} 1014}
960EXPORT_SYMBOL(cfg80211_put_bss); 1015EXPORT_SYMBOL(cfg80211_put_bss);
961 1016
@@ -971,8 +1026,8 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
971 1026
972 spin_lock_bh(&dev->bss_lock); 1027 spin_lock_bh(&dev->bss_lock);
973 if (!list_empty(&bss->list)) { 1028 if (!list_empty(&bss->list)) {
974 __cfg80211_unlink_bss(dev, bss); 1029 if (__cfg80211_unlink_bss(dev, bss))
975 dev->bss_generation++; 1030 dev->bss_generation++;
976 } 1031 }
977 spin_unlock_bh(&dev->bss_lock); 1032 spin_unlock_bh(&dev->bss_lock);
978} 1033}
@@ -1155,16 +1210,6 @@ static void ieee80211_scan_add_ies(struct iw_request_info *info,
1155 } 1210 }
1156} 1211}
1157 1212
1158static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
1159{
1160 unsigned long end = jiffies;
1161
1162 if (end >= start)
1163 return jiffies_to_msecs(end - start);
1164
1165 return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1);
1166}
1167
1168static char * 1213static char *
1169ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info, 1214ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
1170 struct cfg80211_internal_bss *bss, char *current_ev, 1215 struct cfg80211_internal_bss *bss, char *current_ev,
@@ -1241,15 +1286,10 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
1241 1286
1242 rcu_read_lock(); 1287 rcu_read_lock();
1243 ies = rcu_dereference(bss->pub.ies); 1288 ies = rcu_dereference(bss->pub.ies);
1244 if (ies) { 1289 rem = ies->len;
1245 rem = ies->len; 1290 ie = ies->data;
1246 ie = ies->data;
1247 } else {
1248 rem = 0;
1249 ie = NULL;
1250 }
1251 1291
1252 while (ies && rem >= 2) { 1292 while (rem >= 2) {
1253 /* invalid data */ 1293 /* invalid data */
1254 if (ie[1] > rem - 2) 1294 if (ie[1] > rem - 2)
1255 break; 1295 break;
@@ -1358,11 +1398,11 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
1358 &iwe, IW_EV_UINT_LEN); 1398 &iwe, IW_EV_UINT_LEN);
1359 } 1399 }
1360 1400
1361 buf = kmalloc(30, GFP_ATOMIC); 1401 buf = kmalloc(31, GFP_ATOMIC);
1362 if (buf) { 1402 if (buf) {
1363 memset(&iwe, 0, sizeof(iwe)); 1403 memset(&iwe, 0, sizeof(iwe));
1364 iwe.cmd = IWEVCUSTOM; 1404 iwe.cmd = IWEVCUSTOM;
1365 sprintf(buf, "tsf=%016llx", (unsigned long long)(bss->pub.tsf)); 1405 sprintf(buf, "tsf=%016llx", (unsigned long long)(ies->tsf));
1366 iwe.u.data.length = strlen(buf); 1406 iwe.u.data.length = strlen(buf);
1367 current_ev = iwe_stream_add_point(info, current_ev, end_buf, 1407 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
1368 &iwe, buf); 1408 &iwe, buf);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index a825dfe12cf7..f432bd3755b1 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -301,7 +301,7 @@ static void __cfg80211_sme_scan_done(struct net_device *dev)
301 301
302 bss = cfg80211_get_conn_bss(wdev); 302 bss = cfg80211_get_conn_bss(wdev);
303 if (bss) { 303 if (bss) {
304 cfg80211_put_bss(bss); 304 cfg80211_put_bss(&rdev->wiphy, bss);
305 } else { 305 } else {
306 /* not found */ 306 /* not found */
307 if (wdev->conn->state == CFG80211_CONN_SCAN_AGAIN) 307 if (wdev->conn->state == CFG80211_CONN_SCAN_AGAIN)
@@ -464,7 +464,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
464 464
465 if (wdev->current_bss) { 465 if (wdev->current_bss) {
466 cfg80211_unhold_bss(wdev->current_bss); 466 cfg80211_unhold_bss(wdev->current_bss);
467 cfg80211_put_bss(&wdev->current_bss->pub); 467 cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
468 wdev->current_bss = NULL; 468 wdev->current_bss = NULL;
469 } 469 }
470 470
@@ -480,7 +480,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
480 kfree(wdev->connect_keys); 480 kfree(wdev->connect_keys);
481 wdev->connect_keys = NULL; 481 wdev->connect_keys = NULL;
482 wdev->ssid_len = 0; 482 wdev->ssid_len = 0;
483 cfg80211_put_bss(bss); 483 cfg80211_put_bss(wdev->wiphy, bss);
484 return; 484 return;
485 } 485 }
486 486
@@ -586,7 +586,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev,
586 } 586 }
587 587
588 cfg80211_unhold_bss(wdev->current_bss); 588 cfg80211_unhold_bss(wdev->current_bss);
589 cfg80211_put_bss(&wdev->current_bss->pub); 589 cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
590 wdev->current_bss = NULL; 590 wdev->current_bss = NULL;
591 591
592 cfg80211_hold_bss(bss_from_pub(bss)); 592 cfg80211_hold_bss(bss_from_pub(bss));
@@ -621,7 +621,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev,
621 621
622 return; 622 return;
623out: 623out:
624 cfg80211_put_bss(bss); 624 cfg80211_put_bss(wdev->wiphy, bss);
625} 625}
626 626
627void cfg80211_roamed(struct net_device *dev, 627void cfg80211_roamed(struct net_device *dev,
@@ -663,7 +663,7 @@ void cfg80211_roamed_bss(struct net_device *dev,
663 663
664 ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp); 664 ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp);
665 if (!ev) { 665 if (!ev) {
666 cfg80211_put_bss(bss); 666 cfg80211_put_bss(wdev->wiphy, bss);
667 return; 667 return;
668 } 668 }
669 669
@@ -704,7 +704,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
704 704
705 if (wdev->current_bss) { 705 if (wdev->current_bss) {
706 cfg80211_unhold_bss(wdev->current_bss); 706 cfg80211_unhold_bss(wdev->current_bss);
707 cfg80211_put_bss(&wdev->current_bss->pub); 707 cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
708 } 708 }
709 709
710 wdev->current_bss = NULL; 710 wdev->current_bss = NULL;
@@ -875,7 +875,7 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
875 if (bss) { 875 if (bss) {
876 wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; 876 wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
877 err = cfg80211_conn_do_work(wdev); 877 err = cfg80211_conn_do_work(wdev);
878 cfg80211_put_bss(bss); 878 cfg80211_put_bss(wdev->wiphy, bss);
879 } else { 879 } else {
880 /* otherwise we'll need to scan for the AP first */ 880 /* otherwise we'll need to scan for the AP first */
881 err = cfg80211_conn_scan(wdev); 881 err = cfg80211_conn_scan(wdev);
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 1f6f01e2dc4c..238ee49b3868 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -106,9 +106,7 @@ static int wiphy_resume(struct device *dev)
106 int ret = 0; 106 int ret = 0;
107 107
108 /* Age scan results with time spent in suspend */ 108 /* Age scan results with time spent in suspend */
109 spin_lock_bh(&rdev->bss_lock);
110 cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at); 109 cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at);
111 spin_unlock_bh(&rdev->bss_lock);
112 110
113 if (rdev->ops->resume) { 111 if (rdev->ops->resume) {
114 rtnl_lock(); 112 rtnl_lock();
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 2134576f426e..b7a531380e19 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1767,6 +1767,24 @@ DEFINE_EVENT(wiphy_wdev_evt, rdev_stop_p2p_device,
1767 TP_ARGS(wiphy, wdev) 1767 TP_ARGS(wiphy, wdev)
1768); 1768);
1769 1769
1770TRACE_EVENT(rdev_set_mac_acl,
1771 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
1772 struct cfg80211_acl_data *params),
1773 TP_ARGS(wiphy, netdev, params),
1774 TP_STRUCT__entry(
1775 WIPHY_ENTRY
1776 NETDEV_ENTRY
1777 __field(u32, acl_policy)
1778 ),
1779 TP_fast_assign(
1780 WIPHY_ASSIGN;
 1781 NETDEV_ASSIGN;
1782 __entry->acl_policy = params->acl_policy;
1783 ),
1784 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d",
1785 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->acl_policy)
1786);
1787
1770/************************************************************* 1788/*************************************************************
1771 * cfg80211 exported functions traces * 1789 * cfg80211 exported functions traces *
1772 *************************************************************/ 1790 *************************************************************/
@@ -2033,6 +2051,21 @@ TRACE_EVENT(cfg80211_reg_can_beacon,
2033 WIPHY_PR_ARG, CHAN_DEF_PR_ARG) 2051 WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
2034); 2052);
2035 2053
2054TRACE_EVENT(cfg80211_chandef_dfs_required,
2055 TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef),
2056 TP_ARGS(wiphy, chandef),
2057 TP_STRUCT__entry(
2058 WIPHY_ENTRY
2059 CHAN_DEF_ENTRY
2060 ),
2061 TP_fast_assign(
2062 WIPHY_ASSIGN;
2063 CHAN_DEF_ASSIGN(chandef);
2064 ),
2065 TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
2066 WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
2067);
2068
2036TRACE_EVENT(cfg80211_ch_switch_notify, 2069TRACE_EVENT(cfg80211_ch_switch_notify,
2037 TP_PROTO(struct net_device *netdev, 2070 TP_PROTO(struct net_device *netdev,
2038 struct cfg80211_chan_def *chandef), 2071 struct cfg80211_chan_def *chandef),
@@ -2049,6 +2082,36 @@ TRACE_EVENT(cfg80211_ch_switch_notify,
2049 NETDEV_PR_ARG, CHAN_DEF_PR_ARG) 2082 NETDEV_PR_ARG, CHAN_DEF_PR_ARG)
2050); 2083);
2051 2084
2085TRACE_EVENT(cfg80211_radar_event,
2086 TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef),
2087 TP_ARGS(wiphy, chandef),
2088 TP_STRUCT__entry(
2089 WIPHY_ENTRY
2090 CHAN_DEF_ENTRY
2091 ),
2092 TP_fast_assign(
2093 WIPHY_ASSIGN;
2094 CHAN_DEF_ASSIGN(chandef);
2095 ),
2096 TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
2097 WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
2098);
2099
2100TRACE_EVENT(cfg80211_cac_event,
2101 TP_PROTO(struct net_device *netdev, enum nl80211_radar_event evt),
2102 TP_ARGS(netdev, evt),
2103 TP_STRUCT__entry(
2104 NETDEV_ENTRY
2105 __field(enum nl80211_radar_event, evt)
2106 ),
2107 TP_fast_assign(
2108 NETDEV_ASSIGN;
2109 __entry->evt = evt;
2110 ),
2111 TP_printk(NETDEV_PR_FMT ", event: %d",
2112 NETDEV_PR_ARG, __entry->evt)
2113);
2114
2052DECLARE_EVENT_CLASS(cfg80211_rx_evt, 2115DECLARE_EVENT_CLASS(cfg80211_rx_evt,
2053 TP_PROTO(struct net_device *netdev, const u8 *addr), 2116 TP_PROTO(struct net_device *netdev, const u8 *addr),
2054 TP_ARGS(netdev, addr), 2117 TP_ARGS(netdev, addr),
@@ -2315,6 +2378,41 @@ TRACE_EVENT(cfg80211_return_u32,
2315 TP_printk("ret: %u", __entry->ret) 2378 TP_printk("ret: %u", __entry->ret)
2316); 2379);
2317 2380
2381TRACE_EVENT(cfg80211_report_wowlan_wakeup,
2382 TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
2383 struct cfg80211_wowlan_wakeup *wakeup),
2384 TP_ARGS(wiphy, wdev, wakeup),
2385 TP_STRUCT__entry(
2386 WIPHY_ENTRY
2387 WDEV_ENTRY
2388 __field(bool, disconnect)
2389 __field(bool, magic_pkt)
2390 __field(bool, gtk_rekey_failure)
2391 __field(bool, eap_identity_req)
2392 __field(bool, four_way_handshake)
2393 __field(bool, rfkill_release)
2394 __field(s32, pattern_idx)
2395 __field(u32, packet_len)
2396 __dynamic_array(u8, packet, wakeup->packet_present_len)
2397 ),
2398 TP_fast_assign(
2399 WIPHY_ASSIGN;
2400 WDEV_ASSIGN;
2401 __entry->disconnect = wakeup->disconnect;
2402 __entry->magic_pkt = wakeup->magic_pkt;
2403 __entry->gtk_rekey_failure = wakeup->gtk_rekey_failure;
2404 __entry->eap_identity_req = wakeup->eap_identity_req;
2405 __entry->four_way_handshake = wakeup->four_way_handshake;
2406 __entry->rfkill_release = wakeup->rfkill_release;
2407 __entry->pattern_idx = wakeup->pattern_idx;
2408 __entry->packet_len = wakeup->packet_len;
2409 if (wakeup->packet && wakeup->packet_present_len)
2410 memcpy(__get_dynamic_array(packet), wakeup->packet,
2411 wakeup->packet_present_len);
2412 ),
2413 TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG)
2414);
2415
2318#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */ 2416#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
2319 2417
2320#undef TRACE_INCLUDE_PATH 2418#undef TRACE_INCLUDE_PATH
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 1c2795d52db0..37a56ee1e1ed 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1212,14 +1212,15 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
1212 case NL80211_IFTYPE_MESH_POINT: 1212 case NL80211_IFTYPE_MESH_POINT:
1213 case NL80211_IFTYPE_P2P_GO: 1213 case NL80211_IFTYPE_P2P_GO:
1214 case NL80211_IFTYPE_WDS: 1214 case NL80211_IFTYPE_WDS:
1215 radar_required = !!(chan->flags & IEEE80211_CHAN_RADAR); 1215 radar_required = !!(chan &&
1216 (chan->flags & IEEE80211_CHAN_RADAR));
1216 break; 1217 break;
1217 case NL80211_IFTYPE_P2P_CLIENT: 1218 case NL80211_IFTYPE_P2P_CLIENT:
1218 case NL80211_IFTYPE_STATION: 1219 case NL80211_IFTYPE_STATION:
1220 case NL80211_IFTYPE_P2P_DEVICE:
1219 case NL80211_IFTYPE_MONITOR: 1221 case NL80211_IFTYPE_MONITOR:
1220 radar_required = false; 1222 radar_required = false;
1221 break; 1223 break;
1222 case NL80211_IFTYPE_P2P_DEVICE:
1223 case NUM_NL80211_IFTYPES: 1224 case NUM_NL80211_IFTYPES:
1224 case NL80211_IFTYPE_UNSPECIFIED: 1225 case NL80211_IFTYPE_UNSPECIFIED:
1225 default: 1226 default:
diff --git a/net/wireless/wext-proc.c b/net/wireless/wext-proc.c
index 8bafa31fa9f8..e98a01c1034f 100644
--- a/net/wireless/wext-proc.c
+++ b/net/wireless/wext-proc.c
@@ -143,7 +143,8 @@ static const struct file_operations wireless_seq_fops = {
143int __net_init wext_proc_init(struct net *net) 143int __net_init wext_proc_init(struct net *net)
144{ 144{
145 /* Create /proc/net/wireless entry */ 145 /* Create /proc/net/wireless entry */
146 if (!proc_net_fops_create(net, "wireless", S_IRUGO, &wireless_seq_fops)) 146 if (!proc_create("wireless", S_IRUGO, net->proc_net,
147 &wireless_seq_fops))
147 return -ENOMEM; 148 return -ENOMEM;
148 149
149 return 0; 150 return 0;
@@ -151,5 +152,5 @@ int __net_init wext_proc_init(struct net *net)
151 152
152void __net_exit wext_proc_exit(struct net *net) 153void __net_exit wext_proc_exit(struct net *net)
153{ 154{
154 proc_net_remove(net, "wireless"); 155 remove_proc_entry("wireless", net->proc_net);
155} 156}
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index f9a549554740..6fb9d00a75dc 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -35,6 +35,8 @@ static struct xfrm_algo_desc aead_list[] = {
35 } 35 }
36 }, 36 },
37 37
38 .pfkey_supported = 1,
39
38 .desc = { 40 .desc = {
39 .sadb_alg_id = SADB_X_EALG_AES_GCM_ICV8, 41 .sadb_alg_id = SADB_X_EALG_AES_GCM_ICV8,
40 .sadb_alg_ivlen = 8, 42 .sadb_alg_ivlen = 8,
@@ -51,6 +53,8 @@ static struct xfrm_algo_desc aead_list[] = {
51 } 53 }
52 }, 54 },
53 55
56 .pfkey_supported = 1,
57
54 .desc = { 58 .desc = {
55 .sadb_alg_id = SADB_X_EALG_AES_GCM_ICV12, 59 .sadb_alg_id = SADB_X_EALG_AES_GCM_ICV12,
56 .sadb_alg_ivlen = 8, 60 .sadb_alg_ivlen = 8,
@@ -67,6 +71,8 @@ static struct xfrm_algo_desc aead_list[] = {
67 } 71 }
68 }, 72 },
69 73
74 .pfkey_supported = 1,
75
70 .desc = { 76 .desc = {
71 .sadb_alg_id = SADB_X_EALG_AES_GCM_ICV16, 77 .sadb_alg_id = SADB_X_EALG_AES_GCM_ICV16,
72 .sadb_alg_ivlen = 8, 78 .sadb_alg_ivlen = 8,
@@ -83,6 +89,8 @@ static struct xfrm_algo_desc aead_list[] = {
83 } 89 }
84 }, 90 },
85 91
92 .pfkey_supported = 1,
93
86 .desc = { 94 .desc = {
87 .sadb_alg_id = SADB_X_EALG_AES_CCM_ICV8, 95 .sadb_alg_id = SADB_X_EALG_AES_CCM_ICV8,
88 .sadb_alg_ivlen = 8, 96 .sadb_alg_ivlen = 8,
@@ -99,6 +107,8 @@ static struct xfrm_algo_desc aead_list[] = {
99 } 107 }
100 }, 108 },
101 109
110 .pfkey_supported = 1,
111
102 .desc = { 112 .desc = {
103 .sadb_alg_id = SADB_X_EALG_AES_CCM_ICV12, 113 .sadb_alg_id = SADB_X_EALG_AES_CCM_ICV12,
104 .sadb_alg_ivlen = 8, 114 .sadb_alg_ivlen = 8,
@@ -115,6 +125,8 @@ static struct xfrm_algo_desc aead_list[] = {
115 } 125 }
116 }, 126 },
117 127
128 .pfkey_supported = 1,
129
118 .desc = { 130 .desc = {
119 .sadb_alg_id = SADB_X_EALG_AES_CCM_ICV16, 131 .sadb_alg_id = SADB_X_EALG_AES_CCM_ICV16,
120 .sadb_alg_ivlen = 8, 132 .sadb_alg_ivlen = 8,
@@ -131,6 +143,8 @@ static struct xfrm_algo_desc aead_list[] = {
131 } 143 }
132 }, 144 },
133 145
146 .pfkey_supported = 1,
147
134 .desc = { 148 .desc = {
135 .sadb_alg_id = SADB_X_EALG_NULL_AES_GMAC, 149 .sadb_alg_id = SADB_X_EALG_NULL_AES_GMAC,
136 .sadb_alg_ivlen = 8, 150 .sadb_alg_ivlen = 8,
@@ -151,6 +165,8 @@ static struct xfrm_algo_desc aalg_list[] = {
151 } 165 }
152 }, 166 },
153 167
168 .pfkey_supported = 1,
169
154 .desc = { 170 .desc = {
155 .sadb_alg_id = SADB_X_AALG_NULL, 171 .sadb_alg_id = SADB_X_AALG_NULL,
156 .sadb_alg_ivlen = 0, 172 .sadb_alg_ivlen = 0,
@@ -169,6 +185,8 @@ static struct xfrm_algo_desc aalg_list[] = {
169 } 185 }
170 }, 186 },
171 187
188 .pfkey_supported = 1,
189
172 .desc = { 190 .desc = {
173 .sadb_alg_id = SADB_AALG_MD5HMAC, 191 .sadb_alg_id = SADB_AALG_MD5HMAC,
174 .sadb_alg_ivlen = 0, 192 .sadb_alg_ivlen = 0,
@@ -187,6 +205,8 @@ static struct xfrm_algo_desc aalg_list[] = {
187 } 205 }
188 }, 206 },
189 207
208 .pfkey_supported = 1,
209
190 .desc = { 210 .desc = {
191 .sadb_alg_id = SADB_AALG_SHA1HMAC, 211 .sadb_alg_id = SADB_AALG_SHA1HMAC,
192 .sadb_alg_ivlen = 0, 212 .sadb_alg_ivlen = 0,
@@ -205,6 +225,8 @@ static struct xfrm_algo_desc aalg_list[] = {
205 } 225 }
206 }, 226 },
207 227
228 .pfkey_supported = 1,
229
208 .desc = { 230 .desc = {
209 .sadb_alg_id = SADB_X_AALG_SHA2_256HMAC, 231 .sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
210 .sadb_alg_ivlen = 0, 232 .sadb_alg_ivlen = 0,
@@ -222,6 +244,8 @@ static struct xfrm_algo_desc aalg_list[] = {
222 } 244 }
223 }, 245 },
224 246
247 .pfkey_supported = 1,
248
225 .desc = { 249 .desc = {
226 .sadb_alg_id = SADB_X_AALG_SHA2_384HMAC, 250 .sadb_alg_id = SADB_X_AALG_SHA2_384HMAC,
227 .sadb_alg_ivlen = 0, 251 .sadb_alg_ivlen = 0,
@@ -239,6 +263,8 @@ static struct xfrm_algo_desc aalg_list[] = {
239 } 263 }
240 }, 264 },
241 265
266 .pfkey_supported = 1,
267
242 .desc = { 268 .desc = {
243 .sadb_alg_id = SADB_X_AALG_SHA2_512HMAC, 269 .sadb_alg_id = SADB_X_AALG_SHA2_512HMAC,
244 .sadb_alg_ivlen = 0, 270 .sadb_alg_ivlen = 0,
@@ -257,6 +283,8 @@ static struct xfrm_algo_desc aalg_list[] = {
257 } 283 }
258 }, 284 },
259 285
286 .pfkey_supported = 1,
287
260 .desc = { 288 .desc = {
261 .sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC, 289 .sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
262 .sadb_alg_ivlen = 0, 290 .sadb_alg_ivlen = 0,
@@ -274,6 +302,8 @@ static struct xfrm_algo_desc aalg_list[] = {
274 } 302 }
275 }, 303 },
276 304
305 .pfkey_supported = 1,
306
277 .desc = { 307 .desc = {
278 .sadb_alg_id = SADB_X_AALG_AES_XCBC_MAC, 308 .sadb_alg_id = SADB_X_AALG_AES_XCBC_MAC,
279 .sadb_alg_ivlen = 0, 309 .sadb_alg_ivlen = 0,
@@ -295,6 +325,8 @@ static struct xfrm_algo_desc ealg_list[] = {
295 } 325 }
296 }, 326 },
297 327
328 .pfkey_supported = 1,
329
298 .desc = { 330 .desc = {
299 .sadb_alg_id = SADB_EALG_NULL, 331 .sadb_alg_id = SADB_EALG_NULL,
300 .sadb_alg_ivlen = 0, 332 .sadb_alg_ivlen = 0,
@@ -313,6 +345,8 @@ static struct xfrm_algo_desc ealg_list[] = {
313 } 345 }
314 }, 346 },
315 347
348 .pfkey_supported = 1,
349
316 .desc = { 350 .desc = {
317 .sadb_alg_id = SADB_EALG_DESCBC, 351 .sadb_alg_id = SADB_EALG_DESCBC,
318 .sadb_alg_ivlen = 8, 352 .sadb_alg_ivlen = 8,
@@ -331,6 +365,8 @@ static struct xfrm_algo_desc ealg_list[] = {
331 } 365 }
332 }, 366 },
333 367
368 .pfkey_supported = 1,
369
334 .desc = { 370 .desc = {
335 .sadb_alg_id = SADB_EALG_3DESCBC, 371 .sadb_alg_id = SADB_EALG_3DESCBC,
336 .sadb_alg_ivlen = 8, 372 .sadb_alg_ivlen = 8,
@@ -349,6 +385,8 @@ static struct xfrm_algo_desc ealg_list[] = {
349 } 385 }
350 }, 386 },
351 387
388 .pfkey_supported = 1,
389
352 .desc = { 390 .desc = {
353 .sadb_alg_id = SADB_X_EALG_CASTCBC, 391 .sadb_alg_id = SADB_X_EALG_CASTCBC,
354 .sadb_alg_ivlen = 8, 392 .sadb_alg_ivlen = 8,
@@ -367,6 +405,8 @@ static struct xfrm_algo_desc ealg_list[] = {
367 } 405 }
368 }, 406 },
369 407
408 .pfkey_supported = 1,
409
370 .desc = { 410 .desc = {
371 .sadb_alg_id = SADB_X_EALG_BLOWFISHCBC, 411 .sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
372 .sadb_alg_ivlen = 8, 412 .sadb_alg_ivlen = 8,
@@ -385,6 +425,8 @@ static struct xfrm_algo_desc ealg_list[] = {
385 } 425 }
386 }, 426 },
387 427
428 .pfkey_supported = 1,
429
388 .desc = { 430 .desc = {
389 .sadb_alg_id = SADB_X_EALG_AESCBC, 431 .sadb_alg_id = SADB_X_EALG_AESCBC,
390 .sadb_alg_ivlen = 8, 432 .sadb_alg_ivlen = 8,
@@ -403,6 +445,8 @@ static struct xfrm_algo_desc ealg_list[] = {
403 } 445 }
404 }, 446 },
405 447
448 .pfkey_supported = 1,
449
406 .desc = { 450 .desc = {
407 .sadb_alg_id = SADB_X_EALG_SERPENTCBC, 451 .sadb_alg_id = SADB_X_EALG_SERPENTCBC,
408 .sadb_alg_ivlen = 8, 452 .sadb_alg_ivlen = 8,
@@ -421,6 +465,8 @@ static struct xfrm_algo_desc ealg_list[] = {
421 } 465 }
422 }, 466 },
423 467
468 .pfkey_supported = 1,
469
424 .desc = { 470 .desc = {
425 .sadb_alg_id = SADB_X_EALG_CAMELLIACBC, 471 .sadb_alg_id = SADB_X_EALG_CAMELLIACBC,
426 .sadb_alg_ivlen = 8, 472 .sadb_alg_ivlen = 8,
@@ -439,6 +485,8 @@ static struct xfrm_algo_desc ealg_list[] = {
439 } 485 }
440 }, 486 },
441 487
488 .pfkey_supported = 1,
489
442 .desc = { 490 .desc = {
443 .sadb_alg_id = SADB_X_EALG_TWOFISHCBC, 491 .sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
444 .sadb_alg_ivlen = 8, 492 .sadb_alg_ivlen = 8,
@@ -456,6 +504,8 @@ static struct xfrm_algo_desc ealg_list[] = {
456 } 504 }
457 }, 505 },
458 506
507 .pfkey_supported = 1,
508
459 .desc = { 509 .desc = {
460 .sadb_alg_id = SADB_X_EALG_AESCTR, 510 .sadb_alg_id = SADB_X_EALG_AESCTR,
461 .sadb_alg_ivlen = 8, 511 .sadb_alg_ivlen = 8,
@@ -473,6 +523,7 @@ static struct xfrm_algo_desc calg_list[] = {
473 .threshold = 90, 523 .threshold = 90,
474 } 524 }
475 }, 525 },
526 .pfkey_supported = 1,
476 .desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE } 527 .desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
477}, 528},
478{ 529{
@@ -482,6 +533,7 @@ static struct xfrm_algo_desc calg_list[] = {
482 .threshold = 90, 533 .threshold = 90,
483 } 534 }
484 }, 535 },
536 .pfkey_supported = 1,
485 .desc = { .sadb_alg_id = SADB_X_CALG_LZS } 537 .desc = { .sadb_alg_id = SADB_X_CALG_LZS }
486}, 538},
487{ 539{
@@ -491,6 +543,7 @@ static struct xfrm_algo_desc calg_list[] = {
491 .threshold = 50, 543 .threshold = 50,
492 } 544 }
493 }, 545 },
546 .pfkey_supported = 1,
494 .desc = { .sadb_alg_id = SADB_X_CALG_LZJH } 547 .desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
495}, 548},
496}; 549};
@@ -714,27 +767,27 @@ void xfrm_probe_algs(void)
714} 767}
715EXPORT_SYMBOL_GPL(xfrm_probe_algs); 768EXPORT_SYMBOL_GPL(xfrm_probe_algs);
716 769
717int xfrm_count_auth_supported(void) 770int xfrm_count_pfkey_auth_supported(void)
718{ 771{
719 int i, n; 772 int i, n;
720 773
721 for (i = 0, n = 0; i < aalg_entries(); i++) 774 for (i = 0, n = 0; i < aalg_entries(); i++)
722 if (aalg_list[i].available) 775 if (aalg_list[i].available && aalg_list[i].pfkey_supported)
723 n++; 776 n++;
724 return n; 777 return n;
725} 778}
726EXPORT_SYMBOL_GPL(xfrm_count_auth_supported); 779EXPORT_SYMBOL_GPL(xfrm_count_pfkey_auth_supported);
727 780
728int xfrm_count_enc_supported(void) 781int xfrm_count_pfkey_enc_supported(void)
729{ 782{
730 int i, n; 783 int i, n;
731 784
732 for (i = 0, n = 0; i < ealg_entries(); i++) 785 for (i = 0, n = 0; i < ealg_entries(); i++)
733 if (ealg_list[i].available) 786 if (ealg_list[i].available && ealg_list[i].pfkey_supported)
734 n++; 787 n++;
735 return n; 788 return n;
736} 789}
737EXPORT_SYMBOL_GPL(xfrm_count_enc_supported); 790EXPORT_SYMBOL_GPL(xfrm_count_pfkey_enc_supported);
738 791
739#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE) 792#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
740 793
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 3670526e70b9..bcfda8921b5b 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -64,7 +64,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
64 64
65 if (unlikely(x->km.state != XFRM_STATE_VALID)) { 65 if (unlikely(x->km.state != XFRM_STATE_VALID)) {
66 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID); 66 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
67 goto error_nolock; 67 goto error;
68 } 68 }
69 69
70 err = xfrm_state_check_expire(x); 70 err = xfrm_state_check_expire(x);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 6c9aa642a2ba..5b47180986f8 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -35,6 +35,10 @@
35 35
36#include "xfrm_hash.h" 36#include "xfrm_hash.h"
37 37
38#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
39#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
40#define XFRM_MAX_QUEUE_LEN 100
41
38DEFINE_MUTEX(xfrm_cfg_mutex); 42DEFINE_MUTEX(xfrm_cfg_mutex);
39EXPORT_SYMBOL(xfrm_cfg_mutex); 43EXPORT_SYMBOL(xfrm_cfg_mutex);
40 44
@@ -51,7 +55,7 @@ static struct kmem_cache *xfrm_dst_cache __read_mostly;
51static void xfrm_init_pmtu(struct dst_entry *dst); 55static void xfrm_init_pmtu(struct dst_entry *dst);
52static int stale_bundle(struct dst_entry *dst); 56static int stale_bundle(struct dst_entry *dst);
53static int xfrm_bundle_ok(struct xfrm_dst *xdst); 57static int xfrm_bundle_ok(struct xfrm_dst *xdst);
54 58static void xfrm_policy_queue_process(unsigned long arg);
55 59
56static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, 60static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
57 int dir); 61 int dir);
@@ -287,8 +291,11 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
287 INIT_HLIST_NODE(&policy->byidx); 291 INIT_HLIST_NODE(&policy->byidx);
288 rwlock_init(&policy->lock); 292 rwlock_init(&policy->lock);
289 atomic_set(&policy->refcnt, 1); 293 atomic_set(&policy->refcnt, 1);
294 skb_queue_head_init(&policy->polq.hold_queue);
290 setup_timer(&policy->timer, xfrm_policy_timer, 295 setup_timer(&policy->timer, xfrm_policy_timer,
291 (unsigned long)policy); 296 (unsigned long)policy);
297 setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process,
298 (unsigned long)policy);
292 policy->flo.ops = &xfrm_policy_fc_ops; 299 policy->flo.ops = &xfrm_policy_fc_ops;
293 } 300 }
294 return policy; 301 return policy;
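A policy now carries two timers: the existing lifetime timer and polq.hold_timer, which drains packets parked while the needed states are still resolving. The queue state the rest of the patch manipulates boils down to three fields (sketch; the real definition of struct xfrm_policy_queue lives in include/net/xfrm.h):

	struct xfrm_policy_queue {
		struct sk_buff_head	hold_queue;	/* skbs awaiting usable states */
		struct timer_list	hold_timer;	/* runs xfrm_policy_queue_process() */
		unsigned long		timeout;	/* current backoff, in jiffies */
	};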
@@ -309,6 +316,16 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
309} 316}
310EXPORT_SYMBOL(xfrm_policy_destroy); 317EXPORT_SYMBOL(xfrm_policy_destroy);
311 318
319static void xfrm_queue_purge(struct sk_buff_head *list)
320{
321 struct sk_buff *skb;
322
323 while ((skb = skb_dequeue(list)) != NULL) {
324 dev_put(skb->dev);
325 kfree_skb(skb);
326 }
327}
328
 312/* Rule must be locked. Release descendant resources, announce 329/* Rule must be locked. Release descendant resources, announce
 313 * the entry dead. The rule must already be unlinked from the lists. 330 * the entry dead. The rule must already be unlinked from the lists.
314 */ 331 */
@@ -319,6 +336,9 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
319 336
320 atomic_inc(&policy->genid); 337 atomic_inc(&policy->genid);
321 338
339 del_timer(&policy->polq.hold_timer);
340 xfrm_queue_purge(&policy->polq.hold_queue);
341
322 if (del_timer(&policy->timer)) 342 if (del_timer(&policy->timer))
323 xfrm_pol_put(policy); 343 xfrm_pol_put(policy);
324 344
@@ -562,6 +582,46 @@ static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s
562 return 0; 582 return 0;
563} 583}
564 584
585static void xfrm_policy_requeue(struct xfrm_policy *old,
586 struct xfrm_policy *new)
587{
588 struct xfrm_policy_queue *pq = &old->polq;
589 struct sk_buff_head list;
590
591 __skb_queue_head_init(&list);
592
593 spin_lock_bh(&pq->hold_queue.lock);
594 skb_queue_splice_init(&pq->hold_queue, &list);
595 del_timer(&pq->hold_timer);
596 spin_unlock_bh(&pq->hold_queue.lock);
597
598 if (skb_queue_empty(&list))
599 return;
600
601 pq = &new->polq;
602
603 spin_lock_bh(&pq->hold_queue.lock);
604 skb_queue_splice(&list, &pq->hold_queue);
605 pq->timeout = XFRM_QUEUE_TMO_MIN;
606 mod_timer(&pq->hold_timer, jiffies);
607 spin_unlock_bh(&pq->hold_queue.lock);
608}
609
610static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
611 struct xfrm_policy *pol)
612{
613 u32 mark = policy->mark.v & policy->mark.m;
614
615 if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
616 return true;
617
618 if ((mark & pol->mark.m) == pol->mark.v &&
619 policy->priority == pol->priority)
620 return true;
621
622 return false;
623}
624
565int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) 625int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
566{ 626{
567 struct net *net = xp_net(policy); 627 struct net *net = xp_net(policy);
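xfrm_policy_mark_match() loosens the old inline test: an exact (value, mask) pair still matches, and otherwise the new policy's effective mark must fall inside the existing policy's mask at equal priority. The predicate in isolation (standalone types for illustration):

	#include <stdbool.h>
	#include <stdint.h>

	struct mark { uint32_t v, m; };

	/* Mirrors the check added above. */
	static bool mark_match(struct mark np, uint32_t np_prio,
			       struct mark op, uint32_t op_prio)
	{
		uint32_t effective = np.v & np.m;

		if (np.v == op.v && np.m == op.m)
			return true;			/* identical mark spec */
		return (effective & op.m) == op.v &&	/* contained in old mark */
		       np_prio == op_prio;		/* at the same priority */
	}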
@@ -569,7 +629,6 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
569 struct xfrm_policy *delpol; 629 struct xfrm_policy *delpol;
570 struct hlist_head *chain; 630 struct hlist_head *chain;
571 struct hlist_node *entry, *newpos; 631 struct hlist_node *entry, *newpos;
572 u32 mark = policy->mark.v & policy->mark.m;
573 632
574 write_lock_bh(&xfrm_policy_lock); 633 write_lock_bh(&xfrm_policy_lock);
575 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); 634 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
@@ -578,7 +637,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
578 hlist_for_each_entry(pol, entry, chain, bydst) { 637 hlist_for_each_entry(pol, entry, chain, bydst) {
579 if (pol->type == policy->type && 638 if (pol->type == policy->type &&
580 !selector_cmp(&pol->selector, &policy->selector) && 639 !selector_cmp(&pol->selector, &policy->selector) &&
581 (mark & pol->mark.m) == pol->mark.v && 640 xfrm_policy_mark_match(policy, pol) &&
582 xfrm_sec_ctx_match(pol->security, policy->security) && 641 xfrm_sec_ctx_match(pol->security, policy->security) &&
583 !WARN_ON(delpol)) { 642 !WARN_ON(delpol)) {
584 if (excl) { 643 if (excl) {
@@ -603,8 +662,10 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
603 net->xfrm.policy_count[dir]++; 662 net->xfrm.policy_count[dir]++;
604 atomic_inc(&flow_cache_genid); 663 atomic_inc(&flow_cache_genid);
605 rt_genid_bump(net); 664 rt_genid_bump(net);
606 if (delpol) 665 if (delpol) {
666 xfrm_policy_requeue(delpol, policy);
607 __xfrm_policy_unlink(delpol, dir); 667 __xfrm_policy_unlink(delpol, dir);
668 }
608 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir); 669 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
609 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index)); 670 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
610 policy->curlft.add_time = get_seconds(); 671 policy->curlft.add_time = get_seconds();
@@ -1115,11 +1176,15 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1115 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir); 1176 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
1116 __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir); 1177 __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
1117 } 1178 }
1118 if (old_pol) 1179 if (old_pol) {
1180 if (pol)
1181 xfrm_policy_requeue(old_pol, pol);
1182
 1119 /* Unlinking always succeeds. This is the only function 1183 /* Unlinking always succeeds. This is the only function
 1120 * allowed to delete or replace a socket policy. 1184 * allowed to delete or replace a socket policy.
1121 */ 1185 */
1122 __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir); 1186 __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
1187 }
1123 write_unlock_bh(&xfrm_policy_lock); 1188 write_unlock_bh(&xfrm_policy_lock);
1124 1189
1125 if (old_pol) { 1190 if (old_pol) {
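With this, both replacement paths, xfrm_policy_insert() above and the per-socket path here, hand the old policy's parked packets to its successor through xfrm_policy_requeue(): the hold queue is spliced across under both queue locks, and the new timer is armed at jiffies so delivery is retried immediately rather than after another backoff round. The hand-off condensed:

	static void requeue_sketch(struct sk_buff_head *old_q,
				   struct sk_buff_head *new_q,
				   struct timer_list *new_timer)
	{
		struct sk_buff_head tmp;

		__skb_queue_head_init(&tmp);

		spin_lock_bh(&old_q->lock);
		skb_queue_splice_init(old_q, &tmp);	/* steal the parked skbs */
		spin_unlock_bh(&old_q->lock);

		if (skb_queue_empty(&tmp))
			return;

		spin_lock_bh(&new_q->lock);
		skb_queue_splice(&tmp, new_q);		/* land ahead of new arrivals */
		mod_timer(new_timer, jiffies);		/* drain right away */
		spin_unlock_bh(&new_q->lock);
	}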
@@ -1310,6 +1375,8 @@ static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *f
 1310 * It means we need to retry resolving. */ 1375 * It means we need to retry resolving. */
1311 if (xdst->num_xfrms > 0) 1376 if (xdst->num_xfrms > 0)
1312 return NULL; 1377 return NULL;
1378 } else if (dst->flags & DST_XFRM_QUEUE) {
1379 return NULL;
1313 } else { 1380 } else {
1314 /* Real bundle */ 1381 /* Real bundle */
1315 if (stale_bundle(dst)) 1382 if (stale_bundle(dst))
@@ -1673,6 +1740,171 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1673 return xdst; 1740 return xdst;
1674} 1741}
1675 1742
1743static void xfrm_policy_queue_process(unsigned long arg)
1744{
1745 int err = 0;
1746 struct sk_buff *skb;
1747 struct sock *sk;
1748 struct dst_entry *dst;
1749 struct net_device *dev;
1750 struct xfrm_policy *pol = (struct xfrm_policy *)arg;
1751 struct xfrm_policy_queue *pq = &pol->polq;
1752 struct flowi fl;
1753 struct sk_buff_head list;
1754
1755 spin_lock(&pq->hold_queue.lock);
1756 skb = skb_peek(&pq->hold_queue);
1757 dst = skb_dst(skb);
1758 sk = skb->sk;
1759 xfrm_decode_session(skb, &fl, dst->ops->family);
1760 spin_unlock(&pq->hold_queue.lock);
1761
1762 dst_hold(dst->path);
1763 dst = xfrm_lookup(xp_net(pol), dst->path, &fl,
1764 sk, 0);
1765 if (IS_ERR(dst))
1766 goto purge_queue;
1767
1768 if (dst->flags & DST_XFRM_QUEUE) {
1769 dst_release(dst);
1770
1771 if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
1772 goto purge_queue;
1773
1774 pq->timeout = pq->timeout << 1;
1775 mod_timer(&pq->hold_timer, jiffies + pq->timeout);
1776 return;
1777 }
1778
1779 dst_release(dst);
1780
1781 __skb_queue_head_init(&list);
1782
1783 spin_lock(&pq->hold_queue.lock);
1784 pq->timeout = 0;
1785 skb_queue_splice_init(&pq->hold_queue, &list);
1786 spin_unlock(&pq->hold_queue.lock);
1787
1788 while (!skb_queue_empty(&list)) {
1789 skb = __skb_dequeue(&list);
1790
1791 xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
1792 dst_hold(skb_dst(skb)->path);
1793 dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
1794 &fl, skb->sk, 0);
1795 if (IS_ERR(dst)) {
1796 dev_put(skb->dev);
1797 kfree_skb(skb);
1798 continue;
1799 }
1800
1801 nf_reset(skb);
1802 skb_dst_drop(skb);
1803 skb_dst_set(skb, dst);
1804
1805 dev = skb->dev;
1806 err = dst_output(skb);
1807 dev_put(dev);
1808 }
1809
1810 return;
1811
1812purge_queue:
1813 pq->timeout = 0;
1814 xfrm_queue_purge(&pq->hold_queue);
1815}
1816
1817static int xdst_queue_output(struct sk_buff *skb)
1818{
1819 unsigned long sched_next;
1820 struct dst_entry *dst = skb_dst(skb);
1821 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1822 struct xfrm_policy_queue *pq = &xdst->pols[0]->polq;
1823
1824 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
1825 kfree_skb(skb);
1826 return -EAGAIN;
1827 }
1828
1829 skb_dst_force(skb);
1830 dev_hold(skb->dev);
1831
1832 spin_lock_bh(&pq->hold_queue.lock);
1833
1834 if (!pq->timeout)
1835 pq->timeout = XFRM_QUEUE_TMO_MIN;
1836
1837 sched_next = jiffies + pq->timeout;
1838
1839 if (del_timer(&pq->hold_timer)) {
1840 if (time_before(pq->hold_timer.expires, sched_next))
1841 sched_next = pq->hold_timer.expires;
1842 }
1843
1844 __skb_queue_tail(&pq->hold_queue, skb);
1845 mod_timer(&pq->hold_timer, sched_next);
1846
1847 spin_unlock_bh(&pq->hold_queue.lock);
1848
1849 return 0;
1850}
1851
1852static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
1853 struct dst_entry *dst,
1854 const struct flowi *fl,
1855 int num_xfrms,
1856 u16 family)
1857{
1858 int err;
1859 struct net_device *dev;
1860 struct dst_entry *dst1;
1861 struct xfrm_dst *xdst;
1862
1863 xdst = xfrm_alloc_dst(net, family);
1864 if (IS_ERR(xdst))
1865 return xdst;
1866
1867 if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0 ||
1868 (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP))
1869 return xdst;
1870
1871 dst1 = &xdst->u.dst;
1872 dst_hold(dst);
1873 xdst->route = dst;
1874
1875 dst_copy_metrics(dst1, dst);
1876
1877 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1878 dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
1879 dst1->lastuse = jiffies;
1880
1881 dst1->input = dst_discard;
1882 dst1->output = xdst_queue_output;
1883
1884 dst_hold(dst);
1885 dst1->child = dst;
1886 dst1->path = dst;
1887
1888 xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
1889
1890 err = -ENODEV;
1891 dev = dst->dev;
1892 if (!dev)
1893 goto free_dst;
1894
1895 err = xfrm_fill_dst(xdst, dev, fl);
1896 if (err)
1897 goto free_dst;
1898
1899out:
1900 return xdst;
1901
1902free_dst:
1903 dst_release(dst1);
1904 xdst = ERR_PTR(err);
1905 goto out;
1906}
1907
1676static struct flow_cache_object * 1908static struct flow_cache_object *
1677xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, 1909xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
1678 struct flow_cache_object *oldflo, void *ctx) 1910 struct flow_cache_object *oldflo, void *ctx)
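Taken together, xdst_queue_output() parks at most XFRM_MAX_QUEUE_LEN skbs (overflow is dropped with -EAGAIN) and xfrm_policy_queue_process() retries resolution on a bounded exponential backoff: first fire after HZ/10, doubling while the lookup still yields a DST_XFRM_QUEUE dummy bundle, and purging the queue once the timeout passes 60*HZ. The schedule in isolation (plain C, HZ fixed at 1000 for illustration):

	#include <stdio.h>

	#define HZ	1000
	#define TMO_MIN	(HZ / 10)	/* XFRM_QUEUE_TMO_MIN */
	#define TMO_MAX	(60 * HZ)	/* XFRM_QUEUE_TMO_MAX */

	int main(void)
	{
		unsigned long t = TMO_MIN;
		int try = 1;

		/* doubles per failed retry; once t >= TMO_MAX the queue is purged */
		while (t < TMO_MAX) {
			printf("retry %2d after %6lu jiffies\n", try++, t);
			t <<= 1;
		}
		printf("retry %2d after %6lu jiffies, then purge\n", try, t);
		return 0;
	}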
@@ -1751,7 +1983,7 @@ make_dummy_bundle:
 1751 /* We found policies, but there are no bundles to instantiate: 1983 /* We found policies, but there are no bundles to instantiate:
 1752 * either because the policy blocks, has no transformations, or 1984 * either because the policy blocks, has no transformations, or
 1753 * we could not build a template (no xfrm_states). */ 1985 * we could not build a template (no xfrm_states). */
1754 xdst = xfrm_alloc_dst(net, family); 1986 xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family);
1755 if (IS_ERR(xdst)) { 1987 if (IS_ERR(xdst)) {
1756 xfrm_pols_put(pols, num_pols); 1988 xfrm_pols_put(pols, num_pols);
1757 return ERR_CAST(xdst); 1989 return ERR_CAST(xdst);
@@ -2359,6 +2591,9 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
2359 (dst->dev && !netif_running(dst->dev))) 2591 (dst->dev && !netif_running(dst->dev)))
2360 return 0; 2592 return 0;
2361 2593
2594 if (dst->flags & DST_XFRM_QUEUE)
2595 return 1;
2596
2362 last = NULL; 2597 last = NULL;
2363 2598
2364 do { 2599 do {
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index 603903853e89..c721b0d9ab8b 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -74,13 +74,13 @@ static const struct file_operations xfrm_statistics_seq_fops = {
74 74
75int __net_init xfrm_proc_init(struct net *net) 75int __net_init xfrm_proc_init(struct net *net)
76{ 76{
77 if (!proc_net_fops_create(net, "xfrm_stat", S_IRUGO, 77 if (!proc_create("xfrm_stat", S_IRUGO, net->proc_net,
78 &xfrm_statistics_seq_fops)) 78 &xfrm_statistics_seq_fops))
79 return -ENOMEM; 79 return -ENOMEM;
80 return 0; 80 return 0;
81} 81}
82 82
83void xfrm_proc_fini(struct net *net) 83void xfrm_proc_fini(struct net *net)
84{ 84{
85 proc_net_remove(net, "xfrm_stat"); 85 remove_proc_entry("xfrm_stat", net->proc_net);
86} 86}
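The proc_net_fops_create()/proc_net_remove() wrappers give way here to the generic procfs calls with net->proc_net as the parent: proc_create() paired with remove_proc_entry(). The same pattern generically (foo_stat and foo_stat_fops are hypothetical names):

	static int __net_init foo_proc_init(struct net *net)
	{
		if (!proc_create("foo_stat", S_IRUGO, net->proc_net,
				 &foo_stat_fops))	/* hypothetical fops */
			return -ENOMEM;
		return 0;
	}

	static void foo_proc_fini(struct net *net)
	{
		remove_proc_entry("foo_stat", net->proc_net);
	}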