aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
Diffstat (limited to 'net')
-rw-r--r--net/bluetooth/a2mp.c6
-rw-r--r--net/bluetooth/af_bluetooth.c15
-rw-r--r--net/bluetooth/bnep/sock.c4
-rw-r--r--net/bluetooth/cmtp/sock.c4
-rw-r--r--net/bluetooth/hci_conn.c4
-rw-r--r--net/bluetooth/hci_core.c727
-rw-r--r--net/bluetooth/hci_event.c605
-rw-r--r--net/bluetooth/hci_sock.c9
-rw-r--r--net/bluetooth/hci_sysfs.c4
-rw-r--r--net/bluetooth/hidp/core.c4
-rw-r--r--net/bluetooth/hidp/sock.c4
-rw-r--r--net/bluetooth/l2cap_sock.c4
-rw-r--r--net/bluetooth/mgmt.c680
-rw-r--r--net/bluetooth/rfcomm/core.c167
-rw-r--r--net/bluetooth/rfcomm/sock.c3
-rw-r--r--net/bluetooth/sco.c3
-rw-r--r--net/mac80211/cfg.c178
-rw-r--r--net/mac80211/debugfs_key.c10
-rw-r--r--net/mac80211/debugfs_netdev.c22
-rw-r--r--net/mac80211/debugfs_sta.c31
-rw-r--r--net/mac80211/driver-ops.h67
-rw-r--r--net/mac80211/ht.c52
-rw-r--r--net/mac80211/ibss.c29
-rw-r--r--net/mac80211/ieee80211_i.h37
-rw-r--r--net/mac80211/iface.c29
-rw-r--r--net/mac80211/key.c208
-rw-r--r--net/mac80211/key.h18
-rw-r--r--net/mac80211/main.c77
-rw-r--r--net/mac80211/mesh.c64
-rw-r--r--net/mac80211/mesh.h12
-rw-r--r--net/mac80211/mesh_plink.c37
-rw-r--r--net/mac80211/mlme.c112
-rw-r--r--net/mac80211/offchannel.c10
-rw-r--r--net/mac80211/pm.c123
-rw-r--r--net/mac80211/rc80211_minstrel.c204
-rw-r--r--net/mac80211/rc80211_minstrel.h31
-rw-r--r--net/mac80211/rc80211_minstrel_debugfs.c12
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c103
-rw-r--r--net/mac80211/rc80211_minstrel_ht.h6
-rw-r--r--net/mac80211/rx.c61
-rw-r--r--net/mac80211/scan.c7
-rw-r--r--net/mac80211/sta_info.c56
-rw-r--r--net/mac80211/sta_info.h11
-rw-r--r--net/mac80211/trace.h46
-rw-r--r--net/mac80211/tx.c8
-rw-r--r--net/mac80211/util.c121
-rw-r--r--net/mac80211/vht.c212
-rw-r--r--net/nfc/llcp/commands.c205
-rw-r--r--net/nfc/llcp/llcp.c112
-rw-r--r--net/nfc/llcp/llcp.h36
-rw-r--r--net/nfc/llcp/sock.c130
-rw-r--r--net/nfc/netlink.c172
-rw-r--r--net/nfc/nfc.h14
-rw-r--r--net/rfkill/rfkill-regulator.c2
-rw-r--r--net/wireless/ap.c62
-rw-r--r--net/wireless/core.c77
-rw-r--r--net/wireless/core.h22
-rw-r--r--net/wireless/mesh.c15
-rw-r--r--net/wireless/mlme.c230
-rw-r--r--net/wireless/nl80211.c1857
-rw-r--r--net/wireless/nl80211.h68
-rw-r--r--net/wireless/rdev-ops.h20
-rw-r--r--net/wireless/reg.c6
-rw-r--r--net/wireless/sme.c28
-rw-r--r--net/wireless/sysfs.c25
-rw-r--r--net/wireless/trace.h46
66 files changed, 4370 insertions, 2994 deletions
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index eb0f4b16ff09..17f33a62f6db 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -397,13 +397,12 @@ static int a2mp_getampassoc_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
397 if (ctrl) { 397 if (ctrl) {
398 u8 *assoc; 398 u8 *assoc;
399 399
400 assoc = kzalloc(assoc_len, GFP_KERNEL); 400 assoc = kmemdup(rsp->amp_assoc, assoc_len, GFP_KERNEL);
401 if (!assoc) { 401 if (!assoc) {
402 amp_ctrl_put(ctrl); 402 amp_ctrl_put(ctrl);
403 return -ENOMEM; 403 return -ENOMEM;
404 } 404 }
405 405
406 memcpy(assoc, rsp->amp_assoc, assoc_len);
407 ctrl->assoc = assoc; 406 ctrl->assoc = assoc;
408 ctrl->assoc_len = assoc_len; 407 ctrl->assoc_len = assoc_len;
409 ctrl->assoc_rem_len = assoc_len; 408 ctrl->assoc_rem_len = assoc_len;
@@ -472,13 +471,12 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
472 size_t assoc_len = le16_to_cpu(hdr->len) - sizeof(*req); 471 size_t assoc_len = le16_to_cpu(hdr->len) - sizeof(*req);
473 u8 *assoc; 472 u8 *assoc;
474 473
475 assoc = kzalloc(assoc_len, GFP_KERNEL); 474 assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL);
476 if (!assoc) { 475 if (!assoc) {
477 amp_ctrl_put(ctrl); 476 amp_ctrl_put(ctrl);
478 return -ENOMEM; 477 return -ENOMEM;
479 } 478 }
480 479
481 memcpy(assoc, req->amp_assoc, assoc_len);
482 ctrl->assoc = assoc; 480 ctrl->assoc = assoc;
483 ctrl->assoc_len = assoc_len; 481 ctrl->assoc_len = assoc_len;
484 ctrl->assoc_rem_len = assoc_len; 482 ctrl->assoc_rem_len = assoc_len;
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index d3ee69b35a78..81598e588f7f 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -92,23 +92,14 @@ int bt_sock_register(int proto, const struct net_proto_family *ops)
92} 92}
93EXPORT_SYMBOL(bt_sock_register); 93EXPORT_SYMBOL(bt_sock_register);
94 94
95int bt_sock_unregister(int proto) 95void bt_sock_unregister(int proto)
96{ 96{
97 int err = 0;
98
99 if (proto < 0 || proto >= BT_MAX_PROTO) 97 if (proto < 0 || proto >= BT_MAX_PROTO)
100 return -EINVAL; 98 return;
101 99
102 write_lock(&bt_proto_lock); 100 write_lock(&bt_proto_lock);
103 101 bt_proto[proto] = NULL;
104 if (!bt_proto[proto])
105 err = -ENOENT;
106 else
107 bt_proto[proto] = NULL;
108
109 write_unlock(&bt_proto_lock); 102 write_unlock(&bt_proto_lock);
110
111 return err;
112} 103}
113EXPORT_SYMBOL(bt_sock_unregister); 104EXPORT_SYMBOL(bt_sock_unregister);
114 105
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index e7154a58465f..5b1c04e28821 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -253,8 +253,6 @@ error:
253void __exit bnep_sock_cleanup(void) 253void __exit bnep_sock_cleanup(void)
254{ 254{
255 bt_procfs_cleanup(&init_net, "bnep"); 255 bt_procfs_cleanup(&init_net, "bnep");
256 if (bt_sock_unregister(BTPROTO_BNEP) < 0) 256 bt_sock_unregister(BTPROTO_BNEP);
257 BT_ERR("Can't unregister BNEP socket");
258
259 proto_unregister(&bnep_proto); 257 proto_unregister(&bnep_proto);
260} 258}
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 1c57482112b6..58d9edebab4b 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -264,8 +264,6 @@ error:
264void cmtp_cleanup_sockets(void) 264void cmtp_cleanup_sockets(void)
265{ 265{
266 bt_procfs_cleanup(&init_net, "cmtp"); 266 bt_procfs_cleanup(&init_net, "cmtp");
267 if (bt_sock_unregister(BTPROTO_CMTP) < 0) 267 bt_sock_unregister(BTPROTO_CMTP);
268 BT_ERR("Can't unregister CMTP socket");
269
270 proto_unregister(&cmtp_proto); 268 proto_unregister(&cmtp_proto);
271} 269}
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 4925a02ae7e4..b9f90169940b 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -117,7 +117,7 @@ static void hci_acl_create_connection_cancel(struct hci_conn *conn)
117 hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp); 117 hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
118} 118}
119 119
120void hci_acl_disconn(struct hci_conn *conn, __u8 reason) 120void hci_disconnect(struct hci_conn *conn, __u8 reason)
121{ 121{
122 struct hci_cp_disconnect cp; 122 struct hci_cp_disconnect cp;
123 123
@@ -253,7 +253,7 @@ static void hci_conn_disconnect(struct hci_conn *conn)
253 hci_amp_disconn(conn, reason); 253 hci_amp_disconn(conn, reason);
254 break; 254 break;
255 default: 255 default:
256 hci_acl_disconn(conn, reason); 256 hci_disconnect(conn, reason);
257 break; 257 break;
258 } 258 }
259} 259}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 60793e7b768b..cfcad5423f1c 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -57,36 +57,9 @@ static void hci_notify(struct hci_dev *hdev, int event)
57 57
58/* ---- HCI requests ---- */ 58/* ---- HCI requests ---- */
59 59
60void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result) 60static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
61{ 61{
62 BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result); 62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
63
64 /* If this is the init phase check if the completed command matches
65 * the last init command, and if not just return.
66 */
67 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
68 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
69 u16 opcode = __le16_to_cpu(sent->opcode);
70 struct sk_buff *skb;
71
72 /* Some CSR based controllers generate a spontaneous
73 * reset complete event during init and any pending
74 * command will never be completed. In such a case we
75 * need to resend whatever was the last sent
76 * command.
77 */
78
79 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
80 return;
81
82 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
83 if (skb) {
84 skb_queue_head(&hdev->cmd_q, skb);
85 queue_work(hdev->workqueue, &hdev->cmd_work);
86 }
87
88 return;
89 }
90 63
91 if (hdev->req_status == HCI_REQ_PEND) { 64 if (hdev->req_status == HCI_REQ_PEND) {
92 hdev->req_result = result; 65 hdev->req_result = result;
@@ -107,21 +80,41 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
107} 80}
108 81
109/* Execute request and wait for completion. */ 82/* Execute request and wait for completion. */
110static int __hci_request(struct hci_dev *hdev, 83static int __hci_req_sync(struct hci_dev *hdev,
111 void (*req)(struct hci_dev *hdev, unsigned long opt), 84 void (*func)(struct hci_request *req,
112 unsigned long opt, __u32 timeout) 85 unsigned long opt),
86 unsigned long opt, __u32 timeout)
113{ 87{
88 struct hci_request req;
114 DECLARE_WAITQUEUE(wait, current); 89 DECLARE_WAITQUEUE(wait, current);
115 int err = 0; 90 int err = 0;
116 91
117 BT_DBG("%s start", hdev->name); 92 BT_DBG("%s start", hdev->name);
118 93
94 hci_req_init(&req, hdev);
95
119 hdev->req_status = HCI_REQ_PEND; 96 hdev->req_status = HCI_REQ_PEND;
120 97
98 func(&req, opt);
99
100 err = hci_req_run(&req, hci_req_sync_complete);
101 if (err < 0) {
102 hdev->req_status = 0;
103
104 /* ENODATA means the HCI request command queue is empty.
105 * This can happen when a request with conditionals doesn't
106 * trigger any commands to be sent. This is normal behavior
107 * and should not trigger an error return.
108 */
109 if (err == -ENODATA)
110 return 0;
111
112 return err;
113 }
114
121 add_wait_queue(&hdev->req_wait_q, &wait); 115 add_wait_queue(&hdev->req_wait_q, &wait);
122 set_current_state(TASK_INTERRUPTIBLE); 116 set_current_state(TASK_INTERRUPTIBLE);
123 117
124 req(hdev, opt);
125 schedule_timeout(timeout); 118 schedule_timeout(timeout);
126 119
127 remove_wait_queue(&hdev->req_wait_q, &wait); 120 remove_wait_queue(&hdev->req_wait_q, &wait);
@@ -150,9 +143,10 @@ static int __hci_request(struct hci_dev *hdev,
150 return err; 143 return err;
151} 144}
152 145
153static int hci_request(struct hci_dev *hdev, 146static int hci_req_sync(struct hci_dev *hdev,
154 void (*req)(struct hci_dev *hdev, unsigned long opt), 147 void (*req)(struct hci_request *req,
155 unsigned long opt, __u32 timeout) 148 unsigned long opt),
149 unsigned long opt, __u32 timeout)
156{ 150{
157 int ret; 151 int ret;
158 152
@@ -161,75 +155,86 @@ static int hci_request(struct hci_dev *hdev,
161 155
162 /* Serialize all requests */ 156 /* Serialize all requests */
163 hci_req_lock(hdev); 157 hci_req_lock(hdev);
164 ret = __hci_request(hdev, req, opt, timeout); 158 ret = __hci_req_sync(hdev, req, opt, timeout);
165 hci_req_unlock(hdev); 159 hci_req_unlock(hdev);
166 160
167 return ret; 161 return ret;
168} 162}
169 163
170static void hci_reset_req(struct hci_dev *hdev, unsigned long opt) 164static void hci_reset_req(struct hci_request *req, unsigned long opt)
171{ 165{
172 BT_DBG("%s %ld", hdev->name, opt); 166 BT_DBG("%s %ld", req->hdev->name, opt);
173 167
174 /* Reset device */ 168 /* Reset device */
175 set_bit(HCI_RESET, &hdev->flags); 169 set_bit(HCI_RESET, &req->hdev->flags);
176 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); 170 hci_req_add(req, HCI_OP_RESET, 0, NULL);
177} 171}
178 172
179static void bredr_init(struct hci_dev *hdev) 173static void bredr_init(struct hci_request *req)
180{ 174{
181 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; 175 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
182 176
183 /* Read Local Supported Features */ 177 /* Read Local Supported Features */
184 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); 178 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
185 179
186 /* Read Local Version */ 180 /* Read Local Version */
187 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL); 181 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
182
183 /* Read BD Address */
184 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
188} 185}
189 186
190static void amp_init(struct hci_dev *hdev) 187static void amp_init(struct hci_request *req)
191{ 188{
192 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED; 189 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
193 190
194 /* Read Local Version */ 191 /* Read Local Version */
195 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL); 192 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
196 193
197 /* Read Local AMP Info */ 194 /* Read Local AMP Info */
198 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL); 195 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
199 196
200 /* Read Data Blk size */ 197 /* Read Data Blk size */
201 hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL); 198 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
202} 199}
203 200
204static void hci_init_req(struct hci_dev *hdev, unsigned long opt) 201static void hci_init1_req(struct hci_request *req, unsigned long opt)
205{ 202{
203 struct hci_dev *hdev = req->hdev;
204 struct hci_request init_req;
206 struct sk_buff *skb; 205 struct sk_buff *skb;
207 206
208 BT_DBG("%s %ld", hdev->name, opt); 207 BT_DBG("%s %ld", hdev->name, opt);
209 208
210 /* Driver initialization */ 209 /* Driver initialization */
211 210
211 hci_req_init(&init_req, hdev);
212
212 /* Special commands */ 213 /* Special commands */
213 while ((skb = skb_dequeue(&hdev->driver_init))) { 214 while ((skb = skb_dequeue(&hdev->driver_init))) {
214 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; 215 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
215 skb->dev = (void *) hdev; 216 skb->dev = (void *) hdev;
216 217
217 skb_queue_tail(&hdev->cmd_q, skb); 218 if (skb_queue_empty(&init_req.cmd_q))
218 queue_work(hdev->workqueue, &hdev->cmd_work); 219 bt_cb(skb)->req.start = true;
220
221 skb_queue_tail(&init_req.cmd_q, skb);
219 } 222 }
220 skb_queue_purge(&hdev->driver_init); 223 skb_queue_purge(&hdev->driver_init);
221 224
225 hci_req_run(&init_req, NULL);
226
222 /* Reset */ 227 /* Reset */
223 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) 228 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
224 hci_reset_req(hdev, 0); 229 hci_reset_req(req, 0);
225 230
226 switch (hdev->dev_type) { 231 switch (hdev->dev_type) {
227 case HCI_BREDR: 232 case HCI_BREDR:
228 bredr_init(hdev); 233 bredr_init(req);
229 break; 234 break;
230 235
231 case HCI_AMP: 236 case HCI_AMP:
232 amp_init(hdev); 237 amp_init(req);
233 break; 238 break;
234 239
235 default: 240 default:
@@ -238,44 +243,327 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
238 } 243 }
239} 244}
240 245
241static void hci_scan_req(struct hci_dev *hdev, unsigned long opt) 246static void bredr_setup(struct hci_request *req)
247{
248 struct hci_cp_delete_stored_link_key cp;
249 __le16 param;
250 __u8 flt_type;
251
252 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
253 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
254
255 /* Read Class of Device */
256 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
257
258 /* Read Local Name */
259 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
260
261 /* Read Voice Setting */
262 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
263
264 /* Clear Event Filters */
265 flt_type = HCI_FLT_CLEAR_ALL;
266 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
267
268 /* Connection accept timeout ~20 secs */
269 param = __constant_cpu_to_le16(0x7d00);
270 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
271
272 bacpy(&cp.bdaddr, BDADDR_ANY);
273 cp.delete_all = 0x01;
274 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
275
276 /* Read page scan parameters */
277 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
278 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
279 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
280 }
281}
282
283static void le_setup(struct hci_request *req)
284{
285 /* Read LE Buffer Size */
286 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
287
288 /* Read LE Local Supported Features */
289 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
290
291 /* Read LE Advertising Channel TX Power */
292 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
293
294 /* Read LE White List Size */
295 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
296
297 /* Read LE Supported States */
298 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
299}
300
301static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
302{
303 if (lmp_ext_inq_capable(hdev))
304 return 0x02;
305
306 if (lmp_inq_rssi_capable(hdev))
307 return 0x01;
308
309 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
310 hdev->lmp_subver == 0x0757)
311 return 0x01;
312
313 if (hdev->manufacturer == 15) {
314 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
315 return 0x01;
316 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
317 return 0x01;
318 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
319 return 0x01;
320 }
321
322 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
323 hdev->lmp_subver == 0x1805)
324 return 0x01;
325
326 return 0x00;
327}
328
329static void hci_setup_inquiry_mode(struct hci_request *req)
330{
331 u8 mode;
332
333 mode = hci_get_inquiry_mode(req->hdev);
334
335 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
336}
337
338static void hci_setup_event_mask(struct hci_request *req)
339{
340 struct hci_dev *hdev = req->hdev;
341
342 /* The second byte is 0xff instead of 0x9f (two reserved bits
343 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
344 * command otherwise.
345 */
346 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
347
348 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
349 * any event mask for pre 1.2 devices.
350 */
351 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
352 return;
353
354 if (lmp_bredr_capable(hdev)) {
355 events[4] |= 0x01; /* Flow Specification Complete */
356 events[4] |= 0x02; /* Inquiry Result with RSSI */
357 events[4] |= 0x04; /* Read Remote Extended Features Complete */
358 events[5] |= 0x08; /* Synchronous Connection Complete */
359 events[5] |= 0x10; /* Synchronous Connection Changed */
360 }
361
362 if (lmp_inq_rssi_capable(hdev))
363 events[4] |= 0x02; /* Inquiry Result with RSSI */
364
365 if (lmp_sniffsubr_capable(hdev))
366 events[5] |= 0x20; /* Sniff Subrating */
367
368 if (lmp_pause_enc_capable(hdev))
369 events[5] |= 0x80; /* Encryption Key Refresh Complete */
370
371 if (lmp_ext_inq_capable(hdev))
372 events[5] |= 0x40; /* Extended Inquiry Result */
373
374 if (lmp_no_flush_capable(hdev))
375 events[7] |= 0x01; /* Enhanced Flush Complete */
376
377 if (lmp_lsto_capable(hdev))
378 events[6] |= 0x80; /* Link Supervision Timeout Changed */
379
380 if (lmp_ssp_capable(hdev)) {
381 events[6] |= 0x01; /* IO Capability Request */
382 events[6] |= 0x02; /* IO Capability Response */
383 events[6] |= 0x04; /* User Confirmation Request */
384 events[6] |= 0x08; /* User Passkey Request */
385 events[6] |= 0x10; /* Remote OOB Data Request */
386 events[6] |= 0x20; /* Simple Pairing Complete */
387 events[7] |= 0x04; /* User Passkey Notification */
388 events[7] |= 0x08; /* Keypress Notification */
389 events[7] |= 0x10; /* Remote Host Supported
390 * Features Notification
391 */
392 }
393
394 if (lmp_le_capable(hdev))
395 events[7] |= 0x20; /* LE Meta-Event */
396
397 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
398
399 if (lmp_le_capable(hdev)) {
400 memset(events, 0, sizeof(events));
401 events[0] = 0x1f;
402 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
403 sizeof(events), events);
404 }
405}
406
407static void hci_init2_req(struct hci_request *req, unsigned long opt)
408{
409 struct hci_dev *hdev = req->hdev;
410
411 if (lmp_bredr_capable(hdev))
412 bredr_setup(req);
413
414 if (lmp_le_capable(hdev))
415 le_setup(req);
416
417 hci_setup_event_mask(req);
418
419 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
420 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
421
422 if (lmp_ssp_capable(hdev)) {
423 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
424 u8 mode = 0x01;
425 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
426 sizeof(mode), &mode);
427 } else {
428 struct hci_cp_write_eir cp;
429
430 memset(hdev->eir, 0, sizeof(hdev->eir));
431 memset(&cp, 0, sizeof(cp));
432
433 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
434 }
435 }
436
437 if (lmp_inq_rssi_capable(hdev))
438 hci_setup_inquiry_mode(req);
439
440 if (lmp_inq_tx_pwr_capable(hdev))
441 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
442
443 if (lmp_ext_feat_capable(hdev)) {
444 struct hci_cp_read_local_ext_features cp;
445
446 cp.page = 0x01;
447 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
448 sizeof(cp), &cp);
449 }
450
451 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
452 u8 enable = 1;
453 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
454 &enable);
455 }
456}
457
458static void hci_setup_link_policy(struct hci_request *req)
459{
460 struct hci_dev *hdev = req->hdev;
461 struct hci_cp_write_def_link_policy cp;
462 u16 link_policy = 0;
463
464 if (lmp_rswitch_capable(hdev))
465 link_policy |= HCI_LP_RSWITCH;
466 if (lmp_hold_capable(hdev))
467 link_policy |= HCI_LP_HOLD;
468 if (lmp_sniff_capable(hdev))
469 link_policy |= HCI_LP_SNIFF;
470 if (lmp_park_capable(hdev))
471 link_policy |= HCI_LP_PARK;
472
473 cp.policy = cpu_to_le16(link_policy);
474 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
475}
476
477static void hci_set_le_support(struct hci_request *req)
478{
479 struct hci_dev *hdev = req->hdev;
480 struct hci_cp_write_le_host_supported cp;
481
482 memset(&cp, 0, sizeof(cp));
483
484 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
485 cp.le = 0x01;
486 cp.simul = lmp_le_br_capable(hdev);
487 }
488
489 if (cp.le != lmp_host_le_capable(hdev))
490 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
491 &cp);
492}
493
494static void hci_init3_req(struct hci_request *req, unsigned long opt)
495{
496 struct hci_dev *hdev = req->hdev;
497
498 if (hdev->commands[5] & 0x10)
499 hci_setup_link_policy(req);
500
501 if (lmp_le_capable(hdev)) {
502 hci_set_le_support(req);
503 hci_update_ad(req);
504 }
505}
506
507static int __hci_init(struct hci_dev *hdev)
508{
509 int err;
510
511 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
512 if (err < 0)
513 return err;
514
515 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
516 * BR/EDR/LE type controllers. AMP controllers only need the
517 * first stage init.
518 */
519 if (hdev->dev_type != HCI_BREDR)
520 return 0;
521
522 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
523 if (err < 0)
524 return err;
525
526 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
527}
528
529static void hci_scan_req(struct hci_request *req, unsigned long opt)
242{ 530{
243 __u8 scan = opt; 531 __u8 scan = opt;
244 532
245 BT_DBG("%s %x", hdev->name, scan); 533 BT_DBG("%s %x", req->hdev->name, scan);
246 534
247 /* Inquiry and Page scans */ 535 /* Inquiry and Page scans */
248 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); 536 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
249} 537}
250 538
251static void hci_auth_req(struct hci_dev *hdev, unsigned long opt) 539static void hci_auth_req(struct hci_request *req, unsigned long opt)
252{ 540{
253 __u8 auth = opt; 541 __u8 auth = opt;
254 542
255 BT_DBG("%s %x", hdev->name, auth); 543 BT_DBG("%s %x", req->hdev->name, auth);
256 544
257 /* Authentication */ 545 /* Authentication */
258 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth); 546 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
259} 547}
260 548
261static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt) 549static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
262{ 550{
263 __u8 encrypt = opt; 551 __u8 encrypt = opt;
264 552
265 BT_DBG("%s %x", hdev->name, encrypt); 553 BT_DBG("%s %x", req->hdev->name, encrypt);
266 554
267 /* Encryption */ 555 /* Encryption */
268 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt); 556 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
269} 557}
270 558
271static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt) 559static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
272{ 560{
273 __le16 policy = cpu_to_le16(opt); 561 __le16 policy = cpu_to_le16(opt);
274 562
275 BT_DBG("%s %x", hdev->name, policy); 563 BT_DBG("%s %x", req->hdev->name, policy);
276 564
277 /* Default link policy */ 565 /* Default link policy */
278 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy); 566 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
279} 567}
280 568
281/* Get HCI device by index. 569/* Get HCI device by index.
@@ -512,9 +800,10 @@ static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
512 return copied; 800 return copied;
513} 801}
514 802
515static void hci_inq_req(struct hci_dev *hdev, unsigned long opt) 803static void hci_inq_req(struct hci_request *req, unsigned long opt)
516{ 804{
517 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt; 805 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
806 struct hci_dev *hdev = req->hdev;
518 struct hci_cp_inquiry cp; 807 struct hci_cp_inquiry cp;
519 808
520 BT_DBG("%s", hdev->name); 809 BT_DBG("%s", hdev->name);
@@ -526,7 +815,7 @@ static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
526 memcpy(&cp.lap, &ir->lap, 3); 815 memcpy(&cp.lap, &ir->lap, 3);
527 cp.length = ir->length; 816 cp.length = ir->length;
528 cp.num_rsp = ir->num_rsp; 817 cp.num_rsp = ir->num_rsp;
529 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp); 818 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
530} 819}
531 820
532int hci_inquiry(void __user *arg) 821int hci_inquiry(void __user *arg)
@@ -556,7 +845,8 @@ int hci_inquiry(void __user *arg)
556 timeo = ir.length * msecs_to_jiffies(2000); 845 timeo = ir.length * msecs_to_jiffies(2000);
557 846
558 if (do_inquiry) { 847 if (do_inquiry) {
559 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo); 848 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
849 timeo);
560 if (err < 0) 850 if (err < 0)
561 goto done; 851 goto done;
562 } 852 }
@@ -654,39 +944,29 @@ static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
654 return ad_len; 944 return ad_len;
655} 945}
656 946
657int hci_update_ad(struct hci_dev *hdev) 947void hci_update_ad(struct hci_request *req)
658{ 948{
949 struct hci_dev *hdev = req->hdev;
659 struct hci_cp_le_set_adv_data cp; 950 struct hci_cp_le_set_adv_data cp;
660 u8 len; 951 u8 len;
661 int err;
662 952
663 hci_dev_lock(hdev); 953 if (!lmp_le_capable(hdev))
664 954 return;
665 if (!lmp_le_capable(hdev)) {
666 err = -EINVAL;
667 goto unlock;
668 }
669 955
670 memset(&cp, 0, sizeof(cp)); 956 memset(&cp, 0, sizeof(cp));
671 957
672 len = create_ad(hdev, cp.data); 958 len = create_ad(hdev, cp.data);
673 959
674 if (hdev->adv_data_len == len && 960 if (hdev->adv_data_len == len &&
675 memcmp(cp.data, hdev->adv_data, len) == 0) { 961 memcmp(cp.data, hdev->adv_data, len) == 0)
676 err = 0; 962 return;
677 goto unlock;
678 }
679 963
680 memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); 964 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
681 hdev->adv_data_len = len; 965 hdev->adv_data_len = len;
682 966
683 cp.length = len; 967 cp.length = len;
684 err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
685 968
686unlock: 969 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
687 hci_dev_unlock(hdev);
688
689 return err;
690} 970}
691 971
692/* ---- HCI ioctl helpers ---- */ 972/* ---- HCI ioctl helpers ---- */
@@ -735,10 +1015,7 @@ int hci_dev_open(__u16 dev)
735 if (!test_bit(HCI_RAW, &hdev->flags)) { 1015 if (!test_bit(HCI_RAW, &hdev->flags)) {
736 atomic_set(&hdev->cmd_cnt, 1); 1016 atomic_set(&hdev->cmd_cnt, 1);
737 set_bit(HCI_INIT, &hdev->flags); 1017 set_bit(HCI_INIT, &hdev->flags);
738 hdev->init_last_cmd = 0; 1018 ret = __hci_init(hdev);
739
740 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
741
742 clear_bit(HCI_INIT, &hdev->flags); 1019 clear_bit(HCI_INIT, &hdev->flags);
743 } 1020 }
744 1021
@@ -746,7 +1023,6 @@ int hci_dev_open(__u16 dev)
746 hci_dev_hold(hdev); 1023 hci_dev_hold(hdev);
747 set_bit(HCI_UP, &hdev->flags); 1024 set_bit(HCI_UP, &hdev->flags);
748 hci_notify(hdev, HCI_DEV_UP); 1025 hci_notify(hdev, HCI_DEV_UP);
749 hci_update_ad(hdev);
750 if (!test_bit(HCI_SETUP, &hdev->dev_flags) && 1026 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
751 mgmt_valid_hdev(hdev)) { 1027 mgmt_valid_hdev(hdev)) {
752 hci_dev_lock(hdev); 1028 hci_dev_lock(hdev);
@@ -828,7 +1104,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
828 if (!test_bit(HCI_RAW, &hdev->flags) && 1104 if (!test_bit(HCI_RAW, &hdev->flags) &&
829 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { 1105 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
830 set_bit(HCI_INIT, &hdev->flags); 1106 set_bit(HCI_INIT, &hdev->flags);
831 __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT); 1107 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
832 clear_bit(HCI_INIT, &hdev->flags); 1108 clear_bit(HCI_INIT, &hdev->flags);
833 } 1109 }
834 1110
@@ -851,6 +1127,10 @@ static int hci_dev_do_close(struct hci_dev *hdev)
851 * and no tasks are scheduled. */ 1127 * and no tasks are scheduled. */
852 hdev->close(hdev); 1128 hdev->close(hdev);
853 1129
1130 /* Clear flags */
1131 hdev->flags = 0;
1132 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1133
854 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) && 1134 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
855 mgmt_valid_hdev(hdev)) { 1135 mgmt_valid_hdev(hdev)) {
856 hci_dev_lock(hdev); 1136 hci_dev_lock(hdev);
@@ -858,9 +1138,6 @@ static int hci_dev_do_close(struct hci_dev *hdev)
858 hci_dev_unlock(hdev); 1138 hci_dev_unlock(hdev);
859 } 1139 }
860 1140
861 /* Clear flags */
862 hdev->flags = 0;
863
864 /* Controller radio is available but is currently powered down */ 1141 /* Controller radio is available but is currently powered down */
865 hdev->amp_status = 0; 1142 hdev->amp_status = 0;
866 1143
@@ -921,7 +1198,7 @@ int hci_dev_reset(__u16 dev)
921 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0; 1198 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
922 1199
923 if (!test_bit(HCI_RAW, &hdev->flags)) 1200 if (!test_bit(HCI_RAW, &hdev->flags))
924 ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT); 1201 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
925 1202
926done: 1203done:
927 hci_req_unlock(hdev); 1204 hci_req_unlock(hdev);
@@ -960,8 +1237,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
960 1237
961 switch (cmd) { 1238 switch (cmd) {
962 case HCISETAUTH: 1239 case HCISETAUTH:
963 err = hci_request(hdev, hci_auth_req, dr.dev_opt, 1240 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
964 HCI_INIT_TIMEOUT); 1241 HCI_INIT_TIMEOUT);
965 break; 1242 break;
966 1243
967 case HCISETENCRYPT: 1244 case HCISETENCRYPT:
@@ -972,24 +1249,24 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
972 1249
973 if (!test_bit(HCI_AUTH, &hdev->flags)) { 1250 if (!test_bit(HCI_AUTH, &hdev->flags)) {
974 /* Auth must be enabled first */ 1251 /* Auth must be enabled first */
975 err = hci_request(hdev, hci_auth_req, dr.dev_opt, 1252 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
976 HCI_INIT_TIMEOUT); 1253 HCI_INIT_TIMEOUT);
977 if (err) 1254 if (err)
978 break; 1255 break;
979 } 1256 }
980 1257
981 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt, 1258 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
982 HCI_INIT_TIMEOUT); 1259 HCI_INIT_TIMEOUT);
983 break; 1260 break;
984 1261
985 case HCISETSCAN: 1262 case HCISETSCAN:
986 err = hci_request(hdev, hci_scan_req, dr.dev_opt, 1263 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
987 HCI_INIT_TIMEOUT); 1264 HCI_INIT_TIMEOUT);
988 break; 1265 break;
989 1266
990 case HCISETLINKPOL: 1267 case HCISETLINKPOL:
991 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt, 1268 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
992 HCI_INIT_TIMEOUT); 1269 HCI_INIT_TIMEOUT);
993 break; 1270 break;
994 1271
995 case HCISETLINKMODE: 1272 case HCISETLINKMODE:
@@ -1566,7 +1843,7 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1566 return mgmt_device_unblocked(hdev, bdaddr, type); 1843 return mgmt_device_unblocked(hdev, bdaddr, type);
1567} 1844}
1568 1845
1569static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt) 1846static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1570{ 1847{
1571 struct le_scan_params *param = (struct le_scan_params *) opt; 1848 struct le_scan_params *param = (struct le_scan_params *) opt;
1572 struct hci_cp_le_set_scan_param cp; 1849 struct hci_cp_le_set_scan_param cp;
@@ -1576,10 +1853,10 @@ static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1576 cp.interval = cpu_to_le16(param->interval); 1853 cp.interval = cpu_to_le16(param->interval);
1577 cp.window = cpu_to_le16(param->window); 1854 cp.window = cpu_to_le16(param->window);
1578 1855
1579 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp); 1856 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1580} 1857}
1581 1858
1582static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt) 1859static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
1583{ 1860{
1584 struct hci_cp_le_set_scan_enable cp; 1861 struct hci_cp_le_set_scan_enable cp;
1585 1862
@@ -1587,7 +1864,7 @@ static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1587 cp.enable = 1; 1864 cp.enable = 1;
1588 cp.filter_dup = 1; 1865 cp.filter_dup = 1;
1589 1866
1590 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); 1867 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1591} 1868}
1592 1869
1593static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval, 1870static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
@@ -1608,10 +1885,10 @@ static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1608 1885
1609 hci_req_lock(hdev); 1886 hci_req_lock(hdev);
1610 1887
1611 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param, 1888 err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
1612 timeo); 1889 timeo);
1613 if (!err) 1890 if (!err)
1614 err = __hci_request(hdev, le_scan_enable_req, 0, timeo); 1891 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
1615 1892
1616 hci_req_unlock(hdev); 1893 hci_req_unlock(hdev);
1617 1894
@@ -2160,20 +2437,55 @@ static int hci_send_frame(struct sk_buff *skb)
2160 return hdev->send(skb); 2437 return hdev->send(skb);
2161} 2438}
2162 2439
2163/* Send HCI command */ 2440void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2164int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param) 2441{
2442 skb_queue_head_init(&req->cmd_q);
2443 req->hdev = hdev;
2444 req->err = 0;
2445}
2446
2447int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2448{
2449 struct hci_dev *hdev = req->hdev;
2450 struct sk_buff *skb;
2451 unsigned long flags;
2452
2453 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2454
2455 /* If an error occured during request building, remove all HCI
2456 * commands queued on the HCI request queue.
2457 */
2458 if (req->err) {
2459 skb_queue_purge(&req->cmd_q);
2460 return req->err;
2461 }
2462
2463 /* Do not allow empty requests */
2464 if (skb_queue_empty(&req->cmd_q))
2465 return -ENODATA;
2466
2467 skb = skb_peek_tail(&req->cmd_q);
2468 bt_cb(skb)->req.complete = complete;
2469
2470 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2471 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2472 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2473
2474 queue_work(hdev->workqueue, &hdev->cmd_work);
2475
2476 return 0;
2477}
2478
2479static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2480 u32 plen, void *param)
2165{ 2481{
2166 int len = HCI_COMMAND_HDR_SIZE + plen; 2482 int len = HCI_COMMAND_HDR_SIZE + plen;
2167 struct hci_command_hdr *hdr; 2483 struct hci_command_hdr *hdr;
2168 struct sk_buff *skb; 2484 struct sk_buff *skb;
2169 2485
2170 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2171
2172 skb = bt_skb_alloc(len, GFP_ATOMIC); 2486 skb = bt_skb_alloc(len, GFP_ATOMIC);
2173 if (!skb) { 2487 if (!skb)
2174 BT_ERR("%s no memory for command", hdev->name); 2488 return NULL;
2175 return -ENOMEM;
2176 }
2177 2489
2178 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE); 2490 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2179 hdr->opcode = cpu_to_le16(opcode); 2491 hdr->opcode = cpu_to_le16(opcode);
@@ -2187,8 +2499,26 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2187 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; 2499 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2188 skb->dev = (void *) hdev; 2500 skb->dev = (void *) hdev;
2189 2501
2190 if (test_bit(HCI_INIT, &hdev->flags)) 2502 return skb;
2191 hdev->init_last_cmd = opcode; 2503}
2504
2505/* Send HCI command */
2506int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2507{
2508 struct sk_buff *skb;
2509
2510 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2511
2512 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2513 if (!skb) {
2514 BT_ERR("%s no memory for command", hdev->name);
2515 return -ENOMEM;
2516 }
2517
2518 /* Stand-alone HCI commands must be flaged as
2519 * single-command requests.
2520 */
2521 bt_cb(skb)->req.start = true;
2192 2522
2193 skb_queue_tail(&hdev->cmd_q, skb); 2523 skb_queue_tail(&hdev->cmd_q, skb);
2194 queue_work(hdev->workqueue, &hdev->cmd_work); 2524 queue_work(hdev->workqueue, &hdev->cmd_work);
@@ -2196,6 +2526,34 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2196 return 0; 2526 return 0;
2197} 2527}
2198 2528
2529/* Queue a command to an asynchronous HCI request */
2530void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
2531{
2532 struct hci_dev *hdev = req->hdev;
2533 struct sk_buff *skb;
2534
2535 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2536
2537 /* If an error occured during request building, there is no point in
2538 * queueing the HCI command. We can simply return.
2539 */
2540 if (req->err)
2541 return;
2542
2543 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2544 if (!skb) {
2545 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2546 hdev->name, opcode);
2547 req->err = -ENOMEM;
2548 return;
2549 }
2550
2551 if (skb_queue_empty(&req->cmd_q))
2552 bt_cb(skb)->req.start = true;
2553
2554 skb_queue_tail(&req->cmd_q, skb);
2555}
2556
2199/* Get data from the previously sent command */ 2557/* Get data from the previously sent command */
2200void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) 2558void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2201{ 2559{
@@ -2398,7 +2756,7 @@ static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2398 if (c->type == type && c->sent) { 2756 if (c->type == type && c->sent) {
2399 BT_ERR("%s killing stalled connection %pMR", 2757 BT_ERR("%s killing stalled connection %pMR",
2400 hdev->name, &c->dst); 2758 hdev->name, &c->dst);
2401 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM); 2759 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2402 } 2760 }
2403 } 2761 }
2404 2762
@@ -2860,6 +3218,123 @@ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2860 kfree_skb(skb); 3218 kfree_skb(skb);
2861} 3219}
2862 3220
3221static bool hci_req_is_complete(struct hci_dev *hdev)
3222{
3223 struct sk_buff *skb;
3224
3225 skb = skb_peek(&hdev->cmd_q);
3226 if (!skb)
3227 return true;
3228
3229 return bt_cb(skb)->req.start;
3230}
3231
3232static void hci_resend_last(struct hci_dev *hdev)
3233{
3234 struct hci_command_hdr *sent;
3235 struct sk_buff *skb;
3236 u16 opcode;
3237
3238 if (!hdev->sent_cmd)
3239 return;
3240
3241 sent = (void *) hdev->sent_cmd->data;
3242 opcode = __le16_to_cpu(sent->opcode);
3243 if (opcode == HCI_OP_RESET)
3244 return;
3245
3246 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3247 if (!skb)
3248 return;
3249
3250 skb_queue_head(&hdev->cmd_q, skb);
3251 queue_work(hdev->workqueue, &hdev->cmd_work);
3252}
3253
3254void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3255{
3256 hci_req_complete_t req_complete = NULL;
3257 struct sk_buff *skb;
3258 unsigned long flags;
3259
3260 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3261
3262 /* If the completed command doesn't match the last one that was
3263 * sent we need to do special handling of it.
3264 */
3265 if (!hci_sent_cmd_data(hdev, opcode)) {
3266 /* Some CSR based controllers generate a spontaneous
3267 * reset complete event during init and any pending
3268 * command will never be completed. In such a case we
3269 * need to resend whatever was the last sent
3270 * command.
3271 */
3272 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3273 hci_resend_last(hdev);
3274
3275 return;
3276 }
3277
3278 /* If the command succeeded and there's still more commands in
3279 * this request the request is not yet complete.
3280 */
3281 if (!status && !hci_req_is_complete(hdev))
3282 return;
3283
3284 /* If this was the last command in a request the complete
3285 * callback would be found in hdev->sent_cmd instead of the
3286 * command queue (hdev->cmd_q).
3287 */
3288 if (hdev->sent_cmd) {
3289 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3290 if (req_complete)
3291 goto call_complete;
3292 }
3293
3294 /* Remove all pending commands belonging to this request */
3295 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3296 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3297 if (bt_cb(skb)->req.start) {
3298 __skb_queue_head(&hdev->cmd_q, skb);
3299 break;
3300 }
3301
3302 req_complete = bt_cb(skb)->req.complete;
3303 kfree_skb(skb);
3304 }
3305 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3306
3307call_complete:
3308 if (req_complete)
3309 req_complete(hdev, status);
3310}
3311
3312void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
3313{
3314 hci_req_complete_t req_complete = NULL;
3315
3316 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3317
3318 if (status) {
3319 hci_req_cmd_complete(hdev, opcode, status);
3320 return;
3321 }
3322
3323 /* No need to handle success status if there are more commands */
3324 if (!hci_req_is_complete(hdev))
3325 return;
3326
3327 if (hdev->sent_cmd)
3328 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3329
3330 /* If the request doesn't have a complete callback or there
3331 * are other commands/requests in the hdev queue we consider
3332 * this request as completed.
3333 */
3334 if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
3335 hci_req_cmd_complete(hdev, opcode, status);
3336}
3337
2863static void hci_rx_work(struct work_struct *work) 3338static void hci_rx_work(struct work_struct *work)
2864{ 3339{
2865 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work); 3340 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 477726a63512..138580745c2c 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -53,7 +53,7 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
53 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 53 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
54 hci_dev_unlock(hdev); 54 hci_dev_unlock(hdev);
55 55
56 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status); 56 hci_req_cmd_complete(hdev, HCI_OP_INQUIRY, status);
57 57
58 hci_conn_check_pending(hdev); 58 hci_conn_check_pending(hdev);
59} 59}
@@ -183,8 +183,6 @@ static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
183 183
184 if (!status) 184 if (!status)
185 hdev->link_policy = get_unaligned_le16(sent); 185 hdev->link_policy = get_unaligned_le16(sent);
186
187 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
188} 186}
189 187
190static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) 188static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
@@ -195,11 +193,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
195 193
196 clear_bit(HCI_RESET, &hdev->flags); 194 clear_bit(HCI_RESET, &hdev->flags);
197 195
198 hci_req_complete(hdev, HCI_OP_RESET, status);
199
200 /* Reset all non-persistent flags */ 196 /* Reset all non-persistent flags */
201 hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) | 197 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
202 BIT(HCI_PERIODIC_INQ));
203 198
204 hdev->discovery.state = DISCOVERY_STOPPED; 199 hdev->discovery.state = DISCOVERY_STOPPED;
205 hdev->inq_tx_power = HCI_TX_POWER_INVALID; 200 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
@@ -228,11 +223,6 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
228 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); 223 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
229 224
230 hci_dev_unlock(hdev); 225 hci_dev_unlock(hdev);
231
232 if (!status && !test_bit(HCI_INIT, &hdev->flags))
233 hci_update_ad(hdev);
234
235 hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
236} 226}
237 227
238static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) 228static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -270,8 +260,6 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
270 260
271 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 261 if (test_bit(HCI_MGMT, &hdev->dev_flags))
272 mgmt_auth_enable_complete(hdev, status); 262 mgmt_auth_enable_complete(hdev, status);
273
274 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
275} 263}
276 264
277static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb) 265static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
@@ -293,8 +281,6 @@ static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
293 else 281 else
294 clear_bit(HCI_ENCRYPT, &hdev->flags); 282 clear_bit(HCI_ENCRYPT, &hdev->flags);
295 } 283 }
296
297 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
298} 284}
299 285
300static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) 286static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
@@ -343,7 +329,6 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
343 329
344done: 330done:
345 hci_dev_unlock(hdev); 331 hci_dev_unlock(hdev);
346 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
347} 332}
348 333
349static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) 334static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
@@ -435,15 +420,6 @@ static void hci_cc_write_voice_setting(struct hci_dev *hdev,
435 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 420 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
436} 421}
437 422
438static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
439{
440 __u8 status = *((__u8 *) skb->data);
441
442 BT_DBG("%s status 0x%2.2x", hdev->name, status);
443
444 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
445}
446
447static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) 423static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
448{ 424{
449 __u8 status = *((__u8 *) skb->data); 425 __u8 status = *((__u8 *) skb->data);
@@ -472,211 +448,6 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
472 } 448 }
473} 449}
474 450
475static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
476{
477 if (lmp_ext_inq_capable(hdev))
478 return 2;
479
480 if (lmp_inq_rssi_capable(hdev))
481 return 1;
482
483 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
484 hdev->lmp_subver == 0x0757)
485 return 1;
486
487 if (hdev->manufacturer == 15) {
488 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
489 return 1;
490 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
491 return 1;
492 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
493 return 1;
494 }
495
496 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
497 hdev->lmp_subver == 0x1805)
498 return 1;
499
500 return 0;
501}
502
503static void hci_setup_inquiry_mode(struct hci_dev *hdev)
504{
505 u8 mode;
506
507 mode = hci_get_inquiry_mode(hdev);
508
509 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
510}
511
512static void hci_setup_event_mask(struct hci_dev *hdev)
513{
514 /* The second byte is 0xff instead of 0x9f (two reserved bits
515 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
516 * command otherwise */
517 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
518
519 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
520 * any event mask for pre 1.2 devices */
521 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
522 return;
523
524 if (lmp_bredr_capable(hdev)) {
525 events[4] |= 0x01; /* Flow Specification Complete */
526 events[4] |= 0x02; /* Inquiry Result with RSSI */
527 events[4] |= 0x04; /* Read Remote Extended Features Complete */
528 events[5] |= 0x08; /* Synchronous Connection Complete */
529 events[5] |= 0x10; /* Synchronous Connection Changed */
530 }
531
532 if (lmp_inq_rssi_capable(hdev))
533 events[4] |= 0x02; /* Inquiry Result with RSSI */
534
535 if (lmp_sniffsubr_capable(hdev))
536 events[5] |= 0x20; /* Sniff Subrating */
537
538 if (lmp_pause_enc_capable(hdev))
539 events[5] |= 0x80; /* Encryption Key Refresh Complete */
540
541 if (lmp_ext_inq_capable(hdev))
542 events[5] |= 0x40; /* Extended Inquiry Result */
543
544 if (lmp_no_flush_capable(hdev))
545 events[7] |= 0x01; /* Enhanced Flush Complete */
546
547 if (lmp_lsto_capable(hdev))
548 events[6] |= 0x80; /* Link Supervision Timeout Changed */
549
550 if (lmp_ssp_capable(hdev)) {
551 events[6] |= 0x01; /* IO Capability Request */
552 events[6] |= 0x02; /* IO Capability Response */
553 events[6] |= 0x04; /* User Confirmation Request */
554 events[6] |= 0x08; /* User Passkey Request */
555 events[6] |= 0x10; /* Remote OOB Data Request */
556 events[6] |= 0x20; /* Simple Pairing Complete */
557 events[7] |= 0x04; /* User Passkey Notification */
558 events[7] |= 0x08; /* Keypress Notification */
559 events[7] |= 0x10; /* Remote Host Supported
560 * Features Notification */
561 }
562
563 if (lmp_le_capable(hdev))
564 events[7] |= 0x20; /* LE Meta-Event */
565
566 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
567
568 if (lmp_le_capable(hdev)) {
569 memset(events, 0, sizeof(events));
570 events[0] = 0x1f;
571 hci_send_cmd(hdev, HCI_OP_LE_SET_EVENT_MASK,
572 sizeof(events), events);
573 }
574}
575
576static void bredr_setup(struct hci_dev *hdev)
577{
578 struct hci_cp_delete_stored_link_key cp;
579 __le16 param;
580 __u8 flt_type;
581
582 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
583 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
584
585 /* Read Class of Device */
586 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
587
588 /* Read Local Name */
589 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
590
591 /* Read Voice Setting */
592 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
593
594 /* Clear Event Filters */
595 flt_type = HCI_FLT_CLEAR_ALL;
596 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
597
598 /* Connection accept timeout ~20 secs */
599 param = __constant_cpu_to_le16(0x7d00);
600 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
601
602 bacpy(&cp.bdaddr, BDADDR_ANY);
603 cp.delete_all = 1;
604 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
605}
606
607static void le_setup(struct hci_dev *hdev)
608{
609 /* Read LE Buffer Size */
610 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
611
612 /* Read LE Local Supported Features */
613 hci_send_cmd(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
614
615 /* Read LE Advertising Channel TX Power */
616 hci_send_cmd(hdev, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
617
618 /* Read LE White List Size */
619 hci_send_cmd(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
620
621 /* Read LE Supported States */
622 hci_send_cmd(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
623}
624
625static void hci_setup(struct hci_dev *hdev)
626{
627 if (hdev->dev_type != HCI_BREDR)
628 return;
629
630 /* Read BD Address */
631 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
632
633 if (lmp_bredr_capable(hdev))
634 bredr_setup(hdev);
635
636 if (lmp_le_capable(hdev))
637 le_setup(hdev);
638
639 hci_setup_event_mask(hdev);
640
641 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
642 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
643
644 if (lmp_ssp_capable(hdev)) {
645 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
646 u8 mode = 0x01;
647 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
648 sizeof(mode), &mode);
649 } else {
650 struct hci_cp_write_eir cp;
651
652 memset(hdev->eir, 0, sizeof(hdev->eir));
653 memset(&cp, 0, sizeof(cp));
654
655 hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
656 }
657 }
658
659 if (lmp_inq_rssi_capable(hdev))
660 hci_setup_inquiry_mode(hdev);
661
662 if (lmp_inq_tx_pwr_capable(hdev))
663 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
664
665 if (lmp_ext_feat_capable(hdev)) {
666 struct hci_cp_read_local_ext_features cp;
667
668 cp.page = 0x01;
669 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
670 &cp);
671 }
672
673 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
674 u8 enable = 1;
675 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
676 &enable);
677 }
678}
679
680static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) 451static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
681{ 452{
682 struct hci_rp_read_local_version *rp = (void *) skb->data; 453 struct hci_rp_read_local_version *rp = (void *) skb->data;
@@ -684,7 +455,7 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
684 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 455 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
685 456
686 if (rp->status) 457 if (rp->status)
687 goto done; 458 return;
688 459
689 hdev->hci_ver = rp->hci_ver; 460 hdev->hci_ver = rp->hci_ver;
690 hdev->hci_rev = __le16_to_cpu(rp->hci_rev); 461 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
@@ -694,30 +465,6 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
694 465
695 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name, 466 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
696 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev); 467 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
697
698 if (test_bit(HCI_INIT, &hdev->flags))
699 hci_setup(hdev);
700
701done:
702 hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
703}
704
705static void hci_setup_link_policy(struct hci_dev *hdev)
706{
707 struct hci_cp_write_def_link_policy cp;
708 u16 link_policy = 0;
709
710 if (lmp_rswitch_capable(hdev))
711 link_policy |= HCI_LP_RSWITCH;
712 if (lmp_hold_capable(hdev))
713 link_policy |= HCI_LP_HOLD;
714 if (lmp_sniff_capable(hdev))
715 link_policy |= HCI_LP_SNIFF;
716 if (lmp_park_capable(hdev))
717 link_policy |= HCI_LP_PARK;
718
719 cp.policy = cpu_to_le16(link_policy);
720 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
721} 468}
722 469
723static void hci_cc_read_local_commands(struct hci_dev *hdev, 470static void hci_cc_read_local_commands(struct hci_dev *hdev,
@@ -727,16 +474,8 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev,
727 474
728 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 475 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
729 476
730 if (rp->status) 477 if (!rp->status)
731 goto done; 478 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
732
733 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
734
735 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
736 hci_setup_link_policy(hdev);
737
738done:
739 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
740} 479}
741 480
742static void hci_cc_read_local_features(struct hci_dev *hdev, 481static void hci_cc_read_local_features(struct hci_dev *hdev,
@@ -795,22 +534,6 @@ static void hci_cc_read_local_features(struct hci_dev *hdev,
795 hdev->features[6], hdev->features[7]); 534 hdev->features[6], hdev->features[7]);
796} 535}
797 536
798static void hci_set_le_support(struct hci_dev *hdev)
799{
800 struct hci_cp_write_le_host_supported cp;
801
802 memset(&cp, 0, sizeof(cp));
803
804 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
805 cp.le = 1;
806 cp.simul = lmp_le_br_capable(hdev);
807 }
808
809 if (cp.le != lmp_host_le_capable(hdev))
810 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
811 &cp);
812}
813
814static void hci_cc_read_local_ext_features(struct hci_dev *hdev, 537static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
815 struct sk_buff *skb) 538 struct sk_buff *skb)
816{ 539{
@@ -819,7 +542,7 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
819 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 542 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
820 543
821 if (rp->status) 544 if (rp->status)
822 goto done; 545 return;
823 546
824 switch (rp->page) { 547 switch (rp->page) {
825 case 0: 548 case 0:
@@ -829,12 +552,6 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
829 memcpy(hdev->host_features, rp->features, 8); 552 memcpy(hdev->host_features, rp->features, 8);
830 break; 553 break;
831 } 554 }
832
833 if (test_bit(HCI_INIT, &hdev->flags) && lmp_le_capable(hdev))
834 hci_set_le_support(hdev);
835
836done:
837 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
838} 555}
839 556
840static void hci_cc_read_flow_control_mode(struct hci_dev *hdev, 557static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
@@ -844,12 +561,8 @@ static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
844 561
845 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 562 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
846 563
847 if (rp->status) 564 if (!rp->status)
848 return; 565 hdev->flow_ctl_mode = rp->mode;
849
850 hdev->flow_ctl_mode = rp->mode;
851
852 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
853} 566}
854 567
855static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) 568static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
@@ -886,8 +599,65 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
886 599
887 if (!rp->status) 600 if (!rp->status)
888 bacpy(&hdev->bdaddr, &rp->bdaddr); 601 bacpy(&hdev->bdaddr, &rp->bdaddr);
602}
603
604static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
605 struct sk_buff *skb)
606{
607 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
608
609 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
610
611 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
612 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
613 hdev->page_scan_window = __le16_to_cpu(rp->window);
614 }
615}
616
617static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
618 struct sk_buff *skb)
619{
620 u8 status = *((u8 *) skb->data);
621 struct hci_cp_write_page_scan_activity *sent;
622
623 BT_DBG("%s status 0x%2.2x", hdev->name, status);
624
625 if (status)
626 return;
627
628 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
629 if (!sent)
630 return;
631
632 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
633 hdev->page_scan_window = __le16_to_cpu(sent->window);
634}
635
636static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
637 struct sk_buff *skb)
638{
639 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
640
641 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
642
643 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
644 hdev->page_scan_type = rp->type;
645}
646
647static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
648 struct sk_buff *skb)
649{
650 u8 status = *((u8 *) skb->data);
651 u8 *type;
889 652
890 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status); 653 BT_DBG("%s status 0x%2.2x", hdev->name, status);
654
655 if (status)
656 return;
657
658 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
659 if (type)
660 hdev->page_scan_type = *type;
891} 661}
892 662
893static void hci_cc_read_data_block_size(struct hci_dev *hdev, 663static void hci_cc_read_data_block_size(struct hci_dev *hdev,
@@ -908,17 +678,6 @@ static void hci_cc_read_data_block_size(struct hci_dev *hdev,
908 678
909 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, 679 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
910 hdev->block_cnt, hdev->block_len); 680 hdev->block_cnt, hdev->block_len);
911
912 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
913}
914
915static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
916{
917 __u8 status = *((__u8 *) skb->data);
918
919 BT_DBG("%s status 0x%2.2x", hdev->name, status);
920
921 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
922} 681}
923 682
924static void hci_cc_read_local_amp_info(struct hci_dev *hdev, 683static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
@@ -942,8 +701,6 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
942 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to); 701 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
943 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to); 702 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
944 703
945 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
946
947a2mp_rsp: 704a2mp_rsp:
948 a2mp_send_getinfo_rsp(hdev); 705 a2mp_send_getinfo_rsp(hdev);
949} 706}
@@ -985,35 +742,6 @@ a2mp_rsp:
985 a2mp_send_create_phy_link_req(hdev, rp->status); 742 a2mp_send_create_phy_link_req(hdev, rp->status);
986} 743}
987 744
988static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
989 struct sk_buff *skb)
990{
991 __u8 status = *((__u8 *) skb->data);
992
993 BT_DBG("%s status 0x%2.2x", hdev->name, status);
994
995 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
996}
997
998static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
999{
1000 __u8 status = *((__u8 *) skb->data);
1001
1002 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1003
1004 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
1005}
1006
1007static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
1008 struct sk_buff *skb)
1009{
1010 __u8 status = *((__u8 *) skb->data);
1011
1012 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1013
1014 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
1015}
1016
1017static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, 745static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
1018 struct sk_buff *skb) 746 struct sk_buff *skb)
1019{ 747{
@@ -1023,17 +751,6 @@ static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
1023 751
1024 if (!rp->status) 752 if (!rp->status)
1025 hdev->inq_tx_power = rp->tx_power; 753 hdev->inq_tx_power = rp->tx_power;
1026
1027 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
1028}
1029
1030static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
1031{
1032 __u8 status = *((__u8 *) skb->data);
1033
1034 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1035
1036 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
1037} 754}
1038 755
1039static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb) 756static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1095,8 +812,6 @@ static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1095 hdev->le_cnt = hdev->le_pkts; 812 hdev->le_cnt = hdev->le_pkts;
1096 813
1097 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts); 814 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1098
1099 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
1100} 815}
1101 816
1102static void hci_cc_le_read_local_features(struct hci_dev *hdev, 817static void hci_cc_le_read_local_features(struct hci_dev *hdev,
@@ -1108,8 +823,6 @@ static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1108 823
1109 if (!rp->status) 824 if (!rp->status)
1110 memcpy(hdev->le_features, rp->features, 8); 825 memcpy(hdev->le_features, rp->features, 8);
1111
1112 hci_req_complete(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, rp->status);
1113} 826}
1114 827
1115static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, 828static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
@@ -1119,22 +832,8 @@ static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1119 832
1120 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 833 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1121 834
1122 if (!rp->status) { 835 if (!rp->status)
1123 hdev->adv_tx_power = rp->tx_power; 836 hdev->adv_tx_power = rp->tx_power;
1124 if (!test_bit(HCI_INIT, &hdev->flags))
1125 hci_update_ad(hdev);
1126 }
1127
1128 hci_req_complete(hdev, HCI_OP_LE_READ_ADV_TX_POWER, rp->status);
1129}
1130
1131static void hci_cc_le_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
1132{
1133 __u8 status = *((__u8 *) skb->data);
1134
1135 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1136
1137 hci_req_complete(hdev, HCI_OP_LE_SET_EVENT_MASK, status);
1138} 837}
1139 838
1140static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) 839static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1231,12 +930,15 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1231 clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags); 930 clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
1232 } 931 }
1233 932
1234 hci_dev_unlock(hdev); 933 if (!test_bit(HCI_INIT, &hdev->flags)) {
934 struct hci_request req;
1235 935
1236 if (!test_bit(HCI_INIT, &hdev->flags)) 936 hci_req_init(&req, hdev);
1237 hci_update_ad(hdev); 937 hci_update_ad(&req);
938 hci_req_run(&req, NULL);
939 }
1238 940
1239 hci_req_complete(hdev, HCI_OP_LE_SET_ADV_ENABLE, status); 941 hci_dev_unlock(hdev);
1240} 942}
1241 943
1242static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb) 944static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1245,8 +947,6 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1245 947
1246 BT_DBG("%s status 0x%2.2x", hdev->name, status); 948 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1247 949
1248 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1249
1250 if (status) { 950 if (status) {
1251 hci_dev_lock(hdev); 951 hci_dev_lock(hdev);
1252 mgmt_start_discovery_failed(hdev, status); 952 mgmt_start_discovery_failed(hdev, status);
@@ -1269,8 +969,6 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1269 969
1270 switch (cp->enable) { 970 switch (cp->enable) {
1271 case LE_SCANNING_ENABLED: 971 case LE_SCANNING_ENABLED:
1272 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1273
1274 if (status) { 972 if (status) {
1275 hci_dev_lock(hdev); 973 hci_dev_lock(hdev);
1276 mgmt_start_discovery_failed(hdev, status); 974 mgmt_start_discovery_failed(hdev, status);
@@ -1321,32 +1019,6 @@ static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1321 1019
1322 if (!rp->status) 1020 if (!rp->status)
1323 hdev->le_white_list_size = rp->size; 1021 hdev->le_white_list_size = rp->size;
1324
1325 hci_req_complete(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, rp->status);
1326}
1327
1328static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1329{
1330 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1331
1332 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1333
1334 if (rp->status)
1335 return;
1336
1337 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1338}
1339
1340static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1341{
1342 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1343
1344 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1345
1346 if (rp->status)
1347 return;
1348
1349 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1350} 1022}
1351 1023
1352static void hci_cc_le_read_supported_states(struct hci_dev *hdev, 1024static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
@@ -1358,8 +1030,6 @@ static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1358 1030
1359 if (!rp->status) 1031 if (!rp->status)
1360 memcpy(hdev->le_states, rp->le_states, 8); 1032 memcpy(hdev->le_states, rp->le_states, 8);
1361
1362 hci_req_complete(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, rp->status);
1363} 1033}
1364 1034
1365static void hci_cc_write_le_host_supported(struct hci_dev *hdev, 1035static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
@@ -1389,8 +1059,6 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1389 if (test_bit(HCI_MGMT, &hdev->dev_flags) && 1059 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1390 !test_bit(HCI_INIT, &hdev->flags)) 1060 !test_bit(HCI_INIT, &hdev->flags))
1391 mgmt_le_enable_complete(hdev, sent->le, status); 1061 mgmt_le_enable_complete(hdev, sent->le, status);
1392
1393 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1394} 1062}
1395 1063
1396static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev, 1064static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
@@ -1412,7 +1080,6 @@ static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1412 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1080 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1413 1081
1414 if (status) { 1082 if (status) {
1415 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1416 hci_conn_check_pending(hdev); 1083 hci_conn_check_pending(hdev);
1417 hci_dev_lock(hdev); 1084 hci_dev_lock(hdev);
1418 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 1085 if (test_bit(HCI_MGMT, &hdev->dev_flags))
@@ -1884,11 +1551,6 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1884 } 1551 }
1885} 1552}
1886 1553
1887static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1888{
1889 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1890}
1891
1892static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status) 1554static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1893{ 1555{
1894 struct hci_cp_create_phy_link *cp; 1556 struct hci_cp_create_phy_link *cp;
@@ -1930,11 +1592,6 @@ static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1930 amp_write_remote_assoc(hdev, cp->phy_handle); 1592 amp_write_remote_assoc(hdev, cp->phy_handle);
1931} 1593}
1932 1594
1933static void hci_cs_create_logical_link(struct hci_dev *hdev, u8 status)
1934{
1935 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1936}
1937
1938static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1595static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1939{ 1596{
1940 __u8 status = *((__u8 *) skb->data); 1597 __u8 status = *((__u8 *) skb->data);
@@ -1943,7 +1600,7 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1943 1600
1944 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1601 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1945 1602
1946 hci_req_complete(hdev, HCI_OP_INQUIRY, status); 1603 hci_req_cmd_complete(hdev, HCI_OP_INQUIRY, status);
1947 1604
1948 hci_conn_check_pending(hdev); 1605 hci_conn_check_pending(hdev);
1949 1606
@@ -2399,7 +2056,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2399 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 2056 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2400 2057
2401 if (ev->status && conn->state == BT_CONNECTED) { 2058 if (ev->status && conn->state == BT_CONNECTED) {
2402 hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE); 2059 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2403 hci_conn_put(conn); 2060 hci_conn_put(conn);
2404 goto unlock; 2061 goto unlock;
2405 } 2062 }
@@ -2491,20 +2148,10 @@ unlock:
2491 hci_dev_unlock(hdev); 2148 hci_dev_unlock(hdev);
2492} 2149}
2493 2150
2494static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2495{
2496 BT_DBG("%s", hdev->name);
2497}
2498
2499static void hci_qos_setup_complete_evt(struct hci_dev *hdev,
2500 struct sk_buff *skb)
2501{
2502 BT_DBG("%s", hdev->name);
2503}
2504
2505static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2151static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2506{ 2152{
2507 struct hci_ev_cmd_complete *ev = (void *) skb->data; 2153 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2154 u8 status = skb->data[sizeof(*ev)];
2508 __u16 opcode; 2155 __u16 opcode;
2509 2156
2510 skb_pull(skb, sizeof(*ev)); 2157 skb_pull(skb, sizeof(*ev));
@@ -2588,10 +2235,6 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2588 hci_cc_write_voice_setting(hdev, skb); 2235 hci_cc_write_voice_setting(hdev, skb);
2589 break; 2236 break;
2590 2237
2591 case HCI_OP_HOST_BUFFER_SIZE:
2592 hci_cc_host_buffer_size(hdev, skb);
2593 break;
2594
2595 case HCI_OP_WRITE_SSP_MODE: 2238 case HCI_OP_WRITE_SSP_MODE:
2596 hci_cc_write_ssp_mode(hdev, skb); 2239 hci_cc_write_ssp_mode(hdev, skb);
2597 break; 2240 break;
@@ -2620,46 +2263,42 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2620 hci_cc_read_bd_addr(hdev, skb); 2263 hci_cc_read_bd_addr(hdev, skb);
2621 break; 2264 break;
2622 2265
2623 case HCI_OP_READ_DATA_BLOCK_SIZE: 2266 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2624 hci_cc_read_data_block_size(hdev, skb); 2267 hci_cc_read_page_scan_activity(hdev, skb);
2625 break; 2268 break;
2626 2269
2627 case HCI_OP_WRITE_CA_TIMEOUT: 2270 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2628 hci_cc_write_ca_timeout(hdev, skb); 2271 hci_cc_write_page_scan_activity(hdev, skb);
2629 break; 2272 break;
2630 2273
2631 case HCI_OP_READ_FLOW_CONTROL_MODE: 2274 case HCI_OP_READ_PAGE_SCAN_TYPE:
2632 hci_cc_read_flow_control_mode(hdev, skb); 2275 hci_cc_read_page_scan_type(hdev, skb);
2633 break; 2276 break;
2634 2277
2635 case HCI_OP_READ_LOCAL_AMP_INFO: 2278 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2636 hci_cc_read_local_amp_info(hdev, skb); 2279 hci_cc_write_page_scan_type(hdev, skb);
2637 break; 2280 break;
2638 2281
2639 case HCI_OP_READ_LOCAL_AMP_ASSOC: 2282 case HCI_OP_READ_DATA_BLOCK_SIZE:
2640 hci_cc_read_local_amp_assoc(hdev, skb); 2283 hci_cc_read_data_block_size(hdev, skb);
2641 break; 2284 break;
2642 2285
2643 case HCI_OP_DELETE_STORED_LINK_KEY: 2286 case HCI_OP_READ_FLOW_CONTROL_MODE:
2644 hci_cc_delete_stored_link_key(hdev, skb); 2287 hci_cc_read_flow_control_mode(hdev, skb);
2645 break; 2288 break;
2646 2289
2647 case HCI_OP_SET_EVENT_MASK: 2290 case HCI_OP_READ_LOCAL_AMP_INFO:
2648 hci_cc_set_event_mask(hdev, skb); 2291 hci_cc_read_local_amp_info(hdev, skb);
2649 break; 2292 break;
2650 2293
2651 case HCI_OP_WRITE_INQUIRY_MODE: 2294 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2652 hci_cc_write_inquiry_mode(hdev, skb); 2295 hci_cc_read_local_amp_assoc(hdev, skb);
2653 break; 2296 break;
2654 2297
2655 case HCI_OP_READ_INQ_RSP_TX_POWER: 2298 case HCI_OP_READ_INQ_RSP_TX_POWER:
2656 hci_cc_read_inq_rsp_tx_power(hdev, skb); 2299 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2657 break; 2300 break;
2658 2301
2659 case HCI_OP_SET_EVENT_FLT:
2660 hci_cc_set_event_flt(hdev, skb);
2661 break;
2662
2663 case HCI_OP_PIN_CODE_REPLY: 2302 case HCI_OP_PIN_CODE_REPLY:
2664 hci_cc_pin_code_reply(hdev, skb); 2303 hci_cc_pin_code_reply(hdev, skb);
2665 break; 2304 break;
@@ -2684,10 +2323,6 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2684 hci_cc_le_read_adv_tx_power(hdev, skb); 2323 hci_cc_le_read_adv_tx_power(hdev, skb);
2685 break; 2324 break;
2686 2325
2687 case HCI_OP_LE_SET_EVENT_MASK:
2688 hci_cc_le_set_event_mask(hdev, skb);
2689 break;
2690
2691 case HCI_OP_USER_CONFIRM_REPLY: 2326 case HCI_OP_USER_CONFIRM_REPLY:
2692 hci_cc_user_confirm_reply(hdev, skb); 2327 hci_cc_user_confirm_reply(hdev, skb);
2693 break; 2328 break;
@@ -2720,14 +2355,6 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2720 hci_cc_le_read_white_list_size(hdev, skb); 2355 hci_cc_le_read_white_list_size(hdev, skb);
2721 break; 2356 break;
2722 2357
2723 case HCI_OP_LE_LTK_REPLY:
2724 hci_cc_le_ltk_reply(hdev, skb);
2725 break;
2726
2727 case HCI_OP_LE_LTK_NEG_REPLY:
2728 hci_cc_le_ltk_neg_reply(hdev, skb);
2729 break;
2730
2731 case HCI_OP_LE_READ_SUPPORTED_STATES: 2358 case HCI_OP_LE_READ_SUPPORTED_STATES:
2732 hci_cc_le_read_supported_states(hdev, skb); 2359 hci_cc_le_read_supported_states(hdev, skb);
2733 break; 2360 break;
@@ -2745,9 +2372,11 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2745 break; 2372 break;
2746 } 2373 }
2747 2374
2748 if (ev->opcode != HCI_OP_NOP) 2375 if (opcode != HCI_OP_NOP)
2749 del_timer(&hdev->cmd_timer); 2376 del_timer(&hdev->cmd_timer);
2750 2377
2378 hci_req_cmd_complete(hdev, opcode, status);
2379
2751 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { 2380 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2752 atomic_set(&hdev->cmd_cnt, 1); 2381 atomic_set(&hdev->cmd_cnt, 1);
2753 if (!skb_queue_empty(&hdev->cmd_q)) 2382 if (!skb_queue_empty(&hdev->cmd_q))
@@ -2817,10 +2446,6 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2817 hci_cs_le_create_conn(hdev, ev->status); 2446 hci_cs_le_create_conn(hdev, ev->status);
2818 break; 2447 break;
2819 2448
2820 case HCI_OP_LE_START_ENC:
2821 hci_cs_le_start_enc(hdev, ev->status);
2822 break;
2823
2824 case HCI_OP_CREATE_PHY_LINK: 2449 case HCI_OP_CREATE_PHY_LINK:
2825 hci_cs_create_phylink(hdev, ev->status); 2450 hci_cs_create_phylink(hdev, ev->status);
2826 break; 2451 break;
@@ -2829,18 +2454,16 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2829 hci_cs_accept_phylink(hdev, ev->status); 2454 hci_cs_accept_phylink(hdev, ev->status);
2830 break; 2455 break;
2831 2456
2832 case HCI_OP_CREATE_LOGICAL_LINK:
2833 hci_cs_create_logical_link(hdev, ev->status);
2834 break;
2835
2836 default: 2457 default:
2837 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); 2458 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2838 break; 2459 break;
2839 } 2460 }
2840 2461
2841 if (ev->opcode != HCI_OP_NOP) 2462 if (opcode != HCI_OP_NOP)
2842 del_timer(&hdev->cmd_timer); 2463 del_timer(&hdev->cmd_timer);
2843 2464
2465 hci_req_cmd_status(hdev, opcode, ev->status);
2466
2844 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { 2467 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2845 atomic_set(&hdev->cmd_cnt, 1); 2468 atomic_set(&hdev->cmd_cnt, 1);
2846 if (!skb_queue_empty(&hdev->cmd_q)) 2469 if (!skb_queue_empty(&hdev->cmd_q))
@@ -3391,18 +3014,6 @@ unlock:
3391 hci_dev_unlock(hdev); 3014 hci_dev_unlock(hdev);
3392} 3015}
3393 3016
3394static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
3395{
3396 BT_DBG("%s", hdev->name);
3397}
3398
3399static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
3400{
3401 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
3402
3403 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3404}
3405
3406static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, 3017static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3407 struct sk_buff *skb) 3018 struct sk_buff *skb)
3408{ 3019{
@@ -3472,7 +3083,7 @@ static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3472 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3083 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3473 3084
3474 if (ev->status && conn->state == BT_CONNECTED) { 3085 if (ev->status && conn->state == BT_CONNECTED) {
3475 hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE); 3086 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3476 hci_conn_put(conn); 3087 hci_conn_put(conn);
3477 goto unlock; 3088 goto unlock;
3478 } 3089 }
@@ -4130,14 +3741,6 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4130 hci_remote_features_evt(hdev, skb); 3741 hci_remote_features_evt(hdev, skb);
4131 break; 3742 break;
4132 3743
4133 case HCI_EV_REMOTE_VERSION:
4134 hci_remote_version_evt(hdev, skb);
4135 break;
4136
4137 case HCI_EV_QOS_SETUP_COMPLETE:
4138 hci_qos_setup_complete_evt(hdev, skb);
4139 break;
4140
4141 case HCI_EV_CMD_COMPLETE: 3744 case HCI_EV_CMD_COMPLETE:
4142 hci_cmd_complete_evt(hdev, skb); 3745 hci_cmd_complete_evt(hdev, skb);
4143 break; 3746 break;
@@ -4194,14 +3797,6 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4194 hci_sync_conn_complete_evt(hdev, skb); 3797 hci_sync_conn_complete_evt(hdev, skb);
4195 break; 3798 break;
4196 3799
4197 case HCI_EV_SYNC_CONN_CHANGED:
4198 hci_sync_conn_changed_evt(hdev, skb);
4199 break;
4200
4201 case HCI_EV_SNIFF_SUBRATE:
4202 hci_sniff_subrate_evt(hdev, skb);
4203 break;
4204
4205 case HCI_EV_EXTENDED_INQUIRY_RESULT: 3800 case HCI_EV_EXTENDED_INQUIRY_RESULT:
4206 hci_extended_inquiry_result_evt(hdev, skb); 3801 hci_extended_inquiry_result_evt(hdev, skb);
4207 break; 3802 break;
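The hci_event.c hunks above drop the per-opcode hci_req_complete() calls and instead funnel every Command Complete and Command Status event through hci_req_cmd_complete() and hci_req_cmd_status(). That centralised dispatch is what lets several HCI commands be batched into one request and completed through a single callback. The sketch below shows the caller-side pattern the series converts code to; the function and callback names are invented for illustration, while the hci_req_init()/hci_req_add()/hci_req_run() calls and the callback signature follow the usage visible later in this patch (e.g. set_connectable() in mgmt.c).

/* Illustrative sketch only, not part of the patch. */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int example_send_batched(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 scan = SCAN_PAGE;

	hci_req_init(&req, hdev);

	/* Commands are only queued at this point... */
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	/* ...and submitted as one request; the callback runs once the
	 * last command completes (or as soon as one of them fails).
	 */
	return hci_req_run(&req, example_req_complete);
}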
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 6a93614f2c49..aa4354fca77c 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -854,6 +854,11 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
854 skb_queue_tail(&hdev->raw_q, skb); 854 skb_queue_tail(&hdev->raw_q, skb);
855 queue_work(hdev->workqueue, &hdev->tx_work); 855 queue_work(hdev->workqueue, &hdev->tx_work);
856 } else { 856 } else {
 857 /* Stand-alone HCI commands must be flagged as
858 * single-command requests.
859 */
860 bt_cb(skb)->req.start = true;
861
857 skb_queue_tail(&hdev->cmd_q, skb); 862 skb_queue_tail(&hdev->cmd_q, skb);
858 queue_work(hdev->workqueue, &hdev->cmd_work); 863 queue_work(hdev->workqueue, &hdev->cmd_work);
859 } 864 }
@@ -1121,8 +1126,6 @@ error:
1121void hci_sock_cleanup(void) 1126void hci_sock_cleanup(void)
1122{ 1127{
1123 bt_procfs_cleanup(&init_net, "hci"); 1128 bt_procfs_cleanup(&init_net, "hci");
1124 if (bt_sock_unregister(BTPROTO_HCI) < 0) 1129 bt_sock_unregister(BTPROTO_HCI);
1125 BT_ERR("HCI socket unregistration failed");
1126
1127 proto_unregister(&hci_sk_proto); 1130 proto_unregister(&hci_sk_proto);
1128} 1131}
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 23b4e242a31a..ff38561385de 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -590,10 +590,8 @@ int __init bt_sysfs_init(void)
590 bt_debugfs = debugfs_create_dir("bluetooth", NULL); 590 bt_debugfs = debugfs_create_dir("bluetooth", NULL);
591 591
592 bt_class = class_create(THIS_MODULE, "bluetooth"); 592 bt_class = class_create(THIS_MODULE, "bluetooth");
593 if (IS_ERR(bt_class))
594 return PTR_ERR(bt_class);
595 593
596 return 0; 594 return PTR_RET(bt_class);
597} 595}
598 596
599void bt_sysfs_cleanup(void) 597void bt_sysfs_cleanup(void)
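The bt_sysfs_init() change above relies on PTR_RET() from <linux/err.h> (later renamed PTR_ERR_OR_ZERO()), so the error from class_create() is still propagated; only the open-coded check goes away. Roughly, it behaves like the sketch below (the helper name is made up for illustration):

/* Sketch of PTR_RET() semantics, not part of the patch. */
static inline int ptr_ret_sketch(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	return 0;
}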
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index a7352ff3fd1e..2342327f3335 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -311,6 +311,9 @@ static int hidp_get_raw_report(struct hid_device *hid,
311 int numbered_reports = hid->report_enum[report_type].numbered; 311 int numbered_reports = hid->report_enum[report_type].numbered;
312 int ret; 312 int ret;
313 313
314 if (atomic_read(&session->terminate))
315 return -EIO;
316
314 switch (report_type) { 317 switch (report_type) {
315 case HID_FEATURE_REPORT: 318 case HID_FEATURE_REPORT:
316 report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_FEATURE; 319 report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_FEATURE;
@@ -722,6 +725,7 @@ static int hidp_session(void *arg)
722 set_current_state(TASK_INTERRUPTIBLE); 725 set_current_state(TASK_INTERRUPTIBLE);
723 } 726 }
724 set_current_state(TASK_RUNNING); 727 set_current_state(TASK_RUNNING);
728 atomic_inc(&session->terminate);
725 remove_wait_queue(sk_sleep(intr_sk), &intr_wait); 729 remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
726 remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait); 730 remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
727 731
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 82a829d90b0f..5d0f1ca0a314 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -304,8 +304,6 @@ error:
304void __exit hidp_cleanup_sockets(void) 304void __exit hidp_cleanup_sockets(void)
305{ 305{
306 bt_procfs_cleanup(&init_net, "hidp"); 306 bt_procfs_cleanup(&init_net, "hidp");
307 if (bt_sock_unregister(BTPROTO_HIDP) < 0) 307 bt_sock_unregister(BTPROTO_HIDP);
308 BT_ERR("Can't unregister HIDP socket");
309
310 proto_unregister(&hidp_proto); 308 proto_unregister(&hidp_proto);
311} 309}
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 1bcfb8422fdc..7f9704993b74 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1312,8 +1312,6 @@ error:
1312void l2cap_cleanup_sockets(void) 1312void l2cap_cleanup_sockets(void)
1313{ 1313{
1314 bt_procfs_cleanup(&init_net, "l2cap"); 1314 bt_procfs_cleanup(&init_net, "l2cap");
1315 if (bt_sock_unregister(BTPROTO_L2CAP) < 0) 1315 bt_sock_unregister(BTPROTO_L2CAP);
1316 BT_ERR("L2CAP socket unregistration failed");
1317
1318 proto_unregister(&l2cap_proto); 1316 proto_unregister(&l2cap_proto);
1319} 1317}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 39395c7144aa..03e7e732215f 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -384,7 +384,8 @@ static u32 get_supported_settings(struct hci_dev *hdev)
384 384
385 if (lmp_bredr_capable(hdev)) { 385 if (lmp_bredr_capable(hdev)) {
386 settings |= MGMT_SETTING_CONNECTABLE; 386 settings |= MGMT_SETTING_CONNECTABLE;
387 settings |= MGMT_SETTING_FAST_CONNECTABLE; 387 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
388 settings |= MGMT_SETTING_FAST_CONNECTABLE;
388 settings |= MGMT_SETTING_DISCOVERABLE; 389 settings |= MGMT_SETTING_DISCOVERABLE;
389 settings |= MGMT_SETTING_BREDR; 390 settings |= MGMT_SETTING_BREDR;
390 settings |= MGMT_SETTING_LINK_SECURITY; 391 settings |= MGMT_SETTING_LINK_SECURITY;
@@ -409,6 +410,9 @@ static u32 get_current_settings(struct hci_dev *hdev)
409 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) 410 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
410 settings |= MGMT_SETTING_CONNECTABLE; 411 settings |= MGMT_SETTING_CONNECTABLE;
411 412
413 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
414 settings |= MGMT_SETTING_FAST_CONNECTABLE;
415
412 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) 416 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
413 settings |= MGMT_SETTING_DISCOVERABLE; 417 settings |= MGMT_SETTING_DISCOVERABLE;
414 418
@@ -591,32 +595,33 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
591 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); 595 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
592} 596}
593 597
594static int update_eir(struct hci_dev *hdev) 598static void update_eir(struct hci_request *req)
595{ 599{
600 struct hci_dev *hdev = req->hdev;
596 struct hci_cp_write_eir cp; 601 struct hci_cp_write_eir cp;
597 602
598 if (!hdev_is_powered(hdev)) 603 if (!hdev_is_powered(hdev))
599 return 0; 604 return;
600 605
601 if (!lmp_ext_inq_capable(hdev)) 606 if (!lmp_ext_inq_capable(hdev))
602 return 0; 607 return;
603 608
604 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) 609 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
605 return 0; 610 return;
606 611
607 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) 612 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
608 return 0; 613 return;
609 614
610 memset(&cp, 0, sizeof(cp)); 615 memset(&cp, 0, sizeof(cp));
611 616
612 create_eir(hdev, cp.data); 617 create_eir(hdev, cp.data);
613 618
614 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) 619 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
615 return 0; 620 return;
616 621
617 memcpy(hdev->eir, cp.data, sizeof(cp.data)); 622 memcpy(hdev->eir, cp.data, sizeof(cp.data));
618 623
619 return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp); 624 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
620} 625}
621 626
622static u8 get_service_classes(struct hci_dev *hdev) 627static u8 get_service_classes(struct hci_dev *hdev)
@@ -630,47 +635,48 @@ static u8 get_service_classes(struct hci_dev *hdev)
630 return val; 635 return val;
631} 636}
632 637
633static int update_class(struct hci_dev *hdev) 638static void update_class(struct hci_request *req)
634{ 639{
640 struct hci_dev *hdev = req->hdev;
635 u8 cod[3]; 641 u8 cod[3];
636 int err;
637 642
638 BT_DBG("%s", hdev->name); 643 BT_DBG("%s", hdev->name);
639 644
640 if (!hdev_is_powered(hdev)) 645 if (!hdev_is_powered(hdev))
641 return 0; 646 return;
642 647
643 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) 648 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
644 return 0; 649 return;
645 650
646 cod[0] = hdev->minor_class; 651 cod[0] = hdev->minor_class;
647 cod[1] = hdev->major_class; 652 cod[1] = hdev->major_class;
648 cod[2] = get_service_classes(hdev); 653 cod[2] = get_service_classes(hdev);
649 654
650 if (memcmp(cod, hdev->dev_class, 3) == 0) 655 if (memcmp(cod, hdev->dev_class, 3) == 0)
651 return 0; 656 return;
652
653 err = hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
654 if (err == 0)
655 set_bit(HCI_PENDING_CLASS, &hdev->dev_flags);
656 657
657 return err; 658 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
658} 659}
659 660
660static void service_cache_off(struct work_struct *work) 661static void service_cache_off(struct work_struct *work)
661{ 662{
662 struct hci_dev *hdev = container_of(work, struct hci_dev, 663 struct hci_dev *hdev = container_of(work, struct hci_dev,
663 service_cache.work); 664 service_cache.work);
665 struct hci_request req;
664 666
665 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) 667 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
666 return; 668 return;
667 669
670 hci_req_init(&req, hdev);
671
668 hci_dev_lock(hdev); 672 hci_dev_lock(hdev);
669 673
670 update_eir(hdev); 674 update_eir(&req);
671 update_class(hdev); 675 update_class(&req);
672 676
673 hci_dev_unlock(hdev); 677 hci_dev_unlock(hdev);
678
679 hci_req_run(&req, NULL);
674} 680}
675 681
676static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev) 682static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
@@ -994,11 +1000,64 @@ failed:
994 return err; 1000 return err;
995} 1001}
996 1002
1003static void write_fast_connectable(struct hci_request *req, bool enable)
1004{
1005 struct hci_dev *hdev = req->hdev;
1006 struct hci_cp_write_page_scan_activity acp;
1007 u8 type;
1008
1009 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1010 return;
1011
1012 if (enable) {
1013 type = PAGE_SCAN_TYPE_INTERLACED;
1014
1015 /* 160 msec page scan interval */
1016 acp.interval = __constant_cpu_to_le16(0x0100);
1017 } else {
1018 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1019
1020 /* default 1.28 sec page scan */
1021 acp.interval = __constant_cpu_to_le16(0x0800);
1022 }
1023
1024 acp.window = __constant_cpu_to_le16(0x0012);
1025
1026 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1027 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1028 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1029 sizeof(acp), &acp);
1030
1031 if (hdev->page_scan_type != type)
1032 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1033}
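For reference, the page scan parameters written by write_fast_connectable() are expressed in baseband slots of 0.625 ms: interval 0x0100 = 256 slots = 160 ms (fast connectable), interval 0x0800 = 2048 slots = 1.28 s (the default), and window 0x0012 = 18 slots = 11.25 ms, which matches the comments in the hunk above.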
1034
1035static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1036{
1037 struct pending_cmd *cmd;
1038
1039 BT_DBG("status 0x%02x", status);
1040
1041 hci_dev_lock(hdev);
1042
1043 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1044 if (!cmd)
1045 goto unlock;
1046
1047 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1048
1049 mgmt_pending_remove(cmd);
1050
1051unlock:
1052 hci_dev_unlock(hdev);
1053}
1054
997static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, 1055static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
998 u16 len) 1056 u16 len)
999{ 1057{
1000 struct mgmt_mode *cp = data; 1058 struct mgmt_mode *cp = data;
1001 struct pending_cmd *cmd; 1059 struct pending_cmd *cmd;
1060 struct hci_request req;
1002 u8 scan; 1061 u8 scan;
1003 int err; 1062 int err;
1004 1063
@@ -1065,7 +1124,20 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1065 cancel_delayed_work(&hdev->discov_off); 1124 cancel_delayed_work(&hdev->discov_off);
1066 } 1125 }
1067 1126
1068 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); 1127 hci_req_init(&req, hdev);
1128
1129 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1130
1131 /* If we're going from non-connectable to connectable or
 1132 * vice-versa when fast connectable is enabled, ensure that fast
1133 * connectable gets disabled. write_fast_connectable won't do
1134 * anything if the page scan parameters are already what they
1135 * should be.
1136 */
1137 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1138 write_fast_connectable(&req, false);
1139
1140 err = hci_req_run(&req, set_connectable_complete);
1069 if (err < 0) 1141 if (err < 0)
1070 mgmt_pending_remove(cmd); 1142 mgmt_pending_remove(cmd);
1071 1143
@@ -1332,6 +1404,29 @@ unlock:
1332 return err; 1404 return err;
1333} 1405}
1334 1406
1407/* This is a helper function to test for pending mgmt commands that can
1408 * cause CoD or EIR HCI commands. We can only allow one such pending
1409 * mgmt command at a time since otherwise we cannot easily track what
 1410 * the current values are or will be, and based on that calculate whether a new
 1411 * HCI command needs to be sent and, if so, with what value.
1412 */
1413static bool pending_eir_or_class(struct hci_dev *hdev)
1414{
1415 struct pending_cmd *cmd;
1416
1417 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1418 switch (cmd->opcode) {
1419 case MGMT_OP_ADD_UUID:
1420 case MGMT_OP_REMOVE_UUID:
1421 case MGMT_OP_SET_DEV_CLASS:
1422 case MGMT_OP_SET_POWERED:
1423 return true;
1424 }
1425 }
1426
1427 return false;
1428}
1429
1335static const u8 bluetooth_base_uuid[] = { 1430static const u8 bluetooth_base_uuid[] = {
1336 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80, 1431 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1337 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1432 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -1351,10 +1446,37 @@ static u8 get_uuid_size(const u8 *uuid)
1351 return 16; 1446 return 16;
1352} 1447}
1353 1448
1449static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1450{
1451 struct pending_cmd *cmd;
1452
1453 hci_dev_lock(hdev);
1454
1455 cmd = mgmt_pending_find(mgmt_op, hdev);
1456 if (!cmd)
1457 goto unlock;
1458
1459 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1460 hdev->dev_class, 3);
1461
1462 mgmt_pending_remove(cmd);
1463
1464unlock:
1465 hci_dev_unlock(hdev);
1466}
1467
1468static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1469{
1470 BT_DBG("status 0x%02x", status);
1471
1472 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
1473}
1474
1354static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) 1475static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1355{ 1476{
1356 struct mgmt_cp_add_uuid *cp = data; 1477 struct mgmt_cp_add_uuid *cp = data;
1357 struct pending_cmd *cmd; 1478 struct pending_cmd *cmd;
1479 struct hci_request req;
1358 struct bt_uuid *uuid; 1480 struct bt_uuid *uuid;
1359 int err; 1481 int err;
1360 1482
@@ -1362,7 +1484,7 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1362 1484
1363 hci_dev_lock(hdev); 1485 hci_dev_lock(hdev);
1364 1486
1365 if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) { 1487 if (pending_eir_or_class(hdev)) {
1366 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID, 1488 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1367 MGMT_STATUS_BUSY); 1489 MGMT_STATUS_BUSY);
1368 goto failed; 1490 goto failed;
@@ -1380,23 +1502,28 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1380 1502
1381 list_add_tail(&uuid->list, &hdev->uuids); 1503 list_add_tail(&uuid->list, &hdev->uuids);
1382 1504
1383 err = update_class(hdev); 1505 hci_req_init(&req, hdev);
1384 if (err < 0)
1385 goto failed;
1386 1506
1387 err = update_eir(hdev); 1507 update_class(&req);
1388 if (err < 0) 1508 update_eir(&req);
1389 goto failed; 1509
1510 err = hci_req_run(&req, add_uuid_complete);
1511 if (err < 0) {
1512 if (err != -ENODATA)
1513 goto failed;
1390 1514
1391 if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1392 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0, 1515 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1393 hdev->dev_class, 3); 1516 hdev->dev_class, 3);
1394 goto failed; 1517 goto failed;
1395 } 1518 }
1396 1519
1397 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len); 1520 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1398 if (!cmd) 1521 if (!cmd) {
1399 err = -ENOMEM; 1522 err = -ENOMEM;
1523 goto failed;
1524 }
1525
1526 err = 0;
1400 1527
1401failed: 1528failed:
1402 hci_dev_unlock(hdev); 1529 hci_dev_unlock(hdev);
@@ -1417,6 +1544,13 @@ static bool enable_service_cache(struct hci_dev *hdev)
1417 return false; 1544 return false;
1418} 1545}
1419 1546
1547static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1548{
1549 BT_DBG("status 0x%02x", status);
1550
1551 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
1552}
1553
1420static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, 1554static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1421 u16 len) 1555 u16 len)
1422{ 1556{
@@ -1424,13 +1558,14 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1424 struct pending_cmd *cmd; 1558 struct pending_cmd *cmd;
1425 struct bt_uuid *match, *tmp; 1559 struct bt_uuid *match, *tmp;
1426 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 1560 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1561 struct hci_request req;
1427 int err, found; 1562 int err, found;
1428 1563
1429 BT_DBG("request for %s", hdev->name); 1564 BT_DBG("request for %s", hdev->name);
1430 1565
1431 hci_dev_lock(hdev); 1566 hci_dev_lock(hdev);
1432 1567
1433 if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) { 1568 if (pending_eir_or_class(hdev)) {
1434 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID, 1569 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1435 MGMT_STATUS_BUSY); 1570 MGMT_STATUS_BUSY);
1436 goto unlock; 1571 goto unlock;
@@ -1466,34 +1601,47 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1466 } 1601 }
1467 1602
1468update_class: 1603update_class:
1469 err = update_class(hdev); 1604 hci_req_init(&req, hdev);
1470 if (err < 0)
1471 goto unlock;
1472 1605
1473 err = update_eir(hdev); 1606 update_class(&req);
1474 if (err < 0) 1607 update_eir(&req);
1475 goto unlock; 1608
1609 err = hci_req_run(&req, remove_uuid_complete);
1610 if (err < 0) {
1611 if (err != -ENODATA)
1612 goto unlock;
1476 1613
1477 if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1478 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0, 1614 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1479 hdev->dev_class, 3); 1615 hdev->dev_class, 3);
1480 goto unlock; 1616 goto unlock;
1481 } 1617 }
1482 1618
1483 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len); 1619 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1484 if (!cmd) 1620 if (!cmd) {
1485 err = -ENOMEM; 1621 err = -ENOMEM;
1622 goto unlock;
1623 }
1624
1625 err = 0;
1486 1626
1487unlock: 1627unlock:
1488 hci_dev_unlock(hdev); 1628 hci_dev_unlock(hdev);
1489 return err; 1629 return err;
1490} 1630}
1491 1631
1632static void set_class_complete(struct hci_dev *hdev, u8 status)
1633{
1634 BT_DBG("status 0x%02x", status);
1635
1636 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
1637}
1638
1492static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, 1639static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1493 u16 len) 1640 u16 len)
1494{ 1641{
1495 struct mgmt_cp_set_dev_class *cp = data; 1642 struct mgmt_cp_set_dev_class *cp = data;
1496 struct pending_cmd *cmd; 1643 struct pending_cmd *cmd;
1644 struct hci_request req;
1497 int err; 1645 int err;
1498 1646
1499 BT_DBG("request for %s", hdev->name); 1647 BT_DBG("request for %s", hdev->name);
@@ -1502,15 +1650,19 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1502 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 1650 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1503 MGMT_STATUS_NOT_SUPPORTED); 1651 MGMT_STATUS_NOT_SUPPORTED);
1504 1652
1505 if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) 1653 hci_dev_lock(hdev);
1506 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1507 MGMT_STATUS_BUSY);
1508 1654
1509 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) 1655 if (pending_eir_or_class(hdev)) {
1510 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 1656 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1511 MGMT_STATUS_INVALID_PARAMS); 1657 MGMT_STATUS_BUSY);
1658 goto unlock;
1659 }
1512 1660
1513 hci_dev_lock(hdev); 1661 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1662 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1663 MGMT_STATUS_INVALID_PARAMS);
1664 goto unlock;
1665 }
1514 1666
1515 hdev->major_class = cp->major; 1667 hdev->major_class = cp->major;
1516 hdev->minor_class = cp->minor; 1668 hdev->minor_class = cp->minor;
@@ -1521,26 +1673,34 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1521 goto unlock; 1673 goto unlock;
1522 } 1674 }
1523 1675
1676 hci_req_init(&req, hdev);
1677
1524 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) { 1678 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1525 hci_dev_unlock(hdev); 1679 hci_dev_unlock(hdev);
1526 cancel_delayed_work_sync(&hdev->service_cache); 1680 cancel_delayed_work_sync(&hdev->service_cache);
1527 hci_dev_lock(hdev); 1681 hci_dev_lock(hdev);
1528 update_eir(hdev); 1682 update_eir(&req);
1529 } 1683 }
1530 1684
1531 err = update_class(hdev); 1685 update_class(&req);
1532 if (err < 0) 1686
1533 goto unlock; 1687 err = hci_req_run(&req, set_class_complete);
1688 if (err < 0) {
1689 if (err != -ENODATA)
1690 goto unlock;
1534 1691
1535 if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) {
1536 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0, 1692 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1537 hdev->dev_class, 3); 1693 hdev->dev_class, 3);
1538 goto unlock; 1694 goto unlock;
1539 } 1695 }
1540 1696
1541 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len); 1697 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1542 if (!cmd) 1698 if (!cmd) {
1543 err = -ENOMEM; 1699 err = -ENOMEM;
1700 goto unlock;
1701 }
1702
1703 err = 0;
1544 1704
1545unlock: 1705unlock:
1546 hci_dev_unlock(hdev); 1706 hci_dev_unlock(hdev);
@@ -2140,7 +2300,7 @@ unlock:
2140} 2300}
2141 2301
2142static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev, 2302static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2143 bdaddr_t *bdaddr, u8 type, u16 mgmt_op, 2303 struct mgmt_addr_info *addr, u16 mgmt_op,
2144 u16 hci_op, __le32 passkey) 2304 u16 hci_op, __le32 passkey)
2145{ 2305{
2146 struct pending_cmd *cmd; 2306 struct pending_cmd *cmd;
@@ -2150,37 +2310,41 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2150 hci_dev_lock(hdev); 2310 hci_dev_lock(hdev);
2151 2311
2152 if (!hdev_is_powered(hdev)) { 2312 if (!hdev_is_powered(hdev)) {
2153 err = cmd_status(sk, hdev->id, mgmt_op, 2313 err = cmd_complete(sk, hdev->id, mgmt_op,
2154 MGMT_STATUS_NOT_POWERED); 2314 MGMT_STATUS_NOT_POWERED, addr,
2315 sizeof(*addr));
2155 goto done; 2316 goto done;
2156 } 2317 }
2157 2318
2158 if (type == BDADDR_BREDR) 2319 if (addr->type == BDADDR_BREDR)
2159 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr); 2320 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2160 else 2321 else
2161 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr); 2322 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2162 2323
2163 if (!conn) { 2324 if (!conn) {
2164 err = cmd_status(sk, hdev->id, mgmt_op, 2325 err = cmd_complete(sk, hdev->id, mgmt_op,
2165 MGMT_STATUS_NOT_CONNECTED); 2326 MGMT_STATUS_NOT_CONNECTED, addr,
2327 sizeof(*addr));
2166 goto done; 2328 goto done;
2167 } 2329 }
2168 2330
2169 if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) { 2331 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2170 /* Continue with pairing via SMP */ 2332 /* Continue with pairing via SMP */
2171 err = smp_user_confirm_reply(conn, mgmt_op, passkey); 2333 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2172 2334
2173 if (!err) 2335 if (!err)
2174 err = cmd_status(sk, hdev->id, mgmt_op, 2336 err = cmd_complete(sk, hdev->id, mgmt_op,
2175 MGMT_STATUS_SUCCESS); 2337 MGMT_STATUS_SUCCESS, addr,
2338 sizeof(*addr));
2176 else 2339 else
2177 err = cmd_status(sk, hdev->id, mgmt_op, 2340 err = cmd_complete(sk, hdev->id, mgmt_op,
2178 MGMT_STATUS_FAILED); 2341 MGMT_STATUS_FAILED, addr,
2342 sizeof(*addr));
2179 2343
2180 goto done; 2344 goto done;
2181 } 2345 }
2182 2346
2183 cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr)); 2347 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2184 if (!cmd) { 2348 if (!cmd) {
2185 err = -ENOMEM; 2349 err = -ENOMEM;
2186 goto done; 2350 goto done;
@@ -2190,11 +2354,12 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2190 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) { 2354 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2191 struct hci_cp_user_passkey_reply cp; 2355 struct hci_cp_user_passkey_reply cp;
2192 2356
2193 bacpy(&cp.bdaddr, bdaddr); 2357 bacpy(&cp.bdaddr, &addr->bdaddr);
2194 cp.passkey = passkey; 2358 cp.passkey = passkey;
2195 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp); 2359 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2196 } else 2360 } else
2197 err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr); 2361 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2362 &addr->bdaddr);
2198 2363
2199 if (err < 0) 2364 if (err < 0)
2200 mgmt_pending_remove(cmd); 2365 mgmt_pending_remove(cmd);
@@ -2211,7 +2376,7 @@ static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2211 2376
2212 BT_DBG(""); 2377 BT_DBG("");
2213 2378
2214 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type, 2379 return user_pairing_resp(sk, hdev, &cp->addr,
2215 MGMT_OP_PIN_CODE_NEG_REPLY, 2380 MGMT_OP_PIN_CODE_NEG_REPLY,
2216 HCI_OP_PIN_CODE_NEG_REPLY, 0); 2381 HCI_OP_PIN_CODE_NEG_REPLY, 0);
2217} 2382}
@@ -2227,7 +2392,7 @@ static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2227 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY, 2392 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2228 MGMT_STATUS_INVALID_PARAMS); 2393 MGMT_STATUS_INVALID_PARAMS);
2229 2394
2230 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type, 2395 return user_pairing_resp(sk, hdev, &cp->addr,
2231 MGMT_OP_USER_CONFIRM_REPLY, 2396 MGMT_OP_USER_CONFIRM_REPLY,
2232 HCI_OP_USER_CONFIRM_REPLY, 0); 2397 HCI_OP_USER_CONFIRM_REPLY, 0);
2233} 2398}
@@ -2239,7 +2404,7 @@ static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2239 2404
2240 BT_DBG(""); 2405 BT_DBG("");
2241 2406
2242 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type, 2407 return user_pairing_resp(sk, hdev, &cp->addr,
2243 MGMT_OP_USER_CONFIRM_NEG_REPLY, 2408 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2244 HCI_OP_USER_CONFIRM_NEG_REPLY, 0); 2409 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2245} 2410}
@@ -2251,7 +2416,7 @@ static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2251 2416
2252 BT_DBG(""); 2417 BT_DBG("");
2253 2418
2254 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type, 2419 return user_pairing_resp(sk, hdev, &cp->addr,
2255 MGMT_OP_USER_PASSKEY_REPLY, 2420 MGMT_OP_USER_PASSKEY_REPLY,
2256 HCI_OP_USER_PASSKEY_REPLY, cp->passkey); 2421 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
2257} 2422}
@@ -2263,18 +2428,47 @@ static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2263 2428
2264 BT_DBG(""); 2429 BT_DBG("");
2265 2430
2266 return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type, 2431 return user_pairing_resp(sk, hdev, &cp->addr,
2267 MGMT_OP_USER_PASSKEY_NEG_REPLY, 2432 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2268 HCI_OP_USER_PASSKEY_NEG_REPLY, 0); 2433 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
2269} 2434}
2270 2435
2271static int update_name(struct hci_dev *hdev, const char *name) 2436static void update_name(struct hci_request *req)
2272{ 2437{
2438 struct hci_dev *hdev = req->hdev;
2273 struct hci_cp_write_local_name cp; 2439 struct hci_cp_write_local_name cp;
2274 2440
2275 memcpy(cp.name, name, sizeof(cp.name)); 2441 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2442
2443 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
2444}
2445
2446static void set_name_complete(struct hci_dev *hdev, u8 status)
2447{
2448 struct mgmt_cp_set_local_name *cp;
2449 struct pending_cmd *cmd;
2450
2451 BT_DBG("status 0x%02x", status);
2452
2453 hci_dev_lock(hdev);
2454
2455 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2456 if (!cmd)
2457 goto unlock;
2458
2459 cp = cmd->param;
2276 2460
2277 return hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp); 2461 if (status)
2462 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2463 mgmt_status(status));
2464 else
2465 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2466 cp, sizeof(*cp));
2467
2468 mgmt_pending_remove(cmd);
2469
2470unlock:
2471 hci_dev_unlock(hdev);
2278} 2472}
2279 2473
2280static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data, 2474static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -2282,12 +2476,24 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2282{ 2476{
2283 struct mgmt_cp_set_local_name *cp = data; 2477 struct mgmt_cp_set_local_name *cp = data;
2284 struct pending_cmd *cmd; 2478 struct pending_cmd *cmd;
2479 struct hci_request req;
2285 int err; 2480 int err;
2286 2481
2287 BT_DBG(""); 2482 BT_DBG("");
2288 2483
2289 hci_dev_lock(hdev); 2484 hci_dev_lock(hdev);
2290 2485
2486 /* If the old values are the same as the new ones just return a
2487 * direct command complete event.
2488 */
2489 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2490 !memcmp(hdev->short_name, cp->short_name,
2491 sizeof(hdev->short_name))) {
2492 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2493 data, len);
2494 goto failed;
2495 }
2496
2291 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name)); 2497 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2292 2498
2293 if (!hdev_is_powered(hdev)) { 2499 if (!hdev_is_powered(hdev)) {
@@ -2310,7 +2516,19 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2310 goto failed; 2516 goto failed;
2311 } 2517 }
2312 2518
2313 err = update_name(hdev, cp->name); 2519 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2520
2521 hci_req_init(&req, hdev);
2522
2523 if (lmp_bredr_capable(hdev)) {
2524 update_name(&req);
2525 update_eir(&req);
2526 }
2527
2528 if (lmp_le_capable(hdev))
2529 hci_update_ad(&req);
2530
2531 err = hci_req_run(&req, set_name_complete);
2314 if (err < 0) 2532 if (err < 0)
2315 mgmt_pending_remove(cmd); 2533 mgmt_pending_remove(cmd);
2316 2534
@@ -2698,6 +2916,7 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
2698 u16 len) 2916 u16 len)
2699{ 2917{
2700 struct mgmt_cp_set_device_id *cp = data; 2918 struct mgmt_cp_set_device_id *cp = data;
2919 struct hci_request req;
2701 int err; 2920 int err;
2702 __u16 source; 2921 __u16 source;
2703 2922
@@ -2718,24 +2937,59 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
2718 2937
2719 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0); 2938 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
2720 2939
2721 update_eir(hdev); 2940 hci_req_init(&req, hdev);
2941 update_eir(&req);
2942 hci_req_run(&req, NULL);
2722 2943
2723 hci_dev_unlock(hdev); 2944 hci_dev_unlock(hdev);
2724 2945
2725 return err; 2946 return err;
2726} 2947}
2727 2948
2949static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
2950{
2951 struct pending_cmd *cmd;
2952
2953 BT_DBG("status 0x%02x", status);
2954
2955 hci_dev_lock(hdev);
2956
2957 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
2958 if (!cmd)
2959 goto unlock;
2960
2961 if (status) {
2962 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2963 mgmt_status(status));
2964 } else {
2965 struct mgmt_mode *cp = cmd->param;
2966
2967 if (cp->val)
2968 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
2969 else
2970 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
2971
2972 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
2973 new_settings(hdev, cmd->sk);
2974 }
2975
2976 mgmt_pending_remove(cmd);
2977
2978unlock:
2979 hci_dev_unlock(hdev);
2980}
2981
2728static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev, 2982static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2729 void *data, u16 len) 2983 void *data, u16 len)
2730{ 2984{
2731 struct mgmt_mode *cp = data; 2985 struct mgmt_mode *cp = data;
2732 struct hci_cp_write_page_scan_activity acp; 2986 struct pending_cmd *cmd;
2733 u8 type; 2987 struct hci_request req;
2734 int err; 2988 int err;
2735 2989
2736 BT_DBG("%s", hdev->name); 2990 BT_DBG("%s", hdev->name);
2737 2991
2738 if (!lmp_bredr_capable(hdev)) 2992 if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
2739 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 2993 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2740 MGMT_STATUS_NOT_SUPPORTED); 2994 MGMT_STATUS_NOT_SUPPORTED);
2741 2995
@@ -2753,40 +3007,39 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2753 3007
2754 hci_dev_lock(hdev); 3008 hci_dev_lock(hdev);
2755 3009
2756 if (cp->val) { 3010 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
2757 type = PAGE_SCAN_TYPE_INTERLACED; 3011 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3012 MGMT_STATUS_BUSY);
3013 goto unlock;
3014 }
2758 3015
2759 /* 160 msec page scan interval */ 3016 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
2760 acp.interval = __constant_cpu_to_le16(0x0100); 3017 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
2761 } else { 3018 hdev);
2762 type = PAGE_SCAN_TYPE_STANDARD; /* default */ 3019 goto unlock;
3020 }
2763 3021
2764 /* default 1.28 sec page scan */ 3022 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
2765 acp.interval = __constant_cpu_to_le16(0x0800); 3023 data, len);
3024 if (!cmd) {
3025 err = -ENOMEM;
3026 goto unlock;
2766 } 3027 }
2767 3028
2768 /* default 11.25 msec page scan window */ 3029 hci_req_init(&req, hdev);
2769 acp.window = __constant_cpu_to_le16(0x0012);
2770 3030
2771 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp), 3031 write_fast_connectable(&req, cp->val);
2772 &acp);
2773 if (err < 0) {
2774 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2775 MGMT_STATUS_FAILED);
2776 goto done;
2777 }
2778 3032
2779 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type); 3033 err = hci_req_run(&req, fast_connectable_complete);
2780 if (err < 0) { 3034 if (err < 0) {
2781 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 3035 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
2782 MGMT_STATUS_FAILED); 3036 MGMT_STATUS_FAILED);
2783 goto done; 3037 mgmt_pending_remove(cmd);
2784 } 3038 }
2785 3039
2786 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 0, 3040unlock:
2787 NULL, 0);
2788done:
2789 hci_dev_unlock(hdev); 3041 hci_dev_unlock(hdev);
3042
2790 return err; 3043 return err;
2791} 3044}
2792 3045
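
The removed inline code above spells out the page scan parameters the new write_fast_connectable() helper is expected to program: a 160 ms interval (0x0100 slots of 0.625 ms) with interlaced scanning when fast connectable is enabled, and the default 1.28 s interval / 11.25 ms window with standard scanning otherwise. The helper itself is introduced elsewhere in this series; the following is a hypothetical reconstruction from the removed lines only, and the real helper may additionally skip the writes when the controller already has these parameters, as the set_bredr_scan() comment further down suggests:

/* Hypothetical reconstruction of write_fast_connectable() from the removed
 * inline code above; the actual helper is defined elsewhere in this series.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;
		/* 160 msec page scan interval */
		acp.interval = __constant_cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */
		/* default 1.28 sec page scan */
		acp.interval = __constant_cpu_to_le16(0x0800);
	}

	/* default 11.25 msec page scan window */
	acp.window = __constant_cpu_to_le16(0x0012);

	hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp), &acp);
	hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}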
@@ -3043,79 +3296,115 @@ static void settings_rsp(struct pending_cmd *cmd, void *data)
3043 mgmt_pending_free(cmd); 3296 mgmt_pending_free(cmd);
3044} 3297}
3045 3298
3046static int set_bredr_scan(struct hci_dev *hdev) 3299static void set_bredr_scan(struct hci_request *req)
3047{ 3300{
3301 struct hci_dev *hdev = req->hdev;
3048 u8 scan = 0; 3302 u8 scan = 0;
3049 3303
3304 /* Ensure that fast connectable is disabled. This function will
3305 * not do anything if the page scan parameters are already what
3306 * they should be.
3307 */
3308 write_fast_connectable(req, false);
3309
3050 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) 3310 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3051 scan |= SCAN_PAGE; 3311 scan |= SCAN_PAGE;
3052 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) 3312 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3053 scan |= SCAN_INQUIRY; 3313 scan |= SCAN_INQUIRY;
3054 3314
3055 if (!scan) 3315 if (scan)
3056 return 0; 3316 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3057
3058 return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3059} 3317}
3060 3318
3061int mgmt_powered(struct hci_dev *hdev, u8 powered) 3319static void powered_complete(struct hci_dev *hdev, u8 status)
3062{ 3320{
3063 struct cmd_lookup match = { NULL, hdev }; 3321 struct cmd_lookup match = { NULL, hdev };
3064 int err;
3065 3322
3066 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 3323 BT_DBG("status 0x%02x", status);
3067 return 0; 3324
3325 hci_dev_lock(hdev);
3068 3326
3069 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); 3327 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3070 3328
3071 if (powered) { 3329 new_settings(hdev, match.sk);
3072 u8 link_sec;
3073 3330
3074 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) && 3331 hci_dev_unlock(hdev);
3075 !lmp_host_ssp_capable(hdev)) {
3076 u8 ssp = 1;
3077 3332
3078 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &ssp); 3333 if (match.sk)
3079 } 3334 sock_put(match.sk);
3335}
3080 3336
3081 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { 3337static int powered_update_hci(struct hci_dev *hdev)
3082 struct hci_cp_write_le_host_supported cp; 3338{
3339 struct hci_request req;
3340 u8 link_sec;
3083 3341
3084 cp.le = 1; 3342 hci_req_init(&req, hdev);
3085 cp.simul = lmp_le_br_capable(hdev);
3086 3343
3087 /* Check first if we already have the right 3344 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3088 * host state (host features set) 3345 !lmp_host_ssp_capable(hdev)) {
3089 */ 3346 u8 ssp = 1;
3090 if (cp.le != lmp_host_le_capable(hdev) ||
3091 cp.simul != lmp_host_le_br_capable(hdev))
3092 hci_send_cmd(hdev,
3093 HCI_OP_WRITE_LE_HOST_SUPPORTED,
3094 sizeof(cp), &cp);
3095 }
3096 3347
3097 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags); 3348 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3098 if (link_sec != test_bit(HCI_AUTH, &hdev->flags)) 3349 }
3099 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE,
3100 sizeof(link_sec), &link_sec);
3101 3350
3102 if (lmp_bredr_capable(hdev)) { 3351 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
3103 set_bredr_scan(hdev); 3352 struct hci_cp_write_le_host_supported cp;
3104 update_class(hdev);
3105 update_name(hdev, hdev->dev_name);
3106 update_eir(hdev);
3107 }
3108 } else {
3109 u8 status = MGMT_STATUS_NOT_POWERED;
3110 u8 zero_cod[] = { 0, 0, 0 };
3111 3353
3112 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); 3354 cp.le = 1;
3355 cp.simul = lmp_le_br_capable(hdev);
3113 3356
3114 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) 3357 /* Check first if we already have the right
3115 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, 3358 * host state (host features set)
3116 zero_cod, sizeof(zero_cod), NULL); 3359 */
3360 if (cp.le != lmp_host_le_capable(hdev) ||
3361 cp.simul != lmp_host_le_br_capable(hdev))
3362 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3363 sizeof(cp), &cp);
3117 } 3364 }
3118 3365
3366 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3367 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3368 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3369 sizeof(link_sec), &link_sec);
3370
3371 if (lmp_bredr_capable(hdev)) {
3372 set_bredr_scan(&req);
3373 update_class(&req);
3374 update_name(&req);
3375 update_eir(&req);
3376 }
3377
3378 return hci_req_run(&req, powered_complete);
3379}
3380
3381int mgmt_powered(struct hci_dev *hdev, u8 powered)
3382{
3383 struct cmd_lookup match = { NULL, hdev };
3384 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3385 u8 zero_cod[] = { 0, 0, 0 };
3386 int err;
3387
3388 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3389 return 0;
3390
3391 if (powered) {
3392 if (powered_update_hci(hdev) == 0)
3393 return 0;
3394
3395 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
3396 &match);
3397 goto new_settings;
3398 }
3399
3400 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3401 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
3402
3403 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3404 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3405 zero_cod, sizeof(zero_cod), NULL);
3406
3407new_settings:
3119 err = new_settings(hdev, match.sk); 3408 err = new_settings(hdev, match.sk);
3120 3409
3121 if (match.sk) 3410 if (match.sk)
@@ -3152,7 +3441,7 @@ int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3152 3441
3153int mgmt_connectable(struct hci_dev *hdev, u8 connectable) 3442int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3154{ 3443{
3155 struct cmd_lookup match = { NULL, hdev }; 3444 struct pending_cmd *cmd;
3156 bool changed = false; 3445 bool changed = false;
3157 int err = 0; 3446 int err = 0;
3158 3447
@@ -3164,14 +3453,10 @@ int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3164 changed = true; 3453 changed = true;
3165 } 3454 }
3166 3455
3167 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp, 3456 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
3168 &match);
3169 3457
3170 if (changed) 3458 if (changed)
3171 err = new_settings(hdev, match.sk); 3459 err = new_settings(hdev, cmd ? cmd->sk : NULL);
3172
3173 if (match.sk)
3174 sock_put(match.sk);
3175 3460
3176 return err; 3461 return err;
3177} 3462}
@@ -3555,23 +3840,25 @@ int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3555 return err; 3840 return err;
3556} 3841}
3557 3842
3558static int clear_eir(struct hci_dev *hdev) 3843static void clear_eir(struct hci_request *req)
3559{ 3844{
3845 struct hci_dev *hdev = req->hdev;
3560 struct hci_cp_write_eir cp; 3846 struct hci_cp_write_eir cp;
3561 3847
3562 if (!lmp_ext_inq_capable(hdev)) 3848 if (!lmp_ext_inq_capable(hdev))
3563 return 0; 3849 return;
3564 3850
3565 memset(hdev->eir, 0, sizeof(hdev->eir)); 3851 memset(hdev->eir, 0, sizeof(hdev->eir));
3566 3852
3567 memset(&cp, 0, sizeof(cp)); 3853 memset(&cp, 0, sizeof(cp));
3568 3854
3569 return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp); 3855 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
3570} 3856}
3571 3857
3572int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status) 3858int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3573{ 3859{
3574 struct cmd_lookup match = { NULL, hdev }; 3860 struct cmd_lookup match = { NULL, hdev };
3861 struct hci_request req;
3575 bool changed = false; 3862 bool changed = false;
3576 int err = 0; 3863 int err = 0;
3577 3864
@@ -3604,29 +3891,26 @@ int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3604 if (match.sk) 3891 if (match.sk)
3605 sock_put(match.sk); 3892 sock_put(match.sk);
3606 3893
3894 hci_req_init(&req, hdev);
3895
3607 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) 3896 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
3608 update_eir(hdev); 3897 update_eir(&req);
3609 else 3898 else
3610 clear_eir(hdev); 3899 clear_eir(&req);
3900
3901 hci_req_run(&req, NULL);
3611 3902
3612 return err; 3903 return err;
3613} 3904}
3614 3905
3615static void class_rsp(struct pending_cmd *cmd, void *data) 3906static void sk_lookup(struct pending_cmd *cmd, void *data)
3616{ 3907{
3617 struct cmd_lookup *match = data; 3908 struct cmd_lookup *match = data;
3618 3909
3619 cmd_complete(cmd->sk, cmd->index, cmd->opcode, match->mgmt_status,
3620 match->hdev->dev_class, 3);
3621
3622 list_del(&cmd->list);
3623
3624 if (match->sk == NULL) { 3910 if (match->sk == NULL) {
3625 match->sk = cmd->sk; 3911 match->sk = cmd->sk;
3626 sock_hold(match->sk); 3912 sock_hold(match->sk);
3627 } 3913 }
3628
3629 mgmt_pending_free(cmd);
3630} 3914}
3631 3915
3632int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, 3916int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
@@ -3635,11 +3919,9 @@ int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
3635 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) }; 3919 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
3636 int err = 0; 3920 int err = 0;
3637 3921
3638 clear_bit(HCI_PENDING_CLASS, &hdev->dev_flags); 3922 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
3639 3923 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
3640 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, class_rsp, &match); 3924 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
3641 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, class_rsp, &match);
3642 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, class_rsp, &match);
3643 3925
3644 if (!status) 3926 if (!status)
3645 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3927 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
@@ -3653,55 +3935,29 @@ int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
3653 3935
3654int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status) 3936int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
3655{ 3937{
3656 struct pending_cmd *cmd;
3657 struct mgmt_cp_set_local_name ev; 3938 struct mgmt_cp_set_local_name ev;
3658 bool changed = false; 3939 struct pending_cmd *cmd;
3659 int err = 0;
3660 3940
3661 if (memcmp(name, hdev->dev_name, sizeof(hdev->dev_name)) != 0) { 3941 if (status)
3662 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name)); 3942 return 0;
3663 changed = true;
3664 }
3665 3943
3666 memset(&ev, 0, sizeof(ev)); 3944 memset(&ev, 0, sizeof(ev));
3667 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); 3945 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
3668 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH); 3946 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
3669 3947
3670 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); 3948 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3671 if (!cmd) 3949 if (!cmd) {
3672 goto send_event; 3950 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
3673
3674 /* Always assume that either the short or the complete name has
3675 * changed if there was a pending mgmt command */
3676 changed = true;
3677 3951
3678 if (status) { 3952 /* If this is a HCI command related to powering on the
3679 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 3953 * HCI dev don't send any mgmt signals.
3680 mgmt_status(status)); 3954 */
3681 goto failed; 3955 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
3956 return 0;
3682 } 3957 }
3683 3958
3684 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, &ev, 3959 return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
3685 sizeof(ev)); 3960 cmd ? cmd->sk : NULL);
3686 if (err < 0)
3687 goto failed;
3688
3689send_event:
3690 if (changed)
3691 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev,
3692 sizeof(ev), cmd ? cmd->sk : NULL);
3693
3694 /* EIR is taken care of separately when powering on the
3695 * adapter so only update them here if this is a name change
3696 * unrelated to power on.
3697 */
3698 if (!test_bit(HCI_INIT, &hdev->flags))
3699 update_eir(hdev);
3700
3701failed:
3702 if (cmd)
3703 mgmt_pending_remove(cmd);
3704 return err;
3705} 3961}
3706 3962
3707int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash, 3963int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index b23e2713fea8..ca957d34b0c8 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -69,7 +69,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
69 u8 sec_level, 69 u8 sec_level,
70 int *err); 70 int *err);
71static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst); 71static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst);
72static void rfcomm_session_del(struct rfcomm_session *s); 72static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s);
73 73
74/* ---- RFCOMM frame parsing macros ---- */ 74/* ---- RFCOMM frame parsing macros ---- */
75#define __get_dlci(b) ((b & 0xfc) >> 2) 75#define __get_dlci(b) ((b & 0xfc) >> 2)
@@ -108,12 +108,6 @@ static void rfcomm_schedule(void)
108 wake_up_process(rfcomm_thread); 108 wake_up_process(rfcomm_thread);
109} 109}
110 110
111static void rfcomm_session_put(struct rfcomm_session *s)
112{
113 if (atomic_dec_and_test(&s->refcnt))
114 rfcomm_session_del(s);
115}
116
117/* ---- RFCOMM FCS computation ---- */ 111/* ---- RFCOMM FCS computation ---- */
118 112
119/* reversed, 8-bit, poly=0x07 */ 113/* reversed, 8-bit, poly=0x07 */
@@ -249,16 +243,14 @@ static void rfcomm_session_set_timer(struct rfcomm_session *s, long timeout)
249{ 243{
250 BT_DBG("session %p state %ld timeout %ld", s, s->state, timeout); 244 BT_DBG("session %p state %ld timeout %ld", s, s->state, timeout);
251 245
252 if (!mod_timer(&s->timer, jiffies + timeout)) 246 mod_timer(&s->timer, jiffies + timeout);
253 rfcomm_session_hold(s);
254} 247}
255 248
256static void rfcomm_session_clear_timer(struct rfcomm_session *s) 249static void rfcomm_session_clear_timer(struct rfcomm_session *s)
257{ 250{
258 BT_DBG("session %p state %ld", s, s->state); 251 BT_DBG("session %p state %ld", s, s->state);
259 252
260 if (del_timer(&s->timer)) 253 del_timer_sync(&s->timer);
261 rfcomm_session_put(s);
262} 254}
263 255
264/* ---- RFCOMM DLCs ---- */ 256/* ---- RFCOMM DLCs ---- */
@@ -336,8 +328,6 @@ static void rfcomm_dlc_link(struct rfcomm_session *s, struct rfcomm_dlc *d)
336{ 328{
337 BT_DBG("dlc %p session %p", d, s); 329 BT_DBG("dlc %p session %p", d, s);
338 330
339 rfcomm_session_hold(s);
340
341 rfcomm_session_clear_timer(s); 331 rfcomm_session_clear_timer(s);
342 rfcomm_dlc_hold(d); 332 rfcomm_dlc_hold(d);
343 list_add(&d->list, &s->dlcs); 333 list_add(&d->list, &s->dlcs);
@@ -356,8 +346,6 @@ static void rfcomm_dlc_unlink(struct rfcomm_dlc *d)
356 346
357 if (list_empty(&s->dlcs)) 347 if (list_empty(&s->dlcs))
358 rfcomm_session_set_timer(s, RFCOMM_IDLE_TIMEOUT); 348 rfcomm_session_set_timer(s, RFCOMM_IDLE_TIMEOUT);
359
360 rfcomm_session_put(s);
361} 349}
362 350
363static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci) 351static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci)
@@ -493,12 +481,34 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
493 481
494int rfcomm_dlc_close(struct rfcomm_dlc *d, int err) 482int rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
495{ 483{
496 int r; 484 int r = 0;
485 struct rfcomm_dlc *d_list;
486 struct rfcomm_session *s, *s_list;
487
488 BT_DBG("dlc %p state %ld dlci %d err %d", d, d->state, d->dlci, err);
497 489
498 rfcomm_lock(); 490 rfcomm_lock();
499 491
500 r = __rfcomm_dlc_close(d, err); 492 s = d->session;
493 if (!s)
494 goto no_session;
495
496 /* after waiting on the mutex check the session still exists
497 * then check the dlc still exists
498 */
499 list_for_each_entry(s_list, &session_list, list) {
500 if (s_list == s) {
501 list_for_each_entry(d_list, &s->dlcs, list) {
502 if (d_list == d) {
503 r = __rfcomm_dlc_close(d, err);
504 break;
505 }
506 }
507 break;
508 }
509 }
501 510
511no_session:
502 rfcomm_unlock(); 512 rfcomm_unlock();
503 return r; 513 return r;
504} 514}
@@ -609,7 +619,7 @@ static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state)
609 return s; 619 return s;
610} 620}
611 621
612static void rfcomm_session_del(struct rfcomm_session *s) 622static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s)
613{ 623{
614 int state = s->state; 624 int state = s->state;
615 625
@@ -617,15 +627,14 @@ static void rfcomm_session_del(struct rfcomm_session *s)
617 627
618 list_del(&s->list); 628 list_del(&s->list);
619 629
620 if (state == BT_CONNECTED)
621 rfcomm_send_disc(s, 0);
622
623 rfcomm_session_clear_timer(s); 630 rfcomm_session_clear_timer(s);
624 sock_release(s->sock); 631 sock_release(s->sock);
625 kfree(s); 632 kfree(s);
626 633
627 if (state != BT_LISTEN) 634 if (state != BT_LISTEN)
628 module_put(THIS_MODULE); 635 module_put(THIS_MODULE);
636
637 return NULL;
629} 638}
630 639
631static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst) 640static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst)
@@ -644,17 +653,16 @@ static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst)
644 return NULL; 653 return NULL;
645} 654}
646 655
647static void rfcomm_session_close(struct rfcomm_session *s, int err) 656static struct rfcomm_session *rfcomm_session_close(struct rfcomm_session *s,
657 int err)
648{ 658{
649 struct rfcomm_dlc *d; 659 struct rfcomm_dlc *d;
650 struct list_head *p, *n; 660 struct list_head *p, *n;
651 661
652 BT_DBG("session %p state %ld err %d", s, s->state, err);
653
654 rfcomm_session_hold(s);
655
656 s->state = BT_CLOSED; 662 s->state = BT_CLOSED;
657 663
664 BT_DBG("session %p state %ld err %d", s, s->state, err);
665
658 /* Close all dlcs */ 666 /* Close all dlcs */
659 list_for_each_safe(p, n, &s->dlcs) { 667 list_for_each_safe(p, n, &s->dlcs) {
660 d = list_entry(p, struct rfcomm_dlc, list); 668 d = list_entry(p, struct rfcomm_dlc, list);
@@ -663,7 +671,7 @@ static void rfcomm_session_close(struct rfcomm_session *s, int err)
663 } 671 }
664 672
665 rfcomm_session_clear_timer(s); 673 rfcomm_session_clear_timer(s);
666 rfcomm_session_put(s); 674 return rfcomm_session_del(s);
667} 675}
668 676
669static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, 677static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
@@ -715,8 +723,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
715 if (*err == 0 || *err == -EINPROGRESS) 723 if (*err == 0 || *err == -EINPROGRESS)
716 return s; 724 return s;
717 725
718 rfcomm_session_del(s); 726 return rfcomm_session_del(s);
719 return NULL;
720 727
721failed: 728failed:
722 sock_release(sock); 729 sock_release(sock);
@@ -1105,7 +1112,7 @@ static void rfcomm_make_uih(struct sk_buff *skb, u8 addr)
1105} 1112}
1106 1113
1107/* ---- RFCOMM frame reception ---- */ 1114/* ---- RFCOMM frame reception ---- */
1108static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci) 1115static struct rfcomm_session *rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
1109{ 1116{
1110 BT_DBG("session %p state %ld dlci %d", s, s->state, dlci); 1117 BT_DBG("session %p state %ld dlci %d", s, s->state, dlci);
1111 1118
@@ -1114,7 +1121,7 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
1114 struct rfcomm_dlc *d = rfcomm_dlc_get(s, dlci); 1121 struct rfcomm_dlc *d = rfcomm_dlc_get(s, dlci);
1115 if (!d) { 1122 if (!d) {
1116 rfcomm_send_dm(s, dlci); 1123 rfcomm_send_dm(s, dlci);
1117 return 0; 1124 return s;
1118 } 1125 }
1119 1126
1120 switch (d->state) { 1127 switch (d->state) {
@@ -1150,25 +1157,14 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
1150 break; 1157 break;
1151 1158
1152 case BT_DISCONN: 1159 case BT_DISCONN:
1153 /* rfcomm_session_put is called later so don't do 1160 s = rfcomm_session_close(s, ECONNRESET);
1154 * anything here otherwise we will mess up the session
1155 * reference counter:
1156 *
1157 * (a) when we are the initiator dlc_unlink will drive
1158 * the reference counter to 0 (there is no initial put
1159 * after session_add)
1160 *
1161 * (b) when we are not the initiator rfcomm_rx_process
1162 * will explicitly call put to balance the initial hold
1163 * done after session add.
1164 */
1165 break; 1161 break;
1166 } 1162 }
1167 } 1163 }
1168 return 0; 1164 return s;
1169} 1165}
1170 1166
1171static int rfcomm_recv_dm(struct rfcomm_session *s, u8 dlci) 1167static struct rfcomm_session *rfcomm_recv_dm(struct rfcomm_session *s, u8 dlci)
1172{ 1168{
1173 int err = 0; 1169 int err = 0;
1174 1170
@@ -1192,13 +1188,13 @@ static int rfcomm_recv_dm(struct rfcomm_session *s, u8 dlci)
1192 else 1188 else
1193 err = ECONNRESET; 1189 err = ECONNRESET;
1194 1190
1195 s->state = BT_CLOSED; 1191 s = rfcomm_session_close(s, err);
1196 rfcomm_session_close(s, err);
1197 } 1192 }
1198 return 0; 1193 return s;
1199} 1194}
1200 1195
1201static int rfcomm_recv_disc(struct rfcomm_session *s, u8 dlci) 1196static struct rfcomm_session *rfcomm_recv_disc(struct rfcomm_session *s,
1197 u8 dlci)
1202{ 1198{
1203 int err = 0; 1199 int err = 0;
1204 1200
@@ -1227,11 +1223,9 @@ static int rfcomm_recv_disc(struct rfcomm_session *s, u8 dlci)
1227 else 1223 else
1228 err = ECONNRESET; 1224 err = ECONNRESET;
1229 1225
1230 s->state = BT_CLOSED; 1226 s = rfcomm_session_close(s, err);
1231 rfcomm_session_close(s, err);
1232 } 1227 }
1233 1228 return s;
1234 return 0;
1235} 1229}
1236 1230
1237void rfcomm_dlc_accept(struct rfcomm_dlc *d) 1231void rfcomm_dlc_accept(struct rfcomm_dlc *d)
@@ -1652,11 +1646,18 @@ drop:
1652 return 0; 1646 return 0;
1653} 1647}
1654 1648
1655static int rfcomm_recv_frame(struct rfcomm_session *s, struct sk_buff *skb) 1649static struct rfcomm_session *rfcomm_recv_frame(struct rfcomm_session *s,
1650 struct sk_buff *skb)
1656{ 1651{
1657 struct rfcomm_hdr *hdr = (void *) skb->data; 1652 struct rfcomm_hdr *hdr = (void *) skb->data;
1658 u8 type, dlci, fcs; 1653 u8 type, dlci, fcs;
1659 1654
1655 if (!s) {
1656 /* no session, so free socket data */
1657 kfree_skb(skb);
1658 return s;
1659 }
1660
1660 dlci = __get_dlci(hdr->addr); 1661 dlci = __get_dlci(hdr->addr);
1661 type = __get_type(hdr->ctrl); 1662 type = __get_type(hdr->ctrl);
1662 1663
@@ -1667,7 +1668,7 @@ static int rfcomm_recv_frame(struct rfcomm_session *s, struct sk_buff *skb)
1667 if (__check_fcs(skb->data, type, fcs)) { 1668 if (__check_fcs(skb->data, type, fcs)) {
1668 BT_ERR("bad checksum in packet"); 1669 BT_ERR("bad checksum in packet");
1669 kfree_skb(skb); 1670 kfree_skb(skb);
1670 return -EILSEQ; 1671 return s;
1671 } 1672 }
1672 1673
1673 if (__test_ea(hdr->len)) 1674 if (__test_ea(hdr->len))
@@ -1683,22 +1684,23 @@ static int rfcomm_recv_frame(struct rfcomm_session *s, struct sk_buff *skb)
1683 1684
1684 case RFCOMM_DISC: 1685 case RFCOMM_DISC:
1685 if (__test_pf(hdr->ctrl)) 1686 if (__test_pf(hdr->ctrl))
1686 rfcomm_recv_disc(s, dlci); 1687 s = rfcomm_recv_disc(s, dlci);
1687 break; 1688 break;
1688 1689
1689 case RFCOMM_UA: 1690 case RFCOMM_UA:
1690 if (__test_pf(hdr->ctrl)) 1691 if (__test_pf(hdr->ctrl))
1691 rfcomm_recv_ua(s, dlci); 1692 s = rfcomm_recv_ua(s, dlci);
1692 break; 1693 break;
1693 1694
1694 case RFCOMM_DM: 1695 case RFCOMM_DM:
1695 rfcomm_recv_dm(s, dlci); 1696 s = rfcomm_recv_dm(s, dlci);
1696 break; 1697 break;
1697 1698
1698 case RFCOMM_UIH: 1699 case RFCOMM_UIH:
1699 if (dlci) 1700 if (dlci) {
1700 return rfcomm_recv_data(s, dlci, __test_pf(hdr->ctrl), skb); 1701 rfcomm_recv_data(s, dlci, __test_pf(hdr->ctrl), skb);
1701 1702 return s;
1703 }
1702 rfcomm_recv_mcc(s, skb); 1704 rfcomm_recv_mcc(s, skb);
1703 break; 1705 break;
1704 1706
@@ -1707,7 +1709,7 @@ static int rfcomm_recv_frame(struct rfcomm_session *s, struct sk_buff *skb)
1707 break; 1709 break;
1708 } 1710 }
1709 kfree_skb(skb); 1711 kfree_skb(skb);
1710 return 0; 1712 return s;
1711} 1713}
1712 1714
1713/* ---- Connection and data processing ---- */ 1715/* ---- Connection and data processing ---- */
@@ -1844,7 +1846,7 @@ static void rfcomm_process_dlcs(struct rfcomm_session *s)
1844 } 1846 }
1845} 1847}
1846 1848
1847static void rfcomm_process_rx(struct rfcomm_session *s) 1849static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
1848{ 1850{
1849 struct socket *sock = s->sock; 1851 struct socket *sock = s->sock;
1850 struct sock *sk = sock->sk; 1852 struct sock *sk = sock->sk;
@@ -1856,17 +1858,15 @@ static void rfcomm_process_rx(struct rfcomm_session *s)
1856 while ((skb = skb_dequeue(&sk->sk_receive_queue))) { 1858 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
1857 skb_orphan(skb); 1859 skb_orphan(skb);
1858 if (!skb_linearize(skb)) 1860 if (!skb_linearize(skb))
1859 rfcomm_recv_frame(s, skb); 1861 s = rfcomm_recv_frame(s, skb);
1860 else 1862 else
1861 kfree_skb(skb); 1863 kfree_skb(skb);
1862 } 1864 }
1863 1865
1864 if (sk->sk_state == BT_CLOSED) { 1866 if (s && (sk->sk_state == BT_CLOSED))
1865 if (!s->initiator) 1867 s = rfcomm_session_close(s, sk->sk_err);
1866 rfcomm_session_put(s);
1867 1868
1868 rfcomm_session_close(s, sk->sk_err); 1869 return s;
1869 }
1870} 1870}
1871 1871
1872static void rfcomm_accept_connection(struct rfcomm_session *s) 1872static void rfcomm_accept_connection(struct rfcomm_session *s)
@@ -1891,8 +1891,6 @@ static void rfcomm_accept_connection(struct rfcomm_session *s)
1891 1891
1892 s = rfcomm_session_add(nsock, BT_OPEN); 1892 s = rfcomm_session_add(nsock, BT_OPEN);
1893 if (s) { 1893 if (s) {
1894 rfcomm_session_hold(s);
1895
1896 /* We should adjust MTU on incoming sessions. 1894 /* We should adjust MTU on incoming sessions.
1897 * L2CAP MTU minus UIH header and FCS. */ 1895 * L2CAP MTU minus UIH header and FCS. */
1898 s->mtu = min(l2cap_pi(nsock->sk)->chan->omtu, 1896 s->mtu = min(l2cap_pi(nsock->sk)->chan->omtu,
@@ -1903,7 +1901,7 @@ static void rfcomm_accept_connection(struct rfcomm_session *s)
1903 sock_release(nsock); 1901 sock_release(nsock);
1904} 1902}
1905 1903
1906static void rfcomm_check_connection(struct rfcomm_session *s) 1904static struct rfcomm_session *rfcomm_check_connection(struct rfcomm_session *s)
1907{ 1905{
1908 struct sock *sk = s->sock->sk; 1906 struct sock *sk = s->sock->sk;
1909 1907
@@ -1921,10 +1919,10 @@ static void rfcomm_check_connection(struct rfcomm_session *s)
1921 break; 1919 break;
1922 1920
1923 case BT_CLOSED: 1921 case BT_CLOSED:
1924 s->state = BT_CLOSED; 1922 s = rfcomm_session_close(s, sk->sk_err);
1925 rfcomm_session_close(s, sk->sk_err);
1926 break; 1923 break;
1927 } 1924 }
1925 return s;
1928} 1926}
1929 1927
1930static void rfcomm_process_sessions(void) 1928static void rfcomm_process_sessions(void)
@@ -1940,7 +1938,6 @@ static void rfcomm_process_sessions(void)
1940 if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) { 1938 if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) {
1941 s->state = BT_DISCONN; 1939 s->state = BT_DISCONN;
1942 rfcomm_send_disc(s, 0); 1940 rfcomm_send_disc(s, 0);
1943 rfcomm_session_put(s);
1944 continue; 1941 continue;
1945 } 1942 }
1946 1943
@@ -1949,21 +1946,18 @@ static void rfcomm_process_sessions(void)
1949 continue; 1946 continue;
1950 } 1947 }
1951 1948
1952 rfcomm_session_hold(s);
1953
1954 switch (s->state) { 1949 switch (s->state) {
1955 case BT_BOUND: 1950 case BT_BOUND:
1956 rfcomm_check_connection(s); 1951 s = rfcomm_check_connection(s);
1957 break; 1952 break;
1958 1953
1959 default: 1954 default:
1960 rfcomm_process_rx(s); 1955 s = rfcomm_process_rx(s);
1961 break; 1956 break;
1962 } 1957 }
1963 1958
1964 rfcomm_process_dlcs(s); 1959 if (s)
1965 1960 rfcomm_process_dlcs(s);
1966 rfcomm_session_put(s);
1967 } 1961 }
1968 1962
1969 rfcomm_unlock(); 1963 rfcomm_unlock();
@@ -2010,10 +2004,11 @@ static int rfcomm_add_listener(bdaddr_t *ba)
2010 2004
2011 /* Add listening session */ 2005 /* Add listening session */
2012 s = rfcomm_session_add(sock, BT_LISTEN); 2006 s = rfcomm_session_add(sock, BT_LISTEN);
2013 if (!s) 2007 if (!s) {
2008 err = -ENOMEM;
2014 goto failed; 2009 goto failed;
2010 }
2015 2011
2016 rfcomm_session_hold(s);
2017 return 0; 2012 return 0;
2018failed: 2013failed:
2019 sock_release(sock); 2014 sock_release(sock);
@@ -2071,8 +2066,6 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
2071 if (!s) 2066 if (!s)
2072 return; 2067 return;
2073 2068
2074 rfcomm_session_hold(s);
2075
2076 list_for_each_safe(p, n, &s->dlcs) { 2069 list_for_each_safe(p, n, &s->dlcs) {
2077 d = list_entry(p, struct rfcomm_dlc, list); 2070 d = list_entry(p, struct rfcomm_dlc, list);
2078 2071
@@ -2104,8 +2097,6 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
2104 set_bit(RFCOMM_AUTH_REJECT, &d->flags); 2097 set_bit(RFCOMM_AUTH_REJECT, &d->flags);
2105 } 2098 }
2106 2099
2107 rfcomm_session_put(s);
2108
2109 rfcomm_schedule(); 2100 rfcomm_schedule();
2110} 2101}
2111 2102
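
The rfcomm/core.c changes above drop the session reference counting (rfcomm_session_hold()/rfcomm_session_put()) and instead have every function that can destroy a session return the session pointer, with NULL meaning the session has been freed. Callers must therefore reassign the pointer and re-check it before any further use, as the reworked rfcomm_process_sessions() loop does. A condensed caller-side sketch of that convention, using only function names from this diff:

/* Sketch of the "return the session or NULL" ownership convention; the
 * function names come from this diff, the loop body is condensed.
 */
static void process_one_session(struct rfcomm_session *s)
{
	switch (s->state) {
	case BT_BOUND:
		s = rfcomm_check_connection(s);	/* may close and free the session */
		break;
	default:
		s = rfcomm_process_rx(s);	/* NULL here means the session is gone */
		break;
	}

	if (s)					/* only touch its DLCs if it survived */
		rfcomm_process_dlcs(s);
}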
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index c23bae86263b..3786ddc45152 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -1065,8 +1065,7 @@ void __exit rfcomm_cleanup_sockets(void)
1065 1065
1066 debugfs_remove(rfcomm_sock_debugfs); 1066 debugfs_remove(rfcomm_sock_debugfs);
1067 1067
1068 if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) 1068 bt_sock_unregister(BTPROTO_RFCOMM);
1069 BT_ERR("RFCOMM socket layer unregistration failed");
1070 1069
1071 proto_unregister(&rfcomm_proto); 1070 proto_unregister(&rfcomm_proto);
1072} 1071}
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index fad0302bdb32..d919d1161ab4 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -1112,8 +1112,7 @@ void __exit sco_exit(void)
1112 1112
1113 debugfs_remove(sco_debugfs); 1113 debugfs_remove(sco_debugfs);
1114 1114
1115 if (bt_sock_unregister(BTPROTO_SCO) < 0) 1115 bt_sock_unregister(BTPROTO_SCO);
1116 BT_ERR("SCO socket unregistration failed");
1117 1116
1118 proto_unregister(&sco_proto); 1117 proto_unregister(&sco_proto);
1119} 1118}
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index fb306814576a..e5c1441ac2b8 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -175,7 +175,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
175 * add it to the device after the station. 175 * add it to the device after the station.
176 */ 176 */
177 if (!sta || !test_sta_flag(sta, WLAN_STA_ASSOC)) { 177 if (!sta || !test_sta_flag(sta, WLAN_STA_ASSOC)) {
178 ieee80211_key_free(sdata->local, key); 178 ieee80211_key_free_unused(key);
179 err = -ENOENT; 179 err = -ENOENT;
180 goto out_unlock; 180 goto out_unlock;
181 } 181 }
@@ -214,8 +214,6 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
214 } 214 }
215 215
216 err = ieee80211_key_link(key, sdata, sta); 216 err = ieee80211_key_link(key, sdata, sta);
217 if (err)
218 ieee80211_key_free(sdata->local, key);
219 217
220 out_unlock: 218 out_unlock:
221 mutex_unlock(&sdata->local->sta_mtx); 219 mutex_unlock(&sdata->local->sta_mtx);
@@ -254,7 +252,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
254 goto out_unlock; 252 goto out_unlock;
255 } 253 }
256 254
257 __ieee80211_key_free(key); 255 ieee80211_key_free(key, true);
258 256
259 ret = 0; 257 ret = 0;
260 out_unlock: 258 out_unlock:
@@ -445,12 +443,14 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
445 struct ieee80211_sub_if_data *sdata = sta->sdata; 443 struct ieee80211_sub_if_data *sdata = sta->sdata;
446 struct ieee80211_local *local = sdata->local; 444 struct ieee80211_local *local = sdata->local;
447 struct timespec uptime; 445 struct timespec uptime;
446 u64 packets = 0;
447 int ac;
448 448
449 sinfo->generation = sdata->local->sta_generation; 449 sinfo->generation = sdata->local->sta_generation;
450 450
451 sinfo->filled = STATION_INFO_INACTIVE_TIME | 451 sinfo->filled = STATION_INFO_INACTIVE_TIME |
452 STATION_INFO_RX_BYTES | 452 STATION_INFO_RX_BYTES64 |
453 STATION_INFO_TX_BYTES | 453 STATION_INFO_TX_BYTES64 |
454 STATION_INFO_RX_PACKETS | 454 STATION_INFO_RX_PACKETS |
455 STATION_INFO_TX_PACKETS | 455 STATION_INFO_TX_PACKETS |
456 STATION_INFO_TX_RETRIES | 456 STATION_INFO_TX_RETRIES |
@@ -467,10 +467,14 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
467 sinfo->connected_time = uptime.tv_sec - sta->last_connected; 467 sinfo->connected_time = uptime.tv_sec - sta->last_connected;
468 468
469 sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx); 469 sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx);
470 sinfo->tx_bytes = 0;
471 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
472 sinfo->tx_bytes += sta->tx_bytes[ac];
473 packets += sta->tx_packets[ac];
474 }
475 sinfo->tx_packets = packets;
470 sinfo->rx_bytes = sta->rx_bytes; 476 sinfo->rx_bytes = sta->rx_bytes;
471 sinfo->tx_bytes = sta->tx_bytes;
472 sinfo->rx_packets = sta->rx_packets; 477 sinfo->rx_packets = sta->rx_packets;
473 sinfo->tx_packets = sta->tx_packets;
474 sinfo->tx_retries = sta->tx_retry_count; 478 sinfo->tx_retries = sta->tx_retry_count;
475 sinfo->tx_failed = sta->tx_retry_failed; 479 sinfo->tx_failed = sta->tx_retry_failed;
476 sinfo->rx_dropped_misc = sta->rx_dropped; 480 sinfo->rx_dropped_misc = sta->rx_dropped;
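
In the sta_set_sinfo() hunks above, the station's TX byte and packet counters have become per-AC arrays (sta->tx_bytes[ac] and sta->tx_packets[ac], indexed up to IEEE80211_NUM_ACS), so the values reported to cfg80211 are now sums and the filled flags switch to the 64-bit STATION_INFO_*_BYTES64 variants, while the RX counters remain single totals. A small sketch of the summation, assuming only the field names visible in this hunk (counter widths follow the u64 accumulator used above):

/* Sketch: summing the per-AC TX counters introduced by this patch into the
 * single totals reported to cfg80211. Field names come from the hunk above;
 * anything beyond that about the struct layout is assumed.
 */
static void sum_tx_counters(const struct sta_info *sta,
			    u64 *tx_bytes, u64 *tx_packets)
{
	int ac;

	*tx_bytes = 0;
	*tx_packets = 0;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		*tx_bytes += sta->tx_bytes[ac];		/* per-queue byte count */
		*tx_packets += sta->tx_packets[ac];	/* per-queue frame count */
	}
}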
@@ -598,8 +602,8 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy,
598 data[i++] += sta->rx_fragments; \ 602 data[i++] += sta->rx_fragments; \
599 data[i++] += sta->rx_dropped; \ 603 data[i++] += sta->rx_dropped; \
600 \ 604 \
601 data[i++] += sta->tx_packets; \ 605 data[i++] += sinfo.tx_packets; \
602 data[i++] += sta->tx_bytes; \ 606 data[i++] += sinfo.tx_bytes; \
603 data[i++] += sta->tx_fragments; \ 607 data[i++] += sta->tx_fragments; \
604 data[i++] += sta->tx_filtered_count; \ 608 data[i++] += sta->tx_filtered_count; \
605 data[i++] += sta->tx_retry_failed; \ 609 data[i++] += sta->tx_retry_failed; \
@@ -621,13 +625,14 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy,
621 if (!(sta && !WARN_ON(sta->sdata->dev != dev))) 625 if (!(sta && !WARN_ON(sta->sdata->dev != dev)))
622 goto do_survey; 626 goto do_survey;
623 627
628 sinfo.filled = 0;
629 sta_set_sinfo(sta, &sinfo);
630
624 i = 0; 631 i = 0;
625 ADD_STA_STATS(sta); 632 ADD_STA_STATS(sta);
626 633
627 data[i++] = sta->sta_state; 634 data[i++] = sta->sta_state;
628 635
629 sinfo.filled = 0;
630 sta_set_sinfo(sta, &sinfo);
631 636
632 if (sinfo.filled & STATION_INFO_TX_BITRATE) 637 if (sinfo.filled & STATION_INFO_TX_BITRATE)
633 data[i] = 100000 * 638 data[i] = 100000 *
@@ -1035,9 +1040,12 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
1035 sta_info_flush_defer(vlan); 1040 sta_info_flush_defer(vlan);
1036 sta_info_flush_defer(sdata); 1041 sta_info_flush_defer(sdata);
1037 rcu_barrier(); 1042 rcu_barrier();
1038 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) 1043 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) {
1039 sta_info_flush_cleanup(vlan); 1044 sta_info_flush_cleanup(vlan);
1045 ieee80211_free_keys(vlan);
1046 }
1040 sta_info_flush_cleanup(sdata); 1047 sta_info_flush_cleanup(sdata);
1048 ieee80211_free_keys(sdata);
1041 1049
1042 sdata->vif.bss_conf.enable_beacon = false; 1050 sdata->vif.bss_conf.enable_beacon = false;
1043 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); 1051 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
@@ -1177,6 +1185,18 @@ static int sta_apply_parameters(struct ieee80211_local *local,
1177 mask |= BIT(NL80211_STA_FLAG_ASSOCIATED); 1185 mask |= BIT(NL80211_STA_FLAG_ASSOCIATED);
1178 if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) 1186 if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED))
1179 set |= BIT(NL80211_STA_FLAG_ASSOCIATED); 1187 set |= BIT(NL80211_STA_FLAG_ASSOCIATED);
1188 } else if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
1189 /*
1190 * TDLS -- everything follows authorized, but
1191 * only becoming authorized is possible, not
1192 * going back
1193 */
1194 if (set & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
1195 set |= BIT(NL80211_STA_FLAG_AUTHENTICATED) |
1196 BIT(NL80211_STA_FLAG_ASSOCIATED);
1197 mask |= BIT(NL80211_STA_FLAG_AUTHENTICATED) |
1198 BIT(NL80211_STA_FLAG_ASSOCIATED);
1199 }
1180 } 1200 }
1181 1201
1182 ret = sta_apply_auth_flags(local, sta, mask, set); 1202 ret = sta_apply_auth_flags(local, sta, mask, set);
@@ -1261,7 +1281,8 @@ static int sta_apply_parameters(struct ieee80211_local *local,
1261 if (ieee80211_vif_is_mesh(&sdata->vif)) { 1281 if (ieee80211_vif_is_mesh(&sdata->vif)) {
1262#ifdef CONFIG_MAC80211_MESH 1282#ifdef CONFIG_MAC80211_MESH
1263 u32 changed = 0; 1283 u32 changed = 0;
1264 if (sdata->u.mesh.security & IEEE80211_MESH_SEC_SECURED) { 1284
1285 if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) {
1265 switch (params->plink_state) { 1286 switch (params->plink_state) {
1266 case NL80211_PLINK_ESTAB: 1287 case NL80211_PLINK_ESTAB:
1267 if (sta->plink_state != NL80211_PLINK_ESTAB) 1288 if (sta->plink_state != NL80211_PLINK_ESTAB)
@@ -1292,15 +1313,18 @@ static int sta_apply_parameters(struct ieee80211_local *local,
1292 /* nothing */ 1313 /* nothing */
1293 break; 1314 break;
1294 } 1315 }
1295 } else { 1316 }
1296 switch (params->plink_action) { 1317
1297 case PLINK_ACTION_OPEN: 1318 switch (params->plink_action) {
1298 changed |= mesh_plink_open(sta); 1319 case NL80211_PLINK_ACTION_NO_ACTION:
1299 break; 1320 /* nothing */
1300 case PLINK_ACTION_BLOCK: 1321 break;
1301 changed |= mesh_plink_block(sta); 1322 case NL80211_PLINK_ACTION_OPEN:
1302 break; 1323 changed |= mesh_plink_open(sta);
1303 } 1324 break;
1325 case NL80211_PLINK_ACTION_BLOCK:
1326 changed |= mesh_plink_block(sta);
1327 break;
1304 } 1328 }
1305 1329
1306 if (params->local_pm) 1330 if (params->local_pm)
@@ -1346,8 +1370,10 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
1346 * defaults -- if userspace wants something else we'll 1370 * defaults -- if userspace wants something else we'll
1347 * change it accordingly in sta_apply_parameters() 1371 * change it accordingly in sta_apply_parameters()
1348 */ 1372 */
1349 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH); 1373 if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) {
1350 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC); 1374 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
1375 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
1376 }
1351 1377
1352 err = sta_apply_parameters(local, sta, params); 1378 err = sta_apply_parameters(local, sta, params);
1353 if (err) { 1379 if (err) {
@@ -1356,8 +1382,8 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
1356 } 1382 }
1357 1383
1358 /* 1384 /*
1359 * for TDLS, rate control should be initialized only when supported 1385 * for TDLS, rate control should be initialized only when
1360 * rates are known. 1386 * rates are known and station is marked authorized
1361 */ 1387 */
1362 if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) 1388 if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER))
1363 rate_control_rate_init(sta); 1389 rate_control_rate_init(sta);
@@ -1394,50 +1420,67 @@ static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
1394} 1420}
1395 1421
1396static int ieee80211_change_station(struct wiphy *wiphy, 1422static int ieee80211_change_station(struct wiphy *wiphy,
1397 struct net_device *dev, 1423 struct net_device *dev, u8 *mac,
1398 u8 *mac,
1399 struct station_parameters *params) 1424 struct station_parameters *params)
1400{ 1425{
1401 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1426 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1402 struct ieee80211_local *local = wiphy_priv(wiphy); 1427 struct ieee80211_local *local = wiphy_priv(wiphy);
1403 struct sta_info *sta; 1428 struct sta_info *sta;
1404 struct ieee80211_sub_if_data *vlansdata; 1429 struct ieee80211_sub_if_data *vlansdata;
1430 enum cfg80211_station_type statype;
1405 int err; 1431 int err;
1406 1432
1407 mutex_lock(&local->sta_mtx); 1433 mutex_lock(&local->sta_mtx);
1408 1434
1409 sta = sta_info_get_bss(sdata, mac); 1435 sta = sta_info_get_bss(sdata, mac);
1410 if (!sta) { 1436 if (!sta) {
1411 mutex_unlock(&local->sta_mtx); 1437 err = -ENOENT;
1412 return -ENOENT; 1438 goto out_err;
1413 } 1439 }
1414 1440
1415 /* in station mode, some updates are only valid with TDLS */ 1441 switch (sdata->vif.type) {
1416 if (sdata->vif.type == NL80211_IFTYPE_STATION && 1442 case NL80211_IFTYPE_MESH_POINT:
1417 (params->supported_rates || params->ht_capa || params->vht_capa || 1443 if (sdata->u.mesh.user_mpm)
1418 params->sta_modify_mask || 1444 statype = CFG80211_STA_MESH_PEER_USER;
1419 (params->sta_flags_mask & BIT(NL80211_STA_FLAG_WME))) && 1445 else
1420 !test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { 1446 statype = CFG80211_STA_MESH_PEER_KERNEL;
1421 mutex_unlock(&local->sta_mtx); 1447 break;
1422 return -EINVAL; 1448 case NL80211_IFTYPE_ADHOC:
1449 statype = CFG80211_STA_IBSS;
1450 break;
1451 case NL80211_IFTYPE_STATION:
1452 if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
1453 statype = CFG80211_STA_AP_STA;
1454 break;
1455 }
1456 if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
1457 statype = CFG80211_STA_TDLS_PEER_ACTIVE;
1458 else
1459 statype = CFG80211_STA_TDLS_PEER_SETUP;
1460 break;
1461 case NL80211_IFTYPE_AP:
1462 case NL80211_IFTYPE_AP_VLAN:
1463 statype = CFG80211_STA_AP_CLIENT;
1464 break;
1465 default:
1466 err = -EOPNOTSUPP;
1467 goto out_err;
1423 } 1468 }
1424 1469
1470 err = cfg80211_check_station_change(wiphy, params, statype);
1471 if (err)
1472 goto out_err;
1473
1425 if (params->vlan && params->vlan != sta->sdata->dev) { 1474 if (params->vlan && params->vlan != sta->sdata->dev) {
1426 bool prev_4addr = false; 1475 bool prev_4addr = false;
1427 bool new_4addr = false; 1476 bool new_4addr = false;
1428 1477
1429 vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); 1478 vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
1430 1479
1431 if (vlansdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
1432 vlansdata->vif.type != NL80211_IFTYPE_AP) {
1433 mutex_unlock(&local->sta_mtx);
1434 return -EINVAL;
1435 }
1436
1437 if (params->vlan->ieee80211_ptr->use_4addr) { 1480 if (params->vlan->ieee80211_ptr->use_4addr) {
1438 if (vlansdata->u.vlan.sta) { 1481 if (vlansdata->u.vlan.sta) {
1439 mutex_unlock(&local->sta_mtx); 1482 err = -EBUSY;
1440 return -EBUSY; 1483 goto out_err;
1441 } 1484 }
1442 1485
1443 rcu_assign_pointer(vlansdata->u.vlan.sta, sta); 1486 rcu_assign_pointer(vlansdata->u.vlan.sta, sta);
@@ -1464,12 +1507,12 @@ static int ieee80211_change_station(struct wiphy *wiphy,
1464 } 1507 }
1465 1508
1466 err = sta_apply_parameters(local, sta, params); 1509 err = sta_apply_parameters(local, sta, params);
1467 if (err) { 1510 if (err)
1468 mutex_unlock(&local->sta_mtx); 1511 goto out_err;
1469 return err;
1470 }
1471 1512
1472 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && params->supported_rates) 1513 /* When peer becomes authorized, init rate control as well */
1514 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
1515 test_sta_flag(sta, WLAN_STA_AUTHORIZED))
1473 rate_control_rate_init(sta); 1516 rate_control_rate_init(sta);
1474 1517
1475 mutex_unlock(&local->sta_mtx); 1518 mutex_unlock(&local->sta_mtx);
@@ -1479,7 +1522,11 @@ static int ieee80211_change_station(struct wiphy *wiphy,
1479 ieee80211_recalc_ps(local, -1); 1522 ieee80211_recalc_ps(local, -1);
1480 ieee80211_recalc_ps_vif(sdata); 1523 ieee80211_recalc_ps_vif(sdata);
1481 } 1524 }
1525
1482 return 0; 1526 return 0;
1527out_err:
1528 mutex_unlock(&local->sta_mtx);
1529 return err;
1483} 1530}
1484 1531
1485#ifdef CONFIG_MAC80211_MESH 1532#ifdef CONFIG_MAC80211_MESH
@@ -1687,6 +1734,7 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
1687 ifmsh->mesh_sp_id = setup->sync_method; 1734 ifmsh->mesh_sp_id = setup->sync_method;
1688 ifmsh->mesh_pp_id = setup->path_sel_proto; 1735 ifmsh->mesh_pp_id = setup->path_sel_proto;
1689 ifmsh->mesh_pm_id = setup->path_metric; 1736 ifmsh->mesh_pm_id = setup->path_metric;
1737 ifmsh->user_mpm = setup->user_mpm;
1690 ifmsh->security = IEEE80211_MESH_SEC_NONE; 1738 ifmsh->security = IEEE80211_MESH_SEC_NONE;
1691 if (setup->is_authenticated) 1739 if (setup->is_authenticated)
1692 ifmsh->security |= IEEE80211_MESH_SEC_AUTHED; 1740 ifmsh->security |= IEEE80211_MESH_SEC_AUTHED;
@@ -1730,8 +1778,11 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
1730 conf->dot11MeshTTL = nconf->dot11MeshTTL; 1778 conf->dot11MeshTTL = nconf->dot11MeshTTL;
1731 if (_chg_mesh_attr(NL80211_MESHCONF_ELEMENT_TTL, mask)) 1779 if (_chg_mesh_attr(NL80211_MESHCONF_ELEMENT_TTL, mask))
1732 conf->element_ttl = nconf->element_ttl; 1780 conf->element_ttl = nconf->element_ttl;
1733 if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask)) 1781 if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask)) {
1782 if (ifmsh->user_mpm)
1783 return -EBUSY;
1734 conf->auto_open_plinks = nconf->auto_open_plinks; 1784 conf->auto_open_plinks = nconf->auto_open_plinks;
1785 }
1735 if (_chg_mesh_attr(NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, mask)) 1786 if (_chg_mesh_attr(NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, mask))
1736 conf->dot11MeshNbrOffsetMaxNeighbor = 1787 conf->dot11MeshNbrOffsetMaxNeighbor =
1737 nconf->dot11MeshNbrOffsetMaxNeighbor; 1788 nconf->dot11MeshNbrOffsetMaxNeighbor;
@@ -2371,7 +2422,8 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
2371 struct ieee80211_sub_if_data *sdata, 2422 struct ieee80211_sub_if_data *sdata,
2372 struct ieee80211_channel *channel, 2423 struct ieee80211_channel *channel,
2373 unsigned int duration, u64 *cookie, 2424 unsigned int duration, u64 *cookie,
2374 struct sk_buff *txskb) 2425 struct sk_buff *txskb,
2426 enum ieee80211_roc_type type)
2375{ 2427{
2376 struct ieee80211_roc_work *roc, *tmp; 2428 struct ieee80211_roc_work *roc, *tmp;
2377 bool queued = false; 2429 bool queued = false;
@@ -2390,6 +2442,7 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
2390 roc->duration = duration; 2442 roc->duration = duration;
2391 roc->req_duration = duration; 2443 roc->req_duration = duration;
2392 roc->frame = txskb; 2444 roc->frame = txskb;
2445 roc->type = type;
2393 roc->mgmt_tx_cookie = (unsigned long)txskb; 2446 roc->mgmt_tx_cookie = (unsigned long)txskb;
2394 roc->sdata = sdata; 2447 roc->sdata = sdata;
2395 INIT_DELAYED_WORK(&roc->work, ieee80211_sw_roc_work); 2448 INIT_DELAYED_WORK(&roc->work, ieee80211_sw_roc_work);
@@ -2420,7 +2473,7 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
2420 if (!duration) 2473 if (!duration)
2421 duration = 10; 2474 duration = 10;
2422 2475
2423 ret = drv_remain_on_channel(local, sdata, channel, duration); 2476 ret = drv_remain_on_channel(local, sdata, channel, duration, type);
2424 if (ret) { 2477 if (ret) {
2425 kfree(roc); 2478 kfree(roc);
2426 return ret; 2479 return ret;
@@ -2439,10 +2492,13 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
2439 * 2492 *
2440 * If it hasn't started yet, just increase the duration 2493 * If it hasn't started yet, just increase the duration
2441 * and add the new one to the list of dependents. 2494 * and add the new one to the list of dependents.
2495 * If the type of the new ROC has higher priority, modify the
2496 * type of the previous one to match that of the new one.
2442 */ 2497 */
2443 if (!tmp->started) { 2498 if (!tmp->started) {
2444 list_add_tail(&roc->list, &tmp->dependents); 2499 list_add_tail(&roc->list, &tmp->dependents);
2445 tmp->duration = max(tmp->duration, roc->duration); 2500 tmp->duration = max(tmp->duration, roc->duration);
2501 tmp->type = max(tmp->type, roc->type);
2446 queued = true; 2502 queued = true;
2447 break; 2503 break;
2448 } 2504 }
@@ -2454,16 +2510,18 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
2454 /* 2510 /*
2455 * In the offloaded ROC case, if it hasn't begun, add 2511 * In the offloaded ROC case, if it hasn't begun, add
2456 * this new one to the dependent list to be handled 2512 * this new one to the dependent list to be handled
2457 * when the the master one begins. If it has begun, 2513 * when the master one begins. If it has begun,
2458 * check that there's still a minimum time left and 2514 * check that there's still a minimum time left and
2459 * if so, start this one, transmitting the frame, but 2515 * if so, start this one, transmitting the frame, but
2460 * add it to the list directly after this one with a 2516 * add it to the list directly after this one with
2461 * a reduced time so we'll ask the driver to execute 2517 * a reduced time so we'll ask the driver to execute
2462 * it right after finishing the previous one, in the 2518 * it right after finishing the previous one, in the
2463 * hope that it'll also be executed right afterwards, 2519 * hope that it'll also be executed right afterwards,
2464 * effectively extending the old one. 2520 * effectively extending the old one.
2465 * If there's no minimum time left, just add it to the 2521 * If there's no minimum time left, just add it to the
2466 * normal list. 2522 * normal list.
2523 * TODO: the ROC type is ignored here, assuming that it
2524 * is better to immediately use the current ROC.
2467 */ 2525 */
2468 if (!tmp->hw_begun) { 2526 if (!tmp->hw_begun) {
2469 list_add_tail(&roc->list, &tmp->dependents); 2527 list_add_tail(&roc->list, &tmp->dependents);
@@ -2557,7 +2615,8 @@ static int ieee80211_remain_on_channel(struct wiphy *wiphy,
2557 2615
2558 mutex_lock(&local->mtx); 2616 mutex_lock(&local->mtx);
2559 ret = ieee80211_start_roc_work(local, sdata, chan, 2617 ret = ieee80211_start_roc_work(local, sdata, chan,
2560 duration, cookie, NULL); 2618 duration, cookie, NULL,
2619 IEEE80211_ROC_TYPE_NORMAL);
2561 mutex_unlock(&local->mtx); 2620 mutex_unlock(&local->mtx);
2562 2621
2563 return ret; 2622 return ret;
@@ -2790,7 +2849,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
2790 2849
2791 /* This will handle all kinds of coalescing and immediate TX */ 2850 /* This will handle all kinds of coalescing and immediate TX */
2792 ret = ieee80211_start_roc_work(local, sdata, chan, 2851 ret = ieee80211_start_roc_work(local, sdata, chan,
2793 wait, cookie, skb); 2852 wait, cookie, skb,
2853 IEEE80211_ROC_TYPE_MGMT_TX);
2794 if (ret) 2854 if (ret)
2795 kfree_skb(skb); 2855 kfree_skb(skb);
2796 out_unlock: 2856 out_unlock:
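
The remain-on-channel hunks above thread an enum ieee80211_roc_type argument (IEEE80211_ROC_TYPE_NORMAL for a plain ROC request, IEEE80211_ROC_TYPE_MGMT_TX for one carrying a management frame) through ieee80211_start_roc_work() and drv_remain_on_channel(), and when a not-yet-started ROC absorbs a new one its type is upgraded with max(). That upgrade only makes sense if the enum values are ordered by priority; a sketch of the assumed ordering, since the enum itself is defined in the mac80211 headers and not shown in this section:

/* Assumed shape of the ROC type enum: a higher numeric value means higher
 * priority, so "tmp->type = max(tmp->type, roc->type)" promotes a pending
 * ROC when a management-frame TX is coalesced onto it. The real definition
 * lives in the mac80211 headers and is not part of this hunk.
 */
enum ieee80211_roc_type {
	IEEE80211_ROC_TYPE_NORMAL = 0,	/* plain remain-on-channel request */
	IEEE80211_ROC_TYPE_MGMT_TX,	/* ROC carrying a mgmt frame to transmit */
};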
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index c3a3082b72e5..1521cabad3d6 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -295,7 +295,7 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
295 char buf[50]; 295 char buf[50];
296 struct ieee80211_key *key; 296 struct ieee80211_key *key;
297 297
298 if (!sdata->debugfs.dir) 298 if (!sdata->vif.debugfs_dir)
299 return; 299 return;
300 300
301 lockdep_assert_held(&sdata->local->key_mtx); 301 lockdep_assert_held(&sdata->local->key_mtx);
@@ -311,7 +311,7 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
311 sprintf(buf, "../keys/%d", key->debugfs.cnt); 311 sprintf(buf, "../keys/%d", key->debugfs.cnt);
312 sdata->debugfs.default_unicast_key = 312 sdata->debugfs.default_unicast_key =
313 debugfs_create_symlink("default_unicast_key", 313 debugfs_create_symlink("default_unicast_key",
314 sdata->debugfs.dir, buf); 314 sdata->vif.debugfs_dir, buf);
315 } 315 }
316 316
317 if (sdata->debugfs.default_multicast_key) { 317 if (sdata->debugfs.default_multicast_key) {
@@ -325,7 +325,7 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
325 sprintf(buf, "../keys/%d", key->debugfs.cnt); 325 sprintf(buf, "../keys/%d", key->debugfs.cnt);
326 sdata->debugfs.default_multicast_key = 326 sdata->debugfs.default_multicast_key =
327 debugfs_create_symlink("default_multicast_key", 327 debugfs_create_symlink("default_multicast_key",
328 sdata->debugfs.dir, buf); 328 sdata->vif.debugfs_dir, buf);
329 } 329 }
330} 330}
331 331
@@ -334,7 +334,7 @@ void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata)
334 char buf[50]; 334 char buf[50];
335 struct ieee80211_key *key; 335 struct ieee80211_key *key;
336 336
337 if (!sdata->debugfs.dir) 337 if (!sdata->vif.debugfs_dir)
338 return; 338 return;
339 339
340 key = key_mtx_dereference(sdata->local, 340 key = key_mtx_dereference(sdata->local,
@@ -343,7 +343,7 @@ void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata)
343 sprintf(buf, "../keys/%d", key->debugfs.cnt); 343 sprintf(buf, "../keys/%d", key->debugfs.cnt);
344 sdata->debugfs.default_mgmt_key = 344 sdata->debugfs.default_mgmt_key =
345 debugfs_create_symlink("default_mgmt_key", 345 debugfs_create_symlink("default_mgmt_key",
346 sdata->debugfs.dir, buf); 346 sdata->vif.debugfs_dir, buf);
347 } else 347 } else
348 ieee80211_debugfs_key_remove_mgmt_default(sdata); 348 ieee80211_debugfs_key_remove_mgmt_default(sdata);
349} 349}
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 059bbb82e84f..ddb426867904 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -521,7 +521,7 @@ IEEE80211_IF_FILE(dot11MeshAwakeWindowDuration,
521#endif 521#endif
522 522
523#define DEBUGFS_ADD_MODE(name, mode) \ 523#define DEBUGFS_ADD_MODE(name, mode) \
524 debugfs_create_file(#name, mode, sdata->debugfs.dir, \ 524 debugfs_create_file(#name, mode, sdata->vif.debugfs_dir, \
525 sdata, &name##_ops); 525 sdata, &name##_ops);
526 526
527#define DEBUGFS_ADD(name) DEBUGFS_ADD_MODE(name, 0400) 527#define DEBUGFS_ADD(name) DEBUGFS_ADD_MODE(name, 0400)
@@ -577,7 +577,7 @@ static void add_mesh_files(struct ieee80211_sub_if_data *sdata)
577static void add_mesh_stats(struct ieee80211_sub_if_data *sdata) 577static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
578{ 578{
579 struct dentry *dir = debugfs_create_dir("mesh_stats", 579 struct dentry *dir = debugfs_create_dir("mesh_stats",
580 sdata->debugfs.dir); 580 sdata->vif.debugfs_dir);
581#define MESHSTATS_ADD(name)\ 581#define MESHSTATS_ADD(name)\
582 debugfs_create_file(#name, 0400, dir, sdata, &name##_ops); 582 debugfs_create_file(#name, 0400, dir, sdata, &name##_ops);
583 583
@@ -594,7 +594,7 @@ static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
594static void add_mesh_config(struct ieee80211_sub_if_data *sdata) 594static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
595{ 595{
596 struct dentry *dir = debugfs_create_dir("mesh_config", 596 struct dentry *dir = debugfs_create_dir("mesh_config",
597 sdata->debugfs.dir); 597 sdata->vif.debugfs_dir);
598 598
599#define MESHPARAMS_ADD(name) \ 599#define MESHPARAMS_ADD(name) \
600 debugfs_create_file(#name, 0600, dir, sdata, &name##_ops); 600 debugfs_create_file(#name, 0600, dir, sdata, &name##_ops);
@@ -631,7 +631,7 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
631 631
632static void add_files(struct ieee80211_sub_if_data *sdata) 632static void add_files(struct ieee80211_sub_if_data *sdata)
633{ 633{
634 if (!sdata->debugfs.dir) 634 if (!sdata->vif.debugfs_dir)
635 return; 635 return;
636 636
637 DEBUGFS_ADD(flags); 637 DEBUGFS_ADD(flags);
@@ -673,21 +673,21 @@ void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata)
673 char buf[10+IFNAMSIZ]; 673 char buf[10+IFNAMSIZ];
674 674
675 sprintf(buf, "netdev:%s", sdata->name); 675 sprintf(buf, "netdev:%s", sdata->name);
676 sdata->debugfs.dir = debugfs_create_dir(buf, 676 sdata->vif.debugfs_dir = debugfs_create_dir(buf,
677 sdata->local->hw.wiphy->debugfsdir); 677 sdata->local->hw.wiphy->debugfsdir);
678 if (sdata->debugfs.dir) 678 if (sdata->vif.debugfs_dir)
679 sdata->debugfs.subdir_stations = debugfs_create_dir("stations", 679 sdata->debugfs.subdir_stations = debugfs_create_dir("stations",
680 sdata->debugfs.dir); 680 sdata->vif.debugfs_dir);
681 add_files(sdata); 681 add_files(sdata);
682} 682}
683 683
684void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata) 684void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
685{ 685{
686 if (!sdata->debugfs.dir) 686 if (!sdata->vif.debugfs_dir)
687 return; 687 return;
688 688
689 debugfs_remove_recursive(sdata->debugfs.dir); 689 debugfs_remove_recursive(sdata->vif.debugfs_dir);
690 sdata->debugfs.dir = NULL; 690 sdata->vif.debugfs_dir = NULL;
691} 691}
692 692
693void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata) 693void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
@@ -695,7 +695,7 @@ void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
695 struct dentry *dir; 695 struct dentry *dir;
696 char buf[10 + IFNAMSIZ]; 696 char buf[10 + IFNAMSIZ];
697 697
698 dir = sdata->debugfs.dir; 698 dir = sdata->vif.debugfs_dir;
699 699
700 if (!dir) 700 if (!dir)
701 return; 701 return;
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index c7591f73dbc3..4f841fe559df 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -325,6 +325,36 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
325} 325}
326STA_OPS(ht_capa); 326STA_OPS(ht_capa);
327 327
328static ssize_t sta_vht_capa_read(struct file *file, char __user *userbuf,
329 size_t count, loff_t *ppos)
330{
331 char buf[128], *p = buf;
332 struct sta_info *sta = file->private_data;
333 struct ieee80211_sta_vht_cap *vhtc = &sta->sta.vht_cap;
334
335 p += scnprintf(p, sizeof(buf) + buf - p, "VHT %ssupported\n",
336 vhtc->vht_supported ? "" : "not ");
337 if (vhtc->vht_supported) {
338 p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.8x\n", vhtc->cap);
339
340 p += scnprintf(p, sizeof(buf)+buf-p, "RX MCS: %.4x\n",
341 le16_to_cpu(vhtc->vht_mcs.rx_mcs_map));
342 if (vhtc->vht_mcs.rx_highest)
343 p += scnprintf(p, sizeof(buf)+buf-p,
344 "MCS RX highest: %d Mbps\n",
345 le16_to_cpu(vhtc->vht_mcs.rx_highest));
346 p += scnprintf(p, sizeof(buf)+buf-p, "TX MCS: %.4x\n",
347 le16_to_cpu(vhtc->vht_mcs.tx_mcs_map));
348 if (vhtc->vht_mcs.tx_highest)
349 p += scnprintf(p, sizeof(buf)+buf-p,
350 "MCS TX highest: %d Mbps\n",
351 le16_to_cpu(vhtc->vht_mcs.tx_highest));
352 }
353
354 return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
355}
356STA_OPS(vht_capa);
357
328static ssize_t sta_current_tx_rate_read(struct file *file, char __user *userbuf, 358static ssize_t sta_current_tx_rate_read(struct file *file, char __user *userbuf,
329 size_t count, loff_t *ppos) 359 size_t count, loff_t *ppos)
330{ 360{
@@ -405,6 +435,7 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
405 DEBUGFS_ADD(dev); 435 DEBUGFS_ADD(dev);
406 DEBUGFS_ADD(last_signal); 436 DEBUGFS_ADD(last_signal);
407 DEBUGFS_ADD(ht_capa); 437 DEBUGFS_ADD(ht_capa);
438 DEBUGFS_ADD(vht_capa);
408 DEBUGFS_ADD(last_ack_signal); 439 DEBUGFS_ADD(last_ack_signal);
409 DEBUGFS_ADD(current_tx_rate); 440 DEBUGFS_ADD(current_tx_rate);
410 DEBUGFS_ADD(last_rx_rate); 441 DEBUGFS_ADD(last_rx_rate);
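The new sta_vht_capa_read() above uses the usual scnprintf() accumulation idiom: each call advances p by the number of characters actually stored, so the remaining-space expression "sizeof(buf) + buf - p" can never underflow even once the buffer fills. A minimal user-space model of that idiom follows (illustrative only; scnprintf_model() and the sample values are invented, and plain snprintf() has to be clamped because, unlike the kernel's scnprintf(), it returns the would-be length):

#include <stdarg.h>
#include <stdio.h>

/* scnprintf() look-alike: returns chars actually written, never negative,
 * never more than size - 1. */
static int scnprintf_model(char *buf, size_t size, const char *fmt, ...)
{
        va_list args;
        int n;

        if (!size)
                return 0;
        va_start(args, fmt);
        n = vsnprintf(buf, size, fmt, args);
        va_end(args);
        if (n < 0)
                return 0;
        return (size_t)n >= size ? (int)(size - 1) : n;
}

int main(void)
{
        char buf[128], *p = buf;

        /* Same accumulation pattern as sta_vht_capa_read(): p advances by
         * what was actually stored, so the remaining space stays >= 0. */
        p += scnprintf_model(p, sizeof(buf) + buf - p, "VHT %ssupported\n", "");
        p += scnprintf_model(p, sizeof(buf) + buf - p, "cap: %#.8x\n", 0x33c07932u);
        p += scnprintf_model(p, sizeof(buf) + buf - p, "RX MCS: %.4x\n", 0xfffau);

        fwrite(buf, 1, p - buf, stdout);
        return 0;
}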
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index ee56d0779d8b..169664c122e2 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -241,6 +241,22 @@ static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
241 return ret; 241 return ret;
242} 242}
243 243
244static inline void drv_set_multicast_list(struct ieee80211_local *local,
245 struct ieee80211_sub_if_data *sdata,
246 struct netdev_hw_addr_list *mc_list)
247{
248 bool allmulti = sdata->flags & IEEE80211_SDATA_ALLMULTI;
249
250 trace_drv_set_multicast_list(local, sdata, mc_list->count);
251
252 check_sdata_in_driver(sdata);
253
254 if (local->ops->set_multicast_list)
255 local->ops->set_multicast_list(&local->hw, &sdata->vif,
256 allmulti, mc_list);
257 trace_drv_return_void(local);
258}
259
244static inline void drv_configure_filter(struct ieee80211_local *local, 260static inline void drv_configure_filter(struct ieee80211_local *local,
245 unsigned int changed_flags, 261 unsigned int changed_flags,
246 unsigned int *total_flags, 262 unsigned int *total_flags,
@@ -531,43 +547,6 @@ static inline void drv_sta_remove_debugfs(struct ieee80211_local *local,
531 local->ops->sta_remove_debugfs(&local->hw, &sdata->vif, 547 local->ops->sta_remove_debugfs(&local->hw, &sdata->vif,
532 sta, dir); 548 sta, dir);
533} 549}
534
535static inline
536void drv_add_interface_debugfs(struct ieee80211_local *local,
537 struct ieee80211_sub_if_data *sdata)
538{
539 might_sleep();
540
541 check_sdata_in_driver(sdata);
542
543 if (!local->ops->add_interface_debugfs)
544 return;
545
546 local->ops->add_interface_debugfs(&local->hw, &sdata->vif,
547 sdata->debugfs.dir);
548}
549
550static inline
551void drv_remove_interface_debugfs(struct ieee80211_local *local,
552 struct ieee80211_sub_if_data *sdata)
553{
554 might_sleep();
555
556 check_sdata_in_driver(sdata);
557
558 if (!local->ops->remove_interface_debugfs)
559 return;
560
561 local->ops->remove_interface_debugfs(&local->hw, &sdata->vif,
562 sdata->debugfs.dir);
563}
564#else
565static inline
566void drv_add_interface_debugfs(struct ieee80211_local *local,
567 struct ieee80211_sub_if_data *sdata) {}
568static inline
569void drv_remove_interface_debugfs(struct ieee80211_local *local,
570 struct ieee80211_sub_if_data *sdata) {}
571#endif 550#endif
572 551
573static inline __must_check 552static inline __must_check
@@ -741,13 +720,14 @@ static inline void drv_rfkill_poll(struct ieee80211_local *local)
741 local->ops->rfkill_poll(&local->hw); 720 local->ops->rfkill_poll(&local->hw);
742} 721}
743 722
744static inline void drv_flush(struct ieee80211_local *local, bool drop) 723static inline void drv_flush(struct ieee80211_local *local,
724 u32 queues, bool drop)
745{ 725{
746 might_sleep(); 726 might_sleep();
747 727
748 trace_drv_flush(local, drop); 728 trace_drv_flush(local, queues, drop);
749 if (local->ops->flush) 729 if (local->ops->flush)
750 local->ops->flush(&local->hw, drop); 730 local->ops->flush(&local->hw, queues, drop);
751 trace_drv_return_void(local); 731 trace_drv_return_void(local);
752} 732}
753 733
@@ -787,15 +767,16 @@ static inline int drv_get_antenna(struct ieee80211_local *local,
787static inline int drv_remain_on_channel(struct ieee80211_local *local, 767static inline int drv_remain_on_channel(struct ieee80211_local *local,
788 struct ieee80211_sub_if_data *sdata, 768 struct ieee80211_sub_if_data *sdata,
789 struct ieee80211_channel *chan, 769 struct ieee80211_channel *chan,
790 unsigned int duration) 770 unsigned int duration,
771 enum ieee80211_roc_type type)
791{ 772{
792 int ret; 773 int ret;
793 774
794 might_sleep(); 775 might_sleep();
795 776
796 trace_drv_remain_on_channel(local, sdata, chan, duration); 777 trace_drv_remain_on_channel(local, sdata, chan, duration, type);
797 ret = local->ops->remain_on_channel(&local->hw, &sdata->vif, 778 ret = local->ops->remain_on_channel(&local->hw, &sdata->vif,
798 chan, duration); 779 chan, duration, type);
799 trace_drv_return_int(local, ret); 780 trace_drv_return_int(local, ret);
800 781
801 return ret; 782 return ret;
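drv_flush() now takes a queues bitmap in addition to the drop flag, so callers such as ieee80211_flush_queues() can flush only the queues they care about instead of everything. A toy user-space model of that selection follows (not mac80211 or driver code; NUM_QUEUES, pending[] and flush_one() are made up for illustration):

#include <stdio.h>

#define NUM_QUEUES 4    /* e.g. one queue per AC in this toy model */

static int pending[NUM_QUEUES] = { 3, 0, 7, 1 };

static void flush_one(int q, int drop)
{
        printf("queue %d: %s %d frames\n", q,
               drop ? "dropping" : "transmitting", pending[q]);
        pending[q] = 0;
}

static void flush_queues(unsigned long queues, int drop)
{
        int q;

        /* Only queues whose bit is set in the map are touched. */
        for (q = 0; q < NUM_QUEUES; q++)
                if (queues & (1UL << q))
                        flush_one(q, drop);
}

int main(void)
{
        flush_queues(1UL << 0 | 1UL << 2, 0);   /* just queues 0 and 2 */
        flush_queues(~0UL, 1);                  /* "all queues", in the spirit of IEEE80211_MAX_QUEUE_MAP */
        return 0;
}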
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 0db25d4bb223..af8cee06e4f3 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -40,13 +40,6 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
40 if (!ht_cap->ht_supported) 40 if (!ht_cap->ht_supported)
41 return; 41 return;
42 42
43 if (sdata->vif.type != NL80211_IFTYPE_STATION) {
44 /* AP interfaces call this code when adding new stations,
45 * so just silently ignore non station interfaces.
46 */
47 return;
48 }
49
50 /* NOTE: If you add more over-rides here, update register_hw 43 /* NOTE: If you add more over-rides here, update register_hw
51 * ht_capa_mod_msk logic in main.c as well. 44 * ht_capa_mod_msk logic in main.c as well.
52 * And, if this method can ever change ht_cap.ht_supported, fix 45 * And, if this method can ever change ht_cap.ht_supported, fix
@@ -97,7 +90,7 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
97 const struct ieee80211_ht_cap *ht_cap_ie, 90 const struct ieee80211_ht_cap *ht_cap_ie,
98 struct sta_info *sta) 91 struct sta_info *sta)
99{ 92{
100 struct ieee80211_sta_ht_cap ht_cap; 93 struct ieee80211_sta_ht_cap ht_cap, own_cap;
101 u8 ampdu_info, tx_mcs_set_cap; 94 u8 ampdu_info, tx_mcs_set_cap;
102 int i, max_tx_streams; 95 int i, max_tx_streams;
103 bool changed; 96 bool changed;
@@ -111,6 +104,18 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
111 104
112 ht_cap.ht_supported = true; 105 ht_cap.ht_supported = true;
113 106
107 own_cap = sband->ht_cap;
108
109 /*
110 * If user has specified capability over-rides, take care
111 * of that if the station we're setting up is the AP that
112 * we advertised a restricted capability set to. Override
113 * our own capabilities and then use those below.
114 */
115 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
116 !test_sta_flag(sta, WLAN_STA_TDLS_PEER))
117 ieee80211_apply_htcap_overrides(sdata, &own_cap);
118
114 /* 119 /*
115 * The bits listed in this expression should be 120 * The bits listed in this expression should be
116 * the same for the peer and us, if the station 121 * the same for the peer and us, if the station
@@ -118,21 +123,20 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
118 * we mask them out. 123 * we mask them out.
119 */ 124 */
120 ht_cap.cap = le16_to_cpu(ht_cap_ie->cap_info) & 125 ht_cap.cap = le16_to_cpu(ht_cap_ie->cap_info) &
121 (sband->ht_cap.cap | 126 (own_cap.cap | ~(IEEE80211_HT_CAP_LDPC_CODING |
122 ~(IEEE80211_HT_CAP_LDPC_CODING | 127 IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
123 IEEE80211_HT_CAP_SUP_WIDTH_20_40 | 128 IEEE80211_HT_CAP_GRN_FLD |
124 IEEE80211_HT_CAP_GRN_FLD | 129 IEEE80211_HT_CAP_SGI_20 |
125 IEEE80211_HT_CAP_SGI_20 | 130 IEEE80211_HT_CAP_SGI_40 |
126 IEEE80211_HT_CAP_SGI_40 | 131 IEEE80211_HT_CAP_DSSSCCK40));
127 IEEE80211_HT_CAP_DSSSCCK40));
128 132
129 /* 133 /*
130 * The STBC bits are asymmetric -- if we don't have 134 * The STBC bits are asymmetric -- if we don't have
131 * TX then mask out the peer's RX and vice versa. 135 * TX then mask out the peer's RX and vice versa.
132 */ 136 */
133 if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC)) 137 if (!(own_cap.cap & IEEE80211_HT_CAP_TX_STBC))
134 ht_cap.cap &= ~IEEE80211_HT_CAP_RX_STBC; 138 ht_cap.cap &= ~IEEE80211_HT_CAP_RX_STBC;
135 if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)) 139 if (!(own_cap.cap & IEEE80211_HT_CAP_RX_STBC))
136 ht_cap.cap &= ~IEEE80211_HT_CAP_TX_STBC; 140 ht_cap.cap &= ~IEEE80211_HT_CAP_TX_STBC;
137 141
138 ampdu_info = ht_cap_ie->ampdu_params_info; 142 ampdu_info = ht_cap_ie->ampdu_params_info;
@@ -142,7 +146,7 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
142 (ampdu_info & IEEE80211_HT_AMPDU_PARM_DENSITY) >> 2; 146 (ampdu_info & IEEE80211_HT_AMPDU_PARM_DENSITY) >> 2;
143 147
144 /* own MCS TX capabilities */ 148 /* own MCS TX capabilities */
145 tx_mcs_set_cap = sband->ht_cap.mcs.tx_params; 149 tx_mcs_set_cap = own_cap.mcs.tx_params;
146 150
147 /* Copy peer MCS TX capabilities, the driver might need them. */ 151 /* Copy peer MCS TX capabilities, the driver might need them. */
148 ht_cap.mcs.tx_params = ht_cap_ie->mcs.tx_params; 152 ht_cap.mcs.tx_params = ht_cap_ie->mcs.tx_params;
@@ -168,26 +172,20 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
168 */ 172 */
169 for (i = 0; i < max_tx_streams; i++) 173 for (i = 0; i < max_tx_streams; i++)
170 ht_cap.mcs.rx_mask[i] = 174 ht_cap.mcs.rx_mask[i] =
171 sband->ht_cap.mcs.rx_mask[i] & ht_cap_ie->mcs.rx_mask[i]; 175 own_cap.mcs.rx_mask[i] & ht_cap_ie->mcs.rx_mask[i];
172 176
173 if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION) 177 if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION)
174 for (i = IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE; 178 for (i = IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE;
175 i < IEEE80211_HT_MCS_MASK_LEN; i++) 179 i < IEEE80211_HT_MCS_MASK_LEN; i++)
176 ht_cap.mcs.rx_mask[i] = 180 ht_cap.mcs.rx_mask[i] =
177 sband->ht_cap.mcs.rx_mask[i] & 181 own_cap.mcs.rx_mask[i] &
178 ht_cap_ie->mcs.rx_mask[i]; 182 ht_cap_ie->mcs.rx_mask[i];
179 183
180 /* handle MCS rate 32 too */ 184 /* handle MCS rate 32 too */
181 if (sband->ht_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1) 185 if (own_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1)
182 ht_cap.mcs.rx_mask[32/8] |= 1; 186 ht_cap.mcs.rx_mask[32/8] |= 1;
183 187
184 apply: 188 apply:
185 /*
186 * If user has specified capability over-rides, take care
187 * of that here.
188 */
189 ieee80211_apply_htcap_overrides(sdata, &ht_cap);
190
191 changed = memcmp(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap)); 189 changed = memcmp(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap));
192 190
193 memcpy(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap)); 191 memcpy(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap));
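The masking expression in ieee80211_ht_cap_ie_to_sta_ht_cap() above keeps the listed "symmetric" bits (40 MHz, SGI, green-field, LDPC, DSSS/CCK-40) only when both the peer and our own, possibly user-overridden, capabilities advertise them, while every other bit is taken from the peer as advertised. A toy model of that negotiation follows (not the kernel code; the bit names and values are arbitrary):

#include <stdio.h>

#define CAP_SGI_20      0x0001  /* symmetric */
#define CAP_SGI_40      0x0002  /* symmetric */
#define CAP_40MHZ       0x0004  /* symmetric */
#define CAP_MAX_AMSDU   0x0800  /* peer-only property */

#define SYMMETRIC       (CAP_SGI_20 | CAP_SGI_40 | CAP_40MHZ)

static unsigned int negotiate(unsigned int own, unsigned int peer)
{
        /* Symmetric bits survive only if set on both sides; the rest pass
         * through from the peer untouched. */
        return peer & (own | ~SYMMETRIC);
}

int main(void)
{
        unsigned int own = CAP_SGI_20 | CAP_40MHZ;
        unsigned int peer = CAP_SGI_20 | CAP_SGI_40 | CAP_MAX_AMSDU;

        /* SGI_40 is dropped (we lack it), SGI_20 survives (both have it),
         * MAX_AMSDU passes through (not a symmetric bit). */
        printf("negotiated: %#x\n", negotiate(own, peer));
        return 0;
}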
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 40b71dfcc79d..539d4a11b47b 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -985,36 +985,9 @@ static void ieee80211_ibss_timer(unsigned long data)
985{ 985{
986 struct ieee80211_sub_if_data *sdata = 986 struct ieee80211_sub_if_data *sdata =
987 (struct ieee80211_sub_if_data *) data; 987 (struct ieee80211_sub_if_data *) data;
988 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
989 struct ieee80211_local *local = sdata->local;
990
991 if (local->quiescing) {
992 ifibss->timer_running = true;
993 return;
994 }
995
996 ieee80211_queue_work(&local->hw, &sdata->work);
997}
998
999#ifdef CONFIG_PM
1000void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata)
1001{
1002 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
1003 988
1004 if (del_timer_sync(&ifibss->timer)) 989 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
1005 ifibss->timer_running = true;
1006}
1007
1008void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata)
1009{
1010 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
1011
1012 if (ifibss->timer_running) {
1013 add_timer(&ifibss->timer);
1014 ifibss->timer_running = false;
1015 }
1016} 990}
1017#endif
1018 991
1019void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata) 992void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata)
1020{ 993{
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 388580a1bada..ae2d1754b792 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -315,6 +315,7 @@ struct ieee80211_roc_work {
315 u32 duration, req_duration; 315 u32 duration, req_duration;
316 struct sk_buff *frame; 316 struct sk_buff *frame;
317 u64 cookie, mgmt_tx_cookie; 317 u64 cookie, mgmt_tx_cookie;
318 enum ieee80211_roc_type type;
318}; 319};
319 320
320/* flags used in struct ieee80211_if_managed.flags */ 321/* flags used in struct ieee80211_if_managed.flags */
@@ -400,7 +401,6 @@ struct ieee80211_if_managed {
400 401
401 u16 aid; 402 u16 aid;
402 403
403 unsigned long timers_running; /* used for quiesce/restart */
404 bool powersave; /* powersave requested for this iface */ 404 bool powersave; /* powersave requested for this iface */
405 bool broken_ap; /* AP is broken -- turn off powersave */ 405 bool broken_ap; /* AP is broken -- turn off powersave */
406 u8 dtim_period; 406 u8 dtim_period;
@@ -479,6 +479,8 @@ struct ieee80211_if_managed {
479 479
480 struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */ 480 struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */
481 struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */ 481 struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */
482 struct ieee80211_vht_cap vht_capa; /* configured VHT overrides */
483 struct ieee80211_vht_cap vht_capa_mask; /* Valid parts of vht_capa */
482}; 484};
483 485
484struct ieee80211_if_ibss { 486struct ieee80211_if_ibss {
@@ -490,8 +492,6 @@ struct ieee80211_if_ibss {
490 492
491 u32 basic_rates; 493 u32 basic_rates;
492 494
493 bool timer_running;
494
495 bool fixed_bssid; 495 bool fixed_bssid;
496 bool fixed_channel; 496 bool fixed_channel;
497 bool privacy; 497 bool privacy;
@@ -543,8 +543,6 @@ struct ieee80211_if_mesh {
543 struct timer_list mesh_path_timer; 543 struct timer_list mesh_path_timer;
544 struct timer_list mesh_path_root_timer; 544 struct timer_list mesh_path_root_timer;
545 545
546 unsigned long timers_running;
547
548 unsigned long wrkq_flags; 546 unsigned long wrkq_flags;
549 547
550 u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN]; 548 u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN];
@@ -590,6 +588,7 @@ struct ieee80211_if_mesh {
590 IEEE80211_MESH_SEC_AUTHED = 0x1, 588 IEEE80211_MESH_SEC_AUTHED = 0x1,
591 IEEE80211_MESH_SEC_SECURED = 0x2, 589 IEEE80211_MESH_SEC_SECURED = 0x2,
592 } security; 590 } security;
591 bool user_mpm;
593 /* Extensible Synchronization Framework */ 592 /* Extensible Synchronization Framework */
594 const struct ieee80211_mesh_sync_ops *sync_ops; 593 const struct ieee80211_mesh_sync_ops *sync_ops;
595 s64 sync_offset_clockdrift_max; 594 s64 sync_offset_clockdrift_max;
@@ -682,6 +681,8 @@ struct ieee80211_sub_if_data {
682 681
683 /* count for keys needing tailroom space allocation */ 682 /* count for keys needing tailroom space allocation */
684 int crypto_tx_tailroom_needed_cnt; 683 int crypto_tx_tailroom_needed_cnt;
684 int crypto_tx_tailroom_pending_dec;
685 struct delayed_work dec_tailroom_needed_wk;
685 686
686 struct net_device *dev; 687 struct net_device *dev;
687 struct ieee80211_local *local; 688 struct ieee80211_local *local;
@@ -757,7 +758,6 @@ struct ieee80211_sub_if_data {
757 758
758#ifdef CONFIG_MAC80211_DEBUGFS 759#ifdef CONFIG_MAC80211_DEBUGFS
759 struct { 760 struct {
760 struct dentry *dir;
761 struct dentry *subdir_stations; 761 struct dentry *subdir_stations;
762 struct dentry *default_unicast_key; 762 struct dentry *default_unicast_key;
763 struct dentry *default_multicast_key; 763 struct dentry *default_multicast_key;
@@ -765,10 +765,6 @@ struct ieee80211_sub_if_data {
765 } debugfs; 765 } debugfs;
766#endif 766#endif
767 767
768#ifdef CONFIG_PM
769 struct ieee80211_bss_conf suspend_bss_conf;
770#endif
771
772 /* must be last, dynamically sized area in this! */ 768 /* must be last, dynamically sized area in this! */
773 struct ieee80211_vif vif; 769 struct ieee80211_vif vif;
774}; 770};
@@ -803,11 +799,6 @@ enum sdata_queue_type {
803enum { 799enum {
804 IEEE80211_RX_MSG = 1, 800 IEEE80211_RX_MSG = 1,
805 IEEE80211_TX_STATUS_MSG = 2, 801 IEEE80211_TX_STATUS_MSG = 2,
806 IEEE80211_EOSP_MSG = 3,
807};
808
809struct skb_eosp_msg_data {
810 u8 sta[ETH_ALEN], iface[ETH_ALEN];
811}; 802};
812 803
813enum queue_stop_reason { 804enum queue_stop_reason {
@@ -818,6 +809,7 @@ enum queue_stop_reason {
818 IEEE80211_QUEUE_STOP_REASON_SUSPEND, 809 IEEE80211_QUEUE_STOP_REASON_SUSPEND,
819 IEEE80211_QUEUE_STOP_REASON_SKB_ADD, 810 IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
820 IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL, 811 IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
812 IEEE80211_QUEUE_STOP_REASON_FLUSH,
821}; 813};
822 814
823#ifdef CONFIG_MAC80211_LEDS 815#ifdef CONFIG_MAC80211_LEDS
@@ -1136,11 +1128,6 @@ struct ieee80211_local {
1136 1128
1137 struct ieee80211_sub_if_data __rcu *p2p_sdata; 1129 struct ieee80211_sub_if_data __rcu *p2p_sdata;
1138 1130
1139 /* dummy netdev for use w/ NAPI */
1140 struct net_device napi_dev;
1141
1142 struct napi_struct napi;
1143
1144 /* virtual monitor interface */ 1131 /* virtual monitor interface */
1145 struct ieee80211_sub_if_data __rcu *monitor_sdata; 1132 struct ieee80211_sub_if_data __rcu *monitor_sdata;
1146 struct cfg80211_chan_def monitor_chandef; 1133 struct cfg80211_chan_def monitor_chandef;
@@ -1283,8 +1270,6 @@ void
1283ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, 1270ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1284 const struct ieee80211_channel_sw_ie *sw_elem, 1271 const struct ieee80211_channel_sw_ie *sw_elem,
1285 struct ieee80211_bss *bss, u64 timestamp); 1272 struct ieee80211_bss *bss, u64 timestamp);
1286void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata);
1287void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
1288void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata); 1273void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata);
1289void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, 1274void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1290 struct sk_buff *skb); 1275 struct sk_buff *skb);
@@ -1302,8 +1287,6 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
1302int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, 1287int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
1303 struct cfg80211_ibss_params *params); 1288 struct cfg80211_ibss_params *params);
1304int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata); 1289int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata);
1305void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata);
1306void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata);
1307void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata); 1290void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata);
1308void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, 1291void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1309 struct sk_buff *skb); 1292 struct sk_buff *skb);
@@ -1441,6 +1424,8 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta);
1441void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, 1424void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
1442 struct sta_info *sta, u8 opmode, 1425 struct sta_info *sta, u8 opmode,
1443 enum ieee80211_band band, bool nss_only); 1426 enum ieee80211_band band, bool nss_only);
1427void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
1428 struct ieee80211_sta_vht_cap *vht_cap);
1444 1429
1445/* Spectrum management */ 1430/* Spectrum management */
1446void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, 1431void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
@@ -1538,8 +1523,10 @@ void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
1538 struct ieee80211_hdr *hdr, bool ack); 1523 struct ieee80211_hdr *hdr, bool ack);
1539 1524
1540void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, 1525void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
1526 unsigned long queues,
1541 enum queue_stop_reason reason); 1527 enum queue_stop_reason reason);
1542void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, 1528void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
1529 unsigned long queues,
1543 enum queue_stop_reason reason); 1530 enum queue_stop_reason reason);
1544void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, 1531void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
1545 enum queue_stop_reason reason); 1532 enum queue_stop_reason reason);
@@ -1556,6 +1543,8 @@ static inline void ieee80211_add_pending_skbs(struct ieee80211_local *local,
1556{ 1543{
1557 ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL); 1544 ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
1558} 1545}
1546void ieee80211_flush_queues(struct ieee80211_local *local,
1547 struct ieee80211_sub_if_data *sdata);
1559 1548
1560void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, 1549void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1561 u16 transaction, u16 auth_alg, u16 status, 1550 u16 transaction, u16 auth_alg, u16 status,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 3bfe2612c8c2..2a3c1e9bdf25 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -92,7 +92,7 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local)
92 if (local->hw.conf.flags & IEEE80211_CONF_IDLE) 92 if (local->hw.conf.flags & IEEE80211_CONF_IDLE)
93 return 0; 93 return 0;
94 94
95 drv_flush(local, false); 95 ieee80211_flush_queues(local, NULL);
96 96
97 local->hw.conf.flags |= IEEE80211_CONF_IDLE; 97 local->hw.conf.flags |= IEEE80211_CONF_IDLE;
98 return IEEE80211_CONF_CHANGE_IDLE; 98 return IEEE80211_CONF_CHANGE_IDLE;
@@ -488,8 +488,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
488 res = drv_start(local); 488 res = drv_start(local);
489 if (res) 489 if (res)
490 goto err_del_bss; 490 goto err_del_bss;
491 if (local->ops->napi_poll)
492 napi_enable(&local->napi);
493 /* we're brought up, everything changes */ 491 /* we're brought up, everything changes */
494 hw_reconf_flags = ~0; 492 hw_reconf_flags = ~0;
495 ieee80211_led_radio(local, true); 493 ieee80211_led_radio(local, true);
@@ -562,8 +560,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
562 goto err_del_interface; 560 goto err_del_interface;
563 } 561 }
564 562
565 drv_add_interface_debugfs(local, sdata);
566
567 if (sdata->vif.type == NL80211_IFTYPE_AP) { 563 if (sdata->vif.type == NL80211_IFTYPE_AP) {
568 local->fif_pspoll++; 564 local->fif_pspoll++;
569 local->fif_probe_req++; 565 local->fif_probe_req++;
@@ -841,15 +837,15 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
841 rcu_barrier(); 837 rcu_barrier();
842 sta_info_flush_cleanup(sdata); 838 sta_info_flush_cleanup(sdata);
843 839
844 skb_queue_purge(&sdata->skb_queue);
845
846 /* 840 /*
847 * Free all remaining keys, there shouldn't be any, 841 * Free all remaining keys, there shouldn't be any,
848 * except maybe group keys in AP more or WDS? 842 * except maybe in WDS mode?
849 */ 843 */
850 ieee80211_free_keys(sdata); 844 ieee80211_free_keys(sdata);
851 845
852 drv_remove_interface_debugfs(local, sdata); 846 /* fall through */
847 case NL80211_IFTYPE_AP:
848 skb_queue_purge(&sdata->skb_queue);
853 849
854 if (going_down) 850 if (going_down)
855 drv_remove_interface(local, sdata); 851 drv_remove_interface(local, sdata);
@@ -860,8 +856,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
860 ieee80211_recalc_ps(local, -1); 856 ieee80211_recalc_ps(local, -1);
861 857
862 if (local->open_count == 0) { 858 if (local->open_count == 0) {
863 if (local->ops->napi_poll)
864 napi_disable(&local->napi);
865 ieee80211_clear_tx_pending(local); 859 ieee80211_clear_tx_pending(local);
866 ieee80211_stop_device(local); 860 ieee80211_stop_device(local);
867 861
@@ -924,6 +918,17 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
924 atomic_dec(&local->iff_promiscs); 918 atomic_dec(&local->iff_promiscs);
925 sdata->flags ^= IEEE80211_SDATA_PROMISC; 919 sdata->flags ^= IEEE80211_SDATA_PROMISC;
926 } 920 }
921
922 /*
923 * TODO: If somebody needs this on AP interfaces,
924 * it can be enabled easily but multicast
925 * addresses from VLANs need to be synced.
926 */
927 if (sdata->vif.type != NL80211_IFTYPE_MONITOR &&
928 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
929 sdata->vif.type != NL80211_IFTYPE_AP)
930 drv_set_multicast_list(local, sdata, &dev->mc);
931
927 spin_lock_bh(&local->filter_lock); 932 spin_lock_bh(&local->filter_lock);
928 __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len); 933 __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len);
929 spin_unlock_bh(&local->filter_lock); 934 spin_unlock_bh(&local->filter_lock);
@@ -1550,6 +1555,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1550 INIT_WORK(&sdata->cleanup_stations_wk, ieee80211_cleanup_sdata_stas_wk); 1555 INIT_WORK(&sdata->cleanup_stations_wk, ieee80211_cleanup_sdata_stas_wk);
1551 INIT_DELAYED_WORK(&sdata->dfs_cac_timer_work, 1556 INIT_DELAYED_WORK(&sdata->dfs_cac_timer_work,
1552 ieee80211_dfs_cac_timer_work); 1557 ieee80211_dfs_cac_timer_work);
1558 INIT_DELAYED_WORK(&sdata->dec_tailroom_needed_wk,
1559 ieee80211_delayed_tailroom_dec);
1553 1560
1554 for (i = 0; i < IEEE80211_NUM_BANDS; i++) { 1561 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
1555 struct ieee80211_supported_band *sband; 1562 struct ieee80211_supported_band *sband;
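The new drv_set_multicast_list() call in ieee80211_set_multicast_list() above hands the driver either an "accept all multicast" hint (IEEE80211_SDATA_ALLMULTI) or the interface's exact list from &dev->mc. A toy user-space model of what a driver might do with that pair follows (illustrative only; driver_set_multicast(), struct mc_list and the addresses are invented):

#include <stdio.h>

struct mc_list {
        const char *const *addrs;
        int count;
};

static void driver_set_multicast(int allmulti, const struct mc_list *mc)
{
        int i;

        if (allmulti) {
                printf("hw filter: pass all multicast\n");
                return;
        }
        printf("hw filter: %d exact multicast entries\n", mc->count);
        for (i = 0; i < mc->count; i++)
                printf("  %s\n", mc->addrs[i]);
}

int main(void)
{
        static const char *const addrs[] = {
                "33:33:00:00:00:01",    /* IPv6 all-nodes */
                "01:00:5e:00:00:01",    /* IPv4 all-hosts */
        };
        struct mc_list mc = { addrs, 2 };

        driver_set_multicast(0, &mc);   /* exact list */
        driver_set_multicast(1, &mc);   /* allmulti set on the vif */
        return 0;
}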
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index ef252eb58c36..67059b88fea5 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -248,11 +248,11 @@ void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata,
248} 248}
249 249
250 250
251static void __ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, 251static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
252 struct sta_info *sta, 252 struct sta_info *sta,
253 bool pairwise, 253 bool pairwise,
254 struct ieee80211_key *old, 254 struct ieee80211_key *old,
255 struct ieee80211_key *new) 255 struct ieee80211_key *new)
256{ 256{
257 int idx; 257 int idx;
258 bool defunikey, defmultikey, defmgmtkey; 258 bool defunikey, defmultikey, defmgmtkey;
@@ -397,7 +397,41 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
397 return key; 397 return key;
398} 398}
399 399
400static void __ieee80211_key_destroy(struct ieee80211_key *key) 400static void ieee80211_key_free_common(struct ieee80211_key *key)
401{
402 if (key->conf.cipher == WLAN_CIPHER_SUITE_CCMP)
403 ieee80211_aes_key_free(key->u.ccmp.tfm);
404 if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC)
405 ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
406 kfree(key);
407}
408
409static void __ieee80211_key_destroy(struct ieee80211_key *key,
410 bool delay_tailroom)
411{
412 if (key->local)
413 ieee80211_key_disable_hw_accel(key);
414
415 if (key->local) {
416 struct ieee80211_sub_if_data *sdata = key->sdata;
417
418 ieee80211_debugfs_key_remove(key);
419
420 if (delay_tailroom) {
421 /* see ieee80211_delayed_tailroom_dec */
422 sdata->crypto_tx_tailroom_pending_dec++;
423 schedule_delayed_work(&sdata->dec_tailroom_needed_wk,
424 HZ/2);
425 } else {
426 sdata->crypto_tx_tailroom_needed_cnt--;
427 }
428 }
429
430 ieee80211_key_free_common(key);
431}
432
433static void ieee80211_key_destroy(struct ieee80211_key *key,
434 bool delay_tailroom)
401{ 435{
402 if (!key) 436 if (!key)
403 return; 437 return;
@@ -408,19 +442,13 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
408 */ 442 */
409 synchronize_net(); 443 synchronize_net();
410 444
411 if (key->local) 445 __ieee80211_key_destroy(key, delay_tailroom);
412 ieee80211_key_disable_hw_accel(key); 446}
413
414 if (key->conf.cipher == WLAN_CIPHER_SUITE_CCMP)
415 ieee80211_aes_key_free(key->u.ccmp.tfm);
416 if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC)
417 ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
418 if (key->local) {
419 ieee80211_debugfs_key_remove(key);
420 key->sdata->crypto_tx_tailroom_needed_cnt--;
421 }
422 447
423 kfree(key); 448void ieee80211_key_free_unused(struct ieee80211_key *key)
449{
450 WARN_ON(key->sdata || key->local);
451 ieee80211_key_free_common(key);
424} 452}
425 453
426int ieee80211_key_link(struct ieee80211_key *key, 454int ieee80211_key_link(struct ieee80211_key *key,
@@ -440,32 +468,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
440 key->sdata = sdata; 468 key->sdata = sdata;
441 key->sta = sta; 469 key->sta = sta;
442 470
443 if (sta) {
444 /*
445 * some hardware cannot handle TKIP with QoS, so
446 * we indicate whether QoS could be in use.
447 */
448 if (test_sta_flag(sta, WLAN_STA_WME))
449 key->conf.flags |= IEEE80211_KEY_FLAG_WMM_STA;
450 } else {
451 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
452 struct sta_info *ap;
453
454 /*
455 * We're getting a sta pointer in, so must be under
456 * appropriate locking for sta_info_get().
457 */
458
459 /* same here, the AP could be using QoS */
460 ap = sta_info_get(key->sdata, key->sdata->u.mgd.bssid);
461 if (ap) {
462 if (test_sta_flag(ap, WLAN_STA_WME))
463 key->conf.flags |=
464 IEEE80211_KEY_FLAG_WMM_STA;
465 }
466 }
467 }
468
469 mutex_lock(&sdata->local->key_mtx); 471 mutex_lock(&sdata->local->key_mtx);
470 472
471 if (sta && pairwise) 473 if (sta && pairwise)
@@ -477,19 +479,22 @@ int ieee80211_key_link(struct ieee80211_key *key,
477 479
478 increment_tailroom_need_count(sdata); 480 increment_tailroom_need_count(sdata);
479 481
480 __ieee80211_key_replace(sdata, sta, pairwise, old_key, key); 482 ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
481 __ieee80211_key_destroy(old_key); 483 ieee80211_key_destroy(old_key, true);
482 484
483 ieee80211_debugfs_key_add(key); 485 ieee80211_debugfs_key_add(key);
484 486
485 ret = ieee80211_key_enable_hw_accel(key); 487 ret = ieee80211_key_enable_hw_accel(key);
486 488
489 if (ret)
490 ieee80211_key_free(key, true);
491
487 mutex_unlock(&sdata->local->key_mtx); 492 mutex_unlock(&sdata->local->key_mtx);
488 493
489 return ret; 494 return ret;
490} 495}
491 496
492void __ieee80211_key_free(struct ieee80211_key *key) 497void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom)
493{ 498{
494 if (!key) 499 if (!key)
495 return; 500 return;
@@ -498,18 +503,10 @@ void __ieee80211_key_free(struct ieee80211_key *key)
498 * Replace key with nothingness if it was ever used. 503 * Replace key with nothingness if it was ever used.
499 */ 504 */
500 if (key->sdata) 505 if (key->sdata)
501 __ieee80211_key_replace(key->sdata, key->sta, 506 ieee80211_key_replace(key->sdata, key->sta,
502 key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, 507 key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE,
503 key, NULL); 508 key, NULL);
504 __ieee80211_key_destroy(key); 509 ieee80211_key_destroy(key, delay_tailroom);
505}
506
507void ieee80211_key_free(struct ieee80211_local *local,
508 struct ieee80211_key *key)
509{
510 mutex_lock(&local->key_mtx);
511 __ieee80211_key_free(key);
512 mutex_unlock(&local->key_mtx);
513} 510}
514 511
515void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata) 512void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
@@ -566,36 +563,109 @@ void ieee80211_iter_keys(struct ieee80211_hw *hw,
566} 563}
567EXPORT_SYMBOL(ieee80211_iter_keys); 564EXPORT_SYMBOL(ieee80211_iter_keys);
568 565
569void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata) 566void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata)
570{ 567{
571 struct ieee80211_key *key; 568 struct ieee80211_key *key, *tmp;
569 LIST_HEAD(keys);
572 570
573 ASSERT_RTNL(); 571 cancel_delayed_work_sync(&sdata->dec_tailroom_needed_wk);
574 572
575 mutex_lock(&sdata->local->key_mtx); 573 mutex_lock(&sdata->local->key_mtx);
576 574
577 list_for_each_entry(key, &sdata->key_list, list) 575 sdata->crypto_tx_tailroom_needed_cnt -=
578 ieee80211_key_disable_hw_accel(key); 576 sdata->crypto_tx_tailroom_pending_dec;
577 sdata->crypto_tx_tailroom_pending_dec = 0;
578
579 ieee80211_debugfs_key_remove_mgmt_default(sdata);
580
581 list_for_each_entry_safe(key, tmp, &sdata->key_list, list) {
582 ieee80211_key_replace(key->sdata, key->sta,
583 key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE,
584 key, NULL);
585 list_add_tail(&key->list, &keys);
586 }
587
588 ieee80211_debugfs_key_update_default(sdata);
589
590 if (!list_empty(&keys)) {
591 synchronize_net();
592 list_for_each_entry_safe(key, tmp, &keys, list)
593 __ieee80211_key_destroy(key, false);
594 }
595
596 WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
597 sdata->crypto_tx_tailroom_pending_dec);
579 598
580 mutex_unlock(&sdata->local->key_mtx); 599 mutex_unlock(&sdata->local->key_mtx);
581} 600}
582 601
583void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata) 602void ieee80211_free_sta_keys(struct ieee80211_local *local,
603 struct sta_info *sta)
584{ 604{
585 struct ieee80211_key *key, *tmp; 605 struct ieee80211_key *key, *tmp;
606 LIST_HEAD(keys);
607 int i;
586 608
587 mutex_lock(&sdata->local->key_mtx); 609 mutex_lock(&local->key_mtx);
610 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
611 key = key_mtx_dereference(local, sta->gtk[i]);
612 if (!key)
613 continue;
614 ieee80211_key_replace(key->sdata, key->sta,
615 key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE,
616 key, NULL);
617 list_add(&key->list, &keys);
618 }
588 619
589 ieee80211_debugfs_key_remove_mgmt_default(sdata); 620 key = key_mtx_dereference(local, sta->ptk);
621 if (key) {
622 ieee80211_key_replace(key->sdata, key->sta,
623 key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE,
624 key, NULL);
625 list_add(&key->list, &keys);
626 }
590 627
591 list_for_each_entry_safe(key, tmp, &sdata->key_list, list) 628 /*
592 __ieee80211_key_free(key); 629 * NB: the station code relies on this being
630 * done even if there aren't any keys
631 */
632 synchronize_net();
593 633
594 ieee80211_debugfs_key_update_default(sdata); 634 list_for_each_entry_safe(key, tmp, &keys, list)
635 __ieee80211_key_destroy(key, true);
595 636
596 mutex_unlock(&sdata->local->key_mtx); 637 mutex_unlock(&local->key_mtx);
597} 638}
598 639
640void ieee80211_delayed_tailroom_dec(struct work_struct *wk)
641{
642 struct ieee80211_sub_if_data *sdata;
643
644 sdata = container_of(wk, struct ieee80211_sub_if_data,
645 dec_tailroom_needed_wk.work);
646
647 /*
648 * The reason for the delayed tailroom needed decrementing is to
649 * make roaming faster: during roaming, all keys are first deleted
650 * and then new keys are installed. The first new key causes the
651 * crypto_tx_tailroom_needed_cnt to go from 0 to 1, which invokes
652 * the cost of synchronize_net() (which can be slow). Avoid this
653 * by deferring the crypto_tx_tailroom_needed_cnt decrementing on
654 * key removal for a while, so if we roam the value is larger than
655 * zero and no 0->1 transition happens.
656 *
657 * The cost is that if the AP switching was from an AP with keys
658 * to one without, we still allocate tailroom while it would no
659 * longer be needed. However, in the typical (fast) roaming case
660 * within an ESS this usually won't happen.
661 */
662
663 mutex_lock(&sdata->local->key_mtx);
664 sdata->crypto_tx_tailroom_needed_cnt -=
665 sdata->crypto_tx_tailroom_pending_dec;
666 sdata->crypto_tx_tailroom_pending_dec = 0;
667 mutex_unlock(&sdata->local->key_mtx);
668}
599 669
600void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid, 670void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid,
601 const u8 *replay_ctr, gfp_t gfp) 671 const u8 *replay_ctr, gfp_t gfp)
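The comment in ieee80211_delayed_tailroom_dec() above explains the point of crypto_tx_tailroom_pending_dec: key removal no longer decrements the tailroom counter immediately, so a remove-then-add sequence during roaming never produces the expensive 0 -> 1 transition that would otherwise require synchronize_net(). A user-space model of that accounting follows (illustrative only; the function names and the "expensive sync" printout stand in for the kernel machinery):

#include <stdio.h>

static int needed_cnt;          /* models crypto_tx_tailroom_needed_cnt */
static int pending_dec;         /* models crypto_tx_tailroom_pending_dec */

static void key_add(void)
{
        if (needed_cnt == 0)
                printf("0 -> 1 transition: would pay the expensive sync here\n");
        needed_cnt++;
}

static void key_remove(void)
{
        pending_dec++;          /* deferred, flushed later by delayed work */
}

static void delayed_work_flush(void)
{
        needed_cnt -= pending_dec;
        pending_dec = 0;
}

int main(void)
{
        key_add();              /* initial association: one expensive sync */
        key_remove();           /* roam: old key goes away... */
        key_add();              /* ...new key arrives before the flush runs */
        delayed_work_flush();   /* counter settles at 1, no extra sync paid */
        printf("needed_cnt=%d pending_dec=%d\n", needed_cnt, pending_dec);
        return 0;
}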
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 382dc44ed330..e8de3e6d7804 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -129,23 +129,25 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
129 size_t seq_len, const u8 *seq); 129 size_t seq_len, const u8 *seq);
130/* 130/*
131 * Insert a key into data structures (sdata, sta if necessary) 131 * Insert a key into data structures (sdata, sta if necessary)
132 * to make it used, free old key. 132 * to make it used, free old key. On failure, also free the new key.
133 */ 133 */
134int __must_check ieee80211_key_link(struct ieee80211_key *key, 134int ieee80211_key_link(struct ieee80211_key *key,
135 struct ieee80211_sub_if_data *sdata, 135 struct ieee80211_sub_if_data *sdata,
136 struct sta_info *sta); 136 struct sta_info *sta);
137void __ieee80211_key_free(struct ieee80211_key *key); 137void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom);
138void ieee80211_key_free(struct ieee80211_local *local, 138void ieee80211_key_free_unused(struct ieee80211_key *key);
139 struct ieee80211_key *key);
140void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx, 139void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx,
141 bool uni, bool multi); 140 bool uni, bool multi);
142void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, 141void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata,
143 int idx); 142 int idx);
144void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata); 143void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata);
144void ieee80211_free_sta_keys(struct ieee80211_local *local,
145 struct sta_info *sta);
145void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); 146void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata);
146void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata);
147 147
148#define key_mtx_dereference(local, ref) \ 148#define key_mtx_dereference(local, ref) \
149 rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx))) 149 rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx)))
150 150
151void ieee80211_delayed_tailroom_dec(struct work_struct *wk);
152
151#endif /* IEEE80211_KEY_H */ 153#endif /* IEEE80211_KEY_H */
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 1a8591b77a13..c6f81ecc36a1 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -100,7 +100,6 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
100 int power; 100 int power;
101 enum nl80211_channel_type channel_type; 101 enum nl80211_channel_type channel_type;
102 u32 offchannel_flag; 102 u32 offchannel_flag;
103 bool scanning = false;
104 103
105 offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; 104 offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
106 if (local->scan_channel) { 105 if (local->scan_channel) {
@@ -147,9 +146,6 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
147 changed |= IEEE80211_CONF_CHANGE_SMPS; 146 changed |= IEEE80211_CONF_CHANGE_SMPS;
148 } 147 }
149 148
150 scanning = test_bit(SCAN_SW_SCANNING, &local->scanning) ||
151 test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
152 test_bit(SCAN_HW_SCANNING, &local->scanning);
153 power = chan->max_power; 149 power = chan->max_power;
154 150
155 rcu_read_lock(); 151 rcu_read_lock();
@@ -226,8 +222,6 @@ u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
226static void ieee80211_tasklet_handler(unsigned long data) 222static void ieee80211_tasklet_handler(unsigned long data)
227{ 223{
228 struct ieee80211_local *local = (struct ieee80211_local *) data; 224 struct ieee80211_local *local = (struct ieee80211_local *) data;
229 struct sta_info *sta, *tmp;
230 struct skb_eosp_msg_data *eosp_data;
231 struct sk_buff *skb; 225 struct sk_buff *skb;
232 226
233 while ((skb = skb_dequeue(&local->skb_queue)) || 227 while ((skb = skb_dequeue(&local->skb_queue)) ||
@@ -243,18 +237,6 @@ static void ieee80211_tasklet_handler(unsigned long data)
243 skb->pkt_type = 0; 237 skb->pkt_type = 0;
244 ieee80211_tx_status(&local->hw, skb); 238 ieee80211_tx_status(&local->hw, skb);
245 break; 239 break;
246 case IEEE80211_EOSP_MSG:
247 eosp_data = (void *)skb->cb;
248 for_each_sta_info(local, eosp_data->sta, sta, tmp) {
249 /* skip wrong virtual interface */
250 if (memcmp(eosp_data->iface,
251 sta->sdata->vif.addr, ETH_ALEN))
252 continue;
253 clear_sta_flag(sta, WLAN_STA_SP);
254 break;
255 }
256 dev_kfree_skb(skb);
257 break;
258 default: 240 default:
259 WARN(1, "mac80211: Packet is of unknown type %d\n", 241 WARN(1, "mac80211: Packet is of unknown type %d\n",
260 skb->pkt_type); 242 skb->pkt_type);
@@ -295,8 +277,8 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
295 "Hardware restart was requested\n"); 277 "Hardware restart was requested\n");
296 278
297 /* use this reason, ieee80211_reconfig will unblock it */ 279 /* use this reason, ieee80211_reconfig will unblock it */
298 ieee80211_stop_queues_by_reason(hw, 280 ieee80211_stop_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
299 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 281 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
300 282
301 /* 283 /*
302 * Stop all Rx during the reconfig. We don't want state changes 284 * Stop all Rx during the reconfig. We don't want state changes
@@ -399,30 +381,6 @@ static int ieee80211_ifa6_changed(struct notifier_block *nb,
399} 381}
400#endif 382#endif
401 383
402static int ieee80211_napi_poll(struct napi_struct *napi, int budget)
403{
404 struct ieee80211_local *local =
405 container_of(napi, struct ieee80211_local, napi);
406
407 return local->ops->napi_poll(&local->hw, budget);
408}
409
410void ieee80211_napi_schedule(struct ieee80211_hw *hw)
411{
412 struct ieee80211_local *local = hw_to_local(hw);
413
414 napi_schedule(&local->napi);
415}
416EXPORT_SYMBOL(ieee80211_napi_schedule);
417
418void ieee80211_napi_complete(struct ieee80211_hw *hw)
419{
420 struct ieee80211_local *local = hw_to_local(hw);
421
422 napi_complete(&local->napi);
423}
424EXPORT_SYMBOL(ieee80211_napi_complete);
425
426/* There isn't a lot of sense in it, but you can transmit anything you like */ 384/* There isn't a lot of sense in it, but you can transmit anything you like */
427static const struct ieee80211_txrx_stypes 385static const struct ieee80211_txrx_stypes
428ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = { 386ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
@@ -501,6 +459,27 @@ static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
501 }, 459 },
502}; 460};
503 461
462static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = {
463 .vht_cap_info =
464 cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC |
465 IEEE80211_VHT_CAP_SHORT_GI_80 |
466 IEEE80211_VHT_CAP_SHORT_GI_160 |
467 IEEE80211_VHT_CAP_RXSTBC_1 |
468 IEEE80211_VHT_CAP_RXSTBC_2 |
469 IEEE80211_VHT_CAP_RXSTBC_3 |
470 IEEE80211_VHT_CAP_RXSTBC_4 |
471 IEEE80211_VHT_CAP_TXSTBC |
472 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
473 IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
474 IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
475 IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
476 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK),
477 .supp_mcs = {
478 .rx_mcs_map = cpu_to_le16(~0),
479 .tx_mcs_map = cpu_to_le16(~0),
480 },
481};
482
504static const u8 extended_capabilities[] = { 483static const u8 extended_capabilities[] = {
505 0, 0, 0, 0, 0, 0, 0, 484 0, 0, 0, 0, 0, 0, 0,
506 WLAN_EXT_CAPA8_OPMODE_NOTIF, 485 WLAN_EXT_CAPA8_OPMODE_NOTIF,
@@ -572,7 +551,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
572 wiphy->features |= NL80211_FEATURE_SK_TX_STATUS | 551 wiphy->features |= NL80211_FEATURE_SK_TX_STATUS |
573 NL80211_FEATURE_SAE | 552 NL80211_FEATURE_SAE |
574 NL80211_FEATURE_HT_IBSS | 553 NL80211_FEATURE_HT_IBSS |
575 NL80211_FEATURE_VIF_TXPOWER; 554 NL80211_FEATURE_VIF_TXPOWER |
555 NL80211_FEATURE_USERSPACE_MPM;
576 556
577 if (!ops->hw_scan) 557 if (!ops->hw_scan)
578 wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN | 558 wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN |
@@ -609,6 +589,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
609 IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH; 589 IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH;
610 local->user_power_level = IEEE80211_UNSET_POWER_LEVEL; 590 local->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
611 wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask; 591 wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask;
592 wiphy->vht_capa_mod_mask = &mac80211_vht_capa_mod_mask;
612 593
613 INIT_LIST_HEAD(&local->interfaces); 594 INIT_LIST_HEAD(&local->interfaces);
614 595
@@ -664,9 +645,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
664 skb_queue_head_init(&local->skb_queue); 645 skb_queue_head_init(&local->skb_queue);
665 skb_queue_head_init(&local->skb_queue_unreliable); 646 skb_queue_head_init(&local->skb_queue_unreliable);
666 647
667 /* init dummy netdev for use w/ NAPI */
668 init_dummy_netdev(&local->napi_dev);
669
670 ieee80211_led_names(local); 648 ieee80211_led_names(local);
671 649
672 ieee80211_roc_setup(local); 650 ieee80211_roc_setup(local);
@@ -1021,9 +999,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1021 goto fail_ifa6; 999 goto fail_ifa6;
1022#endif 1000#endif
1023 1001
1024 netif_napi_add(&local->napi_dev, &local->napi, ieee80211_napi_poll,
1025 local->hw.napi_weight);
1026
1027 return 0; 1002 return 0;
1028 1003
1029#if IS_ENABLED(CONFIG_IPV6) 1004#if IS_ENABLED(CONFIG_IPV6)
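mac80211_vht_capa_mod_mask above advertises, via wiphy->vht_capa_mod_mask, which VHT capability bits userspace is allowed to override; the actual application happens in ieee80211_apply_vhtcap_overrides(), which is only declared in this diff. As a generic illustration of what such a mod mask expresses (this is an assumption about the override semantics, not the mac80211 implementation), only bits requested by the user mask and permitted by the mod mask take effect:

#include <stdio.h>

static unsigned int apply_override(unsigned int hw_cap,
                                   unsigned int user_val,
                                   unsigned int user_mask,
                                   unsigned int mod_mask)
{
        unsigned int effective = user_mask & mod_mask;  /* drop forbidden bits */

        return (hw_cap & ~effective) | (user_val & effective);
}

int main(void)
{
        unsigned int hw_cap = 0x0f;     /* arbitrary example bits */
        unsigned int mod_mask = 0x06;   /* only bits 1 and 2 may be overridden */

        /* User asks to clear bits 0..2; only bits 1 and 2 actually change,
         * so the result is 0x09. */
        printf("result: %#x\n", apply_override(hw_cap, 0x0, 0x07, mod_mask));
        return 0;
}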
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 4749b3858695..123a300cef57 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -13,10 +13,6 @@
13#include "ieee80211_i.h" 13#include "ieee80211_i.h"
14#include "mesh.h" 14#include "mesh.h"
15 15
16#define TMR_RUNNING_HK 0
17#define TMR_RUNNING_MP 1
18#define TMR_RUNNING_MPR 2
19
20static int mesh_allocated; 16static int mesh_allocated;
21static struct kmem_cache *rm_cache; 17static struct kmem_cache *rm_cache;
22 18
@@ -50,11 +46,6 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data)
50 46
51 set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); 47 set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
52 48
53 if (local->quiescing) {
54 set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
55 return;
56 }
57
58 ieee80211_queue_work(&local->hw, &sdata->work); 49 ieee80211_queue_work(&local->hw, &sdata->work);
59} 50}
60 51
@@ -165,7 +156,7 @@ void mesh_sta_cleanup(struct sta_info *sta)
165 * an update. 156 * an update.
166 */ 157 */
167 changed = mesh_accept_plinks_update(sdata); 158 changed = mesh_accept_plinks_update(sdata);
168 if (sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) { 159 if (!sdata->u.mesh.user_mpm) {
169 changed |= mesh_plink_deactivate(sta); 160 changed |= mesh_plink_deactivate(sta);
170 del_timer_sync(&sta->plink_timer); 161 del_timer_sync(&sta->plink_timer);
171 } 162 }
@@ -479,15 +470,8 @@ static void ieee80211_mesh_path_timer(unsigned long data)
479{ 470{
480 struct ieee80211_sub_if_data *sdata = 471 struct ieee80211_sub_if_data *sdata =
481 (struct ieee80211_sub_if_data *) data; 472 (struct ieee80211_sub_if_data *) data;
482 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
483 struct ieee80211_local *local = sdata->local;
484
485 if (local->quiescing) {
486 set_bit(TMR_RUNNING_MP, &ifmsh->timers_running);
487 return;
488 }
489 473
490 ieee80211_queue_work(&local->hw, &sdata->work); 474 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
491} 475}
492 476
493static void ieee80211_mesh_path_root_timer(unsigned long data) 477static void ieee80211_mesh_path_root_timer(unsigned long data)
@@ -495,16 +479,10 @@ static void ieee80211_mesh_path_root_timer(unsigned long data)
495 struct ieee80211_sub_if_data *sdata = 479 struct ieee80211_sub_if_data *sdata =
496 (struct ieee80211_sub_if_data *) data; 480 (struct ieee80211_sub_if_data *) data;
497 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 481 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
498 struct ieee80211_local *local = sdata->local;
499 482
500 set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); 483 set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
501 484
502 if (local->quiescing) { 485 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
503 set_bit(TMR_RUNNING_MPR, &ifmsh->timers_running);
504 return;
505 }
506
507 ieee80211_queue_work(&local->hw, &sdata->work);
508} 486}
509 487
510void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh) 488void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh)
@@ -622,35 +600,6 @@ static void ieee80211_mesh_rootpath(struct ieee80211_sub_if_data *sdata)
622 round_jiffies(TU_TO_EXP_TIME(interval))); 600 round_jiffies(TU_TO_EXP_TIME(interval)));
623} 601}
624 602
625#ifdef CONFIG_PM
626void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
627{
628 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
629
630 /* use atomic bitops in case all timers fire at the same time */
631
632 if (del_timer_sync(&ifmsh->housekeeping_timer))
633 set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
634 if (del_timer_sync(&ifmsh->mesh_path_timer))
635 set_bit(TMR_RUNNING_MP, &ifmsh->timers_running);
636 if (del_timer_sync(&ifmsh->mesh_path_root_timer))
637 set_bit(TMR_RUNNING_MPR, &ifmsh->timers_running);
638}
639
640void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata)
641{
642 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
643
644 if (test_and_clear_bit(TMR_RUNNING_HK, &ifmsh->timers_running))
645 add_timer(&ifmsh->housekeeping_timer);
646 if (test_and_clear_bit(TMR_RUNNING_MP, &ifmsh->timers_running))
647 add_timer(&ifmsh->mesh_path_timer);
648 if (test_and_clear_bit(TMR_RUNNING_MPR, &ifmsh->timers_running))
649 add_timer(&ifmsh->mesh_path_root_timer);
650 ieee80211_mesh_root_setup(ifmsh);
651}
652#endif
653
654static int 603static int
655ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh) 604ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
656{ 605{
@@ -750,10 +699,8 @@ out_free:
750static int 699static int
751ieee80211_mesh_rebuild_beacon(struct ieee80211_if_mesh *ifmsh) 700ieee80211_mesh_rebuild_beacon(struct ieee80211_if_mesh *ifmsh)
752{ 701{
753 struct ieee80211_sub_if_data *sdata;
754 struct beacon_data *old_bcn; 702 struct beacon_data *old_bcn;
755 int ret; 703 int ret;
756 sdata = container_of(ifmsh, struct ieee80211_sub_if_data, u.mesh);
757 704
758 mutex_lock(&ifmsh->mtx); 705 mutex_lock(&ifmsh->mtx);
759 706
@@ -871,8 +818,6 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
871 local->fif_other_bss--; 818 local->fif_other_bss--;
872 atomic_dec(&local->iff_allmultis); 819 atomic_dec(&local->iff_allmultis);
873 ieee80211_configure_filter(local); 820 ieee80211_configure_filter(local);
874
875 sdata->u.mesh.timers_running = 0;
876} 821}
877 822
878static void 823static void
@@ -886,9 +831,8 @@ ieee80211_mesh_rx_probe_req(struct ieee80211_sub_if_data *sdata,
886 struct ieee80211_mgmt *hdr; 831 struct ieee80211_mgmt *hdr;
887 struct ieee802_11_elems elems; 832 struct ieee802_11_elems elems;
888 size_t baselen; 833 size_t baselen;
889 u8 *pos, *end; 834 u8 *pos;
890 835
891 end = ((u8 *) mgmt) + len;
892 pos = mgmt->u.probe_req.variable; 836 pos = mgmt->u.probe_req.variable;
893 baselen = (u8 *) pos - (u8 *) mgmt; 837 baselen = (u8 *) pos - (u8 *) mgmt;
894 if (baselen > len) 838 if (baselen > len)
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 336c88a16687..6ffabbe99c46 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -313,8 +313,6 @@ void mesh_path_timer(unsigned long data);
313void mesh_path_flush_by_nexthop(struct sta_info *sta); 313void mesh_path_flush_by_nexthop(struct sta_info *sta);
314void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata, 314void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
315 struct sk_buff *skb); 315 struct sk_buff *skb);
316void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata);
317void mesh_path_restart(struct ieee80211_sub_if_data *sdata);
318void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata); 316void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata);
319 317
320bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt); 318bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt);
@@ -359,22 +357,12 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
359 357
360void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local); 358void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
361 359
362void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata);
363void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata);
364void mesh_plink_quiesce(struct sta_info *sta);
365void mesh_plink_restart(struct sta_info *sta);
366void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata); 360void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
367void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata); 361void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata);
368void ieee80211s_stop(void); 362void ieee80211s_stop(void);
369#else 363#else
370static inline void 364static inline void
371ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {} 365ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
372static inline void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
373{}
374static inline void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata)
375{}
376static inline void mesh_plink_quiesce(struct sta_info *sta) {}
377static inline void mesh_plink_restart(struct sta_info *sta) {}
378static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata) 366static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
379{ return false; } 367{ return false; }
380static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) 368static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 07d396d57079..937e06fe8f2a 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -420,7 +420,6 @@ __mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr)
420 return NULL; 420 return NULL;
421 421
422 sta->plink_state = NL80211_PLINK_LISTEN; 422 sta->plink_state = NL80211_PLINK_LISTEN;
423 init_timer(&sta->plink_timer);
424 423
425 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH); 424 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
426 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC); 425 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
@@ -437,8 +436,9 @@ mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr,
437{ 436{
438 struct sta_info *sta = NULL; 437 struct sta_info *sta = NULL;
439 438
440 /* Userspace handles peer allocation when security is enabled */ 439 /* Userspace handles station allocation */
441 if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) 440 if (sdata->u.mesh.user_mpm ||
441 sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED)
442 cfg80211_notify_new_peer_candidate(sdata->dev, addr, 442 cfg80211_notify_new_peer_candidate(sdata->dev, addr,
443 elems->ie_start, 443 elems->ie_start,
444 elems->total_len, 444 elems->total_len,
@@ -534,10 +534,8 @@ static void mesh_plink_timer(unsigned long data)
534 */ 534 */
535 sta = (struct sta_info *) data; 535 sta = (struct sta_info *) data;
536 536
537 if (sta->sdata->local->quiescing) { 537 if (sta->sdata->local->quiescing)
538 sta->plink_timer_was_running = true;
539 return; 538 return;
540 }
541 539
542 spin_lock_bh(&sta->lock); 540 spin_lock_bh(&sta->lock);
543 if (sta->ignore_plink_timer) { 541 if (sta->ignore_plink_timer) {
@@ -598,29 +596,6 @@ static void mesh_plink_timer(unsigned long data)
598 } 596 }
599} 597}
600 598
601#ifdef CONFIG_PM
602void mesh_plink_quiesce(struct sta_info *sta)
603{
604 if (!ieee80211_vif_is_mesh(&sta->sdata->vif))
605 return;
606
607 /* no kernel mesh sta timers have been initialized */
608 if (sta->sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)
609 return;
610
611 if (del_timer_sync(&sta->plink_timer))
612 sta->plink_timer_was_running = true;
613}
614
615void mesh_plink_restart(struct sta_info *sta)
616{
617 if (sta->plink_timer_was_running) {
618 add_timer(&sta->plink_timer);
619 sta->plink_timer_was_running = false;
620 }
621}
622#endif
623
624static inline void mesh_plink_timer_set(struct sta_info *sta, int timeout) 599static inline void mesh_plink_timer_set(struct sta_info *sta, int timeout)
625{ 600{
626 sta->plink_timer.expires = jiffies + (HZ * timeout / 1000); 601 sta->plink_timer.expires = jiffies + (HZ * timeout / 1000);
@@ -695,6 +670,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
695 if (len < IEEE80211_MIN_ACTION_SIZE + 3) 670 if (len < IEEE80211_MIN_ACTION_SIZE + 3)
696 return; 671 return;
697 672
673 if (sdata->u.mesh.user_mpm)
674 /* userspace must register for these */
675 return;
676
698 if (is_multicast_ether_addr(mgmt->da)) { 677 if (is_multicast_ether_addr(mgmt->da)) {
699 mpl_dbg(sdata, 678 mpl_dbg(sdata,
700 "Mesh plink: ignore frame from multicast address\n"); 679 "Mesh plink: ignore frame from multicast address\n");
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 82cc30318a86..e06dbbf8cb4c 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -87,9 +87,6 @@ MODULE_PARM_DESC(probe_wait_ms,
87 */ 87 */
88#define IEEE80211_SIGNAL_AVE_MIN_COUNT 4 88#define IEEE80211_SIGNAL_AVE_MIN_COUNT 4
89 89
90#define TMR_RUNNING_TIMER 0
91#define TMR_RUNNING_CHANSW 1
92
93/* 90/*
94 * All cfg80211 functions have to be called outside a locked 91 * All cfg80211 functions have to be called outside a locked
95 * section so that they can acquire a lock themselves... This 92 * section so that they can acquire a lock themselves... This
@@ -609,6 +606,7 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
609 BUILD_BUG_ON(sizeof(vht_cap) != sizeof(sband->vht_cap)); 606 BUILD_BUG_ON(sizeof(vht_cap) != sizeof(sband->vht_cap));
610 607
611 memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap)); 608 memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap));
609 ieee80211_apply_vhtcap_overrides(sdata, &vht_cap);
612 610
613 /* determine capability flags */ 611 /* determine capability flags */
614 cap = vht_cap.cap; 612 cap = vht_cap.cap;
@@ -1011,6 +1009,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
1011 1009
1012 /* XXX: wait for a beacon first? */ 1010 /* XXX: wait for a beacon first? */
1013 ieee80211_wake_queues_by_reason(&sdata->local->hw, 1011 ieee80211_wake_queues_by_reason(&sdata->local->hw,
1012 IEEE80211_MAX_QUEUE_MAP,
1014 IEEE80211_QUEUE_STOP_REASON_CSA); 1013 IEEE80211_QUEUE_STOP_REASON_CSA);
1015 out: 1014 out:
1016 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; 1015 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
@@ -1038,14 +1037,8 @@ static void ieee80211_chswitch_timer(unsigned long data)
1038{ 1037{
1039 struct ieee80211_sub_if_data *sdata = 1038 struct ieee80211_sub_if_data *sdata =
1040 (struct ieee80211_sub_if_data *) data; 1039 (struct ieee80211_sub_if_data *) data;
1041 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1042 1040
1043 if (sdata->local->quiescing) { 1041 ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.chswitch_work);
1044 set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
1045 return;
1046 }
1047
1048 ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
1049} 1042}
1050 1043
1051void 1044void
@@ -1116,6 +1109,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1116 1109
1117 if (sw_elem->mode) 1110 if (sw_elem->mode)
1118 ieee80211_stop_queues_by_reason(&sdata->local->hw, 1111 ieee80211_stop_queues_by_reason(&sdata->local->hw,
1112 IEEE80211_MAX_QUEUE_MAP,
1119 IEEE80211_QUEUE_STOP_REASON_CSA); 1113 IEEE80211_QUEUE_STOP_REASON_CSA);
1120 1114
1121 if (sdata->local->ops->channel_switch) { 1115 if (sdata->local->ops->channel_switch) {
@@ -1383,6 +1377,7 @@ void ieee80211_dynamic_ps_disable_work(struct work_struct *work)
1383 } 1377 }
1384 1378
1385 ieee80211_wake_queues_by_reason(&local->hw, 1379 ieee80211_wake_queues_by_reason(&local->hw,
1380 IEEE80211_MAX_QUEUE_MAP,
1386 IEEE80211_QUEUE_STOP_REASON_PS); 1381 IEEE80211_QUEUE_STOP_REASON_PS);
1387} 1382}
1388 1383
@@ -1444,7 +1439,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
1444 else { 1439 else {
1445 ieee80211_send_nullfunc(local, sdata, 1); 1440 ieee80211_send_nullfunc(local, sdata, 1);
1446 /* Flush to get the tx status of nullfunc frame */ 1441 /* Flush to get the tx status of nullfunc frame */
1447 drv_flush(local, false); 1442 ieee80211_flush_queues(local, sdata);
1448 } 1443 }
1449 } 1444 }
1450 1445
@@ -1775,7 +1770,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1775 1770
1776 /* flush out any pending frame (e.g. DELBA) before deauth/disassoc */ 1771 /* flush out any pending frame (e.g. DELBA) before deauth/disassoc */
1777 if (tx) 1772 if (tx)
1778 drv_flush(local, false); 1773 ieee80211_flush_queues(local, sdata);
1779 1774
1780 /* deauthenticate/disassociate now */ 1775 /* deauthenticate/disassociate now */
1781 if (tx || frame_buf) 1776 if (tx || frame_buf)
@@ -1784,7 +1779,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1784 1779
1785 /* flush out frame */ 1780 /* flush out frame */
1786 if (tx) 1781 if (tx)
1787 drv_flush(local, false); 1782 ieee80211_flush_queues(local, sdata);
1788 1783
1789 /* clear bssid only after building the needed mgmt frames */ 1784 /* clear bssid only after building the needed mgmt frames */
1790 memset(ifmgd->bssid, 0, ETH_ALEN); 1785 memset(ifmgd->bssid, 0, ETH_ALEN);
@@ -1802,9 +1797,11 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1802 sdata->vif.bss_conf.p2p_ctwindow = 0; 1797 sdata->vif.bss_conf.p2p_ctwindow = 0;
1803 sdata->vif.bss_conf.p2p_oppps = false; 1798 sdata->vif.bss_conf.p2p_oppps = false;
1804 1799
1805 /* on the next assoc, re-program HT parameters */ 1800 /* on the next assoc, re-program HT/VHT parameters */
1806 memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa)); 1801 memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa));
1807 memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask)); 1802 memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
1803 memset(&ifmgd->vht_capa, 0, sizeof(ifmgd->vht_capa));
1804 memset(&ifmgd->vht_capa_mask, 0, sizeof(ifmgd->vht_capa_mask));
1808 1805
1809 sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL; 1806 sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
1810 1807
@@ -1830,8 +1827,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1830 del_timer_sync(&sdata->u.mgd.timer); 1827 del_timer_sync(&sdata->u.mgd.timer);
1831 del_timer_sync(&sdata->u.mgd.chswitch_timer); 1828 del_timer_sync(&sdata->u.mgd.chswitch_timer);
1832 1829
1833 sdata->u.mgd.timers_running = 0;
1834
1835 sdata->vif.bss_conf.dtim_period = 0; 1830 sdata->vif.bss_conf.dtim_period = 0;
1836 1831
1837 ifmgd->flags = 0; 1832 ifmgd->flags = 0;
@@ -1956,7 +1951,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
1956 ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); 1951 ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
1957 run_again(ifmgd, ifmgd->probe_timeout); 1952 run_again(ifmgd, ifmgd->probe_timeout);
1958 if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) 1953 if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
1959 drv_flush(sdata->local, false); 1954 ieee80211_flush_queues(sdata->local, sdata);
1960} 1955}
1961 1956
1962static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, 1957static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
@@ -2079,6 +2074,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
2079 true, frame_buf); 2074 true, frame_buf);
2080 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; 2075 ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
2081 ieee80211_wake_queues_by_reason(&sdata->local->hw, 2076 ieee80211_wake_queues_by_reason(&sdata->local->hw,
2077 IEEE80211_MAX_QUEUE_MAP,
2082 IEEE80211_QUEUE_STOP_REASON_CSA); 2078 IEEE80211_QUEUE_STOP_REASON_CSA);
2083 mutex_unlock(&ifmgd->mtx); 2079 mutex_unlock(&ifmgd->mtx);
2084 2080
@@ -3140,15 +3136,8 @@ static void ieee80211_sta_timer(unsigned long data)
3140{ 3136{
3141 struct ieee80211_sub_if_data *sdata = 3137 struct ieee80211_sub_if_data *sdata =
3142 (struct ieee80211_sub_if_data *) data; 3138 (struct ieee80211_sub_if_data *) data;
3143 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3144 struct ieee80211_local *local = sdata->local;
3145
3146 if (local->quiescing) {
3147 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
3148 return;
3149 }
3150 3139
3151 ieee80211_queue_work(&local->hw, &sdata->work); 3140 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
3152} 3141}
3153 3142
3154static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, 3143static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
@@ -3500,72 +3489,6 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
3500 } 3489 }
3501} 3490}
3502 3491
3503#ifdef CONFIG_PM
3504void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
3505{
3506 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3507
3508 /*
3509 * Stop timers before deleting work items, as timers
3510 * could race and re-add the work-items. They will be
3511 * re-established on connection.
3512 */
3513 del_timer_sync(&ifmgd->conn_mon_timer);
3514 del_timer_sync(&ifmgd->bcn_mon_timer);
3515
3516 /*
3517 * we need to use atomic bitops for the running bits
3518 * only because both timers might fire at the same
3519 * time -- the code here is properly synchronised.
3520 */
3521
3522 cancel_work_sync(&ifmgd->request_smps_work);
3523
3524 cancel_work_sync(&ifmgd->monitor_work);
3525 cancel_work_sync(&ifmgd->beacon_connection_loss_work);
3526 cancel_work_sync(&ifmgd->csa_connection_drop_work);
3527 if (del_timer_sync(&ifmgd->timer))
3528 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
3529
3530 if (del_timer_sync(&ifmgd->chswitch_timer))
3531 set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
3532 cancel_work_sync(&ifmgd->chswitch_work);
3533}
3534
3535void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
3536{
3537 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3538
3539 mutex_lock(&ifmgd->mtx);
3540 if (!ifmgd->associated) {
3541 mutex_unlock(&ifmgd->mtx);
3542 return;
3543 }
3544
3545 if (sdata->flags & IEEE80211_SDATA_DISCONNECT_RESUME) {
3546 sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME;
3547 mlme_dbg(sdata, "driver requested disconnect after resume\n");
3548 ieee80211_sta_connection_lost(sdata,
3549 ifmgd->associated->bssid,
3550 WLAN_REASON_UNSPECIFIED,
3551 true);
3552 mutex_unlock(&ifmgd->mtx);
3553 return;
3554 }
3555 mutex_unlock(&ifmgd->mtx);
3556
3557 if (test_and_clear_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running))
3558 add_timer(&ifmgd->timer);
3559 if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running))
3560 add_timer(&ifmgd->chswitch_timer);
3561 ieee80211_sta_reset_beacon_monitor(sdata);
3562
3563 mutex_lock(&sdata->local->mtx);
3564 ieee80211_restart_sta_timer(sdata);
3565 mutex_unlock(&sdata->local->mtx);
3566}
3567#endif
3568
3569/* interface setup */ 3492/* interface setup */
3570void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) 3493void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
3571{ 3494{
@@ -4073,6 +3996,9 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
4073 ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; 3996 ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
4074 } 3997 }
4075 3998
3999 if (req->flags & ASSOC_REQ_DISABLE_VHT)
4000 ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
4001
4076 /* Also disable HT if we don't support it or the AP doesn't use WMM */ 4002 /* Also disable HT if we don't support it or the AP doesn't use WMM */
4077 sband = local->hw.wiphy->bands[req->bss->channel->band]; 4003 sband = local->hw.wiphy->bands[req->bss->channel->band];
4078 if (!sband->ht_cap.ht_supported || 4004 if (!sband->ht_cap.ht_supported ||
@@ -4096,6 +4022,10 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
4096 memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask, 4022 memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
4097 sizeof(ifmgd->ht_capa_mask)); 4023 sizeof(ifmgd->ht_capa_mask));
4098 4024
4025 memcpy(&ifmgd->vht_capa, &req->vht_capa, sizeof(ifmgd->vht_capa));
4026 memcpy(&ifmgd->vht_capa_mask, &req->vht_capa_mask,
4027 sizeof(ifmgd->vht_capa_mask));
4028
4099 if (req->ie && req->ie_len) { 4029 if (req->ie && req->ie_len) {
4100 memcpy(assoc_data->ie, req->ie, req->ie_len); 4030 memcpy(assoc_data->ie, req->ie, req->ie_len);
4101 assoc_data->ie_len = req->ie_len; 4031 assoc_data->ie_len = req->ie_len;
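
Two changes repeat throughout the mlme.c hunks: the stop/wake helpers now take an explicit queue bitmap (IEEE80211_MAX_QUEUE_MAP selects every queue) alongside the stop reason, and the bare drv_flush() calls become the interface-aware ieee80211_flush_queues(). The underlying bookkeeping is simple: each queue keeps a bitmask of active stop reasons and may run only when that mask is empty. A small, self-contained C model of that idea follows; the constants and function names are invented for the sketch and are not the mac80211 implementation.

#include <stdbool.h>
#include <stdio.h>

/* toy model of per-queue stop-reason masks; not the mac80211 code */
#define NUM_QUEUES     4
#define ALL_QUEUES_MAP ((1u << NUM_QUEUES) - 1)   /* role of IEEE80211_MAX_QUEUE_MAP */

enum stop_reason { REASON_PS, REASON_CSA, REASON_SUSPEND };

static unsigned int stop_reasons[NUM_QUEUES];     /* bitmask of active reasons per queue */

static void stop_queues_by_reason(unsigned int queue_map, enum stop_reason r)
{
	for (int q = 0; q < NUM_QUEUES; q++)
		if (queue_map & (1u << q))
			stop_reasons[q] |= 1u << r;
}

static void wake_queues_by_reason(unsigned int queue_map, enum stop_reason r)
{
	for (int q = 0; q < NUM_QUEUES; q++)
		if (queue_map & (1u << q))
			stop_reasons[q] &= ~(1u << r);
}

static bool queue_running(int q)
{
	return stop_reasons[q] == 0;   /* runs only when no stop reason is left */
}

int main(void)
{
	stop_queues_by_reason(ALL_QUEUES_MAP, REASON_CSA);
	stop_queues_by_reason(1u << 0, REASON_PS);        /* queue 0 also held for PS */
	wake_queues_by_reason(ALL_QUEUES_MAP, REASON_CSA);

	for (int q = 0; q < NUM_QUEUES; q++)
		printf("queue %d: %s\n", q, queue_running(q) ? "running" : "stopped");
	return 0;
}

Stopping for CSA and PS on the same queue, as in the example, shows why a per-reason mask is needed: waking the CSA reason alone must not restart a queue that power save still holds.
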
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index cc79b4a2e821..b01eb7314ec6 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -118,9 +118,9 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
118 * Stop queues and transmit all frames queued by the driver 118 * Stop queues and transmit all frames queued by the driver
119 * before sending nullfunc to enable powersave at the AP. 119 * before sending nullfunc to enable powersave at the AP.
120 */ 120 */
121 ieee80211_stop_queues_by_reason(&local->hw, 121 ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
122 IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL); 122 IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL);
123 drv_flush(local, false); 123 ieee80211_flush_queues(local, NULL);
124 124
125 mutex_lock(&local->iflist_mtx); 125 mutex_lock(&local->iflist_mtx);
126 list_for_each_entry(sdata, &local->interfaces, list) { 126 list_for_each_entry(sdata, &local->interfaces, list) {
@@ -181,7 +181,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local)
181 } 181 }
182 mutex_unlock(&local->iflist_mtx); 182 mutex_unlock(&local->iflist_mtx);
183 183
184 ieee80211_wake_queues_by_reason(&local->hw, 184 ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
185 IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL); 185 IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL);
186} 186}
187 187
@@ -277,7 +277,7 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)
277 duration = 10; 277 duration = 10;
278 278
279 ret = drv_remain_on_channel(local, roc->sdata, roc->chan, 279 ret = drv_remain_on_channel(local, roc->sdata, roc->chan,
280 duration); 280 duration, roc->type);
281 281
282 roc->started = true; 282 roc->started = true;
283 283
@@ -373,7 +373,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
373 ieee80211_roc_notify_destroy(roc); 373 ieee80211_roc_notify_destroy(roc);
374 374
375 if (started) { 375 if (started) {
376 drv_flush(local, false); 376 ieee80211_flush_queues(local, NULL);
377 377
378 local->tmp_channel = NULL; 378 local->tmp_channel = NULL;
379 ieee80211_hw_config(local, 0); 379 ieee80211_hw_config(local, 0);
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index d0275f34bf70..3d16f4e61743 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -6,32 +6,11 @@
6#include "driver-ops.h" 6#include "driver-ops.h"
7#include "led.h" 7#include "led.h"
8 8
9/* return value indicates whether the driver should be further notified */
10static void ieee80211_quiesce(struct ieee80211_sub_if_data *sdata)
11{
12 switch (sdata->vif.type) {
13 case NL80211_IFTYPE_STATION:
14 ieee80211_sta_quiesce(sdata);
15 break;
16 case NL80211_IFTYPE_ADHOC:
17 ieee80211_ibss_quiesce(sdata);
18 break;
19 case NL80211_IFTYPE_MESH_POINT:
20 ieee80211_mesh_quiesce(sdata);
21 break;
22 default:
23 break;
24 }
25
26 cancel_work_sync(&sdata->work);
27}
28
29int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) 9int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
30{ 10{
31 struct ieee80211_local *local = hw_to_local(hw); 11 struct ieee80211_local *local = hw_to_local(hw);
32 struct ieee80211_sub_if_data *sdata; 12 struct ieee80211_sub_if_data *sdata;
33 struct sta_info *sta; 13 struct sta_info *sta;
34 struct ieee80211_chanctx *ctx;
35 14
36 if (!local->open_count) 15 if (!local->open_count)
37 goto suspend; 16 goto suspend;
@@ -51,12 +30,13 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
51 } 30 }
52 31
53 ieee80211_stop_queues_by_reason(hw, 32 ieee80211_stop_queues_by_reason(hw,
54 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 33 IEEE80211_MAX_QUEUE_MAP,
34 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
55 35
56 /* flush out all packets */ 36 /* flush out all packets */
57 synchronize_net(); 37 synchronize_net();
58 38
59 drv_flush(local, false); 39 ieee80211_flush_queues(local, NULL);
60 40
61 local->quiescing = true; 41 local->quiescing = true;
62 /* make quiescing visible to timers everywhere */ 42 /* make quiescing visible to timers everywhere */
@@ -89,23 +69,17 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
89 mutex_unlock(&local->sta_mtx); 69 mutex_unlock(&local->sta_mtx);
90 } 70 }
91 ieee80211_wake_queues_by_reason(hw, 71 ieee80211_wake_queues_by_reason(hw,
72 IEEE80211_MAX_QUEUE_MAP,
92 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 73 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
93 return err; 74 return err;
94 } else if (err > 0) { 75 } else if (err > 0) {
95 WARN_ON(err != 1); 76 WARN_ON(err != 1);
96 local->wowlan = false; 77 return err;
97 } else { 78 } else {
98 list_for_each_entry(sdata, &local->interfaces, list)
99 if (ieee80211_sdata_running(sdata))
100 ieee80211_quiesce(sdata);
101 goto suspend; 79 goto suspend;
102 } 80 }
103 } 81 }
104 82
105 /* disable keys */
106 list_for_each_entry(sdata, &local->interfaces, list)
107 ieee80211_disable_keys(sdata);
108
109 /* tear down aggregation sessions and remove STAs */ 83 /* tear down aggregation sessions and remove STAs */
110 mutex_lock(&local->sta_mtx); 84 mutex_lock(&local->sta_mtx);
111 list_for_each_entry(sta, &local->sta_list, list) { 85 list_for_each_entry(sta, &local->sta_list, list) {
@@ -117,100 +91,25 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
117 WARN_ON(drv_sta_state(local, sta->sdata, sta, 91 WARN_ON(drv_sta_state(local, sta->sdata, sta,
118 state, state - 1)); 92 state, state - 1));
119 } 93 }
120
121 mesh_plink_quiesce(sta);
122 } 94 }
123 mutex_unlock(&local->sta_mtx); 95 mutex_unlock(&local->sta_mtx);
124 96
125 /* remove all interfaces */ 97 /* remove all interfaces */
126 list_for_each_entry(sdata, &local->interfaces, list) { 98 list_for_each_entry(sdata, &local->interfaces, list) {
127 static u8 zero_addr[ETH_ALEN] = {};
128 u32 changed = 0;
129
130 if (!ieee80211_sdata_running(sdata)) 99 if (!ieee80211_sdata_running(sdata))
131 continue; 100 continue;
132
133 switch (sdata->vif.type) {
134 case NL80211_IFTYPE_AP_VLAN:
135 case NL80211_IFTYPE_MONITOR:
136 /* skip these */
137 continue;
138 case NL80211_IFTYPE_STATION:
139 if (sdata->vif.bss_conf.assoc)
140 changed = BSS_CHANGED_ASSOC |
141 BSS_CHANGED_BSSID |
142 BSS_CHANGED_IDLE;
143 break;
144 case NL80211_IFTYPE_AP:
145 case NL80211_IFTYPE_ADHOC:
146 case NL80211_IFTYPE_MESH_POINT:
147 if (sdata->vif.bss_conf.enable_beacon)
148 changed = BSS_CHANGED_BEACON_ENABLED;
149 break;
150 default:
151 break;
152 }
153
154 ieee80211_quiesce(sdata);
155
156 sdata->suspend_bss_conf = sdata->vif.bss_conf;
157 memset(&sdata->vif.bss_conf, 0, sizeof(sdata->vif.bss_conf));
158 sdata->vif.bss_conf.idle = true;
159 if (sdata->suspend_bss_conf.bssid)
160 sdata->vif.bss_conf.bssid = zero_addr;
161
162 /* disable beaconing or remove association */
163 ieee80211_bss_info_change_notify(sdata, changed);
164
165 if (sdata->vif.type == NL80211_IFTYPE_AP &&
166 rcu_access_pointer(sdata->u.ap.beacon))
167 drv_stop_ap(local, sdata);
168
169 if (local->use_chanctx) {
170 struct ieee80211_chanctx_conf *conf;
171
172 mutex_lock(&local->chanctx_mtx);
173 conf = rcu_dereference_protected(
174 sdata->vif.chanctx_conf,
175 lockdep_is_held(&local->chanctx_mtx));
176 if (conf) {
177 ctx = container_of(conf,
178 struct ieee80211_chanctx,
179 conf);
180 drv_unassign_vif_chanctx(local, sdata, ctx);
181 }
182
183 mutex_unlock(&local->chanctx_mtx);
184 }
185 drv_remove_interface(local, sdata); 101 drv_remove_interface(local, sdata);
186 } 102 }
187 103
188 sdata = rtnl_dereference(local->monitor_sdata); 104 sdata = rtnl_dereference(local->monitor_sdata);
189 if (sdata) { 105 if (sdata)
190 if (local->use_chanctx) {
191 struct ieee80211_chanctx_conf *conf;
192
193 mutex_lock(&local->chanctx_mtx);
194 conf = rcu_dereference_protected(
195 sdata->vif.chanctx_conf,
196 lockdep_is_held(&local->chanctx_mtx));
197 if (conf) {
198 ctx = container_of(conf,
199 struct ieee80211_chanctx,
200 conf);
201 drv_unassign_vif_chanctx(local, sdata, ctx);
202 }
203
204 mutex_unlock(&local->chanctx_mtx);
205 }
206
207 drv_remove_interface(local, sdata); 106 drv_remove_interface(local, sdata);
208 }
209 107
210 mutex_lock(&local->chanctx_mtx); 108 /*
211 list_for_each_entry(ctx, &local->chanctx_list, list) 109 * We disconnected on all interfaces before suspend, all channel
212 drv_remove_chanctx(local, ctx); 110 * contexts should be released.
213 mutex_unlock(&local->chanctx_mtx); 111 */
112 WARN_ON(!list_empty(&local->chanctx_list));
214 113
215 /* stop hardware - this must stop RX */ 114 /* stop hardware - this must stop RX */
216 if (local->open_count) 115 if (local->open_count)
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index eea45a2c7c35..1c36c9b4fa4a 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -55,7 +55,6 @@
55#include "rate.h" 55#include "rate.h"
56#include "rc80211_minstrel.h" 56#include "rc80211_minstrel.h"
57 57
58#define SAMPLE_COLUMNS 10
59#define SAMPLE_TBL(_mi, _idx, _col) \ 58#define SAMPLE_TBL(_mi, _idx, _col) \
60 _mi->sample_table[(_idx * SAMPLE_COLUMNS) + _col] 59 _mi->sample_table[(_idx * SAMPLE_COLUMNS) + _col]
61 60
@@ -70,16 +69,31 @@ rix_to_ndx(struct minstrel_sta_info *mi, int rix)
70 return i; 69 return i;
71} 70}
72 71
72/* find & sort topmost throughput rates */
73static inline void
74minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
75{
76 int j = MAX_THR_RATES;
77
78 while (j > 0 && mi->r[i].cur_tp > mi->r[tp_list[j - 1]].cur_tp)
79 j--;
80 if (j < MAX_THR_RATES - 1)
81 memmove(&tp_list[j + 1], &tp_list[j], MAX_THR_RATES - (j + 1));
82 if (j < MAX_THR_RATES)
83 tp_list[j] = i;
84}
85
73static void 86static void
74minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi) 87minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
75{ 88{
76 u32 max_tp = 0, index_max_tp = 0, index_max_tp2 = 0; 89 u8 tmp_tp_rate[MAX_THR_RATES];
77 u32 max_prob = 0, index_max_prob = 0; 90 u8 tmp_prob_rate = 0;
78 u32 usecs; 91 u32 usecs;
79 u32 p;
80 int i; 92 int i;
81 93
82 mi->stats_update = jiffies; 94 for (i=0; i < MAX_THR_RATES; i++)
95 tmp_tp_rate[i] = 0;
96
83 for (i = 0; i < mi->n_rates; i++) { 97 for (i = 0; i < mi->n_rates; i++) {
84 struct minstrel_rate *mr = &mi->r[i]; 98 struct minstrel_rate *mr = &mi->r[i];
85 99
@@ -87,27 +101,32 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
87 if (!usecs) 101 if (!usecs)
88 usecs = 1000000; 102 usecs = 1000000;
89 103
90 /* To avoid rounding issues, probabilities scale from 0 (0%) 104 if (unlikely(mr->attempts > 0)) {
91 * to 18000 (100%) */ 105 mr->sample_skipped = 0;
92 if (mr->attempts) { 106 mr->cur_prob = MINSTREL_FRAC(mr->success, mr->attempts);
93 p = (mr->success * 18000) / mr->attempts;
94 mr->succ_hist += mr->success; 107 mr->succ_hist += mr->success;
95 mr->att_hist += mr->attempts; 108 mr->att_hist += mr->attempts;
96 mr->cur_prob = p; 109 mr->probability = minstrel_ewma(mr->probability,
97 p = ((p * (100 - mp->ewma_level)) + (mr->probability * 110 mr->cur_prob,
98 mp->ewma_level)) / 100; 111 EWMA_LEVEL);
99 mr->probability = p; 112 } else
100 mr->cur_tp = p * (1000000 / usecs); 113 mr->sample_skipped++;
101 }
102 114
103 mr->last_success = mr->success; 115 mr->last_success = mr->success;
104 mr->last_attempts = mr->attempts; 116 mr->last_attempts = mr->attempts;
105 mr->success = 0; 117 mr->success = 0;
106 mr->attempts = 0; 118 mr->attempts = 0;
107 119
120 /* Update throughput per rate, reset thr. below 10% success */
121 if (mr->probability < MINSTREL_FRAC(10, 100))
122 mr->cur_tp = 0;
123 else
124 mr->cur_tp = mr->probability * (1000000 / usecs);
125
108 /* Sample less often below the 10% chance of success. 126 /* Sample less often below the 10% chance of success.
109 * Sample less often above the 95% chance of success. */ 127 * Sample less often above the 95% chance of success. */
110 if ((mr->probability > 17100) || (mr->probability < 1800)) { 128 if (mr->probability > MINSTREL_FRAC(95, 100) ||
129 mr->probability < MINSTREL_FRAC(10, 100)) {
111 mr->adjusted_retry_count = mr->retry_count >> 1; 130 mr->adjusted_retry_count = mr->retry_count >> 1;
112 if (mr->adjusted_retry_count > 2) 131 if (mr->adjusted_retry_count > 2)
113 mr->adjusted_retry_count = 2; 132 mr->adjusted_retry_count = 2;
@@ -118,35 +137,30 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
118 } 137 }
119 if (!mr->adjusted_retry_count) 138 if (!mr->adjusted_retry_count)
120 mr->adjusted_retry_count = 2; 139 mr->adjusted_retry_count = 2;
121 }
122 140
123 for (i = 0; i < mi->n_rates; i++) { 141 minstrel_sort_best_tp_rates(mi, i, tmp_tp_rate);
124 struct minstrel_rate *mr = &mi->r[i]; 142
125 if (max_tp < mr->cur_tp) { 143 /* To determine the most robust rate (max_prob_rate) used at
126 index_max_tp = i; 144 * 3rd mrr stage we distinguish between two cases:
127 max_tp = mr->cur_tp; 145 * (1) if any success probability >= 95%, out of those rates
128 } 146 * choose the maximum throughput rate as max_prob_rate
129 if (max_prob < mr->probability) { 147 * (2) if all success probabilities < 95%, the rate with
130 index_max_prob = i; 148 * highest success probability is chosen as max_prob_rate */
131 max_prob = mr->probability; 149 if (mr->probability >= MINSTREL_FRAC(95,100)) {
150 if (mr->cur_tp >= mi->r[tmp_prob_rate].cur_tp)
151 tmp_prob_rate = i;
152 } else {
153 if (mr->probability >= mi->r[tmp_prob_rate].probability)
154 tmp_prob_rate = i;
132 } 155 }
133 } 156 }
134 157
135 max_tp = 0; 158 /* Assign the new rate set */
136 for (i = 0; i < mi->n_rates; i++) { 159 memcpy(mi->max_tp_rate, tmp_tp_rate, sizeof(mi->max_tp_rate));
137 struct minstrel_rate *mr = &mi->r[i]; 160 mi->max_prob_rate = tmp_prob_rate;
138
139 if (i == index_max_tp)
140 continue;
141 161
142 if (max_tp < mr->cur_tp) { 162 /* Reset update timer */
143 index_max_tp2 = i; 163 mi->stats_update = jiffies;
144 max_tp = mr->cur_tp;
145 }
146 }
147 mi->max_tp_rate = index_max_tp;
148 mi->max_tp_rate2 = index_max_tp2;
149 mi->max_prob_rate = index_max_prob;
150} 164}
151 165
152static void 166static void
@@ -207,10 +221,10 @@ static int
207minstrel_get_next_sample(struct minstrel_sta_info *mi) 221minstrel_get_next_sample(struct minstrel_sta_info *mi)
208{ 222{
209 unsigned int sample_ndx; 223 unsigned int sample_ndx;
210 sample_ndx = SAMPLE_TBL(mi, mi->sample_idx, mi->sample_column); 224 sample_ndx = SAMPLE_TBL(mi, mi->sample_row, mi->sample_column);
211 mi->sample_idx++; 225 mi->sample_row++;
212 if ((int) mi->sample_idx > (mi->n_rates - 2)) { 226 if ((int) mi->sample_row >= mi->n_rates) {
213 mi->sample_idx = 0; 227 mi->sample_row = 0;
214 mi->sample_column++; 228 mi->sample_column++;
215 if (mi->sample_column >= SAMPLE_COLUMNS) 229 if (mi->sample_column >= SAMPLE_COLUMNS)
216 mi->sample_column = 0; 230 mi->sample_column = 0;
@@ -228,31 +242,37 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
228 struct minstrel_priv *mp = priv; 242 struct minstrel_priv *mp = priv;
229 struct ieee80211_tx_rate *ar = info->control.rates; 243 struct ieee80211_tx_rate *ar = info->control.rates;
230 unsigned int ndx, sample_ndx = 0; 244 unsigned int ndx, sample_ndx = 0;
231 bool mrr; 245 bool mrr_capable;
232 bool sample_slower = false; 246 bool indirect_rate_sampling = false;
233 bool sample = false; 247 bool rate_sampling = false;
234 int i, delta; 248 int i, delta;
235 int mrr_ndx[3]; 249 int mrr_ndx[3];
236 int sample_rate; 250 int sampling_ratio;
237 251
252 /* management/no-ack frames do not use rate control */
238 if (rate_control_send_low(sta, priv_sta, txrc)) 253 if (rate_control_send_low(sta, priv_sta, txrc))
239 return; 254 return;
240 255
241 mrr = mp->has_mrr && !txrc->rts && !txrc->bss_conf->use_cts_prot; 256 /* check multi-rate-retry capabilities & adjust lookaround_rate */
242 257 mrr_capable = mp->has_mrr &&
243 ndx = mi->max_tp_rate; 258 !txrc->rts &&
244 259 !txrc->bss_conf->use_cts_prot;
245 if (mrr) 260 if (mrr_capable)
246 sample_rate = mp->lookaround_rate_mrr; 261 sampling_ratio = mp->lookaround_rate_mrr;
247 else 262 else
248 sample_rate = mp->lookaround_rate; 263 sampling_ratio = mp->lookaround_rate;
264
265 /* init rateindex [ndx] with max throughput rate */
266 ndx = mi->max_tp_rate[0];
249 267
268 /* increase sum packet counter */
250 mi->packet_count++; 269 mi->packet_count++;
251 delta = (mi->packet_count * sample_rate / 100) - 270
271 delta = (mi->packet_count * sampling_ratio / 100) -
252 (mi->sample_count + mi->sample_deferred / 2); 272 (mi->sample_count + mi->sample_deferred / 2);
253 273
254 /* delta > 0: sampling required */ 274 /* delta > 0: sampling required */
255 if ((delta > 0) && (mrr || !mi->prev_sample)) { 275 if ((delta > 0) && (mrr_capable || !mi->prev_sample)) {
256 struct minstrel_rate *msr; 276 struct minstrel_rate *msr;
257 if (mi->packet_count >= 10000) { 277 if (mi->packet_count >= 10000) {
258 mi->sample_deferred = 0; 278 mi->sample_deferred = 0;
@@ -271,21 +291,28 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
271 mi->sample_count += (delta - mi->n_rates * 2); 291 mi->sample_count += (delta - mi->n_rates * 2);
272 } 292 }
273 293
294 /* get next random rate sample */
274 sample_ndx = minstrel_get_next_sample(mi); 295 sample_ndx = minstrel_get_next_sample(mi);
275 msr = &mi->r[sample_ndx]; 296 msr = &mi->r[sample_ndx];
276 sample = true; 297 rate_sampling = true;
277 sample_slower = mrr && (msr->perfect_tx_time > 298
278 mi->r[ndx].perfect_tx_time); 299 /* Decide if direct ( 1st mrr stage) or indirect (2nd mrr stage)
279 300 * rate sampling method should be used.
280 if (!sample_slower) { 301 * Respect such rates that have not been sampled for 20 iterations.
302 */
303 if (mrr_capable &&
304 msr->perfect_tx_time > mi->r[ndx].perfect_tx_time &&
305 msr->sample_skipped < 20)
306 indirect_rate_sampling = true;
307
308 if (!indirect_rate_sampling) {
281 if (msr->sample_limit != 0) { 309 if (msr->sample_limit != 0) {
282 ndx = sample_ndx; 310 ndx = sample_ndx;
283 mi->sample_count++; 311 mi->sample_count++;
284 if (msr->sample_limit > 0) 312 if (msr->sample_limit > 0)
285 msr->sample_limit--; 313 msr->sample_limit--;
286 } else { 314 } else
287 sample = false; 315 rate_sampling = false;
288 }
289 } else { 316 } else {
290 /* Only use IEEE80211_TX_CTL_RATE_CTRL_PROBE to mark 317 /* Only use IEEE80211_TX_CTL_RATE_CTRL_PROBE to mark
291 * packets that have the sampling rate deferred to the 318 * packets that have the sampling rate deferred to the
@@ -297,34 +324,39 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
297 mi->sample_deferred++; 324 mi->sample_deferred++;
298 } 325 }
299 } 326 }
300 mi->prev_sample = sample; 327 mi->prev_sample = rate_sampling;
301 328
302 /* If we're not using MRR and the sampling rate already 329 /* If we're not using MRR and the sampling rate already
303 * has a probability of >95%, we shouldn't be attempting 330 * has a probability of >95%, we shouldn't be attempting
304 * to use it, as this only wastes precious airtime */ 331 * to use it, as this only wastes precious airtime */
305 if (!mrr && sample && (mi->r[ndx].probability > 17100)) 332 if (!mrr_capable && rate_sampling &&
306 ndx = mi->max_tp_rate; 333 (mi->r[ndx].probability > MINSTREL_FRAC(95, 100)))
334 ndx = mi->max_tp_rate[0];
307 335
336 /* mrr setup for 1st stage */
308 ar[0].idx = mi->r[ndx].rix; 337 ar[0].idx = mi->r[ndx].rix;
309 ar[0].count = minstrel_get_retry_count(&mi->r[ndx], info); 338 ar[0].count = minstrel_get_retry_count(&mi->r[ndx], info);
310 339
311 if (!mrr) { 340 /* non mrr setup for 2nd stage */
312 if (!sample) 341 if (!mrr_capable) {
342 if (!rate_sampling)
313 ar[0].count = mp->max_retry; 343 ar[0].count = mp->max_retry;
314 ar[1].idx = mi->lowest_rix; 344 ar[1].idx = mi->lowest_rix;
315 ar[1].count = mp->max_retry; 345 ar[1].count = mp->max_retry;
316 return; 346 return;
317 } 347 }
318 348
319 /* MRR setup */ 349 /* mrr setup for 2nd stage */
320 if (sample) { 350 if (rate_sampling) {
321 if (sample_slower) 351 if (indirect_rate_sampling)
322 mrr_ndx[0] = sample_ndx; 352 mrr_ndx[0] = sample_ndx;
323 else 353 else
324 mrr_ndx[0] = mi->max_tp_rate; 354 mrr_ndx[0] = mi->max_tp_rate[0];
325 } else { 355 } else {
326 mrr_ndx[0] = mi->max_tp_rate2; 356 mrr_ndx[0] = mi->max_tp_rate[1];
327 } 357 }
358
359 /* mrr setup for 3rd & 4th stage */
328 mrr_ndx[1] = mi->max_prob_rate; 360 mrr_ndx[1] = mi->max_prob_rate;
329 mrr_ndx[2] = 0; 361 mrr_ndx[2] = 0;
330 for (i = 1; i < 4; i++) { 362 for (i = 1; i < 4; i++) {
@@ -351,26 +383,21 @@ static void
351init_sample_table(struct minstrel_sta_info *mi) 383init_sample_table(struct minstrel_sta_info *mi)
352{ 384{
353 unsigned int i, col, new_idx; 385 unsigned int i, col, new_idx;
354 unsigned int n_srates = mi->n_rates - 1;
355 u8 rnd[8]; 386 u8 rnd[8];
356 387
357 mi->sample_column = 0; 388 mi->sample_column = 0;
358 mi->sample_idx = 0; 389 mi->sample_row = 0;
359 memset(mi->sample_table, 0, SAMPLE_COLUMNS * mi->n_rates); 390 memset(mi->sample_table, 0xff, SAMPLE_COLUMNS * mi->n_rates);
360 391
361 for (col = 0; col < SAMPLE_COLUMNS; col++) { 392 for (col = 0; col < SAMPLE_COLUMNS; col++) {
362 for (i = 0; i < n_srates; i++) { 393 for (i = 0; i < mi->n_rates; i++) {
363 get_random_bytes(rnd, sizeof(rnd)); 394 get_random_bytes(rnd, sizeof(rnd));
364 new_idx = (i + rnd[i & 7]) % n_srates; 395 new_idx = (i + rnd[i & 7]) % mi->n_rates;
365 396
366 while (SAMPLE_TBL(mi, new_idx, col) != 0) 397 while (SAMPLE_TBL(mi, new_idx, col) != 0xff)
367 new_idx = (new_idx + 1) % n_srates; 398 new_idx = (new_idx + 1) % mi->n_rates;
368 399
369 /* Don't sample the slowest rate (i.e. slowest base 400 SAMPLE_TBL(mi, new_idx, col) = i;
370 * rate). We must presume that the slowest rate works
371 * fine, or else other management frames will also be
372 * failing and the link will break */
373 SAMPLE_TBL(mi, new_idx, col) = i + 1;
374 } 401 }
375 } 402 }
376} 403}
@@ -542,9 +569,6 @@ minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
542 mp->lookaround_rate = 5; 569 mp->lookaround_rate = 5;
543 mp->lookaround_rate_mrr = 10; 570 mp->lookaround_rate_mrr = 10;
544 571
545 /* moving average weight for EWMA */
546 mp->ewma_level = 75;
547
548 /* maximum time that the hw is allowed to stay in one MRR segment */ 572 /* maximum time that the hw is allowed to stay in one MRR segment */
549 mp->segment_size = 6000; 573 mp->segment_size = 6000;
550 574
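
minstrel_update_stats() above no longer keeps two ad-hoc maxima; minstrel_sort_best_tp_rates() maintains a list of the MAX_THR_RATES best rate indices, sorted by current throughput, with one small insertion step per rate. The standalone sketch below mirrors that insertion logic against a made-up throughput table, so the memmove arithmetic is easier to follow; the array contents are hypothetical.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MAX_THR_RATES 4

/* hypothetical per-rate throughput values, higher is better */
static unsigned int cur_tp[] = { 10, 80, 35, 60, 5, 90 };

/* insert rate index i into tp_list, keeping it sorted by descending cur_tp
 * (same structure as minstrel_sort_best_tp_rates() in the hunk above) */
static void sort_best_tp_rates(int i, uint8_t *tp_list)
{
	int j = MAX_THR_RATES;

	while (j > 0 && cur_tp[i] > cur_tp[tp_list[j - 1]])
		j--;
	if (j < MAX_THR_RATES - 1)
		memmove(&tp_list[j + 1], &tp_list[j], MAX_THR_RATES - (j + 1));
	if (j < MAX_THR_RATES)
		tp_list[j] = i;
}

int main(void)
{
	uint8_t tp_list[MAX_THR_RATES] = { 0, 0, 0, 0 };  /* starts as "rate 0" everywhere */

	for (int i = 0; i < (int)(sizeof(cur_tp) / sizeof(cur_tp[0])); i++)
		sort_best_tp_rates(i, tp_list);

	/* expect the indices of the four highest throughputs: 5 (90), 1 (80), 3 (60), 2 (35) */
	for (int j = 0; j < MAX_THR_RATES; j++)
		printf("rank %d: rate %u (tp %u)\n", j, tp_list[j], cur_tp[tp_list[j]]);
	return 0;
}

Because tp_list holds u8 indices, the memmove length MAX_THR_RATES - (j + 1) is both an element count and a byte count, which is why the kernel version can pass it straight to memmove().
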
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index 5ecf757817f2..85ebf42cb46d 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -9,6 +9,28 @@
9#ifndef __RC_MINSTREL_H 9#ifndef __RC_MINSTREL_H
10#define __RC_MINSTREL_H 10#define __RC_MINSTREL_H
11 11
12#define EWMA_LEVEL 75 /* ewma weighting factor [%] */
13#define SAMPLE_COLUMNS 10 /* number of columns in sample table */
14
15
16/* scaled fraction values */
17#define MINSTREL_SCALE 16
18#define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / div)
19#define MINSTREL_TRUNC(val) ((val) >> MINSTREL_SCALE)
20
21/* number of highest throughput rates to consider */
22#define MAX_THR_RATES 4
23
24/*
25 * Perform EWMA (Exponentially Weighted Moving Average) calculation
26 */
27static inline int
28minstrel_ewma(int old, int new, int weight)
29{
30 return (new * (100 - weight) + old * weight) / 100;
31}
32
33
12struct minstrel_rate { 34struct minstrel_rate {
13 int bitrate; 35 int bitrate;
14 int rix; 36 int rix;
@@ -26,6 +48,7 @@ struct minstrel_rate {
26 u32 attempts; 48 u32 attempts;
27 u32 last_attempts; 49 u32 last_attempts;
28 u32 last_success; 50 u32 last_success;
51 u8 sample_skipped;
29 52
30 /* parts per thousand */ 53 /* parts per thousand */
31 u32 cur_prob; 54 u32 cur_prob;
@@ -45,14 +68,13 @@ struct minstrel_sta_info {
45 68
46 unsigned int lowest_rix; 69 unsigned int lowest_rix;
47 70
48 unsigned int max_tp_rate; 71 u8 max_tp_rate[MAX_THR_RATES];
49 unsigned int max_tp_rate2; 72 u8 max_prob_rate;
50 unsigned int max_prob_rate;
51 unsigned int packet_count; 73 unsigned int packet_count;
52 unsigned int sample_count; 74 unsigned int sample_count;
53 int sample_deferred; 75 int sample_deferred;
54 76
55 unsigned int sample_idx; 77 unsigned int sample_row;
56 unsigned int sample_column; 78 unsigned int sample_column;
57 79
58 int n_rates; 80 int n_rates;
@@ -73,7 +95,6 @@ struct minstrel_priv {
73 unsigned int cw_min; 95 unsigned int cw_min;
74 unsigned int cw_max; 96 unsigned int cw_max;
75 unsigned int max_retry; 97 unsigned int max_retry;
76 unsigned int ewma_level;
77 unsigned int segment_size; 98 unsigned int segment_size;
78 unsigned int update_interval; 99 unsigned int update_interval;
79 unsigned int lookaround_rate; 100 unsigned int lookaround_rate;
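
The header above consolidates the fixed-point helpers that legacy minstrel now shares with minstrel_ht: MINSTREL_FRAC(val, div) stores val/div scaled by 2^MINSTREL_SCALE (65536), MINSTREL_TRUNC() drops the scaling again, and minstrel_ewma() smooths a new sample against the previous value with EWMA_LEVEL weighting. A short worked example of the probability update performed in minstrel_update_stats(), using made-up success and attempt counts:

#include <stdio.h>

#define MINSTREL_SCALE 16
#define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / div)
#define MINSTREL_TRUNC(val)     ((val) >> MINSTREL_SCALE)
#define EWMA_LEVEL 75 /* ewma weighting factor [%] */

/* same formula as minstrel_ewma(): the new sample is weighted at (100 - weight)% */
static int minstrel_ewma(int old, int new_val, int weight)
{
	return (new_val * (100 - weight) + old * weight) / 100;
}

int main(void)
{
	/* hypothetical interval: 45 of 50 frames succeeded */
	int cur_prob = MINSTREL_FRAC(45, 50);            /* 90% as a scaled fraction */
	int old_prob = MINSTREL_FRAC(60, 100);           /* previous smoothed value: 60% */
	int prob = minstrel_ewma(old_prob, cur_prob, EWMA_LEVEL);

	/* print as percent: multiply by 100 before truncating the scale */
	printf("cur_prob = %d (%d%%)\n", cur_prob, MINSTREL_TRUNC(cur_prob * 100));
	printf("smoothed = %d (%d%%)\n", prob, MINSTREL_TRUNC(prob * 100));

	/* thresholds used in the hunks above */
	printf("95%% threshold = %d, 10%% threshold = %d\n",
	       MINSTREL_FRAC(95, 100), MINSTREL_FRAC(10, 100));
	return 0;
}

With EWMA_LEVEL at 75, a 90% interval measured on top of a 60% history lands at 67%, matching the 0.75 * 60 + 0.25 * 90 expectation up to integer truncation.
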
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index d5a56226e675..d1048348d399 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -73,15 +73,17 @@ minstrel_stats_open(struct inode *inode, struct file *file)
73 for (i = 0; i < mi->n_rates; i++) { 73 for (i = 0; i < mi->n_rates; i++) {
74 struct minstrel_rate *mr = &mi->r[i]; 74 struct minstrel_rate *mr = &mi->r[i];
75 75
76 *(p++) = (i == mi->max_tp_rate) ? 'T' : ' '; 76 *(p++) = (i == mi->max_tp_rate[0]) ? 'A' : ' ';
77 *(p++) = (i == mi->max_tp_rate2) ? 't' : ' '; 77 *(p++) = (i == mi->max_tp_rate[1]) ? 'B' : ' ';
78 *(p++) = (i == mi->max_tp_rate[2]) ? 'C' : ' ';
79 *(p++) = (i == mi->max_tp_rate[3]) ? 'D' : ' ';
78 *(p++) = (i == mi->max_prob_rate) ? 'P' : ' '; 80 *(p++) = (i == mi->max_prob_rate) ? 'P' : ' ';
79 p += sprintf(p, "%3u%s", mr->bitrate / 2, 81 p += sprintf(p, "%3u%s", mr->bitrate / 2,
80 (mr->bitrate & 1 ? ".5" : " ")); 82 (mr->bitrate & 1 ? ".5" : " "));
81 83
82 tp = mr->cur_tp / ((18000 << 10) / 96); 84 tp = MINSTREL_TRUNC(mr->cur_tp / 10);
83 prob = mr->cur_prob / 18; 85 prob = MINSTREL_TRUNC(mr->cur_prob * 1000);
84 eprob = mr->probability / 18; 86 eprob = MINSTREL_TRUNC(mr->probability * 1000);
85 87
86 p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u " 88 p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u "
87 "%3u(%3u) %8llu %8llu\n", 89 "%3u(%3u) %8llu %8llu\n",
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 3af141c69712..d2b264d1311d 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -17,8 +17,6 @@
17#include "rc80211_minstrel_ht.h" 17#include "rc80211_minstrel_ht.h"
18 18
19#define AVG_PKT_SIZE 1200 19#define AVG_PKT_SIZE 1200
20#define SAMPLE_COLUMNS 10
21#define EWMA_LEVEL 75
22 20
23/* Number of bits for an average sized packet */ 21/* Number of bits for an average sized packet */
24#define MCS_NBITS (AVG_PKT_SIZE << 3) 22#define MCS_NBITS (AVG_PKT_SIZE << 3)
@@ -26,11 +24,11 @@
26/* Number of symbols for a packet with (bps) bits per symbol */ 24/* Number of symbols for a packet with (bps) bits per symbol */
27#define MCS_NSYMS(bps) ((MCS_NBITS + (bps) - 1) / (bps)) 25#define MCS_NSYMS(bps) ((MCS_NBITS + (bps) - 1) / (bps))
28 26
29/* Transmission time for a packet containing (syms) symbols */ 27/* Transmission time (nanoseconds) for a packet containing (syms) symbols */
30#define MCS_SYMBOL_TIME(sgi, syms) \ 28#define MCS_SYMBOL_TIME(sgi, syms) \
31 (sgi ? \ 29 (sgi ? \
32 ((syms) * 18 + 4) / 5 : /* syms * 3.6 us */ \ 30 ((syms) * 18000 + 4000) / 5 : /* syms * 3.6 us */ \
33 (syms) << 2 /* syms * 4 us */ \ 31 ((syms) * 1000) << 2 /* syms * 4 us */ \
34 ) 32 )
35 33
36/* Transmit duration for the raw data part of an average sized packet */ 34/* Transmit duration for the raw data part of an average sized packet */
@@ -64,9 +62,9 @@
64} 62}
65 63
66#define CCK_DURATION(_bitrate, _short, _len) \ 64#define CCK_DURATION(_bitrate, _short, _len) \
67 (10 /* SIFS */ + \ 65 (1000 * (10 /* SIFS */ + \
68 (_short ? 72 + 24 : 144 + 48 ) + \ 66 (_short ? 72 + 24 : 144 + 48 ) + \
69 (8 * (_len + 4) * 10) / (_bitrate)) 67 (8 * (_len + 4) * 10) / (_bitrate)))
70 68
71#define CCK_ACK_DURATION(_bitrate, _short) \ 69#define CCK_ACK_DURATION(_bitrate, _short) \
72 (CCK_DURATION((_bitrate > 10 ? 20 : 10), false, 60) + \ 70 (CCK_DURATION((_bitrate > 10 ? 20 : 10), false, 60) + \
@@ -129,15 +127,6 @@ const struct mcs_group minstrel_mcs_groups[] = {
129static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES]; 127static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES];
130 128
131/* 129/*
132 * Perform EWMA (Exponentially Weighted Moving Average) calculation
133 */
134static int
135minstrel_ewma(int old, int new, int weight)
136{
137 return (new * (100 - weight) + old * weight) / 100;
138}
139
140/*
141 * Look up an MCS group index based on mac80211 rate information 130 * Look up an MCS group index based on mac80211 rate information
142 */ 131 */
143static int 132static int
@@ -211,20 +200,32 @@ static void
211minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate) 200minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
212{ 201{
213 struct minstrel_rate_stats *mr; 202 struct minstrel_rate_stats *mr;
214 unsigned int usecs = 0; 203 unsigned int nsecs = 0;
204 unsigned int tp;
205 unsigned int prob;
215 206
216 mr = &mi->groups[group].rates[rate]; 207 mr = &mi->groups[group].rates[rate];
208 prob = mr->probability;
217 209
218 if (mr->probability < MINSTREL_FRAC(1, 10)) { 210 if (prob < MINSTREL_FRAC(1, 10)) {
219 mr->cur_tp = 0; 211 mr->cur_tp = 0;
220 return; 212 return;
221 } 213 }
222 214
215 /*
216 * For the throughput calculation, limit the probability value to 90% to
217 * account for collision related packet error rate fluctuation
218 */
219 if (prob > MINSTREL_FRAC(9, 10))
220 prob = MINSTREL_FRAC(9, 10);
221
223 if (group != MINSTREL_CCK_GROUP) 222 if (group != MINSTREL_CCK_GROUP)
224 usecs = mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len); 223 nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);
224
225 nsecs += minstrel_mcs_groups[group].duration[rate];
226 tp = 1000000 * ((mr->probability * 1000) / nsecs);
225 227
226 usecs += minstrel_mcs_groups[group].duration[rate]; 228 mr->cur_tp = MINSTREL_TRUNC(tp);
227 mr->cur_tp = MINSTREL_TRUNC((1000000 / usecs) * mr->probability);
228} 229}
229 230
230/* 231/*
@@ -308,8 +309,8 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
308 } 309 }
309 } 310 }
310 311
311 /* try to sample up to half of the available rates during each interval */ 312 /* try to sample all available rates during each interval */
312 mi->sample_count *= 4; 313 mi->sample_count *= 8;
313 314
314 cur_prob = 0; 315 cur_prob = 0;
315 cur_prob_tp = 0; 316 cur_prob_tp = 0;
@@ -320,20 +321,13 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
320 if (!mg->supported) 321 if (!mg->supported)
321 continue; 322 continue;
322 323
323 mr = minstrel_get_ratestats(mi, mg->max_prob_rate);
324 if (cur_prob_tp < mr->cur_tp &&
325 minstrel_mcs_groups[group].streams == 1) {
326 mi->max_prob_rate = mg->max_prob_rate;
327 cur_prob = mr->cur_prob;
328 cur_prob_tp = mr->cur_tp;
329 }
330
331 mr = minstrel_get_ratestats(mi, mg->max_tp_rate); 324 mr = minstrel_get_ratestats(mi, mg->max_tp_rate);
332 if (cur_tp < mr->cur_tp) { 325 if (cur_tp < mr->cur_tp) {
333 mi->max_tp_rate2 = mi->max_tp_rate; 326 mi->max_tp_rate2 = mi->max_tp_rate;
334 cur_tp2 = cur_tp; 327 cur_tp2 = cur_tp;
335 mi->max_tp_rate = mg->max_tp_rate; 328 mi->max_tp_rate = mg->max_tp_rate;
336 cur_tp = mr->cur_tp; 329 cur_tp = mr->cur_tp;
330 mi->max_prob_streams = minstrel_mcs_groups[group].streams - 1;
337 } 331 }
338 332
339 mr = minstrel_get_ratestats(mi, mg->max_tp_rate2); 333 mr = minstrel_get_ratestats(mi, mg->max_tp_rate2);
@@ -343,6 +337,23 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
343 } 337 }
344 } 338 }
345 339
340 if (mi->max_prob_streams < 1)
341 mi->max_prob_streams = 1;
342
343 for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
344 mg = &mi->groups[group];
345 if (!mg->supported)
346 continue;
347 mr = minstrel_get_ratestats(mi, mg->max_prob_rate);
348 if (cur_prob_tp < mr->cur_tp &&
349 minstrel_mcs_groups[group].streams <= mi->max_prob_streams) {
350 mi->max_prob_rate = mg->max_prob_rate;
351 cur_prob = mr->cur_prob;
352 cur_prob_tp = mr->cur_tp;
353 }
354 }
355
356
346 mi->stats_update = jiffies; 357 mi->stats_update = jiffies;
347} 358}
348 359
@@ -467,7 +478,7 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
467 478
468 if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) { 479 if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) {
469 mi->sample_wait = 16 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len); 480 mi->sample_wait = 16 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len);
470 mi->sample_tries = 2; 481 mi->sample_tries = 1;
471 mi->sample_count--; 482 mi->sample_count--;
472 } 483 }
473 484
@@ -536,7 +547,7 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
536 mr->retry_updated = true; 547 mr->retry_updated = true;
537 548
538 group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; 549 group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
539 tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len; 550 tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len / 1000;
540 551
541 /* Contention time for first 2 tries */ 552 /* Contention time for first 2 tries */
542 ctime = (t_slot * cw) >> 1; 553 ctime = (t_slot * cw) >> 1;
@@ -616,6 +627,7 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
616{ 627{
617 struct minstrel_rate_stats *mr; 628 struct minstrel_rate_stats *mr;
618 struct minstrel_mcs_group_data *mg; 629 struct minstrel_mcs_group_data *mg;
630 unsigned int sample_dur, sample_group;
619 int sample_idx = 0; 631 int sample_idx = 0;
620 632
621 if (mi->sample_wait > 0) { 633 if (mi->sample_wait > 0) {
@@ -626,39 +638,46 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
626 if (!mi->sample_tries) 638 if (!mi->sample_tries)
627 return -1; 639 return -1;
628 640
629 mi->sample_tries--;
630 mg = &mi->groups[mi->sample_group]; 641 mg = &mi->groups[mi->sample_group];
631 sample_idx = sample_table[mg->column][mg->index]; 642 sample_idx = sample_table[mg->column][mg->index];
632 mr = &mg->rates[sample_idx]; 643 mr = &mg->rates[sample_idx];
633 sample_idx += mi->sample_group * MCS_GROUP_RATES; 644 sample_group = mi->sample_group;
645 sample_idx += sample_group * MCS_GROUP_RATES;
634 minstrel_next_sample_idx(mi); 646 minstrel_next_sample_idx(mi);
635 647
636 /* 648 /*
637 * Sampling might add some overhead (RTS, no aggregation) 649 * Sampling might add some overhead (RTS, no aggregation)
638 * to the frame. Hence, don't use sampling for the currently 650 * to the frame. Hence, don't use sampling for the currently
639 * used max TP rate. 651 * used rates.
640 */ 652 */
641 if (sample_idx == mi->max_tp_rate) 653 if (sample_idx == mi->max_tp_rate ||
654 sample_idx == mi->max_tp_rate2 ||
655 sample_idx == mi->max_prob_rate)
642 return -1; 656 return -1;
657
643 /* 658 /*
644 * When not using MRR, do not sample if the probability is already 659 * Do not sample if the probability is already higher than 95%
645 * higher than 95% to avoid wasting airtime 660 * to avoid wasting airtime.
646 */ 661 */
647 if (!mp->has_mrr && (mr->probability > MINSTREL_FRAC(95, 100))) 662 if (mr->probability > MINSTREL_FRAC(95, 100))
648 return -1; 663 return -1;
649 664
650 /* 665 /*
651 * Make sure that lower rates get sampled only occasionally, 666 * Make sure that lower rates get sampled only occasionally,
652 * if the link is working perfectly. 667 * if the link is working perfectly.
653 */ 668 */
654 if (minstrel_get_duration(sample_idx) > 669 sample_dur = minstrel_get_duration(sample_idx);
655 minstrel_get_duration(mi->max_tp_rate)) { 670 if (sample_dur >= minstrel_get_duration(mi->max_tp_rate2) &&
671 (mi->max_prob_streams <
672 minstrel_mcs_groups[sample_group].streams ||
673 sample_dur >= minstrel_get_duration(mi->max_prob_rate))) {
656 if (mr->sample_skipped < 20) 674 if (mr->sample_skipped < 20)
657 return -1; 675 return -1;
658 676
659 if (mi->sample_slow++ > 2) 677 if (mi->sample_slow++ > 2)
660 return -1; 678 return -1;
661 } 679 }
680 mi->sample_tries--;
662 681
663 return sample_idx; 682 return sample_idx;
664} 683}
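
The minstrel_ht hunks switch the per-rate airtime tables from microseconds to nanoseconds (see the reworked MCS_SYMBOL_TIME and CCK_DURATION macros) and, per the new comment in minstrel_ht_calc_tp(), cap the probability considered for throughput at 90% to damp collision-related swings. The snippet below only reproduces the unit arithmetic of the new throughput computation, with invented airtime and probability numbers, to show what cur_tp ends up measuring.

#include <stdio.h>

#define MINSTREL_SCALE 16
#define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / div)
#define MINSTREL_TRUNC(val)     ((val) >> MINSTREL_SCALE)

int main(void)
{
	/* hypothetical numbers: 200000 ns airtime per average 1200-byte packet,
	 * 90% delivery probability (stored as a MINSTREL_SCALE fraction) */
	unsigned int nsecs = 200000;
	unsigned int prob  = MINSTREL_FRAC(90, 100);

	/* same shape as the new throughput computation above */
	unsigned int tp = 1000000 * ((prob * 1000) / nsecs);
	unsigned int cur_tp = MINSTREL_TRUNC(tp);

	/* cur_tp is roughly "successful average-sized packets per second":
	 * 0.9 * (1e9 / 200000) = 4500; integer math gives 4486 */
	printf("cur_tp = %u pkts/s, ~%u Mbit/s\n",
	       cur_tp, cur_tp * 1200 * 8 / 1000000);
	return 0;
}

After MINSTREL_TRUNC() the result is approximately the expected number of successfully delivered average-sized packets per second; the 10^9 nanoseconds-per-second factor is built from the 1000000 and 1000 multipliers in the formula.
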
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h
index 302dbd52180d..9b16e9de9923 100644
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -16,11 +16,6 @@
16#define MINSTREL_MAX_STREAMS 3 16#define MINSTREL_MAX_STREAMS 3
17#define MINSTREL_STREAM_GROUPS 4 17#define MINSTREL_STREAM_GROUPS 4
18 18
19/* scaled fraction values */
20#define MINSTREL_SCALE 16
21#define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / div)
22#define MINSTREL_TRUNC(val) ((val) >> MINSTREL_SCALE)
23
24#define MCS_GROUP_RATES 8 19#define MCS_GROUP_RATES 8
25 20
26struct mcs_group { 21struct mcs_group {
@@ -85,6 +80,7 @@ struct minstrel_ht_sta {
85 80
86 /* best probability rate */ 81 /* best probability rate */
87 unsigned int max_prob_rate; 82 unsigned int max_prob_rate;
83 unsigned int max_prob_streams;
88 84
89 /* time of last status update */ 85 /* time of last status update */
90 unsigned long stats_update; 86 unsigned long stats_update;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index c6844ad080be..2528b5a4d6d4 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -648,24 +648,6 @@ static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
648 return RX_CONTINUE; 648 return RX_CONTINUE;
649} 649}
650 650
651#define SEQ_MODULO 0x1000
652#define SEQ_MASK 0xfff
653
654static inline int seq_less(u16 sq1, u16 sq2)
655{
656 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
657}
658
659static inline u16 seq_inc(u16 sq)
660{
661 return (sq + 1) & SEQ_MASK;
662}
663
664static inline u16 seq_sub(u16 sq1, u16 sq2)
665{
666 return (sq1 - sq2) & SEQ_MASK;
667}
668
669static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata, 651static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
670 struct tid_ampdu_rx *tid_agg_rx, 652 struct tid_ampdu_rx *tid_agg_rx,
671 int index, 653 int index,
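
The hunk above replaces the file-local seq_less/seq_inc/seq_sub helpers with the shared ieee80211_sn_*() versions; the arithmetic itself is unchanged, since 802.11 sequence numbers occupy a 12-bit space and all comparisons and distances are taken modulo 4096. A small standalone demonstration of that wraparound arithmetic, mirroring the removed helpers:

#include <stdio.h>

#define SEQ_MODULO 0x1000   /* 12-bit sequence number space */
#define SEQ_MASK   0xfff

/* same definitions as the seq_* helpers removed above */
static int seq_less(unsigned short sq1, unsigned short sq2)
{
	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

static unsigned short seq_inc(unsigned short sq)
{
	return (sq + 1) & SEQ_MASK;
}

static unsigned short seq_sub(unsigned short sq1, unsigned short sq2)
{
	return (sq1 - sq2) & SEQ_MASK;
}

int main(void)
{
	/* wraparound: 4095 + 1 == 0, and 5 counts as "after" 4090 despite being smaller */
	printf("seq_inc(4095) = %u\n", seq_inc(4095));
	printf("seq_less(4090, 5) = %d\n", seq_less(4090, 5));   /* 1: 4090 is older */
	printf("seq_sub(5, 4090)  = %u\n", seq_sub(5, 4090));    /* 11 frames apart */
	return 0;
}

The half-space test in seq_less() is what makes 5 count as newer than 4090: any backward distance larger than 2048 is interpreted as a wrap.
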
@@ -687,7 +669,7 @@ static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
687 __skb_queue_tail(frames, skb); 669 __skb_queue_tail(frames, skb);
688 670
689no_frame: 671no_frame:
690 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); 672 tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
691} 673}
692 674
693static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata, 675static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
@@ -699,8 +681,9 @@ static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata
699 681
700 lockdep_assert_held(&tid_agg_rx->reorder_lock); 682 lockdep_assert_held(&tid_agg_rx->reorder_lock);
701 683
702 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) { 684 while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
703 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % 685 index = ieee80211_sn_sub(tid_agg_rx->head_seq_num,
686 tid_agg_rx->ssn) %
704 tid_agg_rx->buf_size; 687 tid_agg_rx->buf_size;
705 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, 688 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
706 frames); 689 frames);
@@ -727,8 +710,8 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
727 lockdep_assert_held(&tid_agg_rx->reorder_lock); 710 lockdep_assert_held(&tid_agg_rx->reorder_lock);
728 711
729 /* release the buffer until next missing frame */ 712 /* release the buffer until next missing frame */
730 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % 713 index = ieee80211_sn_sub(tid_agg_rx->head_seq_num,
731 tid_agg_rx->buf_size; 714 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
732 if (!tid_agg_rx->reorder_buf[index] && 715 if (!tid_agg_rx->reorder_buf[index] &&
733 tid_agg_rx->stored_mpdu_num) { 716 tid_agg_rx->stored_mpdu_num) {
734 /* 717 /*
@@ -756,19 +739,22 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
756 * Increment the head seq# also for the skipped slots. 739 * Increment the head seq# also for the skipped slots.
757 */ 740 */
758 tid_agg_rx->head_seq_num = 741 tid_agg_rx->head_seq_num =
759 (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK; 742 (tid_agg_rx->head_seq_num +
743 skipped) & IEEE80211_SN_MASK;
760 skipped = 0; 744 skipped = 0;
761 } 745 }
762 } else while (tid_agg_rx->reorder_buf[index]) { 746 } else while (tid_agg_rx->reorder_buf[index]) {
763 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, 747 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
764 frames); 748 frames);
765 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % 749 index = ieee80211_sn_sub(tid_agg_rx->head_seq_num,
750 tid_agg_rx->ssn) %
766 tid_agg_rx->buf_size; 751 tid_agg_rx->buf_size;
767 } 752 }
768 753
769 if (tid_agg_rx->stored_mpdu_num) { 754 if (tid_agg_rx->stored_mpdu_num) {
770 j = index = seq_sub(tid_agg_rx->head_seq_num, 755 j = index = ieee80211_sn_sub(tid_agg_rx->head_seq_num,
771 tid_agg_rx->ssn) % tid_agg_rx->buf_size; 756 tid_agg_rx->ssn) %
757 tid_agg_rx->buf_size;
772 758
773 for (; j != (index - 1) % tid_agg_rx->buf_size; 759 for (; j != (index - 1) % tid_agg_rx->buf_size;
774 j = (j + 1) % tid_agg_rx->buf_size) { 760 j = (j + 1) % tid_agg_rx->buf_size) {
@@ -809,7 +795,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
809 head_seq_num = tid_agg_rx->head_seq_num; 795 head_seq_num = tid_agg_rx->head_seq_num;
810 796
811 /* frame with out of date sequence number */ 797 /* frame with out of date sequence number */
812 if (seq_less(mpdu_seq_num, head_seq_num)) { 798 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
813 dev_kfree_skb(skb); 799 dev_kfree_skb(skb);
814 goto out; 800 goto out;
815 } 801 }
@@ -818,8 +804,9 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
818 * If frame the sequence number exceeds our buffering window 804 * If frame the sequence number exceeds our buffering window
819 * size release some previous frames to make room for this one. 805 * size release some previous frames to make room for this one.
820 */ 806 */
821 if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) { 807 if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
822 head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size)); 808 head_seq_num = ieee80211_sn_inc(
809 ieee80211_sn_sub(mpdu_seq_num, buf_size));
823 /* release stored frames up to new head to stack */ 810 /* release stored frames up to new head to stack */
824 ieee80211_release_reorder_frames(sdata, tid_agg_rx, 811 ieee80211_release_reorder_frames(sdata, tid_agg_rx,
825 head_seq_num, frames); 812 head_seq_num, frames);
@@ -827,7 +814,8 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
827 814
828 /* Now the new frame is always in the range of the reordering buffer */ 815 /* Now the new frame is always in the range of the reordering buffer */
829 816
830 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size; 817 index = ieee80211_sn_sub(mpdu_seq_num,
818 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
831 819
832 /* check if we already stored this frame */ 820 /* check if we already stored this frame */
833 if (tid_agg_rx->reorder_buf[index]) { 821 if (tid_agg_rx->reorder_buf[index]) {
@@ -843,7 +831,8 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
843 */ 831 */
844 if (mpdu_seq_num == tid_agg_rx->head_seq_num && 832 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
845 tid_agg_rx->stored_mpdu_num == 0) { 833 tid_agg_rx->stored_mpdu_num == 0) {
846 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); 834 tid_agg_rx->head_seq_num =
835 ieee80211_sn_inc(tid_agg_rx->head_seq_num);
847 ret = false; 836 ret = false;
848 goto out; 837 goto out;
849 } 838 }
@@ -1894,8 +1883,10 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1894 * 'align' will only take the values 0 or 2 here 1883 * 'align' will only take the values 0 or 2 here
1895 * since all frames are required to be aligned 1884 * since all frames are required to be aligned
1896 * to 2-byte boundaries when being passed to 1885 * to 2-byte boundaries when being passed to
1897 * mac80211. That also explains the __skb_push() 1886 * mac80211; the code here works just as well if
1898 * below. 1887 * that isn't true, but mac80211 assumes it can
1888 * access fields as 2-byte aligned (e.g. for
1889 * compare_ether_addr)
1899 */ 1890 */
1900 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3; 1891 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
1901 if (align) { 1892 if (align) {
@@ -2552,7 +2543,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2552 case WLAN_SP_MESH_PEERING_CONFIRM: 2543 case WLAN_SP_MESH_PEERING_CONFIRM:
2553 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2544 if (!ieee80211_vif_is_mesh(&sdata->vif))
2554 goto invalid; 2545 goto invalid;
2555 if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE) 2546 if (sdata->u.mesh.user_mpm)
2556 /* userspace handles this frame */ 2547 /* userspace handles this frame */
2557 break; 2548 break;
2558 goto queue; 2549 goto queue;
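The local seq_less()/seq_inc()/seq_sub() helpers removed above give way to the shared ieee80211_sn_less()/ieee80211_sn_inc()/ieee80211_sn_sub() helpers, which implement the same arithmetic in the 12-bit (modulo-4096) 802.11 sequence-number space. A self-contained sketch of that arithmetic, using the constants shown in the removed lines:

#include <stdio.h>

#define SN_MODULO 0x1000
#define SN_MASK   0xfff

/* "less than" in the 12-bit circular sequence space */
static int sn_less(unsigned short a, unsigned short b)
{
    return ((a - b) & SN_MASK) > (SN_MODULO >> 1);
}

static unsigned short sn_inc(unsigned short s)
{
    return (s + 1) & SN_MASK;
}

static unsigned short sn_sub(unsigned short a, unsigned short b)
{
    return (a - b) & SN_MASK;
}

int main(void)
{
    /* wraparound: 4090 is "less than" 5, 4095 + 1 wraps to 0,
     * and 3 - 4090 is 9 slots in the circular space; prints "1 0 9" */
    printf("%d %u %u\n", sn_less(4090, 5), sn_inc(4095), sn_sub(3, 4090));
    return 0;
}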
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 43a45cf00e06..cb34cbbaa20c 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -153,7 +153,6 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
153 u8 *elements; 153 u8 *elements;
154 struct ieee80211_channel *channel; 154 struct ieee80211_channel *channel;
155 size_t baselen; 155 size_t baselen;
156 bool beacon;
157 struct ieee802_11_elems elems; 156 struct ieee802_11_elems elems;
158 157
159 if (skb->len < 24 || 158 if (skb->len < 24 ||
@@ -175,11 +174,9 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
175 174
176 elements = mgmt->u.probe_resp.variable; 175 elements = mgmt->u.probe_resp.variable;
177 baselen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); 176 baselen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
178 beacon = false;
179 } else { 177 } else {
180 baselen = offsetof(struct ieee80211_mgmt, u.beacon.variable); 178 baselen = offsetof(struct ieee80211_mgmt, u.beacon.variable);
181 elements = mgmt->u.beacon.variable; 179 elements = mgmt->u.beacon.variable;
182 beacon = true;
183 } 180 }
184 181
185 if (baselen > skb->len) 182 if (baselen > skb->len)
@@ -335,7 +332,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
335 ieee80211_offchannel_stop_vifs(local); 332 ieee80211_offchannel_stop_vifs(local);
336 333
337 /* ensure nullfunc is transmitted before leaving operating channel */ 334 /* ensure nullfunc is transmitted before leaving operating channel */
338 drv_flush(local, false); 335 ieee80211_flush_queues(local, NULL);
339 336
340 ieee80211_configure_filter(local); 337 ieee80211_configure_filter(local);
341 338
@@ -671,7 +668,7 @@ static void ieee80211_scan_state_resume(struct ieee80211_local *local,
671 ieee80211_offchannel_stop_vifs(local); 668 ieee80211_offchannel_stop_vifs(local);
672 669
673 if (local->ops->flush) { 670 if (local->ops->flush) {
674 drv_flush(local, false); 671 ieee80211_flush_queues(local, NULL);
675 *next_delay = 0; 672 *next_delay = 0;
676 } else 673 } else
677 *next_delay = HZ / 10; 674 *next_delay = HZ / 10;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 238a0cca320e..11216bc13b27 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -342,6 +342,11 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
342 INIT_WORK(&sta->drv_unblock_wk, sta_unblock); 342 INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
343 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); 343 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
344 mutex_init(&sta->ampdu_mlme.mtx); 344 mutex_init(&sta->ampdu_mlme.mtx);
345#ifdef CONFIG_MAC80211_MESH
346 if (ieee80211_vif_is_mesh(&sdata->vif) &&
347 !sdata->u.mesh.user_mpm)
348 init_timer(&sta->plink_timer);
349#endif
345 350
346 memcpy(sta->sta.addr, addr, ETH_ALEN); 351 memcpy(sta->sta.addr, addr, ETH_ALEN);
347 sta->local = local; 352 sta->local = local;
@@ -551,6 +556,15 @@ static inline void __bss_tim_clear(u8 *tim, u16 id)
551 tim[id / 8] &= ~(1 << (id % 8)); 556 tim[id / 8] &= ~(1 << (id % 8));
552} 557}
553 558
559static inline bool __bss_tim_get(u8 *tim, u16 id)
560{
561 /*
562 * This format has been mandated by the IEEE specifications,
563 * so this line may not be changed to use the test_bit() format.
564 */
565 return tim[id / 8] & (1 << (id % 8));
566}
567
554static unsigned long ieee80211_tids_for_ac(int ac) 568static unsigned long ieee80211_tids_for_ac(int ac)
555{ 569{
556 /* If we ever support TIDs > 7, this obviously needs to be adjusted */ 570 /* If we ever support TIDs > 7, this obviously needs to be adjusted */
@@ -631,6 +645,9 @@ void sta_info_recalc_tim(struct sta_info *sta)
631 done: 645 done:
632 spin_lock_bh(&local->tim_lock); 646 spin_lock_bh(&local->tim_lock);
633 647
648 if (indicate_tim == __bss_tim_get(ps->tim, id))
649 goto out_unlock;
650
634 if (indicate_tim) 651 if (indicate_tim)
635 __bss_tim_set(ps->tim, id); 652 __bss_tim_set(ps->tim, id);
636 else 653 else
@@ -642,6 +659,7 @@ void sta_info_recalc_tim(struct sta_info *sta)
642 local->tim_in_locked_section = false; 659 local->tim_in_locked_section = false;
643 } 660 }
644 661
662out_unlock:
645 spin_unlock_bh(&local->tim_lock); 663 spin_unlock_bh(&local->tim_lock);
646} 664}
647 665
@@ -765,8 +783,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
765{ 783{
766 struct ieee80211_local *local; 784 struct ieee80211_local *local;
767 struct ieee80211_sub_if_data *sdata; 785 struct ieee80211_sub_if_data *sdata;
768 int ret, i; 786 int ret;
769 bool have_key = false;
770 787
771 might_sleep(); 788 might_sleep();
772 789
@@ -793,19 +810,8 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
793 810
794 list_del_rcu(&sta->list); 811 list_del_rcu(&sta->list);
795 812
796 mutex_lock(&local->key_mtx); 813 /* this always calls synchronize_net() */
797 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 814 ieee80211_free_sta_keys(local, sta);
798 __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i]));
799 have_key = true;
800 }
801 if (sta->ptk) {
802 __ieee80211_key_free(key_mtx_dereference(local, sta->ptk));
803 have_key = true;
804 }
805 mutex_unlock(&local->key_mtx);
806
807 if (!have_key)
808 synchronize_net();
809 815
810 sta->dead = true; 816 sta->dead = true;
811 817
@@ -1391,30 +1397,16 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
1391} 1397}
1392EXPORT_SYMBOL(ieee80211_sta_block_awake); 1398EXPORT_SYMBOL(ieee80211_sta_block_awake);
1393 1399
1394void ieee80211_sta_eosp_irqsafe(struct ieee80211_sta *pubsta) 1400void ieee80211_sta_eosp(struct ieee80211_sta *pubsta)
1395{ 1401{
1396 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1402 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1397 struct ieee80211_local *local = sta->local; 1403 struct ieee80211_local *local = sta->local;
1398 struct sk_buff *skb;
1399 struct skb_eosp_msg_data *data;
1400 1404
1401 trace_api_eosp(local, pubsta); 1405 trace_api_eosp(local, pubsta);
1402 1406
1403 skb = alloc_skb(0, GFP_ATOMIC); 1407 clear_sta_flag(sta, WLAN_STA_SP);
1404 if (!skb) {
1405 /* too bad ... but race is better than loss */
1406 clear_sta_flag(sta, WLAN_STA_SP);
1407 return;
1408 }
1409
1410 data = (void *)skb->cb;
1411 memcpy(data->sta, pubsta->addr, ETH_ALEN);
1412 memcpy(data->iface, sta->sdata->vif.addr, ETH_ALEN);
1413 skb->pkt_type = IEEE80211_EOSP_MSG;
1414 skb_queue_tail(&local->skb_queue, skb);
1415 tasklet_schedule(&local->tasklet);
1416} 1408}
1417EXPORT_SYMBOL(ieee80211_sta_eosp_irqsafe); 1409EXPORT_SYMBOL(ieee80211_sta_eosp);
1418 1410
1419void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta, 1411void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
1420 u8 tid, bool buffered) 1412 u8 tid, bool buffered)
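The __bss_tim_get() helper added above complements the existing __bss_tim_set()/__bss_tim_clear() and lets sta_info_recalc_tim() skip the update when the AID bit already has the desired value; the byte/bit layout is dictated by the 802.11 TIM element, which is why the generic test_bit()/set_bit() helpers cannot be used. A standalone sketch of the same indexing:

#include <stdio.h>

/* TIM partial virtual bitmap: bit 'id' set means station AID 'id' has
 * buffered traffic; the byte/bit layout is fixed by the 802.11 spec. */
static void tim_set(unsigned char *tim, unsigned int id)
{
    tim[id / 8] |= 1 << (id % 8);
}

static void tim_clear(unsigned char *tim, unsigned int id)
{
    tim[id / 8] &= ~(1 << (id % 8));
}

static int tim_get(const unsigned char *tim, unsigned int id)
{
    return !!(tim[id / 8] & (1 << (id % 8)));
}

int main(void)
{
    unsigned char tim[256 / 8] = { 0 };

    tim_set(tim, 5);
    printf("%d %d\n", tim_get(tim, 5), tim_get(tim, 6));  /* 1 0 */
    tim_clear(tim, 5);
    printf("%d\n", tim_get(tim, 5));                       /* 0 */
    return 0;
}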
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 4947341a2a82..adc30045f99e 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -281,7 +281,6 @@ struct sta_ampdu_mlme {
281 * @plink_state: peer link state 281 * @plink_state: peer link state
282 * @plink_timeout: timeout of peer link 282 * @plink_timeout: timeout of peer link
283 * @plink_timer: peer link watch timer 283 * @plink_timer: peer link watch timer
284 * @plink_timer_was_running: used by suspend/resume to restore timers
285 * @t_offset: timing offset relative to this host 284 * @t_offset: timing offset relative to this host
286 * @t_offset_setpoint: reference timing offset of this sta to be used when 285 * @t_offset_setpoint: reference timing offset of this sta to be used when
287 * calculating clockdrift 286 * calculating clockdrift
@@ -334,7 +333,8 @@ struct sta_info {
334 unsigned long driver_buffered_tids; 333 unsigned long driver_buffered_tids;
335 334
336 /* Updated from RX path only, no locking requirements */ 335 /* Updated from RX path only, no locking requirements */
337 unsigned long rx_packets, rx_bytes; 336 unsigned long rx_packets;
337 u64 rx_bytes;
338 unsigned long wep_weak_iv_count; 338 unsigned long wep_weak_iv_count;
339 unsigned long last_rx; 339 unsigned long last_rx;
340 long last_connected; 340 long last_connected;
@@ -354,9 +354,9 @@ struct sta_info {
354 unsigned int fail_avg; 354 unsigned int fail_avg;
355 355
356 /* Updated from TX path only, no locking requirements */ 356 /* Updated from TX path only, no locking requirements */
357 unsigned long tx_packets; 357 u32 tx_fragments;
358 unsigned long tx_bytes; 358 u64 tx_packets[IEEE80211_NUM_ACS];
359 unsigned long tx_fragments; 359 u64 tx_bytes[IEEE80211_NUM_ACS];
360 struct ieee80211_tx_rate last_tx_rate; 360 struct ieee80211_tx_rate last_tx_rate;
361 int last_rx_rate_idx; 361 int last_rx_rate_idx;
362 u32 last_rx_rate_flag; 362 u32 last_rx_rate_flag;
@@ -379,7 +379,6 @@ struct sta_info {
379 __le16 reason; 379 __le16 reason;
380 u8 plink_retries; 380 u8 plink_retries;
381 bool ignore_plink_timer; 381 bool ignore_plink_timer;
382 bool plink_timer_was_running;
383 enum nl80211_plink_state plink_state; 382 enum nl80211_plink_state plink_state;
384 u32 plink_timeout; 383 u32 plink_timeout;
385 struct timer_list plink_timer; 384 struct timer_list plink_timer;
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 3d7cd2a0582f..c5899797a8d4 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -431,6 +431,30 @@ TRACE_EVENT(drv_prepare_multicast,
431 ) 431 )
432); 432);
433 433
434TRACE_EVENT(drv_set_multicast_list,
435 TP_PROTO(struct ieee80211_local *local,
436 struct ieee80211_sub_if_data *sdata, int mc_count),
437
438 TP_ARGS(local, sdata, mc_count),
439
440 TP_STRUCT__entry(
441 LOCAL_ENTRY
442 __field(bool, allmulti)
443 __field(int, mc_count)
444 ),
445
446 TP_fast_assign(
447 LOCAL_ASSIGN;
448 __entry->allmulti = sdata->flags & IEEE80211_SDATA_ALLMULTI;
449 __entry->mc_count = mc_count;
450 ),
451
452 TP_printk(
453 LOCAL_PR_FMT " configure mc filter, count=%d, allmulti=%d",
454 LOCAL_PR_ARG, __entry->mc_count, __entry->allmulti
455 )
456);
457
434TRACE_EVENT(drv_configure_filter, 458TRACE_EVENT(drv_configure_filter,
435 TP_PROTO(struct ieee80211_local *local, 459 TP_PROTO(struct ieee80211_local *local,
436 unsigned int changed_flags, 460 unsigned int changed_flags,
@@ -940,23 +964,26 @@ TRACE_EVENT(drv_get_survey,
940); 964);
941 965
942TRACE_EVENT(drv_flush, 966TRACE_EVENT(drv_flush,
943 TP_PROTO(struct ieee80211_local *local, bool drop), 967 TP_PROTO(struct ieee80211_local *local,
968 u32 queues, bool drop),
944 969
945 TP_ARGS(local, drop), 970 TP_ARGS(local, queues, drop),
946 971
947 TP_STRUCT__entry( 972 TP_STRUCT__entry(
948 LOCAL_ENTRY 973 LOCAL_ENTRY
949 __field(bool, drop) 974 __field(bool, drop)
975 __field(u32, queues)
950 ), 976 ),
951 977
952 TP_fast_assign( 978 TP_fast_assign(
953 LOCAL_ASSIGN; 979 LOCAL_ASSIGN;
954 __entry->drop = drop; 980 __entry->drop = drop;
981 __entry->queues = queues;
955 ), 982 ),
956 983
957 TP_printk( 984 TP_printk(
958 LOCAL_PR_FMT " drop:%d", 985 LOCAL_PR_FMT " queues:0x%x drop:%d",
959 LOCAL_PR_ARG, __entry->drop 986 LOCAL_PR_ARG, __entry->queues, __entry->drop
960 ) 987 )
961); 988);
962 989
@@ -1042,15 +1069,17 @@ TRACE_EVENT(drv_remain_on_channel,
1042 TP_PROTO(struct ieee80211_local *local, 1069 TP_PROTO(struct ieee80211_local *local,
1043 struct ieee80211_sub_if_data *sdata, 1070 struct ieee80211_sub_if_data *sdata,
1044 struct ieee80211_channel *chan, 1071 struct ieee80211_channel *chan,
1045 unsigned int duration), 1072 unsigned int duration,
1073 enum ieee80211_roc_type type),
1046 1074
1047 TP_ARGS(local, sdata, chan, duration), 1075 TP_ARGS(local, sdata, chan, duration, type),
1048 1076
1049 TP_STRUCT__entry( 1077 TP_STRUCT__entry(
1050 LOCAL_ENTRY 1078 LOCAL_ENTRY
1051 VIF_ENTRY 1079 VIF_ENTRY
1052 __field(int, center_freq) 1080 __field(int, center_freq)
1053 __field(unsigned int, duration) 1081 __field(unsigned int, duration)
1082 __field(u32, type)
1054 ), 1083 ),
1055 1084
1056 TP_fast_assign( 1085 TP_fast_assign(
@@ -1058,12 +1087,13 @@ TRACE_EVENT(drv_remain_on_channel,
1058 VIF_ASSIGN; 1087 VIF_ASSIGN;
1059 __entry->center_freq = chan->center_freq; 1088 __entry->center_freq = chan->center_freq;
1060 __entry->duration = duration; 1089 __entry->duration = duration;
1090 __entry->type = type;
1061 ), 1091 ),
1062 1092
1063 TP_printk( 1093 TP_printk(
1064 LOCAL_PR_FMT VIF_PR_FMT " freq:%dMHz duration:%dms", 1094 LOCAL_PR_FMT VIF_PR_FMT " freq:%dMHz duration:%dms type=%d",
1065 LOCAL_PR_ARG, VIF_PR_ARG, 1095 LOCAL_PR_ARG, VIF_PR_ARG,
1066 __entry->center_freq, __entry->duration 1096 __entry->center_freq, __entry->duration, __entry->type
1067 ) 1097 )
1068); 1098);
1069 1099
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 8914d2d2881a..2a6ae8030bd9 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -233,6 +233,7 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
233 233
234 if (local->hw.conf.flags & IEEE80211_CONF_PS) { 234 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
235 ieee80211_stop_queues_by_reason(&local->hw, 235 ieee80211_stop_queues_by_reason(&local->hw,
236 IEEE80211_MAX_QUEUE_MAP,
236 IEEE80211_QUEUE_STOP_REASON_PS); 237 IEEE80211_QUEUE_STOP_REASON_PS);
237 ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED; 238 ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
238 ieee80211_queue_work(&local->hw, 239 ieee80211_queue_work(&local->hw,
@@ -991,15 +992,18 @@ static ieee80211_tx_result debug_noinline
991ieee80211_tx_h_stats(struct ieee80211_tx_data *tx) 992ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
992{ 993{
993 struct sk_buff *skb; 994 struct sk_buff *skb;
995 int ac = -1;
994 996
995 if (!tx->sta) 997 if (!tx->sta)
996 return TX_CONTINUE; 998 return TX_CONTINUE;
997 999
998 tx->sta->tx_packets++;
999 skb_queue_walk(&tx->skbs, skb) { 1000 skb_queue_walk(&tx->skbs, skb) {
1001 ac = skb_get_queue_mapping(skb);
1000 tx->sta->tx_fragments++; 1002 tx->sta->tx_fragments++;
1001 tx->sta->tx_bytes += skb->len; 1003 tx->sta->tx_bytes[ac] += skb->len;
1002 } 1004 }
1005 if (ac >= 0)
1006 tx->sta->tx_packets[ac]++;
1003 1007
1004 return TX_CONTINUE; 1008 return TX_CONTINUE;
1005} 1009}
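With tx_packets and tx_bytes now kept per access category (the sta_info.h and tx.c hunks above), any code that reports a single total has to sum the IEEE80211_NUM_ACS entries. A minimal sketch of that aggregation; sta_tx_totals() is an illustrative helper, not a function from the patch:

#include <stdio.h>
#include <stdint.h>

#define NUM_ACS 4   /* matches IEEE80211_NUM_ACS: VO, VI, BE, BK */

struct sta_tx_stats {
    uint64_t tx_packets[NUM_ACS];
    uint64_t tx_bytes[NUM_ACS];
};

/* Sum the per-AC counters into the totals reported for the station. */
static void sta_tx_totals(const struct sta_tx_stats *s,
                          uint64_t *packets, uint64_t *bytes)
{
    int ac;

    *packets = 0;
    *bytes = 0;
    for (ac = 0; ac < NUM_ACS; ac++) {
        *packets += s->tx_packets[ac];
        *bytes += s->tx_bytes[ac];
    }
}

int main(void)
{
    struct sta_tx_stats s = {
        .tx_packets = { 10, 20, 300, 4 },
        .tx_bytes   = { 1000, 2000, 30000, 400 },
    };
    uint64_t p, b;

    sta_tx_totals(&s, &p, &b);
    /* prints "334 packets, 33400 bytes" */
    printf("%llu packets, %llu bytes\n",
           (unsigned long long)p, (unsigned long long)b);
    return 0;
}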
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 0f38f43ac62e..a7368870c8ee 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -453,7 +453,8 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
453} 453}
454 454
455void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, 455void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
456 enum queue_stop_reason reason) 456 unsigned long queues,
457 enum queue_stop_reason reason)
457{ 458{
458 struct ieee80211_local *local = hw_to_local(hw); 459 struct ieee80211_local *local = hw_to_local(hw);
459 unsigned long flags; 460 unsigned long flags;
@@ -461,7 +462,7 @@ void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
461 462
462 spin_lock_irqsave(&local->queue_stop_reason_lock, flags); 463 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
463 464
464 for (i = 0; i < hw->queues; i++) 465 for_each_set_bit(i, &queues, hw->queues)
465 __ieee80211_stop_queue(hw, i, reason); 466 __ieee80211_stop_queue(hw, i, reason);
466 467
467 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 468 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
@@ -469,7 +470,7 @@ void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
469 470
470void ieee80211_stop_queues(struct ieee80211_hw *hw) 471void ieee80211_stop_queues(struct ieee80211_hw *hw)
471{ 472{
472 ieee80211_stop_queues_by_reason(hw, 473 ieee80211_stop_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
473 IEEE80211_QUEUE_STOP_REASON_DRIVER); 474 IEEE80211_QUEUE_STOP_REASON_DRIVER);
474} 475}
475EXPORT_SYMBOL(ieee80211_stop_queues); 476EXPORT_SYMBOL(ieee80211_stop_queues);
@@ -491,6 +492,7 @@ int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue)
491EXPORT_SYMBOL(ieee80211_queue_stopped); 492EXPORT_SYMBOL(ieee80211_queue_stopped);
492 493
493void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, 494void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
495 unsigned long queues,
494 enum queue_stop_reason reason) 496 enum queue_stop_reason reason)
495{ 497{
496 struct ieee80211_local *local = hw_to_local(hw); 498 struct ieee80211_local *local = hw_to_local(hw);
@@ -499,7 +501,7 @@ void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
499 501
500 spin_lock_irqsave(&local->queue_stop_reason_lock, flags); 502 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
501 503
502 for (i = 0; i < hw->queues; i++) 504 for_each_set_bit(i, &queues, hw->queues)
503 __ieee80211_wake_queue(hw, i, reason); 505 __ieee80211_wake_queue(hw, i, reason);
504 506
505 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 507 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
@@ -507,10 +509,42 @@ void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
507 509
508void ieee80211_wake_queues(struct ieee80211_hw *hw) 510void ieee80211_wake_queues(struct ieee80211_hw *hw)
509{ 511{
510 ieee80211_wake_queues_by_reason(hw, IEEE80211_QUEUE_STOP_REASON_DRIVER); 512 ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
513 IEEE80211_QUEUE_STOP_REASON_DRIVER);
511} 514}
512EXPORT_SYMBOL(ieee80211_wake_queues); 515EXPORT_SYMBOL(ieee80211_wake_queues);
513 516
517void ieee80211_flush_queues(struct ieee80211_local *local,
518 struct ieee80211_sub_if_data *sdata)
519{
520 u32 queues;
521
522 if (!local->ops->flush)
523 return;
524
525 if (sdata && local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) {
526 int ac;
527
528 queues = 0;
529
530 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
531 queues |= BIT(sdata->vif.hw_queue[ac]);
532 if (sdata->vif.cab_queue != IEEE80211_INVAL_HW_QUEUE)
533 queues |= BIT(sdata->vif.cab_queue);
534 } else {
535 /* all queues */
536 queues = BIT(local->hw.queues) - 1;
537 }
538
539 ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
540 IEEE80211_QUEUE_STOP_REASON_FLUSH);
541
542 drv_flush(local, queues, false);
543
544 ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
545 IEEE80211_QUEUE_STOP_REASON_FLUSH);
546}
547
514void ieee80211_iterate_active_interfaces( 548void ieee80211_iterate_active_interfaces(
515 struct ieee80211_hw *hw, u32 iter_flags, 549 struct ieee80211_hw *hw, u32 iter_flags,
516 void (*iterator)(void *data, u8 *mac, 550 void (*iterator)(void *data, u8 *mac,
@@ -1357,6 +1391,25 @@ void ieee80211_stop_device(struct ieee80211_local *local)
1357 drv_stop(local); 1391 drv_stop(local);
1358} 1392}
1359 1393
1394static void ieee80211_assign_chanctx(struct ieee80211_local *local,
1395 struct ieee80211_sub_if_data *sdata)
1396{
1397 struct ieee80211_chanctx_conf *conf;
1398 struct ieee80211_chanctx *ctx;
1399
1400 if (!local->use_chanctx)
1401 return;
1402
1403 mutex_lock(&local->chanctx_mtx);
1404 conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
1405 lockdep_is_held(&local->chanctx_mtx));
1406 if (conf) {
1407 ctx = container_of(conf, struct ieee80211_chanctx, conf);
1408 drv_assign_vif_chanctx(local, sdata, ctx);
1409 }
1410 mutex_unlock(&local->chanctx_mtx);
1411}
1412
1360int ieee80211_reconfig(struct ieee80211_local *local) 1413int ieee80211_reconfig(struct ieee80211_local *local)
1361{ 1414{
1362 struct ieee80211_hw *hw = &local->hw; 1415 struct ieee80211_hw *hw = &local->hw;
@@ -1445,36 +1498,14 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1445 } 1498 }
1446 1499
1447 list_for_each_entry(sdata, &local->interfaces, list) { 1500 list_for_each_entry(sdata, &local->interfaces, list) {
1448 struct ieee80211_chanctx_conf *ctx_conf;
1449
1450 if (!ieee80211_sdata_running(sdata)) 1501 if (!ieee80211_sdata_running(sdata))
1451 continue; 1502 continue;
1452 1503 ieee80211_assign_chanctx(local, sdata);
1453 mutex_lock(&local->chanctx_mtx);
1454 ctx_conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
1455 lockdep_is_held(&local->chanctx_mtx));
1456 if (ctx_conf) {
1457 ctx = container_of(ctx_conf, struct ieee80211_chanctx,
1458 conf);
1459 drv_assign_vif_chanctx(local, sdata, ctx);
1460 }
1461 mutex_unlock(&local->chanctx_mtx);
1462 } 1504 }
1463 1505
1464 sdata = rtnl_dereference(local->monitor_sdata); 1506 sdata = rtnl_dereference(local->monitor_sdata);
1465 if (sdata && local->use_chanctx && ieee80211_sdata_running(sdata)) { 1507 if (sdata && ieee80211_sdata_running(sdata))
1466 struct ieee80211_chanctx_conf *ctx_conf; 1508 ieee80211_assign_chanctx(local, sdata);
1467
1468 mutex_lock(&local->chanctx_mtx);
1469 ctx_conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
1470 lockdep_is_held(&local->chanctx_mtx));
1471 if (ctx_conf) {
1472 ctx = container_of(ctx_conf, struct ieee80211_chanctx,
1473 conf);
1474 drv_assign_vif_chanctx(local, sdata, ctx);
1475 }
1476 mutex_unlock(&local->chanctx_mtx);
1477 }
1478 1509
1479 /* add STAs back */ 1510 /* add STAs back */
1480 mutex_lock(&local->sta_mtx); 1511 mutex_lock(&local->sta_mtx);
@@ -1534,11 +1565,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1534 BSS_CHANGED_IDLE | 1565 BSS_CHANGED_IDLE |
1535 BSS_CHANGED_TXPOWER; 1566 BSS_CHANGED_TXPOWER;
1536 1567
1537#ifdef CONFIG_PM
1538 if (local->resuming && !reconfig_due_to_wowlan)
1539 sdata->vif.bss_conf = sdata->suspend_bss_conf;
1540#endif
1541
1542 switch (sdata->vif.type) { 1568 switch (sdata->vif.type) {
1543 case NL80211_IFTYPE_STATION: 1569 case NL80211_IFTYPE_STATION:
1544 changed |= BSS_CHANGED_ASSOC | 1570 changed |= BSS_CHANGED_ASSOC |
@@ -1659,8 +1685,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1659 mutex_unlock(&local->sta_mtx); 1685 mutex_unlock(&local->sta_mtx);
1660 } 1686 }
1661 1687
1662 ieee80211_wake_queues_by_reason(hw, 1688 ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
1663 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 1689 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
1664 1690
1665 /* 1691 /*
1666 * If this is for hw restart things are still running. 1692 * If this is for hw restart things are still running.
@@ -1678,28 +1704,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1678 mb(); 1704 mb();
1679 local->resuming = false; 1705 local->resuming = false;
1680 1706
1681 list_for_each_entry(sdata, &local->interfaces, list) {
1682 switch(sdata->vif.type) {
1683 case NL80211_IFTYPE_STATION:
1684 ieee80211_sta_restart(sdata);
1685 break;
1686 case NL80211_IFTYPE_ADHOC:
1687 ieee80211_ibss_restart(sdata);
1688 break;
1689 case NL80211_IFTYPE_MESH_POINT:
1690 ieee80211_mesh_restart(sdata);
1691 break;
1692 default:
1693 break;
1694 }
1695 }
1696
1697 mod_timer(&local->sta_cleanup, jiffies + 1); 1707 mod_timer(&local->sta_cleanup, jiffies + 1);
1698
1699 mutex_lock(&local->sta_mtx);
1700 list_for_each_entry(sta, &local->sta_list, list)
1701 mesh_plink_restart(sta);
1702 mutex_unlock(&local->sta_mtx);
1703#else 1708#else
1704 WARN_ON(1); 1709 WARN_ON(1);
1705#endif 1710#endif
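ieee80211_flush_queues() above selects queues with a bitmap: bit N stands for hardware queue N, a vif contributes BIT(vif.hw_queue[ac]) for each AC plus its cab_queue, and BIT(hw.queues) - 1 covers every queue (IEEE80211_MAX_QUEUE_MAP is assumed here to be such an all-queues mask). A small standalone sketch of that bitmap handling; the kernel iterates with for_each_set_bit(), the plain loop below is an equivalent illustration:

#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
    unsigned int hw_queues = 4;                  /* queues provided by the driver */
    unsigned long all = BIT(hw_queues) - 1;      /* 0xf: every hw queue */
    unsigned long vif_queues = BIT(0) | BIT(2);  /* e.g. two queues used by one vif */
    unsigned int i;

    /* equivalent of for_each_set_bit(i, &vif_queues, hw_queues) */
    for (i = 0; i < hw_queues; i++)
        if (vif_queues & BIT(i))
            printf("flush hw queue %u\n", i);

    printf("all-queues mask: 0x%lx\n", all);
    return 0;
}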
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index a2c2258bc84e..171344d4eb7c 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -13,6 +13,104 @@
13#include "rate.h" 13#include "rate.h"
14 14
15 15
16static void __check_vhtcap_disable(struct ieee80211_sub_if_data *sdata,
17 struct ieee80211_sta_vht_cap *vht_cap,
18 u32 flag)
19{
20 __le32 le_flag = cpu_to_le32(flag);
21
22 if (sdata->u.mgd.vht_capa_mask.vht_cap_info & le_flag &&
23 !(sdata->u.mgd.vht_capa.vht_cap_info & le_flag))
24 vht_cap->cap &= ~flag;
25}
26
27void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
28 struct ieee80211_sta_vht_cap *vht_cap)
29{
30 int i;
31 u16 rxmcs_mask, rxmcs_cap, rxmcs_n, txmcs_mask, txmcs_cap, txmcs_n;
32
33 if (!vht_cap->vht_supported)
34 return;
35
36 if (sdata->vif.type != NL80211_IFTYPE_STATION)
37 return;
38
39 __check_vhtcap_disable(sdata, vht_cap,
40 IEEE80211_VHT_CAP_RXLDPC);
41 __check_vhtcap_disable(sdata, vht_cap,
42 IEEE80211_VHT_CAP_SHORT_GI_80);
43 __check_vhtcap_disable(sdata, vht_cap,
44 IEEE80211_VHT_CAP_SHORT_GI_160);
45 __check_vhtcap_disable(sdata, vht_cap,
46 IEEE80211_VHT_CAP_TXSTBC);
47 __check_vhtcap_disable(sdata, vht_cap,
48 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
49 __check_vhtcap_disable(sdata, vht_cap,
50 IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
51 __check_vhtcap_disable(sdata, vht_cap,
52 IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN);
53 __check_vhtcap_disable(sdata, vht_cap,
54 IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN);
55
56 /* Allow user to decrease AMPDU length exponent */
57 if (sdata->u.mgd.vht_capa_mask.vht_cap_info &
58 cpu_to_le32(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK)) {
59 u32 cap, n;
60
61 n = le32_to_cpu(sdata->u.mgd.vht_capa.vht_cap_info) &
62 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
63 n >>= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
64 cap = vht_cap->cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
65 cap >>= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
66
67 if (n < cap) {
68 vht_cap->cap &=
69 ~IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
70 vht_cap->cap |=
71 n << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
72 }
73 }
74
75 /* Allow the user to decrease MCSes */
76 rxmcs_mask =
77 le16_to_cpu(sdata->u.mgd.vht_capa_mask.supp_mcs.rx_mcs_map);
78 rxmcs_n = le16_to_cpu(sdata->u.mgd.vht_capa.supp_mcs.rx_mcs_map);
79 rxmcs_n &= rxmcs_mask;
80 rxmcs_cap = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
81
82 txmcs_mask =
83 le16_to_cpu(sdata->u.mgd.vht_capa_mask.supp_mcs.tx_mcs_map);
84 txmcs_n = le16_to_cpu(sdata->u.mgd.vht_capa.supp_mcs.tx_mcs_map);
85 txmcs_n &= txmcs_mask;
86 txmcs_cap = le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
87 for (i = 0; i < 8; i++) {
88 u8 m, n, c;
89
90 m = (rxmcs_mask >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
91 n = (rxmcs_n >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
92 c = (rxmcs_cap >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
93
94 if (m && ((c != IEEE80211_VHT_MCS_NOT_SUPPORTED && n < c) ||
95 n == IEEE80211_VHT_MCS_NOT_SUPPORTED)) {
96 rxmcs_cap &= ~(3 << 2*i);
97 rxmcs_cap |= (rxmcs_n & (3 << 2*i));
98 }
99
100 m = (txmcs_mask >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
101 n = (txmcs_n >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
102 c = (txmcs_cap >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
103
104 if (m && ((c != IEEE80211_VHT_MCS_NOT_SUPPORTED && n < c) ||
105 n == IEEE80211_VHT_MCS_NOT_SUPPORTED)) {
106 txmcs_cap &= ~(3 << 2*i);
107 txmcs_cap |= (txmcs_n & (3 << 2*i));
108 }
109 }
110 vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_cap);
111 vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_cap);
112}
113
16void 114void
17ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, 115ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
18 struct ieee80211_supported_band *sband, 116 struct ieee80211_supported_band *sband,
@@ -20,6 +118,8 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
20 struct sta_info *sta) 118 struct sta_info *sta)
21{ 119{
22 struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap; 120 struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
121 struct ieee80211_sta_vht_cap own_cap;
122 u32 cap_info, i;
23 123
24 memset(vht_cap, 0, sizeof(*vht_cap)); 124 memset(vht_cap, 0, sizeof(*vht_cap));
25 125
@@ -35,12 +135,122 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
35 135
36 vht_cap->vht_supported = true; 136 vht_cap->vht_supported = true;
37 137
38 vht_cap->cap = le32_to_cpu(vht_cap_ie->vht_cap_info); 138 own_cap = sband->vht_cap;
139 /*
140 * If user has specified capability overrides, take care
141 * of that if the station we're setting up is the AP that
142 * we advertised a restricted capability set to. Override
143 * our own capabilities and then use those below.
144 */
145 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
146 !test_sta_flag(sta, WLAN_STA_TDLS_PEER))
147 ieee80211_apply_vhtcap_overrides(sdata, &own_cap);
148
149 /* take some capabilities as-is */
150 cap_info = le32_to_cpu(vht_cap_ie->vht_cap_info);
151 vht_cap->cap = cap_info;
152 vht_cap->cap &= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 |
153 IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
154 IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
155 IEEE80211_VHT_CAP_RXLDPC |
156 IEEE80211_VHT_CAP_VHT_TXOP_PS |
157 IEEE80211_VHT_CAP_HTC_VHT |
158 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
159 IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB |
160 IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB |
161 IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
162 IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
163
164 /* and some based on our own capabilities */
165 switch (own_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
166 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
167 vht_cap->cap |= cap_info &
168 IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
169 break;
170 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
171 vht_cap->cap |= cap_info &
172 IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
173 break;
174 default:
175 /* nothing */
176 break;
177 }
178
179 /* symmetric capabilities */
180 vht_cap->cap |= cap_info & own_cap.cap &
181 (IEEE80211_VHT_CAP_SHORT_GI_80 |
182 IEEE80211_VHT_CAP_SHORT_GI_160);
183
184 /* remaining ones */
185 if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) {
186 vht_cap->cap |= cap_info &
187 (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
188 IEEE80211_VHT_CAP_BEAMFORMER_ANTENNAS_MAX |
189 IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MAX);
190 }
191
192 if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
193 vht_cap->cap |= cap_info &
194 IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
195
196 if (own_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
197 vht_cap->cap |= cap_info &
198 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
199
200 if (own_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
201 vht_cap->cap |= cap_info &
202 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE;
203
204 if (own_cap.cap & IEEE80211_VHT_CAP_TXSTBC)
205 vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_RXSTBC_MASK;
206
207 if (own_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
208 vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_TXSTBC;
39 209
40 /* Copy peer MCS info, the driver might need them. */ 210 /* Copy peer MCS info, the driver might need them. */
41 memcpy(&vht_cap->vht_mcs, &vht_cap_ie->supp_mcs, 211 memcpy(&vht_cap->vht_mcs, &vht_cap_ie->supp_mcs,
42 sizeof(struct ieee80211_vht_mcs_info)); 212 sizeof(struct ieee80211_vht_mcs_info));
43 213
214 /* but also restrict MCSes */
215 for (i = 0; i < 8; i++) {
216 u16 own_rx, own_tx, peer_rx, peer_tx;
217
218 own_rx = le16_to_cpu(own_cap.vht_mcs.rx_mcs_map);
219 own_rx = (own_rx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
220
221 own_tx = le16_to_cpu(own_cap.vht_mcs.tx_mcs_map);
222 own_tx = (own_tx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
223
224 peer_rx = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
225 peer_rx = (peer_rx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
226
227 peer_tx = le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
228 peer_tx = (peer_tx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
229
230 if (peer_tx != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
231 if (own_rx == IEEE80211_VHT_MCS_NOT_SUPPORTED)
232 peer_tx = IEEE80211_VHT_MCS_NOT_SUPPORTED;
233 else if (own_rx < peer_tx)
234 peer_tx = own_rx;
235 }
236
237 if (peer_rx != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
238 if (own_tx == IEEE80211_VHT_MCS_NOT_SUPPORTED)
239 peer_rx = IEEE80211_VHT_MCS_NOT_SUPPORTED;
240 else if (own_tx < peer_rx)
241 peer_rx = own_tx;
242 }
243
244 vht_cap->vht_mcs.rx_mcs_map &=
245 ~cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << i * 2);
246 vht_cap->vht_mcs.rx_mcs_map |= cpu_to_le16(peer_rx << i * 2);
247
248 vht_cap->vht_mcs.tx_mcs_map &=
249 ~cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << i * 2);
250 vht_cap->vht_mcs.tx_mcs_map |= cpu_to_le16(peer_tx << i * 2);
251 }
252
253 /* finally set up the bandwidth */
44 switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { 254 switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
45 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: 255 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
46 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: 256 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
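The MCS restriction loops above treat each 16-bit VHT MCS map as eight 2-bit fields, one per spatial stream, where 3 (IEEE80211_VHT_MCS_NOT_SUPPORTED) means the stream carries no MCS; the peer's per-stream value is clamped to what we support ourselves. A standalone sketch of that per-stream intersection; vht_mcs_map_intersect() is an illustrative name, not a kernel function:

#include <stdio.h>
#include <stdint.h>

#define VHT_MCS_NOT_SUPPORTED 3   /* 2-bit field value meaning "no MCS" */

/* Clamp every 2-bit per-stream field of 'peer' to at most 'own'. */
static uint16_t vht_mcs_map_intersect(uint16_t own, uint16_t peer)
{
    uint16_t out = 0;
    int i;

    for (i = 0; i < 8; i++) {
        uint16_t o = (own >> (2 * i)) & 3;
        uint16_t p = (peer >> (2 * i)) & 3;

        if (o == VHT_MCS_NOT_SUPPORTED || p == VHT_MCS_NOT_SUPPORTED)
            p = VHT_MCS_NOT_SUPPORTED;
        else if (p > o)
            p = o;

        out |= p << (2 * i);
    }
    return out;
}

int main(void)
{
    /* own: MCS 0-9 on streams 1-2 only; peer: MCS 0-9 on streams 1-3 */
    uint16_t own  = 0xfffa;
    uint16_t peer = 0xffea;

    /* stream 3 drops to "not supported"; prints "0xfffa" */
    printf("0x%04x\n", vht_mcs_map_intersect(own, peer));
    return 0;
}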
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index c6bc3bd95052..b75a9b3f9e89 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -117,6 +117,88 @@ u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length)
117 return tlv; 117 return tlv;
118} 118}
119 119
120struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdres_tlv(u8 tid, u8 sap)
121{
122 struct nfc_llcp_sdp_tlv *sdres;
123 u8 value[2];
124
125 sdres = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL);
126 if (sdres == NULL)
127 return NULL;
128
129 value[0] = tid;
130 value[1] = sap;
131
132 sdres->tlv = nfc_llcp_build_tlv(LLCP_TLV_SDRES, value, 2,
133 &sdres->tlv_len);
134 if (sdres->tlv == NULL) {
135 kfree(sdres);
136 return NULL;
137 }
138
139 sdres->tid = tid;
140 sdres->sap = sap;
141
142 INIT_HLIST_NODE(&sdres->node);
143
144 return sdres;
145}
146
147struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri,
148 size_t uri_len)
149{
150 struct nfc_llcp_sdp_tlv *sdreq;
151
152 pr_debug("uri: %s, len: %zu\n", uri, uri_len);
153
154 sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL);
155 if (sdreq == NULL)
156 return NULL;
157
158 sdreq->tlv_len = uri_len + 3;
159
160 if (uri[uri_len - 1] == 0)
161 sdreq->tlv_len--;
162
163 sdreq->tlv = kzalloc(sdreq->tlv_len + 1, GFP_KERNEL);
164 if (sdreq->tlv == NULL) {
165 kfree(sdreq);
166 return NULL;
167 }
168
169 sdreq->tlv[0] = LLCP_TLV_SDREQ;
170 sdreq->tlv[1] = sdreq->tlv_len - 2;
171 sdreq->tlv[2] = tid;
172
173 sdreq->tid = tid;
174 sdreq->uri = sdreq->tlv + 3;
175 memcpy(sdreq->uri, uri, uri_len);
176
177 sdreq->time = jiffies;
178
179 INIT_HLIST_NODE(&sdreq->node);
180
181 return sdreq;
182}
183
184void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp)
185{
186 kfree(sdp->tlv);
187 kfree(sdp);
188}
189
190void nfc_llcp_free_sdp_tlv_list(struct hlist_head *head)
191{
192 struct nfc_llcp_sdp_tlv *sdp;
193 struct hlist_node *n;
194
195 hlist_for_each_entry_safe(sdp, n, head, node) {
196 hlist_del(&sdp->node);
197
198 nfc_llcp_free_sdp_tlv(sdp);
199 }
200}
201
120int nfc_llcp_parse_gb_tlv(struct nfc_llcp_local *local, 202int nfc_llcp_parse_gb_tlv(struct nfc_llcp_local *local,
121 u8 *tlv_array, u16 tlv_array_len) 203 u8 *tlv_array, u16 tlv_array_len)
122{ 204{
@@ -184,10 +266,10 @@ int nfc_llcp_parse_connection_tlv(struct nfc_llcp_sock *sock,
184 266
185 switch (type) { 267 switch (type) {
186 case LLCP_TLV_MIUX: 268 case LLCP_TLV_MIUX:
187 sock->miu = llcp_tlv_miux(tlv) + 128; 269 sock->remote_miu = llcp_tlv_miux(tlv) + 128;
188 break; 270 break;
189 case LLCP_TLV_RW: 271 case LLCP_TLV_RW:
190 sock->rw = llcp_tlv_rw(tlv); 272 sock->remote_rw = llcp_tlv_rw(tlv);
191 break; 273 break;
192 case LLCP_TLV_SN: 274 case LLCP_TLV_SN:
193 break; 275 break;
@@ -200,7 +282,8 @@ int nfc_llcp_parse_connection_tlv(struct nfc_llcp_sock *sock,
200 tlv += length + 2; 282 tlv += length + 2;
201 } 283 }
202 284
203 pr_debug("sock %p rw %d miu %d\n", sock, sock->rw, sock->miu); 285 pr_debug("sock %p rw %d miu %d\n", sock,
286 sock->remote_rw, sock->remote_miu);
204 287
205 return 0; 288 return 0;
206} 289}
@@ -318,9 +401,9 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
318 struct sk_buff *skb; 401 struct sk_buff *skb;
319 u8 *service_name_tlv = NULL, service_name_tlv_length; 402 u8 *service_name_tlv = NULL, service_name_tlv_length;
320 u8 *miux_tlv = NULL, miux_tlv_length; 403 u8 *miux_tlv = NULL, miux_tlv_length;
321 u8 *rw_tlv = NULL, rw_tlv_length; 404 u8 *rw_tlv = NULL, rw_tlv_length, rw;
322 int err; 405 int err;
323 u16 size = 0; 406 u16 size = 0, miux;
324 407
325 pr_debug("Sending CONNECT\n"); 408 pr_debug("Sending CONNECT\n");
326 409
@@ -336,11 +419,15 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
336 size += service_name_tlv_length; 419 size += service_name_tlv_length;
337 } 420 }
338 421
339 miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0, 422 /* If the socket parameters are not set, use the local ones */
423 miux = sock->miux > LLCP_MAX_MIUX ? local->miux : sock->miux;
424 rw = sock->rw > LLCP_MAX_RW ? local->rw : sock->rw;
425
426 miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
340 &miux_tlv_length); 427 &miux_tlv_length);
341 size += miux_tlv_length; 428 size += miux_tlv_length;
342 429
343 rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &local->rw, 0, &rw_tlv_length); 430 rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
344 size += rw_tlv_length; 431 size += rw_tlv_length;
345 432
346 pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len); 433 pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
@@ -377,9 +464,9 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
377 struct nfc_llcp_local *local; 464 struct nfc_llcp_local *local;
378 struct sk_buff *skb; 465 struct sk_buff *skb;
379 u8 *miux_tlv = NULL, miux_tlv_length; 466 u8 *miux_tlv = NULL, miux_tlv_length;
380 u8 *rw_tlv = NULL, rw_tlv_length; 467 u8 *rw_tlv = NULL, rw_tlv_length, rw;
381 int err; 468 int err;
382 u16 size = 0; 469 u16 size = 0, miux;
383 470
384 pr_debug("Sending CC\n"); 471 pr_debug("Sending CC\n");
385 472
@@ -387,11 +474,15 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
387 if (local == NULL) 474 if (local == NULL)
388 return -ENODEV; 475 return -ENODEV;
389 476
390 miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0, 477 /* If the socket parameters are not set, use the local ones */
478 miux = sock->miux > LLCP_MAX_MIUX ? local->miux : sock->miux;
479 rw = sock->rw > LLCP_MAX_RW ? local->rw : sock->rw;
480
481 miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
391 &miux_tlv_length); 482 &miux_tlv_length);
392 size += miux_tlv_length; 483 size += miux_tlv_length;
393 484
394 rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &local->rw, 0, &rw_tlv_length); 485 rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
395 size += rw_tlv_length; 486 size += rw_tlv_length;
396 487
397 skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size); 488 skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size);
@@ -416,48 +507,90 @@ error_tlv:
416 return err; 507 return err;
417} 508}
418 509
419int nfc_llcp_send_snl(struct nfc_llcp_local *local, u8 tid, u8 sap) 510static struct sk_buff *nfc_llcp_allocate_snl(struct nfc_llcp_local *local,
511 size_t tlv_length)
420{ 512{
421 struct sk_buff *skb; 513 struct sk_buff *skb;
422 struct nfc_dev *dev; 514 struct nfc_dev *dev;
423 u8 *sdres_tlv = NULL, sdres_tlv_length, sdres[2];
424 u16 size = 0; 515 u16 size = 0;
425 516
426 pr_debug("Sending SNL tid 0x%x sap 0x%x\n", tid, sap);
427
428 if (local == NULL) 517 if (local == NULL)
429 return -ENODEV; 518 return ERR_PTR(-ENODEV);
430 519
431 dev = local->dev; 520 dev = local->dev;
432 if (dev == NULL) 521 if (dev == NULL)
433 return -ENODEV; 522 return ERR_PTR(-ENODEV);
434
435 sdres[0] = tid;
436 sdres[1] = sap;
437 sdres_tlv = nfc_llcp_build_tlv(LLCP_TLV_SDRES, sdres, 0,
438 &sdres_tlv_length);
439 if (sdres_tlv == NULL)
440 return -ENOMEM;
441 523
442 size += LLCP_HEADER_SIZE; 524 size += LLCP_HEADER_SIZE;
443 size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE; 525 size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
444 size += sdres_tlv_length; 526 size += tlv_length;
445 527
446 skb = alloc_skb(size, GFP_KERNEL); 528 skb = alloc_skb(size, GFP_KERNEL);
447 if (skb == NULL) { 529 if (skb == NULL)
448 kfree(sdres_tlv); 530 return ERR_PTR(-ENOMEM);
449 return -ENOMEM;
450 }
451 531
452 skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); 532 skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
453 533
454 skb = llcp_add_header(skb, LLCP_SAP_SDP, LLCP_SAP_SDP, LLCP_PDU_SNL); 534 skb = llcp_add_header(skb, LLCP_SAP_SDP, LLCP_SAP_SDP, LLCP_PDU_SNL);
455 535
456 memcpy(skb_put(skb, sdres_tlv_length), sdres_tlv, sdres_tlv_length); 536 return skb;
537}
538
539int nfc_llcp_send_snl_sdres(struct nfc_llcp_local *local,
540 struct hlist_head *tlv_list, size_t tlvs_len)
541{
542 struct nfc_llcp_sdp_tlv *sdp;
543 struct hlist_node *n;
544 struct sk_buff *skb;
545
546 skb = nfc_llcp_allocate_snl(local, tlvs_len);
547 if (IS_ERR(skb))
548 return PTR_ERR(skb);
549
550 hlist_for_each_entry_safe(sdp, n, tlv_list, node) {
551 memcpy(skb_put(skb, sdp->tlv_len), sdp->tlv, sdp->tlv_len);
552
553 hlist_del(&sdp->node);
554
555 nfc_llcp_free_sdp_tlv(sdp);
556 }
457 557
458 skb_queue_tail(&local->tx_queue, skb); 558 skb_queue_tail(&local->tx_queue, skb);
459 559
460 kfree(sdres_tlv); 560 return 0;
561}
562
563int nfc_llcp_send_snl_sdreq(struct nfc_llcp_local *local,
564 struct hlist_head *tlv_list, size_t tlvs_len)
565{
566 struct nfc_llcp_sdp_tlv *sdreq;
567 struct hlist_node *n;
568 struct sk_buff *skb;
569
570 skb = nfc_llcp_allocate_snl(local, tlvs_len);
571 if (IS_ERR(skb))
572 return PTR_ERR(skb);
573
574 mutex_lock(&local->sdreq_lock);
575
576 if (hlist_empty(&local->pending_sdreqs))
577 mod_timer(&local->sdreq_timer,
578 jiffies + msecs_to_jiffies(3 * local->remote_lto));
579
580 hlist_for_each_entry_safe(sdreq, n, tlv_list, node) {
581 pr_debug("tid %d for %s\n", sdreq->tid, sdreq->uri);
582
583 memcpy(skb_put(skb, sdreq->tlv_len), sdreq->tlv,
584 sdreq->tlv_len);
585
586 hlist_del(&sdreq->node);
587
588 hlist_add_head(&sdreq->node, &local->pending_sdreqs);
589 }
590
591 mutex_unlock(&local->sdreq_lock);
592
593 skb_queue_tail(&local->tx_queue, skb);
461 594
462 return 0; 595 return 0;
463} 596}
@@ -532,8 +665,8 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
532 665
533 /* Remote is ready but has not acknowledged our frames */ 666 /* Remote is ready but has not acknowledged our frames */
534 if((sock->remote_ready && 667 if((sock->remote_ready &&
535 skb_queue_len(&sock->tx_pending_queue) >= sock->rw && 668 skb_queue_len(&sock->tx_pending_queue) >= sock->remote_rw &&
536 skb_queue_len(&sock->tx_queue) >= 2 * sock->rw)) { 669 skb_queue_len(&sock->tx_queue) >= 2 * sock->remote_rw)) {
537 pr_err("Pending queue is full %d frames\n", 670 pr_err("Pending queue is full %d frames\n",
538 skb_queue_len(&sock->tx_pending_queue)); 671 skb_queue_len(&sock->tx_pending_queue));
539 return -ENOBUFS; 672 return -ENOBUFS;
@@ -541,7 +674,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
541 674
542 /* Remote is not ready and we've been queueing enough frames */ 675 /* Remote is not ready and we've been queueing enough frames */
543 if ((!sock->remote_ready && 676 if ((!sock->remote_ready &&
544 skb_queue_len(&sock->tx_queue) >= 2 * sock->rw)) { 677 skb_queue_len(&sock->tx_queue) >= 2 * sock->remote_rw)) {
545 pr_err("Tx queue is full %d frames\n", 678 pr_err("Tx queue is full %d frames\n",
546 skb_queue_len(&sock->tx_queue)); 679 skb_queue_len(&sock->tx_queue));
547 return -ENOBUFS; 680 return -ENOBUFS;
@@ -561,7 +694,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
561 694
562 while (remaining_len > 0) { 695 while (remaining_len > 0) {
563 696
564 frag_len = min_t(size_t, sock->miu, remaining_len); 697 frag_len = min_t(size_t, sock->remote_miu, remaining_len);
565 698
566 pr_debug("Fragment %zd bytes remaining %zd", 699 pr_debug("Fragment %zd bytes remaining %zd",
567 frag_len, remaining_len); 700 frag_len, remaining_len);
@@ -621,7 +754,7 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
621 754
622 while (remaining_len > 0) { 755 while (remaining_len > 0) {
623 756
624 frag_len = min_t(size_t, sock->miu, remaining_len); 757 frag_len = min_t(size_t, sock->remote_miu, remaining_len);
625 758
626 pr_debug("Fragment %zd bytes remaining %zd", 759 pr_debug("Fragment %zd bytes remaining %zd",
627 frag_len, remaining_len); 760 frag_len, remaining_len);
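nfc_llcp_build_sdreq_tlv() above lays the service discovery request out as type, length, TID, then the URI bytes, with a trailing NUL not counted in the length. A standalone sketch of that layout; the numeric TLV_SDREQ value below is a placeholder and build_sdreq() an illustrative helper, only the structure mirrors the code above:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* placeholder value; the real LLCP_TLV_SDREQ constant lives in llcp.h */
#define TLV_SDREQ 0x08

/* Build [type][len][tid][uri...] like nfc_llcp_build_sdreq_tlv() above. */
static unsigned char *build_sdreq(unsigned char tid, const char *uri,
                                  size_t uri_len, size_t *tlv_len)
{
    unsigned char *tlv;

    *tlv_len = uri_len + 3;
    if (uri_len && uri[uri_len - 1] == '\0')
        (*tlv_len)--;                         /* don't count a trailing NUL */

    tlv = calloc(1, *tlv_len + 1);
    if (!tlv)
        return NULL;

    tlv[0] = TLV_SDREQ;
    tlv[1] = (unsigned char)(*tlv_len - 2);   /* value length: tid + uri */
    tlv[2] = tid;
    memcpy(tlv + 3, uri, *tlv_len - 3);
    return tlv;
}

int main(void)
{
    size_t len;
    unsigned char *tlv = build_sdreq(1, "urn:nfc:sn:snep", 15, &len);

    if (tlv) {
        /* prints "tlv len 18, tid 1, uri urn:nfc:sn:snep" */
        printf("tlv len %zu, tid %u, uri %.*s\n",
               len, tlv[2], (int)(len - 3), tlv + 3);
        free(tlv);
    }
    return 0;
}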
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index b530afadd76c..bb67b98b9797 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -188,6 +188,9 @@ static void local_cleanup(struct nfc_llcp_local *local, bool listen)
188 cancel_work_sync(&local->rx_work); 188 cancel_work_sync(&local->rx_work);
189 cancel_work_sync(&local->timeout_work); 189 cancel_work_sync(&local->timeout_work);
190 kfree_skb(local->rx_pending); 190 kfree_skb(local->rx_pending);
191 del_timer_sync(&local->sdreq_timer);
192 cancel_work_sync(&local->sdreq_timeout_work);
193 nfc_llcp_free_sdp_tlv_list(&local->pending_sdreqs);
191} 194}
192 195
193static void local_release(struct kref *ref) 196static void local_release(struct kref *ref)
@@ -265,6 +268,47 @@ static void nfc_llcp_symm_timer(unsigned long data)
265 schedule_work(&local->timeout_work); 268 schedule_work(&local->timeout_work);
266} 269}
267 270
271static void nfc_llcp_sdreq_timeout_work(struct work_struct *work)
272{
273 unsigned long time;
274 HLIST_HEAD(nl_sdres_list);
275 struct hlist_node *n;
276 struct nfc_llcp_sdp_tlv *sdp;
277 struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
278 sdreq_timeout_work);
279
280 mutex_lock(&local->sdreq_lock);
281
282 time = jiffies - msecs_to_jiffies(3 * local->remote_lto);
283
284 hlist_for_each_entry_safe(sdp, n, &local->pending_sdreqs, node) {
285 if (time_after(sdp->time, time))
286 continue;
287
288 sdp->sap = LLCP_SDP_UNBOUND;
289
290 hlist_del(&sdp->node);
291
292 hlist_add_head(&sdp->node, &nl_sdres_list);
293 }
294
295 if (!hlist_empty(&local->pending_sdreqs))
296 mod_timer(&local->sdreq_timer,
297 jiffies + msecs_to_jiffies(3 * local->remote_lto));
298
299 mutex_unlock(&local->sdreq_lock);
300
301 if (!hlist_empty(&nl_sdres_list))
302 nfc_genl_llc_send_sdres(local->dev, &nl_sdres_list);
303}
304
305static void nfc_llcp_sdreq_timer(unsigned long data)
306{
307 struct nfc_llcp_local *local = (struct nfc_llcp_local *) data;
308
309 schedule_work(&local->sdreq_timeout_work);
310}
311
268struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev) 312struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev)
269{ 313{
270 struct nfc_llcp_local *local, *n; 314 struct nfc_llcp_local *local, *n;
@@ -808,8 +852,6 @@ static void nfc_llcp_recv_ui(struct nfc_llcp_local *local,
808 ui_cb->dsap = dsap; 852 ui_cb->dsap = dsap;
809 ui_cb->ssap = ssap; 853 ui_cb->ssap = ssap;
810 854
811 printk("%s %d %d\n", __func__, dsap, ssap);
812
813 pr_debug("%d %d\n", dsap, ssap); 855 pr_debug("%d %d\n", dsap, ssap);
814 856
815 /* We're looking for a bound socket, not a client one */ 857 /* We're looking for a bound socket, not a client one */
@@ -907,7 +949,9 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
907 new_sock = nfc_llcp_sock(new_sk); 949 new_sock = nfc_llcp_sock(new_sk);
908 new_sock->dev = local->dev; 950 new_sock->dev = local->dev;
909 new_sock->local = nfc_llcp_local_get(local); 951 new_sock->local = nfc_llcp_local_get(local);
910 new_sock->miu = local->remote_miu; 952 new_sock->rw = sock->rw;
953 new_sock->miux = sock->miux;
954 new_sock->remote_miu = local->remote_miu;
911 new_sock->nfc_protocol = sock->nfc_protocol; 955 new_sock->nfc_protocol = sock->nfc_protocol;
912 new_sock->dsap = ssap; 956 new_sock->dsap = ssap;
913 new_sock->target_idx = local->target_idx; 957 new_sock->target_idx = local->target_idx;
@@ -961,11 +1005,11 @@ int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock)
961 1005
962 pr_debug("Remote ready %d tx queue len %d remote rw %d", 1006 pr_debug("Remote ready %d tx queue len %d remote rw %d",
963 sock->remote_ready, skb_queue_len(&sock->tx_pending_queue), 1007 sock->remote_ready, skb_queue_len(&sock->tx_pending_queue),
964 sock->rw); 1008 sock->remote_rw);
965 1009
966 /* Try to queue some I frames for transmission */ 1010 /* Try to queue some I frames for transmission */
967 while (sock->remote_ready && 1011 while (sock->remote_ready &&
968 skb_queue_len(&sock->tx_pending_queue) < sock->rw) { 1012 skb_queue_len(&sock->tx_pending_queue) < sock->remote_rw) {
969 struct sk_buff *pdu; 1013 struct sk_buff *pdu;
970 1014
971 pdu = skb_dequeue(&sock->tx_queue); 1015 pdu = skb_dequeue(&sock->tx_queue);
@@ -1186,6 +1230,10 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
1186 u16 tlv_len, offset; 1230 u16 tlv_len, offset;
1187 char *service_name; 1231 char *service_name;
1188 size_t service_name_len; 1232 size_t service_name_len;
1233 struct nfc_llcp_sdp_tlv *sdp;
1234 HLIST_HEAD(llc_sdres_list);
1235 size_t sdres_tlvs_len;
1236 HLIST_HEAD(nl_sdres_list);
1189 1237
1190 dsap = nfc_llcp_dsap(skb); 1238 dsap = nfc_llcp_dsap(skb);
1191 ssap = nfc_llcp_ssap(skb); 1239 ssap = nfc_llcp_ssap(skb);
@@ -1200,6 +1248,7 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
1200 tlv = &skb->data[LLCP_HEADER_SIZE]; 1248 tlv = &skb->data[LLCP_HEADER_SIZE];
1201 tlv_len = skb->len - LLCP_HEADER_SIZE; 1249 tlv_len = skb->len - LLCP_HEADER_SIZE;
1202 offset = 0; 1250 offset = 0;
1251 sdres_tlvs_len = 0;
1203 1252
1204 while (offset < tlv_len) { 1253 while (offset < tlv_len) {
1205 type = tlv[0]; 1254 type = tlv[0];
@@ -1217,14 +1266,14 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
1217 !strncmp(service_name, "urn:nfc:sn:sdp", 1266 !strncmp(service_name, "urn:nfc:sn:sdp",
1218 service_name_len)) { 1267 service_name_len)) {
1219 sap = 1; 1268 sap = 1;
1220 goto send_snl; 1269 goto add_snl;
1221 } 1270 }
1222 1271
1223 llcp_sock = nfc_llcp_sock_from_sn(local, service_name, 1272 llcp_sock = nfc_llcp_sock_from_sn(local, service_name,
1224 service_name_len); 1273 service_name_len);
1225 if (!llcp_sock) { 1274 if (!llcp_sock) {
1226 sap = 0; 1275 sap = 0;
1227 goto send_snl; 1276 goto add_snl;
1228 } 1277 }
1229 1278
1230 /* 1279 /*
@@ -1241,7 +1290,7 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
1241 1290
1242 if (sap == LLCP_SAP_MAX) { 1291 if (sap == LLCP_SAP_MAX) {
1243 sap = 0; 1292 sap = 0;
1244 goto send_snl; 1293 goto add_snl;
1245 } 1294 }
1246 1295
1247 client_count = 1296 client_count =
@@ -1258,8 +1307,37 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
1258 1307
1259 pr_debug("%p %d\n", llcp_sock, sap); 1308 pr_debug("%p %d\n", llcp_sock, sap);
1260 1309
1261send_snl: 1310add_snl:
1262 nfc_llcp_send_snl(local, tid, sap); 1311 sdp = nfc_llcp_build_sdres_tlv(tid, sap);
1312 if (sdp == NULL)
1313 goto exit;
1314
1315 sdres_tlvs_len += sdp->tlv_len;
1316 hlist_add_head(&sdp->node, &llc_sdres_list);
1317 break;
1318
1319 case LLCP_TLV_SDRES:
1320 mutex_lock(&local->sdreq_lock);
1321
1322 pr_debug("LLCP_TLV_SDRES: searching tid %d\n", tlv[2]);
1323
1324 hlist_for_each_entry(sdp, &local->pending_sdreqs, node) {
1325 if (sdp->tid != tlv[2])
1326 continue;
1327
1328 sdp->sap = tlv[3];
1329
1330 pr_debug("Found: uri=%s, sap=%d\n",
1331 sdp->uri, sdp->sap);
1332
1333 hlist_del(&sdp->node);
1334
1335 hlist_add_head(&sdp->node, &nl_sdres_list);
1336
1337 break;
1338 }
1339
1340 mutex_unlock(&local->sdreq_lock);
1263 break; 1341 break;
1264 1342
1265 default: 1343 default:
@@ -1270,6 +1348,13 @@ send_snl:
1270 offset += length + 2; 1348 offset += length + 2;
1271 tlv += length + 2; 1349 tlv += length + 2;
1272 } 1350 }
1351
1352exit:
1353 if (!hlist_empty(&nl_sdres_list))
1354 nfc_genl_llc_send_sdres(local->dev, &nl_sdres_list);
1355
1356 if (!hlist_empty(&llc_sdres_list))
1357 nfc_llcp_send_snl_sdres(local, &llc_sdres_list, sdres_tlvs_len);
1273} 1358}
1274 1359
1275static void nfc_llcp_rx_work(struct work_struct *work) 1360static void nfc_llcp_rx_work(struct work_struct *work)
@@ -1455,6 +1540,13 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
1455 local->remote_miu = LLCP_DEFAULT_MIU; 1540 local->remote_miu = LLCP_DEFAULT_MIU;
1456 local->remote_lto = LLCP_DEFAULT_LTO; 1541 local->remote_lto = LLCP_DEFAULT_LTO;
1457 1542
1543 mutex_init(&local->sdreq_lock);
1544 INIT_HLIST_HEAD(&local->pending_sdreqs);
1545 init_timer(&local->sdreq_timer);
1546 local->sdreq_timer.data = (unsigned long) local;
1547 local->sdreq_timer.function = nfc_llcp_sdreq_timer;
1548 INIT_WORK(&local->sdreq_timeout_work, nfc_llcp_sdreq_timeout_work);
1549
1458 list_add(&local->list, &llcp_devices); 1550 list_add(&local->list, &llcp_devices);
1459 1551
1460 return 0; 1552 return 0;
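
Editor's note: the llcp.c hunk above only initialises the service-discovery plumbing (old-style init_timer() with explicit .data/.function, plus a separate sdreq_timeout_work); the body of nfc_llcp_sdreq_timer() is outside this hunk. The following is a hedged sketch of the usual timer-to-workqueue hand-off such a callback performs, not the patch's actual implementation.

#include <linux/workqueue.h>
#include "llcp.h"	/* struct nfc_llcp_local, sdreq_timeout_work */

/* Hedged sketch: timer callback deferring to the sdreq timeout work
 * item set up in nfc_llcp_register_device() above. Timer context is
 * atomic while sdreq_lock is a mutex, so the real cleanup is assumed
 * to run from process context via the work item. */
static void nfc_llcp_sdreq_timer_sketch(unsigned long data)
{
	struct nfc_llcp_local *local = (struct nfc_llcp_local *) data;

	schedule_work(&local->sdreq_timeout_work);
}
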
diff --git a/net/nfc/llcp/llcp.h b/net/nfc/llcp/llcp.h
index 0eae5c509504..7e87a66b02ec 100644
--- a/net/nfc/llcp/llcp.h
+++ b/net/nfc/llcp/llcp.h
@@ -46,6 +46,19 @@ struct llcp_sock_list {
46 rwlock_t lock; 46 rwlock_t lock;
47}; 47};
48 48
49struct nfc_llcp_sdp_tlv {
50 u8 *tlv;
51 u8 tlv_len;
52
53 char *uri;
54 u8 tid;
55 u8 sap;
56
57 unsigned long time;
58
59 struct hlist_node node;
60};
61
49struct nfc_llcp_local { 62struct nfc_llcp_local {
50 struct list_head list; 63 struct list_head list;
51 struct nfc_dev *dev; 64 struct nfc_dev *dev;
@@ -86,6 +99,12 @@ struct nfc_llcp_local {
86 u8 remote_opt; 99 u8 remote_opt;
87 u16 remote_wks; 100 u16 remote_wks;
88 101
102 struct mutex sdreq_lock;
103 struct hlist_head pending_sdreqs;
104 struct timer_list sdreq_timer;
105 struct work_struct sdreq_timeout_work;
106 u8 sdreq_next_tid;
107
89 /* sockets array */ 108 /* sockets array */
90 struct llcp_sock_list sockets; 109 struct llcp_sock_list sockets;
91 struct llcp_sock_list connecting_sockets; 110 struct llcp_sock_list connecting_sockets;
@@ -105,7 +124,12 @@ struct nfc_llcp_sock {
105 char *service_name; 124 char *service_name;
106 size_t service_name_len; 125 size_t service_name_len;
107 u8 rw; 126 u8 rw;
108 u16 miu; 127 u16 miux;
128
129
130 /* Remote link parameters */
131 u8 remote_rw;
132 u16 remote_miu;
109 133
110 /* Link variables */ 134 /* Link variables */
111 u8 send_n; 135 u8 send_n;
@@ -213,12 +237,20 @@ int nfc_llcp_parse_connection_tlv(struct nfc_llcp_sock *sock,
213/* Commands API */ 237/* Commands API */
214void nfc_llcp_recv(void *data, struct sk_buff *skb, int err); 238void nfc_llcp_recv(void *data, struct sk_buff *skb, int err);
215u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length); 239u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length);
240struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdres_tlv(u8 tid, u8 sap);
241struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri,
242 size_t uri_len);
243void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp);
244void nfc_llcp_free_sdp_tlv_list(struct hlist_head *sdp_head);
216void nfc_llcp_recv(void *data, struct sk_buff *skb, int err); 245void nfc_llcp_recv(void *data, struct sk_buff *skb, int err);
217int nfc_llcp_disconnect(struct nfc_llcp_sock *sock); 246int nfc_llcp_disconnect(struct nfc_llcp_sock *sock);
218int nfc_llcp_send_symm(struct nfc_dev *dev); 247int nfc_llcp_send_symm(struct nfc_dev *dev);
219int nfc_llcp_send_connect(struct nfc_llcp_sock *sock); 248int nfc_llcp_send_connect(struct nfc_llcp_sock *sock);
220int nfc_llcp_send_cc(struct nfc_llcp_sock *sock); 249int nfc_llcp_send_cc(struct nfc_llcp_sock *sock);
221int nfc_llcp_send_snl(struct nfc_llcp_local *local, u8 tid, u8 sap); 250int nfc_llcp_send_snl_sdres(struct nfc_llcp_local *local,
251 struct hlist_head *tlv_list, size_t tlvs_len);
252int nfc_llcp_send_snl_sdreq(struct nfc_llcp_local *local,
253 struct hlist_head *tlv_list, size_t tlvs_len);
222int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason); 254int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason);
223int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock); 255int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock);
224int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock, 256int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
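
Editor's note: the new struct nfc_llcp_sdp_tlv above is only ever chained on hlists (local->pending_sdreqs and the temporary SDRES lists built in the SNL path) and released through nfc_llcp_free_sdp_tlv()/nfc_llcp_free_sdp_tlv_list(). A minimal sketch of the drain-and-free pattern those helpers imply is shown below; it assumes the free helper releases the tlv buffer, the uri string and the node itself, which this header does not spell out.

#include <linux/list.h>
#include <linux/printk.h>
#include "llcp.h"	/* struct nfc_llcp_sdp_tlv, nfc_llcp_free_sdp_tlv() */

/* Hedged sketch: walk an sdp tlv list and free every entry. The
 * _safe iterator is required because each node is unlinked while
 * the list is being walked. */
static void example_drain_sdp_list(struct hlist_head *head)
{
	struct nfc_llcp_sdp_tlv *sdp;
	struct hlist_node *n;

	hlist_for_each_entry_safe(sdp, n, head, node) {
		pr_debug("uri=%s tid=%d sap=%d\n",
			 sdp->uri, sdp->tid, sdp->sap);

		hlist_del(&sdp->node);
		nfc_llcp_free_sdp_tlv(sdp);	/* assumed to free tlv + uri */
	}
}
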
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 5c7cdf3f2a83..f1b377e247fe 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -223,6 +223,124 @@ error:
223 return ret; 223 return ret;
224} 224}
225 225
226static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname,
227 char __user *optval, unsigned int optlen)
228{
229 struct sock *sk = sock->sk;
230 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
231 u32 opt;
232 int err = 0;
233
234 pr_debug("%p optname %d\n", sk, optname);
235
236 if (level != SOL_NFC)
237 return -ENOPROTOOPT;
238
239 lock_sock(sk);
240
241 switch (optname) {
242 case NFC_LLCP_RW:
243 if (sk->sk_state == LLCP_CONNECTED ||
244 sk->sk_state == LLCP_BOUND ||
245 sk->sk_state == LLCP_LISTEN) {
246 err = -EINVAL;
247 break;
248 }
249
250 if (get_user(opt, (u32 __user *) optval)) {
251 err = -EFAULT;
252 break;
253 }
254
255 if (opt > LLCP_MAX_RW) {
256 err = -EINVAL;
257 break;
258 }
259
260 llcp_sock->rw = (u8) opt;
261
262 break;
263
264 case NFC_LLCP_MIUX:
265 if (sk->sk_state == LLCP_CONNECTED ||
266 sk->sk_state == LLCP_BOUND ||
267 sk->sk_state == LLCP_LISTEN) {
268 err = -EINVAL;
269 break;
270 }
271
272 if (get_user(opt, (u32 __user *) optval)) {
273 err = -EFAULT;
274 break;
275 }
276
277 if (opt > LLCP_MAX_MIUX) {
278 err = -EINVAL;
279 break;
280 }
281
282 llcp_sock->miux = (u16) opt;
283
284 break;
285
286 default:
287 err = -ENOPROTOOPT;
288 break;
289 }
290
291 release_sock(sk);
292
293 pr_debug("%p rw %d miux %d\n", llcp_sock,
294 llcp_sock->rw, llcp_sock->miux);
295
296 return err;
297}
298
299static int nfc_llcp_getsockopt(struct socket *sock, int level, int optname,
300 char __user *optval, int __user *optlen)
301{
302 struct sock *sk = sock->sk;
303 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
304 int len, err = 0;
305
306 pr_debug("%p optname %d\n", sk, optname);
307
308 if (level != SOL_NFC)
309 return -ENOPROTOOPT;
310
311 if (get_user(len, optlen))
312 return -EFAULT;
313
314 len = min_t(u32, len, sizeof(u32));
315
316 lock_sock(sk);
317
318 switch (optname) {
319 case NFC_LLCP_RW:
320 if (put_user(llcp_sock->rw, (u32 __user *) optval))
321 err = -EFAULT;
322
323 break;
324
325 case NFC_LLCP_MIUX:
326 if (put_user(llcp_sock->miux, (u32 __user *) optval))
327 err = -EFAULT;
328
329 break;
330
331 default:
332 err = -ENOPROTOOPT;
333 break;
334 }
335
336 release_sock(sk);
337
338 if (put_user(len, optlen))
339 return -EFAULT;
340
341 return err;
342}
343
226void nfc_llcp_accept_unlink(struct sock *sk) 344void nfc_llcp_accept_unlink(struct sock *sk)
227{ 345{
228 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); 346 struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
@@ -543,7 +661,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
543 661
544 llcp_sock->dev = dev; 662 llcp_sock->dev = dev;
545 llcp_sock->local = nfc_llcp_local_get(local); 663 llcp_sock->local = nfc_llcp_local_get(local);
546 llcp_sock->miu = llcp_sock->local->remote_miu; 664 llcp_sock->remote_miu = llcp_sock->local->remote_miu;
547 llcp_sock->ssap = nfc_llcp_get_local_ssap(local); 665 llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
548 if (llcp_sock->ssap == LLCP_SAP_MAX) { 666 if (llcp_sock->ssap == LLCP_SAP_MAX) {
549 ret = -ENOMEM; 667 ret = -ENOMEM;
@@ -737,8 +855,8 @@ static const struct proto_ops llcp_sock_ops = {
737 .ioctl = sock_no_ioctl, 855 .ioctl = sock_no_ioctl,
738 .listen = llcp_sock_listen, 856 .listen = llcp_sock_listen,
739 .shutdown = sock_no_shutdown, 857 .shutdown = sock_no_shutdown,
740 .setsockopt = sock_no_setsockopt, 858 .setsockopt = nfc_llcp_setsockopt,
741 .getsockopt = sock_no_getsockopt, 859 .getsockopt = nfc_llcp_getsockopt,
742 .sendmsg = llcp_sock_sendmsg, 860 .sendmsg = llcp_sock_sendmsg,
743 .recvmsg = llcp_sock_recvmsg, 861 .recvmsg = llcp_sock_recvmsg,
744 .mmap = sock_no_mmap, 862 .mmap = sock_no_mmap,
@@ -802,8 +920,10 @@ struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp)
802 920
803 llcp_sock->ssap = 0; 921 llcp_sock->ssap = 0;
804 llcp_sock->dsap = LLCP_SAP_SDP; 922 llcp_sock->dsap = LLCP_SAP_SDP;
805 llcp_sock->rw = LLCP_DEFAULT_RW; 923 llcp_sock->rw = LLCP_MAX_RW + 1;
806 llcp_sock->miu = LLCP_DEFAULT_MIU; 924 llcp_sock->miux = LLCP_MAX_MIUX + 1;
925 llcp_sock->remote_rw = LLCP_DEFAULT_RW;
926 llcp_sock->remote_miu = LLCP_DEFAULT_MIU;
807 llcp_sock->send_n = llcp_sock->send_ack_n = 0; 927 llcp_sock->send_n = llcp_sock->send_ack_n = 0;
808 llcp_sock->recv_n = llcp_sock->recv_ack_n = 0; 928 llcp_sock->recv_n = llcp_sock->recv_ack_n = 0;
809 llcp_sock->remote_ready = 1; 929 llcp_sock->remote_ready = 1;
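
Editor's note: with the sock.c changes above, NFC_LLCP_RW and NFC_LLCP_MIUX become settable and readable at SOL_NFC level, but only while the socket is neither bound, listening nor connected (otherwise -EINVAL). A hedged user-space sketch follows; it assumes the matching uapi additions from this series are visible through <linux/nfc.h>, and the fallback numeric values in the comments are taken from the kernel's linux/socket.h.

/* Hedged sketch: configure the local RW and MIUX before connecting an
 * LLCP socket. The kernel-internal bounds mentioned in the comments
 * (LLCP_MAX_RW = 15, LLCP_MAX_MIUX = 0x7ff) come from net/nfc/llcp/llcp.h. */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/nfc.h>

#ifndef AF_NFC
#define AF_NFC	39	/* value from the kernel's linux/socket.h */
#endif
#ifndef SOL_NFC
#define SOL_NFC	280	/* value from the kernel's linux/socket.h */
#endif

int main(void)
{
	__u32 rw = 4;		/* rejected with -EINVAL if > LLCP_MAX_RW */
	__u32 miux = 0x480;	/* rejected with -EINVAL if > LLCP_MAX_MIUX */
	socklen_t len = sizeof(rw);
	int fd;

	fd = socket(AF_NFC, SOCK_STREAM, NFC_SOCKPROTO_LLCP);
	if (fd < 0) {
		perror("socket(AF_NFC)");
		return 1;
	}

	/* Both options fail once the socket is bound, listening or
	 * connected, so set them right after socket creation. */
	if (setsockopt(fd, SOL_NFC, NFC_LLCP_RW, &rw, sizeof(rw)) < 0)
		perror("setsockopt(NFC_LLCP_RW)");
	if (setsockopt(fd, SOL_NFC, NFC_LLCP_MIUX, &miux, sizeof(miux)) < 0)
		perror("setsockopt(NFC_LLCP_MIUX)");

	if (getsockopt(fd, SOL_NFC, NFC_LLCP_RW, &rw, &len) == 0)
		printf("rw readback: %u\n", rw);

	return 0;
}
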
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 504b883439f1..73fd51098f4d 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -53,6 +53,15 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
53 [NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 }, 53 [NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 },
54 [NFC_ATTR_IM_PROTOCOLS] = { .type = NLA_U32 }, 54 [NFC_ATTR_IM_PROTOCOLS] = { .type = NLA_U32 },
55 [NFC_ATTR_TM_PROTOCOLS] = { .type = NLA_U32 }, 55 [NFC_ATTR_TM_PROTOCOLS] = { .type = NLA_U32 },
56 [NFC_ATTR_LLC_PARAM_LTO] = { .type = NLA_U8 },
57 [NFC_ATTR_LLC_PARAM_RW] = { .type = NLA_U8 },
58 [NFC_ATTR_LLC_PARAM_MIUX] = { .type = NLA_U16 },
59 [NFC_ATTR_LLC_SDP] = { .type = NLA_NESTED },
60};
61
62static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = {
63 [NFC_SDP_ATTR_URI] = { .type = NLA_STRING },
64 [NFC_SDP_ATTR_SAP] = { .type = NLA_U8 },
56}; 65};
57 66
58static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target, 67static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
@@ -348,6 +357,74 @@ free_msg:
348 return -EMSGSIZE; 357 return -EMSGSIZE;
349} 358}
350 359
360int nfc_genl_llc_send_sdres(struct nfc_dev *dev, struct hlist_head *sdres_list)
361{
362 struct sk_buff *msg;
363 struct nlattr *sdp_attr, *uri_attr;
364 struct nfc_llcp_sdp_tlv *sdres;
365 struct hlist_node *n;
366 void *hdr;
367 int rc = -EMSGSIZE;
368 int i;
369
370 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
371 if (!msg)
372 return -ENOMEM;
373
374 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
375 NFC_EVENT_LLC_SDRES);
376 if (!hdr)
377 goto free_msg;
378
379 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
380 goto nla_put_failure;
381
382 sdp_attr = nla_nest_start(msg, NFC_ATTR_LLC_SDP);
383 if (sdp_attr == NULL) {
384 rc = -ENOMEM;
385 goto nla_put_failure;
386 }
387
388 i = 1;
389 hlist_for_each_entry_safe(sdres, n, sdres_list, node) {
390 pr_debug("uri: %s, sap: %d\n", sdres->uri, sdres->sap);
391
392 uri_attr = nla_nest_start(msg, i++);
393 if (uri_attr == NULL) {
394 rc = -ENOMEM;
395 goto nla_put_failure;
396 }
397
398 if (nla_put_u8(msg, NFC_SDP_ATTR_SAP, sdres->sap))
399 goto nla_put_failure;
400
401 if (nla_put_string(msg, NFC_SDP_ATTR_URI, sdres->uri))
402 goto nla_put_failure;
403
404 nla_nest_end(msg, uri_attr);
405
406 hlist_del(&sdres->node);
407
408 nfc_llcp_free_sdp_tlv(sdres);
409 }
410
411 nla_nest_end(msg, sdp_attr);
412
413 genlmsg_end(msg, hdr);
414
415 return genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC);
416
417nla_put_failure:
418 genlmsg_cancel(msg, hdr);
419
420free_msg:
421 nlmsg_free(msg);
422
423 nfc_llcp_free_sdp_tlv_list(sdres_list);
424
425 return rc;
426}
427
351static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev, 428static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
352 u32 portid, u32 seq, 429 u32 portid, u32 seq,
353 struct netlink_callback *cb, 430 struct netlink_callback *cb,
@@ -859,6 +936,96 @@ exit:
859 return rc; 936 return rc;
860} 937}
861 938
939static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
940{
941 struct nfc_dev *dev;
942 struct nfc_llcp_local *local;
943 struct nlattr *attr, *sdp_attrs[NFC_SDP_ATTR_MAX+1];
944 u32 idx;
945 u8 tid;
946 char *uri;
947 int rc = 0, rem;
948 size_t uri_len, tlvs_len;
949 struct hlist_head sdreq_list;
950 struct nfc_llcp_sdp_tlv *sdreq;
951
952 if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
953 !info->attrs[NFC_ATTR_LLC_SDP])
954 return -EINVAL;
955
956 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
957
958 dev = nfc_get_device(idx);
959 if (!dev) {
960 rc = -ENODEV;
961 goto exit;
962 }
963
964 device_lock(&dev->dev);
965
966 if (dev->dep_link_up == false) {
967 rc = -ENOLINK;
968 goto exit;
969 }
970
971 local = nfc_llcp_find_local(dev);
972 if (!local) {
973 nfc_put_device(dev);
974 rc = -ENODEV;
975 goto exit;
976 }
977
978 INIT_HLIST_HEAD(&sdreq_list);
979
980 tlvs_len = 0;
981
982 nla_for_each_nested(attr, info->attrs[NFC_ATTR_LLC_SDP], rem) {
983 rc = nla_parse_nested(sdp_attrs, NFC_SDP_ATTR_MAX, attr,
984 nfc_sdp_genl_policy);
985
986 if (rc != 0) {
987 rc = -EINVAL;
988 goto exit;
989 }
990
991 if (!sdp_attrs[NFC_SDP_ATTR_URI])
992 continue;
993
994 uri_len = nla_len(sdp_attrs[NFC_SDP_ATTR_URI]);
995 if (uri_len == 0)
996 continue;
997
998 uri = nla_data(sdp_attrs[NFC_SDP_ATTR_URI]);
999 if (uri == NULL || *uri == 0)
1000 continue;
1001
1002 tid = local->sdreq_next_tid++;
1003
1004 sdreq = nfc_llcp_build_sdreq_tlv(tid, uri, uri_len);
1005 if (sdreq == NULL) {
1006 rc = -ENOMEM;
1007 goto exit;
1008 }
1009
1010 tlvs_len += sdreq->tlv_len;
1011
1012 hlist_add_head(&sdreq->node, &sdreq_list);
1013 }
1014
1015 if (hlist_empty(&sdreq_list)) {
1016 rc = -EINVAL;
1017 goto exit;
1018 }
1019
1020 rc = nfc_llcp_send_snl_sdreq(local, &sdreq_list, tlvs_len);
1021exit:
1022 device_unlock(&dev->dev);
1023
1024 nfc_put_device(dev);
1025
1026 return rc;
1027}
1028
862static struct genl_ops nfc_genl_ops[] = { 1029static struct genl_ops nfc_genl_ops[] = {
863 { 1030 {
864 .cmd = NFC_CMD_GET_DEVICE, 1031 .cmd = NFC_CMD_GET_DEVICE,
@@ -913,6 +1080,11 @@ static struct genl_ops nfc_genl_ops[] = {
913 .doit = nfc_genl_llc_set_params, 1080 .doit = nfc_genl_llc_set_params,
914 .policy = nfc_genl_policy, 1081 .policy = nfc_genl_policy,
915 }, 1082 },
1083 {
1084 .cmd = NFC_CMD_LLC_SDREQ,
1085 .doit = nfc_genl_llc_sdreq,
1086 .policy = nfc_genl_policy,
1087 },
916}; 1088};
917 1089
918 1090
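
Editor's note: nfc_genl_llc_sdreq() above expects a nested NFC_ATTR_LLC_SDP attribute whose entries each carry an NFC_SDP_ATTR_URI string, and the resolved SAPs come back asynchronously as an NFC_EVENT_LLC_SDRES multicast built by nfc_genl_llc_send_sdres(). Below is a hedged libnl-3 sketch of the requesting side; the "nfc" family name and NFC_* enums are assumed to match the uapi additions of this series, error handling is trimmed, and the nested index is arbitrary since the kernel walks the nest with nla_for_each_nested().

#include <errno.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nfc.h>

/* Hedged sketch: ask the kernel to resolve one LLCP service name via
 * NFC_CMD_LLC_SDREQ on device dev_idx. */
static int send_sdreq(int dev_idx, const char *uri)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	struct nlattr *sdp, *entry;
	int family, err;

	if (!sk)
		return -ENOMEM;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, NFC_GENL_NAME);	/* "nfc" */

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NFC_CMD_LLC_SDREQ, NFC_GENL_VERSION);
	nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev_idx);

	/* One nested entry per URI, mirroring the nla_for_each_nested()
	 * loop in nfc_genl_llc_sdreq(). */
	sdp = nla_nest_start(msg, NFC_ATTR_LLC_SDP);
	entry = nla_nest_start(msg, 1);
	nla_put_string(msg, NFC_SDP_ATTR_URI, uri);
	nla_nest_end(msg, entry);
	nla_nest_end(msg, sdp);

	err = nl_send_auto(sk, msg);

	nlmsg_free(msg);
	nl_socket_free(sk);

	return err < 0 ? err : 0;
}

Note that the request is only accepted while the DEP link is up (the handler returns -ENOLINK otherwise), and the answers must be collected by listening on the nfc generic netlink event multicast group.
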
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index 87d914d2876a..94bfe19ba678 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -46,6 +46,8 @@ struct nfc_rawsock {
46#define to_rawsock_sk(_tx_work) \ 46#define to_rawsock_sk(_tx_work) \
47 ((struct sock *) container_of(_tx_work, struct nfc_rawsock, tx_work)) 47 ((struct sock *) container_of(_tx_work, struct nfc_rawsock, tx_work))
48 48
49struct nfc_llcp_sdp_tlv;
50
49#ifdef CONFIG_NFC_LLCP 51#ifdef CONFIG_NFC_LLCP
50 52
51void nfc_llcp_mac_is_down(struct nfc_dev *dev); 53void nfc_llcp_mac_is_down(struct nfc_dev *dev);
@@ -59,6 +61,8 @@ int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb);
59struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev); 61struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
60int __init nfc_llcp_init(void); 62int __init nfc_llcp_init(void);
61void nfc_llcp_exit(void); 63void nfc_llcp_exit(void);
64void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp);
65void nfc_llcp_free_sdp_tlv_list(struct hlist_head *head);
62 66
63#else 67#else
64 68
@@ -112,6 +116,14 @@ static inline void nfc_llcp_exit(void)
112{ 116{
113} 117}
114 118
119static inline void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp)
120{
121}
122
123static inline void nfc_llcp_free_sdp_tlv_list(struct hlist_head *sdp_head)
124{
125}
126
115#endif 127#endif
116 128
117int __init rawsock_init(void); 129int __init rawsock_init(void);
@@ -144,6 +156,8 @@ int nfc_genl_dep_link_down_event(struct nfc_dev *dev);
144int nfc_genl_tm_activated(struct nfc_dev *dev, u32 protocol); 156int nfc_genl_tm_activated(struct nfc_dev *dev, u32 protocol);
145int nfc_genl_tm_deactivated(struct nfc_dev *dev); 157int nfc_genl_tm_deactivated(struct nfc_dev *dev);
146 158
159int nfc_genl_llc_send_sdres(struct nfc_dev *dev, struct hlist_head *sdres_list);
160
147struct nfc_dev *nfc_get_device(unsigned int idx); 161struct nfc_dev *nfc_get_device(unsigned int idx);
148 162
149static inline void nfc_put_device(struct nfc_dev *dev) 163static inline void nfc_put_device(struct nfc_dev *dev)
diff --git a/net/rfkill/rfkill-regulator.c b/net/rfkill/rfkill-regulator.c
index 4b5ab21ecb24..d11ac79246e4 100644
--- a/net/rfkill/rfkill-regulator.c
+++ b/net/rfkill/rfkill-regulator.c
@@ -51,7 +51,7 @@ static int rfkill_regulator_set_block(void *data, bool blocked)
51 return 0; 51 return 0;
52} 52}
53 53
54struct rfkill_ops rfkill_regulator_ops = { 54static struct rfkill_ops rfkill_regulator_ops = {
55 .set_block = rfkill_regulator_set_block, 55 .set_block = rfkill_regulator_set_block,
56}; 56};
57 57
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
index a4a14e8f55cc..324e8d851dc4 100644
--- a/net/wireless/ap.c
+++ b/net/wireless/ap.c
@@ -46,65 +46,3 @@ int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
46 46
47 return err; 47 return err;
48} 48}
49
50void cfg80211_ch_switch_notify(struct net_device *dev,
51 struct cfg80211_chan_def *chandef)
52{
53 struct wireless_dev *wdev = dev->ieee80211_ptr;
54 struct wiphy *wiphy = wdev->wiphy;
55 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
56
57 trace_cfg80211_ch_switch_notify(dev, chandef);
58
59 wdev_lock(wdev);
60
61 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
62 wdev->iftype != NL80211_IFTYPE_P2P_GO))
63 goto out;
64
65 wdev->channel = chandef->chan;
66 nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL);
67out:
68 wdev_unlock(wdev);
69 return;
70}
71EXPORT_SYMBOL(cfg80211_ch_switch_notify);
72
73bool cfg80211_rx_spurious_frame(struct net_device *dev,
74 const u8 *addr, gfp_t gfp)
75{
76 struct wireless_dev *wdev = dev->ieee80211_ptr;
77 bool ret;
78
79 trace_cfg80211_rx_spurious_frame(dev, addr);
80
81 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
82 wdev->iftype != NL80211_IFTYPE_P2P_GO)) {
83 trace_cfg80211_return_bool(false);
84 return false;
85 }
86 ret = nl80211_unexpected_frame(dev, addr, gfp);
87 trace_cfg80211_return_bool(ret);
88 return ret;
89}
90EXPORT_SYMBOL(cfg80211_rx_spurious_frame);
91
92bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
93 const u8 *addr, gfp_t gfp)
94{
95 struct wireless_dev *wdev = dev->ieee80211_ptr;
96 bool ret;
97
98 trace_cfg80211_rx_unexpected_4addr_frame(dev, addr);
99
100 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
101 wdev->iftype != NL80211_IFTYPE_P2P_GO &&
102 wdev->iftype != NL80211_IFTYPE_AP_VLAN)) {
103 trace_cfg80211_return_bool(false);
104 return false;
105 }
106 ret = nl80211_unexpected_4addr_frame(dev, addr, gfp);
107 trace_cfg80211_return_bool(ret);
108 return ret;
109}
110EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 6ddf74f0ae1e..84c9ad7e1dca 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -842,6 +842,46 @@ void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
842 rdev->num_running_monitor_ifaces += num; 842 rdev->num_running_monitor_ifaces += num;
843} 843}
844 844
845void cfg80211_leave(struct cfg80211_registered_device *rdev,
846 struct wireless_dev *wdev)
847{
848 struct net_device *dev = wdev->netdev;
849
850 switch (wdev->iftype) {
851 case NL80211_IFTYPE_ADHOC:
852 cfg80211_leave_ibss(rdev, dev, true);
853 break;
854 case NL80211_IFTYPE_P2P_CLIENT:
855 case NL80211_IFTYPE_STATION:
856 mutex_lock(&rdev->sched_scan_mtx);
857 __cfg80211_stop_sched_scan(rdev, false);
858 mutex_unlock(&rdev->sched_scan_mtx);
859
860 wdev_lock(wdev);
861#ifdef CONFIG_CFG80211_WEXT
862 kfree(wdev->wext.ie);
863 wdev->wext.ie = NULL;
864 wdev->wext.ie_len = 0;
865 wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
866#endif
867 __cfg80211_disconnect(rdev, dev,
868 WLAN_REASON_DEAUTH_LEAVING, true);
869 cfg80211_mlme_down(rdev, dev);
870 wdev_unlock(wdev);
871 break;
872 case NL80211_IFTYPE_MESH_POINT:
873 cfg80211_leave_mesh(rdev, dev);
874 break;
875 case NL80211_IFTYPE_AP:
876 cfg80211_stop_ap(rdev, dev);
877 break;
878 default:
879 break;
880 }
881
882 wdev->beacon_interval = 0;
883}
884
845static int cfg80211_netdev_notifier_call(struct notifier_block *nb, 885static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
846 unsigned long state, 886 unsigned long state,
847 void *ndev) 887 void *ndev)
@@ -910,38 +950,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
910 dev->priv_flags |= IFF_DONT_BRIDGE; 950 dev->priv_flags |= IFF_DONT_BRIDGE;
911 break; 951 break;
912 case NETDEV_GOING_DOWN: 952 case NETDEV_GOING_DOWN:
913 switch (wdev->iftype) { 953 cfg80211_leave(rdev, wdev);
914 case NL80211_IFTYPE_ADHOC:
915 cfg80211_leave_ibss(rdev, dev, true);
916 break;
917 case NL80211_IFTYPE_P2P_CLIENT:
918 case NL80211_IFTYPE_STATION:
919 mutex_lock(&rdev->sched_scan_mtx);
920 __cfg80211_stop_sched_scan(rdev, false);
921 mutex_unlock(&rdev->sched_scan_mtx);
922
923 wdev_lock(wdev);
924#ifdef CONFIG_CFG80211_WEXT
925 kfree(wdev->wext.ie);
926 wdev->wext.ie = NULL;
927 wdev->wext.ie_len = 0;
928 wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
929#endif
930 __cfg80211_disconnect(rdev, dev,
931 WLAN_REASON_DEAUTH_LEAVING, true);
932 cfg80211_mlme_down(rdev, dev);
933 wdev_unlock(wdev);
934 break;
935 case NL80211_IFTYPE_MESH_POINT:
936 cfg80211_leave_mesh(rdev, dev);
937 break;
938 case NL80211_IFTYPE_AP:
939 cfg80211_stop_ap(rdev, dev);
940 break;
941 default:
942 break;
943 }
944 wdev->beacon_interval = 0;
945 break; 954 break;
946 case NETDEV_DOWN: 955 case NETDEV_DOWN:
947 cfg80211_update_iface_num(rdev, wdev->iftype, -1); 956 cfg80211_update_iface_num(rdev, wdev->iftype, -1);
@@ -1117,8 +1126,10 @@ static int __init cfg80211_init(void)
1117 goto out_fail_reg; 1126 goto out_fail_reg;
1118 1127
1119 cfg80211_wq = create_singlethread_workqueue("cfg80211"); 1128 cfg80211_wq = create_singlethread_workqueue("cfg80211");
1120 if (!cfg80211_wq) 1129 if (!cfg80211_wq) {
1130 err = -ENOMEM;
1121 goto out_fail_wq; 1131 goto out_fail_wq;
1132 }
1122 1133
1123 return 0; 1134 return 0;
1124 1135
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 5845c2b37aa8..124e5e773fbc 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -330,20 +330,15 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
330int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, 330int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
331 struct net_device *dev, 331 struct net_device *dev,
332 struct ieee80211_channel *chan, 332 struct ieee80211_channel *chan,
333 const u8 *bssid, const u8 *prev_bssid, 333 const u8 *bssid,
334 const u8 *ssid, int ssid_len, 334 const u8 *ssid, int ssid_len,
335 const u8 *ie, int ie_len, bool use_mfp, 335 struct cfg80211_assoc_request *req);
336 struct cfg80211_crypto_settings *crypt,
337 u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
338 struct ieee80211_ht_cap *ht_capa_mask);
339int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, 336int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
340 struct net_device *dev, struct ieee80211_channel *chan, 337 struct net_device *dev,
341 const u8 *bssid, const u8 *prev_bssid, 338 struct ieee80211_channel *chan,
339 const u8 *bssid,
342 const u8 *ssid, int ssid_len, 340 const u8 *ssid, int ssid_len,
343 const u8 *ie, int ie_len, bool use_mfp, 341 struct cfg80211_assoc_request *req);
344 struct cfg80211_crypto_settings *crypt,
345 u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
346 struct ieee80211_ht_cap *ht_capa_mask);
347int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, 342int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
348 struct net_device *dev, const u8 *bssid, 343 struct net_device *dev, const u8 *bssid,
349 const u8 *ie, int ie_len, u16 reason, 344 const u8 *ie, int ie_len, u16 reason,
@@ -375,6 +370,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
375 bool no_cck, bool dont_wait_for_ack, u64 *cookie); 370 bool no_cck, bool dont_wait_for_ack, u64 *cookie);
376void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa, 371void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa,
377 const struct ieee80211_ht_cap *ht_capa_mask); 372 const struct ieee80211_ht_cap *ht_capa_mask);
373void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa,
374 const struct ieee80211_vht_cap *vht_capa_mask);
378 375
379/* SME */ 376/* SME */
380int __cfg80211_connect(struct cfg80211_registered_device *rdev, 377int __cfg80211_connect(struct cfg80211_registered_device *rdev,
@@ -503,6 +500,9 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
503void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, 500void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
504 enum nl80211_iftype iftype, int num); 501 enum nl80211_iftype iftype, int num);
505 502
503void cfg80211_leave(struct cfg80211_registered_device *rdev,
504 struct wireless_dev *wdev);
505
506void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, 506void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
507 struct wireless_dev *wdev); 507 struct wireless_dev *wdev);
508 508
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 55957a284f6c..0bb93f3061a4 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -85,6 +85,7 @@ const struct mesh_setup default_mesh_setup = {
85 .ie = NULL, 85 .ie = NULL,
86 .ie_len = 0, 86 .ie_len = 0,
87 .is_secure = false, 87 .is_secure = false,
88 .user_mpm = false,
88 .beacon_interval = MESH_DEFAULT_BEACON_INTERVAL, 89 .beacon_interval = MESH_DEFAULT_BEACON_INTERVAL,
89 .dtim_period = MESH_DEFAULT_DTIM_PERIOD, 90 .dtim_period = MESH_DEFAULT_DTIM_PERIOD,
90}; 91};
@@ -233,20 +234,6 @@ int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
233 return 0; 234 return 0;
234} 235}
235 236
236void cfg80211_notify_new_peer_candidate(struct net_device *dev,
237 const u8 *macaddr, const u8* ie, u8 ie_len, gfp_t gfp)
238{
239 struct wireless_dev *wdev = dev->ieee80211_ptr;
240
241 trace_cfg80211_notify_new_peer_candidate(dev, macaddr);
242 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_MESH_POINT))
243 return;
244
245 nl80211_send_new_peer_candidate(wiphy_to_dev(wdev->wiphy), dev,
246 macaddr, ie, ie_len, gfp);
247}
248EXPORT_SYMBOL(cfg80211_notify_new_peer_candidate);
249
250static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev, 237static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
251 struct net_device *dev) 238 struct net_device *dev)
252{ 239{
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index caddca35d686..390198bf4b36 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -187,30 +187,6 @@ void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len)
187} 187}
188EXPORT_SYMBOL(cfg80211_send_disassoc); 188EXPORT_SYMBOL(cfg80211_send_disassoc);
189 189
190void cfg80211_send_unprot_deauth(struct net_device *dev, const u8 *buf,
191 size_t len)
192{
193 struct wireless_dev *wdev = dev->ieee80211_ptr;
194 struct wiphy *wiphy = wdev->wiphy;
195 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
196
197 trace_cfg80211_send_unprot_deauth(dev);
198 nl80211_send_unprot_deauth(rdev, dev, buf, len, GFP_ATOMIC);
199}
200EXPORT_SYMBOL(cfg80211_send_unprot_deauth);
201
202void cfg80211_send_unprot_disassoc(struct net_device *dev, const u8 *buf,
203 size_t len)
204{
205 struct wireless_dev *wdev = dev->ieee80211_ptr;
206 struct wiphy *wiphy = wdev->wiphy;
207 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
208
209 trace_cfg80211_send_unprot_disassoc(dev);
210 nl80211_send_unprot_disassoc(rdev, dev, buf, len, GFP_ATOMIC);
211}
212EXPORT_SYMBOL(cfg80211_send_unprot_disassoc);
213
214void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr) 190void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr)
215{ 191{
216 struct wireless_dev *wdev = dev->ieee80211_ptr; 192 struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -367,27 +343,38 @@ void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa,
367 p1[i] &= p2[i]; 343 p1[i] &= p2[i];
368} 344}
369 345
 346/* Do a logical vht_capa &= vht_capa_mask. */
347void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa,
348 const struct ieee80211_vht_cap *vht_capa_mask)
349{
350 int i;
351 u8 *p1, *p2;
352 if (!vht_capa_mask) {
353 memset(vht_capa, 0, sizeof(*vht_capa));
354 return;
355 }
356
357 p1 = (u8*)(vht_capa);
358 p2 = (u8*)(vht_capa_mask);
359 for (i = 0; i < sizeof(*vht_capa); i++)
360 p1[i] &= p2[i];
361}
362
370int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, 363int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
371 struct net_device *dev, 364 struct net_device *dev,
372 struct ieee80211_channel *chan, 365 struct ieee80211_channel *chan,
373 const u8 *bssid, const u8 *prev_bssid, 366 const u8 *bssid,
374 const u8 *ssid, int ssid_len, 367 const u8 *ssid, int ssid_len,
375 const u8 *ie, int ie_len, bool use_mfp, 368 struct cfg80211_assoc_request *req)
376 struct cfg80211_crypto_settings *crypt,
377 u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
378 struct ieee80211_ht_cap *ht_capa_mask)
379{ 369{
380 struct wireless_dev *wdev = dev->ieee80211_ptr; 370 struct wireless_dev *wdev = dev->ieee80211_ptr;
381 struct cfg80211_assoc_request req;
382 int err; 371 int err;
383 bool was_connected = false; 372 bool was_connected = false;
384 373
385 ASSERT_WDEV_LOCK(wdev); 374 ASSERT_WDEV_LOCK(wdev);
386 375
387 memset(&req, 0, sizeof(req)); 376 if (wdev->current_bss && req->prev_bssid &&
388 377 ether_addr_equal(wdev->current_bss->pub.bssid, req->prev_bssid)) {
389 if (wdev->current_bss && prev_bssid &&
390 ether_addr_equal(wdev->current_bss->pub.bssid, prev_bssid)) {
391 /* 378 /*
392 * Trying to reassociate: Allow this to proceed and let the old 379 * Trying to reassociate: Allow this to proceed and let the old
393 * association to be dropped when the new one is completed. 380 * association to be dropped when the new one is completed.
@@ -399,40 +386,30 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
399 } else if (wdev->current_bss) 386 } else if (wdev->current_bss)
400 return -EALREADY; 387 return -EALREADY;
401 388
402 req.ie = ie; 389 cfg80211_oper_and_ht_capa(&req->ht_capa_mask,
403 req.ie_len = ie_len;
404 memcpy(&req.crypto, crypt, sizeof(req.crypto));
405 req.use_mfp = use_mfp;
406 req.prev_bssid = prev_bssid;
407 req.flags = assoc_flags;
408 if (ht_capa)
409 memcpy(&req.ht_capa, ht_capa, sizeof(req.ht_capa));
410 if (ht_capa_mask)
411 memcpy(&req.ht_capa_mask, ht_capa_mask,
412 sizeof(req.ht_capa_mask));
413 cfg80211_oper_and_ht_capa(&req.ht_capa_mask,
414 rdev->wiphy.ht_capa_mod_mask); 390 rdev->wiphy.ht_capa_mod_mask);
391 cfg80211_oper_and_vht_capa(&req->vht_capa_mask,
392 rdev->wiphy.vht_capa_mod_mask);
415 393
416 req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, 394 req->bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
417 WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); 395 WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
418 if (!req.bss) { 396 if (!req->bss) {
419 if (was_connected) 397 if (was_connected)
420 wdev->sme_state = CFG80211_SME_CONNECTED; 398 wdev->sme_state = CFG80211_SME_CONNECTED;
421 return -ENOENT; 399 return -ENOENT;
422 } 400 }
423 401
424 err = cfg80211_can_use_chan(rdev, wdev, req.bss->channel, 402 err = cfg80211_can_use_chan(rdev, wdev, chan, CHAN_MODE_SHARED);
425 CHAN_MODE_SHARED);
426 if (err) 403 if (err)
427 goto out; 404 goto out;
428 405
429 err = rdev_assoc(rdev, dev, &req); 406 err = rdev_assoc(rdev, dev, req);
430 407
431out: 408out:
432 if (err) { 409 if (err) {
433 if (was_connected) 410 if (was_connected)
434 wdev->sme_state = CFG80211_SME_CONNECTED; 411 wdev->sme_state = CFG80211_SME_CONNECTED;
435 cfg80211_put_bss(&rdev->wiphy, req.bss); 412 cfg80211_put_bss(&rdev->wiphy, req->bss);
436 } 413 }
437 414
438 return err; 415 return err;
@@ -441,21 +418,17 @@ out:
441int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, 418int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
442 struct net_device *dev, 419 struct net_device *dev,
443 struct ieee80211_channel *chan, 420 struct ieee80211_channel *chan,
444 const u8 *bssid, const u8 *prev_bssid, 421 const u8 *bssid,
445 const u8 *ssid, int ssid_len, 422 const u8 *ssid, int ssid_len,
446 const u8 *ie, int ie_len, bool use_mfp, 423 struct cfg80211_assoc_request *req)
447 struct cfg80211_crypto_settings *crypt,
448 u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
449 struct ieee80211_ht_cap *ht_capa_mask)
450{ 424{
451 struct wireless_dev *wdev = dev->ieee80211_ptr; 425 struct wireless_dev *wdev = dev->ieee80211_ptr;
452 int err; 426 int err;
453 427
454 mutex_lock(&rdev->devlist_mtx); 428 mutex_lock(&rdev->devlist_mtx);
455 wdev_lock(wdev); 429 wdev_lock(wdev);
456 err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid, 430 err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid,
457 ssid, ssid_len, ie, ie_len, use_mfp, crypt, 431 ssid, ssid_len, req);
458 assoc_flags, ht_capa, ht_capa_mask);
459 wdev_unlock(wdev); 432 wdev_unlock(wdev);
460 mutex_unlock(&rdev->devlist_mtx); 433 mutex_unlock(&rdev->devlist_mtx);
461 434
@@ -577,62 +550,6 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
577 } 550 }
578} 551}
579 552
580void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie,
581 struct ieee80211_channel *chan,
582 unsigned int duration, gfp_t gfp)
583{
584 struct wiphy *wiphy = wdev->wiphy;
585 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
586
587 trace_cfg80211_ready_on_channel(wdev, cookie, chan, duration);
588 nl80211_send_remain_on_channel(rdev, wdev, cookie, chan, duration, gfp);
589}
590EXPORT_SYMBOL(cfg80211_ready_on_channel);
591
592void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie,
593 struct ieee80211_channel *chan,
594 gfp_t gfp)
595{
596 struct wiphy *wiphy = wdev->wiphy;
597 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
598
599 trace_cfg80211_ready_on_channel_expired(wdev, cookie, chan);
600 nl80211_send_remain_on_channel_cancel(rdev, wdev, cookie, chan, gfp);
601}
602EXPORT_SYMBOL(cfg80211_remain_on_channel_expired);
603
604void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
605 struct station_info *sinfo, gfp_t gfp)
606{
607 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
608 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
609
610 trace_cfg80211_new_sta(dev, mac_addr, sinfo);
611 nl80211_send_sta_event(rdev, dev, mac_addr, sinfo, gfp);
612}
613EXPORT_SYMBOL(cfg80211_new_sta);
614
615void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp)
616{
617 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
618 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
619
620 trace_cfg80211_del_sta(dev, mac_addr);
621 nl80211_send_sta_del_event(rdev, dev, mac_addr, gfp);
622}
623EXPORT_SYMBOL(cfg80211_del_sta);
624
625void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
626 enum nl80211_connect_failed_reason reason,
627 gfp_t gfp)
628{
629 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
630 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
631
632 nl80211_send_conn_failed_event(rdev, dev, mac_addr, reason, gfp);
633}
634EXPORT_SYMBOL(cfg80211_conn_failed);
635
636struct cfg80211_mgmt_registration { 553struct cfg80211_mgmt_registration {
637 struct list_head list; 554 struct list_head list;
638 555
@@ -909,85 +826,6 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
909} 826}
910EXPORT_SYMBOL(cfg80211_rx_mgmt); 827EXPORT_SYMBOL(cfg80211_rx_mgmt);
911 828
912void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
913 const u8 *buf, size_t len, bool ack, gfp_t gfp)
914{
915 struct wiphy *wiphy = wdev->wiphy;
916 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
917
918 trace_cfg80211_mgmt_tx_status(wdev, cookie, ack);
919
920 /* Indicate TX status of the Action frame to user space */
921 nl80211_send_mgmt_tx_status(rdev, wdev, cookie, buf, len, ack, gfp);
922}
923EXPORT_SYMBOL(cfg80211_mgmt_tx_status);
924
925void cfg80211_cqm_rssi_notify(struct net_device *dev,
926 enum nl80211_cqm_rssi_threshold_event rssi_event,
927 gfp_t gfp)
928{
929 struct wireless_dev *wdev = dev->ieee80211_ptr;
930 struct wiphy *wiphy = wdev->wiphy;
931 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
932
933 trace_cfg80211_cqm_rssi_notify(dev, rssi_event);
934
935 /* Indicate roaming trigger event to user space */
936 nl80211_send_cqm_rssi_notify(rdev, dev, rssi_event, gfp);
937}
938EXPORT_SYMBOL(cfg80211_cqm_rssi_notify);
939
940void cfg80211_cqm_pktloss_notify(struct net_device *dev,
941 const u8 *peer, u32 num_packets, gfp_t gfp)
942{
943 struct wireless_dev *wdev = dev->ieee80211_ptr;
944 struct wiphy *wiphy = wdev->wiphy;
945 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
946
947 trace_cfg80211_cqm_pktloss_notify(dev, peer, num_packets);
948
949 /* Indicate roaming trigger event to user space */
950 nl80211_send_cqm_pktloss_notify(rdev, dev, peer, num_packets, gfp);
951}
952EXPORT_SYMBOL(cfg80211_cqm_pktloss_notify);
953
954void cfg80211_cqm_txe_notify(struct net_device *dev,
955 const u8 *peer, u32 num_packets,
956 u32 rate, u32 intvl, gfp_t gfp)
957{
958 struct wireless_dev *wdev = dev->ieee80211_ptr;
959 struct wiphy *wiphy = wdev->wiphy;
960 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
961
962 nl80211_send_cqm_txe_notify(rdev, dev, peer, num_packets,
963 rate, intvl, gfp);
964}
965EXPORT_SYMBOL(cfg80211_cqm_txe_notify);
966
967void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
968 const u8 *replay_ctr, gfp_t gfp)
969{
970 struct wireless_dev *wdev = dev->ieee80211_ptr;
971 struct wiphy *wiphy = wdev->wiphy;
972 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
973
974 trace_cfg80211_gtk_rekey_notify(dev, bssid);
975 nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp);
976}
977EXPORT_SYMBOL(cfg80211_gtk_rekey_notify);
978
979void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
980 const u8 *bssid, bool preauth, gfp_t gfp)
981{
982 struct wireless_dev *wdev = dev->ieee80211_ptr;
983 struct wiphy *wiphy = wdev->wiphy;
984 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
985
986 trace_cfg80211_pmksa_candidate_notify(dev, index, bssid, preauth);
987 nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp);
988}
989EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
990
991void cfg80211_dfs_channels_update_work(struct work_struct *work) 829void cfg80211_dfs_channels_update_work(struct work_struct *work)
992{ 830{
993 struct delayed_work *delayed_work; 831 struct delayed_work *delayed_work;
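
Editor's note: cfg80211_oper_and_vht_capa() above mirrors the existing HT handling: the VHT capability override mask supplied by user space is byte-wise ANDed with the driver-provided wiphy vht_capa_mod_mask, so only bits the driver explicitly permits can be overridden at associate time. A hedged sketch of the driver side follows; the wiphy field is the one this series adds alongside ht_capa_mod_mask, and the specific capability bits chosen are illustrative only.

#include <linux/ieee80211.h>
#include <net/cfg80211.h>

/* Hedged sketch: advertise which VHT capability bits user space may
 * override on association. Bits not set here are stripped from the
 * user-supplied mask by cfg80211_oper_and_vht_capa() before the
 * association request reaches the driver. */
static const struct ieee80211_vht_cap example_vht_capa_mod_mask = {
	.vht_cap_info = cpu_to_le32(IEEE80211_VHT_CAP_SHORT_GI_80 |
				    IEEE80211_VHT_CAP_RXLDPC),
};

static void example_wiphy_setup(struct wiphy *wiphy)
{
	wiphy->vht_capa_mod_mask = &example_vht_capa_mod_mask;
}
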
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 58e13a8c95f9..671b69a3c136 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -370,6 +370,14 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
370 [NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED }, 370 [NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED },
371 [NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 }, 371 [NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 },
372 [NL80211_ATTR_STA_EXT_CAPABILITY] = { .type = NLA_BINARY, }, 372 [NL80211_ATTR_STA_EXT_CAPABILITY] = { .type = NLA_BINARY, },
373 [NL80211_ATTR_SPLIT_WIPHY_DUMP] = { .type = NLA_FLAG, },
374 [NL80211_ATTR_DISABLE_VHT] = { .type = NLA_FLAG },
375 [NL80211_ATTR_VHT_CAPABILITY_MASK] = {
376 .len = NL80211_VHT_CAPABILITY_LEN,
377 },
378 [NL80211_ATTR_MDID] = { .type = NLA_U16 },
379 [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY,
380 .len = IEEE80211_MAX_DATA_LEN },
373}; 381};
374 382
375/* policy for the key attributes */ 383/* policy for the key attributes */
@@ -539,7 +547,8 @@ static inline void *nl80211hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
539} 547}
540 548
541static int nl80211_msg_put_channel(struct sk_buff *msg, 549static int nl80211_msg_put_channel(struct sk_buff *msg,
542 struct ieee80211_channel *chan) 550 struct ieee80211_channel *chan,
551 bool large)
543{ 552{
544 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ, 553 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ,
545 chan->center_freq)) 554 chan->center_freq))
@@ -554,9 +563,37 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
554 if ((chan->flags & IEEE80211_CHAN_NO_IBSS) && 563 if ((chan->flags & IEEE80211_CHAN_NO_IBSS) &&
555 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS)) 564 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS))
556 goto nla_put_failure; 565 goto nla_put_failure;
557 if ((chan->flags & IEEE80211_CHAN_RADAR) && 566 if (chan->flags & IEEE80211_CHAN_RADAR) {
558 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR)) 567 if (nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
559 goto nla_put_failure; 568 goto nla_put_failure;
569 if (large) {
570 u32 time;
571
572 time = elapsed_jiffies_msecs(chan->dfs_state_entered);
573
574 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_STATE,
575 chan->dfs_state))
576 goto nla_put_failure;
577 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_TIME,
578 time))
579 goto nla_put_failure;
580 }
581 }
582
583 if (large) {
584 if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) &&
585 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS))
586 goto nla_put_failure;
587 if ((chan->flags & IEEE80211_CHAN_NO_HT40PLUS) &&
588 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_PLUS))
589 goto nla_put_failure;
590 if ((chan->flags & IEEE80211_CHAN_NO_80MHZ) &&
591 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_80MHZ))
592 goto nla_put_failure;
593 if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) &&
594 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ))
595 goto nla_put_failure;
596 }
560 597
561 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, 598 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
562 DBM_TO_MBM(chan->max_power))) 599 DBM_TO_MBM(chan->max_power)))
@@ -832,7 +869,8 @@ nla_put_failure:
832} 869}
833 870
834static int nl80211_put_iface_combinations(struct wiphy *wiphy, 871static int nl80211_put_iface_combinations(struct wiphy *wiphy,
835 struct sk_buff *msg) 872 struct sk_buff *msg,
873 bool large)
836{ 874{
837 struct nlattr *nl_combis; 875 struct nlattr *nl_combis;
838 int i, j; 876 int i, j;
@@ -881,6 +919,10 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy,
881 nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM, 919 nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM,
882 c->max_interfaces)) 920 c->max_interfaces))
883 goto nla_put_failure; 921 goto nla_put_failure;
922 if (large &&
923 nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
924 c->radar_detect_widths))
925 goto nla_put_failure;
884 926
885 nla_nest_end(msg, nl_combi); 927 nla_nest_end(msg, nl_combi);
886 } 928 }
@@ -892,412 +934,611 @@ nla_put_failure:
892 return -ENOBUFS; 934 return -ENOBUFS;
893} 935}
894 936
895static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags, 937#ifdef CONFIG_PM
896 struct cfg80211_registered_device *dev) 938static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev,
939 struct sk_buff *msg)
897{ 940{
898 void *hdr; 941 const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan.tcp;
899 struct nlattr *nl_bands, *nl_band; 942 struct nlattr *nl_tcp;
900 struct nlattr *nl_freqs, *nl_freq;
901 struct nlattr *nl_rates, *nl_rate;
902 struct nlattr *nl_cmds;
903 enum ieee80211_band band;
904 struct ieee80211_channel *chan;
905 struct ieee80211_rate *rate;
906 int i;
907 const struct ieee80211_txrx_stypes *mgmt_stypes =
908 dev->wiphy.mgmt_stypes;
909 943
910 hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_WIPHY); 944 if (!tcp)
911 if (!hdr) 945 return 0;
912 return -1;
913 946
914 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx) || 947 nl_tcp = nla_nest_start(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION);
915 nla_put_string(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)) || 948 if (!nl_tcp)
916 nla_put_u32(msg, NL80211_ATTR_GENERATION, 949 return -ENOBUFS;
917 cfg80211_rdev_list_generation) ||
918 nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
919 dev->wiphy.retry_short) ||
920 nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
921 dev->wiphy.retry_long) ||
922 nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
923 dev->wiphy.frag_threshold) ||
924 nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
925 dev->wiphy.rts_threshold) ||
926 nla_put_u8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
927 dev->wiphy.coverage_class) ||
928 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
929 dev->wiphy.max_scan_ssids) ||
930 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS,
931 dev->wiphy.max_sched_scan_ssids) ||
932 nla_put_u16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
933 dev->wiphy.max_scan_ie_len) ||
934 nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
935 dev->wiphy.max_sched_scan_ie_len) ||
936 nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS,
937 dev->wiphy.max_match_sets))
938 goto nla_put_failure;
939 950
940 if ((dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) && 951 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
941 nla_put_flag(msg, NL80211_ATTR_SUPPORT_IBSS_RSN)) 952 tcp->data_payload_max))
942 goto nla_put_failure; 953 return -ENOBUFS;
943 if ((dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) &&
944 nla_put_flag(msg, NL80211_ATTR_SUPPORT_MESH_AUTH))
945 goto nla_put_failure;
946 if ((dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
947 nla_put_flag(msg, NL80211_ATTR_SUPPORT_AP_UAPSD))
948 goto nla_put_failure;
949 if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) &&
950 nla_put_flag(msg, NL80211_ATTR_ROAM_SUPPORT))
951 goto nla_put_failure;
952 if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
953 nla_put_flag(msg, NL80211_ATTR_TDLS_SUPPORT))
954 goto nla_put_failure;
955 if ((dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) &&
956 nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP))
957 goto nla_put_failure;
958 954
959 if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES, 955 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
960 sizeof(u32) * dev->wiphy.n_cipher_suites, 956 tcp->data_payload_max))
961 dev->wiphy.cipher_suites)) 957 return -ENOBUFS;
962 goto nla_put_failure;
963 958
964 if (nla_put_u8(msg, NL80211_ATTR_MAX_NUM_PMKIDS, 959 if (tcp->seq && nla_put_flag(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ))
965 dev->wiphy.max_num_pmkids)) 960 return -ENOBUFS;
966 goto nla_put_failure;
967 961
968 if ((dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) && 962 if (tcp->tok && nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN,
969 nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE)) 963 sizeof(*tcp->tok), tcp->tok))
970 goto nla_put_failure; 964 return -ENOBUFS;
971 965
972 if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX, 966 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL,
973 dev->wiphy.available_antennas_tx) || 967 tcp->data_interval_max))
974 nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX, 968 return -ENOBUFS;
975 dev->wiphy.available_antennas_rx))
976 goto nla_put_failure;
977 969
978 if ((dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) && 970 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD,
979 nla_put_u32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD, 971 tcp->wake_payload_max))
980 dev->wiphy.probe_resp_offload)) 972 return -ENOBUFS;
981 goto nla_put_failure;
982 973
983 if ((dev->wiphy.available_antennas_tx || 974 nla_nest_end(msg, nl_tcp);
984 dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) { 975 return 0;
985 u32 tx_ant = 0, rx_ant = 0; 976}
986 int res; 977
987 res = rdev_get_antenna(dev, &tx_ant, &rx_ant); 978static int nl80211_send_wowlan(struct sk_buff *msg,
988 if (!res) { 979 struct cfg80211_registered_device *dev,
989 if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX, 980 bool large)
990 tx_ant) || 981{
991 nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX, 982 struct nlattr *nl_wowlan;
992 rx_ant)) 983
993 goto nla_put_failure; 984 if (!dev->wiphy.wowlan.flags && !dev->wiphy.wowlan.n_patterns)
994 } 985 return 0;
986
987 nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED);
988 if (!nl_wowlan)
989 return -ENOBUFS;
990
991 if (((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY) &&
992 nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) ||
993 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT) &&
994 nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
995 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT) &&
996 nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
997 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) &&
998 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) ||
999 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
1000 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
1001 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) &&
1002 nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
1003 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) &&
1004 nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
1005 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE) &&
1006 nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
1007 return -ENOBUFS;
1008
1009 if (dev->wiphy.wowlan.n_patterns) {
1010 struct nl80211_wowlan_pattern_support pat = {
1011 .max_patterns = dev->wiphy.wowlan.n_patterns,
1012 .min_pattern_len = dev->wiphy.wowlan.pattern_min_len,
1013 .max_pattern_len = dev->wiphy.wowlan.pattern_max_len,
1014 .max_pkt_offset = dev->wiphy.wowlan.max_pkt_offset,
1015 };
1016
1017 if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
1018 sizeof(pat), &pat))
1019 return -ENOBUFS;
995 } 1020 }
996 1021
997 if (nl80211_put_iftypes(msg, NL80211_ATTR_SUPPORTED_IFTYPES, 1022 if (large && nl80211_send_wowlan_tcp_caps(dev, msg))
998 dev->wiphy.interface_modes)) 1023 return -ENOBUFS;
999 goto nla_put_failure;
1000 1024
1001 nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS); 1025 nla_nest_end(msg, nl_wowlan);
1002 if (!nl_bands)
1003 goto nla_put_failure;
1004 1026
1005 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 1027 return 0;
1006 if (!dev->wiphy.bands[band]) 1028}
1007 continue; 1029#endif
1008 1030
1009 nl_band = nla_nest_start(msg, band); 1031static int nl80211_send_band_rateinfo(struct sk_buff *msg,
1010 if (!nl_band) 1032 struct ieee80211_supported_band *sband)
1011 goto nla_put_failure; 1033{
1034 struct nlattr *nl_rates, *nl_rate;
1035 struct ieee80211_rate *rate;
1036 int i;
1012 1037
1013 /* add HT info */ 1038 /* add HT info */
1014 if (dev->wiphy.bands[band]->ht_cap.ht_supported && 1039 if (sband->ht_cap.ht_supported &&
1015 (nla_put(msg, NL80211_BAND_ATTR_HT_MCS_SET, 1040 (nla_put(msg, NL80211_BAND_ATTR_HT_MCS_SET,
1016 sizeof(dev->wiphy.bands[band]->ht_cap.mcs), 1041 sizeof(sband->ht_cap.mcs),
1017 &dev->wiphy.bands[band]->ht_cap.mcs) || 1042 &sband->ht_cap.mcs) ||
1018 nla_put_u16(msg, NL80211_BAND_ATTR_HT_CAPA, 1043 nla_put_u16(msg, NL80211_BAND_ATTR_HT_CAPA,
1019 dev->wiphy.bands[band]->ht_cap.cap) || 1044 sband->ht_cap.cap) ||
1020 nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR, 1045 nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR,
1021 dev->wiphy.bands[band]->ht_cap.ampdu_factor) || 1046 sband->ht_cap.ampdu_factor) ||
1022 nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY, 1047 nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY,
1023 dev->wiphy.bands[band]->ht_cap.ampdu_density))) 1048 sband->ht_cap.ampdu_density)))
1024 goto nla_put_failure; 1049 return -ENOBUFS;
1025 1050
1026 /* add VHT info */ 1051 /* add VHT info */
1027 if (dev->wiphy.bands[band]->vht_cap.vht_supported && 1052 if (sband->vht_cap.vht_supported &&
1028 (nla_put(msg, NL80211_BAND_ATTR_VHT_MCS_SET, 1053 (nla_put(msg, NL80211_BAND_ATTR_VHT_MCS_SET,
1029 sizeof(dev->wiphy.bands[band]->vht_cap.vht_mcs), 1054 sizeof(sband->vht_cap.vht_mcs),
1030 &dev->wiphy.bands[band]->vht_cap.vht_mcs) || 1055 &sband->vht_cap.vht_mcs) ||
1031 nla_put_u32(msg, NL80211_BAND_ATTR_VHT_CAPA, 1056 nla_put_u32(msg, NL80211_BAND_ATTR_VHT_CAPA,
1032 dev->wiphy.bands[band]->vht_cap.cap))) 1057 sband->vht_cap.cap)))
1033 goto nla_put_failure; 1058 return -ENOBUFS;
1034 1059
1035 /* add frequencies */ 1060 /* add bitrates */
1036 nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS); 1061 nl_rates = nla_nest_start(msg, NL80211_BAND_ATTR_RATES);
1037 if (!nl_freqs) 1062 if (!nl_rates)
1038 goto nla_put_failure; 1063 return -ENOBUFS;
1039 1064
1040 for (i = 0; i < dev->wiphy.bands[band]->n_channels; i++) { 1065 for (i = 0; i < sband->n_bitrates; i++) {
1041 nl_freq = nla_nest_start(msg, i); 1066 nl_rate = nla_nest_start(msg, i);
1042 if (!nl_freq) 1067 if (!nl_rate)
1043 goto nla_put_failure; 1068 return -ENOBUFS;
1044 1069
1045 chan = &dev->wiphy.bands[band]->channels[i]; 1070 rate = &sband->bitrates[i];
1071 if (nla_put_u32(msg, NL80211_BITRATE_ATTR_RATE,
1072 rate->bitrate))
1073 return -ENOBUFS;
1074 if ((rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) &&
1075 nla_put_flag(msg,
1076 NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE))
1077 return -ENOBUFS;
1046 1078
1047 if (nl80211_msg_put_channel(msg, chan)) 1079 nla_nest_end(msg, nl_rate);
1048 goto nla_put_failure; 1080 }
1049 1081
1050 nla_nest_end(msg, nl_freq); 1082 nla_nest_end(msg, nl_rates);
1051 }
1052 1083
1053 nla_nest_end(msg, nl_freqs); 1084 return 0;
1085}
1054 1086
1055 /* add bitrates */ 1087static int
1056 nl_rates = nla_nest_start(msg, NL80211_BAND_ATTR_RATES); 1088nl80211_send_mgmt_stypes(struct sk_buff *msg,
1057 if (!nl_rates) 1089 const struct ieee80211_txrx_stypes *mgmt_stypes)
1058 goto nla_put_failure; 1090{
1091 u16 stypes;
1092 struct nlattr *nl_ftypes, *nl_ifs;
1093 enum nl80211_iftype ift;
1094 int i;
1059 1095
1060 for (i = 0; i < dev->wiphy.bands[band]->n_bitrates; i++) { 1096 if (!mgmt_stypes)
1061 nl_rate = nla_nest_start(msg, i); 1097 return 0;
1062 if (!nl_rate)
1063 goto nla_put_failure;
1064 1098
1065 rate = &dev->wiphy.bands[band]->bitrates[i]; 1099 nl_ifs = nla_nest_start(msg, NL80211_ATTR_TX_FRAME_TYPES);
1066 if (nla_put_u32(msg, NL80211_BITRATE_ATTR_RATE, 1100 if (!nl_ifs)
1067 rate->bitrate)) 1101 return -ENOBUFS;
1068 goto nla_put_failure;
1069 if ((rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) &&
1070 nla_put_flag(msg,
1071 NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE))
1072 goto nla_put_failure;
1073 1102
1074 nla_nest_end(msg, nl_rate); 1103 for (ift = 0; ift < NUM_NL80211_IFTYPES; ift++) {
1104 nl_ftypes = nla_nest_start(msg, ift);
1105 if (!nl_ftypes)
1106 return -ENOBUFS;
1107 i = 0;
1108 stypes = mgmt_stypes[ift].tx;
1109 while (stypes) {
1110 if ((stypes & 1) &&
1111 nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE,
1112 (i << 4) | IEEE80211_FTYPE_MGMT))
1113 return -ENOBUFS;
1114 stypes >>= 1;
1115 i++;
1075 } 1116 }
1117 nla_nest_end(msg, nl_ftypes);
1118 }
1076 1119
1077 nla_nest_end(msg, nl_rates); 1120 nla_nest_end(msg, nl_ifs);
1078 1121
1079 nla_nest_end(msg, nl_band); 1122 nl_ifs = nla_nest_start(msg, NL80211_ATTR_RX_FRAME_TYPES);
1123 if (!nl_ifs)
1124 return -ENOBUFS;
1125
1126 for (ift = 0; ift < NUM_NL80211_IFTYPES; ift++) {
1127 nl_ftypes = nla_nest_start(msg, ift);
1128 if (!nl_ftypes)
1129 return -ENOBUFS;
1130 i = 0;
1131 stypes = mgmt_stypes[ift].rx;
1132 while (stypes) {
1133 if ((stypes & 1) &&
1134 nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE,
1135 (i << 4) | IEEE80211_FTYPE_MGMT))
1136 return -ENOBUFS;
1137 stypes >>= 1;
1138 i++;
1139 }
1140 nla_nest_end(msg, nl_ftypes);
1080 } 1141 }
1081 nla_nest_end(msg, nl_bands); 1142 nla_nest_end(msg, nl_ifs);
1082 1143
1083 nl_cmds = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_COMMANDS); 1144 return 0;
1084 if (!nl_cmds) 1145}
1085 goto nla_put_failure;
1086 1146
1087 i = 0; 1147static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1088#define CMD(op, n) \ 1148 struct sk_buff *msg, u32 portid, u32 seq,
1089 do { \ 1149 int flags, bool split, long *split_start,
1090 if (dev->ops->op) { \ 1150 long *band_start, long *chan_start)
1091 i++; \ 1151{
1092 if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \ 1152 void *hdr;
1093 goto nla_put_failure; \ 1153 struct nlattr *nl_bands, *nl_band;
1094 } \ 1154 struct nlattr *nl_freqs, *nl_freq;
1095 } while (0) 1155 struct nlattr *nl_cmds;
1096 1156 enum ieee80211_band band;
1097 CMD(add_virtual_intf, NEW_INTERFACE); 1157 struct ieee80211_channel *chan;
1098 CMD(change_virtual_intf, SET_INTERFACE); 1158 int i;
1099 CMD(add_key, NEW_KEY); 1159 const struct ieee80211_txrx_stypes *mgmt_stypes =
1100 CMD(start_ap, START_AP); 1160 dev->wiphy.mgmt_stypes;
1101 CMD(add_station, NEW_STATION); 1161 long start = 0, start_chan = 0, start_band = 0;
1102 CMD(add_mpath, NEW_MPATH); 1162 u32 features;
1103 CMD(update_mesh_config, SET_MESH_CONFIG); 1163
1104 CMD(change_bss, SET_BSS); 1164 hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_WIPHY);
1105 CMD(auth, AUTHENTICATE); 1165 if (!hdr)
1106 CMD(assoc, ASSOCIATE); 1166 return -ENOBUFS;
1107 CMD(deauth, DEAUTHENTICATE); 1167
1108 CMD(disassoc, DISASSOCIATE); 1168 /* always allow using the variables */
1109 CMD(join_ibss, JOIN_IBSS); 1169 if (!split) {
1110 CMD(join_mesh, JOIN_MESH); 1170 split_start = &start;
1111 CMD(set_pmksa, SET_PMKSA); 1171 band_start = &start_band;
1112 CMD(del_pmksa, DEL_PMKSA); 1172 chan_start = &start_chan;
1113 CMD(flush_pmksa, FLUSH_PMKSA);
1114 if (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
1115 CMD(remain_on_channel, REMAIN_ON_CHANNEL);
1116 CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
1117 CMD(mgmt_tx, FRAME);
1118 CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL);
1119 if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
1120 i++;
1121 if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS))
1122 goto nla_put_failure;
1123 } 1173 }
1124 if (dev->ops->set_monitor_channel || dev->ops->start_ap || 1174
1125 dev->ops->join_mesh) { 1175 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx) ||
1126 i++; 1176 nla_put_string(msg, NL80211_ATTR_WIPHY_NAME,
1127 if (nla_put_u32(msg, i, NL80211_CMD_SET_CHANNEL)) 1177 wiphy_name(&dev->wiphy)) ||
1178 nla_put_u32(msg, NL80211_ATTR_GENERATION,
1179 cfg80211_rdev_list_generation))
1180 goto nla_put_failure;
1181
1182 switch (*split_start) {
1183 case 0:
1184 if (nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
1185 dev->wiphy.retry_short) ||
1186 nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
1187 dev->wiphy.retry_long) ||
1188 nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
1189 dev->wiphy.frag_threshold) ||
1190 nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
1191 dev->wiphy.rts_threshold) ||
1192 nla_put_u8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
1193 dev->wiphy.coverage_class) ||
1194 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
1195 dev->wiphy.max_scan_ssids) ||
1196 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS,
1197 dev->wiphy.max_sched_scan_ssids) ||
1198 nla_put_u16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
1199 dev->wiphy.max_scan_ie_len) ||
1200 nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
1201 dev->wiphy.max_sched_scan_ie_len) ||
1202 nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS,
1203 dev->wiphy.max_match_sets))
1128 goto nla_put_failure; 1204 goto nla_put_failure;
1129 } 1205
1130 CMD(set_wds_peer, SET_WDS_PEER); 1206 if ((dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) &&
1131 if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) { 1207 nla_put_flag(msg, NL80211_ATTR_SUPPORT_IBSS_RSN))
1132 CMD(tdls_mgmt, TDLS_MGMT); 1208 goto nla_put_failure;
1133 CMD(tdls_oper, TDLS_OPER); 1209 if ((dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) &&
1134 } 1210 nla_put_flag(msg, NL80211_ATTR_SUPPORT_MESH_AUTH))
1135 if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) 1211 goto nla_put_failure;
1136 CMD(sched_scan_start, START_SCHED_SCAN); 1212 if ((dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
1137 CMD(probe_client, PROBE_CLIENT); 1213 nla_put_flag(msg, NL80211_ATTR_SUPPORT_AP_UAPSD))
1138 CMD(set_noack_map, SET_NOACK_MAP); 1214 goto nla_put_failure;
1139 if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) { 1215 if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) &&
1140 i++; 1216 nla_put_flag(msg, NL80211_ATTR_ROAM_SUPPORT))
1141 if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS)) 1217 goto nla_put_failure;
1218 if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
1219 nla_put_flag(msg, NL80211_ATTR_TDLS_SUPPORT))
1220 goto nla_put_failure;
1221 if ((dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) &&
1222 nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP))
1142 goto nla_put_failure; 1223 goto nla_put_failure;
1143 }
1144 CMD(start_p2p_device, START_P2P_DEVICE);
1145 CMD(set_mcast_rate, SET_MCAST_RATE);
1146 1224
1147#ifdef CONFIG_NL80211_TESTMODE 1225 (*split_start)++;
1148 CMD(testmode_cmd, TESTMODE); 1226 if (split)
1149#endif 1227 break;
1228 case 1:
1229 if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES,
1230 sizeof(u32) * dev->wiphy.n_cipher_suites,
1231 dev->wiphy.cipher_suites))
1232 goto nla_put_failure;
1150 1233
1151#undef CMD 1234 if (nla_put_u8(msg, NL80211_ATTR_MAX_NUM_PMKIDS,
1235 dev->wiphy.max_num_pmkids))
1236 goto nla_put_failure;
1152 1237
1153 if (dev->ops->connect || dev->ops->auth) { 1238 if ((dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) &&
1154 i++; 1239 nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE))
1155 if (nla_put_u32(msg, i, NL80211_CMD_CONNECT))
1156 goto nla_put_failure; 1240 goto nla_put_failure;
1157 }
1158 1241
1159 if (dev->ops->disconnect || dev->ops->deauth) { 1242 if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX,
1160 i++; 1243 dev->wiphy.available_antennas_tx) ||
1161 if (nla_put_u32(msg, i, NL80211_CMD_DISCONNECT)) 1244 nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
1245 dev->wiphy.available_antennas_rx))
1162 goto nla_put_failure; 1246 goto nla_put_failure;
1163 }
1164 1247
1165 nla_nest_end(msg, nl_cmds); 1248 if ((dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) &&
1249 nla_put_u32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD,
1250 dev->wiphy.probe_resp_offload))
1251 goto nla_put_failure;
1166 1252
1167 if (dev->ops->remain_on_channel && 1253 if ((dev->wiphy.available_antennas_tx ||
1168 (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) && 1254 dev->wiphy.available_antennas_rx) &&
1169 nla_put_u32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION, 1255 dev->ops->get_antenna) {
1170 dev->wiphy.max_remain_on_channel_duration)) 1256 u32 tx_ant = 0, rx_ant = 0;
1171 goto nla_put_failure; 1257 int res;
1258 res = rdev_get_antenna(dev, &tx_ant, &rx_ant);
1259 if (!res) {
1260 if (nla_put_u32(msg,
1261 NL80211_ATTR_WIPHY_ANTENNA_TX,
1262 tx_ant) ||
1263 nla_put_u32(msg,
1264 NL80211_ATTR_WIPHY_ANTENNA_RX,
1265 rx_ant))
1266 goto nla_put_failure;
1267 }
1268 }
1172 1269
1173 if ((dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) && 1270 (*split_start)++;
1174 nla_put_flag(msg, NL80211_ATTR_OFFCHANNEL_TX_OK)) 1271 if (split)
1175 goto nla_put_failure; 1272 break;
1273 case 2:
1274 if (nl80211_put_iftypes(msg, NL80211_ATTR_SUPPORTED_IFTYPES,
1275 dev->wiphy.interface_modes))
1276 goto nla_put_failure;
1277 (*split_start)++;
1278 if (split)
1279 break;
1280 case 3:
1281 nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS);
1282 if (!nl_bands)
1283 goto nla_put_failure;
1176 1284
1177 if (mgmt_stypes) { 1285 for (band = *band_start; band < IEEE80211_NUM_BANDS; band++) {
1178 u16 stypes; 1286 struct ieee80211_supported_band *sband;
1179 struct nlattr *nl_ftypes, *nl_ifs;
1180 enum nl80211_iftype ift;
1181 1287
1182 nl_ifs = nla_nest_start(msg, NL80211_ATTR_TX_FRAME_TYPES); 1288 sband = dev->wiphy.bands[band];
1183 if (!nl_ifs)
1184 goto nla_put_failure;
1185 1289
1186 for (ift = 0; ift < NUM_NL80211_IFTYPES; ift++) { 1290 if (!sband)
1187 nl_ftypes = nla_nest_start(msg, ift); 1291 continue;
1188 if (!nl_ftypes) 1292
1293 nl_band = nla_nest_start(msg, band);
1294 if (!nl_band)
1189 goto nla_put_failure; 1295 goto nla_put_failure;
1190 i = 0; 1296
1191 stypes = mgmt_stypes[ift].tx; 1297 switch (*chan_start) {
1192 while (stypes) { 1298 case 0:
1193 if ((stypes & 1) && 1299 if (nl80211_send_band_rateinfo(msg, sband))
1194 nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE,
1195 (i << 4) | IEEE80211_FTYPE_MGMT))
1196 goto nla_put_failure; 1300 goto nla_put_failure;
1197 stypes >>= 1; 1301 (*chan_start)++;
1198 i++; 1302 if (split)
1303 break;
1304 default:
1305 /* add frequencies */
1306 nl_freqs = nla_nest_start(
1307 msg, NL80211_BAND_ATTR_FREQS);
1308 if (!nl_freqs)
1309 goto nla_put_failure;
1310
1311 for (i = *chan_start - 1;
1312 i < sband->n_channels;
1313 i++) {
1314 nl_freq = nla_nest_start(msg, i);
1315 if (!nl_freq)
1316 goto nla_put_failure;
1317
1318 chan = &sband->channels[i];
1319
1320 if (nl80211_msg_put_channel(msg, chan,
1321 split))
1322 goto nla_put_failure;
1323
1324 nla_nest_end(msg, nl_freq);
1325 if (split)
1326 break;
1327 }
1328 if (i < sband->n_channels)
1329 *chan_start = i + 2;
1330 else
1331 *chan_start = 0;
1332 nla_nest_end(msg, nl_freqs);
1333 }
1334
1335 nla_nest_end(msg, nl_band);
1336
1337 if (split) {
1338 /* start again here */
1339 if (*chan_start)
1340 band--;
1341 break;
1199 } 1342 }
1200 nla_nest_end(msg, nl_ftypes);
1201 } 1343 }
1344 nla_nest_end(msg, nl_bands);
1202 1345
1203 nla_nest_end(msg, nl_ifs); 1346 if (band < IEEE80211_NUM_BANDS)
1347 *band_start = band + 1;
1348 else
1349 *band_start = 0;
1204 1350
1205 nl_ifs = nla_nest_start(msg, NL80211_ATTR_RX_FRAME_TYPES); 1351 /* if bands & channels are done, continue outside */
1206 if (!nl_ifs) 1352 if (*band_start == 0 && *chan_start == 0)
1353 (*split_start)++;
1354 if (split)
1355 break;
1356 case 4:
1357 nl_cmds = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_COMMANDS);
1358 if (!nl_cmds)
1207 goto nla_put_failure; 1359 goto nla_put_failure;
1208 1360
1209 for (ift = 0; ift < NUM_NL80211_IFTYPES; ift++) { 1361 i = 0;
1210 nl_ftypes = nla_nest_start(msg, ift); 1362#define CMD(op, n) \
1211 if (!nl_ftypes) 1363 do { \
1364 if (dev->ops->op) { \
1365 i++; \
1366 if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \
1367 goto nla_put_failure; \
1368 } \
1369 } while (0)
1370
1371 CMD(add_virtual_intf, NEW_INTERFACE);
1372 CMD(change_virtual_intf, SET_INTERFACE);
1373 CMD(add_key, NEW_KEY);
1374 CMD(start_ap, START_AP);
1375 CMD(add_station, NEW_STATION);
1376 CMD(add_mpath, NEW_MPATH);
1377 CMD(update_mesh_config, SET_MESH_CONFIG);
1378 CMD(change_bss, SET_BSS);
1379 CMD(auth, AUTHENTICATE);
1380 CMD(assoc, ASSOCIATE);
1381 CMD(deauth, DEAUTHENTICATE);
1382 CMD(disassoc, DISASSOCIATE);
1383 CMD(join_ibss, JOIN_IBSS);
1384 CMD(join_mesh, JOIN_MESH);
1385 CMD(set_pmksa, SET_PMKSA);
1386 CMD(del_pmksa, DEL_PMKSA);
1387 CMD(flush_pmksa, FLUSH_PMKSA);
1388 if (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
1389 CMD(remain_on_channel, REMAIN_ON_CHANNEL);
1390 CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
1391 CMD(mgmt_tx, FRAME);
1392 CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL);
1393 if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
1394 i++;
1395 if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS))
1212 goto nla_put_failure; 1396 goto nla_put_failure;
1213 i = 0;
1214 stypes = mgmt_stypes[ift].rx;
1215 while (stypes) {
1216 if ((stypes & 1) &&
1217 nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE,
1218 (i << 4) | IEEE80211_FTYPE_MGMT))
1219 goto nla_put_failure;
1220 stypes >>= 1;
1221 i++;
1222 }
1223 nla_nest_end(msg, nl_ftypes);
1224 } 1397 }
1225 nla_nest_end(msg, nl_ifs); 1398 if (dev->ops->set_monitor_channel || dev->ops->start_ap ||
1226 } 1399 dev->ops->join_mesh) {
1400 i++;
1401 if (nla_put_u32(msg, i, NL80211_CMD_SET_CHANNEL))
1402 goto nla_put_failure;
1403 }
1404 CMD(set_wds_peer, SET_WDS_PEER);
1405 if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) {
1406 CMD(tdls_mgmt, TDLS_MGMT);
1407 CMD(tdls_oper, TDLS_OPER);
1408 }
1409 if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
1410 CMD(sched_scan_start, START_SCHED_SCAN);
1411 CMD(probe_client, PROBE_CLIENT);
1412 CMD(set_noack_map, SET_NOACK_MAP);
1413 if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) {
1414 i++;
1415 if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS))
1416 goto nla_put_failure;
1417 }
1418 CMD(start_p2p_device, START_P2P_DEVICE);
1419 CMD(set_mcast_rate, SET_MCAST_RATE);
1227 1420
1228#ifdef CONFIG_PM 1421#ifdef CONFIG_NL80211_TESTMODE
1229 if (dev->wiphy.wowlan.flags || dev->wiphy.wowlan.n_patterns) { 1422 CMD(testmode_cmd, TESTMODE);
1230 struct nlattr *nl_wowlan; 1423#endif
1231 1424
1232 nl_wowlan = nla_nest_start(msg, 1425#undef CMD
1233 NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED);
1234 if (!nl_wowlan)
1235 goto nla_put_failure;
1236 1426
1237 if (((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY) && 1427 if (dev->ops->connect || dev->ops->auth) {
1238 nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) || 1428 i++;
1239 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT) && 1429 if (nla_put_u32(msg, i, NL80211_CMD_CONNECT))
1240 nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
1241 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT) &&
1242 nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
1243 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) &&
1244 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) ||
1245 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
1246 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
1247 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) &&
1248 nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
1249 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) &&
1250 nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
1251 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE) &&
1252 nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
1253 goto nla_put_failure;
1254 if (dev->wiphy.wowlan.n_patterns) {
1255 struct nl80211_wowlan_pattern_support pat = {
1256 .max_patterns = dev->wiphy.wowlan.n_patterns,
1257 .min_pattern_len =
1258 dev->wiphy.wowlan.pattern_min_len,
1259 .max_pattern_len =
1260 dev->wiphy.wowlan.pattern_max_len,
1261 .max_pkt_offset =
1262 dev->wiphy.wowlan.max_pkt_offset,
1263 };
1264 if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
1265 sizeof(pat), &pat))
1266 goto nla_put_failure; 1430 goto nla_put_failure;
1267 } 1431 }
1268 1432
1269 nla_nest_end(msg, nl_wowlan); 1433 if (dev->ops->disconnect || dev->ops->deauth) {
1270 } 1434 i++;
1435 if (nla_put_u32(msg, i, NL80211_CMD_DISCONNECT))
1436 goto nla_put_failure;
1437 }
1438
1439 nla_nest_end(msg, nl_cmds);
1440 (*split_start)++;
1441 if (split)
1442 break;
1443 case 5:
1444 if (dev->ops->remain_on_channel &&
1445 (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) &&
1446 nla_put_u32(msg,
1447 NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
1448 dev->wiphy.max_remain_on_channel_duration))
1449 goto nla_put_failure;
1450
1451 if ((dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) &&
1452 nla_put_flag(msg, NL80211_ATTR_OFFCHANNEL_TX_OK))
1453 goto nla_put_failure;
1454
1455 if (nl80211_send_mgmt_stypes(msg, mgmt_stypes))
1456 goto nla_put_failure;
1457 (*split_start)++;
1458 if (split)
1459 break;
1460 case 6:
1461#ifdef CONFIG_PM
1462 if (nl80211_send_wowlan(msg, dev, split))
1463 goto nla_put_failure;
1464 (*split_start)++;
1465 if (split)
1466 break;
1467#else
1468 (*split_start)++;
1271#endif 1469#endif
1470 case 7:
1471 if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES,
1472 dev->wiphy.software_iftypes))
1473 goto nla_put_failure;
1272 1474
1273 if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES, 1475 if (nl80211_put_iface_combinations(&dev->wiphy, msg, split))
1274 dev->wiphy.software_iftypes)) 1476 goto nla_put_failure;
1275 goto nla_put_failure;
1276 1477
1277 if (nl80211_put_iface_combinations(&dev->wiphy, msg)) 1478 (*split_start)++;
1278 goto nla_put_failure; 1479 if (split)
1480 break;
1481 case 8:
1482 if ((dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) &&
1483 nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME,
1484 dev->wiphy.ap_sme_capa))
1485 goto nla_put_failure;
1279 1486
1280 if ((dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) && 1487 features = dev->wiphy.features;
1281 nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME, 1488 /*
1282 dev->wiphy.ap_sme_capa)) 1489 * We can only add the per-channel limit information if the
1283 goto nla_put_failure; 1490 * dump is split, otherwise it makes it too big. Therefore
1491 * only advertise it in that case.
1492 */
1493 if (split)
1494 features |= NL80211_FEATURE_ADVERTISE_CHAN_LIMITS;
1495 if (nla_put_u32(msg, NL80211_ATTR_FEATURE_FLAGS, features))
1496 goto nla_put_failure;
1284 1497
1285 if (nla_put_u32(msg, NL80211_ATTR_FEATURE_FLAGS, 1498 if (dev->wiphy.ht_capa_mod_mask &&
1286 dev->wiphy.features)) 1499 nla_put(msg, NL80211_ATTR_HT_CAPABILITY_MASK,
1287 goto nla_put_failure; 1500 sizeof(*dev->wiphy.ht_capa_mod_mask),
1501 dev->wiphy.ht_capa_mod_mask))
1502 goto nla_put_failure;
1288 1503
1289 if (dev->wiphy.ht_capa_mod_mask && 1504 if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME &&
1290 nla_put(msg, NL80211_ATTR_HT_CAPABILITY_MASK, 1505 dev->wiphy.max_acl_mac_addrs &&
1291 sizeof(*dev->wiphy.ht_capa_mod_mask), 1506 nla_put_u32(msg, NL80211_ATTR_MAC_ACL_MAX,
1292 dev->wiphy.ht_capa_mod_mask)) 1507 dev->wiphy.max_acl_mac_addrs))
1293 goto nla_put_failure; 1508 goto nla_put_failure;
1294 1509
1295 if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME && 1510 /*
1296 dev->wiphy.max_acl_mac_addrs && 1511 * Any information below this point is only available to
1297 nla_put_u32(msg, NL80211_ATTR_MAC_ACL_MAX, 1512 * applications that can deal with it being split. This
1298 dev->wiphy.max_acl_mac_addrs)) 1513 * helps ensure that newly added capabilities don't break
1299 goto nla_put_failure; 1514 * older tools by overrunning their buffers.
1515 *
1516 * We still increment split_start so that in the split
1517 * case we'll continue with more data in the next round,
1518 * but break unconditionally so unsplit data stops here.
1519 */
1520 (*split_start)++;
1521 break;
1522 case 9:
1523 if (dev->wiphy.extended_capabilities &&
1524 (nla_put(msg, NL80211_ATTR_EXT_CAPA,
1525 dev->wiphy.extended_capabilities_len,
1526 dev->wiphy.extended_capabilities) ||
1527 nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK,
1528 dev->wiphy.extended_capabilities_len,
1529 dev->wiphy.extended_capabilities_mask)))
1530 goto nla_put_failure;
1300 1531
1532 if (dev->wiphy.vht_capa_mod_mask &&
1533 nla_put(msg, NL80211_ATTR_VHT_CAPABILITY_MASK,
1534 sizeof(*dev->wiphy.vht_capa_mod_mask),
1535 dev->wiphy.vht_capa_mod_mask))
1536 goto nla_put_failure;
1537
1538 /* done */
1539 *split_start = 0;
1540 break;
1541 }
1301 return genlmsg_end(msg, hdr); 1542 return genlmsg_end(msg, hdr);
1302 1543
1303 nla_put_failure: 1544 nla_put_failure:
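The rewritten nl80211_send_wiphy above replaces the old monolithic attribute fill with a switch on *split_start whose cases fall through: each case emits one group of attributes, bumps the resume counter, and breaks early only when the dump is split, so the next call picks up exactly where the previous message ran out of room. A small standalone sketch of that resumable-fill pattern follows; the buffer, emit() and the chunk labels are made up for illustration and are not the kernel code.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Pretend "message buffer" with a hard size cap, standing in for the skb. */
struct buf { char data[32]; size_t used; };

static int emit(struct buf *b, const char *part)
{
	size_t len = strlen(part);

	if (b->used + len > sizeof(b->data))
		return -1;			/* does not fit, like -ENOBUFS */
	memcpy(b->data + b->used, part, len);
	b->used += len;
	return 0;
}

/* Fill one chunk; *state records where to resume.  Cases fall through when
 * the dump is not split, mirroring the split_start switch above. */
static int fill_chunk(struct buf *b, bool split, int *state)
{
	switch (*state) {
	case 0:
		if (emit(b, "[limits]") < 0)
			return -1;
		(*state)++;
		if (split)
			break;
		/* fall through */
	case 1:
		if (emit(b, "[bands]") < 0)
			return -1;
		(*state)++;
		if (split)
			break;
		/* fall through */
	case 2:
		if (emit(b, "[commands]") < 0)
			return -1;
		*state = 0;			/* done */
		break;
	}
	return 0;
}

int main(void)
{
	int state = 0;

	do {
		struct buf b = { .used = 0 };

		if (fill_chunk(&b, true, &state) < 0)
			return 1;
		printf("chunk: %.*s\n", (int)b.used, b.data);
	} while (state);
	return 0;
}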
@@ -1310,39 +1551,80 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
1310 int idx = 0, ret; 1551 int idx = 0, ret;
1311 int start = cb->args[0]; 1552 int start = cb->args[0];
1312 struct cfg80211_registered_device *dev; 1553 struct cfg80211_registered_device *dev;
1554 s64 filter_wiphy = -1;
1555 bool split = false;
1556 struct nlattr **tb = nl80211_fam.attrbuf;
1557 int res;
1313 1558
1314 mutex_lock(&cfg80211_mutex); 1559 mutex_lock(&cfg80211_mutex);
1560 res = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
1561 tb, nl80211_fam.maxattr, nl80211_policy);
1562 if (res == 0) {
1563 split = tb[NL80211_ATTR_SPLIT_WIPHY_DUMP];
1564 if (tb[NL80211_ATTR_WIPHY])
1565 filter_wiphy = nla_get_u32(tb[NL80211_ATTR_WIPHY]);
1566 if (tb[NL80211_ATTR_WDEV])
1567 filter_wiphy = nla_get_u64(tb[NL80211_ATTR_WDEV]) >> 32;
1568 if (tb[NL80211_ATTR_IFINDEX]) {
1569 struct net_device *netdev;
1570 int ifidx = nla_get_u32(tb[NL80211_ATTR_IFINDEX]);
1571
1572 netdev = dev_get_by_index(sock_net(skb->sk), ifidx);
1573 if (!netdev) {
1574 mutex_unlock(&cfg80211_mutex);
1575 return -ENODEV;
1576 }
1577 if (netdev->ieee80211_ptr) {
1578 dev = wiphy_to_dev(
1579 netdev->ieee80211_ptr->wiphy);
1580 filter_wiphy = dev->wiphy_idx;
1581 }
1582 dev_put(netdev);
1583 }
1584 }
1585
1315 list_for_each_entry(dev, &cfg80211_rdev_list, list) { 1586 list_for_each_entry(dev, &cfg80211_rdev_list, list) {
1316 if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk))) 1587 if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk)))
1317 continue; 1588 continue;
1318 if (++idx <= start) 1589 if (++idx <= start)
1319 continue; 1590 continue;
1320 ret = nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid, 1591 if (filter_wiphy != -1 && dev->wiphy_idx != filter_wiphy)
1321 cb->nlh->nlmsg_seq, NLM_F_MULTI, 1592 continue;
1322 dev); 1593 /* attempt to fit multiple wiphy data chunks into the skb */
1323 if (ret < 0) { 1594 do {
1324 /* 1595 ret = nl80211_send_wiphy(dev, skb,
1325 * If sending the wiphy data didn't fit (ENOBUFS or 1596 NETLINK_CB(cb->skb).portid,
1326 * EMSGSIZE returned), this SKB is still empty (so 1597 cb->nlh->nlmsg_seq,
1327 * it's not too big because another wiphy dataset is 1598 NLM_F_MULTI,
1328 * already in the skb) and we've not tried to adjust 1599 split, &cb->args[1],
1329 * the dump allocation yet ... then adjust the alloc 1600 &cb->args[2],
1330 * size to be bigger, and return 1 but with the empty 1601 &cb->args[3]);
1331 * skb. This results in an empty message being RX'ed 1602 if (ret < 0) {
1332 * in userspace, but that is ignored. 1603 /*
1333 * 1604 * If sending the wiphy data didn't fit (ENOBUFS
1334 * We can then retry with the larger buffer. 1605 * or EMSGSIZE returned), this SKB is still
1335 */ 1606 * empty (so it's not too big because another
1336 if ((ret == -ENOBUFS || ret == -EMSGSIZE) && 1607 * wiphy dataset is already in the skb) and
1337 !skb->len && 1608 * we've not tried to adjust the dump allocation
1338 cb->min_dump_alloc < 4096) { 1609 * yet ... then adjust the alloc size to be
1339 cb->min_dump_alloc = 4096; 1610 * bigger, and return 1 but with the empty skb.
1340 mutex_unlock(&cfg80211_mutex); 1611 * This results in an empty message being RX'ed
1341 return 1; 1612 * in userspace, but that is ignored.
1613 *
1614 * We can then retry with the larger buffer.
1615 */
1616 if ((ret == -ENOBUFS || ret == -EMSGSIZE) &&
1617 !skb->len &&
1618 cb->min_dump_alloc < 4096) {
1619 cb->min_dump_alloc = 4096;
1620 mutex_unlock(&cfg80211_mutex);
1621 return 1;
1622 }
1623 idx--;
1624 break;
1342 } 1625 }
1343 idx--; 1626 } while (cb->args[1] > 0);
1344 break; 1627 break;
1345 }
1346 } 1628 }
1347 mutex_unlock(&cfg80211_mutex); 1629 mutex_unlock(&cfg80211_mutex);
1348 1630
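The dump handler above now loops, packing as many chunks of one wiphy as fit into the skb (the do/while on cb->args[1]), and keeps the existing fallback: if even the first chunk did not fit into the default allocation, it raises cb->min_dump_alloc to 4096 and returns 1 with an empty skb so netlink retries with a bigger buffer. Below is a standalone sketch of just that retry rule; try_fill(), dump_once() and the sizes are hypothetical stand-ins.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in: succeeds only when the payload fits the buffer. */
static int try_fill(size_t bufsize, size_t need)
{
	return need <= bufsize ? 0 : -1;
}

/* Mimics the fallback: if nothing fit and the allocation hint has not been
 * raised yet, raise it once (to 4096, like cb->min_dump_alloc) and report
 * "retry with an empty message" instead of failing outright. */
static int dump_once(size_t *min_alloc, size_t need, bool *retry)
{
	*retry = false;
	if (try_fill(*min_alloc, need) == 0)
		return 0;			/* the chunk went out */
	if (*min_alloc < 4096) {
		*min_alloc = 4096;
		*retry = true;
		return 0;
	}
	return -1;				/* still too big, give up */
}

int main(void)
{
	size_t min_alloc = 1024;		/* pretend default allocation */
	bool retry;

	if (dump_once(&min_alloc, 3000, &retry) == 0 && retry)
		dump_once(&min_alloc, 3000, &retry);
	printf("allocation hint now %zu, retry=%d\n", min_alloc, retry);
	return 0;
}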
@@ -1360,7 +1642,8 @@ static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info)
1360 if (!msg) 1642 if (!msg)
1361 return -ENOMEM; 1643 return -ENOMEM;
1362 1644
1363 if (nl80211_send_wiphy(msg, info->snd_portid, info->snd_seq, 0, dev) < 0) { 1645 if (nl80211_send_wiphy(dev, msg, info->snd_portid, info->snd_seq, 0,
1646 false, NULL, NULL, NULL) < 0) {
1364 nlmsg_free(msg); 1647 nlmsg_free(msg);
1365 return -ENOBUFS; 1648 return -ENOBUFS;
1366 } 1649 }
@@ -2967,6 +3250,7 @@ static int parse_station_flags(struct genl_info *info,
2967 sta_flags = nla_data(nla); 3250 sta_flags = nla_data(nla);
2968 params->sta_flags_mask = sta_flags->mask; 3251 params->sta_flags_mask = sta_flags->mask;
2969 params->sta_flags_set = sta_flags->set; 3252 params->sta_flags_set = sta_flags->set;
3253 params->sta_flags_set &= params->sta_flags_mask;
2970 if ((params->sta_flags_mask | 3254 if ((params->sta_flags_mask |
2971 params->sta_flags_set) & BIT(__NL80211_STA_FLAG_INVALID)) 3255 params->sta_flags_set) & BIT(__NL80211_STA_FLAG_INVALID))
2972 return -EINVAL; 3256 return -EINVAL;
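The one-line addition above clamps sta_flags_set to sta_flags_mask, so a flag bit that userspace lit in the set field without also covering it in the mask can never reach the driver as a value change. A minimal illustration of that mask/set convention; the flag names are hypothetical stand-ins for the NL80211_STA_FLAG_* bits.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

/* Hypothetical flag bits standing in for the NL80211_STA_FLAG_* values. */
enum { FLAG_AUTHORIZED, FLAG_WME, FLAG_MFP };

int main(void)
{
	/* Userspace asked to change AUTHORIZED only (the mask), but also left
	 * the WME bit lit in "set".  Clamping set to the mask drops it, so
	 * only flags the caller asked to change carry a value. */
	uint32_t mask = BIT(FLAG_AUTHORIZED);
	uint32_t set  = BIT(FLAG_AUTHORIZED) | BIT(FLAG_WME);

	set &= mask;		/* the new clamp in parse_station_flags() */

	printf("mask=0x%x set=0x%x\n", mask, set);
	return 0;
}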
@@ -3320,6 +3604,136 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
3320 return genlmsg_reply(msg, info); 3604 return genlmsg_reply(msg, info);
3321} 3605}
3322 3606
3607int cfg80211_check_station_change(struct wiphy *wiphy,
3608 struct station_parameters *params,
3609 enum cfg80211_station_type statype)
3610{
3611 if (params->listen_interval != -1)
3612 return -EINVAL;
3613 if (params->aid)
3614 return -EINVAL;
3615
3616 /* When you run into this, adjust the code below for the new flag */
3617 BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);
3618
3619 switch (statype) {
3620 case CFG80211_STA_MESH_PEER_KERNEL:
3621 case CFG80211_STA_MESH_PEER_USER:
3622 /*
3623 * No ignoring the TDLS flag here -- the userspace mesh
3624 * code doesn't have the bug of including TDLS in the
3625 * mask everywhere.
3626 */
3627 if (params->sta_flags_mask &
3628 ~(BIT(NL80211_STA_FLAG_AUTHENTICATED) |
3629 BIT(NL80211_STA_FLAG_MFP) |
3630 BIT(NL80211_STA_FLAG_AUTHORIZED)))
3631 return -EINVAL;
3632 break;
3633 case CFG80211_STA_TDLS_PEER_SETUP:
3634 case CFG80211_STA_TDLS_PEER_ACTIVE:
3635 if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
3636 return -EINVAL;
3637 /* ignore since it can't change */
3638 params->sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
3639 break;
3640 default:
3641 /* disallow mesh-specific things */
3642 if (params->plink_action != NL80211_PLINK_ACTION_NO_ACTION)
3643 return -EINVAL;
3644 if (params->local_pm)
3645 return -EINVAL;
3646 if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE)
3647 return -EINVAL;
3648 }
3649
3650 if (statype != CFG80211_STA_TDLS_PEER_SETUP &&
3651 statype != CFG80211_STA_TDLS_PEER_ACTIVE) {
3652 /* TDLS can't be set, ... */
3653 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
3654 return -EINVAL;
3655 /*
3656 * ... but don't bother the driver with it. This works around
3657 * a hostapd/wpa_supplicant issue -- it always includes the
3658 * TDLS_PEER flag in the mask even for AP mode.
3659 */
3660 params->sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
3661 }
3662
3663 if (statype != CFG80211_STA_TDLS_PEER_SETUP) {
3664 /* reject other things that can't change */
3665 if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD)
3666 return -EINVAL;
3667 if (params->sta_modify_mask & STATION_PARAM_APPLY_CAPABILITY)
3668 return -EINVAL;
3669 if (params->supported_rates)
3670 return -EINVAL;
3671 if (params->ext_capab || params->ht_capa || params->vht_capa)
3672 return -EINVAL;
3673 }
3674
3675 if (statype != CFG80211_STA_AP_CLIENT) {
3676 if (params->vlan)
3677 return -EINVAL;
3678 }
3679
3680 switch (statype) {
3681 case CFG80211_STA_AP_MLME_CLIENT:
3682 /* Use this only for authorizing/unauthorizing a station */
3683 if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)))
3684 return -EOPNOTSUPP;
3685 break;
3686 case CFG80211_STA_AP_CLIENT:
3687 /* accept only the listed bits */
3688 if (params->sta_flags_mask &
3689 ~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
3690 BIT(NL80211_STA_FLAG_AUTHENTICATED) |
3691 BIT(NL80211_STA_FLAG_ASSOCIATED) |
3692 BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
3693 BIT(NL80211_STA_FLAG_WME) |
3694 BIT(NL80211_STA_FLAG_MFP)))
3695 return -EINVAL;
3696
3697 /* but authenticated/associated only if driver handles it */
3698 if (!(wiphy->features & NL80211_FEATURE_FULL_AP_CLIENT_STATE) &&
3699 params->sta_flags_mask &
3700 (BIT(NL80211_STA_FLAG_AUTHENTICATED) |
3701 BIT(NL80211_STA_FLAG_ASSOCIATED)))
3702 return -EINVAL;
3703 break;
3704 case CFG80211_STA_IBSS:
3705 case CFG80211_STA_AP_STA:
3706 /* reject any changes other than AUTHORIZED */
3707 if (params->sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
3708 return -EINVAL;
3709 break;
3710 case CFG80211_STA_TDLS_PEER_SETUP:
3711 /* reject any changes other than AUTHORIZED or WME */
3712 if (params->sta_flags_mask & ~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
3713 BIT(NL80211_STA_FLAG_WME)))
3714 return -EINVAL;
3715 /* force (at least) rates when authorizing */
3716 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED) &&
3717 !params->supported_rates)
3718 return -EINVAL;
3719 break;
3720 case CFG80211_STA_TDLS_PEER_ACTIVE:
3721 /* reject any changes */
3722 return -EINVAL;
3723 case CFG80211_STA_MESH_PEER_KERNEL:
3724 if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE)
3725 return -EINVAL;
3726 break;
3727 case CFG80211_STA_MESH_PEER_USER:
3728 if (params->plink_action != NL80211_PLINK_ACTION_NO_ACTION)
3729 return -EINVAL;
3730 break;
3731 }
3732
3733 return 0;
3734}
3735EXPORT_SYMBOL(cfg80211_check_station_change);
3736
3323/* 3737/*
3324 * Get vlan interface making sure it is running and on the right wiphy. 3738 * Get vlan interface making sure it is running and on the right wiphy.
3325 */ 3739 */
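cfg80211_check_station_change moves the per-interface-type sanity checks out of nl80211_set_station so that drivers, which know the station's actual state, can run them from their change_station op. The AP-client branch in particular accepts only a small set of flag bits and gates AUTHENTICATED/ASSOCIATED on NL80211_FEATURE_FULL_AP_CLIENT_STATE. A standalone sketch of just that branch's logic follows; the bit numbers and function name are invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

/* Hypothetical stand-ins for the NL80211_STA_FLAG_* bit positions. */
enum {
	STA_FLAG_AUTHORIZED, STA_FLAG_AUTHENTICATED, STA_FLAG_ASSOCIATED,
	STA_FLAG_SHORT_PREAMBLE, STA_FLAG_WME, STA_FLAG_MFP, STA_FLAG_TDLS,
};

/* Mirrors the CFG80211_STA_AP_CLIENT branch: only the listed bits may be
 * touched, and AUTHENTICATED/ASSOCIATED only when the driver handles the
 * full AP client state machine. */
static int check_ap_client_flags(uint32_t mask, bool full_ap_client_state)
{
	const uint32_t allowed = BIT(STA_FLAG_AUTHORIZED) |
				 BIT(STA_FLAG_AUTHENTICATED) |
				 BIT(STA_FLAG_ASSOCIATED) |
				 BIT(STA_FLAG_SHORT_PREAMBLE) |
				 BIT(STA_FLAG_WME) |
				 BIT(STA_FLAG_MFP);

	if (mask & ~allowed)
		return -1;		/* e.g. the TDLS bit: -EINVAL */
	if (!full_ap_client_state &&
	    (mask & (BIT(STA_FLAG_AUTHENTICATED) | BIT(STA_FLAG_ASSOCIATED))))
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", check_ap_client_flags(BIT(STA_FLAG_AUTHORIZED), false));
	printf("%d\n", check_ap_client_flags(BIT(STA_FLAG_ASSOCIATED), false));
	return 0;
}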
@@ -3342,6 +3756,13 @@ static struct net_device *get_vlan(struct genl_info *info,
3342 goto error; 3756 goto error;
3343 } 3757 }
3344 3758
3759 if (v->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
3760 v->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
3761 v->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) {
3762 ret = -EINVAL;
3763 goto error;
3764 }
3765
3345 if (!netif_running(v)) { 3766 if (!netif_running(v)) {
3346 ret = -ENETDOWN; 3767 ret = -ENETDOWN;
3347 goto error; 3768 goto error;
@@ -3359,21 +3780,13 @@ nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] __read_mostly = {
3359 [NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 }, 3780 [NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 },
3360}; 3781};
3361 3782
3362static int nl80211_set_station_tdls(struct genl_info *info, 3783static int nl80211_parse_sta_wme(struct genl_info *info,
3363 struct station_parameters *params) 3784 struct station_parameters *params)
3364{ 3785{
3365 struct nlattr *tb[NL80211_STA_WME_MAX + 1]; 3786 struct nlattr *tb[NL80211_STA_WME_MAX + 1];
3366 struct nlattr *nla; 3787 struct nlattr *nla;
3367 int err; 3788 int err;
3368 3789
3369 /* Dummy STA entry gets updated once the peer capabilities are known */
3370 if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
3371 params->ht_capa =
3372 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
3373 if (info->attrs[NL80211_ATTR_VHT_CAPABILITY])
3374 params->vht_capa =
3375 nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]);
3376
3377 /* parse WME attributes if present */ 3790 /* parse WME attributes if present */
3378 if (!info->attrs[NL80211_ATTR_STA_WME]) 3791 if (!info->attrs[NL80211_ATTR_STA_WME])
3379 return 0; 3792 return 0;
@@ -3401,18 +3814,34 @@ static int nl80211_set_station_tdls(struct genl_info *info,
3401 return 0; 3814 return 0;
3402} 3815}
3403 3816
3817static int nl80211_set_station_tdls(struct genl_info *info,
3818 struct station_parameters *params)
3819{
3820 /* Dummy STA entry gets updated once the peer capabilities are known */
3821 if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
3822 params->ht_capa =
3823 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
3824 if (info->attrs[NL80211_ATTR_VHT_CAPABILITY])
3825 params->vht_capa =
3826 nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]);
3827
3828 return nl80211_parse_sta_wme(info, params);
3829}
3830
3404static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) 3831static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
3405{ 3832{
3406 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 3833 struct cfg80211_registered_device *rdev = info->user_ptr[0];
3407 int err;
3408 struct net_device *dev = info->user_ptr[1]; 3834 struct net_device *dev = info->user_ptr[1];
3409 struct station_parameters params; 3835 struct station_parameters params;
3410 u8 *mac_addr = NULL; 3836 u8 *mac_addr;
3837 int err;
3411 3838
3412 memset(&params, 0, sizeof(params)); 3839 memset(&params, 0, sizeof(params));
3413 3840
3414 params.listen_interval = -1; 3841 params.listen_interval = -1;
3415 params.plink_state = -1; 3842
3843 if (!rdev->ops->change_station)
3844 return -EOPNOTSUPP;
3416 3845
3417 if (info->attrs[NL80211_ATTR_STA_AID]) 3846 if (info->attrs[NL80211_ATTR_STA_AID])
3418 return -EINVAL; 3847 return -EINVAL;
@@ -3445,19 +3874,23 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
3445 if (info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]) 3874 if (info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL])
3446 return -EINVAL; 3875 return -EINVAL;
3447 3876
3448 if (!rdev->ops->change_station)
3449 return -EOPNOTSUPP;
3450
3451 if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params)) 3877 if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params))
3452 return -EINVAL; 3878 return -EINVAL;
3453 3879
3454 if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) 3880 if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) {
3455 params.plink_action = 3881 params.plink_action =
3456 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]); 3882 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
3883 if (params.plink_action >= NUM_NL80211_PLINK_ACTIONS)
3884 return -EINVAL;
3885 }
3457 3886
3458 if (info->attrs[NL80211_ATTR_STA_PLINK_STATE]) 3887 if (info->attrs[NL80211_ATTR_STA_PLINK_STATE]) {
3459 params.plink_state = 3888 params.plink_state =
3460 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]); 3889 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]);
3890 if (params.plink_state >= NUM_NL80211_PLINK_STATES)
3891 return -EINVAL;
3892 params.sta_modify_mask |= STATION_PARAM_APPLY_PLINK_STATE;
3893 }
3461 3894
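The plink handling above drops the old plink_state = -1 sentinel in favour of an explicit STATION_PARAM_APPLY_PLINK_STATE bit plus a range check, so zero stays a legal state value and the driver only applies a field whose apply bit is set. A standalone sketch of that modify-mask pattern; the struct, enum and APPLY_ constant below are illustrative, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>

#define APPLY_PLINK_STATE	(1u << 0)	/* illustrative modify-mask bit */

enum plink_state { PLINK_LISTEN, PLINK_ESTAB, NUM_PLINK_STATES };

struct params {
	uint32_t modify_mask;
	enum plink_state plink_state;
};

static int set_plink_state(struct params *p, unsigned int value)
{
	if (value >= NUM_PLINK_STATES)
		return -1;			/* reject out-of-range input */
	p->plink_state = value;
	p->modify_mask |= APPLY_PLINK_STATE;	/* mark the field as present */
	return 0;
}

int main(void)
{
	struct params p = { 0 };

	if (set_plink_state(&p, PLINK_LISTEN))
		return 1;
	if (p.modify_mask & APPLY_PLINK_STATE)
		printf("apply plink_state=%d\n", p.plink_state);
	return 0;
}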
3462 if (info->attrs[NL80211_ATTR_LOCAL_MESH_POWER_MODE]) { 3895 if (info->attrs[NL80211_ATTR_LOCAL_MESH_POWER_MODE]) {
3463 enum nl80211_mesh_power_mode pm = nla_get_u32( 3896 enum nl80211_mesh_power_mode pm = nla_get_u32(
@@ -3470,127 +3903,33 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
3470 params.local_pm = pm; 3903 params.local_pm = pm;
3471 } 3904 }
3472 3905
3906 /* Include parameters for TDLS peer (will check later) */
3907 err = nl80211_set_station_tdls(info, &params);
3908 if (err)
3909 return err;
3910
3911 params.vlan = get_vlan(info, rdev);
3912 if (IS_ERR(params.vlan))
3913 return PTR_ERR(params.vlan);
3914
3473 switch (dev->ieee80211_ptr->iftype) { 3915 switch (dev->ieee80211_ptr->iftype) {
3474 case NL80211_IFTYPE_AP: 3916 case NL80211_IFTYPE_AP:
3475 case NL80211_IFTYPE_AP_VLAN: 3917 case NL80211_IFTYPE_AP_VLAN:
3476 case NL80211_IFTYPE_P2P_GO: 3918 case NL80211_IFTYPE_P2P_GO:
3477 /* disallow mesh-specific things */
3478 if (params.plink_action)
3479 return -EINVAL;
3480 if (params.local_pm)
3481 return -EINVAL;
3482
3483 /* TDLS can't be set, ... */
3484 if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
3485 return -EINVAL;
3486 /*
3487 * ... but don't bother the driver with it. This works around
3488 * a hostapd/wpa_supplicant issue -- it always includes the
3489 * TDLS_PEER flag in the mask even for AP mode.
3490 */
3491 params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
3492
3493 /* accept only the listed bits */
3494 if (params.sta_flags_mask &
3495 ~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
3496 BIT(NL80211_STA_FLAG_AUTHENTICATED) |
3497 BIT(NL80211_STA_FLAG_ASSOCIATED) |
3498 BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
3499 BIT(NL80211_STA_FLAG_WME) |
3500 BIT(NL80211_STA_FLAG_MFP)))
3501 return -EINVAL;
3502
3503 /* but authenticated/associated only if driver handles it */
3504 if (!(rdev->wiphy.features &
3505 NL80211_FEATURE_FULL_AP_CLIENT_STATE) &&
3506 params.sta_flags_mask &
3507 (BIT(NL80211_STA_FLAG_AUTHENTICATED) |
3508 BIT(NL80211_STA_FLAG_ASSOCIATED)))
3509 return -EINVAL;
3510
3511 /* reject other things that can't change */
3512 if (params.supported_rates)
3513 return -EINVAL;
3514 if (info->attrs[NL80211_ATTR_STA_CAPABILITY])
3515 return -EINVAL;
3516 if (info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY])
3517 return -EINVAL;
3518 if (info->attrs[NL80211_ATTR_HT_CAPABILITY] ||
3519 info->attrs[NL80211_ATTR_VHT_CAPABILITY])
3520 return -EINVAL;
3521
3522 /* must be last in here for error handling */
3523 params.vlan = get_vlan(info, rdev);
3524 if (IS_ERR(params.vlan))
3525 return PTR_ERR(params.vlan);
3526 break;
3527 case NL80211_IFTYPE_P2P_CLIENT: 3919 case NL80211_IFTYPE_P2P_CLIENT:
3528 case NL80211_IFTYPE_STATION: 3920 case NL80211_IFTYPE_STATION:
3529 /*
3530 * Don't allow userspace to change the TDLS_PEER flag,
3531 * but silently ignore attempts to change it since we
3532 * don't have state here to verify that it doesn't try
3533 * to change the flag.
3534 */
3535 params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
3536 /* Include parameters for TDLS peer (driver will check) */
3537 err = nl80211_set_station_tdls(info, &params);
3538 if (err)
3539 return err;
3540 /* disallow things sta doesn't support */
3541 if (params.plink_action)
3542 return -EINVAL;
3543 if (params.local_pm)
3544 return -EINVAL;
3545 /* reject any changes other than AUTHORIZED or WME (for TDLS) */
3546 if (params.sta_flags_mask & ~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
3547 BIT(NL80211_STA_FLAG_WME)))
3548 return -EINVAL;
3549 break;
3550 case NL80211_IFTYPE_ADHOC: 3921 case NL80211_IFTYPE_ADHOC:
3551 /* disallow things sta doesn't support */
3552 if (params.plink_action)
3553 return -EINVAL;
3554 if (params.local_pm)
3555 return -EINVAL;
3556 if (info->attrs[NL80211_ATTR_HT_CAPABILITY] ||
3557 info->attrs[NL80211_ATTR_VHT_CAPABILITY])
3558 return -EINVAL;
3559 /* reject any changes other than AUTHORIZED */
3560 if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
3561 return -EINVAL;
3562 break;
3563 case NL80211_IFTYPE_MESH_POINT: 3922 case NL80211_IFTYPE_MESH_POINT:
3564 /* disallow things mesh doesn't support */
3565 if (params.vlan)
3566 return -EINVAL;
3567 if (params.supported_rates)
3568 return -EINVAL;
3569 if (info->attrs[NL80211_ATTR_STA_CAPABILITY])
3570 return -EINVAL;
3571 if (info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY])
3572 return -EINVAL;
3573 if (info->attrs[NL80211_ATTR_HT_CAPABILITY] ||
3574 info->attrs[NL80211_ATTR_VHT_CAPABILITY])
3575 return -EINVAL;
3576 /*
3577 * No special handling for TDLS here -- the userspace
3578 * mesh code doesn't have this bug.
3579 */
3580 if (params.sta_flags_mask &
3581 ~(BIT(NL80211_STA_FLAG_AUTHENTICATED) |
3582 BIT(NL80211_STA_FLAG_MFP) |
3583 BIT(NL80211_STA_FLAG_AUTHORIZED)))
3584 return -EINVAL;
3585 break; 3923 break;
3586 default: 3924 default:
3587 return -EOPNOTSUPP; 3925 err = -EOPNOTSUPP;
3926 goto out_put_vlan;
3588 } 3927 }
3589 3928
3590 /* be aware of params.vlan when changing code here */ 3929 /* driver will call cfg80211_check_station_change() */
3591
3592 err = rdev_change_station(rdev, dev, mac_addr, &params); 3930 err = rdev_change_station(rdev, dev, mac_addr, &params);
3593 3931
3932 out_put_vlan:
3594 if (params.vlan) 3933 if (params.vlan)
3595 dev_put(params.vlan); 3934 dev_put(params.vlan);
3596 3935
@@ -3607,6 +3946,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
3607 3946
3608 memset(&params, 0, sizeof(params)); 3947 memset(&params, 0, sizeof(params));
3609 3948
3949 if (!rdev->ops->add_station)
3950 return -EOPNOTSUPP;
3951
3610 if (!info->attrs[NL80211_ATTR_MAC]) 3952 if (!info->attrs[NL80211_ATTR_MAC])
3611 return -EINVAL; 3953 return -EINVAL;
3612 3954
@@ -3652,50 +3994,32 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
3652 params.vht_capa = 3994 params.vht_capa =
3653 nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]); 3995 nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]);
3654 3996
3655 if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) 3997 if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) {
3656 params.plink_action = 3998 params.plink_action =
3657 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]); 3999 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
4000 if (params.plink_action >= NUM_NL80211_PLINK_ACTIONS)
4001 return -EINVAL;
4002 }
3658 4003
3659 if (!rdev->ops->add_station) 4004 err = nl80211_parse_sta_wme(info, &params);
3660 return -EOPNOTSUPP; 4005 if (err)
4006 return err;
3661 4007
3662 if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params)) 4008 if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params))
3663 return -EINVAL; 4009 return -EINVAL;
3664 4010
4011 /* When you run into this, adjust the code below for the new flag */
4012 BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);
4013
3665 switch (dev->ieee80211_ptr->iftype) { 4014 switch (dev->ieee80211_ptr->iftype) {
3666 case NL80211_IFTYPE_AP: 4015 case NL80211_IFTYPE_AP:
3667 case NL80211_IFTYPE_AP_VLAN: 4016 case NL80211_IFTYPE_AP_VLAN:
3668 case NL80211_IFTYPE_P2P_GO: 4017 case NL80211_IFTYPE_P2P_GO:
3669 /* parse WME attributes if sta is WME capable */ 4018 /* ignore WME attributes if iface/sta is not capable */
3670 if ((rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) && 4019 if (!(rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) ||
3671 (params.sta_flags_set & BIT(NL80211_STA_FLAG_WME)) && 4020 !(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME)))
3672 info->attrs[NL80211_ATTR_STA_WME]) { 4021 params.sta_modify_mask &= ~STATION_PARAM_APPLY_UAPSD;
3673 struct nlattr *tb[NL80211_STA_WME_MAX + 1];
3674 struct nlattr *nla;
3675
3676 nla = info->attrs[NL80211_ATTR_STA_WME];
3677 err = nla_parse_nested(tb, NL80211_STA_WME_MAX, nla,
3678 nl80211_sta_wme_policy);
3679 if (err)
3680 return err;
3681 4022
3682 if (tb[NL80211_STA_WME_UAPSD_QUEUES])
3683 params.uapsd_queues =
3684 nla_get_u8(tb[NL80211_STA_WME_UAPSD_QUEUES]);
3685 if (params.uapsd_queues &
3686 ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
3687 return -EINVAL;
3688
3689 if (tb[NL80211_STA_WME_MAX_SP])
3690 params.max_sp =
3691 nla_get_u8(tb[NL80211_STA_WME_MAX_SP]);
3692
3693 if (params.max_sp &
3694 ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
3695 return -EINVAL;
3696
3697 params.sta_modify_mask |= STATION_PARAM_APPLY_UAPSD;
3698 }
3699 /* TDLS peers cannot be added */ 4023 /* TDLS peers cannot be added */
3700 if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) 4024 if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
3701 return -EINVAL; 4025 return -EINVAL;
@@ -3716,6 +4040,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
3716 return PTR_ERR(params.vlan); 4040 return PTR_ERR(params.vlan);
3717 break; 4041 break;
3718 case NL80211_IFTYPE_MESH_POINT: 4042 case NL80211_IFTYPE_MESH_POINT:
4043 /* ignore uAPSD data */
4044 params.sta_modify_mask &= ~STATION_PARAM_APPLY_UAPSD;
4045
3719 /* associated is disallowed */ 4046 /* associated is disallowed */
3720 if (params.sta_flags_mask & BIT(NL80211_STA_FLAG_ASSOCIATED)) 4047 if (params.sta_flags_mask & BIT(NL80211_STA_FLAG_ASSOCIATED))
3721 return -EINVAL; 4048 return -EINVAL;
@@ -3724,8 +4051,14 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
3724 return -EINVAL; 4051 return -EINVAL;
3725 break; 4052 break;
3726 case NL80211_IFTYPE_STATION: 4053 case NL80211_IFTYPE_STATION:
3727 /* associated is disallowed */ 4054 case NL80211_IFTYPE_P2P_CLIENT:
3728 if (params.sta_flags_mask & BIT(NL80211_STA_FLAG_ASSOCIATED)) 4055 /* ignore uAPSD data */
4056 params.sta_modify_mask &= ~STATION_PARAM_APPLY_UAPSD;
4057
4058 /* these are disallowed */
4059 if (params.sta_flags_mask &
4060 (BIT(NL80211_STA_FLAG_ASSOCIATED) |
4061 BIT(NL80211_STA_FLAG_AUTHENTICATED)))
3729 return -EINVAL; 4062 return -EINVAL;
3730 /* Only TDLS peers can be added */ 4063 /* Only TDLS peers can be added */
3731 if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) 4064 if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
@@ -3736,6 +4069,11 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
3736 /* ... with external setup is supported */ 4069 /* ... with external setup is supported */
3737 if (!(rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)) 4070 if (!(rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP))
3738 return -EOPNOTSUPP; 4071 return -EOPNOTSUPP;
4072 /*
4073 * Older wpa_supplicant versions always mark the TDLS peer
4074 * as authorized, but it shouldn't yet be.
4075 */
4076 params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_AUTHORIZED);
3739 break; 4077 break;
3740 default: 4078 default:
3741 return -EOPNOTSUPP; 4079 return -EOPNOTSUPP;
@@ -4280,6 +4618,7 @@ static const struct nla_policy
4280 [NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 }, 4618 [NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 },
4281 [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 }, 4619 [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 },
4282 [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG }, 4620 [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG },
4621 [NL80211_MESH_SETUP_USERSPACE_MPM] = { .type = NLA_FLAG },
4283 [NL80211_MESH_SETUP_IE] = { .type = NLA_BINARY, 4622 [NL80211_MESH_SETUP_IE] = { .type = NLA_BINARY,
4284 .len = IEEE80211_MAX_DATA_LEN }, 4623 .len = IEEE80211_MAX_DATA_LEN },
4285 [NL80211_MESH_SETUP_USERSPACE_AMPE] = { .type = NLA_FLAG }, 4624 [NL80211_MESH_SETUP_USERSPACE_AMPE] = { .type = NLA_FLAG },
@@ -4418,6 +4757,7 @@ do { \
4418static int nl80211_parse_mesh_setup(struct genl_info *info, 4757static int nl80211_parse_mesh_setup(struct genl_info *info,
4419 struct mesh_setup *setup) 4758 struct mesh_setup *setup)
4420{ 4759{
4760 struct cfg80211_registered_device *rdev = info->user_ptr[0];
4421 struct nlattr *tb[NL80211_MESH_SETUP_ATTR_MAX + 1]; 4761 struct nlattr *tb[NL80211_MESH_SETUP_ATTR_MAX + 1];
4422 4762
4423 if (!info->attrs[NL80211_ATTR_MESH_SETUP]) 4763 if (!info->attrs[NL80211_ATTR_MESH_SETUP])
@@ -4454,8 +4794,14 @@ static int nl80211_parse_mesh_setup(struct genl_info *info,
4454 setup->ie = nla_data(ieattr); 4794 setup->ie = nla_data(ieattr);
4455 setup->ie_len = nla_len(ieattr); 4795 setup->ie_len = nla_len(ieattr);
4456 } 4796 }
4797 if (tb[NL80211_MESH_SETUP_USERSPACE_MPM] &&
4798 !(rdev->wiphy.features & NL80211_FEATURE_USERSPACE_MPM))
4799 return -EINVAL;
4800 setup->user_mpm = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_MPM]);
4457 setup->is_authenticated = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AUTH]); 4801 setup->is_authenticated = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AUTH]);
4458 setup->is_secure = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AMPE]); 4802 setup->is_secure = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AMPE]);
4803 if (setup->is_secure)
4804 setup->user_mpm = true;
4459 4805
4460 return 0; 4806 return 0;
4461} 4807}
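The mesh setup parsing above introduces NL80211_MESH_SETUP_USERSPACE_MPM: it is rejected unless the wiphy advertises NL80211_FEATURE_USERSPACE_MPM, and a secure mesh (userspace AMPE) forces user_mpm on, since authenticated peering already has to run in userspace. A tiny standalone sketch of those two rules; the struct and function are illustrative only.

#include <stdbool.h>
#include <stdio.h>

struct mesh_setup { bool user_mpm, is_secure; };

/* have_userspace_mpm stands in for NL80211_FEATURE_USERSPACE_MPM. */
static int parse_mesh_setup(struct mesh_setup *s, bool want_user_mpm,
			    bool want_secure, bool have_userspace_mpm)
{
	if (want_user_mpm && !have_userspace_mpm)
		return -1;		/* feature not advertised: -EINVAL */
	s->user_mpm = want_user_mpm;
	s->is_secure = want_secure;
	if (s->is_secure)
		s->user_mpm = true;	/* secure mesh implies userspace MPM */
	return 0;
}

int main(void)
{
	struct mesh_setup s = { false, false };

	if (parse_mesh_setup(&s, false, true, true) == 0)
		printf("user_mpm=%d is_secure=%d\n", s.user_mpm, s.is_secure);
	return 0;
}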
@@ -5663,14 +6009,10 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
5663{ 6009{
5664 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 6010 struct cfg80211_registered_device *rdev = info->user_ptr[0];
5665 struct net_device *dev = info->user_ptr[1]; 6011 struct net_device *dev = info->user_ptr[1];
5666 struct cfg80211_crypto_settings crypto;
5667 struct ieee80211_channel *chan; 6012 struct ieee80211_channel *chan;
5668 const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL; 6013 struct cfg80211_assoc_request req = {};
5669 int err, ssid_len, ie_len = 0; 6014 const u8 *bssid, *ssid;
5670 bool use_mfp = false; 6015 int err, ssid_len = 0;
5671 u32 flags = 0;
5672 struct ieee80211_ht_cap *ht_capa = NULL;
5673 struct ieee80211_ht_cap *ht_capa_mask = NULL;
5674 6016
5675 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) 6017 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
5676 return -EINVAL; 6018 return -EINVAL;
@@ -5698,41 +6040,58 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
5698 ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); 6040 ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
5699 6041
5700 if (info->attrs[NL80211_ATTR_IE]) { 6042 if (info->attrs[NL80211_ATTR_IE]) {
5701 ie = nla_data(info->attrs[NL80211_ATTR_IE]); 6043 req.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
5702 ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); 6044 req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
5703 } 6045 }
5704 6046
5705 if (info->attrs[NL80211_ATTR_USE_MFP]) { 6047 if (info->attrs[NL80211_ATTR_USE_MFP]) {
5706 enum nl80211_mfp mfp = 6048 enum nl80211_mfp mfp =
5707 nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]); 6049 nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]);
5708 if (mfp == NL80211_MFP_REQUIRED) 6050 if (mfp == NL80211_MFP_REQUIRED)
5709 use_mfp = true; 6051 req.use_mfp = true;
5710 else if (mfp != NL80211_MFP_NO) 6052 else if (mfp != NL80211_MFP_NO)
5711 return -EINVAL; 6053 return -EINVAL;
5712 } 6054 }
5713 6055
5714 if (info->attrs[NL80211_ATTR_PREV_BSSID]) 6056 if (info->attrs[NL80211_ATTR_PREV_BSSID])
5715 prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]); 6057 req.prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);
5716 6058
5717 if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT])) 6059 if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT]))
5718 flags |= ASSOC_REQ_DISABLE_HT; 6060 req.flags |= ASSOC_REQ_DISABLE_HT;
5719 6061
5720 if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) 6062 if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
5721 ht_capa_mask = 6063 memcpy(&req.ht_capa_mask,
5722 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]); 6064 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]),
6065 sizeof(req.ht_capa_mask));
5723 6066
5724 if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) { 6067 if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) {
5725 if (!ht_capa_mask) 6068 if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
5726 return -EINVAL; 6069 return -EINVAL;
5727 ht_capa = nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); 6070 memcpy(&req.ht_capa,
6071 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]),
6072 sizeof(req.ht_capa));
5728 } 6073 }
5729 6074
5730 err = nl80211_crypto_settings(rdev, info, &crypto, 1); 6075 if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_VHT]))
6076 req.flags |= ASSOC_REQ_DISABLE_VHT;
6077
6078 if (info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK])
6079 memcpy(&req.vht_capa_mask,
6080 nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]),
6081 sizeof(req.vht_capa_mask));
6082
6083 if (info->attrs[NL80211_ATTR_VHT_CAPABILITY]) {
6084 if (!info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK])
6085 return -EINVAL;
6086 memcpy(&req.vht_capa,
6087 nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]),
6088 sizeof(req.vht_capa));
6089 }
6090
6091 err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
5731 if (!err) 6092 if (!err)
5732 err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid, 6093 err = cfg80211_mlme_assoc(rdev, dev, chan, bssid,
5733 ssid, ssid_len, ie, ie_len, use_mfp, 6094 ssid, ssid_len, &req);
5734 &crypto, flags, ht_capa,
5735 ht_capa_mask);
5736 6095
5737 return err; 6096 return err;
5738} 6097}
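The associate (and, below, connect) path now copies optional HT/VHT capability overrides plus their masks into the request instead of passing raw attribute pointers, and a capability attribute without its mask is rejected. Conceptually a masked override means: for every bit set in the mask, take the value from the override; otherwise keep the device's own capability. A minimal sketch of that bit arithmetic; the real code applies it per field, but the idea is the same.

#include <stdint.h>
#include <stdio.h>

/* For mask bits: take the override value; for the rest: keep our own. */
static uint16_t apply_override(uint16_t own, uint16_t override, uint16_t mask)
{
	return (own & ~mask) | (override & mask);
}

int main(void)
{
	uint16_t own = 0x016e;		/* pretend capability field */
	uint16_t mask = 0x0020;		/* only this bit may be overridden */
	uint16_t override = 0x0000;	/* ... and it is forced off */

	printf("0x%04x\n", apply_override(own, override, mask));
	return 0;
}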
@@ -6312,6 +6671,24 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
6312 sizeof(connect.ht_capa)); 6671 sizeof(connect.ht_capa));
6313 } 6672 }
6314 6673
6674 if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_VHT]))
6675 connect.flags |= ASSOC_REQ_DISABLE_VHT;
6676
6677 if (info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK])
6678 memcpy(&connect.vht_capa_mask,
6679 nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]),
6680 sizeof(connect.vht_capa_mask));
6681
6682 if (info->attrs[NL80211_ATTR_VHT_CAPABILITY]) {
6683 if (!info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]) {
6684 kfree(connkeys);
6685 return -EINVAL;
6686 }
6687 memcpy(&connect.vht_capa,
6688 nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]),
6689 sizeof(connect.vht_capa));
6690 }
6691
6315 err = cfg80211_connect(rdev, dev, &connect, connkeys); 6692 err = cfg80211_connect(rdev, dev, &connect, connkeys);
6316 if (err) 6693 if (err)
6317 kfree(connkeys); 6694 kfree(connkeys);
@@ -7085,6 +7462,9 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
7085 return err; 7462 return err;
7086 } 7463 }
7087 7464
7465 if (setup.user_mpm)
7466 cfg.auto_open_plinks = false;
7467
7088 if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { 7468 if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
7089 err = nl80211_parse_chandef(rdev, info, &setup.chandef); 7469 err = nl80211_parse_chandef(rdev, info, &setup.chandef);
7090 if (err) 7470 if (err)
@@ -7284,7 +7664,8 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev,
7284 return -EINVAL; 7664 return -EINVAL;
7285 7665
7286 if (nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) > 7666 if (nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) >
7287 rdev->wiphy.wowlan.tcp->data_interval_max) 7667 rdev->wiphy.wowlan.tcp->data_interval_max ||
7668 nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) == 0)
7288 return -EINVAL; 7669 return -EINVAL;
7289 7670
7290 wake_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]); 7671 wake_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]);
@@ -7769,6 +8150,54 @@ static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)
7769 return 0; 8150 return 0;
7770} 8151}
7771 8152
8153static int nl80211_get_protocol_features(struct sk_buff *skb,
8154 struct genl_info *info)
8155{
8156 void *hdr;
8157 struct sk_buff *msg;
8158
8159 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
8160 if (!msg)
8161 return -ENOMEM;
8162
8163 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
8164 NL80211_CMD_GET_PROTOCOL_FEATURES);
8165 if (!hdr)
8166 goto nla_put_failure;
8167
8168 if (nla_put_u32(msg, NL80211_ATTR_PROTOCOL_FEATURES,
8169 NL80211_PROTOCOL_FEATURE_SPLIT_WIPHY_DUMP))
8170 goto nla_put_failure;
8171
8172 genlmsg_end(msg, hdr);
8173 return genlmsg_reply(msg, info);
8174
8175 nla_put_failure:
8176 kfree_skb(msg);
8177 return -ENOBUFS;
8178}
8179
8180static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info)
8181{
8182 struct cfg80211_registered_device *rdev = info->user_ptr[0];
8183 struct cfg80211_update_ft_ies_params ft_params;
8184 struct net_device *dev = info->user_ptr[1];
8185
8186 if (!rdev->ops->update_ft_ies)
8187 return -EOPNOTSUPP;
8188
8189 if (!info->attrs[NL80211_ATTR_MDID] ||
8190 !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
8191 return -EINVAL;
8192
8193 memset(&ft_params, 0, sizeof(ft_params));
8194 ft_params.md = nla_get_u16(info->attrs[NL80211_ATTR_MDID]);
8195 ft_params.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
8196 ft_params.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
8197
8198 return rdev_update_ft_ies(rdev, dev, &ft_params);
8199}
8200
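NL80211_CMD_GET_PROTOCOL_FEATURES gives userspace a way to discover, before dumping, whether the kernel advertises NL80211_PROTOCOL_FEATURE_SPLIT_WIPHY_DUMP and will therefore honour the NL80211_ATTR_SPLIT_WIPHY_DUMP flag added earlier in this patch. A rough userspace sketch of issuing the split dump, assuming libnl-3; error handling and reply parsing are abbreviated, and a real tool would first parse the GET_PROTOCOL_FEATURES reply and fall back to an unsplit dump when the feature bit is absent.

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

int request_split_wiphy_dump(void)
{
	struct nl_sock *sock = nl_socket_alloc();
	struct nl_msg *msg = NULL;
	int family, err = -1;

	if (!sock)
		return -1;
	if (genl_connect(sock))
		goto out;
	family = genl_ctrl_resolve(sock, "nl80211");
	if (family < 0)
		goto out;

	msg = nlmsg_alloc();
	if (!msg)
		goto out;

	/* Dump all wiphys, asking for the chunked (split) format. */
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
		    NLM_F_DUMP, NL80211_CMD_GET_WIPHY, 0);
	nla_put_flag(msg, NL80211_ATTR_SPLIT_WIPHY_DUMP);

	if (nl_send_auto(sock, msg) < 0)
		goto out;
	/* Registered callbacks would reassemble the per-wiphy chunks here. */
	err = nl_recvmsgs_default(sock);
out:
	nlmsg_free(msg);
	nl_socket_free(sock);
	return err;
}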
7772#define NL80211_FLAG_NEED_WIPHY 0x01 8201#define NL80211_FLAG_NEED_WIPHY 0x01
7773#define NL80211_FLAG_NEED_NETDEV 0x02 8202#define NL80211_FLAG_NEED_NETDEV 0x02
7774#define NL80211_FLAG_NEED_RTNL 0x04 8203#define NL80211_FLAG_NEED_RTNL 0x04
@@ -8445,6 +8874,19 @@ static struct genl_ops nl80211_ops[] = {
8445 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 8874 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
8446 NL80211_FLAG_NEED_RTNL, 8875 NL80211_FLAG_NEED_RTNL,
8447 }, 8876 },
8877 {
8878 .cmd = NL80211_CMD_GET_PROTOCOL_FEATURES,
8879 .doit = nl80211_get_protocol_features,
8880 .policy = nl80211_policy,
8881 },
8882 {
8883 .cmd = NL80211_CMD_UPDATE_FT_IES,
8884 .doit = nl80211_update_ft_ies,
8885 .policy = nl80211_policy,
8886 .flags = GENL_ADMIN_PERM,
8887 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
8888 NL80211_FLAG_NEED_RTNL,
8889 },
8448}; 8890};
8449 8891
8450static struct genl_multicast_group nl80211_mlme_mcgrp = { 8892static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -8472,7 +8914,8 @@ void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev)
8472 if (!msg) 8914 if (!msg)
8473 return; 8915 return;
8474 8916
8475 if (nl80211_send_wiphy(msg, 0, 0, 0, rdev) < 0) { 8917 if (nl80211_send_wiphy(rdev, msg, 0, 0, 0,
8918 false, NULL, NULL, NULL) < 0) {
8476 nlmsg_free(msg); 8919 nlmsg_free(msg);
8477 return; 8920 return;
8478 } 8921 }
@@ -8796,21 +9239,31 @@ void nl80211_send_disassoc(struct cfg80211_registered_device *rdev,
8796 NL80211_CMD_DISASSOCIATE, gfp); 9239 NL80211_CMD_DISASSOCIATE, gfp);
8797} 9240}
8798 9241
8799void nl80211_send_unprot_deauth(struct cfg80211_registered_device *rdev, 9242void cfg80211_send_unprot_deauth(struct net_device *dev, const u8 *buf,
8800 struct net_device *netdev, const u8 *buf, 9243 size_t len)
8801 size_t len, gfp_t gfp)
8802{ 9244{
8803 nl80211_send_mlme_event(rdev, netdev, buf, len, 9245 struct wireless_dev *wdev = dev->ieee80211_ptr;
8804 NL80211_CMD_UNPROT_DEAUTHENTICATE, gfp); 9246 struct wiphy *wiphy = wdev->wiphy;
9247 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9248
9249 trace_cfg80211_send_unprot_deauth(dev);
9250 nl80211_send_mlme_event(rdev, dev, buf, len,
9251 NL80211_CMD_UNPROT_DEAUTHENTICATE, GFP_ATOMIC);
8805} 9252}
9253EXPORT_SYMBOL(cfg80211_send_unprot_deauth);
8806 9254
8807void nl80211_send_unprot_disassoc(struct cfg80211_registered_device *rdev, 9255void cfg80211_send_unprot_disassoc(struct net_device *dev, const u8 *buf,
8808 struct net_device *netdev, const u8 *buf, 9256 size_t len)
8809 size_t len, gfp_t gfp)
8810{ 9257{
8811 nl80211_send_mlme_event(rdev, netdev, buf, len, 9258 struct wireless_dev *wdev = dev->ieee80211_ptr;
8812 NL80211_CMD_UNPROT_DISASSOCIATE, gfp); 9259 struct wiphy *wiphy = wdev->wiphy;
9260 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9261
9262 trace_cfg80211_send_unprot_disassoc(dev);
9263 nl80211_send_mlme_event(rdev, dev, buf, len,
9264 NL80211_CMD_UNPROT_DISASSOCIATE, GFP_ATOMIC);
8813} 9265}
9266EXPORT_SYMBOL(cfg80211_send_unprot_disassoc);
8814 9267
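With nl80211_send_unprot_deauth/_disassoc turned into the exported cfg80211_send_unprot_* helpers, a driver reports unprotected deauth/disassoc frames directly against the net_device; the gfp argument is gone and GFP_ATOMIC is used internally. A hedged kernel-context sketch of a caller; my_rx_unprot_deauth() and its arguments are hypothetical and not part of the patch.

/* Hypothetical driver RX hook: frame points at the full, unprotected 802.11
 * deauth frame received while management frame protection is in use;
 * cfg80211 forwards it to userspace as NL80211_CMD_UNPROT_DEAUTHENTICATE. */
static void my_rx_unprot_deauth(struct net_device *dev,
				const u8 *frame, size_t len)
{
	cfg80211_send_unprot_deauth(dev, frame, len);
}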
8815static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev, 9268static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev,
8816 struct net_device *netdev, int cmd, 9269 struct net_device *netdev, int cmd,
@@ -9013,14 +9466,19 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
9013 nlmsg_free(msg); 9466 nlmsg_free(msg);
9014} 9467}
9015 9468
9016void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev, 9469void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *addr,
9017 struct net_device *netdev, 9470 const u8* ie, u8 ie_len, gfp_t gfp)
9018 const u8 *macaddr, const u8* ie, u8 ie_len,
9019 gfp_t gfp)
9020{ 9471{
9472 struct wireless_dev *wdev = dev->ieee80211_ptr;
9473 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
9021 struct sk_buff *msg; 9474 struct sk_buff *msg;
9022 void *hdr; 9475 void *hdr;
9023 9476
9477 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_MESH_POINT))
9478 return;
9479
9480 trace_cfg80211_notify_new_peer_candidate(dev, addr);
9481
9024 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); 9482 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
9025 if (!msg) 9483 if (!msg)
9026 return; 9484 return;
@@ -9032,8 +9490,8 @@ void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev,
9032 } 9490 }
9033 9491
9034 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || 9492 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
9035 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || 9493 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
9036 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr) || 9494 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
9037 (ie_len && ie && 9495 (ie_len && ie &&
9038 nla_put(msg, NL80211_ATTR_IE, ie_len , ie))) 9496 nla_put(msg, NL80211_ATTR_IE, ie_len , ie)))
9039 goto nla_put_failure; 9497 goto nla_put_failure;
@@ -9048,6 +9506,7 @@ void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev,
9048 genlmsg_cancel(msg, hdr); 9506 genlmsg_cancel(msg, hdr);
9049 nlmsg_free(msg); 9507 nlmsg_free(msg);
9050} 9508}
9509EXPORT_SYMBOL(cfg80211_notify_new_peer_candidate);
9051 9510
9052void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, 9511void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
9053 struct net_device *netdev, const u8 *addr, 9512 struct net_device *netdev, const u8 *addr,
@@ -9116,7 +9575,7 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
9116 nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE); 9575 nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE);
9117 if (!nl_freq) 9576 if (!nl_freq)
9118 goto nla_put_failure; 9577 goto nla_put_failure;
9119 if (nl80211_msg_put_channel(msg, channel_before)) 9578 if (nl80211_msg_put_channel(msg, channel_before, false))
9120 goto nla_put_failure; 9579 goto nla_put_failure;
9121 nla_nest_end(msg, nl_freq); 9580 nla_nest_end(msg, nl_freq);
9122 9581
@@ -9124,7 +9583,7 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
9124 nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_AFTER); 9583 nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_AFTER);
9125 if (!nl_freq) 9584 if (!nl_freq)
9126 goto nla_put_failure; 9585 goto nla_put_failure;
9127 if (nl80211_msg_put_channel(msg, channel_after)) 9586 if (nl80211_msg_put_channel(msg, channel_after, false))
9128 goto nla_put_failure; 9587 goto nla_put_failure;
9129 nla_nest_end(msg, nl_freq); 9588 nla_nest_end(msg, nl_freq);
9130 9589
@@ -9186,31 +9645,42 @@ static void nl80211_send_remain_on_chan_event(
9186 nlmsg_free(msg); 9645 nlmsg_free(msg);
9187} 9646}
9188 9647
9189void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev, 9648void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie,
9190 struct wireless_dev *wdev, u64 cookie, 9649 struct ieee80211_channel *chan,
9191 struct ieee80211_channel *chan, 9650 unsigned int duration, gfp_t gfp)
9192 unsigned int duration, gfp_t gfp)
9193{ 9651{
9652 struct wiphy *wiphy = wdev->wiphy;
9653 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9654
9655 trace_cfg80211_ready_on_channel(wdev, cookie, chan, duration);
9194 nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL, 9656 nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL,
9195 rdev, wdev, cookie, chan, 9657 rdev, wdev, cookie, chan,
9196 duration, gfp); 9658 duration, gfp);
9197} 9659}
9660EXPORT_SYMBOL(cfg80211_ready_on_channel);
9198 9661
9199void nl80211_send_remain_on_channel_cancel( 9662void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie,
9200 struct cfg80211_registered_device *rdev, 9663 struct ieee80211_channel *chan,
9201 struct wireless_dev *wdev, 9664 gfp_t gfp)
9202 u64 cookie, struct ieee80211_channel *chan, gfp_t gfp)
9203{ 9665{
9666 struct wiphy *wiphy = wdev->wiphy;
9667 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9668
9669 trace_cfg80211_ready_on_channel_expired(wdev, cookie, chan);
9204 nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL, 9670 nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
9205 rdev, wdev, cookie, chan, 0, gfp); 9671 rdev, wdev, cookie, chan, 0, gfp);
9206} 9672}
9673EXPORT_SYMBOL(cfg80211_remain_on_channel_expired);
9207 9674
9208void nl80211_send_sta_event(struct cfg80211_registered_device *rdev, 9675void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
9209 struct net_device *dev, const u8 *mac_addr, 9676 struct station_info *sinfo, gfp_t gfp)
9210 struct station_info *sinfo, gfp_t gfp)
9211{ 9677{
9678 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
9679 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9212 struct sk_buff *msg; 9680 struct sk_buff *msg;
9213 9681
9682 trace_cfg80211_new_sta(dev, mac_addr, sinfo);
9683
9214 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); 9684 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
9215 if (!msg) 9685 if (!msg)
9216 return; 9686 return;
@@ -9224,14 +9694,17 @@ void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
9224 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 9694 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
9225 nl80211_mlme_mcgrp.id, gfp); 9695 nl80211_mlme_mcgrp.id, gfp);
9226} 9696}
9697EXPORT_SYMBOL(cfg80211_new_sta);
9227 9698
9228void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev, 9699void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp)
9229 struct net_device *dev, const u8 *mac_addr,
9230 gfp_t gfp)
9231{ 9700{
9701 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
9702 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9232 struct sk_buff *msg; 9703 struct sk_buff *msg;
9233 void *hdr; 9704 void *hdr;
9234 9705
9706 trace_cfg80211_del_sta(dev, mac_addr);
9707
9235 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); 9708 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
9236 if (!msg) 9709 if (!msg)
9237 return; 9710 return;
@@ -9256,12 +9729,14 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
9256 genlmsg_cancel(msg, hdr); 9729 genlmsg_cancel(msg, hdr);
9257 nlmsg_free(msg); 9730 nlmsg_free(msg);
9258} 9731}
9732EXPORT_SYMBOL(cfg80211_del_sta);
9259 9733
9260void nl80211_send_conn_failed_event(struct cfg80211_registered_device *rdev, 9734void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
9261 struct net_device *dev, const u8 *mac_addr, 9735 enum nl80211_connect_failed_reason reason,
9262 enum nl80211_connect_failed_reason reason, 9736 gfp_t gfp)
9263 gfp_t gfp)
9264{ 9737{
9738 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
9739 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9265 struct sk_buff *msg; 9740 struct sk_buff *msg;
9266 void *hdr; 9741 void *hdr;
9267 9742
@@ -9290,6 +9765,7 @@ void nl80211_send_conn_failed_event(struct cfg80211_registered_device *rdev,
9290 genlmsg_cancel(msg, hdr); 9765 genlmsg_cancel(msg, hdr);
9291 nlmsg_free(msg); 9766 nlmsg_free(msg);
9292} 9767}
9768EXPORT_SYMBOL(cfg80211_conn_failed);
9293 9769
9294static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd, 9770static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
9295 const u8 *addr, gfp_t gfp) 9771 const u8 *addr, gfp_t gfp)
@@ -9334,19 +9810,47 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
9334 return true; 9810 return true;
9335} 9811}
9336 9812
9337bool nl80211_unexpected_frame(struct net_device *dev, const u8 *addr, gfp_t gfp) 9813bool cfg80211_rx_spurious_frame(struct net_device *dev,
9814 const u8 *addr, gfp_t gfp)
9338{ 9815{
9339 return __nl80211_unexpected_frame(dev, NL80211_CMD_UNEXPECTED_FRAME, 9816 struct wireless_dev *wdev = dev->ieee80211_ptr;
9340 addr, gfp); 9817 bool ret;
9818
9819 trace_cfg80211_rx_spurious_frame(dev, addr);
9820
9821 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
9822 wdev->iftype != NL80211_IFTYPE_P2P_GO)) {
9823 trace_cfg80211_return_bool(false);
9824 return false;
9825 }
9826 ret = __nl80211_unexpected_frame(dev, NL80211_CMD_UNEXPECTED_FRAME,
9827 addr, gfp);
9828 trace_cfg80211_return_bool(ret);
9829 return ret;
9341} 9830}
9831EXPORT_SYMBOL(cfg80211_rx_spurious_frame);
9342 9832
9343bool nl80211_unexpected_4addr_frame(struct net_device *dev, 9833bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
9344 const u8 *addr, gfp_t gfp) 9834 const u8 *addr, gfp_t gfp)
9345{ 9835{
9346 return __nl80211_unexpected_frame(dev, 9836 struct wireless_dev *wdev = dev->ieee80211_ptr;
9347 NL80211_CMD_UNEXPECTED_4ADDR_FRAME, 9837 bool ret;
9348 addr, gfp); 9838
9839 trace_cfg80211_rx_unexpected_4addr_frame(dev, addr);
9840
9841 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
9842 wdev->iftype != NL80211_IFTYPE_P2P_GO &&
9843 wdev->iftype != NL80211_IFTYPE_AP_VLAN)) {
9844 trace_cfg80211_return_bool(false);
9845 return false;
9846 }
9847 ret = __nl80211_unexpected_frame(dev,
9848 NL80211_CMD_UNEXPECTED_4ADDR_FRAME,
9849 addr, gfp);
9850 trace_cfg80211_return_bool(ret);
9851 return ret;
9349} 9852}
9853EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame);
9350 9854
9351int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, 9855int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
9352 struct wireless_dev *wdev, u32 nlportid, 9856 struct wireless_dev *wdev, u32 nlportid,
@@ -9386,15 +9890,17 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
9386 return -ENOBUFS; 9890 return -ENOBUFS;
9387} 9891}
9388 9892
9389void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev, 9893void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
9390 struct wireless_dev *wdev, u64 cookie, 9894 const u8 *buf, size_t len, bool ack, gfp_t gfp)
9391 const u8 *buf, size_t len, bool ack,
9392 gfp_t gfp)
9393{ 9895{
9896 struct wiphy *wiphy = wdev->wiphy;
9897 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9394 struct net_device *netdev = wdev->netdev; 9898 struct net_device *netdev = wdev->netdev;
9395 struct sk_buff *msg; 9899 struct sk_buff *msg;
9396 void *hdr; 9900 void *hdr;
9397 9901
9902 trace_cfg80211_mgmt_tx_status(wdev, cookie, ack);
9903
9398 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); 9904 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
9399 if (!msg) 9905 if (!msg)
9400 return; 9906 return;
@@ -9422,17 +9928,21 @@ void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
9422 genlmsg_cancel(msg, hdr); 9928 genlmsg_cancel(msg, hdr);
9423 nlmsg_free(msg); 9929 nlmsg_free(msg);
9424} 9930}
9931EXPORT_SYMBOL(cfg80211_mgmt_tx_status);
9425 9932
9426void 9933void cfg80211_cqm_rssi_notify(struct net_device *dev,
9427nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev, 9934 enum nl80211_cqm_rssi_threshold_event rssi_event,
9428 struct net_device *netdev, 9935 gfp_t gfp)
9429 enum nl80211_cqm_rssi_threshold_event rssi_event,
9430 gfp_t gfp)
9431{ 9936{
9937 struct wireless_dev *wdev = dev->ieee80211_ptr;
9938 struct wiphy *wiphy = wdev->wiphy;
9939 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9432 struct sk_buff *msg; 9940 struct sk_buff *msg;
9433 struct nlattr *pinfoattr; 9941 struct nlattr *pinfoattr;
9434 void *hdr; 9942 void *hdr;
9435 9943
9944 trace_cfg80211_cqm_rssi_notify(dev, rssi_event);
9945
9436 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); 9946 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
9437 if (!msg) 9947 if (!msg)
9438 return; 9948 return;
@@ -9444,7 +9954,7 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
9444 } 9954 }
9445 9955
9446 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || 9956 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
9447 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) 9957 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex))
9448 goto nla_put_failure; 9958 goto nla_put_failure;
9449 9959
9450 pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM); 9960 pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
@@ -9467,10 +9977,11 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
9467 genlmsg_cancel(msg, hdr); 9977 genlmsg_cancel(msg, hdr);
9468 nlmsg_free(msg); 9978 nlmsg_free(msg);
9469} 9979}
9980EXPORT_SYMBOL(cfg80211_cqm_rssi_notify);
9470 9981
9471void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev, 9982static void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
9472 struct net_device *netdev, const u8 *bssid, 9983 struct net_device *netdev, const u8 *bssid,
9473 const u8 *replay_ctr, gfp_t gfp) 9984 const u8 *replay_ctr, gfp_t gfp)
9474{ 9985{
9475 struct sk_buff *msg; 9986 struct sk_buff *msg;
9476 struct nlattr *rekey_attr; 9987 struct nlattr *rekey_attr;
@@ -9512,9 +10023,22 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
9512 nlmsg_free(msg); 10023 nlmsg_free(msg);
9513} 10024}
9514 10025
9515void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev, 10026void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
9516 struct net_device *netdev, int index, 10027 const u8 *replay_ctr, gfp_t gfp)
9517 const u8 *bssid, bool preauth, gfp_t gfp) 10028{
10029 struct wireless_dev *wdev = dev->ieee80211_ptr;
10030 struct wiphy *wiphy = wdev->wiphy;
10031 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
10032
10033 trace_cfg80211_gtk_rekey_notify(dev, bssid);
10034 nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp);
10035}
10036EXPORT_SYMBOL(cfg80211_gtk_rekey_notify);
10037
10038static void
10039nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
10040 struct net_device *netdev, int index,
10041 const u8 *bssid, bool preauth, gfp_t gfp)
9518{ 10042{
9519 struct sk_buff *msg; 10043 struct sk_buff *msg;
9520 struct nlattr *attr; 10044 struct nlattr *attr;
@@ -9557,9 +10081,22 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
9557 nlmsg_free(msg); 10081 nlmsg_free(msg);
9558} 10082}
9559 10083
9560void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, 10084void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
9561 struct net_device *netdev, 10085 const u8 *bssid, bool preauth, gfp_t gfp)
9562 struct cfg80211_chan_def *chandef, gfp_t gfp) 10086{
10087 struct wireless_dev *wdev = dev->ieee80211_ptr;
10088 struct wiphy *wiphy = wdev->wiphy;
10089 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
10090
10091 trace_cfg80211_pmksa_candidate_notify(dev, index, bssid, preauth);
10092 nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp);
10093}
10094EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
10095
10096static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
10097 struct net_device *netdev,
10098 struct cfg80211_chan_def *chandef,
10099 gfp_t gfp)
9563{ 10100{
9564 struct sk_buff *msg; 10101 struct sk_buff *msg;
9565 void *hdr; 10102 void *hdr;
@@ -9591,11 +10128,36 @@ void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
9591 nlmsg_free(msg); 10128 nlmsg_free(msg);
9592} 10129}
9593 10130
9594void 10131void cfg80211_ch_switch_notify(struct net_device *dev,
9595nl80211_send_cqm_txe_notify(struct cfg80211_registered_device *rdev, 10132 struct cfg80211_chan_def *chandef)
9596 struct net_device *netdev, const u8 *peer,
9597 u32 num_packets, u32 rate, u32 intvl, gfp_t gfp)
9598{ 10133{
10134 struct wireless_dev *wdev = dev->ieee80211_ptr;
10135 struct wiphy *wiphy = wdev->wiphy;
10136 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
10137
10138 trace_cfg80211_ch_switch_notify(dev, chandef);
10139
10140 wdev_lock(wdev);
10141
10142 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
10143 wdev->iftype != NL80211_IFTYPE_P2P_GO))
10144 goto out;
10145
10146 wdev->channel = chandef->chan;
10147 nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL);
10148out:
10149 wdev_unlock(wdev);
10150 return;
10151}
10152EXPORT_SYMBOL(cfg80211_ch_switch_notify);
10153
10154void cfg80211_cqm_txe_notify(struct net_device *dev,
10155 const u8 *peer, u32 num_packets,
10156 u32 rate, u32 intvl, gfp_t gfp)
10157{
10158 struct wireless_dev *wdev = dev->ieee80211_ptr;
10159 struct wiphy *wiphy = wdev->wiphy;
10160 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9599 struct sk_buff *msg; 10161 struct sk_buff *msg;
9600 struct nlattr *pinfoattr; 10162 struct nlattr *pinfoattr;
9601 void *hdr; 10163 void *hdr;
@@ -9611,7 +10173,7 @@ nl80211_send_cqm_txe_notify(struct cfg80211_registered_device *rdev,
9611 } 10173 }
9612 10174
9613 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || 10175 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
9614 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || 10176 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
9615 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer)) 10177 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer))
9616 goto nla_put_failure; 10178 goto nla_put_failure;
9617 10179
@@ -9640,6 +10202,7 @@ nl80211_send_cqm_txe_notify(struct cfg80211_registered_device *rdev,
9640 genlmsg_cancel(msg, hdr); 10202 genlmsg_cancel(msg, hdr);
9641 nlmsg_free(msg); 10203 nlmsg_free(msg);
9642} 10204}
10205EXPORT_SYMBOL(cfg80211_cqm_txe_notify);
9643 10206
9644void 10207void
9645nl80211_radar_notify(struct cfg80211_registered_device *rdev, 10208nl80211_radar_notify(struct cfg80211_registered_device *rdev,
@@ -9692,15 +10255,18 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev,
9692 nlmsg_free(msg); 10255 nlmsg_free(msg);
9693} 10256}
9694 10257
9695void 10258void cfg80211_cqm_pktloss_notify(struct net_device *dev,
9696nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev, 10259 const u8 *peer, u32 num_packets, gfp_t gfp)
9697 struct net_device *netdev, const u8 *peer,
9698 u32 num_packets, gfp_t gfp)
9699{ 10260{
10261 struct wireless_dev *wdev = dev->ieee80211_ptr;
10262 struct wiphy *wiphy = wdev->wiphy;
10263 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9700 struct sk_buff *msg; 10264 struct sk_buff *msg;
9701 struct nlattr *pinfoattr; 10265 struct nlattr *pinfoattr;
9702 void *hdr; 10266 void *hdr;
9703 10267
10268 trace_cfg80211_cqm_pktloss_notify(dev, peer, num_packets);
10269
9704 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); 10270 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
9705 if (!msg) 10271 if (!msg)
9706 return; 10272 return;
@@ -9712,7 +10278,7 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
9712 } 10278 }
9713 10279
9714 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || 10280 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
9715 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || 10281 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
9716 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer)) 10282 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer))
9717 goto nla_put_failure; 10283 goto nla_put_failure;
9718 10284
@@ -9735,6 +10301,7 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
9735 genlmsg_cancel(msg, hdr); 10301 genlmsg_cancel(msg, hdr);
9736 nlmsg_free(msg); 10302 nlmsg_free(msg);
9737} 10303}
10304EXPORT_SYMBOL(cfg80211_cqm_pktloss_notify);
9738 10305
9739void cfg80211_probe_status(struct net_device *dev, const u8 *addr, 10306void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
9740 u64 cookie, bool acked, gfp_t gfp) 10307 u64 cookie, bool acked, gfp_t gfp)
@@ -10021,6 +10588,50 @@ static struct notifier_block nl80211_netlink_notifier = {
10021 .notifier_call = nl80211_netlink_notify, 10588 .notifier_call = nl80211_netlink_notify,
10022}; 10589};
10023 10590
10591void cfg80211_ft_event(struct net_device *netdev,
10592 struct cfg80211_ft_event_params *ft_event)
10593{
10594 struct wiphy *wiphy = netdev->ieee80211_ptr->wiphy;
10595 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
10596 struct sk_buff *msg;
10597 void *hdr;
10598 int err;
10599
10600 trace_cfg80211_ft_event(wiphy, netdev, ft_event);
10601
10602 if (!ft_event->target_ap)
10603 return;
10604
10605 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
10606 if (!msg)
10607 return;
10608
10609 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FT_EVENT);
10610 if (!hdr) {
10611 nlmsg_free(msg);
10612 return;
10613 }
10614
10615 nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
10616 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
10617 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, ft_event->target_ap);
10618 if (ft_event->ies)
10619 nla_put(msg, NL80211_ATTR_IE, ft_event->ies_len, ft_event->ies);
10620 if (ft_event->ric_ies)
10621 nla_put(msg, NL80211_ATTR_IE_RIC, ft_event->ric_ies_len,
10622 ft_event->ric_ies);
10623
10624 err = genlmsg_end(msg, hdr);
10625 if (err < 0) {
10626 nlmsg_free(msg);
10627 return;
10628 }
10629
10630 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
10631 nl80211_mlme_mcgrp.id, GFP_KERNEL);
10632}
10633EXPORT_SYMBOL(cfg80211_ft_event);
10634
10024/* initialisation/exit functions */ 10635/* initialisation/exit functions */
10025 10636
10026int nl80211_init(void) 10637int nl80211_init(void)
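The nl80211.c hunks above convert the former internal nl80211_send_* notifiers into exported cfg80211_* entry points: each one now derives the registered device from the netdev/wdev it is given, emits a cfg80211 tracepoint, and is tagged EXPORT_SYMBOL so drivers can call it directly. A minimal, hypothetical driver-side sketch of that calling convention (dev and sta_addr are assumed driver-local values, not part of this patch):

#include <linux/string.h>
#include <net/cfg80211.h>

/*
 * Hypothetical driver code using the now-exported notifiers: the driver
 * passes only its net_device; cfg80211 looks up the wiphy/rdev itself.
 */
static void drv_report_new_peer(struct net_device *dev, const u8 *sta_addr)
{
	struct station_info sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	/* fill in whatever the driver knows about the peer here */

	cfg80211_new_sta(dev, sta_addr, &sinfo, GFP_KERNEL);
}

static void drv_report_peer_gone(struct net_device *dev, const u8 *sta_addr)
{
	cfg80211_del_sta(dev, sta_addr, GFP_KERNEL);
}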
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index b061da4919e1..a4073e808c13 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -29,12 +29,6 @@ void nl80211_send_deauth(struct cfg80211_registered_device *rdev,
29void nl80211_send_disassoc(struct cfg80211_registered_device *rdev, 29void nl80211_send_disassoc(struct cfg80211_registered_device *rdev,
30 struct net_device *netdev, 30 struct net_device *netdev,
31 const u8 *buf, size_t len, gfp_t gfp); 31 const u8 *buf, size_t len, gfp_t gfp);
32void nl80211_send_unprot_deauth(struct cfg80211_registered_device *rdev,
33 struct net_device *netdev,
34 const u8 *buf, size_t len, gfp_t gfp);
35void nl80211_send_unprot_disassoc(struct cfg80211_registered_device *rdev,
36 struct net_device *netdev,
37 const u8 *buf, size_t len, gfp_t gfp);
38void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev, 32void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev,
39 struct net_device *netdev, 33 struct net_device *netdev,
40 const u8 *addr, gfp_t gfp); 34 const u8 *addr, gfp_t gfp);
@@ -54,10 +48,6 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
54 struct net_device *netdev, u16 reason, 48 struct net_device *netdev, u16 reason,
55 const u8 *ie, size_t ie_len, bool from_ap); 49 const u8 *ie, size_t ie_len, bool from_ap);
56 50
57void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev,
58 struct net_device *netdev,
59 const u8 *macaddr, const u8* ie, u8 ie_len,
60 gfp_t gfp);
61void 51void
62nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, 52nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
63 struct net_device *netdev, const u8 *addr, 53 struct net_device *netdev, const u8 *addr,
@@ -73,41 +63,10 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
73 struct net_device *netdev, const u8 *bssid, 63 struct net_device *netdev, const u8 *bssid,
74 gfp_t gfp); 64 gfp_t gfp);
75 65
76void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
77 struct wireless_dev *wdev, u64 cookie,
78 struct ieee80211_channel *chan,
79 unsigned int duration, gfp_t gfp);
80void nl80211_send_remain_on_channel_cancel(
81 struct cfg80211_registered_device *rdev,
82 struct wireless_dev *wdev,
83 u64 cookie, struct ieee80211_channel *chan, gfp_t gfp);
84
85void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
86 struct net_device *dev, const u8 *mac_addr,
87 struct station_info *sinfo, gfp_t gfp);
88void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
89 struct net_device *dev, const u8 *mac_addr,
90 gfp_t gfp);
91
92void nl80211_send_conn_failed_event(struct cfg80211_registered_device *rdev,
93 struct net_device *dev, const u8 *mac_addr,
94 enum nl80211_connect_failed_reason reason,
95 gfp_t gfp);
96
97int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, 66int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
98 struct wireless_dev *wdev, u32 nlpid, 67 struct wireless_dev *wdev, u32 nlpid,
99 int freq, int sig_dbm, 68 int freq, int sig_dbm,
100 const u8 *buf, size_t len, gfp_t gfp); 69 const u8 *buf, size_t len, gfp_t gfp);
101void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
102 struct wireless_dev *wdev, u64 cookie,
103 const u8 *buf, size_t len, bool ack,
104 gfp_t gfp);
105
106void
107nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
108 struct net_device *netdev,
109 enum nl80211_cqm_rssi_threshold_event rssi_event,
110 gfp_t gfp);
111 70
112void 71void
113nl80211_radar_notify(struct cfg80211_registered_device *rdev, 72nl80211_radar_notify(struct cfg80211_registered_device *rdev,
@@ -115,31 +74,4 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev,
115 enum nl80211_radar_event event, 74 enum nl80211_radar_event event,
116 struct net_device *netdev, gfp_t gfp); 75 struct net_device *netdev, gfp_t gfp);
117 76
118void
119nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
120 struct net_device *netdev, const u8 *peer,
121 u32 num_packets, gfp_t gfp);
122
123void
124nl80211_send_cqm_txe_notify(struct cfg80211_registered_device *rdev,
125 struct net_device *netdev, const u8 *peer,
126 u32 num_packets, u32 rate, u32 intvl, gfp_t gfp);
127
128void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
129 struct net_device *netdev, const u8 *bssid,
130 const u8 *replay_ctr, gfp_t gfp);
131
132void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
133 struct net_device *netdev, int index,
134 const u8 *bssid, bool preauth, gfp_t gfp);
135
136void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
137 struct net_device *dev,
138 struct cfg80211_chan_def *chandef, gfp_t gfp);
139
140bool nl80211_unexpected_frame(struct net_device *dev,
141 const u8 *addr, gfp_t gfp);
142bool nl80211_unexpected_4addr_frame(struct net_device *dev,
143 const u8 *addr, gfp_t gfp);
144
145#endif /* __NET_WIRELESS_NL80211_H */ 77#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 422d38291d66..d77e1c1d3a0e 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -6,11 +6,12 @@
6#include "core.h" 6#include "core.h"
7#include "trace.h" 7#include "trace.h"
8 8
9static inline int rdev_suspend(struct cfg80211_registered_device *rdev) 9static inline int rdev_suspend(struct cfg80211_registered_device *rdev,
10 struct cfg80211_wowlan *wowlan)
10{ 11{
11 int ret; 12 int ret;
12 trace_rdev_suspend(&rdev->wiphy, rdev->wowlan); 13 trace_rdev_suspend(&rdev->wiphy, wowlan);
13 ret = rdev->ops->suspend(&rdev->wiphy, rdev->wowlan); 14 ret = rdev->ops->suspend(&rdev->wiphy, wowlan);
14 trace_rdev_return_int(&rdev->wiphy, ret); 15 trace_rdev_return_int(&rdev->wiphy, ret);
15 return ret; 16 return ret;
16} 17}
@@ -887,4 +888,17 @@ static inline int rdev_set_mac_acl(struct cfg80211_registered_device *rdev,
887 trace_rdev_return_int(&rdev->wiphy, ret); 888 trace_rdev_return_int(&rdev->wiphy, ret);
888 return ret; 889 return ret;
889} 890}
891
892static inline int rdev_update_ft_ies(struct cfg80211_registered_device *rdev,
893 struct net_device *dev,
894 struct cfg80211_update_ft_ies_params *ftie)
895{
896 int ret;
897
898 trace_rdev_update_ft_ies(&rdev->wiphy, dev, ftie);
899 ret = rdev->ops->update_ft_ies(&rdev->wiphy, dev, ftie);
900 trace_rdev_return_int(&rdev->wiphy, ret);
901 return ret;
902}
903
890#endif /* __CFG80211_RDEV_OPS */ 904#endif /* __CFG80211_RDEV_OPS */
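The new rdev_update_ft_ies() above follows the standard rdev-ops wrapper pattern: trace the call, dispatch to the driver's cfg80211_ops hook, trace the return value. For reference, a hypothetical driver hook with the matching signature (only the op name and the md/ie/ie_len fields come from this patch; the body is a placeholder):

#include <linux/errno.h>
#include <net/cfg80211.h>

/* Hypothetical .update_ft_ies implementation dispatched by rdev_update_ft_ies(). */
static int drv_update_ft_ies(struct wiphy *wiphy, struct net_device *dev,
			     struct cfg80211_update_ft_ies_params *ftie)
{
	/*
	 * ftie->md carries the mobility domain, ftie->ie/ftie->ie_len the
	 * FT IEs handed in by nl80211_update_ft_ies() earlier in this patch.
	 */
	if (!ftie->ie || !ftie->ie_len)
		return -EINVAL;

	/* push the updated FT IEs down to firmware/hardware here */
	return 0;
}

static const struct cfg80211_ops drv_cfg80211_ops = {
	/* ... other ops ... */
	.update_ft_ies = drv_update_ft_ies,
};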
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 98532c00242d..e6df52dc8c69 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -184,14 +184,14 @@ static const struct ieee80211_regdomain world_regdom = {
184 NL80211_RRF_NO_IBSS | 184 NL80211_RRF_NO_IBSS |
185 NL80211_RRF_NO_OFDM), 185 NL80211_RRF_NO_OFDM),
186 /* IEEE 802.11a, channel 36..48 */ 186 /* IEEE 802.11a, channel 36..48 */
187 REG_RULE(5180-10, 5240+10, 40, 6, 20, 187 REG_RULE(5180-10, 5240+10, 80, 6, 20,
188 NL80211_RRF_PASSIVE_SCAN | 188 NL80211_RRF_PASSIVE_SCAN |
189 NL80211_RRF_NO_IBSS), 189 NL80211_RRF_NO_IBSS),
190 190
191 /* NB: 5260 MHz - 5700 MHz requies DFS */ 191 /* NB: 5260 MHz - 5700 MHz requires DFS */
192 192
193 /* IEEE 802.11a, channel 149..165 */ 193 /* IEEE 802.11a, channel 149..165 */
194 REG_RULE(5745-10, 5825+10, 40, 6, 20, 194 REG_RULE(5745-10, 5825+10, 80, 6, 20,
195 NL80211_RRF_PASSIVE_SCAN | 195 NL80211_RRF_PASSIVE_SCAN |
196 NL80211_RRF_NO_IBSS), 196 NL80211_RRF_NO_IBSS),
197 197
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 09d994d192ff..818ad637819a 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -160,7 +160,7 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
160{ 160{
161 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 161 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
162 struct cfg80211_connect_params *params; 162 struct cfg80211_connect_params *params;
163 const u8 *prev_bssid = NULL; 163 struct cfg80211_assoc_request req = {};
164 int err; 164 int err;
165 165
166 ASSERT_WDEV_LOCK(wdev); 166 ASSERT_WDEV_LOCK(wdev);
@@ -187,16 +187,20 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
187 BUG_ON(!rdev->ops->assoc); 187 BUG_ON(!rdev->ops->assoc);
188 wdev->conn->state = CFG80211_CONN_ASSOCIATING; 188 wdev->conn->state = CFG80211_CONN_ASSOCIATING;
189 if (wdev->conn->prev_bssid_valid) 189 if (wdev->conn->prev_bssid_valid)
190 prev_bssid = wdev->conn->prev_bssid; 190 req.prev_bssid = wdev->conn->prev_bssid;
191 err = __cfg80211_mlme_assoc(rdev, wdev->netdev, 191 req.ie = params->ie;
192 params->channel, params->bssid, 192 req.ie_len = params->ie_len;
193 prev_bssid, 193 req.use_mfp = params->mfp != NL80211_MFP_NO;
194 params->ssid, params->ssid_len, 194 req.crypto = params->crypto;
195 params->ie, params->ie_len, 195 req.flags = params->flags;
196 params->mfp != NL80211_MFP_NO, 196 req.ht_capa = params->ht_capa;
197 &params->crypto, 197 req.ht_capa_mask = params->ht_capa_mask;
198 params->flags, &params->ht_capa, 198 req.vht_capa = params->vht_capa;
199 &params->ht_capa_mask); 199 req.vht_capa_mask = params->vht_capa_mask;
200
201 err = __cfg80211_mlme_assoc(rdev, wdev->netdev, params->channel,
202 params->bssid, params->ssid,
203 params->ssid_len, &req);
200 if (err) 204 if (err)
201 __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, 205 __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
202 NULL, 0, 206 NULL, 0,
@@ -231,7 +235,7 @@ void cfg80211_conn_work(struct work_struct *work)
231 wdev_unlock(wdev); 235 wdev_unlock(wdev);
232 continue; 236 continue;
233 } 237 }
234 if (wdev->sme_state != CFG80211_SME_CONNECTING) { 238 if (wdev->sme_state != CFG80211_SME_CONNECTING || !wdev->conn) {
235 wdev_unlock(wdev); 239 wdev_unlock(wdev);
236 continue; 240 continue;
237 } 241 }
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 238ee49b3868..8f28b9f798d8 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -83,6 +83,14 @@ static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env)
83 return 0; 83 return 0;
84} 84}
85 85
86static void cfg80211_leave_all(struct cfg80211_registered_device *rdev)
87{
88 struct wireless_dev *wdev;
89
90 list_for_each_entry(wdev, &rdev->wdev_list, list)
91 cfg80211_leave(rdev, wdev);
92}
93
86static int wiphy_suspend(struct device *dev, pm_message_t state) 94static int wiphy_suspend(struct device *dev, pm_message_t state)
87{ 95{
88 struct cfg80211_registered_device *rdev = dev_to_rdev(dev); 96 struct cfg80211_registered_device *rdev = dev_to_rdev(dev);
@@ -90,12 +98,19 @@ static int wiphy_suspend(struct device *dev, pm_message_t state)
90 98
91 rdev->suspend_at = get_seconds(); 99 rdev->suspend_at = get_seconds();
92 100
93 if (rdev->ops->suspend) { 101 rtnl_lock();
94 rtnl_lock(); 102 if (rdev->wiphy.registered) {
95 if (rdev->wiphy.registered) 103 if (!rdev->wowlan)
96 ret = rdev_suspend(rdev); 104 cfg80211_leave_all(rdev);
97 rtnl_unlock(); 105 if (rdev->ops->suspend)
106 ret = rdev_suspend(rdev, rdev->wowlan);
107 if (ret == 1) {
108 /* Driver refuse to configure wowlan */
109 cfg80211_leave_all(rdev);
110 ret = rdev_suspend(rdev, NULL);
111 }
98 } 112 }
113 rtnl_unlock();
99 114
100 return ret; 115 return ret;
101} 116}
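The reworked wiphy_suspend() above also gives the driver's suspend op an explicit contract: if no WoWLAN configuration is set, cfg80211 first tears down all connections via the new cfg80211_leave_all(); if the driver returns 1 from suspend, that is taken as "cannot honour this WoWLAN configuration", connections are torn down and the suspend is retried with a NULL wowlan argument. A hypothetical driver suspend hook honouring that contract (the drv_* helpers are made-up stubs, not part of this patch):

#include <net/cfg80211.h>

/* Made-up driver helpers, stubbed out so the sketch is self-contained. */
static bool drv_supports_wowlan(void) { return false; }
static int drv_program_wowlan(struct cfg80211_wowlan *wowlan) { return 0; }
static int drv_power_down(void) { return 0; }

/* Hypothetical .suspend implementation matching the wiphy_suspend() flow above. */
static int drv_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowlan)
{
	if (wowlan && !drv_supports_wowlan())
		return 1;	/* cfg80211 leaves all connections and retries with wowlan == NULL */

	if (wowlan)
		return drv_program_wowlan(wowlan);

	return drv_power_down();
}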
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 7586de77a2f8..3c2033b8f596 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1786,6 +1786,26 @@ TRACE_EVENT(rdev_set_mac_acl,
1786 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->acl_policy) 1786 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->acl_policy)
1787); 1787);
1788 1788
1789TRACE_EVENT(rdev_update_ft_ies,
1790 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
1791 struct cfg80211_update_ft_ies_params *ftie),
1792 TP_ARGS(wiphy, netdev, ftie),
1793 TP_STRUCT__entry(
1794 WIPHY_ENTRY
1795 NETDEV_ENTRY
1796 __field(u16, md)
1797 __dynamic_array(u8, ie, ftie->ie_len)
1798 ),
1799 TP_fast_assign(
1800 WIPHY_ASSIGN;
1801 NETDEV_ASSIGN;
1802 __entry->md = ftie->md;
1803 memcpy(__get_dynamic_array(ie), ftie->ie, ftie->ie_len);
1804 ),
1805 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", md: 0x%x",
1806 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->md)
1807);
1808
1789/************************************************************* 1809/*************************************************************
1790 * cfg80211 exported functions traces * 1810 * cfg80211 exported functions traces *
1791 *************************************************************/ 1811 *************************************************************/
@@ -2414,6 +2434,32 @@ TRACE_EVENT(cfg80211_report_wowlan_wakeup,
2414 TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG) 2434 TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG)
2415); 2435);
2416 2436
2437TRACE_EVENT(cfg80211_ft_event,
2438 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
2439 struct cfg80211_ft_event_params *ft_event),
2440 TP_ARGS(wiphy, netdev, ft_event),
2441 TP_STRUCT__entry(
2442 WIPHY_ENTRY
2443 NETDEV_ENTRY
2444 __dynamic_array(u8, ies, ft_event->ies_len)
2445 MAC_ENTRY(target_ap)
2446 __dynamic_array(u8, ric_ies, ft_event->ric_ies_len)
2447 ),
2448 TP_fast_assign(
2449 WIPHY_ASSIGN;
2450 NETDEV_ASSIGN;
2451 if (ft_event->ies)
2452 memcpy(__get_dynamic_array(ies), ft_event->ies,
2453 ft_event->ies_len);
2454 MAC_ASSIGN(target_ap, ft_event->target_ap);
2455 if (ft_event->ric_ies)
2456 memcpy(__get_dynamic_array(ric_ies), ft_event->ric_ies,
2457 ft_event->ric_ies_len);
2458 ),
2459 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", target_ap: " MAC_PR_FMT,
2460 WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(target_ap))
2461);
2462
2417#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */ 2463#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
2418 2464
2419#undef TRACE_INCLUDE_PATH 2465#undef TRACE_INCLUDE_PATH