Diffstat (limited to 'net')
39 files changed, 1592 insertions(+), 1192 deletions(-)
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index eb0f4b16ff09..17f33a62f6db 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -397,13 +397,12 @@ static int a2mp_getampassoc_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
397 | if (ctrl) { | 397 | if (ctrl) { |
398 | u8 *assoc; | 398 | u8 *assoc; |
399 | 399 | ||
400 | assoc = kzalloc(assoc_len, GFP_KERNEL); | 400 | assoc = kmemdup(rsp->amp_assoc, assoc_len, GFP_KERNEL); |
401 | if (!assoc) { | 401 | if (!assoc) { |
402 | amp_ctrl_put(ctrl); | 402 | amp_ctrl_put(ctrl); |
403 | return -ENOMEM; | 403 | return -ENOMEM; |
404 | } | 404 | } |
405 | 405 | ||
406 | memcpy(assoc, rsp->amp_assoc, assoc_len); | ||
407 | ctrl->assoc = assoc; | 406 | ctrl->assoc = assoc; |
408 | ctrl->assoc_len = assoc_len; | 407 | ctrl->assoc_len = assoc_len; |
409 | ctrl->assoc_rem_len = assoc_len; | 408 | ctrl->assoc_rem_len = assoc_len; |
@@ -472,13 +471,12 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
472 | size_t assoc_len = le16_to_cpu(hdr->len) - sizeof(*req); | 471 | size_t assoc_len = le16_to_cpu(hdr->len) - sizeof(*req); |
473 | u8 *assoc; | 472 | u8 *assoc; |
474 | 473 | ||
475 | assoc = kzalloc(assoc_len, GFP_KERNEL); | 474 | assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL); |
476 | if (!assoc) { | 475 | if (!assoc) { |
477 | amp_ctrl_put(ctrl); | 476 | amp_ctrl_put(ctrl); |
478 | return -ENOMEM; | 477 | return -ENOMEM; |
479 | } | 478 | } |
480 | 479 | ||
481 | memcpy(assoc, req->amp_assoc, assoc_len); | ||
482 | ctrl->assoc = assoc; | 480 | ctrl->assoc = assoc; |
483 | ctrl->assoc_len = assoc_len; | 481 | ctrl->assoc_len = assoc_len; |
484 | ctrl->assoc_rem_len = assoc_len; | 482 | ctrl->assoc_rem_len = assoc_len; |
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index d5a973bf8a6f..e5338f787d68 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -92,23 +92,14 @@ int bt_sock_register(int proto, const struct net_proto_family *ops)
92 | } | 92 | } |
93 | EXPORT_SYMBOL(bt_sock_register); | 93 | EXPORT_SYMBOL(bt_sock_register); |
94 | 94 | ||
95 | int bt_sock_unregister(int proto) | 95 | void bt_sock_unregister(int proto) |
96 | { | 96 | { |
97 | int err = 0; | ||
98 | |||
99 | if (proto < 0 || proto >= BT_MAX_PROTO) | 97 | if (proto < 0 || proto >= BT_MAX_PROTO) |
100 | return -EINVAL; | 98 | return; |
101 | 99 | ||
102 | write_lock(&bt_proto_lock); | 100 | write_lock(&bt_proto_lock); |
103 | 101 | bt_proto[proto] = NULL; | |
104 | if (!bt_proto[proto]) | ||
105 | err = -ENOENT; | ||
106 | else | ||
107 | bt_proto[proto] = NULL; | ||
108 | |||
109 | write_unlock(&bt_proto_lock); | 102 | write_unlock(&bt_proto_lock); |
110 | |||
111 | return err; | ||
112 | } | 103 | } |
113 | EXPORT_SYMBOL(bt_sock_unregister); | 104 | EXPORT_SYMBOL(bt_sock_unregister); |
114 | 105 | ||
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index e7154a58465f..5b1c04e28821 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -253,8 +253,6 @@ error:
253 | void __exit bnep_sock_cleanup(void) | 253 | void __exit bnep_sock_cleanup(void) |
254 | { | 254 | { |
255 | bt_procfs_cleanup(&init_net, "bnep"); | 255 | bt_procfs_cleanup(&init_net, "bnep"); |
256 | if (bt_sock_unregister(BTPROTO_BNEP) < 0) | 256 | bt_sock_unregister(BTPROTO_BNEP); |
257 | BT_ERR("Can't unregister BNEP socket"); | ||
258 | |||
259 | proto_unregister(&bnep_proto); | 257 | proto_unregister(&bnep_proto); |
260 | } | 258 | } |
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 1c57482112b6..58d9edebab4b 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -264,8 +264,6 @@ error:
264 | void cmtp_cleanup_sockets(void) | 264 | void cmtp_cleanup_sockets(void) |
265 | { | 265 | { |
266 | bt_procfs_cleanup(&init_net, "cmtp"); | 266 | bt_procfs_cleanup(&init_net, "cmtp"); |
267 | if (bt_sock_unregister(BTPROTO_CMTP) < 0) | 267 | bt_sock_unregister(BTPROTO_CMTP); |
268 | BT_ERR("Can't unregister CMTP socket"); | ||
269 | |||
270 | proto_unregister(&cmtp_proto); | 268 | proto_unregister(&cmtp_proto); |
271 | } | 269 | } |
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 4925a02ae7e4..b9f90169940b 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -117,7 +117,7 @@ static void hci_acl_create_connection_cancel(struct hci_conn *conn)
117 | hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp); | 117 | hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp); |
118 | } | 118 | } |
119 | 119 | ||
120 | void hci_acl_disconn(struct hci_conn *conn, __u8 reason) | 120 | void hci_disconnect(struct hci_conn *conn, __u8 reason) |
121 | { | 121 | { |
122 | struct hci_cp_disconnect cp; | 122 | struct hci_cp_disconnect cp; |
123 | 123 | ||
@@ -253,7 +253,7 @@ static void hci_conn_disconnect(struct hci_conn *conn)
253 | hci_amp_disconn(conn, reason); | 253 | hci_amp_disconn(conn, reason); |
254 | break; | 254 | break; |
255 | default: | 255 | default: |
256 | hci_acl_disconn(conn, reason); | 256 | hci_disconnect(conn, reason); |
257 | break; | 257 | break; |
258 | } | 258 | } |
259 | } | 259 | } |
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 60793e7b768b..cfcad5423f1c 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -57,36 +57,9 @@ static void hci_notify(struct hci_dev *hdev, int event)
57 | 57 | ||
58 | /* ---- HCI requests ---- */ | 58 | /* ---- HCI requests ---- */ |
59 | 59 | ||
60 | void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result) | 60 | static void hci_req_sync_complete(struct hci_dev *hdev, u8 result) |
61 | { | 61 | { |
62 | BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result); | 62 | BT_DBG("%s result 0x%2.2x", hdev->name, result); |
63 | |||
64 | /* If this is the init phase check if the completed command matches | ||
65 | * the last init command, and if not just return. | ||
66 | */ | ||
67 | if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) { | ||
68 | struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data; | ||
69 | u16 opcode = __le16_to_cpu(sent->opcode); | ||
70 | struct sk_buff *skb; | ||
71 | |||
72 | /* Some CSR based controllers generate a spontaneous | ||
73 | * reset complete event during init and any pending | ||
74 | * command will never be completed. In such a case we | ||
75 | * need to resend whatever was the last sent | ||
76 | * command. | ||
77 | */ | ||
78 | |||
79 | if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET) | ||
80 | return; | ||
81 | |||
82 | skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC); | ||
83 | if (skb) { | ||
84 | skb_queue_head(&hdev->cmd_q, skb); | ||
85 | queue_work(hdev->workqueue, &hdev->cmd_work); | ||
86 | } | ||
87 | |||
88 | return; | ||
89 | } | ||
90 | 63 | ||
91 | if (hdev->req_status == HCI_REQ_PEND) { | 64 | if (hdev->req_status == HCI_REQ_PEND) { |
92 | hdev->req_result = result; | 65 | hdev->req_result = result; |
@@ -107,21 +80,41 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
107 | } | 80 | } |
108 | 81 | ||
109 | /* Execute request and wait for completion. */ | 82 | /* Execute request and wait for completion. */ |
110 | static int __hci_request(struct hci_dev *hdev, | 83 | static int __hci_req_sync(struct hci_dev *hdev, |
111 | void (*req)(struct hci_dev *hdev, unsigned long opt), | 84 | void (*func)(struct hci_request *req, |
112 | unsigned long opt, __u32 timeout) | 85 | unsigned long opt), |
86 | unsigned long opt, __u32 timeout) | ||
113 | { | 87 | { |
88 | struct hci_request req; | ||
114 | DECLARE_WAITQUEUE(wait, current); | 89 | DECLARE_WAITQUEUE(wait, current); |
115 | int err = 0; | 90 | int err = 0; |
116 | 91 | ||
117 | BT_DBG("%s start", hdev->name); | 92 | BT_DBG("%s start", hdev->name); |
118 | 93 | ||
94 | hci_req_init(&req, hdev); | ||
95 | |||
119 | hdev->req_status = HCI_REQ_PEND; | 96 | hdev->req_status = HCI_REQ_PEND; |
120 | 97 | ||
98 | func(&req, opt); | ||
99 | |||
100 | err = hci_req_run(&req, hci_req_sync_complete); | ||
101 | if (err < 0) { | ||
102 | hdev->req_status = 0; | ||
103 | |||
104 | /* ENODATA means the HCI request command queue is empty. | ||
105 | * This can happen when a request with conditionals doesn't | ||
106 | * trigger any commands to be sent. This is normal behavior | ||
107 | * and should not trigger an error return. | ||
108 | */ | ||
109 | if (err == -ENODATA) | ||
110 | return 0; | ||
111 | |||
112 | return err; | ||
113 | } | ||
114 | |||
121 | add_wait_queue(&hdev->req_wait_q, &wait); | 115 | add_wait_queue(&hdev->req_wait_q, &wait); |
122 | set_current_state(TASK_INTERRUPTIBLE); | 116 | set_current_state(TASK_INTERRUPTIBLE); |
123 | 117 | ||
124 | req(hdev, opt); | ||
125 | schedule_timeout(timeout); | 118 | schedule_timeout(timeout); |
126 | 119 | ||
127 | remove_wait_queue(&hdev->req_wait_q, &wait); | 120 | remove_wait_queue(&hdev->req_wait_q, &wait); |
@@ -150,9 +143,10 @@ static int __hci_request(struct hci_dev *hdev,
150 | return err; | 143 | return err; |
151 | } | 144 | } |
152 | 145 | ||
153 | static int hci_request(struct hci_dev *hdev, | 146 | static int hci_req_sync(struct hci_dev *hdev, |
154 | void (*req)(struct hci_dev *hdev, unsigned long opt), | 147 | void (*req)(struct hci_request *req, |
155 | unsigned long opt, __u32 timeout) | 148 | unsigned long opt), |
149 | unsigned long opt, __u32 timeout) | ||
156 | { | 150 | { |
157 | int ret; | 151 | int ret; |
158 | 152 | ||
@@ -161,75 +155,86 @@ static int hci_request(struct hci_dev *hdev,
161 | 155 | ||
162 | /* Serialize all requests */ | 156 | /* Serialize all requests */ |
163 | hci_req_lock(hdev); | 157 | hci_req_lock(hdev); |
164 | ret = __hci_request(hdev, req, opt, timeout); | 158 | ret = __hci_req_sync(hdev, req, opt, timeout); |
165 | hci_req_unlock(hdev); | 159 | hci_req_unlock(hdev); |
166 | 160 | ||
167 | return ret; | 161 | return ret; |
168 | } | 162 | } |
169 | 163 | ||
170 | static void hci_reset_req(struct hci_dev *hdev, unsigned long opt) | 164 | static void hci_reset_req(struct hci_request *req, unsigned long opt) |
171 | { | 165 | { |
172 | BT_DBG("%s %ld", hdev->name, opt); | 166 | BT_DBG("%s %ld", req->hdev->name, opt); |
173 | 167 | ||
174 | /* Reset device */ | 168 | /* Reset device */ |
175 | set_bit(HCI_RESET, &hdev->flags); | 169 | set_bit(HCI_RESET, &req->hdev->flags); |
176 | hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); | 170 | hci_req_add(req, HCI_OP_RESET, 0, NULL); |
177 | } | 171 | } |
178 | 172 | ||
179 | static void bredr_init(struct hci_dev *hdev) | 173 | static void bredr_init(struct hci_request *req) |
180 | { | 174 | { |
181 | hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; | 175 | req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; |
182 | 176 | ||
183 | /* Read Local Supported Features */ | 177 | /* Read Local Supported Features */ |
184 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); | 178 | hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); |
185 | 179 | ||
186 | /* Read Local Version */ | 180 | /* Read Local Version */ |
187 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL); | 181 | hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL); |
182 | |||
183 | /* Read BD Address */ | ||
184 | hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL); | ||
188 | } | 185 | } |
189 | 186 | ||
190 | static void amp_init(struct hci_dev *hdev) | 187 | static void amp_init(struct hci_request *req) |
191 | { | 188 | { |
192 | hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED; | 189 | req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED; |
193 | 190 | ||
194 | /* Read Local Version */ | 191 | /* Read Local Version */ |
195 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL); | 192 | hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL); |
196 | 193 | ||
197 | /* Read Local AMP Info */ | 194 | /* Read Local AMP Info */ |
198 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL); | 195 | hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL); |
199 | 196 | ||
200 | /* Read Data Blk size */ | 197 | /* Read Data Blk size */ |
201 | hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL); | 198 | hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL); |
202 | } | 199 | } |
203 | 200 | ||
204 | static void hci_init_req(struct hci_dev *hdev, unsigned long opt) | 201 | static void hci_init1_req(struct hci_request *req, unsigned long opt) |
205 | { | 202 | { |
203 | struct hci_dev *hdev = req->hdev; | ||
204 | struct hci_request init_req; | ||
206 | struct sk_buff *skb; | 205 | struct sk_buff *skb; |
207 | 206 | ||
208 | BT_DBG("%s %ld", hdev->name, opt); | 207 | BT_DBG("%s %ld", hdev->name, opt); |
209 | 208 | ||
210 | /* Driver initialization */ | 209 | /* Driver initialization */ |
211 | 210 | ||
211 | hci_req_init(&init_req, hdev); | ||
212 | |||
212 | /* Special commands */ | 213 | /* Special commands */ |
213 | while ((skb = skb_dequeue(&hdev->driver_init))) { | 214 | while ((skb = skb_dequeue(&hdev->driver_init))) { |
214 | bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; | 215 | bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; |
215 | skb->dev = (void *) hdev; | 216 | skb->dev = (void *) hdev; |
216 | 217 | ||
217 | skb_queue_tail(&hdev->cmd_q, skb); | 218 | if (skb_queue_empty(&init_req.cmd_q)) |
218 | queue_work(hdev->workqueue, &hdev->cmd_work); | 219 | bt_cb(skb)->req.start = true; |
220 | |||
221 | skb_queue_tail(&init_req.cmd_q, skb); | ||
219 | } | 222 | } |
220 | skb_queue_purge(&hdev->driver_init); | 223 | skb_queue_purge(&hdev->driver_init); |
221 | 224 | ||
225 | hci_req_run(&init_req, NULL); | ||
226 | |||
222 | /* Reset */ | 227 | /* Reset */ |
223 | if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) | 228 | if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) |
224 | hci_reset_req(hdev, 0); | 229 | hci_reset_req(req, 0); |
225 | 230 | ||
226 | switch (hdev->dev_type) { | 231 | switch (hdev->dev_type) { |
227 | case HCI_BREDR: | 232 | case HCI_BREDR: |
228 | bredr_init(hdev); | 233 | bredr_init(req); |
229 | break; | 234 | break; |
230 | 235 | ||
231 | case HCI_AMP: | 236 | case HCI_AMP: |
232 | amp_init(hdev); | 237 | amp_init(req); |
233 | break; | 238 | break; |
234 | 239 | ||
235 | default: | 240 | default: |
@@ -238,44 +243,327 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
238 | } | 243 | } |
239 | } | 244 | } |
240 | 245 | ||
241 | static void hci_scan_req(struct hci_dev *hdev, unsigned long opt) | 246 | static void bredr_setup(struct hci_request *req) |
247 | { | ||
248 | struct hci_cp_delete_stored_link_key cp; | ||
249 | __le16 param; | ||
250 | __u8 flt_type; | ||
251 | |||
252 | /* Read Buffer Size (ACL mtu, max pkt, etc.) */ | ||
253 | hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL); | ||
254 | |||
255 | /* Read Class of Device */ | ||
256 | hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL); | ||
257 | |||
258 | /* Read Local Name */ | ||
259 | hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL); | ||
260 | |||
261 | /* Read Voice Setting */ | ||
262 | hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL); | ||
263 | |||
264 | /* Clear Event Filters */ | ||
265 | flt_type = HCI_FLT_CLEAR_ALL; | ||
266 | hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type); | ||
267 | |||
268 | /* Connection accept timeout ~20 secs */ | ||
269 | param = __constant_cpu_to_le16(0x7d00); | ||
270 | hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m); | ||
271 | |||
272 | bacpy(&cp.bdaddr, BDADDR_ANY); | ||
273 | cp.delete_all = 0x01; | ||
274 | hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp); | ||
275 | |||
276 | /* Read page scan parameters */ | ||
277 | if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) { | ||
278 | hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL); | ||
279 | hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL); | ||
280 | } | ||
281 | } | ||
282 | |||
283 | static void le_setup(struct hci_request *req) | ||
284 | { | ||
285 | /* Read LE Buffer Size */ | ||
286 | hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL); | ||
287 | |||
288 | /* Read LE Local Supported Features */ | ||
289 | hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL); | ||
290 | |||
291 | /* Read LE Advertising Channel TX Power */ | ||
292 | hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL); | ||
293 | |||
294 | /* Read LE White List Size */ | ||
295 | hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL); | ||
296 | |||
297 | /* Read LE Supported States */ | ||
298 | hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL); | ||
299 | } | ||
300 | |||
301 | static u8 hci_get_inquiry_mode(struct hci_dev *hdev) | ||
302 | { | ||
303 | if (lmp_ext_inq_capable(hdev)) | ||
304 | return 0x02; | ||
305 | |||
306 | if (lmp_inq_rssi_capable(hdev)) | ||
307 | return 0x01; | ||
308 | |||
309 | if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 && | ||
310 | hdev->lmp_subver == 0x0757) | ||
311 | return 0x01; | ||
312 | |||
313 | if (hdev->manufacturer == 15) { | ||
314 | if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963) | ||
315 | return 0x01; | ||
316 | if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963) | ||
317 | return 0x01; | ||
318 | if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965) | ||
319 | return 0x01; | ||
320 | } | ||
321 | |||
322 | if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 && | ||
323 | hdev->lmp_subver == 0x1805) | ||
324 | return 0x01; | ||
325 | |||
326 | return 0x00; | ||
327 | } | ||
328 | |||
329 | static void hci_setup_inquiry_mode(struct hci_request *req) | ||
330 | { | ||
331 | u8 mode; | ||
332 | |||
333 | mode = hci_get_inquiry_mode(req->hdev); | ||
334 | |||
335 | hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode); | ||
336 | } | ||
337 | |||
338 | static void hci_setup_event_mask(struct hci_request *req) | ||
339 | { | ||
340 | struct hci_dev *hdev = req->hdev; | ||
341 | |||
342 | /* The second byte is 0xff instead of 0x9f (two reserved bits | ||
343 | * disabled) since a Broadcom 1.2 dongle doesn't respond to the | ||
344 | * command otherwise. | ||
345 | */ | ||
346 | u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 }; | ||
347 | |||
348 | /* CSR 1.1 dongles does not accept any bitfield so don't try to set | ||
349 | * any event mask for pre 1.2 devices. | ||
350 | */ | ||
351 | if (hdev->hci_ver < BLUETOOTH_VER_1_2) | ||
352 | return; | ||
353 | |||
354 | if (lmp_bredr_capable(hdev)) { | ||
355 | events[4] |= 0x01; /* Flow Specification Complete */ | ||
356 | events[4] |= 0x02; /* Inquiry Result with RSSI */ | ||
357 | events[4] |= 0x04; /* Read Remote Extended Features Complete */ | ||
358 | events[5] |= 0x08; /* Synchronous Connection Complete */ | ||
359 | events[5] |= 0x10; /* Synchronous Connection Changed */ | ||
360 | } | ||
361 | |||
362 | if (lmp_inq_rssi_capable(hdev)) | ||
363 | events[4] |= 0x02; /* Inquiry Result with RSSI */ | ||
364 | |||
365 | if (lmp_sniffsubr_capable(hdev)) | ||
366 | events[5] |= 0x20; /* Sniff Subrating */ | ||
367 | |||
368 | if (lmp_pause_enc_capable(hdev)) | ||
369 | events[5] |= 0x80; /* Encryption Key Refresh Complete */ | ||
370 | |||
371 | if (lmp_ext_inq_capable(hdev)) | ||
372 | events[5] |= 0x40; /* Extended Inquiry Result */ | ||
373 | |||
374 | if (lmp_no_flush_capable(hdev)) | ||
375 | events[7] |= 0x01; /* Enhanced Flush Complete */ | ||
376 | |||
377 | if (lmp_lsto_capable(hdev)) | ||
378 | events[6] |= 0x80; /* Link Supervision Timeout Changed */ | ||
379 | |||
380 | if (lmp_ssp_capable(hdev)) { | ||
381 | events[6] |= 0x01; /* IO Capability Request */ | ||
382 | events[6] |= 0x02; /* IO Capability Response */ | ||
383 | events[6] |= 0x04; /* User Confirmation Request */ | ||
384 | events[6] |= 0x08; /* User Passkey Request */ | ||
385 | events[6] |= 0x10; /* Remote OOB Data Request */ | ||
386 | events[6] |= 0x20; /* Simple Pairing Complete */ | ||
387 | events[7] |= 0x04; /* User Passkey Notification */ | ||
388 | events[7] |= 0x08; /* Keypress Notification */ | ||
389 | events[7] |= 0x10; /* Remote Host Supported | ||
390 | * Features Notification | ||
391 | */ | ||
392 | } | ||
393 | |||
394 | if (lmp_le_capable(hdev)) | ||
395 | events[7] |= 0x20; /* LE Meta-Event */ | ||
396 | |||
397 | hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events); | ||
398 | |||
399 | if (lmp_le_capable(hdev)) { | ||
400 | memset(events, 0, sizeof(events)); | ||
401 | events[0] = 0x1f; | ||
402 | hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, | ||
403 | sizeof(events), events); | ||
404 | } | ||
405 | } | ||
406 | |||
407 | static void hci_init2_req(struct hci_request *req, unsigned long opt) | ||
408 | { | ||
409 | struct hci_dev *hdev = req->hdev; | ||
410 | |||
411 | if (lmp_bredr_capable(hdev)) | ||
412 | bredr_setup(req); | ||
413 | |||
414 | if (lmp_le_capable(hdev)) | ||
415 | le_setup(req); | ||
416 | |||
417 | hci_setup_event_mask(req); | ||
418 | |||
419 | if (hdev->hci_ver > BLUETOOTH_VER_1_1) | ||
420 | hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); | ||
421 | |||
422 | if (lmp_ssp_capable(hdev)) { | ||
423 | if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { | ||
424 | u8 mode = 0x01; | ||
425 | hci_req_add(req, HCI_OP_WRITE_SSP_MODE, | ||
426 | sizeof(mode), &mode); | ||
427 | } else { | ||
428 | struct hci_cp_write_eir cp; | ||
429 | |||
430 | memset(hdev->eir, 0, sizeof(hdev->eir)); | ||
431 | memset(&cp, 0, sizeof(cp)); | ||
432 | |||
433 | hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); | ||
434 | } | ||
435 | } | ||
436 | |||
437 | if (lmp_inq_rssi_capable(hdev)) | ||
438 | hci_setup_inquiry_mode(req); | ||
439 | |||
440 | if (lmp_inq_tx_pwr_capable(hdev)) | ||
441 | hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL); | ||
442 | |||
443 | if (lmp_ext_feat_capable(hdev)) { | ||
444 | struct hci_cp_read_local_ext_features cp; | ||
445 | |||
446 | cp.page = 0x01; | ||
447 | hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES, | ||
448 | sizeof(cp), &cp); | ||
449 | } | ||
450 | |||
451 | if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) { | ||
452 | u8 enable = 1; | ||
453 | hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable), | ||
454 | &enable); | ||
455 | } | ||
456 | } | ||
457 | |||
458 | static void hci_setup_link_policy(struct hci_request *req) | ||
459 | { | ||
460 | struct hci_dev *hdev = req->hdev; | ||
461 | struct hci_cp_write_def_link_policy cp; | ||
462 | u16 link_policy = 0; | ||
463 | |||
464 | if (lmp_rswitch_capable(hdev)) | ||
465 | link_policy |= HCI_LP_RSWITCH; | ||
466 | if (lmp_hold_capable(hdev)) | ||
467 | link_policy |= HCI_LP_HOLD; | ||
468 | if (lmp_sniff_capable(hdev)) | ||
469 | link_policy |= HCI_LP_SNIFF; | ||
470 | if (lmp_park_capable(hdev)) | ||
471 | link_policy |= HCI_LP_PARK; | ||
472 | |||
473 | cp.policy = cpu_to_le16(link_policy); | ||
474 | hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp); | ||
475 | } | ||
476 | |||
477 | static void hci_set_le_support(struct hci_request *req) | ||
478 | { | ||
479 | struct hci_dev *hdev = req->hdev; | ||
480 | struct hci_cp_write_le_host_supported cp; | ||
481 | |||
482 | memset(&cp, 0, sizeof(cp)); | ||
483 | |||
484 | if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { | ||
485 | cp.le = 0x01; | ||
486 | cp.simul = lmp_le_br_capable(hdev); | ||
487 | } | ||
488 | |||
489 | if (cp.le != lmp_host_le_capable(hdev)) | ||
490 | hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), | ||
491 | &cp); | ||
492 | } | ||
493 | |||
494 | static void hci_init3_req(struct hci_request *req, unsigned long opt) | ||
495 | { | ||
496 | struct hci_dev *hdev = req->hdev; | ||
497 | |||
498 | if (hdev->commands[5] & 0x10) | ||
499 | hci_setup_link_policy(req); | ||
500 | |||
501 | if (lmp_le_capable(hdev)) { | ||
502 | hci_set_le_support(req); | ||
503 | hci_update_ad(req); | ||
504 | } | ||
505 | } | ||
506 | |||
507 | static int __hci_init(struct hci_dev *hdev) | ||
508 | { | ||
509 | int err; | ||
510 | |||
511 | err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT); | ||
512 | if (err < 0) | ||
513 | return err; | ||
514 | |||
515 | /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode | ||
516 | * BR/EDR/LE type controllers. AMP controllers only need the | ||
517 | * first stage init. | ||
518 | */ | ||
519 | if (hdev->dev_type != HCI_BREDR) | ||
520 | return 0; | ||
521 | |||
522 | err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT); | ||
523 | if (err < 0) | ||
524 | return err; | ||
525 | |||
526 | return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT); | ||
527 | } | ||
528 | |||
529 | static void hci_scan_req(struct hci_request *req, unsigned long opt) | ||
242 | { | 530 | { |
243 | __u8 scan = opt; | 531 | __u8 scan = opt; |
244 | 532 | ||
245 | BT_DBG("%s %x", hdev->name, scan); | 533 | BT_DBG("%s %x", req->hdev->name, scan); |
246 | 534 | ||
247 | /* Inquiry and Page scans */ | 535 | /* Inquiry and Page scans */ |
248 | hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); | 536 | hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); |
249 | } | 537 | } |
250 | 538 | ||
251 | static void hci_auth_req(struct hci_dev *hdev, unsigned long opt) | 539 | static void hci_auth_req(struct hci_request *req, unsigned long opt) |
252 | { | 540 | { |
253 | __u8 auth = opt; | 541 | __u8 auth = opt; |
254 | 542 | ||
255 | BT_DBG("%s %x", hdev->name, auth); | 543 | BT_DBG("%s %x", req->hdev->name, auth); |
256 | 544 | ||
257 | /* Authentication */ | 545 | /* Authentication */ |
258 | hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth); | 546 | hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth); |
259 | } | 547 | } |
260 | 548 | ||
261 | static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt) | 549 | static void hci_encrypt_req(struct hci_request *req, unsigned long opt) |
262 | { | 550 | { |
263 | __u8 encrypt = opt; | 551 | __u8 encrypt = opt; |
264 | 552 | ||
265 | BT_DBG("%s %x", hdev->name, encrypt); | 553 | BT_DBG("%s %x", req->hdev->name, encrypt); |
266 | 554 | ||
267 | /* Encryption */ | 555 | /* Encryption */ |
268 | hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt); | 556 | hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt); |
269 | } | 557 | } |
270 | 558 | ||
271 | static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt) | 559 | static void hci_linkpol_req(struct hci_request *req, unsigned long opt) |
272 | { | 560 | { |
273 | __le16 policy = cpu_to_le16(opt); | 561 | __le16 policy = cpu_to_le16(opt); |
274 | 562 | ||
275 | BT_DBG("%s %x", hdev->name, policy); | 563 | BT_DBG("%s %x", req->hdev->name, policy); |
276 | 564 | ||
277 | /* Default link policy */ | 565 | /* Default link policy */ |
278 | hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy); | 566 | hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy); |
279 | } | 567 | } |
280 | 568 | ||
281 | /* Get HCI device by index. | 569 | /* Get HCI device by index. |
@@ -512,9 +800,10 @@ static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
512 | return copied; | 800 | return copied; |
513 | } | 801 | } |
514 | 802 | ||
515 | static void hci_inq_req(struct hci_dev *hdev, unsigned long opt) | 803 | static void hci_inq_req(struct hci_request *req, unsigned long opt) |
516 | { | 804 | { |
517 | struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt; | 805 | struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt; |
806 | struct hci_dev *hdev = req->hdev; | ||
518 | struct hci_cp_inquiry cp; | 807 | struct hci_cp_inquiry cp; |
519 | 808 | ||
520 | BT_DBG("%s", hdev->name); | 809 | BT_DBG("%s", hdev->name); |
@@ -526,7 +815,7 @@ static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
526 | memcpy(&cp.lap, &ir->lap, 3); | 815 | memcpy(&cp.lap, &ir->lap, 3); |
527 | cp.length = ir->length; | 816 | cp.length = ir->length; |
528 | cp.num_rsp = ir->num_rsp; | 817 | cp.num_rsp = ir->num_rsp; |
529 | hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp); | 818 | hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp); |
530 | } | 819 | } |
531 | 820 | ||
532 | int hci_inquiry(void __user *arg) | 821 | int hci_inquiry(void __user *arg) |
@@ -556,7 +845,8 @@ int hci_inquiry(void __user *arg)
556 | timeo = ir.length * msecs_to_jiffies(2000); | 845 | timeo = ir.length * msecs_to_jiffies(2000); |
557 | 846 | ||
558 | if (do_inquiry) { | 847 | if (do_inquiry) { |
559 | err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo); | 848 | err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir, |
849 | timeo); | ||
560 | if (err < 0) | 850 | if (err < 0) |
561 | goto done; | 851 | goto done; |
562 | } | 852 | } |
@@ -654,39 +944,29 @@ static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
654 | return ad_len; | 944 | return ad_len; |
655 | } | 945 | } |
656 | 946 | ||
657 | int hci_update_ad(struct hci_dev *hdev) | 947 | void hci_update_ad(struct hci_request *req) |
658 | { | 948 | { |
949 | struct hci_dev *hdev = req->hdev; | ||
659 | struct hci_cp_le_set_adv_data cp; | 950 | struct hci_cp_le_set_adv_data cp; |
660 | u8 len; | 951 | u8 len; |
661 | int err; | ||
662 | 952 | ||
663 | hci_dev_lock(hdev); | 953 | if (!lmp_le_capable(hdev)) |
664 | 954 | return; | |
665 | if (!lmp_le_capable(hdev)) { | ||
666 | err = -EINVAL; | ||
667 | goto unlock; | ||
668 | } | ||
669 | 955 | ||
670 | memset(&cp, 0, sizeof(cp)); | 956 | memset(&cp, 0, sizeof(cp)); |
671 | 957 | ||
672 | len = create_ad(hdev, cp.data); | 958 | len = create_ad(hdev, cp.data); |
673 | 959 | ||
674 | if (hdev->adv_data_len == len && | 960 | if (hdev->adv_data_len == len && |
675 | memcmp(cp.data, hdev->adv_data, len) == 0) { | 961 | memcmp(cp.data, hdev->adv_data, len) == 0) |
676 | err = 0; | 962 | return; |
677 | goto unlock; | ||
678 | } | ||
679 | 963 | ||
680 | memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); | 964 | memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); |
681 | hdev->adv_data_len = len; | 965 | hdev->adv_data_len = len; |
682 | 966 | ||
683 | cp.length = len; | 967 | cp.length = len; |
684 | err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp); | ||
685 | 968 | ||
686 | unlock: | 969 | hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp); |
687 | hci_dev_unlock(hdev); | ||
688 | |||
689 | return err; | ||
690 | } | 970 | } |
691 | 971 | ||
692 | /* ---- HCI ioctl helpers ---- */ | 972 | /* ---- HCI ioctl helpers ---- */ |
@@ -735,10 +1015,7 @@ int hci_dev_open(__u16 dev)
735 | if (!test_bit(HCI_RAW, &hdev->flags)) { | 1015 | if (!test_bit(HCI_RAW, &hdev->flags)) { |
736 | atomic_set(&hdev->cmd_cnt, 1); | 1016 | atomic_set(&hdev->cmd_cnt, 1); |
737 | set_bit(HCI_INIT, &hdev->flags); | 1017 | set_bit(HCI_INIT, &hdev->flags); |
738 | hdev->init_last_cmd = 0; | 1018 | ret = __hci_init(hdev); |
739 | |||
740 | ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT); | ||
741 | |||
742 | clear_bit(HCI_INIT, &hdev->flags); | 1019 | clear_bit(HCI_INIT, &hdev->flags); |
743 | } | 1020 | } |
744 | 1021 | ||
@@ -746,7 +1023,6 @@ int hci_dev_open(__u16 dev)
746 | hci_dev_hold(hdev); | 1023 | hci_dev_hold(hdev); |
747 | set_bit(HCI_UP, &hdev->flags); | 1024 | set_bit(HCI_UP, &hdev->flags); |
748 | hci_notify(hdev, HCI_DEV_UP); | 1025 | hci_notify(hdev, HCI_DEV_UP); |
749 | hci_update_ad(hdev); | ||
750 | if (!test_bit(HCI_SETUP, &hdev->dev_flags) && | 1026 | if (!test_bit(HCI_SETUP, &hdev->dev_flags) && |
751 | mgmt_valid_hdev(hdev)) { | 1027 | mgmt_valid_hdev(hdev)) { |
752 | hci_dev_lock(hdev); | 1028 | hci_dev_lock(hdev); |
@@ -828,7 +1104,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
828 | if (!test_bit(HCI_RAW, &hdev->flags) && | 1104 | if (!test_bit(HCI_RAW, &hdev->flags) && |
829 | test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { | 1105 | test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { |
830 | set_bit(HCI_INIT, &hdev->flags); | 1106 | set_bit(HCI_INIT, &hdev->flags); |
831 | __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT); | 1107 | __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT); |
832 | clear_bit(HCI_INIT, &hdev->flags); | 1108 | clear_bit(HCI_INIT, &hdev->flags); |
833 | } | 1109 | } |
834 | 1110 | ||
@@ -851,6 +1127,10 @@ static int hci_dev_do_close(struct hci_dev *hdev)
851 | * and no tasks are scheduled. */ | 1127 | * and no tasks are scheduled. */ |
852 | hdev->close(hdev); | 1128 | hdev->close(hdev); |
853 | 1129 | ||
1130 | /* Clear flags */ | ||
1131 | hdev->flags = 0; | ||
1132 | hdev->dev_flags &= ~HCI_PERSISTENT_MASK; | ||
1133 | |||
854 | if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) && | 1134 | if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) && |
855 | mgmt_valid_hdev(hdev)) { | 1135 | mgmt_valid_hdev(hdev)) { |
856 | hci_dev_lock(hdev); | 1136 | hci_dev_lock(hdev); |
@@ -858,9 +1138,6 @@ static int hci_dev_do_close(struct hci_dev *hdev)
858 | hci_dev_unlock(hdev); | 1138 | hci_dev_unlock(hdev); |
859 | } | 1139 | } |
860 | 1140 | ||
861 | /* Clear flags */ | ||
862 | hdev->flags = 0; | ||
863 | |||
864 | /* Controller radio is available but is currently powered down */ | 1141 | /* Controller radio is available but is currently powered down */ |
865 | hdev->amp_status = 0; | 1142 | hdev->amp_status = 0; |
866 | 1143 | ||
@@ -921,7 +1198,7 @@ int hci_dev_reset(__u16 dev)
921 | hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0; | 1198 | hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0; |
922 | 1199 | ||
923 | if (!test_bit(HCI_RAW, &hdev->flags)) | 1200 | if (!test_bit(HCI_RAW, &hdev->flags)) |
924 | ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT); | 1201 | ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT); |
925 | 1202 | ||
926 | done: | 1203 | done: |
927 | hci_req_unlock(hdev); | 1204 | hci_req_unlock(hdev); |
@@ -960,8 +1237,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
960 | 1237 | ||
961 | switch (cmd) { | 1238 | switch (cmd) { |
962 | case HCISETAUTH: | 1239 | case HCISETAUTH: |
963 | err = hci_request(hdev, hci_auth_req, dr.dev_opt, | 1240 | err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, |
964 | HCI_INIT_TIMEOUT); | 1241 | HCI_INIT_TIMEOUT); |
965 | break; | 1242 | break; |
966 | 1243 | ||
967 | case HCISETENCRYPT: | 1244 | case HCISETENCRYPT: |
@@ -972,24 +1249,24 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
972 | 1249 | ||
973 | if (!test_bit(HCI_AUTH, &hdev->flags)) { | 1250 | if (!test_bit(HCI_AUTH, &hdev->flags)) { |
974 | /* Auth must be enabled first */ | 1251 | /* Auth must be enabled first */ |
975 | err = hci_request(hdev, hci_auth_req, dr.dev_opt, | 1252 | err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, |
976 | HCI_INIT_TIMEOUT); | 1253 | HCI_INIT_TIMEOUT); |
977 | if (err) | 1254 | if (err) |
978 | break; | 1255 | break; |
979 | } | 1256 | } |
980 | 1257 | ||
981 | err = hci_request(hdev, hci_encrypt_req, dr.dev_opt, | 1258 | err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt, |
982 | HCI_INIT_TIMEOUT); | 1259 | HCI_INIT_TIMEOUT); |
983 | break; | 1260 | break; |
984 | 1261 | ||
985 | case HCISETSCAN: | 1262 | case HCISETSCAN: |
986 | err = hci_request(hdev, hci_scan_req, dr.dev_opt, | 1263 | err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt, |
987 | HCI_INIT_TIMEOUT); | 1264 | HCI_INIT_TIMEOUT); |
988 | break; | 1265 | break; |
989 | 1266 | ||
990 | case HCISETLINKPOL: | 1267 | case HCISETLINKPOL: |
991 | err = hci_request(hdev, hci_linkpol_req, dr.dev_opt, | 1268 | err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt, |
992 | HCI_INIT_TIMEOUT); | 1269 | HCI_INIT_TIMEOUT); |
993 | break; | 1270 | break; |
994 | 1271 | ||
995 | case HCISETLINKMODE: | 1272 | case HCISETLINKMODE: |
@@ -1566,7 +1843,7 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1566 | return mgmt_device_unblocked(hdev, bdaddr, type); | 1843 | return mgmt_device_unblocked(hdev, bdaddr, type); |
1567 | } | 1844 | } |
1568 | 1845 | ||
1569 | static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt) | 1846 | static void le_scan_param_req(struct hci_request *req, unsigned long opt) |
1570 | { | 1847 | { |
1571 | struct le_scan_params *param = (struct le_scan_params *) opt; | 1848 | struct le_scan_params *param = (struct le_scan_params *) opt; |
1572 | struct hci_cp_le_set_scan_param cp; | 1849 | struct hci_cp_le_set_scan_param cp; |
@@ -1576,10 +1853,10 @@ static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1576 | cp.interval = cpu_to_le16(param->interval); | 1853 | cp.interval = cpu_to_le16(param->interval); |
1577 | cp.window = cpu_to_le16(param->window); | 1854 | cp.window = cpu_to_le16(param->window); |
1578 | 1855 | ||
1579 | hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp); | 1856 | hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp); |
1580 | } | 1857 | } |
1581 | 1858 | ||
1582 | static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt) | 1859 | static void le_scan_enable_req(struct hci_request *req, unsigned long opt) |
1583 | { | 1860 | { |
1584 | struct hci_cp_le_set_scan_enable cp; | 1861 | struct hci_cp_le_set_scan_enable cp; |
1585 | 1862 | ||
@@ -1587,7 +1864,7 @@ static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1587 | cp.enable = 1; | 1864 | cp.enable = 1; |
1588 | cp.filter_dup = 1; | 1865 | cp.filter_dup = 1; |
1589 | 1866 | ||
1590 | hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); | 1867 | hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); |
1591 | } | 1868 | } |
1592 | 1869 | ||
1593 | static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval, | 1870 | static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval, |
@@ -1608,10 +1885,10 @@ static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1608 | 1885 | ||
1609 | hci_req_lock(hdev); | 1886 | hci_req_lock(hdev); |
1610 | 1887 | ||
1611 | err = __hci_request(hdev, le_scan_param_req, (unsigned long) ¶m, | 1888 | err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) ¶m, |
1612 | timeo); | 1889 | timeo); |
1613 | if (!err) | 1890 | if (!err) |
1614 | err = __hci_request(hdev, le_scan_enable_req, 0, timeo); | 1891 | err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo); |
1615 | 1892 | ||
1616 | hci_req_unlock(hdev); | 1893 | hci_req_unlock(hdev); |
1617 | 1894 | ||
@@ -2160,20 +2437,55 @@ static int hci_send_frame(struct sk_buff *skb)
2160 | return hdev->send(skb); | 2437 | return hdev->send(skb); |
2161 | } | 2438 | } |
2162 | 2439 | ||
2163 | /* Send HCI command */ | 2440 | void hci_req_init(struct hci_request *req, struct hci_dev *hdev) |
2164 | int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param) | 2441 | { |
2442 | skb_queue_head_init(&req->cmd_q); | ||
2443 | req->hdev = hdev; | ||
2444 | req->err = 0; | ||
2445 | } | ||
2446 | |||
2447 | int hci_req_run(struct hci_request *req, hci_req_complete_t complete) | ||
2448 | { | ||
2449 | struct hci_dev *hdev = req->hdev; | ||
2450 | struct sk_buff *skb; | ||
2451 | unsigned long flags; | ||
2452 | |||
2453 | BT_DBG("length %u", skb_queue_len(&req->cmd_q)); | ||
2454 | |||
2455 | /* If an error occured during request building, remove all HCI | ||
2456 | * commands queued on the HCI request queue. | ||
2457 | */ | ||
2458 | if (req->err) { | ||
2459 | skb_queue_purge(&req->cmd_q); | ||
2460 | return req->err; | ||
2461 | } | ||
2462 | |||
2463 | /* Do not allow empty requests */ | ||
2464 | if (skb_queue_empty(&req->cmd_q)) | ||
2465 | return -ENODATA; | ||
2466 | |||
2467 | skb = skb_peek_tail(&req->cmd_q); | ||
2468 | bt_cb(skb)->req.complete = complete; | ||
2469 | |||
2470 | spin_lock_irqsave(&hdev->cmd_q.lock, flags); | ||
2471 | skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); | ||
2472 | spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); | ||
2473 | |||
2474 | queue_work(hdev->workqueue, &hdev->cmd_work); | ||
2475 | |||
2476 | return 0; | ||
2477 | } | ||
2478 | |||
2479 | static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, | ||
2480 | u32 plen, void *param) | ||
2165 | { | 2481 | { |
2166 | int len = HCI_COMMAND_HDR_SIZE + plen; | 2482 | int len = HCI_COMMAND_HDR_SIZE + plen; |
2167 | struct hci_command_hdr *hdr; | 2483 | struct hci_command_hdr *hdr; |
2168 | struct sk_buff *skb; | 2484 | struct sk_buff *skb; |
2169 | 2485 | ||
2170 | BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); | ||
2171 | |||
2172 | skb = bt_skb_alloc(len, GFP_ATOMIC); | 2486 | skb = bt_skb_alloc(len, GFP_ATOMIC); |
2173 | if (!skb) { | 2487 | if (!skb) |
2174 | BT_ERR("%s no memory for command", hdev->name); | 2488 | return NULL; |
2175 | return -ENOMEM; | ||
2176 | } | ||
2177 | 2489 | ||
2178 | hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE); | 2490 | hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE); |
2179 | hdr->opcode = cpu_to_le16(opcode); | 2491 | hdr->opcode = cpu_to_le16(opcode); |
@@ -2187,8 +2499,26 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2187 | bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; | 2499 | bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; |
2188 | skb->dev = (void *) hdev; | 2500 | skb->dev = (void *) hdev; |
2189 | 2501 | ||
2190 | if (test_bit(HCI_INIT, &hdev->flags)) | 2502 | return skb; |
2191 | hdev->init_last_cmd = opcode; | 2503 | } |
2504 | |||
2505 | /* Send HCI command */ | ||
2506 | int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param) | ||
2507 | { | ||
2508 | struct sk_buff *skb; | ||
2509 | |||
2510 | BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); | ||
2511 | |||
2512 | skb = hci_prepare_cmd(hdev, opcode, plen, param); | ||
2513 | if (!skb) { | ||
2514 | BT_ERR("%s no memory for command", hdev->name); | ||
2515 | return -ENOMEM; | ||
2516 | } | ||
2517 | |||
2518 | /* Stand-alone HCI commands must be flaged as | ||
2519 | * single-command requests. | ||
2520 | */ | ||
2521 | bt_cb(skb)->req.start = true; | ||
2192 | 2522 | ||
2193 | skb_queue_tail(&hdev->cmd_q, skb); | 2523 | skb_queue_tail(&hdev->cmd_q, skb); |
2194 | queue_work(hdev->workqueue, &hdev->cmd_work); | 2524 | queue_work(hdev->workqueue, &hdev->cmd_work); |
@@ -2196,6 +2526,34 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2196 | return 0; | 2526 | return 0; |
2197 | } | 2527 | } |
2198 | 2528 | ||
2529 | /* Queue a command to an asynchronous HCI request */ | ||
2530 | void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param) | ||
2531 | { | ||
2532 | struct hci_dev *hdev = req->hdev; | ||
2533 | struct sk_buff *skb; | ||
2534 | |||
2535 | BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); | ||
2536 | |||
2537 | /* If an error occured during request building, there is no point in | ||
2538 | * queueing the HCI command. We can simply return. | ||
2539 | */ | ||
2540 | if (req->err) | ||
2541 | return; | ||
2542 | |||
2543 | skb = hci_prepare_cmd(hdev, opcode, plen, param); | ||
2544 | if (!skb) { | ||
2545 | BT_ERR("%s no memory for command (opcode 0x%4.4x)", | ||
2546 | hdev->name, opcode); | ||
2547 | req->err = -ENOMEM; | ||
2548 | return; | ||
2549 | } | ||
2550 | |||
2551 | if (skb_queue_empty(&req->cmd_q)) | ||
2552 | bt_cb(skb)->req.start = true; | ||
2553 | |||
2554 | skb_queue_tail(&req->cmd_q, skb); | ||
2555 | } | ||
2556 | |||
2199 | /* Get data from the previously sent command */ | 2557 | /* Get data from the previously sent command */ |
2200 | void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) | 2558 | void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) |
2201 | { | 2559 | { |
@@ -2398,7 +2756,7 @@ static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2398 | if (c->type == type && c->sent) { | 2756 | if (c->type == type && c->sent) { |
2399 | BT_ERR("%s killing stalled connection %pMR", | 2757 | BT_ERR("%s killing stalled connection %pMR", |
2400 | hdev->name, &c->dst); | 2758 | hdev->name, &c->dst); |
2401 | hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM); | 2759 | hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM); |
2402 | } | 2760 | } |
2403 | } | 2761 | } |
2404 | 2762 | ||
@@ -2860,6 +3218,123 @@ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2860 | kfree_skb(skb); | 3218 | kfree_skb(skb); |
2861 | } | 3219 | } |
2862 | 3220 | ||
3221 | static bool hci_req_is_complete(struct hci_dev *hdev) | ||
3222 | { | ||
3223 | struct sk_buff *skb; | ||
3224 | |||
3225 | skb = skb_peek(&hdev->cmd_q); | ||
3226 | if (!skb) | ||
3227 | return true; | ||
3228 | |||
3229 | return bt_cb(skb)->req.start; | ||
3230 | } | ||
3231 | |||
3232 | static void hci_resend_last(struct hci_dev *hdev) | ||
3233 | { | ||
3234 | struct hci_command_hdr *sent; | ||
3235 | struct sk_buff *skb; | ||
3236 | u16 opcode; | ||
3237 | |||
3238 | if (!hdev->sent_cmd) | ||
3239 | return; | ||
3240 | |||
3241 | sent = (void *) hdev->sent_cmd->data; | ||
3242 | opcode = __le16_to_cpu(sent->opcode); | ||
3243 | if (opcode == HCI_OP_RESET) | ||
3244 | return; | ||
3245 | |||
3246 | skb = skb_clone(hdev->sent_cmd, GFP_KERNEL); | ||
3247 | if (!skb) | ||
3248 | return; | ||
3249 | |||
3250 | skb_queue_head(&hdev->cmd_q, skb); | ||
3251 | queue_work(hdev->workqueue, &hdev->cmd_work); | ||
3252 | } | ||
3253 | |||
3254 | void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status) | ||
3255 | { | ||
3256 | hci_req_complete_t req_complete = NULL; | ||
3257 | struct sk_buff *skb; | ||
3258 | unsigned long flags; | ||
3259 | |||
3260 | BT_DBG("opcode 0x%04x status 0x%02x", opcode, status); | ||
3261 | |||
3262 | /* If the completed command doesn't match the last one that was | ||
3263 | * sent we need to do special handling of it. | ||
3264 | */ | ||
3265 | if (!hci_sent_cmd_data(hdev, opcode)) { | ||
3266 | /* Some CSR based controllers generate a spontaneous | ||
3267 | * reset complete event during init and any pending | ||
3268 | * command will never be completed. In such a case we | ||
3269 | * need to resend whatever was the last sent | ||
3270 | * command. | ||
3271 | */ | ||
3272 | if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET) | ||
3273 | hci_resend_last(hdev); | ||
3274 | |||
3275 | return; | ||
3276 | } | ||
3277 | |||
3278 | /* If the command succeeded and there's still more commands in | ||
3279 | * this request the request is not yet complete. | ||
3280 | */ | ||
3281 | if (!status && !hci_req_is_complete(hdev)) | ||
3282 | return; | ||
3283 | |||
3284 | /* If this was the last command in a request the complete | ||
3285 | * callback would be found in hdev->sent_cmd instead of the | ||
3286 | * command queue (hdev->cmd_q). | ||
3287 | */ | ||
3288 | if (hdev->sent_cmd) { | ||
3289 | req_complete = bt_cb(hdev->sent_cmd)->req.complete; | ||
3290 | if (req_complete) | ||
3291 | goto call_complete; | ||
3292 | } | ||
3293 | |||
3294 | /* Remove all pending commands belonging to this request */ | ||
3295 | spin_lock_irqsave(&hdev->cmd_q.lock, flags); | ||
3296 | while ((skb = __skb_dequeue(&hdev->cmd_q))) { | ||
3297 | if (bt_cb(skb)->req.start) { | ||
3298 | __skb_queue_head(&hdev->cmd_q, skb); | ||
3299 | break; | ||
3300 | } | ||
3301 | |||
3302 | req_complete = bt_cb(skb)->req.complete; | ||
3303 | kfree_skb(skb); | ||
3304 | } | ||
3305 | spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); | ||
3306 | |||
3307 | call_complete: | ||
3308 | if (req_complete) | ||
3309 | req_complete(hdev, status); | ||
3310 | } | ||
3311 | |||
3312 | void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status) | ||
3313 | { | ||
3314 | hci_req_complete_t req_complete = NULL; | ||
3315 | |||
3316 | BT_DBG("opcode 0x%04x status 0x%02x", opcode, status); | ||
3317 | |||
3318 | if (status) { | ||
3319 | hci_req_cmd_complete(hdev, opcode, status); | ||
3320 | return; | ||
3321 | } | ||
3322 | |||
3323 | /* No need to handle success status if there are more commands */ | ||
3324 | if (!hci_req_is_complete(hdev)) | ||
3325 | return; | ||
3326 | |||
3327 | if (hdev->sent_cmd) | ||
3328 | req_complete = bt_cb(hdev->sent_cmd)->req.complete; | ||
3329 | |||
3330 | /* If the request doesn't have a complete callback or there | ||
3331 | * are other commands/requests in the hdev queue we consider | ||
3332 | * this request as completed. | ||
3333 | */ | ||
3334 | if (!req_complete || !skb_queue_empty(&hdev->cmd_q)) | ||
3335 | hci_req_cmd_complete(hdev, opcode, status); | ||
3336 | } | ||
3337 | |||
2863 | static void hci_rx_work(struct work_struct *work) | 3338 | static void hci_rx_work(struct work_struct *work) |
2864 | { | 3339 | { |
2865 | struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work); | 3340 | struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work); |
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 477726a63512..138580745c2c 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -53,7 +53,7 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
53 | hci_discovery_set_state(hdev, DISCOVERY_STOPPED); | 53 | hci_discovery_set_state(hdev, DISCOVERY_STOPPED); |
54 | hci_dev_unlock(hdev); | 54 | hci_dev_unlock(hdev); |
55 | 55 | ||
56 | hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status); | 56 | hci_req_cmd_complete(hdev, HCI_OP_INQUIRY, status); |
57 | 57 | ||
58 | hci_conn_check_pending(hdev); | 58 | hci_conn_check_pending(hdev); |
59 | } | 59 | } |
@@ -183,8 +183,6 @@ static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
183 | 183 | ||
184 | if (!status) | 184 | if (!status) |
185 | hdev->link_policy = get_unaligned_le16(sent); | 185 | hdev->link_policy = get_unaligned_le16(sent); |
186 | |||
187 | hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status); | ||
188 | } | 186 | } |
189 | 187 | ||
190 | static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) | 188 | static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -195,11 +193,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
195 | 193 | ||
196 | clear_bit(HCI_RESET, &hdev->flags); | 194 | clear_bit(HCI_RESET, &hdev->flags); |
197 | 195 | ||
198 | hci_req_complete(hdev, HCI_OP_RESET, status); | ||
199 | |||
200 | /* Reset all non-persistent flags */ | 196 | /* Reset all non-persistent flags */ |
201 | hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) | | 197 | hdev->dev_flags &= ~HCI_PERSISTENT_MASK; |
202 | BIT(HCI_PERIODIC_INQ)); | ||
203 | 198 | ||
204 | hdev->discovery.state = DISCOVERY_STOPPED; | 199 | hdev->discovery.state = DISCOVERY_STOPPED; |
205 | hdev->inq_tx_power = HCI_TX_POWER_INVALID; | 200 | hdev->inq_tx_power = HCI_TX_POWER_INVALID; |
@@ -228,11 +223,6 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
228 | memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); | 223 | memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); |
229 | 224 | ||
230 | hci_dev_unlock(hdev); | 225 | hci_dev_unlock(hdev); |
231 | |||
232 | if (!status && !test_bit(HCI_INIT, &hdev->flags)) | ||
233 | hci_update_ad(hdev); | ||
234 | |||
235 | hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status); | ||
236 | } | 226 | } |
237 | 227 | ||
238 | static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) | 228 | static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -270,8 +260,6 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
270 | 260 | ||
271 | if (test_bit(HCI_MGMT, &hdev->dev_flags)) | 261 | if (test_bit(HCI_MGMT, &hdev->dev_flags)) |
272 | mgmt_auth_enable_complete(hdev, status); | 262 | mgmt_auth_enable_complete(hdev, status); |
273 | |||
274 | hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status); | ||
275 | } | 263 | } |
276 | 264 | ||
277 | static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb) | 265 | static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -293,8 +281,6 @@ static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
293 | else | 281 | else |
294 | clear_bit(HCI_ENCRYPT, &hdev->flags); | 282 | clear_bit(HCI_ENCRYPT, &hdev->flags); |
295 | } | 283 | } |
296 | |||
297 | hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status); | ||
298 | } | 284 | } |
299 | 285 | ||
300 | static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) | 286 | static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -343,7 +329,6 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
343 | 329 | ||
344 | done: | 330 | done: |
345 | hci_dev_unlock(hdev); | 331 | hci_dev_unlock(hdev); |
346 | hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status); | ||
347 | } | 332 | } |
348 | 333 | ||
349 | static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) | 334 | static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -435,15 +420,6 @@ static void hci_cc_write_voice_setting(struct hci_dev *hdev,
435 | hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); | 420 | hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); |
436 | } | 421 | } |
437 | 422 | ||
438 | static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) | ||
439 | { | ||
440 | __u8 status = *((__u8 *) skb->data); | ||
441 | |||
442 | BT_DBG("%s status 0x%2.2x", hdev->name, status); | ||
443 | |||
444 | hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status); | ||
445 | } | ||
446 | |||
447 | static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) | 423 | static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) |
448 | { | 424 | { |
449 | __u8 status = *((__u8 *) skb->data); | 425 | __u8 status = *((__u8 *) skb->data); |
@@ -472,211 +448,6 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
472 | } | 448 | } |
473 | } | 449 | } |
474 | 450 | ||
475 | static u8 hci_get_inquiry_mode(struct hci_dev *hdev) | ||
476 | { | ||
477 | if (lmp_ext_inq_capable(hdev)) | ||
478 | return 2; | ||
479 | |||
480 | if (lmp_inq_rssi_capable(hdev)) | ||
481 | return 1; | ||
482 | |||
483 | if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 && | ||
484 | hdev->lmp_subver == 0x0757) | ||
485 | return 1; | ||
486 | |||
487 | if (hdev->manufacturer == 15) { | ||
488 | if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963) | ||
489 | return 1; | ||
490 | if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963) | ||
491 | return 1; | ||
492 | if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965) | ||
493 | return 1; | ||
494 | } | ||
495 | |||
496 | if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 && | ||
497 | hdev->lmp_subver == 0x1805) | ||
498 | return 1; | ||
499 | |||
500 | return 0; | ||
501 | } | ||
502 | |||
503 | static void hci_setup_inquiry_mode(struct hci_dev *hdev) | ||
504 | { | ||
505 | u8 mode; | ||
506 | |||
507 | mode = hci_get_inquiry_mode(hdev); | ||
508 | |||
509 | hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode); | ||
510 | } | ||
511 | |||
512 | static void hci_setup_event_mask(struct hci_dev *hdev) | ||
513 | { | ||
514 | /* The second byte is 0xff instead of 0x9f (two reserved bits | ||
515 | * disabled) since a Broadcom 1.2 dongle doesn't respond to the | ||
516 | * command otherwise */ | ||
517 | u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 }; | ||
518 | |||
519 | /* CSR 1.1 dongles does not accept any bitfield so don't try to set | ||
520 | * any event mask for pre 1.2 devices */ | ||
521 | if (hdev->hci_ver < BLUETOOTH_VER_1_2) | ||
522 | return; | ||
523 | |||
524 | if (lmp_bredr_capable(hdev)) { | ||
525 | events[4] |= 0x01; /* Flow Specification Complete */ | ||
526 | events[4] |= 0x02; /* Inquiry Result with RSSI */ | ||
527 | events[4] |= 0x04; /* Read Remote Extended Features Complete */ | ||
528 | events[5] |= 0x08; /* Synchronous Connection Complete */ | ||
529 | events[5] |= 0x10; /* Synchronous Connection Changed */ | ||
530 | } | ||
531 | |||
532 | if (lmp_inq_rssi_capable(hdev)) | ||
533 | events[4] |= 0x02; /* Inquiry Result with RSSI */ | ||
534 | |||
535 | if (lmp_sniffsubr_capable(hdev)) | ||
536 | events[5] |= 0x20; /* Sniff Subrating */ | ||
537 | |||
538 | if (lmp_pause_enc_capable(hdev)) | ||
539 | events[5] |= 0x80; /* Encryption Key Refresh Complete */ | ||
540 | |||
541 | if (lmp_ext_inq_capable(hdev)) | ||
542 | events[5] |= 0x40; /* Extended Inquiry Result */ | ||
543 | |||
544 | if (lmp_no_flush_capable(hdev)) | ||
545 | events[7] |= 0x01; /* Enhanced Flush Complete */ | ||
546 | |||
547 | if (lmp_lsto_capable(hdev)) | ||
548 | events[6] |= 0x80; /* Link Supervision Timeout Changed */ | ||
549 | |||
550 | if (lmp_ssp_capable(hdev)) { | ||
551 | events[6] |= 0x01; /* IO Capability Request */ | ||
552 | events[6] |= 0x02; /* IO Capability Response */ | ||
553 | events[6] |= 0x04; /* User Confirmation Request */ | ||
554 | events[6] |= 0x08; /* User Passkey Request */ | ||
555 | events[6] |= 0x10; /* Remote OOB Data Request */ | ||
556 | events[6] |= 0x20; /* Simple Pairing Complete */ | ||
557 | events[7] |= 0x04; /* User Passkey Notification */ | ||
558 | events[7] |= 0x08; /* Keypress Notification */ | ||
559 | events[7] |= 0x10; /* Remote Host Supported | ||
560 | * Features Notification */ | ||
561 | } | ||
562 | |||
563 | if (lmp_le_capable(hdev)) | ||
564 | events[7] |= 0x20; /* LE Meta-Event */ | ||
565 | |||
566 | hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events); | ||
567 | |||
568 | if (lmp_le_capable(hdev)) { | ||
569 | memset(events, 0, sizeof(events)); | ||
570 | events[0] = 0x1f; | ||
571 | hci_send_cmd(hdev, HCI_OP_LE_SET_EVENT_MASK, | ||
572 | sizeof(events), events); | ||
573 | } | ||
574 | } | ||
575 | |||
576 | static void bredr_setup(struct hci_dev *hdev) | ||
577 | { | ||
578 | struct hci_cp_delete_stored_link_key cp; | ||
579 | __le16 param; | ||
580 | __u8 flt_type; | ||
581 | |||
582 | /* Read Buffer Size (ACL mtu, max pkt, etc.) */ | ||
583 | hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL); | ||
584 | |||
585 | /* Read Class of Device */ | ||
586 | hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL); | ||
587 | |||
588 | /* Read Local Name */ | ||
589 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL); | ||
590 | |||
591 | /* Read Voice Setting */ | ||
592 | hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL); | ||
593 | |||
594 | /* Clear Event Filters */ | ||
595 | flt_type = HCI_FLT_CLEAR_ALL; | ||
596 | hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type); | ||
597 | |||
598 | /* Connection accept timeout ~20 secs */ | ||
599 | param = __constant_cpu_to_le16(0x7d00); | ||
600 | hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m); | ||
601 | |||
602 | bacpy(&cp.bdaddr, BDADDR_ANY); | ||
603 | cp.delete_all = 1; | ||
604 | hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp); | ||
605 | } | ||
606 | |||
607 | static void le_setup(struct hci_dev *hdev) | ||
608 | { | ||
609 | /* Read LE Buffer Size */ | ||
610 | hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL); | ||
611 | |||
612 | /* Read LE Local Supported Features */ | ||
613 | hci_send_cmd(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL); | ||
614 | |||
615 | /* Read LE Advertising Channel TX Power */ | ||
616 | hci_send_cmd(hdev, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL); | ||
617 | |||
618 | /* Read LE White List Size */ | ||
619 | hci_send_cmd(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL); | ||
620 | |||
621 | /* Read LE Supported States */ | ||
622 | hci_send_cmd(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL); | ||
623 | } | ||
624 | |||
625 | static void hci_setup(struct hci_dev *hdev) | ||
626 | { | ||
627 | if (hdev->dev_type != HCI_BREDR) | ||
628 | return; | ||
629 | |||
630 | /* Read BD Address */ | ||
631 | hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL); | ||
632 | |||
633 | if (lmp_bredr_capable(hdev)) | ||
634 | bredr_setup(hdev); | ||
635 | |||
636 | if (lmp_le_capable(hdev)) | ||
637 | le_setup(hdev); | ||
638 | |||
639 | hci_setup_event_mask(hdev); | ||
640 | |||
641 | if (hdev->hci_ver > BLUETOOTH_VER_1_1) | ||
642 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); | ||
643 | |||
644 | if (lmp_ssp_capable(hdev)) { | ||
645 | if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { | ||
646 | u8 mode = 0x01; | ||
647 | hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, | ||
648 | sizeof(mode), &mode); | ||
649 | } else { | ||
650 | struct hci_cp_write_eir cp; | ||
651 | |||
652 | memset(hdev->eir, 0, sizeof(hdev->eir)); | ||
653 | memset(&cp, 0, sizeof(cp)); | ||
654 | |||
655 | hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp); | ||
656 | } | ||
657 | } | ||
658 | |||
659 | if (lmp_inq_rssi_capable(hdev)) | ||
660 | hci_setup_inquiry_mode(hdev); | ||
661 | |||
662 | if (lmp_inq_tx_pwr_capable(hdev)) | ||
663 | hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL); | ||
664 | |||
665 | if (lmp_ext_feat_capable(hdev)) { | ||
666 | struct hci_cp_read_local_ext_features cp; | ||
667 | |||
668 | cp.page = 0x01; | ||
669 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), | ||
670 | &cp); | ||
671 | } | ||
672 | |||
673 | if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) { | ||
674 | u8 enable = 1; | ||
675 | hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable), | ||
676 | &enable); | ||
677 | } | ||
678 | } | ||
679 | |||
680 | static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) | 451 | static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) |
681 | { | 452 | { |
682 | struct hci_rp_read_local_version *rp = (void *) skb->data; | 453 | struct hci_rp_read_local_version *rp = (void *) skb->data; |
@@ -684,7 +455,7 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) | |||
684 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); | 455 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); |
685 | 456 | ||
686 | if (rp->status) | 457 | if (rp->status) |
687 | goto done; | 458 | return; |
688 | 459 | ||
689 | hdev->hci_ver = rp->hci_ver; | 460 | hdev->hci_ver = rp->hci_ver; |
690 | hdev->hci_rev = __le16_to_cpu(rp->hci_rev); | 461 | hdev->hci_rev = __le16_to_cpu(rp->hci_rev); |
@@ -694,30 +465,6 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) | |||
694 | 465 | ||
695 | BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name, | 466 | BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name, |
696 | hdev->manufacturer, hdev->hci_ver, hdev->hci_rev); | 467 | hdev->manufacturer, hdev->hci_ver, hdev->hci_rev); |
697 | |||
698 | if (test_bit(HCI_INIT, &hdev->flags)) | ||
699 | hci_setup(hdev); | ||
700 | |||
701 | done: | ||
702 | hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status); | ||
703 | } | ||
704 | |||
705 | static void hci_setup_link_policy(struct hci_dev *hdev) | ||
706 | { | ||
707 | struct hci_cp_write_def_link_policy cp; | ||
708 | u16 link_policy = 0; | ||
709 | |||
710 | if (lmp_rswitch_capable(hdev)) | ||
711 | link_policy |= HCI_LP_RSWITCH; | ||
712 | if (lmp_hold_capable(hdev)) | ||
713 | link_policy |= HCI_LP_HOLD; | ||
714 | if (lmp_sniff_capable(hdev)) | ||
715 | link_policy |= HCI_LP_SNIFF; | ||
716 | if (lmp_park_capable(hdev)) | ||
717 | link_policy |= HCI_LP_PARK; | ||
718 | |||
719 | cp.policy = cpu_to_le16(link_policy); | ||
720 | hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp); | ||
721 | } | 468 | } |
722 | 469 | ||
723 | static void hci_cc_read_local_commands(struct hci_dev *hdev, | 470 | static void hci_cc_read_local_commands(struct hci_dev *hdev, |
@@ -727,16 +474,8 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev, | |||
727 | 474 | ||
728 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); | 475 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); |
729 | 476 | ||
730 | if (rp->status) | 477 | if (!rp->status) |
731 | goto done; | 478 | memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); |
732 | |||
733 | memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); | ||
734 | |||
735 | if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10)) | ||
736 | hci_setup_link_policy(hdev); | ||
737 | |||
738 | done: | ||
739 | hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status); | ||
740 | } | 479 | } |
741 | 480 | ||
742 | static void hci_cc_read_local_features(struct hci_dev *hdev, | 481 | static void hci_cc_read_local_features(struct hci_dev *hdev, |
@@ -795,22 +534,6 @@ static void hci_cc_read_local_features(struct hci_dev *hdev, | |||
795 | hdev->features[6], hdev->features[7]); | 534 | hdev->features[6], hdev->features[7]); |
796 | } | 535 | } |
797 | 536 | ||
798 | static void hci_set_le_support(struct hci_dev *hdev) | ||
799 | { | ||
800 | struct hci_cp_write_le_host_supported cp; | ||
801 | |||
802 | memset(&cp, 0, sizeof(cp)); | ||
803 | |||
804 | if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { | ||
805 | cp.le = 1; | ||
806 | cp.simul = lmp_le_br_capable(hdev); | ||
807 | } | ||
808 | |||
809 | if (cp.le != lmp_host_le_capable(hdev)) | ||
810 | hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), | ||
811 | &cp); | ||
812 | } | ||
813 | |||
814 | static void hci_cc_read_local_ext_features(struct hci_dev *hdev, | 537 | static void hci_cc_read_local_ext_features(struct hci_dev *hdev, |
815 | struct sk_buff *skb) | 538 | struct sk_buff *skb) |
816 | { | 539 | { |
@@ -819,7 +542,7 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev, | |||
819 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); | 542 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); |
820 | 543 | ||
821 | if (rp->status) | 544 | if (rp->status) |
822 | goto done; | 545 | return; |
823 | 546 | ||
824 | switch (rp->page) { | 547 | switch (rp->page) { |
825 | case 0: | 548 | case 0: |
@@ -829,12 +552,6 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev, | |||
829 | memcpy(hdev->host_features, rp->features, 8); | 552 | memcpy(hdev->host_features, rp->features, 8); |
830 | break; | 553 | break; |
831 | } | 554 | } |
832 | |||
833 | if (test_bit(HCI_INIT, &hdev->flags) && lmp_le_capable(hdev)) | ||
834 | hci_set_le_support(hdev); | ||
835 | |||
836 | done: | ||
837 | hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status); | ||
838 | } | 555 | } |
839 | 556 | ||
840 | static void hci_cc_read_flow_control_mode(struct hci_dev *hdev, | 557 | static void hci_cc_read_flow_control_mode(struct hci_dev *hdev, |
@@ -844,12 +561,8 @@ static void hci_cc_read_flow_control_mode(struct hci_dev *hdev, | |||
844 | 561 | ||
845 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); | 562 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); |
846 | 563 | ||
847 | if (rp->status) | 564 | if (!rp->status) |
848 | return; | 565 | hdev->flow_ctl_mode = rp->mode; |
849 | |||
850 | hdev->flow_ctl_mode = rp->mode; | ||
851 | |||
852 | hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status); | ||
853 | } | 566 | } |
854 | 567 | ||
855 | static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) | 568 | static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -886,8 +599,65 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) | |||
886 | 599 | ||
887 | if (!rp->status) | 600 | if (!rp->status) |
888 | bacpy(&hdev->bdaddr, &rp->bdaddr); | 601 | bacpy(&hdev->bdaddr, &rp->bdaddr); |
602 | } | ||
603 | |||
604 | static void hci_cc_read_page_scan_activity(struct hci_dev *hdev, | ||
605 | struct sk_buff *skb) | ||
606 | { | ||
607 | struct hci_rp_read_page_scan_activity *rp = (void *) skb->data; | ||
608 | |||
609 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); | ||
610 | |||
611 | if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) { | ||
612 | hdev->page_scan_interval = __le16_to_cpu(rp->interval); | ||
613 | hdev->page_scan_window = __le16_to_cpu(rp->window); | ||
614 | } | ||
615 | } | ||
616 | |||
617 | static void hci_cc_write_page_scan_activity(struct hci_dev *hdev, | ||
618 | struct sk_buff *skb) | ||
619 | { | ||
620 | u8 status = *((u8 *) skb->data); | ||
621 | struct hci_cp_write_page_scan_activity *sent; | ||
622 | |||
623 | BT_DBG("%s status 0x%2.2x", hdev->name, status); | ||
624 | |||
625 | if (status) | ||
626 | return; | ||
627 | |||
628 | sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY); | ||
629 | if (!sent) | ||
630 | return; | ||
631 | |||
632 | hdev->page_scan_interval = __le16_to_cpu(sent->interval); | ||
633 | hdev->page_scan_window = __le16_to_cpu(sent->window); | ||
634 | } | ||
635 | |||
636 | static void hci_cc_read_page_scan_type(struct hci_dev *hdev, | ||
637 | struct sk_buff *skb) | ||
638 | { | ||
639 | struct hci_rp_read_page_scan_type *rp = (void *) skb->data; | ||
640 | |||
641 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); | ||
642 | |||
643 | if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) | ||
644 | hdev->page_scan_type = rp->type; | ||
645 | } | ||
646 | |||
647 | static void hci_cc_write_page_scan_type(struct hci_dev *hdev, | ||
648 | struct sk_buff *skb) | ||
649 | { | ||
650 | u8 status = *((u8 *) skb->data); | ||
651 | u8 *type; | ||
889 | 652 | ||
890 | hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status); | 653 | BT_DBG("%s status 0x%2.2x", hdev->name, status); |
654 | |||
655 | if (status) | ||
656 | return; | ||
657 | |||
658 | type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE); | ||
659 | if (type) | ||
660 | hdev->page_scan_type = *type; | ||
891 | } | 661 | } |
892 | 662 | ||
893 | static void hci_cc_read_data_block_size(struct hci_dev *hdev, | 663 | static void hci_cc_read_data_block_size(struct hci_dev *hdev, |
@@ -908,17 +678,6 @@ static void hci_cc_read_data_block_size(struct hci_dev *hdev, | |||
908 | 678 | ||
909 | BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, | 679 | BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, |
910 | hdev->block_cnt, hdev->block_len); | 680 | hdev->block_cnt, hdev->block_len); |
911 | |||
912 | hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status); | ||
913 | } | ||
914 | |||
915 | static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb) | ||
916 | { | ||
917 | __u8 status = *((__u8 *) skb->data); | ||
918 | |||
919 | BT_DBG("%s status 0x%2.2x", hdev->name, status); | ||
920 | |||
921 | hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status); | ||
922 | } | 681 | } |
923 | 682 | ||
924 | static void hci_cc_read_local_amp_info(struct hci_dev *hdev, | 683 | static void hci_cc_read_local_amp_info(struct hci_dev *hdev, |
@@ -942,8 +701,6 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev, | |||
942 | hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to); | 701 | hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to); |
943 | hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to); | 702 | hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to); |
944 | 703 | ||
945 | hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status); | ||
946 | |||
947 | a2mp_rsp: | 704 | a2mp_rsp: |
948 | a2mp_send_getinfo_rsp(hdev); | 705 | a2mp_send_getinfo_rsp(hdev); |
949 | } | 706 | } |
@@ -985,35 +742,6 @@ a2mp_rsp: | |||
985 | a2mp_send_create_phy_link_req(hdev, rp->status); | 742 | a2mp_send_create_phy_link_req(hdev, rp->status); |
986 | } | 743 | } |
987 | 744 | ||
988 | static void hci_cc_delete_stored_link_key(struct hci_dev *hdev, | ||
989 | struct sk_buff *skb) | ||
990 | { | ||
991 | __u8 status = *((__u8 *) skb->data); | ||
992 | |||
993 | BT_DBG("%s status 0x%2.2x", hdev->name, status); | ||
994 | |||
995 | hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status); | ||
996 | } | ||
997 | |||
998 | static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb) | ||
999 | { | ||
1000 | __u8 status = *((__u8 *) skb->data); | ||
1001 | |||
1002 | BT_DBG("%s status 0x%2.2x", hdev->name, status); | ||
1003 | |||
1004 | hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status); | ||
1005 | } | ||
1006 | |||
1007 | static void hci_cc_write_inquiry_mode(struct hci_dev *hdev, | ||
1008 | struct sk_buff *skb) | ||
1009 | { | ||
1010 | __u8 status = *((__u8 *) skb->data); | ||
1011 | |||
1012 | BT_DBG("%s status 0x%2.2x", hdev->name, status); | ||
1013 | |||
1014 | hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status); | ||
1015 | } | ||
1016 | |||
1017 | static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, | 745 | static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, |
1018 | struct sk_buff *skb) | 746 | struct sk_buff *skb) |
1019 | { | 747 | { |
@@ -1023,17 +751,6 @@ static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, | |||
1023 | 751 | ||
1024 | if (!rp->status) | 752 | if (!rp->status) |
1025 | hdev->inq_tx_power = rp->tx_power; | 753 | hdev->inq_tx_power = rp->tx_power; |
1026 | |||
1027 | hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status); | ||
1028 | } | ||
1029 | |||
1030 | static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb) | ||
1031 | { | ||
1032 | __u8 status = *((__u8 *) skb->data); | ||
1033 | |||
1034 | BT_DBG("%s status 0x%2.2x", hdev->name, status); | ||
1035 | |||
1036 | hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status); | ||
1037 | } | 754 | } |
1038 | 755 | ||
1039 | static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb) | 756 | static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -1095,8 +812,6 @@ static void hci_cc_le_read_buffer_size(struct hci_dev *hdev, | |||
1095 | hdev->le_cnt = hdev->le_pkts; | 812 | hdev->le_cnt = hdev->le_pkts; |
1096 | 813 | ||
1097 | BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts); | 814 | BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts); |
1098 | |||
1099 | hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status); | ||
1100 | } | 815 | } |
1101 | 816 | ||
1102 | static void hci_cc_le_read_local_features(struct hci_dev *hdev, | 817 | static void hci_cc_le_read_local_features(struct hci_dev *hdev, |
@@ -1108,8 +823,6 @@ static void hci_cc_le_read_local_features(struct hci_dev *hdev, | |||
1108 | 823 | ||
1109 | if (!rp->status) | 824 | if (!rp->status) |
1110 | memcpy(hdev->le_features, rp->features, 8); | 825 | memcpy(hdev->le_features, rp->features, 8); |
1111 | |||
1112 | hci_req_complete(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, rp->status); | ||
1113 | } | 826 | } |
1114 | 827 | ||
1115 | static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, | 828 | static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, |
@@ -1119,22 +832,8 @@ static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, | |||
1119 | 832 | ||
1120 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); | 833 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); |
1121 | 834 | ||
1122 | if (!rp->status) { | 835 | if (!rp->status) |
1123 | hdev->adv_tx_power = rp->tx_power; | 836 | hdev->adv_tx_power = rp->tx_power; |
1124 | if (!test_bit(HCI_INIT, &hdev->flags)) | ||
1125 | hci_update_ad(hdev); | ||
1126 | } | ||
1127 | |||
1128 | hci_req_complete(hdev, HCI_OP_LE_READ_ADV_TX_POWER, rp->status); | ||
1129 | } | ||
1130 | |||
1131 | static void hci_cc_le_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb) | ||
1132 | { | ||
1133 | __u8 status = *((__u8 *) skb->data); | ||
1134 | |||
1135 | BT_DBG("%s status 0x%2.2x", hdev->name, status); | ||
1136 | |||
1137 | hci_req_complete(hdev, HCI_OP_LE_SET_EVENT_MASK, status); | ||
1138 | } | 837 | } |
1139 | 838 | ||
1140 | static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) | 839 | static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -1231,12 +930,15 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb) | |||
1231 | clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags); | 930 | clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags); |
1232 | } | 931 | } |
1233 | 932 | ||
1234 | hci_dev_unlock(hdev); | 933 | if (!test_bit(HCI_INIT, &hdev->flags)) { |
934 | struct hci_request req; | ||
1235 | 935 | ||
1236 | if (!test_bit(HCI_INIT, &hdev->flags)) | 936 | hci_req_init(&req, hdev); |
1237 | hci_update_ad(hdev); | 937 | hci_update_ad(&req); |
938 | hci_req_run(&req, NULL); | ||
939 | } | ||
1238 | 940 | ||
1239 | hci_req_complete(hdev, HCI_OP_LE_SET_ADV_ENABLE, status); | 941 | hci_dev_unlock(hdev); |
1240 | } | 942 | } |
1241 | 943 | ||
1242 | static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb) | 944 | static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -1245,8 +947,6 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb) | |||
1245 | 947 | ||
1246 | BT_DBG("%s status 0x%2.2x", hdev->name, status); | 948 | BT_DBG("%s status 0x%2.2x", hdev->name, status); |
1247 | 949 | ||
1248 | hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status); | ||
1249 | |||
1250 | if (status) { | 950 | if (status) { |
1251 | hci_dev_lock(hdev); | 951 | hci_dev_lock(hdev); |
1252 | mgmt_start_discovery_failed(hdev, status); | 952 | mgmt_start_discovery_failed(hdev, status); |
@@ -1269,8 +969,6 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, | |||
1269 | 969 | ||
1270 | switch (cp->enable) { | 970 | switch (cp->enable) { |
1271 | case LE_SCANNING_ENABLED: | 971 | case LE_SCANNING_ENABLED: |
1272 | hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status); | ||
1273 | |||
1274 | if (status) { | 972 | if (status) { |
1275 | hci_dev_lock(hdev); | 973 | hci_dev_lock(hdev); |
1276 | mgmt_start_discovery_failed(hdev, status); | 974 | mgmt_start_discovery_failed(hdev, status); |
@@ -1321,32 +1019,6 @@ static void hci_cc_le_read_white_list_size(struct hci_dev *hdev, | |||
1321 | 1019 | ||
1322 | if (!rp->status) | 1020 | if (!rp->status) |
1323 | hdev->le_white_list_size = rp->size; | 1021 | hdev->le_white_list_size = rp->size; |
1324 | |||
1325 | hci_req_complete(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, rp->status); | ||
1326 | } | ||
1327 | |||
1328 | static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb) | ||
1329 | { | ||
1330 | struct hci_rp_le_ltk_reply *rp = (void *) skb->data; | ||
1331 | |||
1332 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); | ||
1333 | |||
1334 | if (rp->status) | ||
1335 | return; | ||
1336 | |||
1337 | hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status); | ||
1338 | } | ||
1339 | |||
1340 | static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb) | ||
1341 | { | ||
1342 | struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data; | ||
1343 | |||
1344 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); | ||
1345 | |||
1346 | if (rp->status) | ||
1347 | return; | ||
1348 | |||
1349 | hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status); | ||
1350 | } | 1022 | } |
1351 | 1023 | ||
1352 | static void hci_cc_le_read_supported_states(struct hci_dev *hdev, | 1024 | static void hci_cc_le_read_supported_states(struct hci_dev *hdev, |
@@ -1358,8 +1030,6 @@ static void hci_cc_le_read_supported_states(struct hci_dev *hdev, | |||
1358 | 1030 | ||
1359 | if (!rp->status) | 1031 | if (!rp->status) |
1360 | memcpy(hdev->le_states, rp->le_states, 8); | 1032 | memcpy(hdev->le_states, rp->le_states, 8); |
1361 | |||
1362 | hci_req_complete(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, rp->status); | ||
1363 | } | 1033 | } |
1364 | 1034 | ||
1365 | static void hci_cc_write_le_host_supported(struct hci_dev *hdev, | 1035 | static void hci_cc_write_le_host_supported(struct hci_dev *hdev, |
@@ -1389,8 +1059,6 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev, | |||
1389 | if (test_bit(HCI_MGMT, &hdev->dev_flags) && | 1059 | if (test_bit(HCI_MGMT, &hdev->dev_flags) && |
1390 | !test_bit(HCI_INIT, &hdev->flags)) | 1060 | !test_bit(HCI_INIT, &hdev->flags)) |
1391 | mgmt_le_enable_complete(hdev, sent->le, status); | 1061 | mgmt_le_enable_complete(hdev, sent->le, status); |
1392 | |||
1393 | hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status); | ||
1394 | } | 1062 | } |
1395 | 1063 | ||
1396 | static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev, | 1064 | static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev, |
@@ -1412,7 +1080,6 @@ static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) | |||
1412 | BT_DBG("%s status 0x%2.2x", hdev->name, status); | 1080 | BT_DBG("%s status 0x%2.2x", hdev->name, status); |
1413 | 1081 | ||
1414 | if (status) { | 1082 | if (status) { |
1415 | hci_req_complete(hdev, HCI_OP_INQUIRY, status); | ||
1416 | hci_conn_check_pending(hdev); | 1083 | hci_conn_check_pending(hdev); |
1417 | hci_dev_lock(hdev); | 1084 | hci_dev_lock(hdev); |
1418 | if (test_bit(HCI_MGMT, &hdev->dev_flags)) | 1085 | if (test_bit(HCI_MGMT, &hdev->dev_flags)) |
@@ -1884,11 +1551,6 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status) | |||
1884 | } | 1551 | } |
1885 | } | 1552 | } |
1886 | 1553 | ||
1887 | static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) | ||
1888 | { | ||
1889 | BT_DBG("%s status 0x%2.2x", hdev->name, status); | ||
1890 | } | ||
1891 | |||
1892 | static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status) | 1554 | static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status) |
1893 | { | 1555 | { |
1894 | struct hci_cp_create_phy_link *cp; | 1556 | struct hci_cp_create_phy_link *cp; |
@@ -1930,11 +1592,6 @@ static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status) | |||
1930 | amp_write_remote_assoc(hdev, cp->phy_handle); | 1592 | amp_write_remote_assoc(hdev, cp->phy_handle); |
1931 | } | 1593 | } |
1932 | 1594 | ||
1933 | static void hci_cs_create_logical_link(struct hci_dev *hdev, u8 status) | ||
1934 | { | ||
1935 | BT_DBG("%s status 0x%2.2x", hdev->name, status); | ||
1936 | } | ||
1937 | |||
1938 | static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 1595 | static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
1939 | { | 1596 | { |
1940 | __u8 status = *((__u8 *) skb->data); | 1597 | __u8 status = *((__u8 *) skb->data); |
@@ -1943,7 +1600,7 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
1943 | 1600 | ||
1944 | BT_DBG("%s status 0x%2.2x", hdev->name, status); | 1601 | BT_DBG("%s status 0x%2.2x", hdev->name, status); |
1945 | 1602 | ||
1946 | hci_req_complete(hdev, HCI_OP_INQUIRY, status); | 1603 | hci_req_cmd_complete(hdev, HCI_OP_INQUIRY, status); |
1947 | 1604 | ||
1948 | hci_conn_check_pending(hdev); | 1605 | hci_conn_check_pending(hdev); |
1949 | 1606 | ||
@@ -2399,7 +2056,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
2399 | clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); | 2056 | clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); |
2400 | 2057 | ||
2401 | if (ev->status && conn->state == BT_CONNECTED) { | 2058 | if (ev->status && conn->state == BT_CONNECTED) { |
2402 | hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE); | 2059 | hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); |
2403 | hci_conn_put(conn); | 2060 | hci_conn_put(conn); |
2404 | goto unlock; | 2061 | goto unlock; |
2405 | } | 2062 | } |
@@ -2491,20 +2148,10 @@ unlock: | |||
2491 | hci_dev_unlock(hdev); | 2148 | hci_dev_unlock(hdev); |
2492 | } | 2149 | } |
2493 | 2150 | ||
2494 | static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
2495 | { | ||
2496 | BT_DBG("%s", hdev->name); | ||
2497 | } | ||
2498 | |||
2499 | static void hci_qos_setup_complete_evt(struct hci_dev *hdev, | ||
2500 | struct sk_buff *skb) | ||
2501 | { | ||
2502 | BT_DBG("%s", hdev->name); | ||
2503 | } | ||
2504 | |||
2505 | static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2151 | static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
2506 | { | 2152 | { |
2507 | struct hci_ev_cmd_complete *ev = (void *) skb->data; | 2153 | struct hci_ev_cmd_complete *ev = (void *) skb->data; |
2154 | u8 status = skb->data[sizeof(*ev)]; | ||
2508 | __u16 opcode; | 2155 | __u16 opcode; |
2509 | 2156 | ||
2510 | skb_pull(skb, sizeof(*ev)); | 2157 | skb_pull(skb, sizeof(*ev)); |
@@ -2588,10 +2235,6 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
2588 | hci_cc_write_voice_setting(hdev, skb); | 2235 | hci_cc_write_voice_setting(hdev, skb); |
2589 | break; | 2236 | break; |
2590 | 2237 | ||
2591 | case HCI_OP_HOST_BUFFER_SIZE: | ||
2592 | hci_cc_host_buffer_size(hdev, skb); | ||
2593 | break; | ||
2594 | |||
2595 | case HCI_OP_WRITE_SSP_MODE: | 2238 | case HCI_OP_WRITE_SSP_MODE: |
2596 | hci_cc_write_ssp_mode(hdev, skb); | 2239 | hci_cc_write_ssp_mode(hdev, skb); |
2597 | break; | 2240 | break; |
@@ -2620,46 +2263,42 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
2620 | hci_cc_read_bd_addr(hdev, skb); | 2263 | hci_cc_read_bd_addr(hdev, skb); |
2621 | break; | 2264 | break; |
2622 | 2265 | ||
2623 | case HCI_OP_READ_DATA_BLOCK_SIZE: | 2266 | case HCI_OP_READ_PAGE_SCAN_ACTIVITY: |
2624 | hci_cc_read_data_block_size(hdev, skb); | 2267 | hci_cc_read_page_scan_activity(hdev, skb); |
2625 | break; | 2268 | break; |
2626 | 2269 | ||
2627 | case HCI_OP_WRITE_CA_TIMEOUT: | 2270 | case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY: |
2628 | hci_cc_write_ca_timeout(hdev, skb); | 2271 | hci_cc_write_page_scan_activity(hdev, skb); |
2629 | break; | 2272 | break; |
2630 | 2273 | ||
2631 | case HCI_OP_READ_FLOW_CONTROL_MODE: | 2274 | case HCI_OP_READ_PAGE_SCAN_TYPE: |
2632 | hci_cc_read_flow_control_mode(hdev, skb); | 2275 | hci_cc_read_page_scan_type(hdev, skb); |
2633 | break; | 2276 | break; |
2634 | 2277 | ||
2635 | case HCI_OP_READ_LOCAL_AMP_INFO: | 2278 | case HCI_OP_WRITE_PAGE_SCAN_TYPE: |
2636 | hci_cc_read_local_amp_info(hdev, skb); | 2279 | hci_cc_write_page_scan_type(hdev, skb); |
2637 | break; | 2280 | break; |
2638 | 2281 | ||
2639 | case HCI_OP_READ_LOCAL_AMP_ASSOC: | 2282 | case HCI_OP_READ_DATA_BLOCK_SIZE: |
2640 | hci_cc_read_local_amp_assoc(hdev, skb); | 2283 | hci_cc_read_data_block_size(hdev, skb); |
2641 | break; | 2284 | break; |
2642 | 2285 | ||
2643 | case HCI_OP_DELETE_STORED_LINK_KEY: | 2286 | case HCI_OP_READ_FLOW_CONTROL_MODE: |
2644 | hci_cc_delete_stored_link_key(hdev, skb); | 2287 | hci_cc_read_flow_control_mode(hdev, skb); |
2645 | break; | 2288 | break; |
2646 | 2289 | ||
2647 | case HCI_OP_SET_EVENT_MASK: | 2290 | case HCI_OP_READ_LOCAL_AMP_INFO: |
2648 | hci_cc_set_event_mask(hdev, skb); | 2291 | hci_cc_read_local_amp_info(hdev, skb); |
2649 | break; | 2292 | break; |
2650 | 2293 | ||
2651 | case HCI_OP_WRITE_INQUIRY_MODE: | 2294 | case HCI_OP_READ_LOCAL_AMP_ASSOC: |
2652 | hci_cc_write_inquiry_mode(hdev, skb); | 2295 | hci_cc_read_local_amp_assoc(hdev, skb); |
2653 | break; | 2296 | break; |
2654 | 2297 | ||
2655 | case HCI_OP_READ_INQ_RSP_TX_POWER: | 2298 | case HCI_OP_READ_INQ_RSP_TX_POWER: |
2656 | hci_cc_read_inq_rsp_tx_power(hdev, skb); | 2299 | hci_cc_read_inq_rsp_tx_power(hdev, skb); |
2657 | break; | 2300 | break; |
2658 | 2301 | ||
2659 | case HCI_OP_SET_EVENT_FLT: | ||
2660 | hci_cc_set_event_flt(hdev, skb); | ||
2661 | break; | ||
2662 | |||
2663 | case HCI_OP_PIN_CODE_REPLY: | 2302 | case HCI_OP_PIN_CODE_REPLY: |
2664 | hci_cc_pin_code_reply(hdev, skb); | 2303 | hci_cc_pin_code_reply(hdev, skb); |
2665 | break; | 2304 | break; |
@@ -2684,10 +2323,6 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
2684 | hci_cc_le_read_adv_tx_power(hdev, skb); | 2323 | hci_cc_le_read_adv_tx_power(hdev, skb); |
2685 | break; | 2324 | break; |
2686 | 2325 | ||
2687 | case HCI_OP_LE_SET_EVENT_MASK: | ||
2688 | hci_cc_le_set_event_mask(hdev, skb); | ||
2689 | break; | ||
2690 | |||
2691 | case HCI_OP_USER_CONFIRM_REPLY: | 2326 | case HCI_OP_USER_CONFIRM_REPLY: |
2692 | hci_cc_user_confirm_reply(hdev, skb); | 2327 | hci_cc_user_confirm_reply(hdev, skb); |
2693 | break; | 2328 | break; |
@@ -2720,14 +2355,6 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
2720 | hci_cc_le_read_white_list_size(hdev, skb); | 2355 | hci_cc_le_read_white_list_size(hdev, skb); |
2721 | break; | 2356 | break; |
2722 | 2357 | ||
2723 | case HCI_OP_LE_LTK_REPLY: | ||
2724 | hci_cc_le_ltk_reply(hdev, skb); | ||
2725 | break; | ||
2726 | |||
2727 | case HCI_OP_LE_LTK_NEG_REPLY: | ||
2728 | hci_cc_le_ltk_neg_reply(hdev, skb); | ||
2729 | break; | ||
2730 | |||
2731 | case HCI_OP_LE_READ_SUPPORTED_STATES: | 2358 | case HCI_OP_LE_READ_SUPPORTED_STATES: |
2732 | hci_cc_le_read_supported_states(hdev, skb); | 2359 | hci_cc_le_read_supported_states(hdev, skb); |
2733 | break; | 2360 | break; |
@@ -2745,9 +2372,11 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
2745 | break; | 2372 | break; |
2746 | } | 2373 | } |
2747 | 2374 | ||
2748 | if (ev->opcode != HCI_OP_NOP) | 2375 | if (opcode != HCI_OP_NOP) |
2749 | del_timer(&hdev->cmd_timer); | 2376 | del_timer(&hdev->cmd_timer); |
2750 | 2377 | ||
2378 | hci_req_cmd_complete(hdev, opcode, status); | ||
2379 | |||
2751 | if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { | 2380 | if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { |
2752 | atomic_set(&hdev->cmd_cnt, 1); | 2381 | atomic_set(&hdev->cmd_cnt, 1); |
2753 | if (!skb_queue_empty(&hdev->cmd_q)) | 2382 | if (!skb_queue_empty(&hdev->cmd_q)) |
@@ -2817,10 +2446,6 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
2817 | hci_cs_le_create_conn(hdev, ev->status); | 2446 | hci_cs_le_create_conn(hdev, ev->status); |
2818 | break; | 2447 | break; |
2819 | 2448 | ||
2820 | case HCI_OP_LE_START_ENC: | ||
2821 | hci_cs_le_start_enc(hdev, ev->status); | ||
2822 | break; | ||
2823 | |||
2824 | case HCI_OP_CREATE_PHY_LINK: | 2449 | case HCI_OP_CREATE_PHY_LINK: |
2825 | hci_cs_create_phylink(hdev, ev->status); | 2450 | hci_cs_create_phylink(hdev, ev->status); |
2826 | break; | 2451 | break; |
@@ -2829,18 +2454,16 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
2829 | hci_cs_accept_phylink(hdev, ev->status); | 2454 | hci_cs_accept_phylink(hdev, ev->status); |
2830 | break; | 2455 | break; |
2831 | 2456 | ||
2832 | case HCI_OP_CREATE_LOGICAL_LINK: | ||
2833 | hci_cs_create_logical_link(hdev, ev->status); | ||
2834 | break; | ||
2835 | |||
2836 | default: | 2457 | default: |
2837 | BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); | 2458 | BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); |
2838 | break; | 2459 | break; |
2839 | } | 2460 | } |
2840 | 2461 | ||
2841 | if (ev->opcode != HCI_OP_NOP) | 2462 | if (opcode != HCI_OP_NOP) |
2842 | del_timer(&hdev->cmd_timer); | 2463 | del_timer(&hdev->cmd_timer); |
2843 | 2464 | ||
2465 | hci_req_cmd_status(hdev, opcode, ev->status); | ||
2466 | |||
2844 | if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { | 2467 | if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { |
2845 | atomic_set(&hdev->cmd_cnt, 1); | 2468 | atomic_set(&hdev->cmd_cnt, 1); |
2846 | if (!skb_queue_empty(&hdev->cmd_q)) | 2469 | if (!skb_queue_empty(&hdev->cmd_q)) |
@@ -3391,18 +3014,6 @@ unlock: | |||
3391 | hci_dev_unlock(hdev); | 3014 | hci_dev_unlock(hdev); |
3392 | } | 3015 | } |
3393 | 3016 | ||
3394 | static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
3395 | { | ||
3396 | BT_DBG("%s", hdev->name); | ||
3397 | } | ||
3398 | |||
3399 | static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
3400 | { | ||
3401 | struct hci_ev_sniff_subrate *ev = (void *) skb->data; | ||
3402 | |||
3403 | BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); | ||
3404 | } | ||
3405 | |||
3406 | static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, | 3017 | static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, |
3407 | struct sk_buff *skb) | 3018 | struct sk_buff *skb) |
3408 | { | 3019 | { |
@@ -3472,7 +3083,7 @@ static void hci_key_refresh_complete_evt(struct hci_dev *hdev, | |||
3472 | clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); | 3083 | clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); |
3473 | 3084 | ||
3474 | if (ev->status && conn->state == BT_CONNECTED) { | 3085 | if (ev->status && conn->state == BT_CONNECTED) { |
3475 | hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE); | 3086 | hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); |
3476 | hci_conn_put(conn); | 3087 | hci_conn_put(conn); |
3477 | goto unlock; | 3088 | goto unlock; |
3478 | } | 3089 | } |
@@ -4130,14 +3741,6 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
4130 | hci_remote_features_evt(hdev, skb); | 3741 | hci_remote_features_evt(hdev, skb); |
4131 | break; | 3742 | break; |
4132 | 3743 | ||
4133 | case HCI_EV_REMOTE_VERSION: | ||
4134 | hci_remote_version_evt(hdev, skb); | ||
4135 | break; | ||
4136 | |||
4137 | case HCI_EV_QOS_SETUP_COMPLETE: | ||
4138 | hci_qos_setup_complete_evt(hdev, skb); | ||
4139 | break; | ||
4140 | |||
4141 | case HCI_EV_CMD_COMPLETE: | 3744 | case HCI_EV_CMD_COMPLETE: |
4142 | hci_cmd_complete_evt(hdev, skb); | 3745 | hci_cmd_complete_evt(hdev, skb); |
4143 | break; | 3746 | break; |
@@ -4194,14 +3797,6 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
4194 | hci_sync_conn_complete_evt(hdev, skb); | 3797 | hci_sync_conn_complete_evt(hdev, skb); |
4195 | break; | 3798 | break; |
4196 | 3799 | ||
4197 | case HCI_EV_SYNC_CONN_CHANGED: | ||
4198 | hci_sync_conn_changed_evt(hdev, skb); | ||
4199 | break; | ||
4200 | |||
4201 | case HCI_EV_SNIFF_SUBRATE: | ||
4202 | hci_sniff_subrate_evt(hdev, skb); | ||
4203 | break; | ||
4204 | |||
4205 | case HCI_EV_EXTENDED_INQUIRY_RESULT: | 3800 | case HCI_EV_EXTENDED_INQUIRY_RESULT: |
4206 | hci_extended_inquiry_result_evt(hdev, skb); | 3801 | hci_extended_inquiry_result_evt(hdev, skb); |
4207 | break; | 3802 | break; |
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 6a93614f2c49..aa4354fca77c 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c | |||
@@ -854,6 +854,11 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
854 | skb_queue_tail(&hdev->raw_q, skb); | 854 | skb_queue_tail(&hdev->raw_q, skb); |
855 | queue_work(hdev->workqueue, &hdev->tx_work); | 855 | queue_work(hdev->workqueue, &hdev->tx_work); |
856 | } else { | 856 | } else { |
857 | /* Stand-alone HCI commands must be flagged as | ||
858 | * single-command requests. | ||
859 | */ | ||
860 | bt_cb(skb)->req.start = true; | ||
861 | |||
857 | skb_queue_tail(&hdev->cmd_q, skb); | 862 | skb_queue_tail(&hdev->cmd_q, skb); |
858 | queue_work(hdev->workqueue, &hdev->cmd_work); | 863 | queue_work(hdev->workqueue, &hdev->cmd_work); |
859 | } | 864 | } |
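Note on the flag added above: commands injected through the raw HCI socket bypass the hci_request framework this series introduces, so each is marked as a one-command request. For orientation only, here is a minimal sketch of how in-kernel callers drive the same framework, built from the hci_req_init()/hci_req_add()/hci_req_run() calls visible elsewhere in this patch; the helper and callback names are made up for illustration and are not part of the patch.

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

/* Hypothetical completion callback; runs once the whole request finishes. */
static void example_scan_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

/* Hypothetical helper: queue a single command as a request and run it. */
static int example_write_scan_enable(struct hci_dev *hdev, u8 scan)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	/* hci_req_run() returns -ENODATA when no commands were queued. */
	return hci_req_run(&req, example_scan_enable_complete);
}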
@@ -1121,8 +1126,6 @@ error: | |||
1121 | void hci_sock_cleanup(void) | 1126 | void hci_sock_cleanup(void) |
1122 | { | 1127 | { |
1123 | bt_procfs_cleanup(&init_net, "hci"); | 1128 | bt_procfs_cleanup(&init_net, "hci"); |
1124 | if (bt_sock_unregister(BTPROTO_HCI) < 0) | 1129 | bt_sock_unregister(BTPROTO_HCI); |
1125 | BT_ERR("HCI socket unregistration failed"); | ||
1126 | |||
1127 | proto_unregister(&hci_sk_proto); | 1130 | proto_unregister(&hci_sk_proto); |
1128 | } | 1131 | } |
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index 23b4e242a31a..ff38561385de 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c | |||
@@ -590,10 +590,8 @@ int __init bt_sysfs_init(void) | |||
590 | bt_debugfs = debugfs_create_dir("bluetooth", NULL); | 590 | bt_debugfs = debugfs_create_dir("bluetooth", NULL); |
591 | 591 | ||
592 | bt_class = class_create(THIS_MODULE, "bluetooth"); | 592 | bt_class = class_create(THIS_MODULE, "bluetooth"); |
593 | if (IS_ERR(bt_class)) | ||
594 | return PTR_ERR(bt_class); | ||
595 | 593 | ||
596 | return 0; | 594 | return PTR_RET(bt_class); |
597 | } | 595 | } |
598 | 596 | ||
599 | void bt_sysfs_cleanup(void) | 597 | void bt_sysfs_cleanup(void) |
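For reference, PTR_RET() (from include/linux/err.h) folds the removed IS_ERR()/PTR_ERR() check into a single expression. Roughly, and only as an illustration of what the new return statement does with the class_create() result:

#include <linux/err.h>

/* Approximately what PTR_RET(ptr) evaluates to. */
static inline int ptr_ret_sketch(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}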
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index a7352ff3fd1e..2342327f3335 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c | |||
@@ -311,6 +311,9 @@ static int hidp_get_raw_report(struct hid_device *hid, | |||
311 | int numbered_reports = hid->report_enum[report_type].numbered; | 311 | int numbered_reports = hid->report_enum[report_type].numbered; |
312 | int ret; | 312 | int ret; |
313 | 313 | ||
314 | if (atomic_read(&session->terminate)) | ||
315 | return -EIO; | ||
316 | |||
314 | switch (report_type) { | 317 | switch (report_type) { |
315 | case HID_FEATURE_REPORT: | 318 | case HID_FEATURE_REPORT: |
316 | report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_FEATURE; | 319 | report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_FEATURE; |
@@ -722,6 +725,7 @@ static int hidp_session(void *arg) | |||
722 | set_current_state(TASK_INTERRUPTIBLE); | 725 | set_current_state(TASK_INTERRUPTIBLE); |
723 | } | 726 | } |
724 | set_current_state(TASK_RUNNING); | 727 | set_current_state(TASK_RUNNING); |
728 | atomic_inc(&session->terminate); | ||
725 | remove_wait_queue(sk_sleep(intr_sk), &intr_wait); | 729 | remove_wait_queue(sk_sleep(intr_sk), &intr_wait); |
726 | remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait); | 730 | remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait); |
727 | 731 | ||
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c index 82a829d90b0f..5d0f1ca0a314 100644 --- a/net/bluetooth/hidp/sock.c +++ b/net/bluetooth/hidp/sock.c | |||
@@ -304,8 +304,6 @@ error: | |||
304 | void __exit hidp_cleanup_sockets(void) | 304 | void __exit hidp_cleanup_sockets(void) |
305 | { | 305 | { |
306 | bt_procfs_cleanup(&init_net, "hidp"); | 306 | bt_procfs_cleanup(&init_net, "hidp"); |
307 | if (bt_sock_unregister(BTPROTO_HIDP) < 0) | 307 | bt_sock_unregister(BTPROTO_HIDP); |
308 | BT_ERR("Can't unregister HIDP socket"); | ||
309 | |||
310 | proto_unregister(&hidp_proto); | 308 | proto_unregister(&hidp_proto); |
311 | } | 309 | } |
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 1bcfb8422fdc..7f9704993b74 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c | |||
@@ -1312,8 +1312,6 @@ error: | |||
1312 | void l2cap_cleanup_sockets(void) | 1312 | void l2cap_cleanup_sockets(void) |
1313 | { | 1313 | { |
1314 | bt_procfs_cleanup(&init_net, "l2cap"); | 1314 | bt_procfs_cleanup(&init_net, "l2cap"); |
1315 | if (bt_sock_unregister(BTPROTO_L2CAP) < 0) | 1315 | bt_sock_unregister(BTPROTO_L2CAP); |
1316 | BT_ERR("L2CAP socket unregistration failed"); | ||
1317 | |||
1318 | proto_unregister(&l2cap_proto); | 1316 | proto_unregister(&l2cap_proto); |
1319 | } | 1317 | } |
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 39395c7144aa..03e7e732215f 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c | |||
@@ -384,7 +384,8 @@ static u32 get_supported_settings(struct hci_dev *hdev) | |||
384 | 384 | ||
385 | if (lmp_bredr_capable(hdev)) { | 385 | if (lmp_bredr_capable(hdev)) { |
386 | settings |= MGMT_SETTING_CONNECTABLE; | 386 | settings |= MGMT_SETTING_CONNECTABLE; |
387 | settings |= MGMT_SETTING_FAST_CONNECTABLE; | 387 | if (hdev->hci_ver >= BLUETOOTH_VER_1_2) |
388 | settings |= MGMT_SETTING_FAST_CONNECTABLE; | ||
388 | settings |= MGMT_SETTING_DISCOVERABLE; | 389 | settings |= MGMT_SETTING_DISCOVERABLE; |
389 | settings |= MGMT_SETTING_BREDR; | 390 | settings |= MGMT_SETTING_BREDR; |
390 | settings |= MGMT_SETTING_LINK_SECURITY; | 391 | settings |= MGMT_SETTING_LINK_SECURITY; |
@@ -409,6 +410,9 @@ static u32 get_current_settings(struct hci_dev *hdev) | |||
409 | if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) | 410 | if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) |
410 | settings |= MGMT_SETTING_CONNECTABLE; | 411 | settings |= MGMT_SETTING_CONNECTABLE; |
411 | 412 | ||
413 | if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) | ||
414 | settings |= MGMT_SETTING_FAST_CONNECTABLE; | ||
415 | |||
412 | if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) | 416 | if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) |
413 | settings |= MGMT_SETTING_DISCOVERABLE; | 417 | settings |= MGMT_SETTING_DISCOVERABLE; |
414 | 418 | ||
@@ -591,32 +595,33 @@ static void create_eir(struct hci_dev *hdev, u8 *data) | |||
591 | ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); | 595 | ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); |
592 | } | 596 | } |
593 | 597 | ||
594 | static int update_eir(struct hci_dev *hdev) | 598 | static void update_eir(struct hci_request *req) |
595 | { | 599 | { |
600 | struct hci_dev *hdev = req->hdev; | ||
596 | struct hci_cp_write_eir cp; | 601 | struct hci_cp_write_eir cp; |
597 | 602 | ||
598 | if (!hdev_is_powered(hdev)) | 603 | if (!hdev_is_powered(hdev)) |
599 | return 0; | 604 | return; |
600 | 605 | ||
601 | if (!lmp_ext_inq_capable(hdev)) | 606 | if (!lmp_ext_inq_capable(hdev)) |
602 | return 0; | 607 | return; |
603 | 608 | ||
604 | if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) | 609 | if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) |
605 | return 0; | 610 | return; |
606 | 611 | ||
607 | if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) | 612 | if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) |
608 | return 0; | 613 | return; |
609 | 614 | ||
610 | memset(&cp, 0, sizeof(cp)); | 615 | memset(&cp, 0, sizeof(cp)); |
611 | 616 | ||
612 | create_eir(hdev, cp.data); | 617 | create_eir(hdev, cp.data); |
613 | 618 | ||
614 | if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) | 619 | if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) |
615 | return 0; | 620 | return; |
616 | 621 | ||
617 | memcpy(hdev->eir, cp.data, sizeof(cp.data)); | 622 | memcpy(hdev->eir, cp.data, sizeof(cp.data)); |
618 | 623 | ||
619 | return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp); | 624 | hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); |
620 | } | 625 | } |
621 | 626 | ||
622 | static u8 get_service_classes(struct hci_dev *hdev) | 627 | static u8 get_service_classes(struct hci_dev *hdev) |
@@ -630,47 +635,48 @@ static u8 get_service_classes(struct hci_dev *hdev) | |||
630 | return val; | 635 | return val; |
631 | } | 636 | } |
632 | 637 | ||
633 | static int update_class(struct hci_dev *hdev) | 638 | static void update_class(struct hci_request *req) |
634 | { | 639 | { |
640 | struct hci_dev *hdev = req->hdev; | ||
635 | u8 cod[3]; | 641 | u8 cod[3]; |
636 | int err; | ||
637 | 642 | ||
638 | BT_DBG("%s", hdev->name); | 643 | BT_DBG("%s", hdev->name); |
639 | 644 | ||
640 | if (!hdev_is_powered(hdev)) | 645 | if (!hdev_is_powered(hdev)) |
641 | return 0; | 646 | return; |
642 | 647 | ||
643 | if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) | 648 | if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) |
644 | return 0; | 649 | return; |
645 | 650 | ||
646 | cod[0] = hdev->minor_class; | 651 | cod[0] = hdev->minor_class; |
647 | cod[1] = hdev->major_class; | 652 | cod[1] = hdev->major_class; |
648 | cod[2] = get_service_classes(hdev); | 653 | cod[2] = get_service_classes(hdev); |
649 | 654 | ||
650 | if (memcmp(cod, hdev->dev_class, 3) == 0) | 655 | if (memcmp(cod, hdev->dev_class, 3) == 0) |
651 | return 0; | 656 | return; |
652 | |||
653 | err = hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod); | ||
654 | if (err == 0) | ||
655 | set_bit(HCI_PENDING_CLASS, &hdev->dev_flags); | ||
656 | 657 | ||
657 | return err; | 658 | hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod); |
658 | } | 659 | } |
659 | 660 | ||
660 | static void service_cache_off(struct work_struct *work) | 661 | static void service_cache_off(struct work_struct *work) |
661 | { | 662 | { |
662 | struct hci_dev *hdev = container_of(work, struct hci_dev, | 663 | struct hci_dev *hdev = container_of(work, struct hci_dev, |
663 | service_cache.work); | 664 | service_cache.work); |
665 | struct hci_request req; | ||
664 | 666 | ||
665 | if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) | 667 | if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) |
666 | return; | 668 | return; |
667 | 669 | ||
670 | hci_req_init(&req, hdev); | ||
671 | |||
668 | hci_dev_lock(hdev); | 672 | hci_dev_lock(hdev); |
669 | 673 | ||
670 | update_eir(hdev); | 674 | update_eir(&req); |
671 | update_class(hdev); | 675 | update_class(&req); |
672 | 676 | ||
673 | hci_dev_unlock(hdev); | 677 | hci_dev_unlock(hdev); |
678 | |||
679 | hci_req_run(&req, NULL); | ||
674 | } | 680 | } |
675 | 681 | ||
676 | static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev) | 682 | static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev) |
@@ -994,11 +1000,64 @@ failed: | |||
994 | return err; | 1000 | return err; |
995 | } | 1001 | } |
996 | 1002 | ||
1003 | static void write_fast_connectable(struct hci_request *req, bool enable) | ||
1004 | { | ||
1005 | struct hci_dev *hdev = req->hdev; | ||
1006 | struct hci_cp_write_page_scan_activity acp; | ||
1007 | u8 type; | ||
1008 | |||
1009 | if (hdev->hci_ver < BLUETOOTH_VER_1_2) | ||
1010 | return; | ||
1011 | |||
1012 | if (enable) { | ||
1013 | type = PAGE_SCAN_TYPE_INTERLACED; | ||
1014 | |||
1015 | /* 160 msec page scan interval */ | ||
1016 | acp.interval = __constant_cpu_to_le16(0x0100); | ||
1017 | } else { | ||
1018 | type = PAGE_SCAN_TYPE_STANDARD; /* default */ | ||
1019 | |||
1020 | /* default 1.28 sec page scan */ | ||
1021 | acp.interval = __constant_cpu_to_le16(0x0800); | ||
1022 | } | ||
1023 | |||
1024 | acp.window = __constant_cpu_to_le16(0x0012); | ||
1025 | |||
1026 | if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval || | ||
1027 | __cpu_to_le16(hdev->page_scan_window) != acp.window) | ||
1028 | hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, | ||
1029 | sizeof(acp), &acp); | ||
1030 | |||
1031 | if (hdev->page_scan_type != type) | ||
1032 | hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type); | ||
1033 | } | ||
1034 | |||
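The timing figures in the comments above ("160 msec" and "default 1.28 sec") follow from the 0.625 ms baseband slot in which page-scan interval and window values are expressed. A quick check of the conversions, as a standalone illustration rather than kernel code:

#include <stdio.h>

/* Page scan interval and window are given in 0.625 ms baseband slots. */
static double slots_to_ms(unsigned int slots)
{
	return slots * 0.625;
}

int main(void)
{
	printf("fast interval    0x0100 = %.2f ms\n", slots_to_ms(0x0100)); /* 160 ms   */
	printf("default interval 0x0800 = %.2f ms\n", slots_to_ms(0x0800)); /* 1280 ms  */
	printf("scan window      0x0012 = %.2f ms\n", slots_to_ms(0x0012)); /* 11.25 ms */
	return 0;
}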
1035 | static void set_connectable_complete(struct hci_dev *hdev, u8 status) | ||
1036 | { | ||
1037 | struct pending_cmd *cmd; | ||
1038 | |||
1039 | BT_DBG("status 0x%02x", status); | ||
1040 | |||
1041 | hci_dev_lock(hdev); | ||
1042 | |||
1043 | cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev); | ||
1044 | if (!cmd) | ||
1045 | goto unlock; | ||
1046 | |||
1047 | send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev); | ||
1048 | |||
1049 | mgmt_pending_remove(cmd); | ||
1050 | |||
1051 | unlock: | ||
1052 | hci_dev_unlock(hdev); | ||
1053 | } | ||
1054 | |||
997 | static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, | 1055 | static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, |
998 | u16 len) | 1056 | u16 len) |
999 | { | 1057 | { |
1000 | struct mgmt_mode *cp = data; | 1058 | struct mgmt_mode *cp = data; |
1001 | struct pending_cmd *cmd; | 1059 | struct pending_cmd *cmd; |
1060 | struct hci_request req; | ||
1002 | u8 scan; | 1061 | u8 scan; |
1003 | int err; | 1062 | int err; |
1004 | 1063 | ||
@@ -1065,7 +1124,20 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, | |||
1065 | cancel_delayed_work(&hdev->discov_off); | 1124 | cancel_delayed_work(&hdev->discov_off); |
1066 | } | 1125 | } |
1067 | 1126 | ||
1068 | err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); | 1127 | hci_req_init(&req, hdev); |
1128 | |||
1129 | hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); | ||
1130 | |||
1131 | /* If we're going from non-connectable to connectable or | ||
1132 | * vice-versa when fast connectable is enabled ensure that fast | ||
1133 | * connectable gets disabled. write_fast_connectable won't do | ||
1134 | * anything if the page scan parameters are already what they | ||
1135 | * should be. | ||
1136 | */ | ||
1137 | if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) | ||
1138 | write_fast_connectable(&req, false); | ||
1139 | |||
1140 | err = hci_req_run(&req, set_connectable_complete); | ||
1069 | if (err < 0) | 1141 | if (err < 0) |
1070 | mgmt_pending_remove(cmd); | 1142 | mgmt_pending_remove(cmd); |
1071 | 1143 | ||
@@ -1332,6 +1404,29 @@ unlock: | |||
1332 | return err; | 1404 | return err; |
1333 | } | 1405 | } |
1334 | 1406 | ||
1407 | /* This is a helper function to test for pending mgmt commands that can | ||
1408 | * cause CoD or EIR HCI commands. We can only allow one such pending | ||
1409 | * mgmt command at a time since otherwise we cannot easily track what | ||
1410 | * the current values are, will be, and based on that calculate if a new | ||
1411 | * HCI command needs to be sent and if yes with what value. | ||
1412 | */ | ||
1413 | static bool pending_eir_or_class(struct hci_dev *hdev) | ||
1414 | { | ||
1415 | struct pending_cmd *cmd; | ||
1416 | |||
1417 | list_for_each_entry(cmd, &hdev->mgmt_pending, list) { | ||
1418 | switch (cmd->opcode) { | ||
1419 | case MGMT_OP_ADD_UUID: | ||
1420 | case MGMT_OP_REMOVE_UUID: | ||
1421 | case MGMT_OP_SET_DEV_CLASS: | ||
1422 | case MGMT_OP_SET_POWERED: | ||
1423 | return true; | ||
1424 | } | ||
1425 | } | ||
1426 | |||
1427 | return false; | ||
1428 | } | ||
1429 | |||
1335 | static const u8 bluetooth_base_uuid[] = { | 1430 | static const u8 bluetooth_base_uuid[] = { |
1336 | 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80, | 1431 | 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80, |
1337 | 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | 1432 | 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, |
@@ -1351,10 +1446,37 @@ static u8 get_uuid_size(const u8 *uuid) | |||
1351 | return 16; | 1446 | return 16; |
1352 | } | 1447 | } |
1353 | 1448 | ||
1449 | static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status) | ||
1450 | { | ||
1451 | struct pending_cmd *cmd; | ||
1452 | |||
1453 | hci_dev_lock(hdev); | ||
1454 | |||
1455 | cmd = mgmt_pending_find(mgmt_op, hdev); | ||
1456 | if (!cmd) | ||
1457 | goto unlock; | ||
1458 | |||
1459 | cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status), | ||
1460 | hdev->dev_class, 3); | ||
1461 | |||
1462 | mgmt_pending_remove(cmd); | ||
1463 | |||
1464 | unlock: | ||
1465 | hci_dev_unlock(hdev); | ||
1466 | } | ||
1467 | |||
1468 | static void add_uuid_complete(struct hci_dev *hdev, u8 status) | ||
1469 | { | ||
1470 | BT_DBG("status 0x%02x", status); | ||
1471 | |||
1472 | mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status); | ||
1473 | } | ||
1474 | |||
1354 | static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) | 1475 | static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) |
1355 | { | 1476 | { |
1356 | struct mgmt_cp_add_uuid *cp = data; | 1477 | struct mgmt_cp_add_uuid *cp = data; |
1357 | struct pending_cmd *cmd; | 1478 | struct pending_cmd *cmd; |
1479 | struct hci_request req; | ||
1358 | struct bt_uuid *uuid; | 1480 | struct bt_uuid *uuid; |
1359 | int err; | 1481 | int err; |
1360 | 1482 | ||
@@ -1362,7 +1484,7 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) | |||
1362 | 1484 | ||
1363 | hci_dev_lock(hdev); | 1485 | hci_dev_lock(hdev); |
1364 | 1486 | ||
1365 | if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) { | 1487 | if (pending_eir_or_class(hdev)) { |
1366 | err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID, | 1488 | err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID, |
1367 | MGMT_STATUS_BUSY); | 1489 | MGMT_STATUS_BUSY); |
1368 | goto failed; | 1490 | goto failed; |
@@ -1380,23 +1502,28 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) | |||
1380 | 1502 | ||
1381 | list_add_tail(&uuid->list, &hdev->uuids); | 1503 | list_add_tail(&uuid->list, &hdev->uuids); |
1382 | 1504 | ||
1383 | err = update_class(hdev); | 1505 | hci_req_init(&req, hdev); |
1384 | if (err < 0) | ||
1385 | goto failed; | ||
1386 | 1506 | ||
1387 | err = update_eir(hdev); | 1507 | update_class(&req); |
1388 | if (err < 0) | 1508 | update_eir(&req); |
1389 | goto failed; | 1509 | |
1510 | err = hci_req_run(&req, add_uuid_complete); | ||
1511 | if (err < 0) { | ||
1512 | if (err != -ENODATA) | ||
1513 | goto failed; | ||
1390 | 1514 | ||
1391 | if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) { | ||
1392 | err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0, | 1515 | err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0, |
1393 | hdev->dev_class, 3); | 1516 | hdev->dev_class, 3); |
1394 | goto failed; | 1517 | goto failed; |
1395 | } | 1518 | } |
1396 | 1519 | ||
1397 | cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len); | 1520 | cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len); |
1398 | if (!cmd) | 1521 | if (!cmd) { |
1399 | err = -ENOMEM; | 1522 | err = -ENOMEM; |
1523 | goto failed; | ||
1524 | } | ||
1525 | |||
1526 | err = 0; | ||
1400 | 1527 | ||
1401 | failed: | 1528 | failed: |
1402 | hci_dev_unlock(hdev); | 1529 | hci_dev_unlock(hdev); |
@@ -1417,6 +1544,13 @@ static bool enable_service_cache(struct hci_dev *hdev) | |||
1417 | return false; | 1544 | return false; |
1418 | } | 1545 | } |
1419 | 1546 | ||
1547 | static void remove_uuid_complete(struct hci_dev *hdev, u8 status) | ||
1548 | { | ||
1549 | BT_DBG("status 0x%02x", status); | ||
1550 | |||
1551 | mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status); | ||
1552 | } | ||
1553 | |||
1420 | static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, | 1554 | static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, |
1421 | u16 len) | 1555 | u16 len) |
1422 | { | 1556 | { |
@@ -1424,13 +1558,14 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, | |||
1424 | struct pending_cmd *cmd; | 1558 | struct pending_cmd *cmd; |
1425 | struct bt_uuid *match, *tmp; | 1559 | struct bt_uuid *match, *tmp; |
1426 | u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; | 1560 | u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; |
1561 | struct hci_request req; | ||
1427 | int err, found; | 1562 | int err, found; |
1428 | 1563 | ||
1429 | BT_DBG("request for %s", hdev->name); | 1564 | BT_DBG("request for %s", hdev->name); |
1430 | 1565 | ||
1431 | hci_dev_lock(hdev); | 1566 | hci_dev_lock(hdev); |
1432 | 1567 | ||
1433 | if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) { | 1568 | if (pending_eir_or_class(hdev)) { |
1434 | err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID, | 1569 | err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID, |
1435 | MGMT_STATUS_BUSY); | 1570 | MGMT_STATUS_BUSY); |
1436 | goto unlock; | 1571 | goto unlock; |
@@ -1466,34 +1601,47 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, | |||
1466 | } | 1601 | } |
1467 | 1602 | ||
1468 | update_class: | 1603 | update_class: |
1469 | err = update_class(hdev); | 1604 | hci_req_init(&req, hdev); |
1470 | if (err < 0) | ||
1471 | goto unlock; | ||
1472 | 1605 | ||
1473 | err = update_eir(hdev); | 1606 | update_class(&req); |
1474 | if (err < 0) | 1607 | update_eir(&req); |
1475 | goto unlock; | 1608 | |
1609 | err = hci_req_run(&req, remove_uuid_complete); | ||
1610 | if (err < 0) { | ||
1611 | if (err != -ENODATA) | ||
1612 | goto unlock; | ||
1476 | 1613 | ||
1477 | if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) { | ||
1478 | err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0, | 1614 | err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0, |
1479 | hdev->dev_class, 3); | 1615 | hdev->dev_class, 3); |
1480 | goto unlock; | 1616 | goto unlock; |
1481 | } | 1617 | } |
1482 | 1618 | ||
1483 | cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len); | 1619 | cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len); |
1484 | if (!cmd) | 1620 | if (!cmd) { |
1485 | err = -ENOMEM; | 1621 | err = -ENOMEM; |
1622 | goto unlock; | ||
1623 | } | ||
1624 | |||
1625 | err = 0; | ||
1486 | 1626 | ||
1487 | unlock: | 1627 | unlock: |
1488 | hci_dev_unlock(hdev); | 1628 | hci_dev_unlock(hdev); |
1489 | return err; | 1629 | return err; |
1490 | } | 1630 | } |
1491 | 1631 | ||
1632 | static void set_class_complete(struct hci_dev *hdev, u8 status) | ||
1633 | { | ||
1634 | BT_DBG("status 0x%02x", status); | ||
1635 | |||
1636 | mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status); | ||
1637 | } | ||
1638 | |||
1492 | static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, | 1639 | static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, |
1493 | u16 len) | 1640 | u16 len) |
1494 | { | 1641 | { |
1495 | struct mgmt_cp_set_dev_class *cp = data; | 1642 | struct mgmt_cp_set_dev_class *cp = data; |
1496 | struct pending_cmd *cmd; | 1643 | struct pending_cmd *cmd; |
1644 | struct hci_request req; | ||
1497 | int err; | 1645 | int err; |
1498 | 1646 | ||
1499 | BT_DBG("request for %s", hdev->name); | 1647 | BT_DBG("request for %s", hdev->name); |
@@ -1502,15 +1650,19 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, | |||
1502 | return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, | 1650 | return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, |
1503 | MGMT_STATUS_NOT_SUPPORTED); | 1651 | MGMT_STATUS_NOT_SUPPORTED); |
1504 | 1652 | ||
1505 | if (test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) | 1653 | hci_dev_lock(hdev); |
1506 | return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, | ||
1507 | MGMT_STATUS_BUSY); | ||
1508 | 1654 | ||
1509 | if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) | 1655 | if (pending_eir_or_class(hdev)) { |
1510 | return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, | 1656 | err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, |
1511 | MGMT_STATUS_INVALID_PARAMS); | 1657 | MGMT_STATUS_BUSY); |
1658 | goto unlock; | ||
1659 | } | ||
1512 | 1660 | ||
1513 | hci_dev_lock(hdev); | 1661 | if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) { |
1662 | err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, | ||
1663 | MGMT_STATUS_INVALID_PARAMS); | ||
1664 | goto unlock; | ||
1665 | } | ||
1514 | 1666 | ||
1515 | hdev->major_class = cp->major; | 1667 | hdev->major_class = cp->major; |
1516 | hdev->minor_class = cp->minor; | 1668 | hdev->minor_class = cp->minor; |
@@ -1521,26 +1673,34 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, | |||
1521 | goto unlock; | 1673 | goto unlock; |
1522 | } | 1674 | } |
1523 | 1675 | ||
1676 | hci_req_init(&req, hdev); | ||
1677 | |||
1524 | if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) { | 1678 | if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) { |
1525 | hci_dev_unlock(hdev); | 1679 | hci_dev_unlock(hdev); |
1526 | cancel_delayed_work_sync(&hdev->service_cache); | 1680 | cancel_delayed_work_sync(&hdev->service_cache); |
1527 | hci_dev_lock(hdev); | 1681 | hci_dev_lock(hdev); |
1528 | update_eir(hdev); | 1682 | update_eir(&req); |
1529 | } | 1683 | } |
1530 | 1684 | ||
1531 | err = update_class(hdev); | 1685 | update_class(&req); |
1532 | if (err < 0) | 1686 | |
1533 | goto unlock; | 1687 | err = hci_req_run(&req, set_class_complete); |
1688 | if (err < 0) { | ||
1689 | if (err != -ENODATA) | ||
1690 | goto unlock; | ||
1534 | 1691 | ||
1535 | if (!test_bit(HCI_PENDING_CLASS, &hdev->dev_flags)) { | ||
1536 | err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0, | 1692 | err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0, |
1537 | hdev->dev_class, 3); | 1693 | hdev->dev_class, 3); |
1538 | goto unlock; | 1694 | goto unlock; |
1539 | } | 1695 | } |
1540 | 1696 | ||
1541 | cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len); | 1697 | cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len); |
1542 | if (!cmd) | 1698 | if (!cmd) { |
1543 | err = -ENOMEM; | 1699 | err = -ENOMEM; |
1700 | goto unlock; | ||
1701 | } | ||
1702 | |||
1703 | err = 0; | ||
1544 | 1704 | ||
1545 | unlock: | 1705 | unlock: |
1546 | hci_dev_unlock(hdev); | 1706 | hci_dev_unlock(hdev); |
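The three handlers above (add_uuid, remove_uuid, set_dev_class) converge on the same shape: instead of firing the class-of-device and EIR writes individually with hci_send_cmd(), they queue both through a struct hci_request and reply either immediately (hci_req_run() returns -ENODATA when the helpers queued nothing) or from the request's completion callback. The sketch below condenses that shape into one hypothetical helper; it only uses calls and signatures that appear in the hunks above.

/* Condensed sketch (hypothetical helper name) of the handler shape that
 * add_uuid(), remove_uuid() and set_dev_class() now share. */
static int queue_class_update(struct sock *sk, struct hci_dev *hdev,
			      u16 mgmt_op, void *data, u16 len,
			      void (*complete)(struct hci_dev *hdev, u8 status))
{
	struct hci_request req;
	struct pending_cmd *cmd;
	int err;

	hci_req_init(&req, hdev);

	/* Queue both updates; each helper adds its HCI command to the
	 * request only when the controller actually needs it. */
	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, complete);
	if (err < 0) {
		if (err != -ENODATA)
			return err;

		/* Empty request: nothing was sent to the controller,
		 * so answer with the current class of device now. */
		return cmd_complete(sk, hdev->id, mgmt_op, 0,
				    hdev->dev_class, 3);
	}

	/* Commands were queued: park the mgmt command and let the
	 * request's completion callback send the reply. */
	cmd = mgmt_pending_add(sk, mgmt_op, hdev, data, len);
	if (!cmd)
		return -ENOMEM;

	return 0;
}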
@@ -2140,7 +2300,7 @@ unlock: | |||
2140 | } | 2300 | } |
2141 | 2301 | ||
2142 | static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev, | 2302 | static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev, |
2143 | bdaddr_t *bdaddr, u8 type, u16 mgmt_op, | 2303 | struct mgmt_addr_info *addr, u16 mgmt_op, |
2144 | u16 hci_op, __le32 passkey) | 2304 | u16 hci_op, __le32 passkey) |
2145 | { | 2305 | { |
2146 | struct pending_cmd *cmd; | 2306 | struct pending_cmd *cmd; |
@@ -2150,37 +2310,41 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev, | |||
2150 | hci_dev_lock(hdev); | 2310 | hci_dev_lock(hdev); |
2151 | 2311 | ||
2152 | if (!hdev_is_powered(hdev)) { | 2312 | if (!hdev_is_powered(hdev)) { |
2153 | err = cmd_status(sk, hdev->id, mgmt_op, | 2313 | err = cmd_complete(sk, hdev->id, mgmt_op, |
2154 | MGMT_STATUS_NOT_POWERED); | 2314 | MGMT_STATUS_NOT_POWERED, addr, |
2315 | sizeof(*addr)); | ||
2155 | goto done; | 2316 | goto done; |
2156 | } | 2317 | } |
2157 | 2318 | ||
2158 | if (type == BDADDR_BREDR) | 2319 | if (addr->type == BDADDR_BREDR) |
2159 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr); | 2320 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr); |
2160 | else | 2321 | else |
2161 | conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr); | 2322 | conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr); |
2162 | 2323 | ||
2163 | if (!conn) { | 2324 | if (!conn) { |
2164 | err = cmd_status(sk, hdev->id, mgmt_op, | 2325 | err = cmd_complete(sk, hdev->id, mgmt_op, |
2165 | MGMT_STATUS_NOT_CONNECTED); | 2326 | MGMT_STATUS_NOT_CONNECTED, addr, |
2327 | sizeof(*addr)); | ||
2166 | goto done; | 2328 | goto done; |
2167 | } | 2329 | } |
2168 | 2330 | ||
2169 | if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) { | 2331 | if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) { |
2170 | /* Continue with pairing via SMP */ | 2332 | /* Continue with pairing via SMP */ |
2171 | err = smp_user_confirm_reply(conn, mgmt_op, passkey); | 2333 | err = smp_user_confirm_reply(conn, mgmt_op, passkey); |
2172 | 2334 | ||
2173 | if (!err) | 2335 | if (!err) |
2174 | err = cmd_status(sk, hdev->id, mgmt_op, | 2336 | err = cmd_complete(sk, hdev->id, mgmt_op, |
2175 | MGMT_STATUS_SUCCESS); | 2337 | MGMT_STATUS_SUCCESS, addr, |
2338 | sizeof(*addr)); | ||
2176 | else | 2339 | else |
2177 | err = cmd_status(sk, hdev->id, mgmt_op, | 2340 | err = cmd_complete(sk, hdev->id, mgmt_op, |
2178 | MGMT_STATUS_FAILED); | 2341 | MGMT_STATUS_FAILED, addr, |
2342 | sizeof(*addr)); | ||
2179 | 2343 | ||
2180 | goto done; | 2344 | goto done; |
2181 | } | 2345 | } |
2182 | 2346 | ||
2183 | cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr)); | 2347 | cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr)); |
2184 | if (!cmd) { | 2348 | if (!cmd) { |
2185 | err = -ENOMEM; | 2349 | err = -ENOMEM; |
2186 | goto done; | 2350 | goto done; |
@@ -2190,11 +2354,12 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev, | |||
2190 | if (hci_op == HCI_OP_USER_PASSKEY_REPLY) { | 2354 | if (hci_op == HCI_OP_USER_PASSKEY_REPLY) { |
2191 | struct hci_cp_user_passkey_reply cp; | 2355 | struct hci_cp_user_passkey_reply cp; |
2192 | 2356 | ||
2193 | bacpy(&cp.bdaddr, bdaddr); | 2357 | bacpy(&cp.bdaddr, &addr->bdaddr); |
2194 | cp.passkey = passkey; | 2358 | cp.passkey = passkey; |
2195 | err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp); | 2359 | err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp); |
2196 | } else | 2360 | } else |
2197 | err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr); | 2361 | err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr), |
2362 | &addr->bdaddr); | ||
2198 | 2363 | ||
2199 | if (err < 0) | 2364 | if (err < 0) |
2200 | mgmt_pending_remove(cmd); | 2365 | mgmt_pending_remove(cmd); |
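user_pairing_resp() and its callers now pass the whole struct mgmt_addr_info through rather than a bare bdaddr_t plus type, so every reply can be a cmd_complete() that echoes the peer address back to userspace even for the NOT_POWERED and NOT_CONNECTED failures. For reference, this is the small wire-format record from include/net/bluetooth/mgmt.h; the sketch below shows the layout these hunks rely on (reproduced from memory, so treat it as illustrative rather than authoritative).

/* Layout assumed for struct mgmt_addr_info; the handlers above only
 * rely on the bdaddr and type members. */
struct mgmt_addr_info {
	bdaddr_t bdaddr;
	__u8     type;	/* BDADDR_BREDR, BDADDR_LE_PUBLIC or BDADDR_LE_RANDOM */
} __packed;

/* Every response now carries it back, whatever the status:
 *   cmd_complete(sk, hdev->id, mgmt_op, status, addr, sizeof(*addr));
 */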
@@ -2211,7 +2376,7 @@ static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev, | |||
2211 | 2376 | ||
2212 | BT_DBG(""); | 2377 | BT_DBG(""); |
2213 | 2378 | ||
2214 | return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type, | 2379 | return user_pairing_resp(sk, hdev, &cp->addr, |
2215 | MGMT_OP_PIN_CODE_NEG_REPLY, | 2380 | MGMT_OP_PIN_CODE_NEG_REPLY, |
2216 | HCI_OP_PIN_CODE_NEG_REPLY, 0); | 2381 | HCI_OP_PIN_CODE_NEG_REPLY, 0); |
2217 | } | 2382 | } |
@@ -2227,7 +2392,7 @@ static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data, | |||
2227 | return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY, | 2392 | return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY, |
2228 | MGMT_STATUS_INVALID_PARAMS); | 2393 | MGMT_STATUS_INVALID_PARAMS); |
2229 | 2394 | ||
2230 | return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type, | 2395 | return user_pairing_resp(sk, hdev, &cp->addr, |
2231 | MGMT_OP_USER_CONFIRM_REPLY, | 2396 | MGMT_OP_USER_CONFIRM_REPLY, |
2232 | HCI_OP_USER_CONFIRM_REPLY, 0); | 2397 | HCI_OP_USER_CONFIRM_REPLY, 0); |
2233 | } | 2398 | } |
@@ -2239,7 +2404,7 @@ static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev, | |||
2239 | 2404 | ||
2240 | BT_DBG(""); | 2405 | BT_DBG(""); |
2241 | 2406 | ||
2242 | return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type, | 2407 | return user_pairing_resp(sk, hdev, &cp->addr, |
2243 | MGMT_OP_USER_CONFIRM_NEG_REPLY, | 2408 | MGMT_OP_USER_CONFIRM_NEG_REPLY, |
2244 | HCI_OP_USER_CONFIRM_NEG_REPLY, 0); | 2409 | HCI_OP_USER_CONFIRM_NEG_REPLY, 0); |
2245 | } | 2410 | } |
@@ -2251,7 +2416,7 @@ static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data, | |||
2251 | 2416 | ||
2252 | BT_DBG(""); | 2417 | BT_DBG(""); |
2253 | 2418 | ||
2254 | return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type, | 2419 | return user_pairing_resp(sk, hdev, &cp->addr, |
2255 | MGMT_OP_USER_PASSKEY_REPLY, | 2420 | MGMT_OP_USER_PASSKEY_REPLY, |
2256 | HCI_OP_USER_PASSKEY_REPLY, cp->passkey); | 2421 | HCI_OP_USER_PASSKEY_REPLY, cp->passkey); |
2257 | } | 2422 | } |
@@ -2263,18 +2428,47 @@ static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev, | |||
2263 | 2428 | ||
2264 | BT_DBG(""); | 2429 | BT_DBG(""); |
2265 | 2430 | ||
2266 | return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type, | 2431 | return user_pairing_resp(sk, hdev, &cp->addr, |
2267 | MGMT_OP_USER_PASSKEY_NEG_REPLY, | 2432 | MGMT_OP_USER_PASSKEY_NEG_REPLY, |
2268 | HCI_OP_USER_PASSKEY_NEG_REPLY, 0); | 2433 | HCI_OP_USER_PASSKEY_NEG_REPLY, 0); |
2269 | } | 2434 | } |
2270 | 2435 | ||
2271 | static int update_name(struct hci_dev *hdev, const char *name) | 2436 | static void update_name(struct hci_request *req) |
2272 | { | 2437 | { |
2438 | struct hci_dev *hdev = req->hdev; | ||
2273 | struct hci_cp_write_local_name cp; | 2439 | struct hci_cp_write_local_name cp; |
2274 | 2440 | ||
2275 | memcpy(cp.name, name, sizeof(cp.name)); | 2441 | memcpy(cp.name, hdev->dev_name, sizeof(cp.name)); |
2442 | |||
2443 | hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp); | ||
2444 | } | ||
2445 | |||
2446 | static void set_name_complete(struct hci_dev *hdev, u8 status) | ||
2447 | { | ||
2448 | struct mgmt_cp_set_local_name *cp; | ||
2449 | struct pending_cmd *cmd; | ||
2450 | |||
2451 | BT_DBG("status 0x%02x", status); | ||
2452 | |||
2453 | hci_dev_lock(hdev); | ||
2454 | |||
2455 | cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); | ||
2456 | if (!cmd) | ||
2457 | goto unlock; | ||
2458 | |||
2459 | cp = cmd->param; | ||
2276 | 2460 | ||
2277 | return hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp); | 2461 | if (status) |
2462 | cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, | ||
2463 | mgmt_status(status)); | ||
2464 | else | ||
2465 | cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, | ||
2466 | cp, sizeof(*cp)); | ||
2467 | |||
2468 | mgmt_pending_remove(cmd); | ||
2469 | |||
2470 | unlock: | ||
2471 | hci_dev_unlock(hdev); | ||
2278 | } | 2472 | } |
2279 | 2473 | ||
2280 | static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data, | 2474 | static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data, |
@@ -2282,12 +2476,24 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data, | |||
2282 | { | 2476 | { |
2283 | struct mgmt_cp_set_local_name *cp = data; | 2477 | struct mgmt_cp_set_local_name *cp = data; |
2284 | struct pending_cmd *cmd; | 2478 | struct pending_cmd *cmd; |
2479 | struct hci_request req; | ||
2285 | int err; | 2480 | int err; |
2286 | 2481 | ||
2287 | BT_DBG(""); | 2482 | BT_DBG(""); |
2288 | 2483 | ||
2289 | hci_dev_lock(hdev); | 2484 | hci_dev_lock(hdev); |
2290 | 2485 | ||
2486 | /* If the old values are the same as the new ones just return a | ||
2487 | * direct command complete event. | ||
2488 | */ | ||
2489 | if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) && | ||
2490 | !memcmp(hdev->short_name, cp->short_name, | ||
2491 | sizeof(hdev->short_name))) { | ||
2492 | err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, | ||
2493 | data, len); | ||
2494 | goto failed; | ||
2495 | } | ||
2496 | |||
2291 | memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name)); | 2497 | memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name)); |
2292 | 2498 | ||
2293 | if (!hdev_is_powered(hdev)) { | 2499 | if (!hdev_is_powered(hdev)) { |
@@ -2310,7 +2516,19 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data, | |||
2310 | goto failed; | 2516 | goto failed; |
2311 | } | 2517 | } |
2312 | 2518 | ||
2313 | err = update_name(hdev, cp->name); | 2519 | memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name)); |
2520 | |||
2521 | hci_req_init(&req, hdev); | ||
2522 | |||
2523 | if (lmp_bredr_capable(hdev)) { | ||
2524 | update_name(&req); | ||
2525 | update_eir(&req); | ||
2526 | } | ||
2527 | |||
2528 | if (lmp_le_capable(hdev)) | ||
2529 | hci_update_ad(&req); | ||
2530 | |||
2531 | err = hci_req_run(&req, set_name_complete); | ||
2314 | if (err < 0) | 2532 | if (err < 0) |
2315 | mgmt_pending_remove(cmd); | 2533 | mgmt_pending_remove(cmd); |
2316 | 2534 | ||
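Note that update_name() now takes the request context and reads the name from hdev->dev_name instead of receiving it as an argument, which is why set_local_name() commits the new name to hdev before building the request, and why powered_update_hci() further down can reuse it without plumbing a name through. A minimal usage sketch, with a hypothetical helper name and assuming name points at an HCI_MAX_NAME_LENGTH buffer:

/* Hypothetical helper condensing the calling convention used above. */
static int queue_name_update(struct hci_dev *hdev, const u8 *name)
{
	struct hci_request req;

	/* Commit the name first; update_name() reads hdev->dev_name. */
	memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);	/* the local name is mirrored into the EIR */
	}

	if (lmp_le_capable(hdev))
		hci_update_ad(&req);	/* and into the LE advertising data */

	return hci_req_run(&req, set_name_complete);
}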
@@ -2698,6 +2916,7 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data, | |||
2698 | u16 len) | 2916 | u16 len) |
2699 | { | 2917 | { |
2700 | struct mgmt_cp_set_device_id *cp = data; | 2918 | struct mgmt_cp_set_device_id *cp = data; |
2919 | struct hci_request req; | ||
2701 | int err; | 2920 | int err; |
2702 | __u16 source; | 2921 | __u16 source; |
2703 | 2922 | ||
@@ -2718,24 +2937,59 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data, | |||
2718 | 2937 | ||
2719 | err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0); | 2938 | err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0); |
2720 | 2939 | ||
2721 | update_eir(hdev); | 2940 | hci_req_init(&req, hdev); |
2941 | update_eir(&req); | ||
2942 | hci_req_run(&req, NULL); | ||
2722 | 2943 | ||
2723 | hci_dev_unlock(hdev); | 2944 | hci_dev_unlock(hdev); |
2724 | 2945 | ||
2725 | return err; | 2946 | return err; |
2726 | } | 2947 | } |
2727 | 2948 | ||
2949 | static void fast_connectable_complete(struct hci_dev *hdev, u8 status) | ||
2950 | { | ||
2951 | struct pending_cmd *cmd; | ||
2952 | |||
2953 | BT_DBG("status 0x%02x", status); | ||
2954 | |||
2955 | hci_dev_lock(hdev); | ||
2956 | |||
2957 | cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev); | ||
2958 | if (!cmd) | ||
2959 | goto unlock; | ||
2960 | |||
2961 | if (status) { | ||
2962 | cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, | ||
2963 | mgmt_status(status)); | ||
2964 | } else { | ||
2965 | struct mgmt_mode *cp = cmd->param; | ||
2966 | |||
2967 | if (cp->val) | ||
2968 | set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags); | ||
2969 | else | ||
2970 | clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags); | ||
2971 | |||
2972 | send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev); | ||
2973 | new_settings(hdev, cmd->sk); | ||
2974 | } | ||
2975 | |||
2976 | mgmt_pending_remove(cmd); | ||
2977 | |||
2978 | unlock: | ||
2979 | hci_dev_unlock(hdev); | ||
2980 | } | ||
2981 | |||
2728 | static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev, | 2982 | static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev, |
2729 | void *data, u16 len) | 2983 | void *data, u16 len) |
2730 | { | 2984 | { |
2731 | struct mgmt_mode *cp = data; | 2985 | struct mgmt_mode *cp = data; |
2732 | struct hci_cp_write_page_scan_activity acp; | 2986 | struct pending_cmd *cmd; |
2733 | u8 type; | 2987 | struct hci_request req; |
2734 | int err; | 2988 | int err; |
2735 | 2989 | ||
2736 | BT_DBG("%s", hdev->name); | 2990 | BT_DBG("%s", hdev->name); |
2737 | 2991 | ||
2738 | if (!lmp_bredr_capable(hdev)) | 2992 | if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2) |
2739 | return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, | 2993 | return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, |
2740 | MGMT_STATUS_NOT_SUPPORTED); | 2994 | MGMT_STATUS_NOT_SUPPORTED); |
2741 | 2995 | ||
@@ -2753,40 +3007,39 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev, | |||
2753 | 3007 | ||
2754 | hci_dev_lock(hdev); | 3008 | hci_dev_lock(hdev); |
2755 | 3009 | ||
2756 | if (cp->val) { | 3010 | if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) { |
2757 | type = PAGE_SCAN_TYPE_INTERLACED; | 3011 | err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, |
3012 | MGMT_STATUS_BUSY); | ||
3013 | goto unlock; | ||
3014 | } | ||
2758 | 3015 | ||
2759 | /* 160 msec page scan interval */ | 3016 | if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) { |
2760 | acp.interval = __constant_cpu_to_le16(0x0100); | 3017 | err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, |
2761 | } else { | 3018 | hdev); |
2762 | type = PAGE_SCAN_TYPE_STANDARD; /* default */ | 3019 | goto unlock; |
3020 | } | ||
2763 | 3021 | ||
2764 | /* default 1.28 sec page scan */ | 3022 | cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, |
2765 | acp.interval = __constant_cpu_to_le16(0x0800); | 3023 | data, len); |
3024 | if (!cmd) { | ||
3025 | err = -ENOMEM; | ||
3026 | goto unlock; | ||
2766 | } | 3027 | } |
2767 | 3028 | ||
2768 | /* default 11.25 msec page scan window */ | 3029 | hci_req_init(&req, hdev); |
2769 | acp.window = __constant_cpu_to_le16(0x0012); | ||
2770 | 3030 | ||
2771 | err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp), | 3031 | write_fast_connectable(&req, cp->val); |
2772 | &acp); | ||
2773 | if (err < 0) { | ||
2774 | err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, | ||
2775 | MGMT_STATUS_FAILED); | ||
2776 | goto done; | ||
2777 | } | ||
2778 | 3032 | ||
2779 | err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type); | 3033 | err = hci_req_run(&req, fast_connectable_complete); |
2780 | if (err < 0) { | 3034 | if (err < 0) { |
2781 | err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, | 3035 | err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, |
2782 | MGMT_STATUS_FAILED); | 3036 | MGMT_STATUS_FAILED); |
2783 | goto done; | 3037 | mgmt_pending_remove(cmd); |
2784 | } | 3038 | } |
2785 | 3039 | ||
2786 | err = cmd_complete(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 0, | 3040 | unlock: |
2787 | NULL, 0); | ||
2788 | done: | ||
2789 | hci_dev_unlock(hdev); | 3041 | hci_dev_unlock(hdev); |
3042 | |||
2790 | return err; | 3043 | return err; |
2791 | } | 3044 | } |
2792 | 3045 | ||
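set_fast_connectable() no longer programs the page-scan parameters inline; it queues a write_fast_connectable() helper that is introduced elsewhere in this patch and is not part of the hunks shown here. Reconstructed from the inline code it replaces, the helper presumably queues something close to the sketch below; per the comment added to set_bredr_scan() further down, the real version is also expected to skip the commands when the current parameters already match.

/* Illustrative reconstruction of write_fast_connectable(), based on the
 * parameters used by the removed inline code above. */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = __constant_cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = __constant_cpu_to_le16(0x0800);
	}

	/* default 11.25 msec page scan window */
	acp.window = __constant_cpu_to_le16(0x0012);

	hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp), &acp);
	hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}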
@@ -3043,79 +3296,115 @@ static void settings_rsp(struct pending_cmd *cmd, void *data) | |||
3043 | mgmt_pending_free(cmd); | 3296 | mgmt_pending_free(cmd); |
3044 | } | 3297 | } |
3045 | 3298 | ||
3046 | static int set_bredr_scan(struct hci_dev *hdev) | 3299 | static void set_bredr_scan(struct hci_request *req) |
3047 | { | 3300 | { |
3301 | struct hci_dev *hdev = req->hdev; | ||
3048 | u8 scan = 0; | 3302 | u8 scan = 0; |
3049 | 3303 | ||
3304 | /* Ensure that fast connectable is disabled. This function will | ||
3305 | * not do anything if the page scan parameters are already what | ||
3306 | * they should be. | ||
3307 | */ | ||
3308 | write_fast_connectable(req, false); | ||
3309 | |||
3050 | if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) | 3310 | if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) |
3051 | scan |= SCAN_PAGE; | 3311 | scan |= SCAN_PAGE; |
3052 | if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) | 3312 | if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) |
3053 | scan |= SCAN_INQUIRY; | 3313 | scan |= SCAN_INQUIRY; |
3054 | 3314 | ||
3055 | if (!scan) | 3315 | if (scan) |
3056 | return 0; | 3316 | hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); |
3057 | |||
3058 | return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); | ||
3059 | } | 3317 | } |
3060 | 3318 | ||
3061 | int mgmt_powered(struct hci_dev *hdev, u8 powered) | 3319 | static void powered_complete(struct hci_dev *hdev, u8 status) |
3062 | { | 3320 | { |
3063 | struct cmd_lookup match = { NULL, hdev }; | 3321 | struct cmd_lookup match = { NULL, hdev }; |
3064 | int err; | ||
3065 | 3322 | ||
3066 | if (!test_bit(HCI_MGMT, &hdev->dev_flags)) | 3323 | BT_DBG("status 0x%02x", status); |
3067 | return 0; | 3324 | |
3325 | hci_dev_lock(hdev); | ||
3068 | 3326 | ||
3069 | mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); | 3327 | mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); |
3070 | 3328 | ||
3071 | if (powered) { | 3329 | new_settings(hdev, match.sk); |
3072 | u8 link_sec; | ||
3073 | 3330 | ||
3074 | if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) && | 3331 | hci_dev_unlock(hdev); |
3075 | !lmp_host_ssp_capable(hdev)) { | ||
3076 | u8 ssp = 1; | ||
3077 | 3332 | ||
3078 | hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &ssp); | 3333 | if (match.sk) |
3079 | } | 3334 | sock_put(match.sk); |
3335 | } | ||
3080 | 3336 | ||
3081 | if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { | 3337 | static int powered_update_hci(struct hci_dev *hdev) |
3082 | struct hci_cp_write_le_host_supported cp; | 3338 | { |
3339 | struct hci_request req; | ||
3340 | u8 link_sec; | ||
3083 | 3341 | ||
3084 | cp.le = 1; | 3342 | hci_req_init(&req, hdev); |
3085 | cp.simul = lmp_le_br_capable(hdev); | ||
3086 | 3343 | ||
3087 | /* Check first if we already have the right | 3344 | if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) && |
3088 | * host state (host features set) | 3345 | !lmp_host_ssp_capable(hdev)) { |
3089 | */ | 3346 | u8 ssp = 1; |
3090 | if (cp.le != lmp_host_le_capable(hdev) || | ||
3091 | cp.simul != lmp_host_le_br_capable(hdev)) | ||
3092 | hci_send_cmd(hdev, | ||
3093 | HCI_OP_WRITE_LE_HOST_SUPPORTED, | ||
3094 | sizeof(cp), &cp); | ||
3095 | } | ||
3096 | 3347 | ||
3097 | link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags); | 3348 | hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp); |
3098 | if (link_sec != test_bit(HCI_AUTH, &hdev->flags)) | 3349 | } |
3099 | hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, | ||
3100 | sizeof(link_sec), &link_sec); | ||
3101 | 3350 | ||
3102 | if (lmp_bredr_capable(hdev)) { | 3351 | if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { |
3103 | set_bredr_scan(hdev); | 3352 | struct hci_cp_write_le_host_supported cp; |
3104 | update_class(hdev); | ||
3105 | update_name(hdev, hdev->dev_name); | ||
3106 | update_eir(hdev); | ||
3107 | } | ||
3108 | } else { | ||
3109 | u8 status = MGMT_STATUS_NOT_POWERED; | ||
3110 | u8 zero_cod[] = { 0, 0, 0 }; | ||
3111 | 3353 | ||
3112 | mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); | 3354 | cp.le = 1; |
3355 | cp.simul = lmp_le_br_capable(hdev); | ||
3113 | 3356 | ||
3114 | if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) | 3357 | /* Check first if we already have the right |
3115 | mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, | 3358 | * host state (host features set) |
3116 | zero_cod, sizeof(zero_cod), NULL); | 3359 | */ |
3360 | if (cp.le != lmp_host_le_capable(hdev) || | ||
3361 | cp.simul != lmp_host_le_br_capable(hdev)) | ||
3362 | hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, | ||
3363 | sizeof(cp), &cp); | ||
3117 | } | 3364 | } |
3118 | 3365 | ||
3366 | link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags); | ||
3367 | if (link_sec != test_bit(HCI_AUTH, &hdev->flags)) | ||
3368 | hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE, | ||
3369 | sizeof(link_sec), &link_sec); | ||
3370 | |||
3371 | if (lmp_bredr_capable(hdev)) { | ||
3372 | set_bredr_scan(&req); | ||
3373 | update_class(&req); | ||
3374 | update_name(&req); | ||
3375 | update_eir(&req); | ||
3376 | } | ||
3377 | |||
3378 | return hci_req_run(&req, powered_complete); | ||
3379 | } | ||
3380 | |||
3381 | int mgmt_powered(struct hci_dev *hdev, u8 powered) | ||
3382 | { | ||
3383 | struct cmd_lookup match = { NULL, hdev }; | ||
3384 | u8 status_not_powered = MGMT_STATUS_NOT_POWERED; | ||
3385 | u8 zero_cod[] = { 0, 0, 0 }; | ||
3386 | int err; | ||
3387 | |||
3388 | if (!test_bit(HCI_MGMT, &hdev->dev_flags)) | ||
3389 | return 0; | ||
3390 | |||
3391 | if (powered) { | ||
3392 | if (powered_update_hci(hdev) == 0) | ||
3393 | return 0; | ||
3394 | |||
3395 | mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, | ||
3396 | &match); | ||
3397 | goto new_settings; | ||
3398 | } | ||
3399 | |||
3400 | mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); | ||
3401 | mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered); | ||
3402 | |||
3403 | if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) | ||
3404 | mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, | ||
3405 | zero_cod, sizeof(zero_cod), NULL); | ||
3406 | |||
3407 | new_settings: | ||
3119 | err = new_settings(hdev, match.sk); | 3408 | err = new_settings(hdev, match.sk); |
3120 | 3409 | ||
3121 | if (match.sk) | 3410 | if (match.sk) |
@@ -3152,7 +3441,7 @@ int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable) | |||
3152 | 3441 | ||
3153 | int mgmt_connectable(struct hci_dev *hdev, u8 connectable) | 3442 | int mgmt_connectable(struct hci_dev *hdev, u8 connectable) |
3154 | { | 3443 | { |
3155 | struct cmd_lookup match = { NULL, hdev }; | 3444 | struct pending_cmd *cmd; |
3156 | bool changed = false; | 3445 | bool changed = false; |
3157 | int err = 0; | 3446 | int err = 0; |
3158 | 3447 | ||
@@ -3164,14 +3453,10 @@ int mgmt_connectable(struct hci_dev *hdev, u8 connectable) | |||
3164 | changed = true; | 3453 | changed = true; |
3165 | } | 3454 | } |
3166 | 3455 | ||
3167 | mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp, | 3456 | cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev); |
3168 | &match); | ||
3169 | 3457 | ||
3170 | if (changed) | 3458 | if (changed) |
3171 | err = new_settings(hdev, match.sk); | 3459 | err = new_settings(hdev, cmd ? cmd->sk : NULL); |
3172 | |||
3173 | if (match.sk) | ||
3174 | sock_put(match.sk); | ||
3175 | 3460 | ||
3176 | return err; | 3461 | return err; |
3177 | } | 3462 | } |
@@ -3555,23 +3840,25 @@ int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status) | |||
3555 | return err; | 3840 | return err; |
3556 | } | 3841 | } |
3557 | 3842 | ||
3558 | static int clear_eir(struct hci_dev *hdev) | 3843 | static void clear_eir(struct hci_request *req) |
3559 | { | 3844 | { |
3845 | struct hci_dev *hdev = req->hdev; | ||
3560 | struct hci_cp_write_eir cp; | 3846 | struct hci_cp_write_eir cp; |
3561 | 3847 | ||
3562 | if (!lmp_ext_inq_capable(hdev)) | 3848 | if (!lmp_ext_inq_capable(hdev)) |
3563 | return 0; | 3849 | return; |
3564 | 3850 | ||
3565 | memset(hdev->eir, 0, sizeof(hdev->eir)); | 3851 | memset(hdev->eir, 0, sizeof(hdev->eir)); |
3566 | 3852 | ||
3567 | memset(&cp, 0, sizeof(cp)); | 3853 | memset(&cp, 0, sizeof(cp)); |
3568 | 3854 | ||
3569 | return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp); | 3855 | hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); |
3570 | } | 3856 | } |
3571 | 3857 | ||
3572 | int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status) | 3858 | int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status) |
3573 | { | 3859 | { |
3574 | struct cmd_lookup match = { NULL, hdev }; | 3860 | struct cmd_lookup match = { NULL, hdev }; |
3861 | struct hci_request req; | ||
3575 | bool changed = false; | 3862 | bool changed = false; |
3576 | int err = 0; | 3863 | int err = 0; |
3577 | 3864 | ||
@@ -3604,29 +3891,26 @@ int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status) | |||
3604 | if (match.sk) | 3891 | if (match.sk) |
3605 | sock_put(match.sk); | 3892 | sock_put(match.sk); |
3606 | 3893 | ||
3894 | hci_req_init(&req, hdev); | ||
3895 | |||
3607 | if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) | 3896 | if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) |
3608 | update_eir(hdev); | 3897 | update_eir(&req); |
3609 | else | 3898 | else |
3610 | clear_eir(hdev); | 3899 | clear_eir(&req); |
3900 | |||
3901 | hci_req_run(&req, NULL); | ||
3611 | 3902 | ||
3612 | return err; | 3903 | return err; |
3613 | } | 3904 | } |
3614 | 3905 | ||
3615 | static void class_rsp(struct pending_cmd *cmd, void *data) | 3906 | static void sk_lookup(struct pending_cmd *cmd, void *data) |
3616 | { | 3907 | { |
3617 | struct cmd_lookup *match = data; | 3908 | struct cmd_lookup *match = data; |
3618 | 3909 | ||
3619 | cmd_complete(cmd->sk, cmd->index, cmd->opcode, match->mgmt_status, | ||
3620 | match->hdev->dev_class, 3); | ||
3621 | |||
3622 | list_del(&cmd->list); | ||
3623 | |||
3624 | if (match->sk == NULL) { | 3910 | if (match->sk == NULL) { |
3625 | match->sk = cmd->sk; | 3911 | match->sk = cmd->sk; |
3626 | sock_hold(match->sk); | 3912 | sock_hold(match->sk); |
3627 | } | 3913 | } |
3628 | |||
3629 | mgmt_pending_free(cmd); | ||
3630 | } | 3914 | } |
3631 | 3915 | ||
3632 | int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, | 3916 | int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, |
@@ -3635,11 +3919,9 @@ int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, | |||
3635 | struct cmd_lookup match = { NULL, hdev, mgmt_status(status) }; | 3919 | struct cmd_lookup match = { NULL, hdev, mgmt_status(status) }; |
3636 | int err = 0; | 3920 | int err = 0; |
3637 | 3921 | ||
3638 | clear_bit(HCI_PENDING_CLASS, &hdev->dev_flags); | 3922 | mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match); |
3639 | 3923 | mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match); | |
3640 | mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, class_rsp, &match); | 3924 | mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match); |
3641 | mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, class_rsp, &match); | ||
3642 | mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, class_rsp, &match); | ||
3643 | 3925 | ||
3644 | if (!status) | 3926 | if (!status) |
3645 | err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, | 3927 | err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, |
@@ -3653,55 +3935,29 @@ int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, | |||
3653 | 3935 | ||
3654 | int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status) | 3936 | int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status) |
3655 | { | 3937 | { |
3656 | struct pending_cmd *cmd; | ||
3657 | struct mgmt_cp_set_local_name ev; | 3938 | struct mgmt_cp_set_local_name ev; |
3658 | bool changed = false; | 3939 | struct pending_cmd *cmd; |
3659 | int err = 0; | ||
3660 | 3940 | ||
3661 | if (memcmp(name, hdev->dev_name, sizeof(hdev->dev_name)) != 0) { | 3941 | if (status) |
3662 | memcpy(hdev->dev_name, name, sizeof(hdev->dev_name)); | 3942 | return 0; |
3663 | changed = true; | ||
3664 | } | ||
3665 | 3943 | ||
3666 | memset(&ev, 0, sizeof(ev)); | 3944 | memset(&ev, 0, sizeof(ev)); |
3667 | memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); | 3945 | memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); |
3668 | memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH); | 3946 | memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH); |
3669 | 3947 | ||
3670 | cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); | 3948 | cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); |
3671 | if (!cmd) | 3949 | if (!cmd) { |
3672 | goto send_event; | 3950 | memcpy(hdev->dev_name, name, sizeof(hdev->dev_name)); |
3673 | |||
3674 | /* Always assume that either the short or the complete name has | ||
3675 | * changed if there was a pending mgmt command */ | ||
3676 | changed = true; | ||
3677 | 3951 | ||
3678 | if (status) { | 3952 | /* If this is a HCI command related to powering on the |
3679 | err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, | 3953 | * HCI dev don't send any mgmt signals. |
3680 | mgmt_status(status)); | 3954 | */ |
3681 | goto failed; | 3955 | if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) |
3956 | return 0; | ||
3682 | } | 3957 | } |
3683 | 3958 | ||
3684 | err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, &ev, | 3959 | return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev), |
3685 | sizeof(ev)); | 3960 | cmd ? cmd->sk : NULL); |
3686 | if (err < 0) | ||
3687 | goto failed; | ||
3688 | |||
3689 | send_event: | ||
3690 | if (changed) | ||
3691 | err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, | ||
3692 | sizeof(ev), cmd ? cmd->sk : NULL); | ||
3693 | |||
3694 | /* EIR is taken care of separately when powering on the | ||
3695 | * adapter so only update them here if this is a name change | ||
3696 | * unrelated to power on. | ||
3697 | */ | ||
3698 | if (!test_bit(HCI_INIT, &hdev->flags)) | ||
3699 | update_eir(hdev); | ||
3700 | |||
3701 | failed: | ||
3702 | if (cmd) | ||
3703 | mgmt_pending_remove(cmd); | ||
3704 | return err; | ||
3705 | } | 3961 | } |
3706 | 3962 | ||
3707 | int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash, | 3963 | int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash, |
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index b23e2713fea8..ca957d34b0c8 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c | |||
@@ -69,7 +69,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, | |||
69 | u8 sec_level, | 69 | u8 sec_level, |
70 | int *err); | 70 | int *err); |
71 | static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst); | 71 | static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst); |
72 | static void rfcomm_session_del(struct rfcomm_session *s); | 72 | static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s); |
73 | 73 | ||
74 | /* ---- RFCOMM frame parsing macros ---- */ | 74 | /* ---- RFCOMM frame parsing macros ---- */ |
75 | #define __get_dlci(b) ((b & 0xfc) >> 2) | 75 | #define __get_dlci(b) ((b & 0xfc) >> 2) |
@@ -108,12 +108,6 @@ static void rfcomm_schedule(void) | |||
108 | wake_up_process(rfcomm_thread); | 108 | wake_up_process(rfcomm_thread); |
109 | } | 109 | } |
110 | 110 | ||
111 | static void rfcomm_session_put(struct rfcomm_session *s) | ||
112 | { | ||
113 | if (atomic_dec_and_test(&s->refcnt)) | ||
114 | rfcomm_session_del(s); | ||
115 | } | ||
116 | |||
117 | /* ---- RFCOMM FCS computation ---- */ | 111 | /* ---- RFCOMM FCS computation ---- */ |
118 | 112 | ||
119 | /* reversed, 8-bit, poly=0x07 */ | 113 | /* reversed, 8-bit, poly=0x07 */ |
@@ -249,16 +243,14 @@ static void rfcomm_session_set_timer(struct rfcomm_session *s, long timeout) | |||
249 | { | 243 | { |
250 | BT_DBG("session %p state %ld timeout %ld", s, s->state, timeout); | 244 | BT_DBG("session %p state %ld timeout %ld", s, s->state, timeout); |
251 | 245 | ||
252 | if (!mod_timer(&s->timer, jiffies + timeout)) | 246 | mod_timer(&s->timer, jiffies + timeout); |
253 | rfcomm_session_hold(s); | ||
254 | } | 247 | } |
255 | 248 | ||
256 | static void rfcomm_session_clear_timer(struct rfcomm_session *s) | 249 | static void rfcomm_session_clear_timer(struct rfcomm_session *s) |
257 | { | 250 | { |
258 | BT_DBG("session %p state %ld", s, s->state); | 251 | BT_DBG("session %p state %ld", s, s->state); |
259 | 252 | ||
260 | if (del_timer(&s->timer)) | 253 | del_timer_sync(&s->timer); |
261 | rfcomm_session_put(s); | ||
262 | } | 254 | } |
263 | 255 | ||
264 | /* ---- RFCOMM DLCs ---- */ | 256 | /* ---- RFCOMM DLCs ---- */ |
@@ -336,8 +328,6 @@ static void rfcomm_dlc_link(struct rfcomm_session *s, struct rfcomm_dlc *d) | |||
336 | { | 328 | { |
337 | BT_DBG("dlc %p session %p", d, s); | 329 | BT_DBG("dlc %p session %p", d, s); |
338 | 330 | ||
339 | rfcomm_session_hold(s); | ||
340 | |||
341 | rfcomm_session_clear_timer(s); | 331 | rfcomm_session_clear_timer(s); |
342 | rfcomm_dlc_hold(d); | 332 | rfcomm_dlc_hold(d); |
343 | list_add(&d->list, &s->dlcs); | 333 | list_add(&d->list, &s->dlcs); |
@@ -356,8 +346,6 @@ static void rfcomm_dlc_unlink(struct rfcomm_dlc *d) | |||
356 | 346 | ||
357 | if (list_empty(&s->dlcs)) | 347 | if (list_empty(&s->dlcs)) |
358 | rfcomm_session_set_timer(s, RFCOMM_IDLE_TIMEOUT); | 348 | rfcomm_session_set_timer(s, RFCOMM_IDLE_TIMEOUT); |
359 | |||
360 | rfcomm_session_put(s); | ||
361 | } | 349 | } |
362 | 350 | ||
363 | static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci) | 351 | static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci) |
@@ -493,12 +481,34 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err) | |||
493 | 481 | ||
494 | int rfcomm_dlc_close(struct rfcomm_dlc *d, int err) | 482 | int rfcomm_dlc_close(struct rfcomm_dlc *d, int err) |
495 | { | 483 | { |
496 | int r; | 484 | int r = 0; |
485 | struct rfcomm_dlc *d_list; | ||
486 | struct rfcomm_session *s, *s_list; | ||
487 | |||
488 | BT_DBG("dlc %p state %ld dlci %d err %d", d, d->state, d->dlci, err); | ||
497 | 489 | ||
498 | rfcomm_lock(); | 490 | rfcomm_lock(); |
499 | 491 | ||
500 | r = __rfcomm_dlc_close(d, err); | 492 | s = d->session; |
493 | if (!s) | ||
494 | goto no_session; | ||
495 | |||
496 | /* after waiting on the mutex check the session still exists | ||
497 | * then check the dlc still exists | ||
498 | */ | ||
499 | list_for_each_entry(s_list, &session_list, list) { | ||
500 | if (s_list == s) { | ||
501 | list_for_each_entry(d_list, &s->dlcs, list) { | ||
502 | if (d_list == d) { | ||
503 | r = __rfcomm_dlc_close(d, err); | ||
504 | break; | ||
505 | } | ||
506 | } | ||
507 | break; | ||
508 | } | ||
509 | } | ||
501 | 510 | ||
511 | no_session: | ||
502 | rfcomm_unlock(); | 512 | rfcomm_unlock(); |
503 | return r; | 513 | return r; |
504 | } | 514 | } |
@@ -609,7 +619,7 @@ static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state) | |||
609 | return s; | 619 | return s; |
610 | } | 620 | } |
611 | 621 | ||
612 | static void rfcomm_session_del(struct rfcomm_session *s) | 622 | static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s) |
613 | { | 623 | { |
614 | int state = s->state; | 624 | int state = s->state; |
615 | 625 | ||
@@ -617,15 +627,14 @@ static void rfcomm_session_del(struct rfcomm_session *s) | |||
617 | 627 | ||
618 | list_del(&s->list); | 628 | list_del(&s->list); |
619 | 629 | ||
620 | if (state == BT_CONNECTED) | ||
621 | rfcomm_send_disc(s, 0); | ||
622 | |||
623 | rfcomm_session_clear_timer(s); | 630 | rfcomm_session_clear_timer(s); |
624 | sock_release(s->sock); | 631 | sock_release(s->sock); |
625 | kfree(s); | 632 | kfree(s); |
626 | 633 | ||
627 | if (state != BT_LISTEN) | 634 | if (state != BT_LISTEN) |
628 | module_put(THIS_MODULE); | 635 | module_put(THIS_MODULE); |
636 | |||
637 | return NULL; | ||
629 | } | 638 | } |
630 | 639 | ||
631 | static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst) | 640 | static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst) |
@@ -644,17 +653,16 @@ static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst) | |||
644 | return NULL; | 653 | return NULL; |
645 | } | 654 | } |
646 | 655 | ||
647 | static void rfcomm_session_close(struct rfcomm_session *s, int err) | 656 | static struct rfcomm_session *rfcomm_session_close(struct rfcomm_session *s, |
657 | int err) | ||
648 | { | 658 | { |
649 | struct rfcomm_dlc *d; | 659 | struct rfcomm_dlc *d; |
650 | struct list_head *p, *n; | 660 | struct list_head *p, *n; |
651 | 661 | ||
652 | BT_DBG("session %p state %ld err %d", s, s->state, err); | ||
653 | |||
654 | rfcomm_session_hold(s); | ||
655 | |||
656 | s->state = BT_CLOSED; | 662 | s->state = BT_CLOSED; |
657 | 663 | ||
664 | BT_DBG("session %p state %ld err %d", s, s->state, err); | ||
665 | |||
658 | /* Close all dlcs */ | 666 | /* Close all dlcs */ |
659 | list_for_each_safe(p, n, &s->dlcs) { | 667 | list_for_each_safe(p, n, &s->dlcs) { |
660 | d = list_entry(p, struct rfcomm_dlc, list); | 668 | d = list_entry(p, struct rfcomm_dlc, list); |
@@ -663,7 +671,7 @@ static void rfcomm_session_close(struct rfcomm_session *s, int err) | |||
663 | } | 671 | } |
664 | 672 | ||
665 | rfcomm_session_clear_timer(s); | 673 | rfcomm_session_clear_timer(s); |
666 | rfcomm_session_put(s); | 674 | return rfcomm_session_del(s); |
667 | } | 675 | } |
668 | 676 | ||
669 | static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, | 677 | static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, |
@@ -715,8 +723,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, | |||
715 | if (*err == 0 || *err == -EINPROGRESS) | 723 | if (*err == 0 || *err == -EINPROGRESS) |
716 | return s; | 724 | return s; |
717 | 725 | ||
718 | rfcomm_session_del(s); | 726 | return rfcomm_session_del(s); |
719 | return NULL; | ||
720 | 727 | ||
721 | failed: | 728 | failed: |
722 | sock_release(sock); | 729 | sock_release(sock); |
@@ -1105,7 +1112,7 @@ static void rfcomm_make_uih(struct sk_buff *skb, u8 addr) | |||
1105 | } | 1112 | } |
1106 | 1113 | ||
1107 | /* ---- RFCOMM frame reception ---- */ | 1114 | /* ---- RFCOMM frame reception ---- */ |
1108 | static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci) | 1115 | static struct rfcomm_session *rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci) |
1109 | { | 1116 | { |
1110 | BT_DBG("session %p state %ld dlci %d", s, s->state, dlci); | 1117 | BT_DBG("session %p state %ld dlci %d", s, s->state, dlci); |
1111 | 1118 | ||
@@ -1114,7 +1121,7 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci) | |||
1114 | struct rfcomm_dlc *d = rfcomm_dlc_get(s, dlci); | 1121 | struct rfcomm_dlc *d = rfcomm_dlc_get(s, dlci); |
1115 | if (!d) { | 1122 | if (!d) { |
1116 | rfcomm_send_dm(s, dlci); | 1123 | rfcomm_send_dm(s, dlci); |
1117 | return 0; | 1124 | return s; |
1118 | } | 1125 | } |
1119 | 1126 | ||
1120 | switch (d->state) { | 1127 | switch (d->state) { |
@@ -1150,25 +1157,14 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci) | |||
1150 | break; | 1157 | break; |
1151 | 1158 | ||
1152 | case BT_DISCONN: | 1159 | case BT_DISCONN: |
1153 | /* rfcomm_session_put is called later so don't do | 1160 | s = rfcomm_session_close(s, ECONNRESET); |
1154 | * anything here otherwise we will mess up the session | ||
1155 | * reference counter: | ||
1156 | * | ||
1157 | * (a) when we are the initiator dlc_unlink will drive | ||
1158 | * the reference counter to 0 (there is no initial put | ||
1159 | * after session_add) | ||
1160 | * | ||
1161 | * (b) when we are not the initiator rfcomm_rx_process | ||
1162 | * will explicitly call put to balance the initial hold | ||
1163 | * done after session add. | ||
1164 | */ | ||
1165 | break; | 1161 | break; |
1166 | } | 1162 | } |
1167 | } | 1163 | } |
1168 | return 0; | 1164 | return s; |
1169 | } | 1165 | } |
1170 | 1166 | ||
1171 | static int rfcomm_recv_dm(struct rfcomm_session *s, u8 dlci) | 1167 | static struct rfcomm_session *rfcomm_recv_dm(struct rfcomm_session *s, u8 dlci) |
1172 | { | 1168 | { |
1173 | int err = 0; | 1169 | int err = 0; |
1174 | 1170 | ||
@@ -1192,13 +1188,13 @@ static int rfcomm_recv_dm(struct rfcomm_session *s, u8 dlci) | |||
1192 | else | 1188 | else |
1193 | err = ECONNRESET; | 1189 | err = ECONNRESET; |
1194 | 1190 | ||
1195 | s->state = BT_CLOSED; | 1191 | s = rfcomm_session_close(s, err); |
1196 | rfcomm_session_close(s, err); | ||
1197 | } | 1192 | } |
1198 | return 0; | 1193 | return s; |
1199 | } | 1194 | } |
1200 | 1195 | ||
1201 | static int rfcomm_recv_disc(struct rfcomm_session *s, u8 dlci) | 1196 | static struct rfcomm_session *rfcomm_recv_disc(struct rfcomm_session *s, |
1197 | u8 dlci) | ||
1202 | { | 1198 | { |
1203 | int err = 0; | 1199 | int err = 0; |
1204 | 1200 | ||
@@ -1227,11 +1223,9 @@ static int rfcomm_recv_disc(struct rfcomm_session *s, u8 dlci) | |||
1227 | else | 1223 | else |
1228 | err = ECONNRESET; | 1224 | err = ECONNRESET; |
1229 | 1225 | ||
1230 | s->state = BT_CLOSED; | 1226 | s = rfcomm_session_close(s, err); |
1231 | rfcomm_session_close(s, err); | ||
1232 | } | 1227 | } |
1233 | 1228 | return s; | |
1234 | return 0; | ||
1235 | } | 1229 | } |
1236 | 1230 | ||
1237 | void rfcomm_dlc_accept(struct rfcomm_dlc *d) | 1231 | void rfcomm_dlc_accept(struct rfcomm_dlc *d) |
@@ -1652,11 +1646,18 @@ drop: | |||
1652 | return 0; | 1646 | return 0; |
1653 | } | 1647 | } |
1654 | 1648 | ||
1655 | static int rfcomm_recv_frame(struct rfcomm_session *s, struct sk_buff *skb) | 1649 | static struct rfcomm_session *rfcomm_recv_frame(struct rfcomm_session *s, |
1650 | struct sk_buff *skb) | ||
1656 | { | 1651 | { |
1657 | struct rfcomm_hdr *hdr = (void *) skb->data; | 1652 | struct rfcomm_hdr *hdr = (void *) skb->data; |
1658 | u8 type, dlci, fcs; | 1653 | u8 type, dlci, fcs; |
1659 | 1654 | ||
1655 | if (!s) { | ||
1656 | /* no session, so free socket data */ | ||
1657 | kfree_skb(skb); | ||
1658 | return s; | ||
1659 | } | ||
1660 | |||
1660 | dlci = __get_dlci(hdr->addr); | 1661 | dlci = __get_dlci(hdr->addr); |
1661 | type = __get_type(hdr->ctrl); | 1662 | type = __get_type(hdr->ctrl); |
1662 | 1663 | ||
@@ -1667,7 +1668,7 @@ static int rfcomm_recv_frame(struct rfcomm_session *s, struct sk_buff *skb) | |||
1667 | if (__check_fcs(skb->data, type, fcs)) { | 1668 | if (__check_fcs(skb->data, type, fcs)) { |
1668 | BT_ERR("bad checksum in packet"); | 1669 | BT_ERR("bad checksum in packet"); |
1669 | kfree_skb(skb); | 1670 | kfree_skb(skb); |
1670 | return -EILSEQ; | 1671 | return s; |
1671 | } | 1672 | } |
1672 | 1673 | ||
1673 | if (__test_ea(hdr->len)) | 1674 | if (__test_ea(hdr->len)) |
@@ -1683,22 +1684,23 @@ static int rfcomm_recv_frame(struct rfcomm_session *s, struct sk_buff *skb) | |||
1683 | 1684 | ||
1684 | case RFCOMM_DISC: | 1685 | case RFCOMM_DISC: |
1685 | if (__test_pf(hdr->ctrl)) | 1686 | if (__test_pf(hdr->ctrl)) |
1686 | rfcomm_recv_disc(s, dlci); | 1687 | s = rfcomm_recv_disc(s, dlci); |
1687 | break; | 1688 | break; |
1688 | 1689 | ||
1689 | case RFCOMM_UA: | 1690 | case RFCOMM_UA: |
1690 | if (__test_pf(hdr->ctrl)) | 1691 | if (__test_pf(hdr->ctrl)) |
1691 | rfcomm_recv_ua(s, dlci); | 1692 | s = rfcomm_recv_ua(s, dlci); |
1692 | break; | 1693 | break; |
1693 | 1694 | ||
1694 | case RFCOMM_DM: | 1695 | case RFCOMM_DM: |
1695 | rfcomm_recv_dm(s, dlci); | 1696 | s = rfcomm_recv_dm(s, dlci); |
1696 | break; | 1697 | break; |
1697 | 1698 | ||
1698 | case RFCOMM_UIH: | 1699 | case RFCOMM_UIH: |
1699 | if (dlci) | 1700 | if (dlci) { |
1700 | return rfcomm_recv_data(s, dlci, __test_pf(hdr->ctrl), skb); | 1701 | rfcomm_recv_data(s, dlci, __test_pf(hdr->ctrl), skb); |
1701 | 1702 | return s; | |
1703 | } | ||
1702 | rfcomm_recv_mcc(s, skb); | 1704 | rfcomm_recv_mcc(s, skb); |
1703 | break; | 1705 | break; |
1704 | 1706 | ||
@@ -1707,7 +1709,7 @@ static int rfcomm_recv_frame(struct rfcomm_session *s, struct sk_buff *skb) | |||
1707 | break; | 1709 | break; |
1708 | } | 1710 | } |
1709 | kfree_skb(skb); | 1711 | kfree_skb(skb); |
1710 | return 0; | 1712 | return s; |
1711 | } | 1713 | } |
1712 | 1714 | ||
1713 | /* ---- Connection and data processing ---- */ | 1715 | /* ---- Connection and data processing ---- */ |
@@ -1844,7 +1846,7 @@ static void rfcomm_process_dlcs(struct rfcomm_session *s) | |||
1844 | } | 1846 | } |
1845 | } | 1847 | } |
1846 | 1848 | ||
1847 | static void rfcomm_process_rx(struct rfcomm_session *s) | 1849 | static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s) |
1848 | { | 1850 | { |
1849 | struct socket *sock = s->sock; | 1851 | struct socket *sock = s->sock; |
1850 | struct sock *sk = sock->sk; | 1852 | struct sock *sk = sock->sk; |
@@ -1856,17 +1858,15 @@ static void rfcomm_process_rx(struct rfcomm_session *s) | |||
1856 | while ((skb = skb_dequeue(&sk->sk_receive_queue))) { | 1858 | while ((skb = skb_dequeue(&sk->sk_receive_queue))) { |
1857 | skb_orphan(skb); | 1859 | skb_orphan(skb); |
1858 | if (!skb_linearize(skb)) | 1860 | if (!skb_linearize(skb)) |
1859 | rfcomm_recv_frame(s, skb); | 1861 | s = rfcomm_recv_frame(s, skb); |
1860 | else | 1862 | else |
1861 | kfree_skb(skb); | 1863 | kfree_skb(skb); |
1862 | } | 1864 | } |
1863 | 1865 | ||
1864 | if (sk->sk_state == BT_CLOSED) { | 1866 | if (s && (sk->sk_state == BT_CLOSED)) |
1865 | if (!s->initiator) | 1867 | s = rfcomm_session_close(s, sk->sk_err); |
1866 | rfcomm_session_put(s); | ||
1867 | 1868 | ||
1868 | rfcomm_session_close(s, sk->sk_err); | 1869 | return s; |
1869 | } | ||
1870 | } | 1870 | } |
1871 | 1871 | ||
1872 | static void rfcomm_accept_connection(struct rfcomm_session *s) | 1872 | static void rfcomm_accept_connection(struct rfcomm_session *s) |
@@ -1891,8 +1891,6 @@ static void rfcomm_accept_connection(struct rfcomm_session *s) | |||
1891 | 1891 | ||
1892 | s = rfcomm_session_add(nsock, BT_OPEN); | 1892 | s = rfcomm_session_add(nsock, BT_OPEN); |
1893 | if (s) { | 1893 | if (s) { |
1894 | rfcomm_session_hold(s); | ||
1895 | |||
1896 | /* We should adjust MTU on incoming sessions. | 1894 | /* We should adjust MTU on incoming sessions. |
1897 | * L2CAP MTU minus UIH header and FCS. */ | 1895 | * L2CAP MTU minus UIH header and FCS. */ |
1898 | s->mtu = min(l2cap_pi(nsock->sk)->chan->omtu, | 1896 | s->mtu = min(l2cap_pi(nsock->sk)->chan->omtu, |
@@ -1903,7 +1901,7 @@ static void rfcomm_accept_connection(struct rfcomm_session *s) | |||
1903 | sock_release(nsock); | 1901 | sock_release(nsock); |
1904 | } | 1902 | } |
1905 | 1903 | ||
1906 | static void rfcomm_check_connection(struct rfcomm_session *s) | 1904 | static struct rfcomm_session *rfcomm_check_connection(struct rfcomm_session *s) |
1907 | { | 1905 | { |
1908 | struct sock *sk = s->sock->sk; | 1906 | struct sock *sk = s->sock->sk; |
1909 | 1907 | ||
@@ -1921,10 +1919,10 @@ static void rfcomm_check_connection(struct rfcomm_session *s) | |||
1921 | break; | 1919 | break; |
1922 | 1920 | ||
1923 | case BT_CLOSED: | 1921 | case BT_CLOSED: |
1924 | s->state = BT_CLOSED; | 1922 | s = rfcomm_session_close(s, sk->sk_err); |
1925 | rfcomm_session_close(s, sk->sk_err); | ||
1926 | break; | 1923 | break; |
1927 | } | 1924 | } |
1925 | return s; | ||
1928 | } | 1926 | } |
1929 | 1927 | ||
1930 | static void rfcomm_process_sessions(void) | 1928 | static void rfcomm_process_sessions(void) |
@@ -1940,7 +1938,6 @@ static void rfcomm_process_sessions(void) | |||
1940 | if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) { | 1938 | if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) { |
1941 | s->state = BT_DISCONN; | 1939 | s->state = BT_DISCONN; |
1942 | rfcomm_send_disc(s, 0); | 1940 | rfcomm_send_disc(s, 0); |
1943 | rfcomm_session_put(s); | ||
1944 | continue; | 1941 | continue; |
1945 | } | 1942 | } |
1946 | 1943 | ||
@@ -1949,21 +1946,18 @@ static void rfcomm_process_sessions(void) | |||
1949 | continue; | 1946 | continue; |
1950 | } | 1947 | } |
1951 | 1948 | ||
1952 | rfcomm_session_hold(s); | ||
1953 | |||
1954 | switch (s->state) { | 1949 | switch (s->state) { |
1955 | case BT_BOUND: | 1950 | case BT_BOUND: |
1956 | rfcomm_check_connection(s); | 1951 | s = rfcomm_check_connection(s); |
1957 | break; | 1952 | break; |
1958 | 1953 | ||
1959 | default: | 1954 | default: |
1960 | rfcomm_process_rx(s); | 1955 | s = rfcomm_process_rx(s); |
1961 | break; | 1956 | break; |
1962 | } | 1957 | } |
1963 | 1958 | ||
1964 | rfcomm_process_dlcs(s); | 1959 | if (s) |
1965 | 1960 | rfcomm_process_dlcs(s); | |
1966 | rfcomm_session_put(s); | ||
1967 | } | 1961 | } |
1968 | 1962 | ||
1969 | rfcomm_unlock(); | 1963 | rfcomm_unlock(); |
@@ -2010,10 +2004,11 @@ static int rfcomm_add_listener(bdaddr_t *ba) | |||
2010 | 2004 | ||
2011 | /* Add listening session */ | 2005 | /* Add listening session */ |
2012 | s = rfcomm_session_add(sock, BT_LISTEN); | 2006 | s = rfcomm_session_add(sock, BT_LISTEN); |
2013 | if (!s) | 2007 | if (!s) { |
2008 | err = -ENOMEM; | ||
2014 | goto failed; | 2009 | goto failed; |
2010 | } | ||
2015 | 2011 | ||
2016 | rfcomm_session_hold(s); | ||
2017 | return 0; | 2012 | return 0; |
2018 | failed: | 2013 | failed: |
2019 | sock_release(sock); | 2014 | sock_release(sock); |
@@ -2071,8 +2066,6 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt) | |||
2071 | if (!s) | 2066 | if (!s) |
2072 | return; | 2067 | return; |
2073 | 2068 | ||
2074 | rfcomm_session_hold(s); | ||
2075 | |||
2076 | list_for_each_safe(p, n, &s->dlcs) { | 2069 | list_for_each_safe(p, n, &s->dlcs) { |
2077 | d = list_entry(p, struct rfcomm_dlc, list); | 2070 | d = list_entry(p, struct rfcomm_dlc, list); |
2078 | 2071 | ||
@@ -2104,8 +2097,6 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt) | |||
2104 | set_bit(RFCOMM_AUTH_REJECT, &d->flags); | 2097 | set_bit(RFCOMM_AUTH_REJECT, &d->flags); |
2105 | } | 2098 | } |
2106 | 2099 | ||
2107 | rfcomm_session_put(s); | ||
2108 | |||
2109 | rfcomm_schedule(); | 2100 | rfcomm_schedule(); |
2110 | } | 2101 | } |
2111 | 2102 | ||
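The rfcomm/core.c changes above drop the session reference counting (rfcomm_session_hold/put) entirely. In its place, every function that may end up freeing the session — rfcomm_session_del(), rfcomm_session_close(), the rfcomm_recv_*() handlers and rfcomm_process_rx() — returns the surviving session pointer, or NULL once it is gone, and callers keep working only with the returned value. Condensed into one hypothetical helper, the calling convention established by these hunks looks like this:

/* Hypothetical condensation of the per-session step in
 * rfcomm_process_sessions(): reassign the pointer after every call that
 * might free the session, and stop touching it once it is NULL. */
static void rfcomm_process_one(struct rfcomm_session *s)
{
	switch (s->state) {
	case BT_BOUND:
		s = rfcomm_check_connection(s);
		break;
	default:
		s = rfcomm_process_rx(s);
		break;
	}

	if (s)	/* the session may have been freed and returned as NULL */
		rfcomm_process_dlcs(s);
}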
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 7c9224bcce17..a8638b58c4bf 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c | |||
@@ -1066,8 +1066,7 @@ void __exit rfcomm_cleanup_sockets(void) | |||
1066 | 1066 | ||
1067 | debugfs_remove(rfcomm_sock_debugfs); | 1067 | debugfs_remove(rfcomm_sock_debugfs); |
1068 | 1068 | ||
1069 | if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) | 1069 | bt_sock_unregister(BTPROTO_RFCOMM); |
1070 | BT_ERR("RFCOMM socket layer unregistration failed"); | ||
1071 | 1070 | ||
1072 | proto_unregister(&rfcomm_proto); | 1071 | proto_unregister(&rfcomm_proto); |
1073 | } | 1072 | } |
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index fb6192c9812e..2c8055350510 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
@@ -1113,8 +1113,7 @@ void __exit sco_exit(void) | |||
1113 | 1113 | ||
1114 | debugfs_remove(sco_debugfs); | 1114 | debugfs_remove(sco_debugfs); |
1115 | 1115 | ||
1116 | if (bt_sock_unregister(BTPROTO_SCO) < 0) | 1116 | bt_sock_unregister(BTPROTO_SCO); |
1117 | BT_ERR("SCO socket unregistration failed"); | ||
1118 | 1117 | ||
1119 | proto_unregister(&sco_proto); | 1118 | proto_unregister(&sco_proto); |
1120 | } | 1119 | } |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index c34e6d78a592..c50c19402588 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -175,7 +175,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, | |||
175 | * add it to the device after the station. | 175 | * add it to the device after the station. |
176 | */ | 176 | */ |
177 | if (!sta || !test_sta_flag(sta, WLAN_STA_ASSOC)) { | 177 | if (!sta || !test_sta_flag(sta, WLAN_STA_ASSOC)) { |
178 | ieee80211_key_free(sdata->local, key); | 178 | ieee80211_key_free_unused(key); |
179 | err = -ENOENT; | 179 | err = -ENOENT; |
180 | goto out_unlock; | 180 | goto out_unlock; |
181 | } | 181 | } |
@@ -214,8 +214,6 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, | |||
214 | } | 214 | } |
215 | 215 | ||
216 | err = ieee80211_key_link(key, sdata, sta); | 216 | err = ieee80211_key_link(key, sdata, sta); |
217 | if (err) | ||
218 | ieee80211_key_free(sdata->local, key); | ||
219 | 217 | ||
220 | out_unlock: | 218 | out_unlock: |
221 | mutex_unlock(&sdata->local->sta_mtx); | 219 | mutex_unlock(&sdata->local->sta_mtx); |
@@ -254,7 +252,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, | |||
254 | goto out_unlock; | 252 | goto out_unlock; |
255 | } | 253 | } |
256 | 254 | ||
257 | __ieee80211_key_free(key, true); | 255 | ieee80211_key_free(key, true); |
258 | 256 | ||
259 | ret = 0; | 257 | ret = 0; |
260 | out_unlock: | 258 | out_unlock: |
@@ -445,12 +443,14 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) | |||
445 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 443 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
446 | struct ieee80211_local *local = sdata->local; | 444 | struct ieee80211_local *local = sdata->local; |
447 | struct timespec uptime; | 445 | struct timespec uptime; |
446 | u64 packets = 0; | ||
447 | int ac; | ||
448 | 448 | ||
449 | sinfo->generation = sdata->local->sta_generation; | 449 | sinfo->generation = sdata->local->sta_generation; |
450 | 450 | ||
451 | sinfo->filled = STATION_INFO_INACTIVE_TIME | | 451 | sinfo->filled = STATION_INFO_INACTIVE_TIME | |
452 | STATION_INFO_RX_BYTES | | 452 | STATION_INFO_RX_BYTES64 | |
453 | STATION_INFO_TX_BYTES | | 453 | STATION_INFO_TX_BYTES64 | |
454 | STATION_INFO_RX_PACKETS | | 454 | STATION_INFO_RX_PACKETS | |
455 | STATION_INFO_TX_PACKETS | | 455 | STATION_INFO_TX_PACKETS | |
456 | STATION_INFO_TX_RETRIES | | 456 | STATION_INFO_TX_RETRIES | |
@@ -467,10 +467,14 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) | |||
467 | sinfo->connected_time = uptime.tv_sec - sta->last_connected; | 467 | sinfo->connected_time = uptime.tv_sec - sta->last_connected; |
468 | 468 | ||
469 | sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx); | 469 | sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx); |
470 | sinfo->tx_bytes = 0; | ||
471 | for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { | ||
472 | sinfo->tx_bytes += sta->tx_bytes[ac]; | ||
473 | packets += sta->tx_packets[ac]; | ||
474 | } | ||
475 | sinfo->tx_packets = packets; | ||
470 | sinfo->rx_bytes = sta->rx_bytes; | 476 | sinfo->rx_bytes = sta->rx_bytes; |
471 | sinfo->tx_bytes = sta->tx_bytes; | ||
472 | sinfo->rx_packets = sta->rx_packets; | 477 | sinfo->rx_packets = sta->rx_packets; |
473 | sinfo->tx_packets = sta->tx_packets; | ||
474 | sinfo->tx_retries = sta->tx_retry_count; | 478 | sinfo->tx_retries = sta->tx_retry_count; |
475 | sinfo->tx_failed = sta->tx_retry_failed; | 479 | sinfo->tx_failed = sta->tx_retry_failed; |
476 | sinfo->rx_dropped_misc = sta->rx_dropped; | 480 | sinfo->rx_dropped_misc = sta->rx_dropped; |
@@ -598,8 +602,8 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy, | |||
598 | data[i++] += sta->rx_fragments; \ | 602 | data[i++] += sta->rx_fragments; \ |
599 | data[i++] += sta->rx_dropped; \ | 603 | data[i++] += sta->rx_dropped; \ |
600 | \ | 604 | \ |
601 | data[i++] += sta->tx_packets; \ | 605 | data[i++] += sinfo.tx_packets; \ |
602 | data[i++] += sta->tx_bytes; \ | 606 | data[i++] += sinfo.tx_bytes; \ |
603 | data[i++] += sta->tx_fragments; \ | 607 | data[i++] += sta->tx_fragments; \ |
604 | data[i++] += sta->tx_filtered_count; \ | 608 | data[i++] += sta->tx_filtered_count; \ |
605 | data[i++] += sta->tx_retry_failed; \ | 609 | data[i++] += sta->tx_retry_failed; \ |
@@ -621,13 +625,14 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy, | |||
621 | if (!(sta && !WARN_ON(sta->sdata->dev != dev))) | 625 | if (!(sta && !WARN_ON(sta->sdata->dev != dev))) |
622 | goto do_survey; | 626 | goto do_survey; |
623 | 627 | ||
628 | sinfo.filled = 0; | ||
629 | sta_set_sinfo(sta, &sinfo); | ||
630 | |||
624 | i = 0; | 631 | i = 0; |
625 | ADD_STA_STATS(sta); | 632 | ADD_STA_STATS(sta); |
626 | 633 | ||
627 | data[i++] = sta->sta_state; | 634 | data[i++] = sta->sta_state; |
628 | 635 | ||
629 | sinfo.filled = 0; | ||
630 | sta_set_sinfo(sta, &sinfo); | ||
631 | 636 | ||
632 | if (sinfo.filled & STATION_INFO_TX_BITRATE) | 637 | if (sinfo.filled & STATION_INFO_TX_BITRATE) |
633 | data[i] = 100000 * | 638 | data[i] = 100000 * |
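The sta_set_sinfo() hunk above switches the TX counters to per-access-category arrays that are summed when reported (matching the sta_info.h change later in this patch). A minimal standalone sketch of that aggregation, with NUM_ACS standing in for IEEE80211_NUM_ACS:

#include <stdint.h>
#include <stdio.h>

#define NUM_ACS 4	/* stand-in for IEEE80211_NUM_ACS */

int main(void)
{
	uint64_t tx_bytes[NUM_ACS]   = { 1000, 2000, 0, 500 };
	uint64_t tx_packets[NUM_ACS] = {   10,   20, 0,   5 };
	uint64_t bytes = 0, packets = 0;
	int ac;

	/* report totals by summing the per-AC counters */
	for (ac = 0; ac < NUM_ACS; ac++) {
		bytes += tx_bytes[ac];
		packets += tx_packets[ac];
	}
	printf("tx_bytes=%llu tx_packets=%llu\n",
	       (unsigned long long)bytes, (unsigned long long)packets);
	return 0;
}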
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c index c3a3082b72e5..1521cabad3d6 100644 --- a/net/mac80211/debugfs_key.c +++ b/net/mac80211/debugfs_key.c | |||
@@ -295,7 +295,7 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata) | |||
295 | char buf[50]; | 295 | char buf[50]; |
296 | struct ieee80211_key *key; | 296 | struct ieee80211_key *key; |
297 | 297 | ||
298 | if (!sdata->debugfs.dir) | 298 | if (!sdata->vif.debugfs_dir) |
299 | return; | 299 | return; |
300 | 300 | ||
301 | lockdep_assert_held(&sdata->local->key_mtx); | 301 | lockdep_assert_held(&sdata->local->key_mtx); |
@@ -311,7 +311,7 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata) | |||
311 | sprintf(buf, "../keys/%d", key->debugfs.cnt); | 311 | sprintf(buf, "../keys/%d", key->debugfs.cnt); |
312 | sdata->debugfs.default_unicast_key = | 312 | sdata->debugfs.default_unicast_key = |
313 | debugfs_create_symlink("default_unicast_key", | 313 | debugfs_create_symlink("default_unicast_key", |
314 | sdata->debugfs.dir, buf); | 314 | sdata->vif.debugfs_dir, buf); |
315 | } | 315 | } |
316 | 316 | ||
317 | if (sdata->debugfs.default_multicast_key) { | 317 | if (sdata->debugfs.default_multicast_key) { |
@@ -325,7 +325,7 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata) | |||
325 | sprintf(buf, "../keys/%d", key->debugfs.cnt); | 325 | sprintf(buf, "../keys/%d", key->debugfs.cnt); |
326 | sdata->debugfs.default_multicast_key = | 326 | sdata->debugfs.default_multicast_key = |
327 | debugfs_create_symlink("default_multicast_key", | 327 | debugfs_create_symlink("default_multicast_key", |
328 | sdata->debugfs.dir, buf); | 328 | sdata->vif.debugfs_dir, buf); |
329 | } | 329 | } |
330 | } | 330 | } |
331 | 331 | ||
@@ -334,7 +334,7 @@ void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata) | |||
334 | char buf[50]; | 334 | char buf[50]; |
335 | struct ieee80211_key *key; | 335 | struct ieee80211_key *key; |
336 | 336 | ||
337 | if (!sdata->debugfs.dir) | 337 | if (!sdata->vif.debugfs_dir) |
338 | return; | 338 | return; |
339 | 339 | ||
340 | key = key_mtx_dereference(sdata->local, | 340 | key = key_mtx_dereference(sdata->local, |
@@ -343,7 +343,7 @@ void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata) | |||
343 | sprintf(buf, "../keys/%d", key->debugfs.cnt); | 343 | sprintf(buf, "../keys/%d", key->debugfs.cnt); |
344 | sdata->debugfs.default_mgmt_key = | 344 | sdata->debugfs.default_mgmt_key = |
345 | debugfs_create_symlink("default_mgmt_key", | 345 | debugfs_create_symlink("default_mgmt_key", |
346 | sdata->debugfs.dir, buf); | 346 | sdata->vif.debugfs_dir, buf); |
347 | } else | 347 | } else |
348 | ieee80211_debugfs_key_remove_mgmt_default(sdata); | 348 | ieee80211_debugfs_key_remove_mgmt_default(sdata); |
349 | } | 349 | } |
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 059bbb82e84f..ddb426867904 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c | |||
@@ -521,7 +521,7 @@ IEEE80211_IF_FILE(dot11MeshAwakeWindowDuration, | |||
521 | #endif | 521 | #endif |
522 | 522 | ||
523 | #define DEBUGFS_ADD_MODE(name, mode) \ | 523 | #define DEBUGFS_ADD_MODE(name, mode) \ |
524 | debugfs_create_file(#name, mode, sdata->debugfs.dir, \ | 524 | debugfs_create_file(#name, mode, sdata->vif.debugfs_dir, \ |
525 | sdata, &name##_ops); | 525 | sdata, &name##_ops); |
526 | 526 | ||
527 | #define DEBUGFS_ADD(name) DEBUGFS_ADD_MODE(name, 0400) | 527 | #define DEBUGFS_ADD(name) DEBUGFS_ADD_MODE(name, 0400) |
@@ -577,7 +577,7 @@ static void add_mesh_files(struct ieee80211_sub_if_data *sdata) | |||
577 | static void add_mesh_stats(struct ieee80211_sub_if_data *sdata) | 577 | static void add_mesh_stats(struct ieee80211_sub_if_data *sdata) |
578 | { | 578 | { |
579 | struct dentry *dir = debugfs_create_dir("mesh_stats", | 579 | struct dentry *dir = debugfs_create_dir("mesh_stats", |
580 | sdata->debugfs.dir); | 580 | sdata->vif.debugfs_dir); |
581 | #define MESHSTATS_ADD(name)\ | 581 | #define MESHSTATS_ADD(name)\ |
582 | debugfs_create_file(#name, 0400, dir, sdata, &name##_ops); | 582 | debugfs_create_file(#name, 0400, dir, sdata, &name##_ops); |
583 | 583 | ||
@@ -594,7 +594,7 @@ static void add_mesh_stats(struct ieee80211_sub_if_data *sdata) | |||
594 | static void add_mesh_config(struct ieee80211_sub_if_data *sdata) | 594 | static void add_mesh_config(struct ieee80211_sub_if_data *sdata) |
595 | { | 595 | { |
596 | struct dentry *dir = debugfs_create_dir("mesh_config", | 596 | struct dentry *dir = debugfs_create_dir("mesh_config", |
597 | sdata->debugfs.dir); | 597 | sdata->vif.debugfs_dir); |
598 | 598 | ||
599 | #define MESHPARAMS_ADD(name) \ | 599 | #define MESHPARAMS_ADD(name) \ |
600 | debugfs_create_file(#name, 0600, dir, sdata, &name##_ops); | 600 | debugfs_create_file(#name, 0600, dir, sdata, &name##_ops); |
@@ -631,7 +631,7 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata) | |||
631 | 631 | ||
632 | static void add_files(struct ieee80211_sub_if_data *sdata) | 632 | static void add_files(struct ieee80211_sub_if_data *sdata) |
633 | { | 633 | { |
634 | if (!sdata->debugfs.dir) | 634 | if (!sdata->vif.debugfs_dir) |
635 | return; | 635 | return; |
636 | 636 | ||
637 | DEBUGFS_ADD(flags); | 637 | DEBUGFS_ADD(flags); |
@@ -673,21 +673,21 @@ void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata) | |||
673 | char buf[10+IFNAMSIZ]; | 673 | char buf[10+IFNAMSIZ]; |
674 | 674 | ||
675 | sprintf(buf, "netdev:%s", sdata->name); | 675 | sprintf(buf, "netdev:%s", sdata->name); |
676 | sdata->debugfs.dir = debugfs_create_dir(buf, | 676 | sdata->vif.debugfs_dir = debugfs_create_dir(buf, |
677 | sdata->local->hw.wiphy->debugfsdir); | 677 | sdata->local->hw.wiphy->debugfsdir); |
678 | if (sdata->debugfs.dir) | 678 | if (sdata->vif.debugfs_dir) |
679 | sdata->debugfs.subdir_stations = debugfs_create_dir("stations", | 679 | sdata->debugfs.subdir_stations = debugfs_create_dir("stations", |
680 | sdata->debugfs.dir); | 680 | sdata->vif.debugfs_dir); |
681 | add_files(sdata); | 681 | add_files(sdata); |
682 | } | 682 | } |
683 | 683 | ||
684 | void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata) | 684 | void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata) |
685 | { | 685 | { |
686 | if (!sdata->debugfs.dir) | 686 | if (!sdata->vif.debugfs_dir) |
687 | return; | 687 | return; |
688 | 688 | ||
689 | debugfs_remove_recursive(sdata->debugfs.dir); | 689 | debugfs_remove_recursive(sdata->vif.debugfs_dir); |
690 | sdata->debugfs.dir = NULL; | 690 | sdata->vif.debugfs_dir = NULL; |
691 | } | 691 | } |
692 | 692 | ||
693 | void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata) | 693 | void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata) |
@@ -695,7 +695,7 @@ void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata) | |||
695 | struct dentry *dir; | 695 | struct dentry *dir; |
696 | char buf[10 + IFNAMSIZ]; | 696 | char buf[10 + IFNAMSIZ]; |
697 | 697 | ||
698 | dir = sdata->debugfs.dir; | 698 | dir = sdata->vif.debugfs_dir; |
699 | 699 | ||
700 | if (!dir) | 700 | if (!dir) |
701 | return; | 701 | return; |
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 832acea4a5cb..169664c122e2 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h | |||
@@ -241,6 +241,22 @@ static inline u64 drv_prepare_multicast(struct ieee80211_local *local, | |||
241 | return ret; | 241 | return ret; |
242 | } | 242 | } |
243 | 243 | ||
244 | static inline void drv_set_multicast_list(struct ieee80211_local *local, | ||
245 | struct ieee80211_sub_if_data *sdata, | ||
246 | struct netdev_hw_addr_list *mc_list) | ||
247 | { | ||
248 | bool allmulti = sdata->flags & IEEE80211_SDATA_ALLMULTI; | ||
249 | |||
250 | trace_drv_set_multicast_list(local, sdata, mc_list->count); | ||
251 | |||
252 | check_sdata_in_driver(sdata); | ||
253 | |||
254 | if (local->ops->set_multicast_list) | ||
255 | local->ops->set_multicast_list(&local->hw, &sdata->vif, | ||
256 | allmulti, mc_list); | ||
257 | trace_drv_return_void(local); | ||
258 | } | ||
259 | |||
244 | static inline void drv_configure_filter(struct ieee80211_local *local, | 260 | static inline void drv_configure_filter(struct ieee80211_local *local, |
245 | unsigned int changed_flags, | 261 | unsigned int changed_flags, |
246 | unsigned int *total_flags, | 262 | unsigned int *total_flags, |
@@ -531,43 +547,6 @@ static inline void drv_sta_remove_debugfs(struct ieee80211_local *local, | |||
531 | local->ops->sta_remove_debugfs(&local->hw, &sdata->vif, | 547 | local->ops->sta_remove_debugfs(&local->hw, &sdata->vif, |
532 | sta, dir); | 548 | sta, dir); |
533 | } | 549 | } |
534 | |||
535 | static inline | ||
536 | void drv_add_interface_debugfs(struct ieee80211_local *local, | ||
537 | struct ieee80211_sub_if_data *sdata) | ||
538 | { | ||
539 | might_sleep(); | ||
540 | |||
541 | check_sdata_in_driver(sdata); | ||
542 | |||
543 | if (!local->ops->add_interface_debugfs) | ||
544 | return; | ||
545 | |||
546 | local->ops->add_interface_debugfs(&local->hw, &sdata->vif, | ||
547 | sdata->debugfs.dir); | ||
548 | } | ||
549 | |||
550 | static inline | ||
551 | void drv_remove_interface_debugfs(struct ieee80211_local *local, | ||
552 | struct ieee80211_sub_if_data *sdata) | ||
553 | { | ||
554 | might_sleep(); | ||
555 | |||
556 | check_sdata_in_driver(sdata); | ||
557 | |||
558 | if (!local->ops->remove_interface_debugfs) | ||
559 | return; | ||
560 | |||
561 | local->ops->remove_interface_debugfs(&local->hw, &sdata->vif, | ||
562 | sdata->debugfs.dir); | ||
563 | } | ||
564 | #else | ||
565 | static inline | ||
566 | void drv_add_interface_debugfs(struct ieee80211_local *local, | ||
567 | struct ieee80211_sub_if_data *sdata) {} | ||
568 | static inline | ||
569 | void drv_remove_interface_debugfs(struct ieee80211_local *local, | ||
570 | struct ieee80211_sub_if_data *sdata) {} | ||
571 | #endif | 550 | #endif |
572 | 551 | ||
573 | static inline __must_check | 552 | static inline __must_check |
@@ -741,13 +720,14 @@ static inline void drv_rfkill_poll(struct ieee80211_local *local) | |||
741 | local->ops->rfkill_poll(&local->hw); | 720 | local->ops->rfkill_poll(&local->hw); |
742 | } | 721 | } |
743 | 722 | ||
744 | static inline void drv_flush(struct ieee80211_local *local, bool drop) | 723 | static inline void drv_flush(struct ieee80211_local *local, |
724 | u32 queues, bool drop) | ||
745 | { | 725 | { |
746 | might_sleep(); | 726 | might_sleep(); |
747 | 727 | ||
748 | trace_drv_flush(local, drop); | 728 | trace_drv_flush(local, queues, drop); |
749 | if (local->ops->flush) | 729 | if (local->ops->flush) |
750 | local->ops->flush(&local->hw, drop); | 730 | local->ops->flush(&local->hw, queues, drop); |
751 | trace_drv_return_void(local); | 731 | trace_drv_return_void(local); |
752 | } | 732 | } |
753 | 733 | ||
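The drv_flush() hunk above adds a u32 queues argument, which the trace event later in this patch prints as a hex mask, so it is presumably a per-queue bitmap rather than an all-or-nothing flush. A standalone sketch of that semantic (flush(), NUM_QUEUES and the mask values are stand-ins, not mac80211 API):

#include <stdio.h>
#include <stdint.h>

#define NUM_QUEUES 4

static void flush(uint32_t queues, int drop)
{
	int q;

	for (q = 0; q < NUM_QUEUES; q++) {
		if (!(queues & (1U << q)))
			continue;	/* queue not selected by the mask */
		printf("%s queue %d\n", drop ? "dropping" : "flushing", q);
	}
}

int main(void)
{
	flush(1U << 2, 0);			/* flush only one queue */
	flush((1U << NUM_QUEUES) - 1, 1);	/* drop everything      */
	return 0;
}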
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index e140184c28ce..0b09716d22ad 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -759,7 +759,6 @@ struct ieee80211_sub_if_data { | |||
759 | 759 | ||
760 | #ifdef CONFIG_MAC80211_DEBUGFS | 760 | #ifdef CONFIG_MAC80211_DEBUGFS |
761 | struct { | 761 | struct { |
762 | struct dentry *dir; | ||
763 | struct dentry *subdir_stations; | 762 | struct dentry *subdir_stations; |
764 | struct dentry *default_unicast_key; | 763 | struct dentry *default_unicast_key; |
765 | struct dentry *default_multicast_key; | 764 | struct dentry *default_multicast_key; |
@@ -801,11 +800,6 @@ enum sdata_queue_type { | |||
801 | enum { | 800 | enum { |
802 | IEEE80211_RX_MSG = 1, | 801 | IEEE80211_RX_MSG = 1, |
803 | IEEE80211_TX_STATUS_MSG = 2, | 802 | IEEE80211_TX_STATUS_MSG = 2, |
804 | IEEE80211_EOSP_MSG = 3, | ||
805 | }; | ||
806 | |||
807 | struct skb_eosp_msg_data { | ||
808 | u8 sta[ETH_ALEN], iface[ETH_ALEN]; | ||
809 | }; | 803 | }; |
810 | 804 | ||
811 | enum queue_stop_reason { | 805 | enum queue_stop_reason { |
@@ -816,6 +810,7 @@ enum queue_stop_reason { | |||
816 | IEEE80211_QUEUE_STOP_REASON_SUSPEND, | 810 | IEEE80211_QUEUE_STOP_REASON_SUSPEND, |
817 | IEEE80211_QUEUE_STOP_REASON_SKB_ADD, | 811 | IEEE80211_QUEUE_STOP_REASON_SKB_ADD, |
818 | IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL, | 812 | IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL, |
813 | IEEE80211_QUEUE_STOP_REASON_FLUSH, | ||
819 | }; | 814 | }; |
820 | 815 | ||
821 | #ifdef CONFIG_MAC80211_LEDS | 816 | #ifdef CONFIG_MAC80211_LEDS |
@@ -1530,8 +1525,10 @@ void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata, | |||
1530 | struct ieee80211_hdr *hdr, bool ack); | 1525 | struct ieee80211_hdr *hdr, bool ack); |
1531 | 1526 | ||
1532 | void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, | 1527 | void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, |
1528 | unsigned long queues, | ||
1533 | enum queue_stop_reason reason); | 1529 | enum queue_stop_reason reason); |
1534 | void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, | 1530 | void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, |
1531 | unsigned long queues, | ||
1535 | enum queue_stop_reason reason); | 1532 | enum queue_stop_reason reason); |
1536 | void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, | 1533 | void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, |
1537 | enum queue_stop_reason reason); | 1534 | enum queue_stop_reason reason); |
@@ -1548,6 +1545,8 @@ static inline void ieee80211_add_pending_skbs(struct ieee80211_local *local, | |||
1548 | { | 1545 | { |
1549 | ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL); | 1546 | ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL); |
1550 | } | 1547 | } |
1548 | void ieee80211_flush_queues(struct ieee80211_local *local, | ||
1549 | struct ieee80211_sub_if_data *sdata); | ||
1551 | 1550 | ||
1552 | void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, | 1551 | void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, |
1553 | u16 transaction, u16 auth_alg, u16 status, | 1552 | u16 transaction, u16 auth_alg, u16 status, |
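The ieee80211_i.h hunks above add a queues mask to the stop/wake-by-reason prototypes and a new FLUSH stop reason. The standalone sketch below shows one plausible way reason-based stopping composes (each queue tracks a bitmask of outstanding reasons and runs only when the mask is empty); the names and data layout are illustrative assumptions, not the mac80211 internals.

#include <stdio.h>

enum stop_reason { REASON_SUSPEND, REASON_CSA, REASON_FLUSH };

#define NUM_QUEUES 4
static unsigned long stop_reasons[NUM_QUEUES];

static void stop_queues(unsigned long queues, enum stop_reason r)
{
	for (int q = 0; q < NUM_QUEUES; q++)
		if (queues & (1UL << q))
			stop_reasons[q] |= 1UL << r;
}

static void wake_queues(unsigned long queues, enum stop_reason r)
{
	for (int q = 0; q < NUM_QUEUES; q++)
		if (queues & (1UL << q))
			stop_reasons[q] &= ~(1UL << r);
}

static int queue_running(int q)
{
	return stop_reasons[q] == 0;	/* runs only with no reasons left */
}

int main(void)
{
	unsigned long all = (1UL << NUM_QUEUES) - 1;

	stop_queues(all, REASON_FLUSH);
	stop_queues(1UL << 0, REASON_CSA);
	wake_queues(all, REASON_FLUSH);
	printf("queue 0 running: %d (still stopped for CSA)\n", queue_running(0));
	printf("queue 1 running: %d\n", queue_running(1));
	return 0;
}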
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index a2b5e17036bb..69aaba79a9f7 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -92,7 +92,7 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local) | |||
92 | if (local->hw.conf.flags & IEEE80211_CONF_IDLE) | 92 | if (local->hw.conf.flags & IEEE80211_CONF_IDLE) |
93 | return 0; | 93 | return 0; |
94 | 94 | ||
95 | drv_flush(local, false); | 95 | ieee80211_flush_queues(local, NULL); |
96 | 96 | ||
97 | local->hw.conf.flags |= IEEE80211_CONF_IDLE; | 97 | local->hw.conf.flags |= IEEE80211_CONF_IDLE; |
98 | return IEEE80211_CONF_CHANGE_IDLE; | 98 | return IEEE80211_CONF_CHANGE_IDLE; |
@@ -560,8 +560,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) | |||
560 | goto err_del_interface; | 560 | goto err_del_interface; |
561 | } | 561 | } |
562 | 562 | ||
563 | drv_add_interface_debugfs(local, sdata); | ||
564 | |||
565 | if (sdata->vif.type == NL80211_IFTYPE_AP) { | 563 | if (sdata->vif.type == NL80211_IFTYPE_AP) { |
566 | local->fif_pspoll++; | 564 | local->fif_pspoll++; |
567 | local->fif_probe_req++; | 565 | local->fif_probe_req++; |
@@ -849,8 +847,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
849 | case NL80211_IFTYPE_AP: | 847 | case NL80211_IFTYPE_AP: |
850 | skb_queue_purge(&sdata->skb_queue); | 848 | skb_queue_purge(&sdata->skb_queue); |
851 | 849 | ||
852 | drv_remove_interface_debugfs(local, sdata); | ||
853 | |||
854 | if (going_down) | 850 | if (going_down) |
855 | drv_remove_interface(local, sdata); | 851 | drv_remove_interface(local, sdata); |
856 | } | 852 | } |
@@ -922,6 +918,17 @@ static void ieee80211_set_multicast_list(struct net_device *dev) | |||
922 | atomic_dec(&local->iff_promiscs); | 918 | atomic_dec(&local->iff_promiscs); |
923 | sdata->flags ^= IEEE80211_SDATA_PROMISC; | 919 | sdata->flags ^= IEEE80211_SDATA_PROMISC; |
924 | } | 920 | } |
921 | |||
922 | /* | ||
923 | * TODO: If somebody needs this on AP interfaces, | ||
924 | * it can be enabled easily but multicast | ||
925 | * addresses from VLANs need to be synced. | ||
926 | */ | ||
927 | if (sdata->vif.type != NL80211_IFTYPE_MONITOR && | ||
928 | sdata->vif.type != NL80211_IFTYPE_AP_VLAN && | ||
929 | sdata->vif.type != NL80211_IFTYPE_AP) | ||
930 | drv_set_multicast_list(local, sdata, &dev->mc); | ||
931 | |||
925 | spin_lock_bh(&local->filter_lock); | 932 | spin_lock_bh(&local->filter_lock); |
926 | __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len); | 933 | __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len); |
927 | spin_unlock_bh(&local->filter_lock); | 934 | spin_unlock_bh(&local->filter_lock); |
diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 99e9f6ae6a54..67059b88fea5 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c | |||
@@ -248,11 +248,11 @@ void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, | |||
248 | } | 248 | } |
249 | 249 | ||
250 | 250 | ||
251 | static void __ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, | 251 | static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, |
252 | struct sta_info *sta, | 252 | struct sta_info *sta, |
253 | bool pairwise, | 253 | bool pairwise, |
254 | struct ieee80211_key *old, | 254 | struct ieee80211_key *old, |
255 | struct ieee80211_key *new) | 255 | struct ieee80211_key *new) |
256 | { | 256 | { |
257 | int idx; | 257 | int idx; |
258 | bool defunikey, defmultikey, defmgmtkey; | 258 | bool defunikey, defmultikey, defmgmtkey; |
@@ -397,25 +397,21 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len, | |||
397 | return key; | 397 | return key; |
398 | } | 398 | } |
399 | 399 | ||
400 | static void ieee80211_key_free_common(struct ieee80211_key *key) | ||
401 | { | ||
402 | if (key->conf.cipher == WLAN_CIPHER_SUITE_CCMP) | ||
403 | ieee80211_aes_key_free(key->u.ccmp.tfm); | ||
404 | if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC) | ||
405 | ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); | ||
406 | kfree(key); | ||
407 | } | ||
408 | |||
400 | static void __ieee80211_key_destroy(struct ieee80211_key *key, | 409 | static void __ieee80211_key_destroy(struct ieee80211_key *key, |
401 | bool delay_tailroom) | 410 | bool delay_tailroom) |
402 | { | 411 | { |
403 | if (!key) | ||
404 | return; | ||
405 | |||
406 | /* | ||
407 | * Synchronize so the TX path can no longer be using | ||
408 | * this key before we free/remove it. | ||
409 | */ | ||
410 | synchronize_net(); | ||
411 | |||
412 | if (key->local) | 412 | if (key->local) |
413 | ieee80211_key_disable_hw_accel(key); | 413 | ieee80211_key_disable_hw_accel(key); |
414 | 414 | ||
415 | if (key->conf.cipher == WLAN_CIPHER_SUITE_CCMP) | ||
416 | ieee80211_aes_key_free(key->u.ccmp.tfm); | ||
417 | if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC) | ||
418 | ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); | ||
419 | if (key->local) { | 415 | if (key->local) { |
420 | struct ieee80211_sub_if_data *sdata = key->sdata; | 416 | struct ieee80211_sub_if_data *sdata = key->sdata; |
421 | 417 | ||
@@ -431,7 +427,28 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key, | |||
431 | } | 427 | } |
432 | } | 428 | } |
433 | 429 | ||
434 | kfree(key); | 430 | ieee80211_key_free_common(key); |
431 | } | ||
432 | |||
433 | static void ieee80211_key_destroy(struct ieee80211_key *key, | ||
434 | bool delay_tailroom) | ||
435 | { | ||
436 | if (!key) | ||
437 | return; | ||
438 | |||
439 | /* | ||
440 | * Synchronize so the TX path can no longer be using | ||
441 | * this key before we free/remove it. | ||
442 | */ | ||
443 | synchronize_net(); | ||
444 | |||
445 | __ieee80211_key_destroy(key, delay_tailroom); | ||
446 | } | ||
447 | |||
448 | void ieee80211_key_free_unused(struct ieee80211_key *key) | ||
449 | { | ||
450 | WARN_ON(key->sdata || key->local); | ||
451 | ieee80211_key_free_common(key); | ||
435 | } | 452 | } |
436 | 453 | ||
437 | int ieee80211_key_link(struct ieee80211_key *key, | 454 | int ieee80211_key_link(struct ieee80211_key *key, |
@@ -462,19 +479,22 @@ int ieee80211_key_link(struct ieee80211_key *key, | |||
462 | 479 | ||
463 | increment_tailroom_need_count(sdata); | 480 | increment_tailroom_need_count(sdata); |
464 | 481 | ||
465 | __ieee80211_key_replace(sdata, sta, pairwise, old_key, key); | 482 | ieee80211_key_replace(sdata, sta, pairwise, old_key, key); |
466 | __ieee80211_key_destroy(old_key, true); | 483 | ieee80211_key_destroy(old_key, true); |
467 | 484 | ||
468 | ieee80211_debugfs_key_add(key); | 485 | ieee80211_debugfs_key_add(key); |
469 | 486 | ||
470 | ret = ieee80211_key_enable_hw_accel(key); | 487 | ret = ieee80211_key_enable_hw_accel(key); |
471 | 488 | ||
489 | if (ret) | ||
490 | ieee80211_key_free(key, true); | ||
491 | |||
472 | mutex_unlock(&sdata->local->key_mtx); | 492 | mutex_unlock(&sdata->local->key_mtx); |
473 | 493 | ||
474 | return ret; | 494 | return ret; |
475 | } | 495 | } |
476 | 496 | ||
477 | void __ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom) | 497 | void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom) |
478 | { | 498 | { |
479 | if (!key) | 499 | if (!key) |
480 | return; | 500 | return; |
@@ -483,18 +503,10 @@ void __ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom) | |||
483 | * Replace key with nothingness if it was ever used. | 503 | * Replace key with nothingness if it was ever used. |
484 | */ | 504 | */ |
485 | if (key->sdata) | 505 | if (key->sdata) |
486 | __ieee80211_key_replace(key->sdata, key->sta, | 506 | ieee80211_key_replace(key->sdata, key->sta, |
487 | key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, | 507 | key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, |
488 | key, NULL); | 508 | key, NULL); |
489 | __ieee80211_key_destroy(key, delay_tailroom); | 509 | ieee80211_key_destroy(key, delay_tailroom); |
490 | } | ||
491 | |||
492 | void ieee80211_key_free(struct ieee80211_local *local, | ||
493 | struct ieee80211_key *key) | ||
494 | { | ||
495 | mutex_lock(&local->key_mtx); | ||
496 | __ieee80211_key_free(key, true); | ||
497 | mutex_unlock(&local->key_mtx); | ||
498 | } | 510 | } |
499 | 511 | ||
500 | void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata) | 512 | void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata) |
@@ -554,6 +566,7 @@ EXPORT_SYMBOL(ieee80211_iter_keys); | |||
554 | void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata) | 566 | void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata) |
555 | { | 567 | { |
556 | struct ieee80211_key *key, *tmp; | 568 | struct ieee80211_key *key, *tmp; |
569 | LIST_HEAD(keys); | ||
557 | 570 | ||
558 | cancel_delayed_work_sync(&sdata->dec_tailroom_needed_wk); | 571 | cancel_delayed_work_sync(&sdata->dec_tailroom_needed_wk); |
559 | 572 | ||
@@ -565,17 +578,65 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata) | |||
565 | 578 | ||
566 | ieee80211_debugfs_key_remove_mgmt_default(sdata); | 579 | ieee80211_debugfs_key_remove_mgmt_default(sdata); |
567 | 580 | ||
568 | list_for_each_entry_safe(key, tmp, &sdata->key_list, list) | 581 | list_for_each_entry_safe(key, tmp, &sdata->key_list, list) { |
569 | __ieee80211_key_free(key, false); | 582 | ieee80211_key_replace(key->sdata, key->sta, |
583 | key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, | ||
584 | key, NULL); | ||
585 | list_add_tail(&key->list, &keys); | ||
586 | } | ||
570 | 587 | ||
571 | ieee80211_debugfs_key_update_default(sdata); | 588 | ieee80211_debugfs_key_update_default(sdata); |
572 | 589 | ||
590 | if (!list_empty(&keys)) { | ||
591 | synchronize_net(); | ||
592 | list_for_each_entry_safe(key, tmp, &keys, list) | ||
593 | __ieee80211_key_destroy(key, false); | ||
594 | } | ||
595 | |||
573 | WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt || | 596 | WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt || |
574 | sdata->crypto_tx_tailroom_pending_dec); | 597 | sdata->crypto_tx_tailroom_pending_dec); |
575 | 598 | ||
576 | mutex_unlock(&sdata->local->key_mtx); | 599 | mutex_unlock(&sdata->local->key_mtx); |
577 | } | 600 | } |
578 | 601 | ||
602 | void ieee80211_free_sta_keys(struct ieee80211_local *local, | ||
603 | struct sta_info *sta) | ||
604 | { | ||
605 | struct ieee80211_key *key, *tmp; | ||
606 | LIST_HEAD(keys); | ||
607 | int i; | ||
608 | |||
609 | mutex_lock(&local->key_mtx); | ||
610 | for (i = 0; i < NUM_DEFAULT_KEYS; i++) { | ||
611 | key = key_mtx_dereference(local, sta->gtk[i]); | ||
612 | if (!key) | ||
613 | continue; | ||
614 | ieee80211_key_replace(key->sdata, key->sta, | ||
615 | key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, | ||
616 | key, NULL); | ||
617 | list_add(&key->list, &keys); | ||
618 | } | ||
619 | |||
620 | key = key_mtx_dereference(local, sta->ptk); | ||
621 | if (key) { | ||
622 | ieee80211_key_replace(key->sdata, key->sta, | ||
623 | key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, | ||
624 | key, NULL); | ||
625 | list_add(&key->list, &keys); | ||
626 | } | ||
627 | |||
628 | /* | ||
629 | * NB: the station code relies on this being | ||
630 | * done even if there aren't any keys | ||
631 | */ | ||
632 | synchronize_net(); | ||
633 | |||
634 | list_for_each_entry_safe(key, tmp, &keys, list) | ||
635 | __ieee80211_key_destroy(key, true); | ||
636 | |||
637 | mutex_unlock(&local->key_mtx); | ||
638 | } | ||
639 | |||
579 | void ieee80211_delayed_tailroom_dec(struct work_struct *wk) | 640 | void ieee80211_delayed_tailroom_dec(struct work_struct *wk) |
580 | { | 641 | { |
581 | struct ieee80211_sub_if_data *sdata; | 642 | struct ieee80211_sub_if_data *sdata; |
diff --git a/net/mac80211/key.h b/net/mac80211/key.h index 2a682d81cee9..e8de3e6d7804 100644 --- a/net/mac80211/key.h +++ b/net/mac80211/key.h | |||
@@ -129,19 +129,20 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len, | |||
129 | size_t seq_len, const u8 *seq); | 129 | size_t seq_len, const u8 *seq); |
130 | /* | 130 | /* |
131 | * Insert a key into data structures (sdata, sta if necessary) | 131 | * Insert a key into data structures (sdata, sta if necessary) |
132 | * to make it used, free old key. | 132 | * to make it used, free old key. On failure, also free the new key. |
133 | */ | 133 | */ |
134 | int __must_check ieee80211_key_link(struct ieee80211_key *key, | 134 | int ieee80211_key_link(struct ieee80211_key *key, |
135 | struct ieee80211_sub_if_data *sdata, | 135 | struct ieee80211_sub_if_data *sdata, |
136 | struct sta_info *sta); | 136 | struct sta_info *sta); |
137 | void __ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom); | 137 | void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom); |
138 | void ieee80211_key_free(struct ieee80211_local *local, | 138 | void ieee80211_key_free_unused(struct ieee80211_key *key); |
139 | struct ieee80211_key *key); | ||
140 | void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx, | 139 | void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx, |
141 | bool uni, bool multi); | 140 | bool uni, bool multi); |
142 | void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, | 141 | void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, |
143 | int idx); | 142 | int idx); |
144 | void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata); | 143 | void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata); |
144 | void ieee80211_free_sta_keys(struct ieee80211_local *local, | ||
145 | struct sta_info *sta); | ||
145 | void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); | 146 | void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); |
146 | 147 | ||
147 | #define key_mtx_dereference(local, ref) \ | 148 | #define key_mtx_dereference(local, ref) \ |
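The key.c/key.h hunks above change the key ownership rules: ieee80211_key_link() now frees the key itself on failure (so callers such as the cfg.c hunk no longer do), and a key that was never linked is released with the new ieee80211_key_free_unused(). A standalone, userspace-only illustration of those rules; all names below are stand-ins, not the mac80211 API.

#include <stdlib.h>

struct key { int linked; };

static struct key *key_alloc(void)
{
	return calloc(1, sizeof(struct key));
}

/* Takes ownership: on failure the key is freed here, never by the caller. */
static int key_link(struct key *k, int fail)
{
	if (fail) {
		free(k);
		return -1;
	}
	k->linked = 1;
	return 0;
}

static void key_free_unused(struct key *k)
{
	free(k);
}

int main(void)
{
	struct key *unused, *k;

	unused = key_alloc();
	if (!unused)
		return 1;
	key_free_unused(unused);	/* never linked: caller releases it */

	k = key_alloc();
	if (!k)
		return 1;
	if (key_link(k, 1) < 0)		/* forced failure: key_link freed k */
		return 0;		/* do NOT free k again here */
	return 0;
}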
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 5a53aa5ede80..c6f81ecc36a1 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -100,7 +100,6 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local) | |||
100 | int power; | 100 | int power; |
101 | enum nl80211_channel_type channel_type; | 101 | enum nl80211_channel_type channel_type; |
102 | u32 offchannel_flag; | 102 | u32 offchannel_flag; |
103 | bool scanning = false; | ||
104 | 103 | ||
105 | offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; | 104 | offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; |
106 | if (local->scan_channel) { | 105 | if (local->scan_channel) { |
@@ -147,9 +146,6 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local) | |||
147 | changed |= IEEE80211_CONF_CHANGE_SMPS; | 146 | changed |= IEEE80211_CONF_CHANGE_SMPS; |
148 | } | 147 | } |
149 | 148 | ||
150 | scanning = test_bit(SCAN_SW_SCANNING, &local->scanning) || | ||
151 | test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) || | ||
152 | test_bit(SCAN_HW_SCANNING, &local->scanning); | ||
153 | power = chan->max_power; | 149 | power = chan->max_power; |
154 | 150 | ||
155 | rcu_read_lock(); | 151 | rcu_read_lock(); |
@@ -226,8 +222,6 @@ u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) | |||
226 | static void ieee80211_tasklet_handler(unsigned long data) | 222 | static void ieee80211_tasklet_handler(unsigned long data) |
227 | { | 223 | { |
228 | struct ieee80211_local *local = (struct ieee80211_local *) data; | 224 | struct ieee80211_local *local = (struct ieee80211_local *) data; |
229 | struct sta_info *sta, *tmp; | ||
230 | struct skb_eosp_msg_data *eosp_data; | ||
231 | struct sk_buff *skb; | 225 | struct sk_buff *skb; |
232 | 226 | ||
233 | while ((skb = skb_dequeue(&local->skb_queue)) || | 227 | while ((skb = skb_dequeue(&local->skb_queue)) || |
@@ -243,18 +237,6 @@ static void ieee80211_tasklet_handler(unsigned long data) | |||
243 | skb->pkt_type = 0; | 237 | skb->pkt_type = 0; |
244 | ieee80211_tx_status(&local->hw, skb); | 238 | ieee80211_tx_status(&local->hw, skb); |
245 | break; | 239 | break; |
246 | case IEEE80211_EOSP_MSG: | ||
247 | eosp_data = (void *)skb->cb; | ||
248 | for_each_sta_info(local, eosp_data->sta, sta, tmp) { | ||
249 | /* skip wrong virtual interface */ | ||
250 | if (memcmp(eosp_data->iface, | ||
251 | sta->sdata->vif.addr, ETH_ALEN)) | ||
252 | continue; | ||
253 | clear_sta_flag(sta, WLAN_STA_SP); | ||
254 | break; | ||
255 | } | ||
256 | dev_kfree_skb(skb); | ||
257 | break; | ||
258 | default: | 240 | default: |
259 | WARN(1, "mac80211: Packet is of unknown type %d\n", | 241 | WARN(1, "mac80211: Packet is of unknown type %d\n", |
260 | skb->pkt_type); | 242 | skb->pkt_type); |
@@ -295,8 +277,8 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw) | |||
295 | "Hardware restart was requested\n"); | 277 | "Hardware restart was requested\n"); |
296 | 278 | ||
297 | /* use this reason, ieee80211_reconfig will unblock it */ | 279 | /* use this reason, ieee80211_reconfig will unblock it */ |
298 | ieee80211_stop_queues_by_reason(hw, | 280 | ieee80211_stop_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, |
299 | IEEE80211_QUEUE_STOP_REASON_SUSPEND); | 281 | IEEE80211_QUEUE_STOP_REASON_SUSPEND); |
300 | 282 | ||
301 | /* | 283 | /* |
302 | * Stop all Rx during the reconfig. We don't want state changes | 284 | * Stop all Rx during the reconfig. We don't want state changes |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 77b5710db241..123a300cef57 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -699,10 +699,8 @@ out_free: | |||
699 | static int | 699 | static int |
700 | ieee80211_mesh_rebuild_beacon(struct ieee80211_if_mesh *ifmsh) | 700 | ieee80211_mesh_rebuild_beacon(struct ieee80211_if_mesh *ifmsh) |
701 | { | 701 | { |
702 | struct ieee80211_sub_if_data *sdata; | ||
703 | struct beacon_data *old_bcn; | 702 | struct beacon_data *old_bcn; |
704 | int ret; | 703 | int ret; |
705 | sdata = container_of(ifmsh, struct ieee80211_sub_if_data, u.mesh); | ||
706 | 704 | ||
707 | mutex_lock(&ifmsh->mtx); | 705 | mutex_lock(&ifmsh->mtx); |
708 | 706 | ||
@@ -833,9 +831,8 @@ ieee80211_mesh_rx_probe_req(struct ieee80211_sub_if_data *sdata, | |||
833 | struct ieee80211_mgmt *hdr; | 831 | struct ieee80211_mgmt *hdr; |
834 | struct ieee802_11_elems elems; | 832 | struct ieee802_11_elems elems; |
835 | size_t baselen; | 833 | size_t baselen; |
836 | u8 *pos, *end; | 834 | u8 *pos; |
837 | 835 | ||
838 | end = ((u8 *) mgmt) + len; | ||
839 | pos = mgmt->u.probe_req.variable; | 836 | pos = mgmt->u.probe_req.variable; |
840 | baselen = (u8 *) pos - (u8 *) mgmt; | 837 | baselen = (u8 *) pos - (u8 *) mgmt; |
841 | if (baselen > len) | 838 | if (baselen > len) |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 167158646593..e06dbbf8cb4c 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -1009,6 +1009,7 @@ static void ieee80211_chswitch_work(struct work_struct *work) | |||
1009 | 1009 | ||
1010 | /* XXX: wait for a beacon first? */ | 1010 | /* XXX: wait for a beacon first? */ |
1011 | ieee80211_wake_queues_by_reason(&sdata->local->hw, | 1011 | ieee80211_wake_queues_by_reason(&sdata->local->hw, |
1012 | IEEE80211_MAX_QUEUE_MAP, | ||
1012 | IEEE80211_QUEUE_STOP_REASON_CSA); | 1013 | IEEE80211_QUEUE_STOP_REASON_CSA); |
1013 | out: | 1014 | out: |
1014 | ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; | 1015 | ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; |
@@ -1108,6 +1109,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
1108 | 1109 | ||
1109 | if (sw_elem->mode) | 1110 | if (sw_elem->mode) |
1110 | ieee80211_stop_queues_by_reason(&sdata->local->hw, | 1111 | ieee80211_stop_queues_by_reason(&sdata->local->hw, |
1112 | IEEE80211_MAX_QUEUE_MAP, | ||
1111 | IEEE80211_QUEUE_STOP_REASON_CSA); | 1113 | IEEE80211_QUEUE_STOP_REASON_CSA); |
1112 | 1114 | ||
1113 | if (sdata->local->ops->channel_switch) { | 1115 | if (sdata->local->ops->channel_switch) { |
@@ -1375,6 +1377,7 @@ void ieee80211_dynamic_ps_disable_work(struct work_struct *work) | |||
1375 | } | 1377 | } |
1376 | 1378 | ||
1377 | ieee80211_wake_queues_by_reason(&local->hw, | 1379 | ieee80211_wake_queues_by_reason(&local->hw, |
1380 | IEEE80211_MAX_QUEUE_MAP, | ||
1378 | IEEE80211_QUEUE_STOP_REASON_PS); | 1381 | IEEE80211_QUEUE_STOP_REASON_PS); |
1379 | } | 1382 | } |
1380 | 1383 | ||
@@ -1436,7 +1439,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work) | |||
1436 | else { | 1439 | else { |
1437 | ieee80211_send_nullfunc(local, sdata, 1); | 1440 | ieee80211_send_nullfunc(local, sdata, 1); |
1438 | /* Flush to get the tx status of nullfunc frame */ | 1441 | /* Flush to get the tx status of nullfunc frame */ |
1439 | drv_flush(local, false); | 1442 | ieee80211_flush_queues(local, sdata); |
1440 | } | 1443 | } |
1441 | } | 1444 | } |
1442 | 1445 | ||
@@ -1767,7 +1770,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, | |||
1767 | 1770 | ||
1768 | /* flush out any pending frame (e.g. DELBA) before deauth/disassoc */ | 1771 | /* flush out any pending frame (e.g. DELBA) before deauth/disassoc */ |
1769 | if (tx) | 1772 | if (tx) |
1770 | drv_flush(local, false); | 1773 | ieee80211_flush_queues(local, sdata); |
1771 | 1774 | ||
1772 | /* deauthenticate/disassociate now */ | 1775 | /* deauthenticate/disassociate now */ |
1773 | if (tx || frame_buf) | 1776 | if (tx || frame_buf) |
@@ -1776,7 +1779,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, | |||
1776 | 1779 | ||
1777 | /* flush out frame */ | 1780 | /* flush out frame */ |
1778 | if (tx) | 1781 | if (tx) |
1779 | drv_flush(local, false); | 1782 | ieee80211_flush_queues(local, sdata); |
1780 | 1783 | ||
1781 | /* clear bssid only after building the needed mgmt frames */ | 1784 | /* clear bssid only after building the needed mgmt frames */ |
1782 | memset(ifmgd->bssid, 0, ETH_ALEN); | 1785 | memset(ifmgd->bssid, 0, ETH_ALEN); |
@@ -1948,7 +1951,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) | |||
1948 | ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); | 1951 | ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); |
1949 | run_again(ifmgd, ifmgd->probe_timeout); | 1952 | run_again(ifmgd, ifmgd->probe_timeout); |
1950 | if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) | 1953 | if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) |
1951 | drv_flush(sdata->local, false); | 1954 | ieee80211_flush_queues(sdata->local, sdata); |
1952 | } | 1955 | } |
1953 | 1956 | ||
1954 | static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, | 1957 | static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, |
@@ -2071,6 +2074,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) | |||
2071 | true, frame_buf); | 2074 | true, frame_buf); |
2072 | ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; | 2075 | ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; |
2073 | ieee80211_wake_queues_by_reason(&sdata->local->hw, | 2076 | ieee80211_wake_queues_by_reason(&sdata->local->hw, |
2077 | IEEE80211_MAX_QUEUE_MAP, | ||
2074 | IEEE80211_QUEUE_STOP_REASON_CSA); | 2078 | IEEE80211_QUEUE_STOP_REASON_CSA); |
2075 | mutex_unlock(&ifmgd->mtx); | 2079 | mutex_unlock(&ifmgd->mtx); |
2076 | 2080 | ||
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index 950c95bec13d..cce795871ab1 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c | |||
@@ -118,9 +118,9 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local) | |||
118 | * Stop queues and transmit all frames queued by the driver | 118 | * Stop queues and transmit all frames queued by the driver |
119 | * before sending nullfunc to enable powersave at the AP. | 119 | * before sending nullfunc to enable powersave at the AP. |
120 | */ | 120 | */ |
121 | ieee80211_stop_queues_by_reason(&local->hw, | 121 | ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP, |
122 | IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL); | 122 | IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL); |
123 | drv_flush(local, false); | 123 | ieee80211_flush_queues(local, NULL); |
124 | 124 | ||
125 | mutex_lock(&local->iflist_mtx); | 125 | mutex_lock(&local->iflist_mtx); |
126 | list_for_each_entry(sdata, &local->interfaces, list) { | 126 | list_for_each_entry(sdata, &local->interfaces, list) { |
@@ -181,7 +181,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local) | |||
181 | } | 181 | } |
182 | mutex_unlock(&local->iflist_mtx); | 182 | mutex_unlock(&local->iflist_mtx); |
183 | 183 | ||
184 | ieee80211_wake_queues_by_reason(&local->hw, | 184 | ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP, |
185 | IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL); | 185 | IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL); |
186 | } | 186 | } |
187 | 187 | ||
@@ -382,7 +382,7 @@ void ieee80211_sw_roc_work(struct work_struct *work) | |||
382 | ieee80211_roc_notify_destroy(roc, !roc->abort); | 382 | ieee80211_roc_notify_destroy(roc, !roc->abort); |
383 | 383 | ||
384 | if (started) { | 384 | if (started) { |
385 | drv_flush(local, false); | 385 | ieee80211_flush_queues(local, NULL); |
386 | 386 | ||
387 | local->tmp_channel = NULL; | 387 | local->tmp_channel = NULL; |
388 | ieee80211_hw_config(local, 0); | 388 | ieee80211_hw_config(local, 0); |
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index b471a67f224d..3d16f4e61743 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c | |||
@@ -30,12 +30,13 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) | |||
30 | } | 30 | } |
31 | 31 | ||
32 | ieee80211_stop_queues_by_reason(hw, | 32 | ieee80211_stop_queues_by_reason(hw, |
33 | IEEE80211_QUEUE_STOP_REASON_SUSPEND); | 33 | IEEE80211_MAX_QUEUE_MAP, |
34 | IEEE80211_QUEUE_STOP_REASON_SUSPEND); | ||
34 | 35 | ||
35 | /* flush out all packets */ | 36 | /* flush out all packets */ |
36 | synchronize_net(); | 37 | synchronize_net(); |
37 | 38 | ||
38 | drv_flush(local, false); | 39 | ieee80211_flush_queues(local, NULL); |
39 | 40 | ||
40 | local->quiescing = true; | 41 | local->quiescing = true; |
41 | /* make quiescing visible to timers everywhere */ | 42 | /* make quiescing visible to timers everywhere */ |
@@ -68,6 +69,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) | |||
68 | mutex_unlock(&local->sta_mtx); | 69 | mutex_unlock(&local->sta_mtx); |
69 | } | 70 | } |
70 | ieee80211_wake_queues_by_reason(hw, | 71 | ieee80211_wake_queues_by_reason(hw, |
72 | IEEE80211_MAX_QUEUE_MAP, | ||
71 | IEEE80211_QUEUE_STOP_REASON_SUSPEND); | 73 | IEEE80211_QUEUE_STOP_REASON_SUSPEND); |
72 | return err; | 74 | return err; |
73 | } else if (err > 0) { | 75 | } else if (err > 0) { |
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 749552bdcfe1..d2b264d1311d 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c | |||
@@ -202,14 +202,23 @@ minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate) | |||
202 | struct minstrel_rate_stats *mr; | 202 | struct minstrel_rate_stats *mr; |
203 | unsigned int nsecs = 0; | 203 | unsigned int nsecs = 0; |
204 | unsigned int tp; | 204 | unsigned int tp; |
205 | unsigned int prob; | ||
205 | 206 | ||
206 | mr = &mi->groups[group].rates[rate]; | 207 | mr = &mi->groups[group].rates[rate]; |
208 | prob = mr->probability; | ||
207 | 209 | ||
208 | if (mr->probability < MINSTREL_FRAC(1, 10)) { | 210 | if (prob < MINSTREL_FRAC(1, 10)) { |
209 | mr->cur_tp = 0; | 211 | mr->cur_tp = 0; |
210 | return; | 212 | return; |
211 | } | 213 | } |
212 | 214 | ||
215 | /* | ||
216 | * For the throughput calculation, limit the probability value to 90% to | ||
217 | * account for collision related packet error rate fluctuation | ||
218 | */ | ||
219 | if (prob > MINSTREL_FRAC(9, 10)) | ||
220 | prob = MINSTREL_FRAC(9, 10); | ||
221 | |||
213 | if (group != MINSTREL_CCK_GROUP) | 222 | if (group != MINSTREL_CCK_GROUP) |
214 | nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len); | 223 | nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len); |
215 | 224 | ||
@@ -639,15 +648,18 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) | |||
639 | /* | 648 | /* |
640 | * Sampling might add some overhead (RTS, no aggregation) | 649 | * Sampling might add some overhead (RTS, no aggregation) |
641 | * to the frame. Hence, don't use sampling for the currently | 650 | * to the frame. Hence, don't use sampling for the currently |
642 | * used max TP rate. | 651 | * used rates. |
643 | */ | 652 | */ |
644 | if (sample_idx == mi->max_tp_rate) | 653 | if (sample_idx == mi->max_tp_rate || |
654 | sample_idx == mi->max_tp_rate2 || | ||
655 | sample_idx == mi->max_prob_rate) | ||
645 | return -1; | 656 | return -1; |
657 | |||
646 | /* | 658 | /* |
647 | * When not using MRR, do not sample if the probability is already | 659 | * Do not sample if the probability is already higher than 95% |
648 | * higher than 95% to avoid wasting airtime | 660 | * to avoid wasting airtime. |
649 | */ | 661 | */ |
650 | if (!mp->has_mrr && (mr->probability > MINSTREL_FRAC(95, 100))) | 662 | if (mr->probability > MINSTREL_FRAC(95, 100)) |
651 | return -1; | 663 | return -1; |
652 | 664 | ||
653 | /* | 665 | /* |
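The minstrel_ht hunk above clamps the success probability used for the throughput estimate to 90% (rates below 10% already count as zero), so collision-related error bursts don't make a near-perfect rate look better than it is. A standalone arithmetic sketch, using plain percentages instead of minstrel's MINSTREL_FRAC() fixed-point scale:

#include <stdio.h>

static unsigned int prob_for_tp(unsigned int prob_pct)
{
	if (prob_pct < 10)
		return 0;	/* too lossy: throughput treated as zero   */
	if (prob_pct > 90)
		prob_pct = 90;	/* cap: damps collision-driven fluctuation */
	return prob_pct;
}

int main(void)
{
	unsigned int raw[] = { 98, 92, 50, 5 };

	for (unsigned int i = 0; i < sizeof(raw) / sizeof(raw[0]); i++)
		printf("raw %2u%% -> used %2u%%\n", raw[i], prob_for_tp(raw[i]));
	return 0;
}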
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 43a45cf00e06..cb34cbbaa20c 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c | |||
@@ -153,7 +153,6 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb) | |||
153 | u8 *elements; | 153 | u8 *elements; |
154 | struct ieee80211_channel *channel; | 154 | struct ieee80211_channel *channel; |
155 | size_t baselen; | 155 | size_t baselen; |
156 | bool beacon; | ||
157 | struct ieee802_11_elems elems; | 156 | struct ieee802_11_elems elems; |
158 | 157 | ||
159 | if (skb->len < 24 || | 158 | if (skb->len < 24 || |
@@ -175,11 +174,9 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb) | |||
175 | 174 | ||
176 | elements = mgmt->u.probe_resp.variable; | 175 | elements = mgmt->u.probe_resp.variable; |
177 | baselen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); | 176 | baselen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); |
178 | beacon = false; | ||
179 | } else { | 177 | } else { |
180 | baselen = offsetof(struct ieee80211_mgmt, u.beacon.variable); | 178 | baselen = offsetof(struct ieee80211_mgmt, u.beacon.variable); |
181 | elements = mgmt->u.beacon.variable; | 179 | elements = mgmt->u.beacon.variable; |
182 | beacon = true; | ||
183 | } | 180 | } |
184 | 181 | ||
185 | if (baselen > skb->len) | 182 | if (baselen > skb->len) |
@@ -335,7 +332,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local) | |||
335 | ieee80211_offchannel_stop_vifs(local); | 332 | ieee80211_offchannel_stop_vifs(local); |
336 | 333 | ||
337 | /* ensure nullfunc is transmitted before leaving operating channel */ | 334 | /* ensure nullfunc is transmitted before leaving operating channel */ |
338 | drv_flush(local, false); | 335 | ieee80211_flush_queues(local, NULL); |
339 | 336 | ||
340 | ieee80211_configure_filter(local); | 337 | ieee80211_configure_filter(local); |
341 | 338 | ||
@@ -671,7 +668,7 @@ static void ieee80211_scan_state_resume(struct ieee80211_local *local, | |||
671 | ieee80211_offchannel_stop_vifs(local); | 668 | ieee80211_offchannel_stop_vifs(local); |
672 | 669 | ||
673 | if (local->ops->flush) { | 670 | if (local->ops->flush) { |
674 | drv_flush(local, false); | 671 | ieee80211_flush_queues(local, NULL); |
675 | *next_delay = 0; | 672 | *next_delay = 0; |
676 | } else | 673 | } else |
677 | *next_delay = HZ / 10; | 674 | *next_delay = HZ / 10; |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 85458a28ffa0..11216bc13b27 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -556,6 +556,15 @@ static inline void __bss_tim_clear(u8 *tim, u16 id) | |||
556 | tim[id / 8] &= ~(1 << (id % 8)); | 556 | tim[id / 8] &= ~(1 << (id % 8)); |
557 | } | 557 | } |
558 | 558 | ||
559 | static inline bool __bss_tim_get(u8 *tim, u16 id) | ||
560 | { | ||
561 | /* | ||
562 | * This format has been mandated by the IEEE specifications, | ||
563 | * so this line may not be changed to use the test_bit() format. | ||
564 | */ | ||
565 | return tim[id / 8] & (1 << (id % 8)); | ||
566 | } | ||
567 | |||
559 | static unsigned long ieee80211_tids_for_ac(int ac) | 568 | static unsigned long ieee80211_tids_for_ac(int ac) |
560 | { | 569 | { |
561 | /* If we ever support TIDs > 7, this obviously needs to be adjusted */ | 570 | /* If we ever support TIDs > 7, this obviously needs to be adjusted */ |
@@ -636,6 +645,9 @@ void sta_info_recalc_tim(struct sta_info *sta) | |||
636 | done: | 645 | done: |
637 | spin_lock_bh(&local->tim_lock); | 646 | spin_lock_bh(&local->tim_lock); |
638 | 647 | ||
648 | if (indicate_tim == __bss_tim_get(ps->tim, id)) | ||
649 | goto out_unlock; | ||
650 | |||
639 | if (indicate_tim) | 651 | if (indicate_tim) |
640 | __bss_tim_set(ps->tim, id); | 652 | __bss_tim_set(ps->tim, id); |
641 | else | 653 | else |
@@ -647,6 +659,7 @@ void sta_info_recalc_tim(struct sta_info *sta) | |||
647 | local->tim_in_locked_section = false; | 659 | local->tim_in_locked_section = false; |
648 | } | 660 | } |
649 | 661 | ||
662 | out_unlock: | ||
650 | spin_unlock_bh(&local->tim_lock); | 663 | spin_unlock_bh(&local->tim_lock); |
651 | } | 664 | } |
652 | 665 | ||
@@ -770,8 +783,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta) | |||
770 | { | 783 | { |
771 | struct ieee80211_local *local; | 784 | struct ieee80211_local *local; |
772 | struct ieee80211_sub_if_data *sdata; | 785 | struct ieee80211_sub_if_data *sdata; |
773 | int ret, i; | 786 | int ret; |
774 | bool have_key = false; | ||
775 | 787 | ||
776 | might_sleep(); | 788 | might_sleep(); |
777 | 789 | ||
@@ -798,22 +810,8 @@ int __must_check __sta_info_destroy(struct sta_info *sta) | |||
798 | 810 | ||
799 | list_del_rcu(&sta->list); | 811 | list_del_rcu(&sta->list); |
800 | 812 | ||
801 | mutex_lock(&local->key_mtx); | 813 | /* this always calls synchronize_net() */ |
802 | for (i = 0; i < NUM_DEFAULT_KEYS; i++) { | 814 | ieee80211_free_sta_keys(local, sta); |
803 | __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i]), | ||
804 | true); | ||
805 | have_key = true; | ||
806 | } | ||
807 | if (sta->ptk) { | ||
808 | __ieee80211_key_free(key_mtx_dereference(local, sta->ptk), | ||
809 | true); | ||
810 | have_key = true; | ||
811 | } | ||
812 | |||
813 | mutex_unlock(&local->key_mtx); | ||
814 | |||
815 | if (!have_key) | ||
816 | synchronize_net(); | ||
817 | 815 | ||
818 | sta->dead = true; | 816 | sta->dead = true; |
819 | 817 | ||
@@ -1399,30 +1397,16 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw, | |||
1399 | } | 1397 | } |
1400 | EXPORT_SYMBOL(ieee80211_sta_block_awake); | 1398 | EXPORT_SYMBOL(ieee80211_sta_block_awake); |
1401 | 1399 | ||
1402 | void ieee80211_sta_eosp_irqsafe(struct ieee80211_sta *pubsta) | 1400 | void ieee80211_sta_eosp(struct ieee80211_sta *pubsta) |
1403 | { | 1401 | { |
1404 | struct sta_info *sta = container_of(pubsta, struct sta_info, sta); | 1402 | struct sta_info *sta = container_of(pubsta, struct sta_info, sta); |
1405 | struct ieee80211_local *local = sta->local; | 1403 | struct ieee80211_local *local = sta->local; |
1406 | struct sk_buff *skb; | ||
1407 | struct skb_eosp_msg_data *data; | ||
1408 | 1404 | ||
1409 | trace_api_eosp(local, pubsta); | 1405 | trace_api_eosp(local, pubsta); |
1410 | 1406 | ||
1411 | skb = alloc_skb(0, GFP_ATOMIC); | 1407 | clear_sta_flag(sta, WLAN_STA_SP); |
1412 | if (!skb) { | ||
1413 | /* too bad ... but race is better than loss */ | ||
1414 | clear_sta_flag(sta, WLAN_STA_SP); | ||
1415 | return; | ||
1416 | } | ||
1417 | |||
1418 | data = (void *)skb->cb; | ||
1419 | memcpy(data->sta, pubsta->addr, ETH_ALEN); | ||
1420 | memcpy(data->iface, sta->sdata->vif.addr, ETH_ALEN); | ||
1421 | skb->pkt_type = IEEE80211_EOSP_MSG; | ||
1422 | skb_queue_tail(&local->skb_queue, skb); | ||
1423 | tasklet_schedule(&local->tasklet); | ||
1424 | } | 1408 | } |
1425 | EXPORT_SYMBOL(ieee80211_sta_eosp_irqsafe); | 1409 | EXPORT_SYMBOL(ieee80211_sta_eosp); |
1426 | 1410 | ||
1427 | void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta, | 1411 | void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta, |
1428 | u8 tid, bool buffered) | 1412 | u8 tid, bool buffered) |
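The sta_info.c hunks above add __bss_tim_get() so sta_info_recalc_tim() can skip rewriting the TIM when the bit already has the right value; as the in-tree comment notes, the byte id/8, bit id%8 layout is fixed by the 802.11 spec, which is why test_bit() isn't used. A standalone sketch of that bitmap access and the "only update on change" check:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void tim_set(uint8_t *tim, unsigned int id)
{
	tim[id / 8] |= 1 << (id % 8);
}

static void tim_clear(uint8_t *tim, unsigned int id)
{
	tim[id / 8] &= ~(1 << (id % 8));
}

static int tim_get(const uint8_t *tim, unsigned int id)
{
	return !!(tim[id / 8] & (1 << (id % 8)));
}

int main(void)
{
	uint8_t tim[32];
	unsigned int aid = 13;
	int indicate = 1;

	memset(tim, 0, sizeof(tim));
	if (indicate != tim_get(tim, aid)) {	/* touch the TIM only if the bit changes */
		if (indicate)
			tim_set(tim, aid);
		else
			tim_clear(tim, aid);
	}
	printf("AID %u -> byte %u bit %u, set=%d\n",
	       aid, aid / 8, aid % 8, tim_get(tim, aid));
	return 0;
}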
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index e5868c32d1a3..adc30045f99e 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -333,7 +333,8 @@ struct sta_info { | |||
333 | unsigned long driver_buffered_tids; | 333 | unsigned long driver_buffered_tids; |
334 | 334 | ||
335 | /* Updated from RX path only, no locking requirements */ | 335 | /* Updated from RX path only, no locking requirements */ |
336 | unsigned long rx_packets, rx_bytes; | 336 | unsigned long rx_packets; |
337 | u64 rx_bytes; | ||
337 | unsigned long wep_weak_iv_count; | 338 | unsigned long wep_weak_iv_count; |
338 | unsigned long last_rx; | 339 | unsigned long last_rx; |
339 | long last_connected; | 340 | long last_connected; |
@@ -353,9 +354,9 @@ struct sta_info { | |||
353 | unsigned int fail_avg; | 354 | unsigned int fail_avg; |
354 | 355 | ||
355 | /* Updated from TX path only, no locking requirements */ | 356 | /* Updated from TX path only, no locking requirements */ |
356 | unsigned long tx_packets; | 357 | u32 tx_fragments; |
357 | unsigned long tx_bytes; | 358 | u64 tx_packets[IEEE80211_NUM_ACS]; |
358 | unsigned long tx_fragments; | 359 | u64 tx_bytes[IEEE80211_NUM_ACS]; |
359 | struct ieee80211_tx_rate last_tx_rate; | 360 | struct ieee80211_tx_rate last_tx_rate; |
360 | int last_rx_rate_idx; | 361 | int last_rx_rate_idx; |
361 | u32 last_rx_rate_flag; | 362 | u32 last_rx_rate_flag; |
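The sta_info.h hunk widens the byte counters to u64 (presumably so they do not wrap on 32-bit builds, where unsigned long is 32 bits) and splits the TX counters per access category. Anything that previously read a single total would now sum the per-AC entries. A hedged sketch of such a summation follows; the helper name is illustrative, and the value 4 assumes IEEE80211_NUM_ACS covers the four WMM access categories.

    /* Illustrative only: deriving cumulative totals from per-AC counters. */
    #include <stdint.h>

    #define NUM_ACS 4   /* assumption: IEEE80211_NUM_ACS (VO/VI/BE/BK) */

    struct tx_stats {
            uint64_t tx_packets[NUM_ACS];
            uint64_t tx_bytes[NUM_ACS];
    };

    static void tx_stats_totals(const struct tx_stats *s,
                                uint64_t *packets, uint64_t *bytes)
    {
            int ac;

            *packets = 0;
            *bytes = 0;
            for (ac = 0; ac < NUM_ACS; ac++) {
                    *packets += s->tx_packets[ac];
                    *bytes += s->tx_bytes[ac];
            }
    }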
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index e7db2b804e0c..c5899797a8d4 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h | |||
@@ -431,6 +431,30 @@ TRACE_EVENT(drv_prepare_multicast, | |||
431 | ) | 431 | ) |
432 | ); | 432 | ); |
433 | 433 | ||
434 | TRACE_EVENT(drv_set_multicast_list, | ||
435 | TP_PROTO(struct ieee80211_local *local, | ||
436 | struct ieee80211_sub_if_data *sdata, int mc_count), | ||
437 | |||
438 | TP_ARGS(local, sdata, mc_count), | ||
439 | |||
440 | TP_STRUCT__entry( | ||
441 | LOCAL_ENTRY | ||
442 | __field(bool, allmulti) | ||
443 | __field(int, mc_count) | ||
444 | ), | ||
445 | |||
446 | TP_fast_assign( | ||
447 | LOCAL_ASSIGN; | ||
448 | __entry->allmulti = sdata->flags & IEEE80211_SDATA_ALLMULTI; | ||
449 | __entry->mc_count = mc_count; | ||
450 | ), | ||
451 | |||
452 | TP_printk( | ||
453 | LOCAL_PR_FMT " configure mc filter, count=%d, allmulti=%d", | ||
454 | LOCAL_PR_ARG, __entry->mc_count, __entry->allmulti | ||
455 | ) | ||
456 | ); | ||
457 | |||
434 | TRACE_EVENT(drv_configure_filter, | 458 | TRACE_EVENT(drv_configure_filter, |
435 | TP_PROTO(struct ieee80211_local *local, | 459 | TP_PROTO(struct ieee80211_local *local, |
436 | unsigned int changed_flags, | 460 | unsigned int changed_flags, |
@@ -940,23 +964,26 @@ TRACE_EVENT(drv_get_survey, | |||
940 | ); | 964 | ); |
941 | 965 | ||
942 | TRACE_EVENT(drv_flush, | 966 | TRACE_EVENT(drv_flush, |
943 | TP_PROTO(struct ieee80211_local *local, bool drop), | 967 | TP_PROTO(struct ieee80211_local *local, |
968 | u32 queues, bool drop), | ||
944 | 969 | ||
945 | TP_ARGS(local, drop), | 970 | TP_ARGS(local, queues, drop), |
946 | 971 | ||
947 | TP_STRUCT__entry( | 972 | TP_STRUCT__entry( |
948 | LOCAL_ENTRY | 973 | LOCAL_ENTRY |
949 | __field(bool, drop) | 974 | __field(bool, drop) |
975 | __field(u32, queues) | ||
950 | ), | 976 | ), |
951 | 977 | ||
952 | TP_fast_assign( | 978 | TP_fast_assign( |
953 | LOCAL_ASSIGN; | 979 | LOCAL_ASSIGN; |
954 | __entry->drop = drop; | 980 | __entry->drop = drop; |
981 | __entry->queues = queues; | ||
955 | ), | 982 | ), |
956 | 983 | ||
957 | TP_printk( | 984 | TP_printk( |
958 | LOCAL_PR_FMT " drop:%d", | 985 | LOCAL_PR_FMT " queues:0x%x drop:%d", |
959 | LOCAL_PR_ARG, __entry->drop | 986 | LOCAL_PR_ARG, __entry->queues, __entry->drop |
960 | ) | 987 | ) |
961 | ); | 988 | ); |
962 | 989 | ||
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 4e8a86163fc7..9e67cc97b87b 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -233,6 +233,7 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx) | |||
233 | 233 | ||
234 | if (local->hw.conf.flags & IEEE80211_CONF_PS) { | 234 | if (local->hw.conf.flags & IEEE80211_CONF_PS) { |
235 | ieee80211_stop_queues_by_reason(&local->hw, | 235 | ieee80211_stop_queues_by_reason(&local->hw, |
236 | IEEE80211_MAX_QUEUE_MAP, | ||
236 | IEEE80211_QUEUE_STOP_REASON_PS); | 237 | IEEE80211_QUEUE_STOP_REASON_PS); |
237 | ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED; | 238 | ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED; |
238 | ieee80211_queue_work(&local->hw, | 239 | ieee80211_queue_work(&local->hw, |
@@ -991,15 +992,18 @@ static ieee80211_tx_result debug_noinline | |||
991 | ieee80211_tx_h_stats(struct ieee80211_tx_data *tx) | 992 | ieee80211_tx_h_stats(struct ieee80211_tx_data *tx) |
992 | { | 993 | { |
993 | struct sk_buff *skb; | 994 | struct sk_buff *skb; |
995 | int ac = -1; | ||
994 | 996 | ||
995 | if (!tx->sta) | 997 | if (!tx->sta) |
996 | return TX_CONTINUE; | 998 | return TX_CONTINUE; |
997 | 999 | ||
998 | tx->sta->tx_packets++; | ||
999 | skb_queue_walk(&tx->skbs, skb) { | 1000 | skb_queue_walk(&tx->skbs, skb) { |
1001 | ac = skb_get_queue_mapping(skb); | ||
1000 | tx->sta->tx_fragments++; | 1002 | tx->sta->tx_fragments++; |
1001 | tx->sta->tx_bytes += skb->len; | 1003 | tx->sta->tx_bytes[ac] += skb->len; |
1002 | } | 1004 | } |
1005 | if (ac >= 0) | ||
1006 | tx->sta->tx_packets[ac]++; | ||
1003 | 1007 | ||
1004 | return TX_CONTINUE; | 1008 | return TX_CONTINUE; |
1005 | } | 1009 | } |
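In the updated ieee80211_tx_h_stats, ac starts at -1 and is only assigned inside the fragment walk, so tx_packets is bumped at most once per frame and only when at least one fragment was actually queued. A minimal standalone sketch of that sentinel pattern (names and numbers are illustrative):

    /* Sketch: count one packet per burst of fragments, attributed to the
     * queue of the last fragment walked; skip the count if nothing was sent. */
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_QUEUES 4

    int main(void)
    {
            uint64_t packets[NUM_QUEUES] = { 0 };
            uint64_t bytes[NUM_QUEUES] = { 0 };
            int frag_queue[] = { 2, 2, 2 };      /* queue mapping per fragment */
            size_t frag_len[] = { 100, 100, 60 };
            int i, ac = -1;                      /* -1: no fragment queued yet */

            for (i = 0; i < 3; i++) {
                    ac = frag_queue[i];
                    bytes[ac] += frag_len[i];
            }
            if (ac >= 0)                         /* only count a packet if sent */
                    packets[ac]++;

            printf("queue %d: %llu packets, %llu bytes\n", ac,
                   (unsigned long long)packets[ac],
                   (unsigned long long)bytes[ac]);
            return 0;
    }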
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index b7a856e3281b..a7368870c8ee 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -453,7 +453,8 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, | |||
453 | } | 453 | } |
454 | 454 | ||
455 | void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, | 455 | void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, |
456 | enum queue_stop_reason reason) | 456 | unsigned long queues, |
457 | enum queue_stop_reason reason) | ||
457 | { | 458 | { |
458 | struct ieee80211_local *local = hw_to_local(hw); | 459 | struct ieee80211_local *local = hw_to_local(hw); |
459 | unsigned long flags; | 460 | unsigned long flags; |
@@ -461,7 +462,7 @@ void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, | |||
461 | 462 | ||
462 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); | 463 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); |
463 | 464 | ||
464 | for (i = 0; i < hw->queues; i++) | 465 | for_each_set_bit(i, &queues, hw->queues) |
465 | __ieee80211_stop_queue(hw, i, reason); | 466 | __ieee80211_stop_queue(hw, i, reason); |
466 | 467 | ||
467 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | 468 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); |
@@ -469,7 +470,7 @@ void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, | |||
469 | 470 | ||
470 | void ieee80211_stop_queues(struct ieee80211_hw *hw) | 471 | void ieee80211_stop_queues(struct ieee80211_hw *hw) |
471 | { | 472 | { |
472 | ieee80211_stop_queues_by_reason(hw, | 473 | ieee80211_stop_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, |
473 | IEEE80211_QUEUE_STOP_REASON_DRIVER); | 474 | IEEE80211_QUEUE_STOP_REASON_DRIVER); |
474 | } | 475 | } |
475 | EXPORT_SYMBOL(ieee80211_stop_queues); | 476 | EXPORT_SYMBOL(ieee80211_stop_queues); |
@@ -491,6 +492,7 @@ int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue) | |||
491 | EXPORT_SYMBOL(ieee80211_queue_stopped); | 492 | EXPORT_SYMBOL(ieee80211_queue_stopped); |
492 | 493 | ||
493 | void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, | 494 | void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, |
495 | unsigned long queues, | ||
494 | enum queue_stop_reason reason) | 496 | enum queue_stop_reason reason) |
495 | { | 497 | { |
496 | struct ieee80211_local *local = hw_to_local(hw); | 498 | struct ieee80211_local *local = hw_to_local(hw); |
@@ -499,7 +501,7 @@ void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, | |||
499 | 501 | ||
500 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); | 502 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); |
501 | 503 | ||
502 | for (i = 0; i < hw->queues; i++) | 504 | for_each_set_bit(i, &queues, hw->queues) |
503 | __ieee80211_wake_queue(hw, i, reason); | 505 | __ieee80211_wake_queue(hw, i, reason); |
504 | 506 | ||
505 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | 507 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); |
@@ -507,10 +509,42 @@ void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, | |||
507 | 509 | ||
508 | void ieee80211_wake_queues(struct ieee80211_hw *hw) | 510 | void ieee80211_wake_queues(struct ieee80211_hw *hw) |
509 | { | 511 | { |
510 | ieee80211_wake_queues_by_reason(hw, IEEE80211_QUEUE_STOP_REASON_DRIVER); | 512 | ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, |
513 | IEEE80211_QUEUE_STOP_REASON_DRIVER); | ||
511 | } | 514 | } |
512 | EXPORT_SYMBOL(ieee80211_wake_queues); | 515 | EXPORT_SYMBOL(ieee80211_wake_queues); |
513 | 516 | ||
517 | void ieee80211_flush_queues(struct ieee80211_local *local, | ||
518 | struct ieee80211_sub_if_data *sdata) | ||
519 | { | ||
520 | u32 queues; | ||
521 | |||
522 | if (!local->ops->flush) | ||
523 | return; | ||
524 | |||
525 | if (sdata && local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) { | ||
526 | int ac; | ||
527 | |||
528 | queues = 0; | ||
529 | |||
530 | for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) | ||
531 | queues |= BIT(sdata->vif.hw_queue[ac]); | ||
532 | if (sdata->vif.cab_queue != IEEE80211_INVAL_HW_QUEUE) | ||
533 | queues |= BIT(sdata->vif.cab_queue); | ||
534 | } else { | ||
535 | /* all queues */ | ||
536 | queues = BIT(local->hw.queues) - 1; | ||
537 | } | ||
538 | |||
539 | ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP, | ||
540 | IEEE80211_QUEUE_STOP_REASON_FLUSH); | ||
541 | |||
542 | drv_flush(local, queues, false); | ||
543 | |||
544 | ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP, | ||
545 | IEEE80211_QUEUE_STOP_REASON_FLUSH); | ||
546 | } | ||
547 | |||
514 | void ieee80211_iterate_active_interfaces( | 548 | void ieee80211_iterate_active_interfaces( |
515 | struct ieee80211_hw *hw, u32 iter_flags, | 549 | struct ieee80211_hw *hw, u32 iter_flags, |
516 | void (*iterator)(void *data, u8 *mac, | 550 | void (*iterator)(void *data, u8 *mac, |
@@ -1651,8 +1685,8 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1651 | mutex_unlock(&local->sta_mtx); | 1685 | mutex_unlock(&local->sta_mtx); |
1652 | } | 1686 | } |
1653 | 1687 | ||
1654 | ieee80211_wake_queues_by_reason(hw, | 1688 | ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, |
1655 | IEEE80211_QUEUE_STOP_REASON_SUSPEND); | 1689 | IEEE80211_QUEUE_STOP_REASON_SUSPEND); |
1656 | 1690 | ||
1657 | /* | 1691 | /* |
1658 | * If this is for hw restart things are still running. | 1692 | * If this is for hw restart things are still running. |
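The util.c changes above make the queue stop/wake API take a bitmap and walk it with for_each_set_bit, and the new ieee80211_flush_queues builds a per-interface mask with BIT() (or "all queues" as BIT(hw->queues) - 1). A standalone sketch of that bitmask approach, using a plain loop as a simplified stand-in for for_each_set_bit:

    /* Sketch: build a queue mask and act only on the set bits. */
    #include <stdio.h>

    #define BIT(n)      (1UL << (n))
    #define NUM_QUEUES  8

    static void stop_queue(int q)
    {
            printf("stopping queue %d\n", q);
    }

    int main(void)
    {
            unsigned long queues;
            int i;

            /* per-interface mask, e.g. four AC queues plus a cab queue */
            queues = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(6);

            /* all queues would be: queues = BIT(NUM_QUEUES) - 1; */

            for (i = 0; i < NUM_QUEUES; i++)
                    if (queues & BIT(i))
                            stop_queue(i);
            return 0;
    }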
diff --git a/net/wireless/core.c b/net/wireless/core.c index 00be55530a32..84c9ad7e1dca 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -1126,8 +1126,10 @@ static int __init cfg80211_init(void) | |||
1126 | goto out_fail_reg; | 1126 | goto out_fail_reg; |
1127 | 1127 | ||
1128 | cfg80211_wq = create_singlethread_workqueue("cfg80211"); | 1128 | cfg80211_wq = create_singlethread_workqueue("cfg80211"); |
1129 | if (!cfg80211_wq) | 1129 | if (!cfg80211_wq) { |
1130 | err = -ENOMEM; | ||
1130 | goto out_fail_wq; | 1131 | goto out_fail_wq; |
1132 | } | ||
1131 | 1133 | ||
1132 | return 0; | 1134 | return 0; |
1133 | 1135 | ||
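The cfg80211_init hunk fixes a silent failure: without assigning err before the goto, the function would unwind the workqueue failure but still return the stale zero from the previous step. A minimal sketch of the goto-unwind idiom with the error code set on every failing branch; the helper names are generic stand-ins, not the kernel's.

    /* Sketch: every failure path must set err before jumping, otherwise
     * the caller sees success even though setup was rolled back. */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int register_thing(void) { return 0; }       /* stub: succeeds */
    static void unregister_thing(void) { }
    static void *create_queue(void) { return NULL; }    /* stub: fails */

    static int init_example(void)
    {
            void *wq;
            int err;

            err = register_thing();
            if (err)
                    goto out;

            wq = create_queue();
            if (!wq) {
                    err = -ENOMEM;   /* the fix: set err before the goto */
                    goto out_unregister;
            }

            free(wq);                /* not reached with the failing stub */
            return 0;

    out_unregister:
            unregister_thing();
    out:
            return err;
    }

    int main(void)
    {
            printf("init_example() = %d\n", init_example());
            return 0;
    }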
diff --git a/net/wireless/core.h b/net/wireless/core.h index b5174f65cc9a..124e5e773fbc 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -500,14 +500,12 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, | |||
500 | void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, | 500 | void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, |
501 | enum nl80211_iftype iftype, int num); | 501 | enum nl80211_iftype iftype, int num); |
502 | 502 | ||
503 | |||
504 | void cfg80211_leave(struct cfg80211_registered_device *rdev, | 503 | void cfg80211_leave(struct cfg80211_registered_device *rdev, |
505 | struct wireless_dev *wdev); | 504 | struct wireless_dev *wdev); |
506 | 505 | ||
507 | void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, | 506 | void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, |
508 | struct wireless_dev *wdev); | 507 | struct wireless_dev *wdev); |
509 | 508 | ||
510 | |||
511 | #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 | 509 | #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 |
512 | 510 | ||
513 | #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS | 511 | #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS |
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index ff6f7ae35586..a9dc5c736df0 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -228,6 +228,7 @@ void cfg80211_conn_work(struct work_struct *work) | |||
228 | rtnl_lock(); | 228 | rtnl_lock(); |
229 | cfg80211_lock_rdev(rdev); | 229 | cfg80211_lock_rdev(rdev); |
230 | mutex_lock(&rdev->devlist_mtx); | 230 | mutex_lock(&rdev->devlist_mtx); |
231 | mutex_lock(&rdev->sched_scan_mtx); | ||
231 | 232 | ||
232 | list_for_each_entry(wdev, &rdev->wdev_list, list) { | 233 | list_for_each_entry(wdev, &rdev->wdev_list, list) { |
233 | wdev_lock(wdev); | 234 | wdev_lock(wdev); |
@@ -235,7 +236,7 @@ void cfg80211_conn_work(struct work_struct *work) | |||
235 | wdev_unlock(wdev); | 236 | wdev_unlock(wdev); |
236 | continue; | 237 | continue; |
237 | } | 238 | } |
238 | if (wdev->sme_state != CFG80211_SME_CONNECTING) { | 239 | if (wdev->sme_state != CFG80211_SME_CONNECTING || !wdev->conn) { |
239 | wdev_unlock(wdev); | 240 | wdev_unlock(wdev); |
240 | continue; | 241 | continue; |
241 | } | 242 | } |
@@ -252,6 +253,7 @@ void cfg80211_conn_work(struct work_struct *work) | |||
252 | wdev_unlock(wdev); | 253 | wdev_unlock(wdev); |
253 | } | 254 | } |
254 | 255 | ||
256 | mutex_unlock(&rdev->sched_scan_mtx); | ||
255 | mutex_unlock(&rdev->devlist_mtx); | 257 | mutex_unlock(&rdev->devlist_mtx); |
256 | cfg80211_unlock_rdev(rdev); | 258 | cfg80211_unlock_rdev(rdev); |
257 | rtnl_unlock(); | 259 | rtnl_unlock(); |
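The sme.c hunk holds rdev->sched_scan_mtx across the whole connect-work walk and additionally skips wdevs whose conn pointer is already gone, so the work never touches a connection torn down concurrently. A hedged sketch of that "recheck state under the lock before using it" pattern; the types and names are illustrative, not cfg80211's.

    /* Sketch: re-validate shared state while holding the lock, as the
     * added "|| !wdev->conn" check does. */
    #include <pthread.h>
    #include <stdio.h>

    struct conn { int state; };

    struct wdev {
            pthread_mutex_t lock;
            int sme_state;          /* 0 = idle, 1 = connecting */
            struct conn *conn;      /* may be torn down by another path */
    };

    static void conn_work(struct wdev *w)
    {
            pthread_mutex_lock(&w->lock);
            /* both conditions are checked while the lock is held */
            if (w->sme_state != 1 || !w->conn) {
                    pthread_mutex_unlock(&w->lock);
                    return;         /* idle, or conn already torn down */
            }
            printf("driving connection, state=%d\n", w->conn->state);
            pthread_mutex_unlock(&w->lock);
    }

    int main(void)
    {
            struct conn c = { .state = 7 };
            struct wdev w = { PTHREAD_MUTEX_INITIALIZER, 1, NULL };

            conn_work(&w);          /* conn == NULL: safely skipped */
            w.conn = &c;
            conn_work(&w);          /* handled under the lock */
            return 0;
    }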