aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorJohan Hedberg <johan.hedberg@intel.com>2013-03-05 13:37:49 -0500
committerGustavo Padovan <gustavo.padovan@collabora.co.uk>2013-03-08 08:40:27 -0500
commit42c6b129cd8c2aa5012a78ec39672e7052cc677a (patch)
tree9deac79fe2bac41bf7695c0be6ec151f2afc55eb /net
parent9238f36a5a5097018b90baa42c473d2f916a46f5 (diff)
Bluetooth: Use async requests internally in hci_req_sync
This patch converts the hci_req_sync() procedure to internally use the asynchronous HCI requests. The hci_req_sync mechanism relies on hci_req_complete() calls from hci_event.c into hci_core.c whenever a HCI command completes. This is very similar to what asynchronous requests do and makes the conversion fairly straightforward by converting hci_req_complete into a request complete callback. By this change hci_req_complete (renamed to hci_req_sync_complete) becomes private to hci_core.c and all calls to it can be removed from hci_event.c. The commands in each hci_req_sync procedure are collected into their own request by passing the hci_request pointer to the request callback (instead of the hci_dev pointer). The one slight exception is the HCI init request which has the special handling of HCI driver specific initialization commands. These commands are run in their own request prior to the "main" init request. One other extra change that this patch must contain is the handling of spontaneous HCI reset complete events that some controllers exhibit. These were previously handled in the hci_req_complete function but the right place for them now becomes the hci_req_cmd_complete function. Signed-off-by: Johan Hedberg <johan.hedberg@intel.com> Acked-by: Marcel Holtmann <marcel@holtmann.org> Signed-off-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
Diffstat (limited to 'net')
-rw-r--r--net/bluetooth/hci_core.c271
-rw-r--r--net/bluetooth/hci_event.c78
2 files changed, 156 insertions, 193 deletions
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 0ada2ec36e7b..6218eced1530 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -57,36 +57,9 @@ static void hci_notify(struct hci_dev *hdev, int event)
57 57
58/* ---- HCI requests ---- */ 58/* ---- HCI requests ---- */
59 59
60void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result) 60static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
61{ 61{
62 BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result); 62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
63
64 /* If this is the init phase check if the completed command matches
65 * the last init command, and if not just return.
66 */
67 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
68 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
69 u16 opcode = __le16_to_cpu(sent->opcode);
70 struct sk_buff *skb;
71
72 /* Some CSR based controllers generate a spontaneous
73 * reset complete event during init and any pending
74 * command will never be completed. In such a case we
75 * need to resend whatever was the last sent
76 * command.
77 */
78
79 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
80 return;
81
82 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
83 if (skb) {
84 skb_queue_head(&hdev->cmd_q, skb);
85 queue_work(hdev->workqueue, &hdev->cmd_work);
86 }
87
88 return;
89 }
90 63
91 if (hdev->req_status == HCI_REQ_PEND) { 64 if (hdev->req_status == HCI_REQ_PEND) {
92 hdev->req_result = result; 65 hdev->req_result = result;
@@ -108,26 +81,36 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
108 81
109/* Execute request and wait for completion. */ 82/* Execute request and wait for completion. */
110static int __hci_req_sync(struct hci_dev *hdev, 83static int __hci_req_sync(struct hci_dev *hdev,
111 void (*req)(struct hci_dev *hdev, unsigned long opt), 84 void (*func)(struct hci_request *req,
85 unsigned long opt),
112 unsigned long opt, __u32 timeout) 86 unsigned long opt, __u32 timeout)
113{ 87{
88 struct hci_request req;
114 DECLARE_WAITQUEUE(wait, current); 89 DECLARE_WAITQUEUE(wait, current);
115 int err = 0; 90 int err = 0;
116 91
117 BT_DBG("%s start", hdev->name); 92 BT_DBG("%s start", hdev->name);
118 93
94 hci_req_init(&req, hdev);
95
119 hdev->req_status = HCI_REQ_PEND; 96 hdev->req_status = HCI_REQ_PEND;
120 97
121 add_wait_queue(&hdev->req_wait_q, &wait); 98 add_wait_queue(&hdev->req_wait_q, &wait);
122 set_current_state(TASK_INTERRUPTIBLE); 99 set_current_state(TASK_INTERRUPTIBLE);
123 100
124 req(hdev, opt); 101 func(&req, opt);
125 102
126 /* If the request didn't send any commands return immediately */ 103 err = hci_req_run(&req, hci_req_sync_complete);
127 if (skb_queue_empty(&hdev->cmd_q) && atomic_read(&hdev->cmd_cnt)) { 104 if (err < 0) {
128 hdev->req_status = 0; 105 hdev->req_status = 0;
129 remove_wait_queue(&hdev->req_wait_q, &wait); 106 remove_wait_queue(&hdev->req_wait_q, &wait);
130 return err; 107 /* req_run will fail if the request did not add any
108 * commands to the queue, something that can happen when
109 * a request with conditionals doesn't trigger any
110 * commands to be sent. This is normal behavior and
111 * should not trigger an error return.
112 */
113 return 0;
131 } 114 }
132 115
133 schedule_timeout(timeout); 116 schedule_timeout(timeout);
@@ -159,7 +142,8 @@ static int __hci_req_sync(struct hci_dev *hdev,
159} 142}
160 143
161static int hci_req_sync(struct hci_dev *hdev, 144static int hci_req_sync(struct hci_dev *hdev,
162 void (*req)(struct hci_dev *hdev, unsigned long opt), 145 void (*req)(struct hci_request *req,
146 unsigned long opt),
163 unsigned long opt, __u32 timeout) 147 unsigned long opt, __u32 timeout)
164{ 148{
165 int ret; 149 int ret;
@@ -175,72 +159,80 @@ static int hci_req_sync(struct hci_dev *hdev,
175 return ret; 159 return ret;
176} 160}
177 161
178static void hci_reset_req(struct hci_dev *hdev, unsigned long opt) 162static void hci_reset_req(struct hci_request *req, unsigned long opt)
179{ 163{
180 BT_DBG("%s %ld", hdev->name, opt); 164 BT_DBG("%s %ld", req->hdev->name, opt);
181 165
182 /* Reset device */ 166 /* Reset device */
183 set_bit(HCI_RESET, &hdev->flags); 167 set_bit(HCI_RESET, &req->hdev->flags);
184 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); 168 hci_req_add(req, HCI_OP_RESET, 0, NULL);
185} 169}
186 170
187static void bredr_init(struct hci_dev *hdev) 171static void bredr_init(struct hci_request *req)
188{ 172{
189 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; 173 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
190 174
191 /* Read Local Supported Features */ 175 /* Read Local Supported Features */
192 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); 176 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
193 177
194 /* Read Local Version */ 178 /* Read Local Version */
195 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL); 179 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
196 180
197 /* Read BD Address */ 181 /* Read BD Address */
198 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL); 182 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
199} 183}
200 184
201static void amp_init(struct hci_dev *hdev) 185static void amp_init(struct hci_request *req)
202{ 186{
203 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED; 187 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
204 188
205 /* Read Local Version */ 189 /* Read Local Version */
206 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL); 190 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
207 191
208 /* Read Local AMP Info */ 192 /* Read Local AMP Info */
209 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL); 193 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
210 194
211 /* Read Data Blk size */ 195 /* Read Data Blk size */
212 hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL); 196 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
213} 197}
214 198
215static void hci_init1_req(struct hci_dev *hdev, unsigned long opt) 199static void hci_init1_req(struct hci_request *req, unsigned long opt)
216{ 200{
201 struct hci_dev *hdev = req->hdev;
202 struct hci_request init_req;
217 struct sk_buff *skb; 203 struct sk_buff *skb;
218 204
219 BT_DBG("%s %ld", hdev->name, opt); 205 BT_DBG("%s %ld", hdev->name, opt);
220 206
221 /* Driver initialization */ 207 /* Driver initialization */
222 208
209 hci_req_init(&init_req, hdev);
210
223 /* Special commands */ 211 /* Special commands */
224 while ((skb = skb_dequeue(&hdev->driver_init))) { 212 while ((skb = skb_dequeue(&hdev->driver_init))) {
225 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; 213 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
226 skb->dev = (void *) hdev; 214 skb->dev = (void *) hdev;
227 215
228 skb_queue_tail(&hdev->cmd_q, skb); 216 if (skb_queue_empty(&init_req.cmd_q))
229 queue_work(hdev->workqueue, &hdev->cmd_work); 217 bt_cb(skb)->req.start = true;
218
219 skb_queue_tail(&init_req.cmd_q, skb);
230 } 220 }
231 skb_queue_purge(&hdev->driver_init); 221 skb_queue_purge(&hdev->driver_init);
232 222
223 hci_req_run(&init_req, NULL);
224
233 /* Reset */ 225 /* Reset */
234 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) 226 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
235 hci_reset_req(hdev, 0); 227 hci_reset_req(req, 0);
236 228
237 switch (hdev->dev_type) { 229 switch (hdev->dev_type) {
238 case HCI_BREDR: 230 case HCI_BREDR:
239 bredr_init(hdev); 231 bredr_init(req);
240 break; 232 break;
241 233
242 case HCI_AMP: 234 case HCI_AMP:
243 amp_init(hdev); 235 amp_init(req);
244 break; 236 break;
245 237
246 default: 238 default:
@@ -249,53 +241,53 @@ static void hci_init1_req(struct hci_dev *hdev, unsigned long opt)
249 } 241 }
250} 242}
251 243
252static void bredr_setup(struct hci_dev *hdev) 244static void bredr_setup(struct hci_request *req)
253{ 245{
254 struct hci_cp_delete_stored_link_key cp; 246 struct hci_cp_delete_stored_link_key cp;
255 __le16 param; 247 __le16 param;
256 __u8 flt_type; 248 __u8 flt_type;
257 249
258 /* Read Buffer Size (ACL mtu, max pkt, etc.) */ 250 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
259 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL); 251 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
260 252
261 /* Read Class of Device */ 253 /* Read Class of Device */
262 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL); 254 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
263 255
264 /* Read Local Name */ 256 /* Read Local Name */
265 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL); 257 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
266 258
267 /* Read Voice Setting */ 259 /* Read Voice Setting */
268 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL); 260 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
269 261
270 /* Clear Event Filters */ 262 /* Clear Event Filters */
271 flt_type = HCI_FLT_CLEAR_ALL; 263 flt_type = HCI_FLT_CLEAR_ALL;
272 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type); 264 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
273 265
274 /* Connection accept timeout ~20 secs */ 266 /* Connection accept timeout ~20 secs */
275 param = __constant_cpu_to_le16(0x7d00); 267 param = __constant_cpu_to_le16(0x7d00);
276 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param); 268 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
277 269
278 bacpy(&cp.bdaddr, BDADDR_ANY); 270 bacpy(&cp.bdaddr, BDADDR_ANY);
279 cp.delete_all = 0x01; 271 cp.delete_all = 0x01;
280 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp); 272 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
281} 273}
282 274
283static void le_setup(struct hci_dev *hdev) 275static void le_setup(struct hci_request *req)
284{ 276{
285 /* Read LE Buffer Size */ 277 /* Read LE Buffer Size */
286 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL); 278 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
287 279
288 /* Read LE Local Supported Features */ 280 /* Read LE Local Supported Features */
289 hci_send_cmd(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL); 281 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
290 282
291 /* Read LE Advertising Channel TX Power */ 283 /* Read LE Advertising Channel TX Power */
292 hci_send_cmd(hdev, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL); 284 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
293 285
294 /* Read LE White List Size */ 286 /* Read LE White List Size */
295 hci_send_cmd(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL); 287 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
296 288
297 /* Read LE Supported States */ 289 /* Read LE Supported States */
298 hci_send_cmd(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL); 290 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
299} 291}
300 292
301static u8 hci_get_inquiry_mode(struct hci_dev *hdev) 293static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
@@ -326,17 +318,19 @@ static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
326 return 0x00; 318 return 0x00;
327} 319}
328 320
329static void hci_setup_inquiry_mode(struct hci_dev *hdev) 321static void hci_setup_inquiry_mode(struct hci_request *req)
330{ 322{
331 u8 mode; 323 u8 mode;
332 324
333 mode = hci_get_inquiry_mode(hdev); 325 mode = hci_get_inquiry_mode(req->hdev);
334 326
335 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode); 327 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
336} 328}
337 329
338static void hci_setup_event_mask(struct hci_dev *hdev) 330static void hci_setup_event_mask(struct hci_request *req)
339{ 331{
332 struct hci_dev *hdev = req->hdev;
333
340 /* The second byte is 0xff instead of 0x9f (two reserved bits 334 /* The second byte is 0xff instead of 0x9f (two reserved bits
341 * disabled) since a Broadcom 1.2 dongle doesn't respond to the 335 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
342 * command otherwise. 336 * command otherwise.
@@ -392,67 +386,70 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
392 if (lmp_le_capable(hdev)) 386 if (lmp_le_capable(hdev))
393 events[7] |= 0x20; /* LE Meta-Event */ 387 events[7] |= 0x20; /* LE Meta-Event */
394 388
395 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events); 389 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
396 390
397 if (lmp_le_capable(hdev)) { 391 if (lmp_le_capable(hdev)) {
398 memset(events, 0, sizeof(events)); 392 memset(events, 0, sizeof(events));
399 events[0] = 0x1f; 393 events[0] = 0x1f;
400 hci_send_cmd(hdev, HCI_OP_LE_SET_EVENT_MASK, 394 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
401 sizeof(events), events); 395 sizeof(events), events);
402 } 396 }
403} 397}
404 398
405static void hci_init2_req(struct hci_dev *hdev, unsigned long opt) 399static void hci_init2_req(struct hci_request *req, unsigned long opt)
406{ 400{
401 struct hci_dev *hdev = req->hdev;
402
407 if (lmp_bredr_capable(hdev)) 403 if (lmp_bredr_capable(hdev))
408 bredr_setup(hdev); 404 bredr_setup(req);
409 405
410 if (lmp_le_capable(hdev)) 406 if (lmp_le_capable(hdev))
411 le_setup(hdev); 407 le_setup(req);
412 408
413 hci_setup_event_mask(hdev); 409 hci_setup_event_mask(req);
414 410
415 if (hdev->hci_ver > BLUETOOTH_VER_1_1) 411 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
416 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); 412 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
417 413
418 if (lmp_ssp_capable(hdev)) { 414 if (lmp_ssp_capable(hdev)) {
419 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { 415 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
420 u8 mode = 0x01; 416 u8 mode = 0x01;
421 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 417 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
422 sizeof(mode), &mode); 418 sizeof(mode), &mode);
423 } else { 419 } else {
424 struct hci_cp_write_eir cp; 420 struct hci_cp_write_eir cp;
425 421
426 memset(hdev->eir, 0, sizeof(hdev->eir)); 422 memset(hdev->eir, 0, sizeof(hdev->eir));
427 memset(&cp, 0, sizeof(cp)); 423 memset(&cp, 0, sizeof(cp));
428 424
429 hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp); 425 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
430 } 426 }
431 } 427 }
432 428
433 if (lmp_inq_rssi_capable(hdev)) 429 if (lmp_inq_rssi_capable(hdev))
434 hci_setup_inquiry_mode(hdev); 430 hci_setup_inquiry_mode(req);
435 431
436 if (lmp_inq_tx_pwr_capable(hdev)) 432 if (lmp_inq_tx_pwr_capable(hdev))
437 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL); 433 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
438 434
439 if (lmp_ext_feat_capable(hdev)) { 435 if (lmp_ext_feat_capable(hdev)) {
440 struct hci_cp_read_local_ext_features cp; 436 struct hci_cp_read_local_ext_features cp;
441 437
442 cp.page = 0x01; 438 cp.page = 0x01;
443 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), 439 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
444 &cp); 440 sizeof(cp), &cp);
445 } 441 }
446 442
447 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) { 443 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
448 u8 enable = 1; 444 u8 enable = 1;
449 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable), 445 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
450 &enable); 446 &enable);
451 } 447 }
452} 448}
453 449
454static void hci_setup_link_policy(struct hci_dev *hdev) 450static void hci_setup_link_policy(struct hci_request *req)
455{ 451{
452 struct hci_dev *hdev = req->hdev;
456 struct hci_cp_write_def_link_policy cp; 453 struct hci_cp_write_def_link_policy cp;
457 u16 link_policy = 0; 454 u16 link_policy = 0;
458 455
@@ -466,11 +463,12 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
466 link_policy |= HCI_LP_PARK; 463 link_policy |= HCI_LP_PARK;
467 464
468 cp.policy = cpu_to_le16(link_policy); 465 cp.policy = cpu_to_le16(link_policy);
469 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp); 466 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
470} 467}
471 468
472static void hci_set_le_support(struct hci_dev *hdev) 469static void hci_set_le_support(struct hci_request *req)
473{ 470{
471 struct hci_dev *hdev = req->hdev;
474 struct hci_cp_write_le_host_supported cp; 472 struct hci_cp_write_le_host_supported cp;
475 473
476 memset(&cp, 0, sizeof(cp)); 474 memset(&cp, 0, sizeof(cp));
@@ -481,17 +479,19 @@ static void hci_set_le_support(struct hci_dev *hdev)
481 } 479 }
482 480
483 if (cp.le != lmp_host_le_capable(hdev)) 481 if (cp.le != lmp_host_le_capable(hdev))
484 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), 482 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
485 &cp); 483 &cp);
486} 484}
487 485
488static void hci_init3_req(struct hci_dev *hdev, unsigned long opt) 486static void hci_init3_req(struct hci_request *req, unsigned long opt)
489{ 487{
488 struct hci_dev *hdev = req->hdev;
489
490 if (hdev->commands[5] & 0x10) 490 if (hdev->commands[5] & 0x10)
491 hci_setup_link_policy(hdev); 491 hci_setup_link_policy(req);
492 492
493 if (lmp_le_capable(hdev)) 493 if (lmp_le_capable(hdev))
494 hci_set_le_support(hdev); 494 hci_set_le_support(req);
495} 495}
496 496
497static int __hci_init(struct hci_dev *hdev) 497static int __hci_init(struct hci_dev *hdev)
@@ -516,44 +516,44 @@ static int __hci_init(struct hci_dev *hdev)
516 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT); 516 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
517} 517}
518 518
519static void hci_scan_req(struct hci_dev *hdev, unsigned long opt) 519static void hci_scan_req(struct hci_request *req, unsigned long opt)
520{ 520{
521 __u8 scan = opt; 521 __u8 scan = opt;
522 522
523 BT_DBG("%s %x", hdev->name, scan); 523 BT_DBG("%s %x", req->hdev->name, scan);
524 524
525 /* Inquiry and Page scans */ 525 /* Inquiry and Page scans */
526 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); 526 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
527} 527}
528 528
529static void hci_auth_req(struct hci_dev *hdev, unsigned long opt) 529static void hci_auth_req(struct hci_request *req, unsigned long opt)
530{ 530{
531 __u8 auth = opt; 531 __u8 auth = opt;
532 532
533 BT_DBG("%s %x", hdev->name, auth); 533 BT_DBG("%s %x", req->hdev->name, auth);
534 534
535 /* Authentication */ 535 /* Authentication */
536 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth); 536 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
537} 537}
538 538
539static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt) 539static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
540{ 540{
541 __u8 encrypt = opt; 541 __u8 encrypt = opt;
542 542
543 BT_DBG("%s %x", hdev->name, encrypt); 543 BT_DBG("%s %x", req->hdev->name, encrypt);
544 544
545 /* Encryption */ 545 /* Encryption */
546 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt); 546 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
547} 547}
548 548
549static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt) 549static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
550{ 550{
551 __le16 policy = cpu_to_le16(opt); 551 __le16 policy = cpu_to_le16(opt);
552 552
553 BT_DBG("%s %x", hdev->name, policy); 553 BT_DBG("%s %x", req->hdev->name, policy);
554 554
555 /* Default link policy */ 555 /* Default link policy */
556 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy); 556 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
557} 557}
558 558
559/* Get HCI device by index. 559/* Get HCI device by index.
@@ -790,9 +790,10 @@ static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
790 return copied; 790 return copied;
791} 791}
792 792
793static void hci_inq_req(struct hci_dev *hdev, unsigned long opt) 793static void hci_inq_req(struct hci_request *req, unsigned long opt)
794{ 794{
795 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt; 795 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
796 struct hci_dev *hdev = req->hdev;
796 struct hci_cp_inquiry cp; 797 struct hci_cp_inquiry cp;
797 798
798 BT_DBG("%s", hdev->name); 799 BT_DBG("%s", hdev->name);
@@ -804,7 +805,7 @@ static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
804 memcpy(&cp.lap, &ir->lap, 3); 805 memcpy(&cp.lap, &ir->lap, 3);
805 cp.length = ir->length; 806 cp.length = ir->length;
806 cp.num_rsp = ir->num_rsp; 807 cp.num_rsp = ir->num_rsp;
807 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp); 808 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
808} 809}
809 810
810int hci_inquiry(void __user *arg) 811int hci_inquiry(void __user *arg)
@@ -1845,7 +1846,7 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1845 return mgmt_device_unblocked(hdev, bdaddr, type); 1846 return mgmt_device_unblocked(hdev, bdaddr, type);
1846} 1847}
1847 1848
1848static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt) 1849static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1849{ 1850{
1850 struct le_scan_params *param = (struct le_scan_params *) opt; 1851 struct le_scan_params *param = (struct le_scan_params *) opt;
1851 struct hci_cp_le_set_scan_param cp; 1852 struct hci_cp_le_set_scan_param cp;
@@ -1855,10 +1856,10 @@ static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1855 cp.interval = cpu_to_le16(param->interval); 1856 cp.interval = cpu_to_le16(param->interval);
1856 cp.window = cpu_to_le16(param->window); 1857 cp.window = cpu_to_le16(param->window);
1857 1858
1858 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp); 1859 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1859} 1860}
1860 1861
1861static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt) 1862static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
1862{ 1863{
1863 struct hci_cp_le_set_scan_enable cp; 1864 struct hci_cp_le_set_scan_enable cp;
1864 1865
@@ -1866,7 +1867,7 @@ static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1866 cp.enable = 1; 1867 cp.enable = 1;
1867 cp.filter_dup = 1; 1868 cp.filter_dup = 1;
1868 1869
1869 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); 1870 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1870} 1871}
1871 1872
1872static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval, 1873static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
@@ -3219,6 +3220,28 @@ static bool hci_req_is_complete(struct hci_dev *hdev)
3219 return bt_cb(skb)->req.start; 3220 return bt_cb(skb)->req.start;
3220} 3221}
3221 3222
3223static void hci_resend_last(struct hci_dev *hdev)
3224{
3225 struct hci_command_hdr *sent;
3226 struct sk_buff *skb;
3227 u16 opcode;
3228
3229 if (!hdev->sent_cmd)
3230 return;
3231
3232 sent = (void *) hdev->sent_cmd->data;
3233 opcode = __le16_to_cpu(sent->opcode);
3234 if (opcode == HCI_OP_RESET)
3235 return;
3236
3237 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3238 if (!skb)
3239 return;
3240
3241 skb_queue_head(&hdev->cmd_q, skb);
3242 queue_work(hdev->workqueue, &hdev->cmd_work);
3243}
3244
3222void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status) 3245void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3223{ 3246{
3224 hci_req_complete_t req_complete = NULL; 3247 hci_req_complete_t req_complete = NULL;
@@ -3227,11 +3250,21 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3227 3250
3228 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status); 3251 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3229 3252
3230 /* Check that the completed command really matches the last one 3253 /* If the completed command doesn't match the last one that was
3231 * that was sent. 3254 * sent we need to do special handling of it.
3232 */ 3255 */
3233 if (!hci_sent_cmd_data(hdev, opcode)) 3256 if (!hci_sent_cmd_data(hdev, opcode)) {
3257 /* Some CSR based controllers generate a spontaneous
3258 * reset complete event during init and any pending
3259 * command will never be completed. In such a case we
3260 * need to resend whatever was the last sent
3261 * command.
3262 */
3263 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3264 hci_resend_last(hdev);
3265
3234 return; 3266 return;
3267 }
3235 3268
3236 /* If the command succeeded and there's still more commands in 3269 /* If the command succeeded and there's still more commands in
3237 * this request the request is not yet complete. 3270 * this request the request is not yet complete.
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 8b878a3bdf69..0dd85a0c05f4 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -54,7 +54,6 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
54 hci_dev_unlock(hdev); 54 hci_dev_unlock(hdev);
55 55
56 hci_req_cmd_complete(hdev, HCI_OP_INQUIRY, status); 56 hci_req_cmd_complete(hdev, HCI_OP_INQUIRY, status);
57 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
58 57
59 hci_conn_check_pending(hdev); 58 hci_conn_check_pending(hdev);
60} 59}
@@ -184,8 +183,6 @@ static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
184 183
185 if (!status) 184 if (!status)
186 hdev->link_policy = get_unaligned_le16(sent); 185 hdev->link_policy = get_unaligned_le16(sent);
187
188 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
189} 186}
190 187
191static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) 188static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
@@ -196,8 +193,6 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
196 193
197 clear_bit(HCI_RESET, &hdev->flags); 194 clear_bit(HCI_RESET, &hdev->flags);
198 195
199 hci_req_complete(hdev, HCI_OP_RESET, status);
200
201 /* Reset all non-persistent flags */ 196 /* Reset all non-persistent flags */
202 hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) | 197 hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
203 BIT(HCI_PERIODIC_INQ)); 198 BIT(HCI_PERIODIC_INQ));
@@ -232,8 +227,6 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
232 227
233 if (!status && !test_bit(HCI_INIT, &hdev->flags)) 228 if (!status && !test_bit(HCI_INIT, &hdev->flags))
234 hci_update_ad(hdev); 229 hci_update_ad(hdev);
235
236 hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
237} 230}
238 231
239static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) 232static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -271,8 +264,6 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
271 264
272 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 265 if (test_bit(HCI_MGMT, &hdev->dev_flags))
273 mgmt_auth_enable_complete(hdev, status); 266 mgmt_auth_enable_complete(hdev, status);
274
275 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
276} 267}
277 268
278static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb) 269static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
@@ -294,8 +285,6 @@ static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
294 else 285 else
295 clear_bit(HCI_ENCRYPT, &hdev->flags); 286 clear_bit(HCI_ENCRYPT, &hdev->flags);
296 } 287 }
297
298 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
299} 288}
300 289
301static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) 290static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
@@ -344,7 +333,6 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
344 333
345done: 334done:
346 hci_dev_unlock(hdev); 335 hci_dev_unlock(hdev);
347 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
348} 336}
349 337
350static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) 338static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
@@ -441,8 +429,6 @@ static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
441 __u8 status = *((__u8 *) skb->data); 429 __u8 status = *((__u8 *) skb->data);
442 430
443 BT_DBG("%s status 0x%2.2x", hdev->name, status); 431 BT_DBG("%s status 0x%2.2x", hdev->name, status);
444
445 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
446} 432}
447 433
448static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) 434static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
@@ -480,7 +466,7 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
480 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 466 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
481 467
482 if (rp->status) 468 if (rp->status)
483 goto done; 469 return;
484 470
485 hdev->hci_ver = rp->hci_ver; 471 hdev->hci_ver = rp->hci_ver;
486 hdev->hci_rev = __le16_to_cpu(rp->hci_rev); 472 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
@@ -490,9 +476,6 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
490 476
491 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name, 477 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
492 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev); 478 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
493
494done:
495 hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
496} 479}
497 480
498static void hci_cc_read_local_commands(struct hci_dev *hdev, 481static void hci_cc_read_local_commands(struct hci_dev *hdev,
@@ -504,8 +487,6 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev,
504 487
505 if (!rp->status) 488 if (!rp->status)
506 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); 489 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
507
508 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
509} 490}
510 491
511static void hci_cc_read_local_features(struct hci_dev *hdev, 492static void hci_cc_read_local_features(struct hci_dev *hdev,
@@ -572,7 +553,7 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
572 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 553 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
573 554
574 if (rp->status) 555 if (rp->status)
575 goto done; 556 return;
576 557
577 switch (rp->page) { 558 switch (rp->page) {
578 case 0: 559 case 0:
@@ -582,9 +563,6 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
582 memcpy(hdev->host_features, rp->features, 8); 563 memcpy(hdev->host_features, rp->features, 8);
583 break; 564 break;
584 } 565 }
585
586done:
587 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
588} 566}
589 567
590static void hci_cc_read_flow_control_mode(struct hci_dev *hdev, 568static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
@@ -594,12 +572,8 @@ static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
594 572
595 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 573 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
596 574
597 if (rp->status) 575 if (!rp->status)
598 return; 576 hdev->flow_ctl_mode = rp->mode;
599
600 hdev->flow_ctl_mode = rp->mode;
601
602 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
603} 577}
604 578
605static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) 579static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
@@ -636,8 +610,6 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
636 610
637 if (!rp->status) 611 if (!rp->status)
638 bacpy(&hdev->bdaddr, &rp->bdaddr); 612 bacpy(&hdev->bdaddr, &rp->bdaddr);
639
640 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
641} 613}
642 614
643static void hci_cc_read_data_block_size(struct hci_dev *hdev, 615static void hci_cc_read_data_block_size(struct hci_dev *hdev,
@@ -658,8 +630,6 @@ static void hci_cc_read_data_block_size(struct hci_dev *hdev,
658 630
659 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, 631 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
660 hdev->block_cnt, hdev->block_len); 632 hdev->block_cnt, hdev->block_len);
661
662 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
663} 633}
664 634
665static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb) 635static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
@@ -667,8 +637,6 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
667 __u8 status = *((__u8 *) skb->data); 637 __u8 status = *((__u8 *) skb->data);
668 638
669 BT_DBG("%s status 0x%2.2x", hdev->name, status); 639 BT_DBG("%s status 0x%2.2x", hdev->name, status);
670
671 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
672} 640}
673 641
674static void hci_cc_read_local_amp_info(struct hci_dev *hdev, 642static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
@@ -692,8 +660,6 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
692 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to); 660 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
693 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to); 661 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
694 662
695 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
696
697a2mp_rsp: 663a2mp_rsp:
698 a2mp_send_getinfo_rsp(hdev); 664 a2mp_send_getinfo_rsp(hdev);
699} 665}
@@ -741,8 +707,6 @@ static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
741 __u8 status = *((__u8 *) skb->data); 707 __u8 status = *((__u8 *) skb->data);
742 708
743 BT_DBG("%s status 0x%2.2x", hdev->name, status); 709 BT_DBG("%s status 0x%2.2x", hdev->name, status);
744
745 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
746} 710}
747 711
748static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb) 712static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
@@ -750,8 +714,6 @@ static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
750 __u8 status = *((__u8 *) skb->data); 714 __u8 status = *((__u8 *) skb->data);
751 715
752 BT_DBG("%s status 0x%2.2x", hdev->name, status); 716 BT_DBG("%s status 0x%2.2x", hdev->name, status);
753
754 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
755} 717}
756 718
757static void hci_cc_write_inquiry_mode(struct hci_dev *hdev, 719static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
@@ -760,8 +722,6 @@ static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
760 __u8 status = *((__u8 *) skb->data); 722 __u8 status = *((__u8 *) skb->data);
761 723
762 BT_DBG("%s status 0x%2.2x", hdev->name, status); 724 BT_DBG("%s status 0x%2.2x", hdev->name, status);
763
764 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
765} 725}
766 726
767static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, 727static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
@@ -773,8 +733,6 @@ static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
773 733
774 if (!rp->status) 734 if (!rp->status)
775 hdev->inq_tx_power = rp->tx_power; 735 hdev->inq_tx_power = rp->tx_power;
776
777 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
778} 736}
779 737
780static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb) 738static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -782,8 +740,6 @@ static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
782 __u8 status = *((__u8 *) skb->data); 740 __u8 status = *((__u8 *) skb->data);
783 741
784 BT_DBG("%s status 0x%2.2x", hdev->name, status); 742 BT_DBG("%s status 0x%2.2x", hdev->name, status);
785
786 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
787} 743}
788 744
789static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb) 745static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -845,8 +801,6 @@ static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
845 hdev->le_cnt = hdev->le_pkts; 801 hdev->le_cnt = hdev->le_pkts;
846 802
847 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts); 803 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
848
849 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
850} 804}
851 805
852static void hci_cc_le_read_local_features(struct hci_dev *hdev, 806static void hci_cc_le_read_local_features(struct hci_dev *hdev,
@@ -858,8 +812,6 @@ static void hci_cc_le_read_local_features(struct hci_dev *hdev,
858 812
859 if (!rp->status) 813 if (!rp->status)
860 memcpy(hdev->le_features, rp->features, 8); 814 memcpy(hdev->le_features, rp->features, 8);
861
862 hci_req_complete(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, rp->status);
863} 815}
864 816
865static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, 817static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
@@ -874,8 +826,6 @@ static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
874 if (!test_bit(HCI_INIT, &hdev->flags)) 826 if (!test_bit(HCI_INIT, &hdev->flags))
875 hci_update_ad(hdev); 827 hci_update_ad(hdev);
876 } 828 }
877
878 hci_req_complete(hdev, HCI_OP_LE_READ_ADV_TX_POWER, rp->status);
879} 829}
880 830
881static void hci_cc_le_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb) 831static void hci_cc_le_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
@@ -883,8 +833,6 @@ static void hci_cc_le_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
883 __u8 status = *((__u8 *) skb->data); 833 __u8 status = *((__u8 *) skb->data);
884 834
885 BT_DBG("%s status 0x%2.2x", hdev->name, status); 835 BT_DBG("%s status 0x%2.2x", hdev->name, status);
886
887 hci_req_complete(hdev, HCI_OP_LE_SET_EVENT_MASK, status);
888} 836}
889 837
890static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) 838static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -985,8 +933,6 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
985 933
986 if (!test_bit(HCI_INIT, &hdev->flags)) 934 if (!test_bit(HCI_INIT, &hdev->flags))
987 hci_update_ad(hdev); 935 hci_update_ad(hdev);
988
989 hci_req_complete(hdev, HCI_OP_LE_SET_ADV_ENABLE, status);
990} 936}
991 937
992static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb) 938static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
@@ -995,8 +941,6 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
995 941
996 BT_DBG("%s status 0x%2.2x", hdev->name, status); 942 BT_DBG("%s status 0x%2.2x", hdev->name, status);
997 943
998 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
999
1000 if (status) { 944 if (status) {
1001 hci_dev_lock(hdev); 945 hci_dev_lock(hdev);
1002 mgmt_start_discovery_failed(hdev, status); 946 mgmt_start_discovery_failed(hdev, status);
@@ -1019,8 +963,6 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1019 963
1020 switch (cp->enable) { 964 switch (cp->enable) {
1021 case LE_SCANNING_ENABLED: 965 case LE_SCANNING_ENABLED:
1022 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1023
1024 if (status) { 966 if (status) {
1025 hci_dev_lock(hdev); 967 hci_dev_lock(hdev);
1026 mgmt_start_discovery_failed(hdev, status); 968 mgmt_start_discovery_failed(hdev, status);
@@ -1071,8 +1013,6 @@ static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1071 1013
1072 if (!rp->status) 1014 if (!rp->status)
1073 hdev->le_white_list_size = rp->size; 1015 hdev->le_white_list_size = rp->size;
1074
1075 hci_req_complete(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, rp->status);
1076} 1016}
1077 1017
1078static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb) 1018static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1083,8 +1023,6 @@ static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1083 1023
1084 if (rp->status) 1024 if (rp->status)
1085 return; 1025 return;
1086
1087 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1088} 1026}
1089 1027
1090static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb) 1028static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1095,8 +1033,6 @@ static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1095 1033
1096 if (rp->status) 1034 if (rp->status)
1097 return; 1035 return;
1098
1099 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1100} 1036}
1101 1037
1102static void hci_cc_le_read_supported_states(struct hci_dev *hdev, 1038static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
@@ -1108,8 +1044,6 @@ static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1108 1044
1109 if (!rp->status) 1045 if (!rp->status)
1110 memcpy(hdev->le_states, rp->le_states, 8); 1046 memcpy(hdev->le_states, rp->le_states, 8);
1111
1112 hci_req_complete(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, rp->status);
1113} 1047}
1114 1048
1115static void hci_cc_write_le_host_supported(struct hci_dev *hdev, 1049static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
@@ -1139,8 +1073,6 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1139 if (test_bit(HCI_MGMT, &hdev->dev_flags) && 1073 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1140 !test_bit(HCI_INIT, &hdev->flags)) 1074 !test_bit(HCI_INIT, &hdev->flags))
1141 mgmt_le_enable_complete(hdev, sent->le, status); 1075 mgmt_le_enable_complete(hdev, sent->le, status);
1142
1143 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1144} 1076}
1145 1077
1146static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev, 1078static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
@@ -1162,7 +1094,6 @@ static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1162 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1094 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1163 1095
1164 if (status) { 1096 if (status) {
1165 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1166 hci_conn_check_pending(hdev); 1097 hci_conn_check_pending(hdev);
1167 hci_dev_lock(hdev); 1098 hci_dev_lock(hdev);
1168 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 1099 if (test_bit(HCI_MGMT, &hdev->dev_flags))
@@ -1694,7 +1625,6 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1694 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1625 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1695 1626
1696 hci_req_cmd_complete(hdev, HCI_OP_INQUIRY, status); 1627 hci_req_cmd_complete(hdev, HCI_OP_INQUIRY, status);
1697 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1698 1628
1699 hci_conn_check_pending(hdev); 1629 hci_conn_check_pending(hdev);
1700 1630