author		David S. Miller <davem@davemloft.net>	2015-03-14 14:29:45 -0400
committer	David S. Miller <davem@davemloft.net>	2015-03-14 14:29:45 -0400
commit		5f1764ddfeb038decfe2b2fda030d0bed43fa36a (patch)
tree		a42d36330758ccce3becb22b96c8c748b89a837a
parent		2801be4a0ea4cd0965f9c12ff813a91892a8e157 (diff)
parent		b6d595e3f74fe0dd9edc0d5bf30cd6e6fe29f023 (diff)
Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next
Johan Hedberg says:

====================
Here's another set of Bluetooth & ieee802154 patches intended for 4.1:

 - Added support for QCA ROME chipset family in the btusb driver
 - at86rf230 driver fixes & cleanups
 - ieee802154 cleanups
 - Refactoring of Bluetooth mgmt API to allow new users
 - New setting for static Bluetooth address exposed to user space
 - Refactoring of hci_dev flags to remove limit of 32
 - Remove unnecessary fast-connectable setting usage restrictions
 - Fix behavior to be consistent when trying to pair already paired device
 - Service discovery corner-case fixes

Please let me know if there are any issues pulling. Thanks.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
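For context on the "hci_dev flags" item above: this pull moves the per-controller flags from a single unsigned long bitmask to a bitmap sized by __HCI_NUM_FLAGS and adds hci_dev_*_flag() helper macros in hci_core.h (see the hunks below). A minimal, illustrative sketch of the calling-convention change — not part of the patch, and example_le_check() is a made-up caller:

	/* illustrative only -- example_le_check() is hypothetical */
	static bool example_le_check(struct hci_dev *hdev)
	{
		/* old style (removed): test_bit(HCI_LE_ENABLED, &hdev->dev_flags) */
		return hci_dev_test_flag(hdev, HCI_LE_ENABLED);
	}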
-rw-r--r--	drivers/bluetooth/btusb.c		333
-rw-r--r--	drivers/net/ieee802154/at86rf230.c	226
-rw-r--r--	include/linux/ieee802154.h		2
-rw-r--r--	include/net/bluetooth/bluetooth.h	3
-rw-r--r--	include/net/bluetooth/hci.h		22
-rw-r--r--	include/net/bluetooth/hci_core.h	59
-rw-r--r--	include/net/bluetooth/mgmt.h		2
-rw-r--r--	net/bluetooth/af_bluetooth.c		9
-rw-r--r--	net/bluetooth/hci_conn.c		18
-rw-r--r--	net/bluetooth/hci_core.c		230
-rw-r--r--	net/bluetooth/hci_debugfs.c		10
-rw-r--r--	net/bluetooth/hci_event.c		127
-rw-r--r--	net/bluetooth/hci_request.c		34
-rw-r--r--	net/bluetooth/hci_sock.c		108
-rw-r--r--	net/bluetooth/l2cap_core.c		10
-rw-r--r--	net/bluetooth/mgmt.c			1947
-rw-r--r--	net/bluetooth/sco.c			2
-rw-r--r--	net/bluetooth/smp.c			62
-rw-r--r--	net/ieee802154/6lowpan/core.c		2
-rw-r--r--	net/ieee802154/core.c			5
-rw-r--r--	net/ieee802154/nl-mac.c			1
-rw-r--r--	net/ieee802154/sysfs.c			49
-rw-r--r--	net/mac802154/util.c			13
23 files changed, 1888 insertions, 1386 deletions
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 8c1bf6190533..6fa9338745cf 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -52,6 +52,7 @@ static struct usb_driver btusb_driver;
 #define BTUSB_SWAVE		0x1000
 #define BTUSB_INTEL_NEW		0x2000
 #define BTUSB_AMP		0x4000
+#define BTUSB_QCA_ROME		0x8000
 
 static const struct usb_device_id btusb_table[] = {
 	/* Generic Bluetooth USB device */
@@ -213,6 +214,10 @@ static const struct usb_device_id blacklist_table[] = {
 	{ USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
 
+	/* QCA ROME chipset */
+	{ USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME},
+	{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME},
+
 	/* Broadcom BCM2035 */
 	{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
 	{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
@@ -338,6 +343,8 @@ struct btusb_data {
 
 	int (*recv_event)(struct hci_dev *hdev, struct sk_buff *skb);
 	int (*recv_bulk)(struct btusb_data *data, void *buffer, int count);
+
+	int (*setup_on_usb)(struct hci_dev *hdev);
 };
 
 static inline void btusb_free_frags(struct btusb_data *data)
@@ -879,6 +886,15 @@ static int btusb_open(struct hci_dev *hdev)
 
 	BT_DBG("%s", hdev->name);
 
+	/* Patching USB firmware files prior to starting any URBs of HCI path
+	 * It is more safe to use USB bulk channel for downloading USB patch
+	 */
+	if (data->setup_on_usb) {
+		err = data->setup_on_usb(hdev);
+		if (err < 0)
+			return err;
+	}
+
 	err = usb_autopm_get_interface(data->intf);
 	if (err < 0)
 		return err;
@@ -1254,6 +1270,28 @@ static void btusb_waker(struct work_struct *work)
 	usb_autopm_put_interface(data->intf);
 }
 
+static struct sk_buff *btusb_read_local_version(struct hci_dev *hdev)
+{
+	struct sk_buff *skb;
+
+	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
+			     HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
+		       hdev->name, PTR_ERR(skb));
+		return skb;
+	}
+
+	if (skb->len != sizeof(struct hci_rp_read_local_version)) {
+		BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
+		       hdev->name);
+		kfree_skb(skb);
+		return ERR_PTR(-EIO);
+	}
+
+	return skb;
+}
+
 static int btusb_setup_bcm92035(struct hci_dev *hdev)
 {
 	struct sk_buff *skb;
@@ -1278,12 +1316,9 @@ static int btusb_setup_csr(struct hci_dev *hdev)
 
 	BT_DBG("%s", hdev->name);
 
-	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
-			     HCI_INIT_TIMEOUT);
-	if (IS_ERR(skb)) {
-		BT_ERR("Reading local version failed (%ld)", -PTR_ERR(skb));
+	skb = btusb_read_local_version(hdev);
+	if (IS_ERR(skb))
 		return -PTR_ERR(skb);
-	}
 
 	rp = (struct hci_rp_read_local_version *)skb->data;
 
@@ -2414,21 +2449,9 @@ static int btusb_setup_bcm_patchram(struct hci_dev *hdev)
 	kfree_skb(skb);
 
 	/* Read Local Version Info */
-	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
-			     HCI_INIT_TIMEOUT);
-	if (IS_ERR(skb)) {
-		ret = PTR_ERR(skb);
-		BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
-		       hdev->name, ret);
-		return ret;
-	}
-
-	if (skb->len != sizeof(*ver)) {
-		BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
-		       hdev->name);
-		kfree_skb(skb);
-		return -EIO;
-	}
+	skb = btusb_read_local_version(hdev);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
 
 	ver = (struct hci_rp_read_local_version *)skb->data;
 	rev = le16_to_cpu(ver->hci_rev);
@@ -2516,20 +2539,9 @@ reset_fw:
 	kfree_skb(skb);
 
 	/* Read Local Version Info */
-	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
-			     HCI_INIT_TIMEOUT);
+	skb = btusb_read_local_version(hdev);
 	if (IS_ERR(skb)) {
 		ret = PTR_ERR(skb);
-		BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
-		       hdev->name, ret);
-		goto done;
-	}
-
-	if (skb->len != sizeof(*ver)) {
-		BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
-		       hdev->name);
-		kfree_skb(skb);
-		ret = -EIO;
 		goto done;
 	}
 
@@ -2628,6 +2640,258 @@ static int btusb_set_bdaddr_ath3012(struct hci_dev *hdev,
 	return 0;
 }
 
+#define QCA_DFU_PACKET_LEN	4096
+
+#define QCA_GET_TARGET_VERSION	0x09
+#define QCA_CHECK_STATUS	0x05
+#define QCA_DFU_DOWNLOAD	0x01
+
+#define QCA_SYSCFG_UPDATED	0x40
+#define QCA_PATCH_UPDATED	0x80
+#define QCA_DFU_TIMEOUT		3000
+
+struct qca_version {
+	__le32 rom_version;
+	__le32 patch_version;
+	__le32 ram_version;
+	__le32 ref_clock;
+	__u8 reserved[4];
+} __packed;
+
+struct qca_rampatch_version {
+	__le16 rom_version;
+	__le16 patch_version;
+} __packed;
+
+struct qca_device_info {
+	u32 rom_version;
+	u8 rampatch_hdr;	/* length of header in rampatch */
+	u8 nvm_hdr;	/* length of header in NVM */
+	u8 ver_offset;	/* offset of version structure in rampatch */
+};
+
+static const struct qca_device_info qca_devices_table[] = {
+	{ 0x00000100, 20, 4, 10 },	/* Rome 1.0 */
+	{ 0x00000101, 20, 4, 10 },	/* Rome 1.1 */
+	{ 0x00000201, 28, 4, 18 },	/* Rome 2.1 */
+	{ 0x00000300, 28, 4, 18 },	/* Rome 3.0 */
+	{ 0x00000302, 28, 4, 18 },	/* Rome 3.2 */
+};
+
+static int btusb_qca_send_vendor_req(struct hci_dev *hdev, u8 request,
+				     void *data, u16 size)
+{
+	struct btusb_data *btdata = hci_get_drvdata(hdev);
+	struct usb_device *udev = btdata->udev;
+	int pipe, err;
+	u8 *buf;
+
+	buf = kmalloc(size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* Found some of USB hosts have IOT issues with ours so that we should
+	 * not wait until HCI layer is ready.
+	 */
+	pipe = usb_rcvctrlpipe(udev, 0);
+	err = usb_control_msg(udev, pipe, request, USB_TYPE_VENDOR | USB_DIR_IN,
+			      0, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+	if (err < 0) {
+		BT_ERR("%s: Failed to access otp area (%d)", hdev->name, err);
+		goto done;
+	}
+
+	memcpy(data, buf, size);
+
+done:
+	kfree(buf);
+
+	return err;
+}
+
+static int btusb_setup_qca_download_fw(struct hci_dev *hdev,
+				       const struct firmware *firmware,
+				       size_t hdr_size)
+{
+	struct btusb_data *btdata = hci_get_drvdata(hdev);
+	struct usb_device *udev = btdata->udev;
+	size_t count, size, sent = 0;
+	int pipe, len, err;
+	u8 *buf;
+
+	buf = kmalloc(QCA_DFU_PACKET_LEN, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	count = firmware->size;
+
+	size = min_t(size_t, count, hdr_size);
+	memcpy(buf, firmware->data, size);
+
+	/* USB patches should go down to controller through USB path
+	 * because binary format fits to go down through USB channel.
+	 * USB control path is for patching headers and USB bulk is for
+	 * patch body.
+	 */
+	pipe = usb_sndctrlpipe(udev, 0);
+	err = usb_control_msg(udev, pipe, QCA_DFU_DOWNLOAD, USB_TYPE_VENDOR,
+			      0, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+	if (err < 0) {
+		BT_ERR("%s: Failed to send headers (%d)", hdev->name, err);
+		goto done;
+	}
+
+	sent += size;
+	count -= size;
+
+	while (count) {
+		size = min_t(size_t, count, QCA_DFU_PACKET_LEN);
+
+		memcpy(buf, firmware->data + sent, size);
+
+		pipe = usb_sndbulkpipe(udev, 0x02);
+		err = usb_bulk_msg(udev, pipe, buf, size, &len,
+				   QCA_DFU_TIMEOUT);
+		if (err < 0) {
+			BT_ERR("%s: Failed to send body at %zd of %zd (%d)",
+			       hdev->name, sent, firmware->size, err);
+			break;
+		}
+
+		if (size != len) {
+			BT_ERR("%s: Failed to get bulk buffer", hdev->name);
+			err = -EILSEQ;
+			break;
+		}
+
+		sent += size;
+		count -= size;
+	}
+
+done:
+	kfree(buf);
+	return err;
+}
+
+static int btusb_setup_qca_load_rampatch(struct hci_dev *hdev,
+					 struct qca_version *ver,
+					 const struct qca_device_info *info)
+{
+	struct qca_rampatch_version *rver;
+	const struct firmware *fw;
+	u32 ver_rom, ver_patch;
+	u16 rver_rom, rver_patch;
+	char fwname[64];
+	int err;
+
+	ver_rom = le32_to_cpu(ver->rom_version);
+	ver_patch = le32_to_cpu(ver->patch_version);
+
+	snprintf(fwname, sizeof(fwname), "qca/rampatch_usb_%08x.bin", ver_rom);
+
+	err = request_firmware(&fw, fwname, &hdev->dev);
+	if (err) {
+		BT_ERR("%s: failed to request rampatch file: %s (%d)",
+		       hdev->name, fwname, err);
+		return err;
+	}
+
+	BT_INFO("%s: using rampatch file: %s", hdev->name, fwname);
+
+	rver = (struct qca_rampatch_version *)(fw->data + info->ver_offset);
+	rver_rom = le16_to_cpu(rver->rom_version);
+	rver_patch = le16_to_cpu(rver->patch_version);
+
+	BT_INFO("%s: QCA: patch rome 0x%x build 0x%x, firmware rome 0x%x "
+		"build 0x%x", hdev->name, rver_rom, rver_patch, ver_rom,
+		ver_patch);
+
+	if (rver_rom != ver_rom || rver_patch <= ver_patch) {
+		BT_ERR("%s: rampatch file version did not match with firmware",
+		       hdev->name);
+		err = -EINVAL;
+		goto done;
+	}
+
+	err = btusb_setup_qca_download_fw(hdev, fw, info->rampatch_hdr);
+
+done:
+	release_firmware(fw);
+
+	return err;
+}
+
+static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
+				    struct qca_version *ver,
+				    const struct qca_device_info *info)
+{
+	const struct firmware *fw;
+	char fwname[64];
+	int err;
+
+	snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x.bin",
+		 le32_to_cpu(ver->rom_version));
+
+	err = request_firmware(&fw, fwname, &hdev->dev);
+	if (err) {
+		BT_ERR("%s: failed to request NVM file: %s (%d)",
+		       hdev->name, fwname, err);
+		return err;
+	}
+
+	BT_INFO("%s: using NVM file: %s", hdev->name, fwname);
+
+	err = btusb_setup_qca_download_fw(hdev, fw, info->nvm_hdr);
+
+	release_firmware(fw);
+
+	return err;
+}
+
+static int btusb_setup_qca(struct hci_dev *hdev)
+{
+	const struct qca_device_info *info = NULL;
+	struct qca_version ver;
+	u32 ver_rom;
+	u8 status;
+	int i, err;
+
+	err = btusb_qca_send_vendor_req(hdev, QCA_GET_TARGET_VERSION, &ver,
+					sizeof(ver));
+	if (err < 0)
+		return err;
+
+	ver_rom = le32_to_cpu(ver.rom_version);
+	for (i = 0; i < ARRAY_SIZE(qca_devices_table); i++) {
+		if (ver_rom == qca_devices_table[i].rom_version)
+			info = &qca_devices_table[i];
+	}
+	if (!info) {
+		BT_ERR("%s: don't support firmware rome 0x%x", hdev->name,
+		       ver_rom);
+		return -ENODEV;
+	}
+
+	err = btusb_qca_send_vendor_req(hdev, QCA_CHECK_STATUS, &status,
+					sizeof(status));
+	if (err < 0)
+		return err;
+
+	if (!(status & QCA_PATCH_UPDATED)) {
+		err = btusb_setup_qca_load_rampatch(hdev, &ver, info);
+		if (err < 0)
+			return err;
+	}
+
+	if (!(status & QCA_SYSCFG_UPDATED)) {
+		err = btusb_setup_qca_load_nvm(hdev, &ver, info);
+		if (err < 0)
+			return err;
+	}
+
+	return 0;
+}
+
 static int btusb_probe(struct usb_interface *intf,
 		       const struct usb_device_id *id)
 {
@@ -2781,6 +3045,11 @@ static int btusb_probe(struct usb_interface *intf,
 		set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
 	}
 
+	if (id->driver_info & BTUSB_QCA_ROME) {
+		data->setup_on_usb = btusb_setup_qca;
+		hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
+	}
+
 	if (id->driver_info & BTUSB_AMP) {
 		/* AMP controllers do not support SCO packets */
 		data->isoc = NULL;
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 1d438bc54189..b64c5c7b2a50 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -19,6 +19,8 @@
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/hrtimer.h>
+#include <linux/jiffies.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/gpio.h>
@@ -52,11 +54,21 @@ struct at86rf2xx_chip_data {
 	int (*get_desense_steps)(struct at86rf230_local *, s32);
 };
 
 #define AT86RF2XX_MAX_BUF (127 + 3)
+/* tx retries to access the TX_ON state
+ * if it's above then force change will be started.
+ *
+ * We assume the max_frame_retries (7) value of 802.15.4 here.
+ */
+#define AT86RF2XX_MAX_TX_RETRIES 7
+/* We use the recommended 5 minutes timeout to recalibrate */
+#define AT86RF2XX_CAL_LOOP_TIMEOUT (5 * 60 * HZ)
 
 struct at86rf230_state_change {
 	struct at86rf230_local *lp;
+	int irq;
 
+	struct hrtimer timer;
 	struct spi_message msg;
 	struct spi_transfer trx;
 	u8 buf[AT86RF2XX_MAX_BUF];
@@ -81,10 +93,12 @@ struct at86rf230_local {
 	struct at86rf230_state_change irq;
 
 	bool tx_aret;
+	unsigned long cal_timeout;
 	s8 max_frame_retries;
 	bool is_tx;
 	/* spinlock for is_tx protection */
 	spinlock_t lock;
+	u8 tx_retry;
 	struct sk_buff *tx_skb;
 	struct at86rf230_state_change tx;
 };
@@ -407,6 +421,8 @@ at86rf230_reg_volatile(struct device *dev, unsigned int reg)
 	case RG_PHY_ED_LEVEL:
 	case RG_IRQ_STATUS:
 	case RG_VREG_CTRL:
+	case RG_PLL_CF:
+	case RG_PLL_DCU:
 		return true;
 	default:
 		return false;
@@ -470,18 +486,25 @@ at86rf230_async_read_reg(struct at86rf230_local *lp, const u8 reg,
 	u8 *tx_buf = ctx->buf;
 
 	tx_buf[0] = (reg & CMD_REG_MASK) | CMD_REG;
-	ctx->trx.len = 2;
 	ctx->msg.complete = complete;
 	ctx->irq_enable = irq_enable;
 	rc = spi_async(lp->spi, &ctx->msg);
 	if (rc) {
 		if (irq_enable)
-			enable_irq(lp->spi->irq);
+			enable_irq(ctx->irq);
 
 		at86rf230_async_error(lp, ctx, rc);
 	}
 }
 
+static inline u8 at86rf230_state_to_force(u8 state)
+{
+	if (state == STATE_TX_ON)
+		return STATE_FORCE_TX_ON;
+	else
+		return STATE_FORCE_TRX_OFF;
+}
+
 static void
 at86rf230_async_state_assert(void *context)
 {
@@ -512,10 +535,21 @@ at86rf230_async_state_assert(void *context)
 	 * in STATE_BUSY_RX_AACK, we run a force state change
 	 * to STATE_TX_ON. This is a timeout handling, if the
 	 * transceiver stucks in STATE_BUSY_RX_AACK.
+	 *
+	 * Additional we do several retries to try to get into
+	 * TX_ON state without forcing. If the retries are
+	 * higher or equal than AT86RF2XX_MAX_TX_RETRIES we
+	 * will do a force change.
 	 */
-	if (ctx->to_state == STATE_TX_ON) {
-		at86rf230_async_state_change(lp, ctx,
-					     STATE_FORCE_TX_ON,
+	if (ctx->to_state == STATE_TX_ON ||
+	    ctx->to_state == STATE_TRX_OFF) {
+		u8 state = ctx->to_state;
+
+		if (lp->tx_retry >= AT86RF2XX_MAX_TX_RETRIES)
+			state = at86rf230_state_to_force(state);
+		lp->tx_retry++;
+
+		at86rf230_async_state_change(lp, ctx, state,
 					     ctx->complete,
 					     ctx->irq_enable);
 		return;
@@ -531,6 +565,19 @@ done:
 	ctx->complete(context);
 }
 
+static enum hrtimer_restart at86rf230_async_state_timer(struct hrtimer *timer)
+{
+	struct at86rf230_state_change *ctx =
+		container_of(timer, struct at86rf230_state_change, timer);
+	struct at86rf230_local *lp = ctx->lp;
+
+	at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
+				 at86rf230_async_state_assert,
+				 ctx->irq_enable);
+
+	return HRTIMER_NORESTART;
+}
+
 /* Do state change timing delay. */
 static void
 at86rf230_async_state_delay(void *context)
@@ -539,6 +586,7 @@ at86rf230_async_state_delay(void *context)
 	struct at86rf230_local *lp = ctx->lp;
 	struct at86rf2xx_chip_data *c = lp->data;
 	bool force = false;
+	ktime_t tim;
 
 	/* The force state changes are will show as normal states in the
 	 * state status subregister. We change the to_state to the
@@ -562,11 +610,15 @@ at86rf230_async_state_delay(void *context)
 	case STATE_TRX_OFF:
 		switch (ctx->to_state) {
 		case STATE_RX_AACK_ON:
-			usleep_range(c->t_off_to_aack, c->t_off_to_aack + 10);
+			tim = ktime_set(0, c->t_off_to_aack * NSEC_PER_USEC);
 			goto change;
 		case STATE_TX_ON:
-			usleep_range(c->t_off_to_tx_on,
-				     c->t_off_to_tx_on + 10);
+			tim = ktime_set(0, c->t_off_to_tx_on * NSEC_PER_USEC);
+			/* state change from TRX_OFF to TX_ON to do a
+			 * calibration, we need to reset the timeout for the
+			 * next one.
+			 */
+			lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT;
 			goto change;
 		default:
 			break;
@@ -574,14 +626,15 @@ at86rf230_async_state_delay(void *context)
 		break;
 	case STATE_BUSY_RX_AACK:
 		switch (ctx->to_state) {
+		case STATE_TRX_OFF:
 		case STATE_TX_ON:
 			/* Wait for worst case receiving time if we
 			 * didn't make a force change from BUSY_RX_AACK
-			 * to TX_ON.
+			 * to TX_ON or TRX_OFF.
 			 */
 			if (!force) {
-				usleep_range(c->t_frame + c->t_p_ack,
-					     c->t_frame + c->t_p_ack + 1000);
+				tim = ktime_set(0, (c->t_frame + c->t_p_ack) *
+						   NSEC_PER_USEC);
 				goto change;
 			}
 			break;
@@ -593,7 +646,7 @@ at86rf230_async_state_delay(void *context)
 	case STATE_P_ON:
 		switch (ctx->to_state) {
 		case STATE_TRX_OFF:
-			usleep_range(c->t_reset_to_off, c->t_reset_to_off + 10);
+			tim = ktime_set(0, c->t_reset_to_off * NSEC_PER_USEC);
 			goto change;
 		default:
 			break;
@@ -604,12 +657,10 @@ at86rf230_async_state_delay(void *context)
 	}
 
 	/* Default delay is 1us in the most cases */
-	udelay(1);
+	tim = ktime_set(0, NSEC_PER_USEC);
 
 change:
-	at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
-				 at86rf230_async_state_assert,
-				 ctx->irq_enable);
+	hrtimer_start(&ctx->timer, tim, HRTIMER_MODE_REL);
 }
 
 static void
@@ -645,12 +696,11 @@ at86rf230_async_state_change_start(void *context)
 	 */
 	buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
 	buf[1] = ctx->to_state;
-	ctx->trx.len = 2;
 	ctx->msg.complete = at86rf230_async_state_delay;
 	rc = spi_async(lp->spi, &ctx->msg);
 	if (rc) {
 		if (ctx->irq_enable)
-			enable_irq(lp->spi->irq);
+			enable_irq(ctx->irq);
 
 		at86rf230_async_error(lp, ctx, rc);
 	}
@@ -708,11 +758,10 @@ at86rf230_tx_complete(void *context)
 {
 	struct at86rf230_state_change *ctx = context;
 	struct at86rf230_local *lp = ctx->lp;
-	struct sk_buff *skb = lp->tx_skb;
 
-	enable_irq(lp->spi->irq);
+	enable_irq(ctx->irq);
 
-	ieee802154_xmit_complete(lp->hw, skb, !lp->tx_aret);
+	ieee802154_xmit_complete(lp->hw, lp->tx_skb, !lp->tx_aret);
 }
 
 static void
@@ -721,7 +770,7 @@ at86rf230_tx_on(void *context)
 	struct at86rf230_state_change *ctx = context;
 	struct at86rf230_local *lp = ctx->lp;
 
-	at86rf230_async_state_change(lp, &lp->irq, STATE_RX_AACK_ON,
+	at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON,
 				     at86rf230_tx_complete, true);
 }
 
@@ -765,14 +814,25 @@ at86rf230_tx_trac_status(void *context)
 }
 
 static void
-at86rf230_rx(struct at86rf230_local *lp,
-	     const u8 *data, const u8 len, const u8 lqi)
+at86rf230_rx_read_frame_complete(void *context)
 {
-	struct sk_buff *skb;
+	struct at86rf230_state_change *ctx = context;
+	struct at86rf230_local *lp = ctx->lp;
 	u8 rx_local_buf[AT86RF2XX_MAX_BUF];
+	const u8 *buf = ctx->buf;
+	struct sk_buff *skb;
+	u8 len, lqi;
 
-	memcpy(rx_local_buf, data, len);
-	enable_irq(lp->spi->irq);
+	len = buf[1];
+	if (!ieee802154_is_valid_psdu_len(len)) {
+		dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
+		len = IEEE802154_MTU;
+	}
+	lqi = buf[2 + len];
+
+	memcpy(rx_local_buf, buf + 2, len);
+	ctx->trx.len = 2;
+	enable_irq(ctx->irq);
 
 	skb = dev_alloc_skb(IEEE802154_MTU);
 	if (!skb) {
@@ -785,51 +845,34 @@ at86rf230_rx(struct at86rf230_local *lp,
 }
 
 static void
-at86rf230_rx_read_frame_complete(void *context)
+at86rf230_rx_read_frame(void *context)
 {
 	struct at86rf230_state_change *ctx = context;
 	struct at86rf230_local *lp = ctx->lp;
-	const u8 *buf = lp->irq.buf;
-	u8 len = buf[1];
-
-	if (!ieee802154_is_valid_psdu_len(len)) {
-		dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
-		len = IEEE802154_MTU;
-	}
-
-	at86rf230_rx(lp, buf + 2, len, buf[2 + len]);
-}
-
-static void
-at86rf230_rx_read_frame(struct at86rf230_local *lp)
-{
+	u8 *buf = ctx->buf;
 	int rc;
 
-	u8 *buf = lp->irq.buf;
-
 	buf[0] = CMD_FB;
-	lp->irq.trx.len = AT86RF2XX_MAX_BUF;
-	lp->irq.msg.complete = at86rf230_rx_read_frame_complete;
-	rc = spi_async(lp->spi, &lp->irq.msg);
+	ctx->trx.len = AT86RF2XX_MAX_BUF;
+	ctx->msg.complete = at86rf230_rx_read_frame_complete;
+	rc = spi_async(lp->spi, &ctx->msg);
 	if (rc) {
-		enable_irq(lp->spi->irq);
-		at86rf230_async_error(lp, &lp->irq, rc);
+		ctx->trx.len = 2;
+		enable_irq(ctx->irq);
+		at86rf230_async_error(lp, ctx, rc);
 	}
 }
 
 static void
 at86rf230_rx_trac_check(void *context)
 {
-	struct at86rf230_state_change *ctx = context;
-	struct at86rf230_local *lp = ctx->lp;
-
 	/* Possible check on trac status here. This could be useful to make
 	 * some stats why receive is failed. Not used at the moment, but it's
 	 * maybe timing relevant. Datasheet doesn't say anything about this.
	 * The programming guide say do it so.
 	 */
 
-	at86rf230_rx_read_frame(lp);
+	at86rf230_rx_read_frame(context);
 }
 
 static void
@@ -862,13 +905,13 @@ at86rf230_irq_status(void *context)
 {
 	struct at86rf230_state_change *ctx = context;
 	struct at86rf230_local *lp = ctx->lp;
-	const u8 *buf = lp->irq.buf;
+	const u8 *buf = ctx->buf;
 	const u8 irq = buf[1];
 
 	if (irq & IRQ_TRX_END) {
 		at86rf230_irq_trx_end(lp);
 	} else {
-		enable_irq(lp->spi->irq);
+		enable_irq(ctx->irq);
 		dev_err(&lp->spi->dev, "not supported irq %02x received\n",
 			irq);
 	}
@@ -884,7 +927,6 @@ static irqreturn_t at86rf230_isr(int irq, void *data)
 	disable_irq_nosync(irq);
 
 	buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG;
-	ctx->trx.len = 2;
 	ctx->msg.complete = at86rf230_irq_status;
 	rc = spi_async(lp->spi, &ctx->msg);
 	if (rc) {
@@ -919,7 +961,7 @@ at86rf230_write_frame(void *context)
 	struct at86rf230_state_change *ctx = context;
 	struct at86rf230_local *lp = ctx->lp;
 	struct sk_buff *skb = lp->tx_skb;
-	u8 *buf = lp->tx.buf;
+	u8 *buf = ctx->buf;
 	int rc;
 
 	spin_lock(&lp->lock);
@@ -929,11 +971,13 @@ at86rf230_write_frame(void *context)
 	buf[0] = CMD_FB | CMD_WRITE;
 	buf[1] = skb->len + 2;
 	memcpy(buf + 2, skb->data, skb->len);
-	lp->tx.trx.len = skb->len + 2;
-	lp->tx.msg.complete = at86rf230_write_frame_complete;
-	rc = spi_async(lp->spi, &lp->tx.msg);
-	if (rc)
+	ctx->trx.len = skb->len + 2;
+	ctx->msg.complete = at86rf230_write_frame_complete;
+	rc = spi_async(lp->spi, &ctx->msg);
+	if (rc) {
+		ctx->trx.len = 2;
 		at86rf230_async_error(lp, ctx, rc);
+	}
 }
 
 static void
@@ -946,24 +990,45 @@ at86rf230_xmit_tx_on(void *context)
 				     at86rf230_write_frame, false);
 }
 
+static void
+at86rf230_xmit_start(void *context)
+{
+	struct at86rf230_state_change *ctx = context;
+	struct at86rf230_local *lp = ctx->lp;
+
+	/* In ARET mode we need to go into STATE_TX_ARET_ON after we
+	 * are in STATE_TX_ON. The pfad differs here, so we change
+	 * the complete handler.
+	 */
+	if (lp->tx_aret)
+		at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
+					     at86rf230_xmit_tx_on, false);
+	else
+		at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
+					     at86rf230_write_frame, false);
+}
+
 static int
 at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
 {
 	struct at86rf230_local *lp = hw->priv;
 	struct at86rf230_state_change *ctx = &lp->tx;
 
-	void (*tx_complete)(void *context) = at86rf230_write_frame;
-
 	lp->tx_skb = skb;
+	lp->tx_retry = 0;
 
-	/* In ARET mode we need to go into STATE_TX_ARET_ON after we
-	 * are in STATE_TX_ON. The pfad differs here, so we change
-	 * the complete handler.
+	/* After 5 minutes in PLL and the same frequency we run again the
+	 * calibration loops which is recommended by at86rf2xx datasheets.
+	 *
+	 * The calibration is initiate by a state change from TRX_OFF
+	 * to TX_ON, the lp->cal_timeout should be reinit by state_delay
+	 * function then to start in the next 5 minutes.
 	 */
-	if (lp->tx_aret)
-		tx_complete = at86rf230_xmit_tx_on;
-
-	at86rf230_async_state_change(lp, ctx, STATE_TX_ON, tx_complete, false);
+	if (time_is_before_jiffies(lp->cal_timeout))
+		at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF,
					     at86rf230_xmit_start, false);
+	else
+		at86rf230_xmit_start(ctx);
 
 	return 0;
 }
@@ -979,6 +1044,9 @@ at86rf230_ed(struct ieee802154_hw *hw, u8 *level)
 static int
 at86rf230_start(struct ieee802154_hw *hw)
 {
+	struct at86rf230_local *lp = hw->priv;
+
+	lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT;
 	return at86rf230_sync_state_change(hw->priv, STATE_RX_AACK_ON);
 }
 
@@ -1059,6 +1127,8 @@ at86rf230_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
 	/* Wait for PLL */
 	usleep_range(lp->data->t_channel_switch,
 		     lp->data->t_channel_switch + 10);
+
+	lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT;
 	return rc;
 }
 
@@ -1528,25 +1598,37 @@ static void
 at86rf230_setup_spi_messages(struct at86rf230_local *lp)
 {
 	lp->state.lp = lp;
+	lp->state.irq = lp->spi->irq;
 	spi_message_init(&lp->state.msg);
 	lp->state.msg.context = &lp->state;
+	lp->state.trx.len = 2;
 	lp->state.trx.tx_buf = lp->state.buf;
 	lp->state.trx.rx_buf = lp->state.buf;
 	spi_message_add_tail(&lp->state.trx, &lp->state.msg);
+	hrtimer_init(&lp->state.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	lp->state.timer.function = at86rf230_async_state_timer;
 
 	lp->irq.lp = lp;
+	lp->irq.irq = lp->spi->irq;
 	spi_message_init(&lp->irq.msg);
 	lp->irq.msg.context = &lp->irq;
+	lp->irq.trx.len = 2;
 	lp->irq.trx.tx_buf = lp->irq.buf;
 	lp->irq.trx.rx_buf = lp->irq.buf;
 	spi_message_add_tail(&lp->irq.trx, &lp->irq.msg);
+	hrtimer_init(&lp->irq.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	lp->irq.timer.function = at86rf230_async_state_timer;
 
 	lp->tx.lp = lp;
+	lp->tx.irq = lp->spi->irq;
 	spi_message_init(&lp->tx.msg);
 	lp->tx.msg.context = &lp->tx;
+	lp->tx.trx.len = 2;
 	lp->tx.trx.tx_buf = lp->tx.buf;
 	lp->tx.trx.rx_buf = lp->tx.buf;
 	spi_message_add_tail(&lp->tx.trx, &lp->tx.msg);
+	hrtimer_init(&lp->tx.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	lp->tx.timer.function = at86rf230_async_state_timer;
 }
 
 static int at86rf230_probe(struct spi_device *spi)
@@ -1555,7 +1637,7 @@ static int at86rf230_probe(struct spi_device *spi)
 	struct at86rf230_local *lp;
 	unsigned int status;
 	int rc, irq_type, rstn, slp_tr;
-	u8 xtal_trim;
+	u8 xtal_trim = 0;
 
 	if (!spi->irq) {
 		dev_err(&spi->dev, "no IRQ specified\n");
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
index 40b0ab953937..8872ca103d06 100644
--- a/include/linux/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -30,6 +30,7 @@
 #define IEEE802154_MTU			127
 #define IEEE802154_ACK_PSDU_LEN		5
 #define IEEE802154_MIN_PSDU_LEN		9
+#define IEEE802154_FCS_LEN		2
 
 #define IEEE802154_PAN_ID_BROADCAST	0xffff
 #define IEEE802154_ADDR_SHORT_BROADCAST	0xffff
@@ -39,6 +40,7 @@
 
 #define IEEE802154_LIFS_PERIOD		40
 #define IEEE802154_SIFS_PERIOD		12
+#define IEEE802154_MAX_SIFS_FRAME_SIZE	18
 
 #define IEEE802154_MAX_CHANNEL		26
 #define IEEE802154_MAX_PAGE		31
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 6bb97df16d2d..e598ca096ec9 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -354,6 +354,9 @@ void l2cap_exit(void);
 int sco_init(void);
 void sco_exit(void);
 
+int mgmt_init(void);
+void mgmt_exit(void);
+
 void bt_sock_reclassify_lock(struct sock *sk, int proto);
 
 #endif /* __BLUETOOTH_H */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 8e54f825153c..d942fedbaedd 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -179,15 +179,6 @@ enum {
 	HCI_RESET,
 };
 
-/* BR/EDR and/or LE controller flags: the flags defined here should represent
- * states configured via debugfs for debugging and testing purposes only.
- */
-enum {
-	HCI_DUT_MODE,
-	HCI_FORCE_BREDR_SMP,
-	HCI_FORCE_STATIC_ADDR,
-};
-
 /*
  * BR/EDR and/or LE controller flags: the flags defined here should represent
  * states from the controller.
@@ -217,6 +208,7 @@ enum {
 	HCI_HS_ENABLED,
 	HCI_LE_ENABLED,
 	HCI_ADVERTISING,
+	HCI_ADVERTISING_CONNECTABLE,
 	HCI_CONNECTABLE,
 	HCI_DISCOVERABLE,
 	HCI_LIMITED_DISCOVERABLE,
@@ -225,13 +217,13 @@ enum {
 	HCI_FAST_CONNECTABLE,
 	HCI_BREDR_ENABLED,
 	HCI_LE_SCAN_INTERRUPTED,
-};
 
-/* A mask for the flags that are supposed to remain when a reset happens
- * or the HCI device is closed.
- */
-#define HCI_PERSISTENT_MASK (BIT(HCI_LE_SCAN) | BIT(HCI_PERIODIC_INQ) | \
-			     BIT(HCI_FAST_CONNECTABLE) | BIT(HCI_LE_ADV))
+	HCI_DUT_MODE,
+	HCI_FORCE_BREDR_SMP,
+	HCI_FORCE_STATIC_ADDR,
+
+	__HCI_NUM_FLAGS,
+};
 
 /* HCI timeouts */
 #define HCI_DISCONN_TIMEOUT	msecs_to_jiffies(2000)	/* 2 seconds */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index acec9140c3f9..6afbf5b014a1 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -76,6 +76,7 @@ struct discovery_state {
 	u8 last_adv_data[HCI_MAX_AD_LENGTH];
 	u8 last_adv_data_len;
 	bool report_invalid_rssi;
+	bool result_filtering;
 	s8 rssi;
 	u16 uuid_count;
 	u8 (*uuids)[16];
@@ -352,8 +353,7 @@ struct hci_dev {
 
 	struct rfkill *rfkill;
 
-	unsigned long dbg_flags;
-	unsigned long dev_flags;
+	DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);
 
 	struct delayed_work le_scan_disable;
 	struct delayed_work le_scan_restart;
@@ -501,6 +501,21 @@ extern struct list_head hci_cb_list;
 extern rwlock_t hci_dev_list_lock;
 extern struct mutex hci_cb_list_lock;
 
+#define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags)
+#define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags)
+#define hci_dev_change_flag(hdev, nr) change_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_flag(hdev, nr) test_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_and_set_flag(hdev, nr) test_and_set_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_and_clear_flag(hdev, nr) test_and_clear_bit((nr), (hdev)->dev_flags)
+#define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags)
+
+#define hci_dev_clear_volatile_flags(hdev)			\
+	do {							\
+		hci_dev_clear_flag(hdev, HCI_LE_SCAN);		\
+		hci_dev_clear_flag(hdev, HCI_LE_ADV);		\
+		hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);	\
+	} while (0)
+
 /* ----- HCI interface to upper protocols ----- */
 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
 int l2cap_disconn_ind(struct hci_conn *hcon);
@@ -525,6 +540,7 @@ static inline void discovery_init(struct hci_dev *hdev)
 
 static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
 {
+	hdev->discovery.result_filtering = false;
 	hdev->discovery.report_invalid_rssi = true;
 	hdev->discovery.rssi = HCI_RSSI_INVALID;
 	hdev->discovery.uuid_count = 0;
@@ -596,14 +612,14 @@ enum {
 static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
 {
 	struct hci_dev *hdev = conn->hdev;
-	return test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
+	return hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
 	       test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
 }
 
 static inline bool hci_conn_sc_enabled(struct hci_conn *conn)
 {
 	struct hci_dev *hdev = conn->hdev;
-	return test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
+	return hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
 	       test_bit(HCI_CONN_SC_ENABLED, &conn->flags);
 }
 
@@ -965,6 +981,8 @@ struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type);
 void hci_smp_irks_clear(struct hci_dev *hdev);
 
+bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+
 void hci_remote_oob_data_clear(struct hci_dev *hdev);
 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
 					  bdaddr_t *bdaddr, u8 bdaddr_type);
@@ -1021,10 +1039,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
 #define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE))
 #define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
 
-#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
-			       !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+#define hdev_is_powered(dev) (test_bit(HCI_UP, &(dev)->flags) && \
+			      !hci_dev_test_flag(dev, HCI_AUTO_OFF))
 #define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
-				test_bit(HCI_SC_ENABLED, &(dev)->dev_flags))
+				hci_dev_test_flag(dev, HCI_SC_ENABLED))
 
 /* ----- HCI protocols ----- */
 #define HCI_PROTO_DEFER 0x01
@@ -1271,6 +1289,27 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
 
 void hci_sock_dev_event(struct hci_dev *hdev, int event);
 
+#define HCI_MGMT_VAR_LEN	(1 << 0)
+#define HCI_MGMT_NO_HDEV	(1 << 1)
+#define HCI_MGMT_UNCONFIGURED	(1 << 2)
+
+struct hci_mgmt_handler {
+	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
+		     u16 data_len);
+	size_t data_len;
+	unsigned long flags;
+};
+
+struct hci_mgmt_chan {
+	struct list_head list;
+	unsigned short channel;
+	size_t handler_count;
+	const struct hci_mgmt_handler *handlers;
+};
+
+int hci_mgmt_chan_register(struct hci_mgmt_chan *c);
+void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
+
 /* Management interface */
 #define DISCOV_TYPE_BREDR (BIT(BDADDR_BREDR))
 #define DISCOV_TYPE_LE (BIT(BDADDR_LE_PUBLIC) | \
@@ -1290,7 +1329,9 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
 #define DISCOV_BREDR_INQUIRY_LEN 0x08
 #define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */
 
-int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
+int mgmt_control(struct hci_mgmt_chan *chan, struct sock *sk,
+		 struct msghdr *msg, size_t msglen);
+
 int mgmt_new_settings(struct hci_dev *hdev);
 void mgmt_index_added(struct hci_dev *hdev);
 void mgmt_index_removed(struct hci_dev *hdev);
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index fe8eef00e9ca..5bf6af9cee78 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -43,6 +43,7 @@
 #define MGMT_STATUS_CANCELLED		0x10
 #define MGMT_STATUS_INVALID_INDEX	0x11
 #define MGMT_STATUS_RFKILLED		0x12
+#define MGMT_STATUS_ALREADY_PAIRED	0x13
 
 struct mgmt_hdr {
 	__le16 opcode;
@@ -98,6 +99,7 @@ struct mgmt_rp_read_index_list {
 #define MGMT_SETTING_DEBUG_KEYS		0x00001000
 #define MGMT_SETTING_PRIVACY		0x00002000
 #define MGMT_SETTING_CONFIGURATION	0x00004000
+#define MGMT_SETTING_STATIC_ADDRESS	0x00008000
 
 #define MGMT_OP_READ_INFO		0x0004
 #define MGMT_READ_INFO_SIZE		0
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 20a4698e2255..70f9d945faf7 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -749,6 +749,13 @@ static int __init bt_init(void)
 		goto sock_err;
 	}
 
+	err = mgmt_init();
+	if (err < 0) {
+		sco_exit();
+		l2cap_exit();
+		goto sock_err;
+	}
+
 	return 0;
 
 sock_err:
@@ -763,6 +770,8 @@ error:
 
 static void __exit bt_exit(void)
 {
+	mgmt_exit();
+
 	sco_exit();
 
 	l2cap_exit();
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 91ebb9cb31de..ee5e59839b02 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -571,7 +571,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
 
 	list_for_each_entry(d, &hci_dev_list, list) {
 		if (!test_bit(HCI_UP, &d->flags) ||
-		    test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
+		    hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
 		    d->dev_type != HCI_BREDR)
 			continue;
 
@@ -700,7 +700,7 @@ static void hci_req_directed_advertising(struct hci_request *req,
 	 * and write a new random address. The flag will be set back on
 	 * as soon as the SET_ADV_ENABLE HCI command completes.
 	 */
-	clear_bit(HCI_LE_ADV, &hdev->dev_flags);
+	hci_dev_clear_flag(hdev, HCI_LE_ADV);
 
 	/* Set require_privacy to false so that the remote device has a
 	 * chance of identifying us.
@@ -734,7 +734,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 	int err;
 
 	/* Let's make sure that le is enabled.*/
-	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
 		if (lmp_le_capable(hdev))
 			return ERR_PTR(-ECONNREFUSED);
 
@@ -799,7 +799,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 	 * anyway have to disable it in order to start directed
 	 * advertising.
 	 */
-	if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
+	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
 		u8 enable = 0x00;
 		hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
 			    &enable);
@@ -810,7 +810,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 	/* If we're active scanning most controllers are unable
 	 * to initiate advertising. Simply reject the attempt.
 	 */
-	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
+	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
 	    hdev->le_scan_type == LE_SCAN_ACTIVE) {
 		skb_queue_purge(&req.cmd_q);
 		hci_conn_del(conn);
@@ -840,9 +840,9 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 	 * handler for scan disabling knows to set the correct discovery
 	 * state.
 	 */
-	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
+	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
 		hci_req_add_le_scan_disable(&req);
-		set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
+		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
 	}
 
 	hci_req_add_le_create_conn(&req, conn);
@@ -864,7 +864,7 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
 {
 	struct hci_conn *acl;
 
-	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
 		if (lmp_bredr_capable(hdev))
 			return ERR_PTR(-ECONNREFUSED);
 
@@ -942,7 +942,7 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
 	 * Connections is used and the link is encrypted with AES-CCM
 	 * using a P-256 authenticated combination key.
 	 */
-	if (test_bit(HCI_SC_ONLY, &conn->hdev->flags)) {
+	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
 		if (!hci_conn_sc_enabled(conn) ||
 		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
 		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index bba4c344c6e0..750d3445f2d2 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -80,7 +80,7 @@ static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
 	struct hci_dev *hdev = file->private_data;
 	char buf[3];
 
-	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
+	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
 	buf[1] = '\n';
 	buf[2] = '\0';
 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -106,7 +106,7 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
 	if (strtobool(buf, &enable))
 		return -EINVAL;
 
-	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
+	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
 		return -EALREADY;
 
 	hci_req_lock(hdev);
@@ -127,7 +127,7 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
 	if (err < 0)
 		return err;
 
-	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
+	hci_dev_change_flag(hdev, HCI_DUT_MODE);
 
 	return count;
 }
@@ -501,7 +501,7 @@ static void le_setup(struct hci_request *req)
 
 	/* LE-only controllers have LE implicitly enabled */
 	if (!lmp_bredr_capable(hdev))
-		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
 }
 
 static void hci_setup_event_mask(struct hci_request *req)
@@ -591,7 +591,7 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
 	if (lmp_bredr_capable(hdev))
 		bredr_setup(req);
 	else
-		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
 
 	if (lmp_le_capable(hdev))
 		le_setup(req);
@@ -617,7 +617,7 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
 	 */
 	hdev->max_page = 0x01;
 
-	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
 		u8 mode = 0x01;
 
 		hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
@@ -656,7 +656,7 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
 			    sizeof(cp), &cp);
 	}
 
-	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
+	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
 		u8 enable = 1;
 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
 			    &enable);
@@ -693,7 +693,7 @@ static void hci_set_le_support(struct hci_request *req)
 
 	memset(&cp, 0, sizeof(cp));
 
-	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
 		cp.le = 0x01;
 		cp.simul = 0x00;
 	}
@@ -881,7 +881,7 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt)
 		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
 
 	/* Enable Secure Connections if supported and configured */
-	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
+	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
 	    bredr_sc_enabled(hdev)) {
 		u8 support = 0x01;
 
@@ -901,7 +901,7 @@ static int __hci_init(struct hci_dev *hdev)
 	/* The Device Under Test (DUT) mode is special and available for
 	 * all controller types. So just create it early on.
 	 */
-	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
+	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
 		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
 				    &dut_mode_fops);
 	}
@@ -937,8 +937,8 @@ static int __hci_init(struct hci_dev *hdev)
 	 * So only when in setup phase or config phase, create the debugfs
 	 * entries and register the SMP channels.
 	 */
-	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
-	    !test_bit(HCI_CONFIG, &hdev->dev_flags))
+	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
+	    !hci_dev_test_flag(hdev, HCI_CONFIG))
 		return 0;
 
 	hci_debugfs_create_common(hdev);
@@ -1300,12 +1300,12 @@ int hci_inquiry(void __user *arg)
 	if (!hdev)
 		return -ENODEV;
 
-	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
 		err = -EBUSY;
 		goto done;
 	}
 
-	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
 		err = -EOPNOTSUPP;
 		goto done;
 	}
@@ -1315,7 +1315,7 @@ int hci_inquiry(void __user *arg)
 		goto done;
 	}
 
-	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
 		err = -EOPNOTSUPP;
 		goto done;
1321 } 1321 }
@@ -1387,17 +1387,17 @@ static int hci_dev_do_open(struct hci_dev *hdev)
1387 1387
1388 hci_req_lock(hdev); 1388 hci_req_lock(hdev);
1389 1389
1390 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) { 1390 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1391 ret = -ENODEV; 1391 ret = -ENODEV;
1392 goto done; 1392 goto done;
1393 } 1393 }
1394 1394
1395 if (!test_bit(HCI_SETUP, &hdev->dev_flags) && 1395 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1396 !test_bit(HCI_CONFIG, &hdev->dev_flags)) { 1396 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1397 /* Check for rfkill but allow the HCI setup stage to 1397 /* Check for rfkill but allow the HCI setup stage to
1398 * proceed (which in itself doesn't cause any RF activity). 1398 * proceed (which in itself doesn't cause any RF activity).
1399 */ 1399 */
1400 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) { 1400 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1401 ret = -ERFKILL; 1401 ret = -ERFKILL;
1402 goto done; 1402 goto done;
1403 } 1403 }
@@ -1414,7 +1414,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
1414 * This check is only valid for BR/EDR controllers 1414 * This check is only valid for BR/EDR controllers
1415 * since AMP controllers do not have an address. 1415 * since AMP controllers do not have an address.
1416 */ 1416 */
1417 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) && 1417 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1418 hdev->dev_type == HCI_BREDR && 1418 hdev->dev_type == HCI_BREDR &&
1419 !bacmp(&hdev->bdaddr, BDADDR_ANY) && 1419 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1420 !bacmp(&hdev->static_addr, BDADDR_ANY)) { 1420 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
@@ -1436,7 +1436,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
1436 atomic_set(&hdev->cmd_cnt, 1); 1436 atomic_set(&hdev->cmd_cnt, 1);
1437 set_bit(HCI_INIT, &hdev->flags); 1437 set_bit(HCI_INIT, &hdev->flags);
1438 1438
1439 if (test_bit(HCI_SETUP, &hdev->dev_flags)) { 1439 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1440 if (hdev->setup) 1440 if (hdev->setup)
1441 ret = hdev->setup(hdev); 1441 ret = hdev->setup(hdev);
1442 1442
@@ -1448,7 +1448,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
1448 */ 1448 */
1449 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || 1449 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1450 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks)) 1450 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1451 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags); 1451 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1452 1452
1453 /* For an unconfigured controller it is required to 1453 /* For an unconfigured controller it is required to
1454 * read at least the version information provided by 1454 * read at least the version information provided by
@@ -1458,11 +1458,11 @@ static int hci_dev_do_open(struct hci_dev *hdev)
1458 * also the original Bluetooth public device address 1458 * also the original Bluetooth public device address
1459 * will be read using the Read BD Address command. 1459 * will be read using the Read BD Address command.
1460 */ 1460 */
1461 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) 1461 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1462 ret = __hci_unconf_init(hdev); 1462 ret = __hci_unconf_init(hdev);
1463 } 1463 }
1464 1464
1465 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) { 1465 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1466 /* If public address change is configured, ensure that 1466 /* If public address change is configured, ensure that
1467 * the address gets programmed. If the driver does not 1467 * the address gets programmed. If the driver does not
1468 * support changing the public address, fail the power 1468 * support changing the public address, fail the power
@@ -1476,8 +1476,8 @@ static int hci_dev_do_open(struct hci_dev *hdev)
1476 } 1476 }
1477 1477
1478 if (!ret) { 1478 if (!ret) {
1479 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) && 1479 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1480 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) 1480 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1481 ret = __hci_init(hdev); 1481 ret = __hci_init(hdev);
1482 } 1482 }
1483 1483
@@ -1485,13 +1485,13 @@ static int hci_dev_do_open(struct hci_dev *hdev)
1485 1485
1486 if (!ret) { 1486 if (!ret) {
1487 hci_dev_hold(hdev); 1487 hci_dev_hold(hdev);
1488 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); 1488 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1489 set_bit(HCI_UP, &hdev->flags); 1489 set_bit(HCI_UP, &hdev->flags);
1490 hci_notify(hdev, HCI_DEV_UP); 1490 hci_notify(hdev, HCI_DEV_UP);
1491 if (!test_bit(HCI_SETUP, &hdev->dev_flags) && 1491 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1492 !test_bit(HCI_CONFIG, &hdev->dev_flags) && 1492 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1493 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) && 1493 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1494 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) && 1494 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1495 hdev->dev_type == HCI_BREDR) { 1495 hdev->dev_type == HCI_BREDR) {
1496 hci_dev_lock(hdev); 1496 hci_dev_lock(hdev);
1497 mgmt_powered(hdev, 1); 1497 mgmt_powered(hdev, 1);
@@ -1543,8 +1543,8 @@ int hci_dev_open(__u16 dev)
1543 * HCI_USER_CHANNEL will be set first before attempting to 1543 * HCI_USER_CHANNEL will be set first before attempting to
1544 * open the device. 1544 * open the device.
1545 */ 1545 */
1546 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) && 1546 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1547 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { 1547 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1548 err = -EOPNOTSUPP; 1548 err = -EOPNOTSUPP;
1549 goto done; 1549 goto done;
1550 } 1550 }
@@ -1554,7 +1554,7 @@ int hci_dev_open(__u16 dev)
1554 * particularly important if the setup procedure has not yet 1554 * particularly important if the setup procedure has not yet
1555 * completed. 1555 * completed.
1556 */ 1556 */
1557 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 1557 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1558 cancel_delayed_work(&hdev->power_off); 1558 cancel_delayed_work(&hdev->power_off);
1559 1559
1560 /* After this call it is guaranteed that the setup procedure 1560 /* After this call it is guaranteed that the setup procedure
@@ -1569,9 +1569,9 @@ int hci_dev_open(__u16 dev)
1569 * is in use this bit will be cleared again and userspace has 1569 * is in use this bit will be cleared again and userspace has
1570 * to explicitly enable it. 1570 * to explicitly enable it.
1571 */ 1571 */
1572 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) && 1572 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1573 !test_bit(HCI_MGMT, &hdev->dev_flags)) 1573 !hci_dev_test_flag(hdev, HCI_MGMT))
1574 set_bit(HCI_BONDABLE, &hdev->dev_flags); 1574 hci_dev_set_flag(hdev, HCI_BONDABLE);
1575 1575
1576 err = hci_dev_do_open(hdev); 1576 err = hci_dev_do_open(hdev);
1577 1577
@@ -1601,7 +1601,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
1601{ 1601{
1602 BT_DBG("%s %p", hdev->name, hdev); 1602 BT_DBG("%s %p", hdev->name, hdev);
1603 1603
1604 if (!test_bit(HCI_UNREGISTER, &hdev->dev_flags)) { 1604 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1605 /* Execute vendor specific shutdown routine */ 1605 /* Execute vendor specific shutdown routine */
1606 if (hdev->shutdown) 1606 if (hdev->shutdown)
1607 hdev->shutdown(hdev); 1607 hdev->shutdown(hdev);
@@ -1625,17 +1625,17 @@ static int hci_dev_do_close(struct hci_dev *hdev)
1625 if (hdev->discov_timeout > 0) { 1625 if (hdev->discov_timeout > 0) {
1626 cancel_delayed_work(&hdev->discov_off); 1626 cancel_delayed_work(&hdev->discov_off);
1627 hdev->discov_timeout = 0; 1627 hdev->discov_timeout = 0;
1628 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags); 1628 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1629 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); 1629 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1630 } 1630 }
1631 1631
1632 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) 1632 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1633 cancel_delayed_work(&hdev->service_cache); 1633 cancel_delayed_work(&hdev->service_cache);
1634 1634
1635 cancel_delayed_work_sync(&hdev->le_scan_disable); 1635 cancel_delayed_work_sync(&hdev->le_scan_disable);
1636 cancel_delayed_work_sync(&hdev->le_scan_restart); 1636 cancel_delayed_work_sync(&hdev->le_scan_restart);
1637 1637
1638 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 1638 if (hci_dev_test_flag(hdev, HCI_MGMT))
1639 cancel_delayed_work_sync(&hdev->rpa_expired); 1639 cancel_delayed_work_sync(&hdev->rpa_expired);
1640 1640
1641 /* Avoid potential lockdep warnings from the *_flush() calls by 1641 /* Avoid potential lockdep warnings from the *_flush() calls by
@@ -1647,7 +1647,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
1647 1647
1648 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 1648 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1649 1649
1650 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) { 1650 if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1651 if (hdev->dev_type == HCI_BREDR) 1651 if (hdev->dev_type == HCI_BREDR)
1652 mgmt_powered(hdev, 0); 1652 mgmt_powered(hdev, 0);
1653 } 1653 }
@@ -1667,8 +1667,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
1667 /* Reset device */ 1667 /* Reset device */
1668 skb_queue_purge(&hdev->cmd_q); 1668 skb_queue_purge(&hdev->cmd_q);
1669 atomic_set(&hdev->cmd_cnt, 1); 1669 atomic_set(&hdev->cmd_cnt, 1);
1670 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) && 1670 if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1671 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) && 1671 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1672 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { 1672 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1673 set_bit(HCI_INIT, &hdev->flags); 1673 set_bit(HCI_INIT, &hdev->flags);
1674 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT); 1674 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
@@ -1699,7 +1699,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
1699 1699
1700 /* Clear flags */ 1700 /* Clear flags */
1701 hdev->flags &= BIT(HCI_RAW); 1701 hdev->flags &= BIT(HCI_RAW);
1702 hdev->dev_flags &= ~HCI_PERSISTENT_MASK; 1702 hci_dev_clear_volatile_flags(hdev);
1703 1703
1704 /* Controller radio is available but is currently powered down */ 1704 /* Controller radio is available but is currently powered down */
1705 hdev->amp_status = AMP_STATUS_POWERED_DOWN; 1705 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
@@ -1723,12 +1723,12 @@ int hci_dev_close(__u16 dev)
1723 if (!hdev) 1723 if (!hdev)
1724 return -ENODEV; 1724 return -ENODEV;
1725 1725
1726 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { 1726 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1727 err = -EBUSY; 1727 err = -EBUSY;
1728 goto done; 1728 goto done;
1729 } 1729 }
1730 1730
1731 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 1731 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1732 cancel_delayed_work(&hdev->power_off); 1732 cancel_delayed_work(&hdev->power_off);
1733 1733
1734 err = hci_dev_do_close(hdev); 1734 err = hci_dev_do_close(hdev);
@@ -1786,12 +1786,12 @@ int hci_dev_reset(__u16 dev)
1786 goto done; 1786 goto done;
1787 } 1787 }
1788 1788
1789 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { 1789 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1790 err = -EBUSY; 1790 err = -EBUSY;
1791 goto done; 1791 goto done;
1792 } 1792 }
1793 1793
1794 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) { 1794 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1795 err = -EOPNOTSUPP; 1795 err = -EOPNOTSUPP;
1796 goto done; 1796 goto done;
1797 } 1797 }
@@ -1812,12 +1812,12 @@ int hci_dev_reset_stat(__u16 dev)
1812 if (!hdev) 1812 if (!hdev)
1813 return -ENODEV; 1813 return -ENODEV;
1814 1814
1815 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { 1815 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1816 ret = -EBUSY; 1816 ret = -EBUSY;
1817 goto done; 1817 goto done;
1818 } 1818 }
1819 1819
1820 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) { 1820 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1821 ret = -EOPNOTSUPP; 1821 ret = -EOPNOTSUPP;
1822 goto done; 1822 goto done;
1823 } 1823 }
@@ -1836,29 +1836,29 @@ static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1836 BT_DBG("%s scan 0x%02x", hdev->name, scan); 1836 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1837 1837
1838 if ((scan & SCAN_PAGE)) 1838 if ((scan & SCAN_PAGE))
1839 conn_changed = !test_and_set_bit(HCI_CONNECTABLE, 1839 conn_changed = !hci_dev_test_and_set_flag(hdev,
1840 &hdev->dev_flags); 1840 HCI_CONNECTABLE);
1841 else 1841 else
1842 conn_changed = test_and_clear_bit(HCI_CONNECTABLE, 1842 conn_changed = hci_dev_test_and_clear_flag(hdev,
1843 &hdev->dev_flags); 1843 HCI_CONNECTABLE);
1844 1844
1845 if ((scan & SCAN_INQUIRY)) { 1845 if ((scan & SCAN_INQUIRY)) {
1846 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE, 1846 discov_changed = !hci_dev_test_and_set_flag(hdev,
1847 &hdev->dev_flags); 1847 HCI_DISCOVERABLE);
1848 } else { 1848 } else {
1849 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); 1849 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1850 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE, 1850 discov_changed = hci_dev_test_and_clear_flag(hdev,
1851 &hdev->dev_flags); 1851 HCI_DISCOVERABLE);
1852 } 1852 }
1853 1853
1854 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 1854 if (!hci_dev_test_flag(hdev, HCI_MGMT))
1855 return; 1855 return;
1856 1856
1857 if (conn_changed || discov_changed) { 1857 if (conn_changed || discov_changed) {
1858 /* In case this was disabled through mgmt */ 1858 /* In case this was disabled through mgmt */
1859 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags); 1859 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1860 1860
1861 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) 1861 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1862 mgmt_update_adv_data(hdev); 1862 mgmt_update_adv_data(hdev);
1863 1863
1864 mgmt_new_settings(hdev); 1864 mgmt_new_settings(hdev);
@@ -1878,12 +1878,12 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
1878 if (!hdev) 1878 if (!hdev)
1879 return -ENODEV; 1879 return -ENODEV;
1880 1880
1881 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { 1881 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1882 err = -EBUSY; 1882 err = -EBUSY;
1883 goto done; 1883 goto done;
1884 } 1884 }
1885 1885
1886 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) { 1886 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1887 err = -EOPNOTSUPP; 1887 err = -EOPNOTSUPP;
1888 goto done; 1888 goto done;
1889 } 1889 }
@@ -1893,7 +1893,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
1893 goto done; 1893 goto done;
1894 } 1894 }
1895 1895
1896 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { 1896 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1897 err = -EOPNOTSUPP; 1897 err = -EOPNOTSUPP;
1898 goto done; 1898 goto done;
1899 } 1899 }
@@ -1997,7 +1997,7 @@ int hci_get_dev_list(void __user *arg)
1997 * is running, but in that case still indicate that the 1997 * is running, but in that case still indicate that the
1998 * device is actually down. 1998 * device is actually down.
1999 */ 1999 */
2000 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 2000 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2001 flags &= ~BIT(HCI_UP); 2001 flags &= ~BIT(HCI_UP);
2002 2002
2003 (dr + n)->dev_id = hdev->id; 2003 (dr + n)->dev_id = hdev->id;
@@ -2035,7 +2035,7 @@ int hci_get_dev_info(void __user *arg)
2035 * is running, but in that case still indicate that the 2035 * is running, but in that case still indicate that the
2036 * device is actually down. 2036 * device is actually down.
2037 */ 2037 */
2038 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 2038 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2039 flags = hdev->flags & ~BIT(HCI_UP); 2039 flags = hdev->flags & ~BIT(HCI_UP);
2040 else 2040 else
2041 flags = hdev->flags; 2041 flags = hdev->flags;
@@ -2078,16 +2078,16 @@ static int hci_rfkill_set_block(void *data, bool blocked)
2078 2078
2079 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); 2079 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2080 2080
2081 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) 2081 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2082 return -EBUSY; 2082 return -EBUSY;
2083 2083
2084 if (blocked) { 2084 if (blocked) {
2085 set_bit(HCI_RFKILLED, &hdev->dev_flags); 2085 hci_dev_set_flag(hdev, HCI_RFKILLED);
2086 if (!test_bit(HCI_SETUP, &hdev->dev_flags) && 2086 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2087 !test_bit(HCI_CONFIG, &hdev->dev_flags)) 2087 !hci_dev_test_flag(hdev, HCI_CONFIG))
2088 hci_dev_do_close(hdev); 2088 hci_dev_do_close(hdev);
2089 } else { 2089 } else {
2090 clear_bit(HCI_RFKILLED, &hdev->dev_flags); 2090 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2091 } 2091 }
2092 2092
2093 return 0; 2093 return 0;
@@ -2116,23 +2116,23 @@ static void hci_power_on(struct work_struct *work)
2116 * ignored and they need to be checked now. If they are still 2116 * ignored and they need to be checked now. If they are still
2117 * valid, it is important to turn the device back off. 2117 * valid, it is important to turn the device back off.
2118 */ 2118 */
2119 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) || 2119 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2120 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) || 2120 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2121 (hdev->dev_type == HCI_BREDR && 2121 (hdev->dev_type == HCI_BREDR &&
2122 !bacmp(&hdev->bdaddr, BDADDR_ANY) && 2122 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2123 !bacmp(&hdev->static_addr, BDADDR_ANY))) { 2123 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2124 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags); 2124 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2125 hci_dev_do_close(hdev); 2125 hci_dev_do_close(hdev);
2126 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) { 2126 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2127 queue_delayed_work(hdev->req_workqueue, &hdev->power_off, 2127 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2128 HCI_AUTO_OFF_TIMEOUT); 2128 HCI_AUTO_OFF_TIMEOUT);
2129 } 2129 }
2130 2130
2131 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) { 2131 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2132 /* For unconfigured devices, set the HCI_RAW flag 2132 /* For unconfigured devices, set the HCI_RAW flag
2133 * so that userspace can easily identify them. 2133 * so that userspace can easily identify them.
2134 */ 2134 */
2135 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) 2135 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2136 set_bit(HCI_RAW, &hdev->flags); 2136 set_bit(HCI_RAW, &hdev->flags);
2137 2137
2138 /* For fully configured devices, this will send 2138 /* For fully configured devices, this will send
@@ -2143,11 +2143,11 @@ static void hci_power_on(struct work_struct *work)
2143 * and no event will be send. 2143 * and no event will be send.
2144 */ 2144 */
2145 mgmt_index_added(hdev); 2145 mgmt_index_added(hdev);
2146 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) { 2146 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2147 /* When the controller is now configured, then it 2147 /* When the controller is now configured, then it
2148 * is important to clear the HCI_RAW flag. 2148 * is important to clear the HCI_RAW flag.
2149 */ 2149 */
2150 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) 2150 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2151 clear_bit(HCI_RAW, &hdev->flags); 2151 clear_bit(HCI_RAW, &hdev->flags);
2152 2152
2153 /* Powering on the controller with HCI_CONFIG set only 2153 /* Powering on the controller with HCI_CONFIG set only
@@ -2516,6 +2516,42 @@ void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2516 } 2516 }
2517} 2517}
2518 2518
2519bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2520{
2521 struct smp_ltk *k;
2522 struct smp_irk *irk;
2523 u8 addr_type;
2524
2525 if (type == BDADDR_BREDR) {
2526 if (hci_find_link_key(hdev, bdaddr))
2527 return true;
2528 return false;
2529 }
2530
2531 /* Convert to HCI addr type which struct smp_ltk uses */
2532 if (type == BDADDR_LE_PUBLIC)
2533 addr_type = ADDR_LE_DEV_PUBLIC;
2534 else
2535 addr_type = ADDR_LE_DEV_RANDOM;
2536
2537 irk = hci_get_irk(hdev, bdaddr, addr_type);
2538 if (irk) {
2539 bdaddr = &irk->bdaddr;
2540 addr_type = irk->addr_type;
2541 }
2542
2543 rcu_read_lock();
2544 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2545 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2546 rcu_read_unlock();
2547 return true;
2548 }
2549 }
2550 rcu_read_unlock();
2551
2552 return false;
2553}
2554
2519/* HCI command timer function */ 2555/* HCI command timer function */
2520static void hci_cmd_timeout(struct work_struct *work) 2556static void hci_cmd_timeout(struct work_struct *work)
2521{ 2557{
@@ -2950,7 +2986,7 @@ static void le_scan_restart_work(struct work_struct *work)
2950 BT_DBG("%s", hdev->name); 2986 BT_DBG("%s", hdev->name);
2951 2987
2952 /* If controller is not scanning we are done. */ 2988 /* If controller is not scanning we are done. */
2953 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags)) 2989 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2954 return; 2990 return;
2955 2991
2956 hci_req_init(&req, hdev); 2992 hci_req_init(&req, hdev);
@@ -2983,9 +3019,9 @@ static void le_scan_restart_work(struct work_struct *work)
2983void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, 3019void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2984 u8 *bdaddr_type) 3020 u8 *bdaddr_type)
2985{ 3021{
2986 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) || 3022 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2987 !bacmp(&hdev->bdaddr, BDADDR_ANY) || 3023 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2988 (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) && 3024 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2989 bacmp(&hdev->static_addr, BDADDR_ANY))) { 3025 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2990 bacpy(bdaddr, &hdev->static_addr); 3026 bacpy(bdaddr, &hdev->static_addr);
2991 *bdaddr_type = ADDR_LE_DEV_RANDOM; 3027 *bdaddr_type = ADDR_LE_DEV_RANDOM;
@@ -3153,16 +3189,16 @@ int hci_register_dev(struct hci_dev *hdev)
3153 } 3189 }
3154 3190
3155 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) 3191 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3156 set_bit(HCI_RFKILLED, &hdev->dev_flags); 3192 hci_dev_set_flag(hdev, HCI_RFKILLED);
3157 3193
3158 set_bit(HCI_SETUP, &hdev->dev_flags); 3194 hci_dev_set_flag(hdev, HCI_SETUP);
3159 set_bit(HCI_AUTO_OFF, &hdev->dev_flags); 3195 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3160 3196
3161 if (hdev->dev_type == HCI_BREDR) { 3197 if (hdev->dev_type == HCI_BREDR) {
3162 /* Assume BR/EDR support until proven otherwise (such as 3198 /* Assume BR/EDR support until proven otherwise (such as
3163 * through reading supported features during init. 3199 * through reading supported features during init.
3164 */ 3200 */
3165 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags); 3201 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3166 } 3202 }
3167 3203
3168 write_lock(&hci_dev_list_lock); 3204 write_lock(&hci_dev_list_lock);
@@ -3173,7 +3209,7 @@ int hci_register_dev(struct hci_dev *hdev)
3173 * and should not be included in normal operation. 3209 * and should not be included in normal operation.
3174 */ 3210 */
3175 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) 3211 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3176 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags); 3212 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3177 3213
3178 hci_notify(hdev, HCI_DEV_REG); 3214 hci_notify(hdev, HCI_DEV_REG);
3179 hci_dev_hold(hdev); 3215 hci_dev_hold(hdev);
@@ -3199,7 +3235,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
3199 3235
3200 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 3236 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3201 3237
3202 set_bit(HCI_UNREGISTER, &hdev->dev_flags); 3238 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3203 3239
3204 id = hdev->id; 3240 id = hdev->id;
3205 3241
@@ -3215,8 +3251,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
3215 cancel_work_sync(&hdev->power_on); 3251 cancel_work_sync(&hdev->power_on);
3216 3252
3217 if (!test_bit(HCI_INIT, &hdev->flags) && 3253 if (!test_bit(HCI_INIT, &hdev->flags) &&
3218 !test_bit(HCI_SETUP, &hdev->dev_flags) && 3254 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3219 !test_bit(HCI_CONFIG, &hdev->dev_flags)) { 3255 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3220 hci_dev_lock(hdev); 3256 hci_dev_lock(hdev);
3221 mgmt_index_removed(hdev); 3257 mgmt_index_removed(hdev);
3222 hci_dev_unlock(hdev); 3258 hci_dev_unlock(hdev);
@@ -3890,7 +3926,7 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3890 3926
3891static void __check_timeout(struct hci_dev *hdev, unsigned int cnt) 3927static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3892{ 3928{
3893 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) { 3929 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3894 /* ACL tx timeout must be longer than maximum 3930 /* ACL tx timeout must be longer than maximum
3895 * link supervision timeout (40.9 seconds) */ 3931 * link supervision timeout (40.9 seconds) */
3896 if (!cnt && time_after(jiffies, hdev->acl_last_tx + 3932 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
@@ -4073,7 +4109,7 @@ static void hci_sched_le(struct hci_dev *hdev)
4073 if (!hci_conn_num(hdev, LE_LINK)) 4109 if (!hci_conn_num(hdev, LE_LINK))
4074 return; 4110 return;
4075 4111
4076 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) { 4112 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4077 /* LE tx timeout must be longer than maximum 4113 /* LE tx timeout must be longer than maximum
4078 * link supervision timeout (40.9 seconds) */ 4114 * link supervision timeout (40.9 seconds) */
4079 if (!hdev->le_cnt && hdev->le_pkts && 4115 if (!hdev->le_cnt && hdev->le_pkts &&
@@ -4121,7 +4157,7 @@ static void hci_tx_work(struct work_struct *work)
4121 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt, 4157 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4122 hdev->sco_cnt, hdev->le_cnt); 4158 hdev->sco_cnt, hdev->le_cnt);
4123 4159
4124 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { 4160 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4125 /* Schedule queues and send stuff to HCI driver */ 4161 /* Schedule queues and send stuff to HCI driver */
4126 hci_sched_acl(hdev); 4162 hci_sched_acl(hdev);
4127 hci_sched_sco(hdev); 4163 hci_sched_sco(hdev);
@@ -4318,7 +4354,7 @@ static void hci_rx_work(struct work_struct *work)
4318 hci_send_to_sock(hdev, skb); 4354 hci_send_to_sock(hdev, skb);
4319 } 4355 }
4320 4356
4321 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { 4357 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4322 kfree_skb(skb); 4358 kfree_skb(skb);
4323 continue; 4359 continue;
4324 } 4360 }
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index 65261e5d4b84..bc801e9db834 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -247,7 +247,7 @@ static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
247 struct hci_dev *hdev = file->private_data; 247 struct hci_dev *hdev = file->private_data;
248 char buf[3]; 248 char buf[3];
249 249
250 buf[0] = test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N'; 250 buf[0] = hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS) ? 'Y': 'N';
251 buf[1] = '\n'; 251 buf[1] = '\n';
252 buf[2] = '\0'; 252 buf[2] = '\0';
253 return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 253 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -265,7 +265,7 @@ static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
265 struct hci_dev *hdev = file->private_data; 265 struct hci_dev *hdev = file->private_data;
266 char buf[3]; 266 char buf[3];
267 267
268 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N'; 268 buf[0] = hci_dev_test_flag(hdev, HCI_SC_ONLY) ? 'Y': 'N';
269 buf[1] = '\n'; 269 buf[1] = '\n';
270 buf[2] = '\0'; 270 buf[2] = '\0';
271 return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 271 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -679,7 +679,7 @@ static ssize_t force_static_address_read(struct file *file,
679 struct hci_dev *hdev = file->private_data; 679 struct hci_dev *hdev = file->private_data;
680 char buf[3]; 680 char buf[3];
681 681
682 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N'; 682 buf[0] = hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ? 'Y': 'N';
683 buf[1] = '\n'; 683 buf[1] = '\n';
684 buf[2] = '\0'; 684 buf[2] = '\0';
685 return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 685 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -704,10 +704,10 @@ static ssize_t force_static_address_write(struct file *file,
704 if (strtobool(buf, &enable)) 704 if (strtobool(buf, &enable))
705 return -EINVAL; 705 return -EINVAL;
706 706
707 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags)) 707 if (enable == hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR))
708 return -EALREADY; 708 return -EALREADY;
709 709
710 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags); 710 hci_dev_change_flag(hdev, HCI_FORCE_STATIC_ADDR);
711 711
712 return count; 712 return count;
713} 713}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 39653d46932b..c7376cd42b1c 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -70,7 +70,7 @@ static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
70 if (status) 70 if (status)
71 return; 71 return;
72 72
73 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags); 73 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
74} 74}
75 75
76static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) 76static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
@@ -82,7 +82,7 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
82 if (status) 82 if (status)
83 return; 83 return;
84 84
85 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags); 85 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
86 86
87 hci_conn_check_pending(hdev); 87 hci_conn_check_pending(hdev);
88} 88}
@@ -198,7 +198,7 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
198 return; 198 return;
199 199
200 /* Reset all non-persistent flags */ 200 /* Reset all non-persistent flags */
201 hdev->dev_flags &= ~HCI_PERSISTENT_MASK; 201 hci_dev_clear_volatile_flags(hdev);
202 202
203 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 203 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
204 204
@@ -265,7 +265,7 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
265 265
266 hci_dev_lock(hdev); 266 hci_dev_lock(hdev);
267 267
268 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 268 if (hci_dev_test_flag(hdev, HCI_MGMT))
269 mgmt_set_local_name_complete(hdev, sent, status); 269 mgmt_set_local_name_complete(hdev, sent, status);
270 else if (!status) 270 else if (!status)
271 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); 271 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
@@ -282,8 +282,8 @@ static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
282 if (rp->status) 282 if (rp->status)
283 return; 283 return;
284 284
285 if (test_bit(HCI_SETUP, &hdev->dev_flags) || 285 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
286 test_bit(HCI_CONFIG, &hdev->dev_flags)) 286 hci_dev_test_flag(hdev, HCI_CONFIG))
287 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH); 287 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
288} 288}
289 289
@@ -309,7 +309,7 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
309 clear_bit(HCI_AUTH, &hdev->flags); 309 clear_bit(HCI_AUTH, &hdev->flags);
310 } 310 }
311 311
312 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 312 if (hci_dev_test_flag(hdev, HCI_MGMT))
313 mgmt_auth_enable_complete(hdev, status); 313 mgmt_auth_enable_complete(hdev, status);
314 314
315 hci_dev_unlock(hdev); 315 hci_dev_unlock(hdev);
@@ -404,7 +404,7 @@ static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
404 if (status == 0) 404 if (status == 0)
405 memcpy(hdev->dev_class, sent, 3); 405 memcpy(hdev->dev_class, sent, 3);
406 406
407 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 407 if (hci_dev_test_flag(hdev, HCI_MGMT))
408 mgmt_set_class_of_dev_complete(hdev, sent, status); 408 mgmt_set_class_of_dev_complete(hdev, sent, status);
409 409
410 hci_dev_unlock(hdev); 410 hci_dev_unlock(hdev);
@@ -497,13 +497,13 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
497 hdev->features[1][0] &= ~LMP_HOST_SSP; 497 hdev->features[1][0] &= ~LMP_HOST_SSP;
498 } 498 }
499 499
500 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 500 if (hci_dev_test_flag(hdev, HCI_MGMT))
501 mgmt_ssp_enable_complete(hdev, sent->mode, status); 501 mgmt_ssp_enable_complete(hdev, sent->mode, status);
502 else if (!status) { 502 else if (!status) {
503 if (sent->mode) 503 if (sent->mode)
504 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags); 504 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
505 else 505 else
506 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags); 506 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
507 } 507 }
508 508
509 hci_dev_unlock(hdev); 509 hci_dev_unlock(hdev);
@@ -529,11 +529,11 @@ static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
529 hdev->features[1][0] &= ~LMP_HOST_SC; 529 hdev->features[1][0] &= ~LMP_HOST_SC;
530 } 530 }
531 531
532 if (!test_bit(HCI_MGMT, &hdev->dev_flags) && !status) { 532 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
533 if (sent->support) 533 if (sent->support)
534 set_bit(HCI_SC_ENABLED, &hdev->dev_flags); 534 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
535 else 535 else
536 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags); 536 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
537 } 537 }
538 538
539 hci_dev_unlock(hdev); 539 hci_dev_unlock(hdev);
@@ -548,8 +548,8 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
548 if (rp->status) 548 if (rp->status)
549 return; 549 return;
550 550
551 if (test_bit(HCI_SETUP, &hdev->dev_flags) || 551 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
552 test_bit(HCI_CONFIG, &hdev->dev_flags)) { 552 hci_dev_test_flag(hdev, HCI_CONFIG)) {
553 hdev->hci_ver = rp->hci_ver; 553 hdev->hci_ver = rp->hci_ver;
554 hdev->hci_rev = __le16_to_cpu(rp->hci_rev); 554 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
555 hdev->lmp_ver = rp->lmp_ver; 555 hdev->lmp_ver = rp->lmp_ver;
@@ -568,8 +568,8 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev,
568 if (rp->status) 568 if (rp->status)
569 return; 569 return;
570 570
571 if (test_bit(HCI_SETUP, &hdev->dev_flags) || 571 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
572 test_bit(HCI_CONFIG, &hdev->dev_flags)) 572 hci_dev_test_flag(hdev, HCI_CONFIG))
573 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); 573 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
574} 574}
575 575
@@ -691,7 +691,7 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
691 if (test_bit(HCI_INIT, &hdev->flags)) 691 if (test_bit(HCI_INIT, &hdev->flags))
692 bacpy(&hdev->bdaddr, &rp->bdaddr); 692 bacpy(&hdev->bdaddr, &rp->bdaddr);
693 693
694 if (test_bit(HCI_SETUP, &hdev->dev_flags)) 694 if (hci_dev_test_flag(hdev, HCI_SETUP))
695 bacpy(&hdev->setup_addr, &rp->bdaddr); 695 bacpy(&hdev->setup_addr, &rp->bdaddr);
696} 696}
697 697
@@ -900,7 +900,7 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
900 900
901 hci_dev_lock(hdev); 901 hci_dev_lock(hdev);
902 902
903 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 903 if (hci_dev_test_flag(hdev, HCI_MGMT))
904 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status); 904 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
905 905
906 if (rp->status) 906 if (rp->status)
@@ -926,7 +926,7 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
926 926
927 hci_dev_lock(hdev); 927 hci_dev_lock(hdev);
928 928
929 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 929 if (hci_dev_test_flag(hdev, HCI_MGMT))
930 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, 930 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
931 rp->status); 931 rp->status);
932 932
@@ -985,7 +985,7 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
985 985
986 hci_dev_lock(hdev); 986 hci_dev_lock(hdev);
987 987
988 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 988 if (hci_dev_test_flag(hdev, HCI_MGMT))
989 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0, 989 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
990 rp->status); 990 rp->status);
991 991
@@ -1001,7 +1001,7 @@ static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1001 1001
1002 hci_dev_lock(hdev); 1002 hci_dev_lock(hdev);
1003 1003
1004 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 1004 if (hci_dev_test_flag(hdev, HCI_MGMT))
1005 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr, 1005 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1006 ACL_LINK, 0, rp->status); 1006 ACL_LINK, 0, rp->status);
1007 1007
@@ -1016,7 +1016,7 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1016 1016
1017 hci_dev_lock(hdev); 1017 hci_dev_lock(hdev);
1018 1018
1019 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 1019 if (hci_dev_test_flag(hdev, HCI_MGMT))
1020 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 1020 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1021 0, rp->status); 1021 0, rp->status);
1022 1022
@@ -1032,7 +1032,7 @@ static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1032 1032
1033 hci_dev_lock(hdev); 1033 hci_dev_lock(hdev);
1034 1034
1035 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 1035 if (hci_dev_test_flag(hdev, HCI_MGMT))
1036 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr, 1036 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1037 ACL_LINK, 0, rp->status); 1037 ACL_LINK, 0, rp->status);
1038 1038
@@ -1109,7 +1109,7 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1109 if (*sent) { 1109 if (*sent) {
1110 struct hci_conn *conn; 1110 struct hci_conn *conn;
1111 1111
1112 set_bit(HCI_LE_ADV, &hdev->dev_flags); 1112 hci_dev_set_flag(hdev, HCI_LE_ADV);
1113 1113
1114 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); 1114 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1115 if (conn) 1115 if (conn)
@@ -1117,7 +1117,7 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1117 &conn->le_conn_timeout, 1117 &conn->le_conn_timeout,
1118 conn->conn_timeout); 1118 conn->conn_timeout);
1119 } else { 1119 } else {
1120 clear_bit(HCI_LE_ADV, &hdev->dev_flags); 1120 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1121 } 1121 }
1122 1122
1123 hci_dev_unlock(hdev); 1123 hci_dev_unlock(hdev);
@@ -1192,7 +1192,7 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1192 1192
1193 switch (cp->enable) { 1193 switch (cp->enable) {
1194 case LE_SCAN_ENABLE: 1194 case LE_SCAN_ENABLE:
1195 set_bit(HCI_LE_SCAN, &hdev->dev_flags); 1195 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1196 if (hdev->le_scan_type == LE_SCAN_ACTIVE) 1196 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1197 clear_pending_adv_report(hdev); 1197 clear_pending_adv_report(hdev);
1198 break; 1198 break;
@@ -1217,7 +1217,7 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1217 */ 1217 */
1218 cancel_delayed_work(&hdev->le_scan_disable); 1218 cancel_delayed_work(&hdev->le_scan_disable);
1219 1219
1220 clear_bit(HCI_LE_SCAN, &hdev->dev_flags); 1220 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1221 1221
1222 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we 1222 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1223 * interrupted scanning due to a connect request. Mark 1223 * interrupted scanning due to a connect request. Mark
@@ -1226,10 +1226,9 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1226 * been disabled because of active scanning, so 1226 * been disabled because of active scanning, so
1227 * re-enable it again if necessary. 1227 * re-enable it again if necessary.
1228 */ 1228 */
1229 if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED, 1229 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1230 &hdev->dev_flags))
1231 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 1230 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1232 else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) && 1231 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1233 hdev->discovery.state == DISCOVERY_FINDING) 1232 hdev->discovery.state == DISCOVERY_FINDING)
1234 mgmt_reenable_advertising(hdev); 1233 mgmt_reenable_advertising(hdev);
1235 1234
@@ -1388,11 +1387,11 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1388 1387
1389 if (sent->le) { 1388 if (sent->le) {
1390 hdev->features[1][0] |= LMP_HOST_LE; 1389 hdev->features[1][0] |= LMP_HOST_LE;
1391 set_bit(HCI_LE_ENABLED, &hdev->dev_flags); 1390 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1392 } else { 1391 } else {
1393 hdev->features[1][0] &= ~LMP_HOST_LE; 1392 hdev->features[1][0] &= ~LMP_HOST_LE;
1394 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags); 1393 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1395 clear_bit(HCI_ADVERTISING, &hdev->dev_flags); 1394 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1396 } 1395 }
1397 1396
1398 if (sent->simul) 1397 if (sent->simul)
@@ -1769,7 +1768,7 @@ static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1769 1768
1770 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1769 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1771 1770
1772 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 1771 if (hci_dev_test_flag(hdev, HCI_MGMT))
1773 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0); 1772 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1774 1773
1775 if (!conn) 1774 if (!conn)
@@ -2118,7 +2117,7 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2118 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 2117 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2119 wake_up_bit(&hdev->flags, HCI_INQUIRY); 2118 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2120 2119
2121 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 2120 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2122 return; 2121 return;
2123 2122
2124 hci_dev_lock(hdev); 2123 hci_dev_lock(hdev);
@@ -2154,7 +2153,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2154 if (!num_rsp) 2153 if (!num_rsp)
2155 return; 2154 return;
2156 2155
2157 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) 2156 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2158 return; 2157 return;
2159 2158
2160 hci_dev_lock(hdev); 2159 hci_dev_lock(hdev);
@@ -2304,8 +2303,8 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2304 * connection. These features are only touched through mgmt so 2303 * connection. These features are only touched through mgmt so
2305 * only do the checks if HCI_MGMT is set. 2304 * only do the checks if HCI_MGMT is set.
2306 */ 2305 */
2307 if (test_bit(HCI_MGMT, &hdev->dev_flags) && 2306 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2308 !test_bit(HCI_CONNECTABLE, &hdev->dev_flags) && 2307 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2309 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr, 2308 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2310 BDADDR_BREDR)) { 2309 BDADDR_BREDR)) {
2311 hci_reject_conn(hdev, &ev->bdaddr); 2310 hci_reject_conn(hdev, &ev->bdaddr);
@@ -2542,7 +2541,7 @@ static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2542 2541
2543 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2542 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2544 2543
2545 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 2544 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2546 goto check_auth; 2545 goto check_auth;
2547 2546
2548 if (ev->status == 0) 2547 if (ev->status == 0)
@@ -2608,7 +2607,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2608 * whenever the encryption procedure fails. 2607 * whenever the encryption procedure fails.
2609 */ 2608 */
2610 if (ev->status && conn->type == LE_LINK) 2609 if (ev->status && conn->type == LE_LINK)
2611 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); 2610 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2612 2611
2613 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 2612 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2614 2613
@@ -2626,7 +2625,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2626 * connections that are not encrypted with AES-CCM 2625 * connections that are not encrypted with AES-CCM
2627 * using a P-256 authenticated combination key. 2626 * using a P-256 authenticated combination key.
2628 */ 2627 */
2629 if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && 2628 if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
2630 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) || 2629 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2631 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) { 2630 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2632 hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE); 2631 hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
@@ -3331,11 +3330,11 @@ static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3331 hci_conn_drop(conn); 3330 hci_conn_drop(conn);
3332 } 3331 }
3333 3332
3334 if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) && 3333 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3335 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) { 3334 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3336 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 3335 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3337 sizeof(ev->bdaddr), &ev->bdaddr); 3336 sizeof(ev->bdaddr), &ev->bdaddr);
3338 } else if (test_bit(HCI_MGMT, &hdev->dev_flags)) { 3337 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
3339 u8 secure; 3338 u8 secure;
3340 3339
3341 if (conn->pending_sec_level == BT_SECURITY_HIGH) 3340 if (conn->pending_sec_level == BT_SECURITY_HIGH)
@@ -3391,7 +3390,7 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3391 3390
3392 BT_DBG("%s", hdev->name); 3391 BT_DBG("%s", hdev->name);
3393 3392
3394 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 3393 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3395 return; 3394 return;
3396 3395
3397 hci_dev_lock(hdev); 3396 hci_dev_lock(hdev);
@@ -3465,7 +3464,7 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3465 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 3464 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3466 conn_set_key(conn, ev->key_type, conn->pin_length); 3465 conn_set_key(conn, ev->key_type, conn->pin_length);
3467 3466
3468 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 3467 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3469 goto unlock; 3468 goto unlock;
3470 3469
3471 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key, 3470 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
@@ -3487,7 +3486,7 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3487 * store_hint being 0). 3486 * store_hint being 0).
3488 */ 3487 */
3489 if (key->type == HCI_LK_DEBUG_COMBINATION && 3488 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3490 !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) { 3489 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
3491 list_del_rcu(&key->list); 3490 list_del_rcu(&key->list);
3492 kfree_rcu(key, rcu); 3491 kfree_rcu(key, rcu);
3493 goto unlock; 3492 goto unlock;
@@ -3570,7 +3569,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3570 if (!num_rsp) 3569 if (!num_rsp)
3571 return; 3570 return;
3572 3571
3573 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) 3572 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3574 return; 3573 return;
3575 3574
3576 hci_dev_lock(hdev); 3575 hci_dev_lock(hdev);
@@ -3776,7 +3775,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3776 if (!num_rsp) 3775 if (!num_rsp)
3777 return; 3776 return;
3778 3777
3779 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) 3778 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3780 return; 3779 return;
3781 3780
3782 hci_dev_lock(hdev); 3781 hci_dev_lock(hdev);
@@ -3794,7 +3793,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3794 data.rssi = info->rssi; 3793 data.rssi = info->rssi;
3795 data.ssp_mode = 0x01; 3794 data.ssp_mode = 0x01;
3796 3795
3797 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 3796 if (hci_dev_test_flag(hdev, HCI_MGMT))
3798 name_known = eir_has_data_type(info->data, 3797 name_known = eir_has_data_type(info->data,
3799 sizeof(info->data), 3798 sizeof(info->data),
3800 EIR_NAME_COMPLETE); 3799 EIR_NAME_COMPLETE);
@@ -3898,7 +3897,7 @@ static u8 bredr_oob_data_present(struct hci_conn *conn)
3898 * information. However it can only be trusted when 3897 * information. However it can only be trusted when
3899 * not in Secure Connection Only mode. 3898 * not in Secure Connection Only mode.
3900 */ 3899 */
3901 if (!test_bit(HCI_SC_ONLY, &hdev->dev_flags)) 3900 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
3902 return data->present; 3901 return data->present;
3903 3902
3904 /* When Secure Connections Only mode is enabled, then 3903 /* When Secure Connections Only mode is enabled, then
@@ -3942,13 +3941,13 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3942 3941
3943 hci_conn_hold(conn); 3942 hci_conn_hold(conn);
3944 3943
3945 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 3944 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3946 goto unlock; 3945 goto unlock;
3947 3946
3948 /* Allow pairing if we're pairable, the initiators of the 3947 /* Allow pairing if we're pairable, the initiators of the
3949 * pairing or if the remote is not requesting bonding. 3948 * pairing or if the remote is not requesting bonding.
3950 */ 3949 */
3951 if (test_bit(HCI_BONDABLE, &hdev->dev_flags) || 3950 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
3952 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) || 3951 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
3953 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { 3952 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3954 struct hci_cp_io_capability_reply cp; 3953 struct hci_cp_io_capability_reply cp;
@@ -3974,7 +3973,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3974 /* If we're not bondable, force one of the non-bondable 3973 /* If we're not bondable, force one of the non-bondable
3975 * authentication requirement values. 3974 * authentication requirement values.
3976 */ 3975 */
3977 if (!test_bit(HCI_BONDABLE, &hdev->dev_flags)) 3976 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
3978 conn->auth_type &= HCI_AT_NO_BONDING_MITM; 3977 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
3979 3978
3980 cp.authentication = conn->auth_type; 3979 cp.authentication = conn->auth_type;
@@ -4029,7 +4028,7 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4029 4028
4030 hci_dev_lock(hdev); 4029 hci_dev_lock(hdev);
4031 4030
4032 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 4031 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4033 goto unlock; 4032 goto unlock;
4034 4033
4035 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4034 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
@@ -4100,7 +4099,7 @@ static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4100 4099
4101 BT_DBG("%s", hdev->name); 4100 BT_DBG("%s", hdev->name);
4102 4101
4103 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 4102 if (hci_dev_test_flag(hdev, HCI_MGMT))
4104 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); 4103 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4105} 4104}
4106 4105
@@ -4119,7 +4118,7 @@ static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4119 conn->passkey_notify = __le32_to_cpu(ev->passkey); 4118 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4120 conn->passkey_entered = 0; 4119 conn->passkey_entered = 0;
4121 4120
4122 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 4121 if (hci_dev_test_flag(hdev, HCI_MGMT))
4123 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 4122 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4124 conn->dst_type, conn->passkey_notify, 4123 conn->dst_type, conn->passkey_notify,
4125 conn->passkey_entered); 4124 conn->passkey_entered);
@@ -4157,7 +4156,7 @@ static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4157 return; 4156 return;
4158 } 4157 }
4159 4158
4160 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 4159 if (hci_dev_test_flag(hdev, HCI_MGMT))
4161 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 4160 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4162 conn->dst_type, conn->passkey_notify, 4161 conn->dst_type, conn->passkey_notify,
4163 conn->passkey_entered); 4162 conn->passkey_entered);
@@ -4226,7 +4225,7 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4226 4225
4227 hci_dev_lock(hdev); 4226 hci_dev_lock(hdev);
4228 4227
4229 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 4228 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4230 goto unlock; 4229 goto unlock;
4231 4230
4232 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR); 4231 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
@@ -4243,7 +4242,7 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4243 struct hci_cp_remote_oob_ext_data_reply cp; 4242 struct hci_cp_remote_oob_ext_data_reply cp;
4244 4243
4245 bacpy(&cp.bdaddr, &ev->bdaddr); 4244 bacpy(&cp.bdaddr, &ev->bdaddr);
4246 if (test_bit(HCI_SC_ONLY, &hdev->dev_flags)) { 4245 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4247 memset(cp.hash192, 0, sizeof(cp.hash192)); 4246 memset(cp.hash192, 0, sizeof(cp.hash192));
4248 memset(cp.rand192, 0, sizeof(cp.rand192)); 4247 memset(cp.rand192, 0, sizeof(cp.rand192));
4249 } else { 4248 } else {
@@ -4409,7 +4408,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4409 /* All controllers implicitly stop advertising in the event of a 4408 /* All controllers implicitly stop advertising in the event of a
4410 * connection, so ensure that the state bit is cleared. 4409 * connection, so ensure that the state bit is cleared.
4411 */ 4410 */
4412 clear_bit(HCI_LE_ADV, &hdev->dev_flags); 4411 hci_dev_clear_flag(hdev, HCI_LE_ADV);
4413 4412
4414 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); 4413 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
4415 if (!conn) { 4414 if (!conn) {
@@ -4432,7 +4431,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4432 if (conn->out) { 4431 if (conn->out) {
4433 conn->resp_addr_type = ev->bdaddr_type; 4432 conn->resp_addr_type = ev->bdaddr_type;
4434 bacpy(&conn->resp_addr, &ev->bdaddr); 4433 bacpy(&conn->resp_addr, &ev->bdaddr);
4435 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) { 4434 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
4436 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 4435 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4437 bacpy(&conn->init_addr, &hdev->rpa); 4436 bacpy(&conn->init_addr, &hdev->rpa);
4438 } else { 4437 } else {
@@ -4658,7 +4657,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4658 /* If the controller is not using resolvable random 4657 /* If the controller is not using resolvable random
4659 * addresses, then this report can be ignored. 4658 * addresses, then this report can be ignored.
4660 */ 4659 */
4661 if (!test_bit(HCI_PRIVACY, &hdev->dev_flags)) 4660 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
4662 return; 4661 return;
4663 4662
4664 /* If the local IRK of the controller does not match 4663 /* If the local IRK of the controller does not match
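The hci_event.c hunks above are mechanical conversions from open-coded test_bit()/set_bit()/clear_bit() calls on hdev->dev_flags to the new hci_dev_*_flag() helpers. A minimal sketch of how such wrappers can be expressed follows; the helper names match the calls in this diff, but the exact definitions and the underlying storage of dev_flags are assumptions, since hci_core.h is not part of this excerpt:

/* Sketch only: flag helpers layered over a kernel bitmap, assuming
 * dev_flags is (or becomes) a DECLARE_BITMAP() rather than a single
 * unsigned long. */
#define hci_dev_set_flag(hdev, nr)             set_bit((nr), (hdev)->dev_flags)
#define hci_dev_clear_flag(hdev, nr)           clear_bit((nr), (hdev)->dev_flags)
#define hci_dev_change_flag(hdev, nr)          change_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_flag(hdev, nr)            test_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_and_set_flag(hdev, nr)    test_and_set_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_and_clear_flag(hdev, nr)  test_and_clear_bit((nr), (hdev)->dev_flags)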
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index f857e765e081..55e096d20a0f 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -270,7 +270,7 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
270 * and 0x01 (whitelist enabled) use the new filter policies 270 * and 0x01 (whitelist enabled) use the new filter policies
271 * 0x02 (no whitelist) and 0x03 (whitelist enabled). 271 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
272 */ 272 */
273 if (test_bit(HCI_PRIVACY, &hdev->dev_flags) && 273 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
274 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) 274 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
275 filter_policy |= 0x02; 275 filter_policy |= 0x02;
276 276
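The filter_policy value assembled here ends up as one of four values. A small hedged restatement of the combination; use_whitelist stands in for the whitelist decision made earlier in hci_req_add_le_passive_scan(), which is outside this hunk:

/* 0x00 no whitelist, 0x01 whitelist enabled; ORing in 0x02 selects the
 * corresponding extended filter policies 0x02 and 0x03 mentioned in the
 * comment above. */
u8 filter_policy = use_whitelist ? 0x01 : 0x00;

if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
	filter_policy |= 0x02;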
@@ -304,10 +304,10 @@ static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
304 * In this kind of scenario skip the update and let the random 304 * In this kind of scenario skip the update and let the random
305 * address be updated at the next cycle. 305 * address be updated at the next cycle.
306 */ 306 */
307 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) || 307 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
308 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) { 308 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
309 BT_DBG("Deferring random address update"); 309 BT_DBG("Deferring random address update");
310 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); 310 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
311 return; 311 return;
312 } 312 }
313 313
@@ -324,12 +324,12 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
324 * current RPA has expired or there is something other than 324
325 * the current RPA in use, then generate a new one. 325 * the current RPA in use, then generate a new one.
326 */ 326 */
327 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) { 327 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
328 int to; 328 int to;
329 329
330 *own_addr_type = ADDR_LE_DEV_RANDOM; 330 *own_addr_type = ADDR_LE_DEV_RANDOM;
331 331
332 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) && 332 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
333 !bacmp(&hdev->random_addr, &hdev->rpa)) 333 !bacmp(&hdev->random_addr, &hdev->rpa))
334 return 0; 334 return 0;
335 335
@@ -383,9 +383,9 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
383 * and a static address has been configured, then use that 383 * and a static address has been configured, then use that
384 * address instead of the public BR/EDR address. 384 * address instead of the public BR/EDR address.
385 */ 385 */
386 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) || 386 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
387 !bacmp(&hdev->bdaddr, BDADDR_ANY) || 387 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
388 (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) && 388 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
389 bacmp(&hdev->static_addr, BDADDR_ANY))) { 389 bacmp(&hdev->static_addr, BDADDR_ANY))) {
390 *own_addr_type = ADDR_LE_DEV_RANDOM; 390 *own_addr_type = ADDR_LE_DEV_RANDOM;
391 if (bacmp(&hdev->static_addr, &hdev->random_addr)) 391 if (bacmp(&hdev->static_addr, &hdev->random_addr))
@@ -425,7 +425,7 @@ void __hci_update_page_scan(struct hci_request *req)
425 struct hci_dev *hdev = req->hdev; 425 struct hci_dev *hdev = req->hdev;
426 u8 scan; 426 u8 scan;
427 427
428 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) 428 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
429 return; 429 return;
430 430
431 if (!hdev_is_powered(hdev)) 431 if (!hdev_is_powered(hdev))
@@ -434,7 +434,7 @@ void __hci_update_page_scan(struct hci_request *req)
434 if (mgmt_powering_down(hdev)) 434 if (mgmt_powering_down(hdev))
435 return; 435 return;
436 436
437 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) || 437 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
438 disconnected_whitelist_entries(hdev)) 438 disconnected_whitelist_entries(hdev))
439 scan = SCAN_PAGE; 439 scan = SCAN_PAGE;
440 else 440 else
@@ -443,7 +443,7 @@ void __hci_update_page_scan(struct hci_request *req)
443 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE)) 443 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
444 return; 444 return;
445 445
446 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) 446 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
447 scan |= SCAN_INQUIRY; 447 scan |= SCAN_INQUIRY;
448 448
449 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); 449 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
@@ -471,14 +471,14 @@ void __hci_update_background_scan(struct hci_request *req)
471 471
472 if (!test_bit(HCI_UP, &hdev->flags) || 472 if (!test_bit(HCI_UP, &hdev->flags) ||
473 test_bit(HCI_INIT, &hdev->flags) || 473 test_bit(HCI_INIT, &hdev->flags) ||
474 test_bit(HCI_SETUP, &hdev->dev_flags) || 474 hci_dev_test_flag(hdev, HCI_SETUP) ||
475 test_bit(HCI_CONFIG, &hdev->dev_flags) || 475 hci_dev_test_flag(hdev, HCI_CONFIG) ||
476 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) || 476 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
477 test_bit(HCI_UNREGISTER, &hdev->dev_flags)) 477 hci_dev_test_flag(hdev, HCI_UNREGISTER))
478 return; 478 return;
479 479
480 /* No point in doing scanning if LE support hasn't been enabled */ 480 /* No point in doing scanning if LE support hasn't been enabled */
481 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) 481 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
482 return; 482 return;
483 483
484 /* If discovery is active don't interfere with it */ 484 /* If discovery is active don't interfere with it */
@@ -502,7 +502,7 @@ void __hci_update_background_scan(struct hci_request *req)
502 */ 502 */
503 503
504 /* If controller is not scanning we are done. */ 504 /* If controller is not scanning we are done. */
505 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags)) 505 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
506 return; 506 return;
507 507
508 hci_req_add_le_scan_disable(req); 508 hci_req_add_le_scan_disable(req);
@@ -524,7 +524,7 @@ void __hci_update_background_scan(struct hci_request *req)
524 /* If controller is currently scanning, we stop it to ensure we 524 /* If controller is currently scanning, we stop it to ensure we
525 * don't miss any advertising (due to duplicates filter). 525 * don't miss any advertising (due to duplicates filter).
526 */ 526 */
527 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) 527 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
528 hci_req_add_le_scan_disable(req); 528 hci_req_add_le_scan_disable(req);
529 529
530 hci_req_add_le_passive_scan(req); 530 hci_req_add_le_passive_scan(req);
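The helpers in hci_request.c all follow the same build-then-run pattern: commands are queued on a struct hci_request and submitted in one batch. A hedged sketch of a caller is below; restart_passive_scan() and my_scan_done() are made-up names for illustration, while hci_req_init(), hci_req_add_le_scan_disable(), hci_req_add_le_passive_scan() and hci_req_run() are existing request interfaces:

static void my_scan_done(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int restart_passive_scan(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);

	/* Mirror the pattern from __hci_update_background_scan(): stop an
	 * ongoing passive scan before programming new parameters. */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(&req);

	hci_req_add_le_passive_scan(&req);

	return hci_req_run(&req, my_scan_done);
}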
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index cb4bc4883350..b614543b4fe3 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -31,6 +31,9 @@
31#include <net/bluetooth/hci_core.h> 31#include <net/bluetooth/hci_core.h>
32#include <net/bluetooth/hci_mon.h> 32#include <net/bluetooth/hci_mon.h>
33 33
34static LIST_HEAD(mgmt_chan_list);
35static DEFINE_MUTEX(mgmt_chan_list_lock);
36
34static atomic_t monitor_promisc = ATOMIC_INIT(0); 37static atomic_t monitor_promisc = ATOMIC_INIT(0);
35 38
36/* ----- HCI socket interface ----- */ 39/* ----- HCI socket interface ----- */
@@ -401,6 +404,56 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
401 } 404 }
402} 405}
403 406
407static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
408{
409 struct hci_mgmt_chan *c;
410
411 list_for_each_entry(c, &mgmt_chan_list, list) {
412 if (c->channel == channel)
413 return c;
414 }
415
416 return NULL;
417}
418
419static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
420{
421 struct hci_mgmt_chan *c;
422
423 mutex_lock(&mgmt_chan_list_lock);
424 c = __hci_mgmt_chan_find(channel);
425 mutex_unlock(&mgmt_chan_list_lock);
426
427 return c;
428}
429
430int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
431{
432 if (c->channel < HCI_CHANNEL_CONTROL)
433 return -EINVAL;
434
435 mutex_lock(&mgmt_chan_list_lock);
436 if (__hci_mgmt_chan_find(c->channel)) {
437 mutex_unlock(&mgmt_chan_list_lock);
438 return -EALREADY;
439 }
440
441 list_add_tail(&c->list, &mgmt_chan_list);
442
443 mutex_unlock(&mgmt_chan_list_lock);
444
445 return 0;
446}
447EXPORT_SYMBOL(hci_mgmt_chan_register);
448
449void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
450{
451 mutex_lock(&mgmt_chan_list_lock);
452 list_del(&c->list);
453 mutex_unlock(&mgmt_chan_list_lock);
454}
455EXPORT_SYMBOL(hci_mgmt_chan_unregister);
456
404static int hci_sock_release(struct socket *sock) 457static int hci_sock_release(struct socket *sock)
405{ 458{
406 struct sock *sk = sock->sk; 459 struct sock *sk = sock->sk;
@@ -421,7 +474,7 @@ static int hci_sock_release(struct socket *sock)
421 if (hdev) { 474 if (hdev) {
422 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { 475 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
423 mgmt_index_added(hdev); 476 mgmt_index_added(hdev);
424 clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags); 477 hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
425 hci_dev_close(hdev->id); 478 hci_dev_close(hdev->id);
426 } 479 }
427 480
@@ -481,10 +534,10 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
481 if (!hdev) 534 if (!hdev)
482 return -EBADFD; 535 return -EBADFD;
483 536
484 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) 537 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
485 return -EBUSY; 538 return -EBUSY;
486 539
487 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) 540 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
488 return -EOPNOTSUPP; 541 return -EOPNOTSUPP;
489 542
490 if (hdev->dev_type != HCI_BREDR) 543 if (hdev->dev_type != HCI_BREDR)
@@ -660,14 +713,14 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
660 713
661 if (test_bit(HCI_UP, &hdev->flags) || 714 if (test_bit(HCI_UP, &hdev->flags) ||
662 test_bit(HCI_INIT, &hdev->flags) || 715 test_bit(HCI_INIT, &hdev->flags) ||
663 test_bit(HCI_SETUP, &hdev->dev_flags) || 716 hci_dev_test_flag(hdev, HCI_SETUP) ||
664 test_bit(HCI_CONFIG, &hdev->dev_flags)) { 717 hci_dev_test_flag(hdev, HCI_CONFIG)) {
665 err = -EBUSY; 718 err = -EBUSY;
666 hci_dev_put(hdev); 719 hci_dev_put(hdev);
667 goto done; 720 goto done;
668 } 721 }
669 722
670 if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { 723 if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
671 err = -EUSERS; 724 err = -EUSERS;
672 hci_dev_put(hdev); 725 hci_dev_put(hdev);
673 goto done; 726 goto done;
@@ -677,7 +730,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
677 730
678 err = hci_dev_open(hdev->id); 731 err = hci_dev_open(hdev->id);
679 if (err) { 732 if (err) {
680 clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags); 733 hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
681 mgmt_index_added(hdev); 734 mgmt_index_added(hdev);
682 hci_dev_put(hdev); 735 hci_dev_put(hdev);
683 goto done; 736 goto done;
@@ -688,38 +741,39 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
688 hci_pi(sk)->hdev = hdev; 741 hci_pi(sk)->hdev = hdev;
689 break; 742 break;
690 743
691 case HCI_CHANNEL_CONTROL: 744 case HCI_CHANNEL_MONITOR:
692 if (haddr.hci_dev != HCI_DEV_NONE) { 745 if (haddr.hci_dev != HCI_DEV_NONE) {
693 err = -EINVAL; 746 err = -EINVAL;
694 goto done; 747 goto done;
695 } 748 }
696 749
697 if (!capable(CAP_NET_ADMIN)) { 750 if (!capable(CAP_NET_RAW)) {
698 err = -EPERM; 751 err = -EPERM;
699 goto done; 752 goto done;
700 } 753 }
701 754
755 send_monitor_replay(sk);
756
757 atomic_inc(&monitor_promisc);
702 break; 758 break;
703 759
704 case HCI_CHANNEL_MONITOR: 760 default:
761 if (!hci_mgmt_chan_find(haddr.hci_channel)) {
762 err = -EINVAL;
763 goto done;
764 }
765
705 if (haddr.hci_dev != HCI_DEV_NONE) { 766 if (haddr.hci_dev != HCI_DEV_NONE) {
706 err = -EINVAL; 767 err = -EINVAL;
707 goto done; 768 goto done;
708 } 769 }
709 770
710 if (!capable(CAP_NET_RAW)) { 771 if (!capable(CAP_NET_ADMIN)) {
711 err = -EPERM; 772 err = -EPERM;
712 goto done; 773 goto done;
713 } 774 }
714 775
715 send_monitor_replay(sk);
716
717 atomic_inc(&monitor_promisc);
718 break; 776 break;
719
720 default:
721 err = -EINVAL;
722 goto done;
723 } 777 }
724 778
725 779
@@ -833,10 +887,13 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
833 hci_sock_cmsg(sk, msg, skb); 887 hci_sock_cmsg(sk, msg, skb);
834 break; 888 break;
835 case HCI_CHANNEL_USER: 889 case HCI_CHANNEL_USER:
836 case HCI_CHANNEL_CONTROL:
837 case HCI_CHANNEL_MONITOR: 890 case HCI_CHANNEL_MONITOR:
838 sock_recv_timestamp(msg, sk, skb); 891 sock_recv_timestamp(msg, sk, skb);
839 break; 892 break;
893 default:
894 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
895 sock_recv_timestamp(msg, sk, skb);
896 break;
840 } 897 }
841 898
842 skb_free_datagram(sk, skb); 899 skb_free_datagram(sk, skb);
@@ -848,6 +905,7 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
848 size_t len) 905 size_t len)
849{ 906{
850 struct sock *sk = sock->sk; 907 struct sock *sk = sock->sk;
908 struct hci_mgmt_chan *chan;
851 struct hci_dev *hdev; 909 struct hci_dev *hdev;
852 struct sk_buff *skb; 910 struct sk_buff *skb;
853 int err; 911 int err;
@@ -869,14 +927,18 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
869 case HCI_CHANNEL_RAW: 927 case HCI_CHANNEL_RAW:
870 case HCI_CHANNEL_USER: 928 case HCI_CHANNEL_USER:
871 break; 929 break;
872 case HCI_CHANNEL_CONTROL:
873 err = mgmt_control(sk, msg, len);
874 goto done;
875 case HCI_CHANNEL_MONITOR: 930 case HCI_CHANNEL_MONITOR:
876 err = -EOPNOTSUPP; 931 err = -EOPNOTSUPP;
877 goto done; 932 goto done;
878 default: 933 default:
879 err = -EINVAL; 934 mutex_lock(&mgmt_chan_list_lock);
935 chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
936 if (chan)
937 err = mgmt_control(chan, sk, msg, len);
938 else
939 err = -EINVAL;
940
941 mutex_unlock(&mgmt_chan_list_lock);
880 goto done; 942 goto done;
881 } 943 }
882 944
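The mgmt_chan_list added above lets additional management channels register with the HCI socket layer; bind(), sendmsg() and recvmsg() now fall through to hci_mgmt_chan_find() for any channel that is not RAW, USER or MONITOR. A hedged sketch of a registration; the channel number and everything beyond the .channel field are hypothetical, since only .channel and .list of struct hci_mgmt_chan are visible in this diff:

/* Hypothetical new management channel. HCI_CHANNEL_CONTROL + 1 is only an
 * example value; hci_mgmt_chan_register() rejects channels below
 * HCI_CHANNEL_CONTROL with -EINVAL and duplicates with -EALREADY. */
static struct hci_mgmt_chan my_mgmt_chan = {
	.channel = HCI_CHANNEL_CONTROL + 1,
};

static int __init my_mgmt_init(void)
{
	return hci_mgmt_chan_register(&my_mgmt_chan);
}

static void __exit my_mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&my_mgmt_chan);
}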
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 91c682846bcf..d69861c89bb5 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3900,7 +3900,7 @@ static int l2cap_connect_req(struct l2cap_conn *conn,
3900 return -EPROTO; 3900 return -EPROTO;
3901 3901
3902 hci_dev_lock(hdev); 3902 hci_dev_lock(hdev);
3903 if (test_bit(HCI_MGMT, &hdev->dev_flags) && 3903 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3904 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags)) 3904 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3905 mgmt_device_connected(hdev, hcon, 0, NULL, 0); 3905 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3906 hci_dev_unlock(hdev); 3906 hci_dev_unlock(hdev);
@@ -6987,12 +6987,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6987 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS; 6987 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
6988 6988
6989 if (hcon->type == ACL_LINK && 6989 if (hcon->type == ACL_LINK &&
6990 test_bit(HCI_HS_ENABLED, &hcon->hdev->dev_flags)) 6990 hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
6991 conn->local_fixed_chan |= L2CAP_FC_A2MP; 6991 conn->local_fixed_chan |= L2CAP_FC_A2MP;
6992 6992
6993 if (test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags) && 6993 if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
6994 (bredr_sc_enabled(hcon->hdev) || 6994 (bredr_sc_enabled(hcon->hdev) ||
6995 test_bit(HCI_FORCE_BREDR_SMP, &hcon->hdev->dbg_flags))) 6995 hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
6996 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR; 6996 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
6997 6997
6998 mutex_init(&conn->ident_lock); 6998 mutex_init(&conn->ident_lock);
@@ -7112,7 +7112,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7112 else 7112 else
7113 dst_type = ADDR_LE_DEV_RANDOM; 7113 dst_type = ADDR_LE_DEV_RANDOM;
7114 7114
7115 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) 7115 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7116 role = HCI_ROLE_SLAVE; 7116 role = HCI_ROLE_SLAVE;
7117 else 7117 else
7118 role = HCI_ROLE_MASTER; 7118 role = HCI_ROLE_MASTER;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 1e4635a3374d..c58908652519 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -37,7 +37,7 @@
37#include "smp.h" 37#include "smp.h"
38 38
39#define MGMT_VERSION 1 39#define MGMT_VERSION 1
40#define MGMT_REVISION 8 40#define MGMT_REVISION 9
41 41
42static const u16 mgmt_commands[] = { 42static const u16 mgmt_commands[] = {
43 MGMT_OP_READ_INDEX_LIST, 43 MGMT_OP_READ_INDEX_LIST,
@@ -135,7 +135,7 @@ static const u16 mgmt_events[] = {
135#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \ 135#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
136 "\x00\x00\x00\x00\x00\x00\x00\x00" 136 "\x00\x00\x00\x00\x00\x00\x00\x00"
137 137
138struct pending_cmd { 138struct mgmt_pending_cmd {
139 struct list_head list; 139 struct list_head list;
140 u16 opcode; 140 u16 opcode;
141 int index; 141 int index;
@@ -143,7 +143,7 @@ struct pending_cmd {
143 size_t param_len; 143 size_t param_len;
144 struct sock *sk; 144 struct sock *sk;
145 void *user_data; 145 void *user_data;
146 int (*cmd_complete)(struct pending_cmd *cmd, u8 status); 146 int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status);
147}; 147};
148 148
149/* HCI to MGMT error code conversion table */ 149/* HCI to MGMT error code conversion table */
@@ -219,8 +219,9 @@ static u8 mgmt_status(u8 hci_status)
219 return MGMT_STATUS_FAILED; 219 return MGMT_STATUS_FAILED;
220} 220}
221 221
222static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len, 222static int mgmt_send_event(u16 event, struct hci_dev *hdev,
223 struct sock *skip_sk) 223 unsigned short channel, void *data, u16 data_len,
224 struct sock *skip_sk)
224{ 225{
225 struct sk_buff *skb; 226 struct sk_buff *skb;
226 struct mgmt_hdr *hdr; 227 struct mgmt_hdr *hdr;
@@ -243,13 +244,20 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
243 /* Time stamp */ 244 /* Time stamp */
244 __net_timestamp(skb); 245 __net_timestamp(skb);
245 246
246 hci_send_to_channel(HCI_CHANNEL_CONTROL, skb, skip_sk); 247 hci_send_to_channel(channel, skb, skip_sk);
247 kfree_skb(skb); 248 kfree_skb(skb);
248 249
249 return 0; 250 return 0;
250} 251}
251 252
252static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) 253static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
254 struct sock *skip_sk)
255{
256 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
257 skip_sk);
258}
259
260static int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
253{ 261{
254 struct sk_buff *skb; 262 struct sk_buff *skb;
255 struct mgmt_hdr *hdr; 263 struct mgmt_hdr *hdr;
@@ -279,8 +287,8 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
279 return err; 287 return err;
280} 288}
281 289
282static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status, 290static int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
283 void *rp, size_t rp_len) 291 void *rp, size_t rp_len)
284{ 292{
285 struct sk_buff *skb; 293 struct sk_buff *skb;
286 struct mgmt_hdr *hdr; 294 struct mgmt_hdr *hdr;
@@ -323,8 +331,8 @@ static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
323 rp.version = MGMT_VERSION; 331 rp.version = MGMT_VERSION;
324 rp.revision = cpu_to_le16(MGMT_REVISION); 332 rp.revision = cpu_to_le16(MGMT_REVISION);
325 333
326 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp, 334 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
327 sizeof(rp)); 335 &rp, sizeof(rp));
328} 336}
329 337
330static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data, 338static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -354,8 +362,8 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
354 for (i = 0; i < num_events; i++, opcode++) 362 for (i = 0; i < num_events; i++, opcode++)
355 put_unaligned_le16(mgmt_events[i], opcode); 363 put_unaligned_le16(mgmt_events[i], opcode);
356 364
357 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp, 365 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
358 rp_size); 366 rp, rp_size);
359 kfree(rp); 367 kfree(rp);
360 368
361 return err; 369 return err;
@@ -377,7 +385,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
377 count = 0; 385 count = 0;
378 list_for_each_entry(d, &hci_dev_list, list) { 386 list_for_each_entry(d, &hci_dev_list, list) {
379 if (d->dev_type == HCI_BREDR && 387 if (d->dev_type == HCI_BREDR &&
380 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) 388 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
381 count++; 389 count++;
382 } 390 }
383 391
@@ -390,9 +398,9 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
390 398
391 count = 0; 399 count = 0;
392 list_for_each_entry(d, &hci_dev_list, list) { 400 list_for_each_entry(d, &hci_dev_list, list) {
393 if (test_bit(HCI_SETUP, &d->dev_flags) || 401 if (hci_dev_test_flag(d, HCI_SETUP) ||
394 test_bit(HCI_CONFIG, &d->dev_flags) || 402 hci_dev_test_flag(d, HCI_CONFIG) ||
395 test_bit(HCI_USER_CHANNEL, &d->dev_flags)) 403 hci_dev_test_flag(d, HCI_USER_CHANNEL))
396 continue; 404 continue;
397 405
398 /* Devices marked as raw-only are neither configured 406 /* Devices marked as raw-only are neither configured
@@ -402,7 +410,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
402 continue; 410 continue;
403 411
404 if (d->dev_type == HCI_BREDR && 412 if (d->dev_type == HCI_BREDR &&
405 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) { 413 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
406 rp->index[count++] = cpu_to_le16(d->id); 414 rp->index[count++] = cpu_to_le16(d->id);
407 BT_DBG("Added hci%u", d->id); 415 BT_DBG("Added hci%u", d->id);
408 } 416 }
@@ -413,8 +421,8 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
413 421
414 read_unlock(&hci_dev_list_lock); 422 read_unlock(&hci_dev_list_lock);
415 423
416 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp, 424 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
417 rp_len); 425 0, rp, rp_len);
418 426
419 kfree(rp); 427 kfree(rp);
420 428
@@ -437,7 +445,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
437 count = 0; 445 count = 0;
438 list_for_each_entry(d, &hci_dev_list, list) { 446 list_for_each_entry(d, &hci_dev_list, list) {
439 if (d->dev_type == HCI_BREDR && 447 if (d->dev_type == HCI_BREDR &&
440 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) 448 hci_dev_test_flag(d, HCI_UNCONFIGURED))
441 count++; 449 count++;
442 } 450 }
443 451
@@ -450,9 +458,9 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
450 458
451 count = 0; 459 count = 0;
452 list_for_each_entry(d, &hci_dev_list, list) { 460 list_for_each_entry(d, &hci_dev_list, list) {
453 if (test_bit(HCI_SETUP, &d->dev_flags) || 461 if (hci_dev_test_flag(d, HCI_SETUP) ||
454 test_bit(HCI_CONFIG, &d->dev_flags) || 462 hci_dev_test_flag(d, HCI_CONFIG) ||
455 test_bit(HCI_USER_CHANNEL, &d->dev_flags)) 463 hci_dev_test_flag(d, HCI_USER_CHANNEL))
456 continue; 464 continue;
457 465
458 /* Devices marked as raw-only are neither configured 466 /* Devices marked as raw-only are neither configured
@@ -462,7 +470,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
462 continue; 470 continue;
463 471
464 if (d->dev_type == HCI_BREDR && 472 if (d->dev_type == HCI_BREDR &&
465 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) { 473 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
466 rp->index[count++] = cpu_to_le16(d->id); 474 rp->index[count++] = cpu_to_le16(d->id);
467 BT_DBG("Added hci%u", d->id); 475 BT_DBG("Added hci%u", d->id);
468 } 476 }
@@ -473,8 +481,8 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
473 481
474 read_unlock(&hci_dev_list_lock); 482 read_unlock(&hci_dev_list_lock);
475 483
476 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST, 484 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
477 0, rp, rp_len); 485 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
478 486
479 kfree(rp); 487 kfree(rp);
480 488
@@ -484,7 +492,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
484static bool is_configured(struct hci_dev *hdev) 492static bool is_configured(struct hci_dev *hdev)
485{ 493{
486 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) && 494 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
487 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags)) 495 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
488 return false; 496 return false;
489 497
490 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) && 498 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
@@ -499,7 +507,7 @@ static __le32 get_missing_options(struct hci_dev *hdev)
499 u32 options = 0; 507 u32 options = 0;
500 508
501 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) && 509 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
502 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags)) 510 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
503 options |= MGMT_OPTION_EXTERNAL_CONFIG; 511 options |= MGMT_OPTION_EXTERNAL_CONFIG;
504 512
505 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) && 513 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
@@ -521,8 +529,8 @@ static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
521{ 529{
522 __le32 options = get_missing_options(hdev); 530 __le32 options = get_missing_options(hdev);
523 531
524 return cmd_complete(sk, hdev->id, opcode, 0, &options, 532 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
525 sizeof(options)); 533 sizeof(options));
526} 534}
527 535
528static int read_config_info(struct sock *sk, struct hci_dev *hdev, 536static int read_config_info(struct sock *sk, struct hci_dev *hdev,
@@ -549,8 +557,8 @@ static int read_config_info(struct sock *sk, struct hci_dev *hdev,
549 557
550 hci_dev_unlock(hdev); 558 hci_dev_unlock(hdev);
551 559
552 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp, 560 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
553 sizeof(rp)); 561 &rp, sizeof(rp));
554} 562}
555 563
556static u32 get_supported_settings(struct hci_dev *hdev) 564static u32 get_supported_settings(struct hci_dev *hdev)
@@ -583,6 +591,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)
583 settings |= MGMT_SETTING_ADVERTISING; 591 settings |= MGMT_SETTING_ADVERTISING;
584 settings |= MGMT_SETTING_SECURE_CONN; 592 settings |= MGMT_SETTING_SECURE_CONN;
585 settings |= MGMT_SETTING_PRIVACY; 593 settings |= MGMT_SETTING_PRIVACY;
594 settings |= MGMT_SETTING_STATIC_ADDRESS;
586 } 595 }
587 596
588 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || 597 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
@@ -599,45 +608,64 @@ static u32 get_current_settings(struct hci_dev *hdev)
599 if (hdev_is_powered(hdev)) 608 if (hdev_is_powered(hdev))
600 settings |= MGMT_SETTING_POWERED; 609 settings |= MGMT_SETTING_POWERED;
601 610
602 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) 611 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
603 settings |= MGMT_SETTING_CONNECTABLE; 612 settings |= MGMT_SETTING_CONNECTABLE;
604 613
605 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) 614 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
606 settings |= MGMT_SETTING_FAST_CONNECTABLE; 615 settings |= MGMT_SETTING_FAST_CONNECTABLE;
607 616
608 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) 617 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
609 settings |= MGMT_SETTING_DISCOVERABLE; 618 settings |= MGMT_SETTING_DISCOVERABLE;
610 619
611 if (test_bit(HCI_BONDABLE, &hdev->dev_flags)) 620 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
612 settings |= MGMT_SETTING_BONDABLE; 621 settings |= MGMT_SETTING_BONDABLE;
613 622
614 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) 623 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
615 settings |= MGMT_SETTING_BREDR; 624 settings |= MGMT_SETTING_BREDR;
616 625
617 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) 626 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
618 settings |= MGMT_SETTING_LE; 627 settings |= MGMT_SETTING_LE;
619 628
620 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) 629 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
621 settings |= MGMT_SETTING_LINK_SECURITY; 630 settings |= MGMT_SETTING_LINK_SECURITY;
622 631
623 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) 632 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
624 settings |= MGMT_SETTING_SSP; 633 settings |= MGMT_SETTING_SSP;
625 634
626 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags)) 635 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
627 settings |= MGMT_SETTING_HS; 636 settings |= MGMT_SETTING_HS;
628 637
629 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) 638 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
630 settings |= MGMT_SETTING_ADVERTISING; 639 settings |= MGMT_SETTING_ADVERTISING;
631 640
632 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) 641 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
633 settings |= MGMT_SETTING_SECURE_CONN; 642 settings |= MGMT_SETTING_SECURE_CONN;
634 643
635 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) 644 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
636 settings |= MGMT_SETTING_DEBUG_KEYS; 645 settings |= MGMT_SETTING_DEBUG_KEYS;
637 646
638 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) 647 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
639 settings |= MGMT_SETTING_PRIVACY; 648 settings |= MGMT_SETTING_PRIVACY;
640 649
650 /* The current setting for static address has two purposes. The
651 * first is to indicate if the static address will be used and
652 * the second is to indicate if it is actually set.
653 *
654 * This means if the static address is not configured, this flag
655 * will never be set. If the address is configured, then whether the
656 * address is actually in use decides if the flag is set or not.
657 *
658 * For single-mode LE-only controllers and dual-mode controllers
659 * with BR/EDR disabled, the existence of the static address will
660 * be evaluated.
661 */
662 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
663 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
664 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
665 if (bacmp(&hdev->static_addr, BDADDR_ANY))
666 settings |= MGMT_SETTING_STATIC_ADDRESS;
667 }
668
641 return settings; 669 return settings;
642} 670}
643 671
@@ -751,9 +779,10 @@ static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
751 return ptr; 779 return ptr;
752} 780}
753 781
754static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev) 782static struct mgmt_pending_cmd *mgmt_pending_find(u16 opcode,
783 struct hci_dev *hdev)
755{ 784{
756 struct pending_cmd *cmd; 785 struct mgmt_pending_cmd *cmd;
757 786
758 list_for_each_entry(cmd, &hdev->mgmt_pending, list) { 787 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
759 if (cmd->opcode == opcode) 788 if (cmd->opcode == opcode)
@@ -763,11 +792,11 @@ static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
763 return NULL; 792 return NULL;
764} 793}
765 794
766static struct pending_cmd *mgmt_pending_find_data(u16 opcode, 795static struct mgmt_pending_cmd *mgmt_pending_find_data(u16 opcode,
767 struct hci_dev *hdev, 796 struct hci_dev *hdev,
768 const void *data) 797 const void *data)
769{ 798{
770 struct pending_cmd *cmd; 799 struct mgmt_pending_cmd *cmd;
771 800
772 list_for_each_entry(cmd, &hdev->mgmt_pending, list) { 801 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
773 if (cmd->user_data != data) 802 if (cmd->user_data != data)
@@ -811,7 +840,7 @@ static void update_scan_rsp_data(struct hci_request *req)
811 struct hci_cp_le_set_scan_rsp_data cp; 840 struct hci_cp_le_set_scan_rsp_data cp;
812 u8 len; 841 u8 len;
813 842
814 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) 843 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
815 return; 844 return;
816 845
817 memset(&cp, 0, sizeof(cp)); 846 memset(&cp, 0, sizeof(cp));
@@ -832,7 +861,7 @@ static void update_scan_rsp_data(struct hci_request *req)
832 861
833static u8 get_adv_discov_flags(struct hci_dev *hdev) 862static u8 get_adv_discov_flags(struct hci_dev *hdev)
834{ 863{
835 struct pending_cmd *cmd; 864 struct mgmt_pending_cmd *cmd;
836 865
837 /* If there's a pending mgmt command the flags will not yet have 866 /* If there's a pending mgmt command the flags will not yet have
838 * their final values, so check for this first. 867 * their final values, so check for this first.
@@ -845,9 +874,9 @@ static u8 get_adv_discov_flags(struct hci_dev *hdev)
845 else if (cp->val == 0x02) 874 else if (cp->val == 0x02)
846 return LE_AD_LIMITED; 875 return LE_AD_LIMITED;
847 } else { 876 } else {
848 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags)) 877 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
849 return LE_AD_LIMITED; 878 return LE_AD_LIMITED;
850 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) 879 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
851 return LE_AD_GENERAL; 880 return LE_AD_GENERAL;
852 } 881 }
853 882
@@ -860,7 +889,7 @@ static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
860 889
861 flags |= get_adv_discov_flags(hdev); 890 flags |= get_adv_discov_flags(hdev);
862 891
863 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) 892 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
864 flags |= LE_AD_NO_BREDR; 893 flags |= LE_AD_NO_BREDR;
865 894
866 if (flags) { 895 if (flags) {
@@ -892,7 +921,7 @@ static void update_adv_data(struct hci_request *req)
892 struct hci_cp_le_set_adv_data cp; 921 struct hci_cp_le_set_adv_data cp;
893 u8 len; 922 u8 len;
894 923
895 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) 924 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
896 return; 925 return;
897 926
898 memset(&cp, 0, sizeof(cp)); 927 memset(&cp, 0, sizeof(cp));
@@ -980,10 +1009,10 @@ static void update_eir(struct hci_request *req)
980 if (!lmp_ext_inq_capable(hdev)) 1009 if (!lmp_ext_inq_capable(hdev))
981 return; 1010 return;
982 1011
983 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) 1012 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
984 return; 1013 return;
985 1014
986 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) 1015 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
987 return; 1016 return;
988 1017
989 memset(&cp, 0, sizeof(cp)); 1018 memset(&cp, 0, sizeof(cp));
@@ -1019,17 +1048,17 @@ static void update_class(struct hci_request *req)
1019 if (!hdev_is_powered(hdev)) 1048 if (!hdev_is_powered(hdev))
1020 return; 1049 return;
1021 1050
1022 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) 1051 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1023 return; 1052 return;
1024 1053
1025 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) 1054 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1026 return; 1055 return;
1027 1056
1028 cod[0] = hdev->minor_class; 1057 cod[0] = hdev->minor_class;
1029 cod[1] = hdev->major_class; 1058 cod[1] = hdev->major_class;
1030 cod[2] = get_service_classes(hdev); 1059 cod[2] = get_service_classes(hdev);
1031 1060
1032 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags)) 1061 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1033 cod[1] |= 0x20; 1062 cod[1] |= 0x20;
1034 1063
1035 if (memcmp(cod, hdev->dev_class, 3) == 0) 1064 if (memcmp(cod, hdev->dev_class, 3) == 0)
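update_class() fills the three Class of Device bytes in little-endian order, so the 0x20 written into cod[1] corresponds to bit 13 of the 24-bit CoD, the Limited Discoverable Mode service class bit. A small worked example with made-up values:

/* Example values only: minor 0x04, major 0x01, service classes 0x40. */
u8 cod[3] = { 0x04, 0x01, 0x40 };

/* Limited discoverable: bit 13 of the CoD, i.e. bit 5 of cod[1]. */
if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
	cod[1] |= 0x20;

u32 class_of_device = cod[0] | (cod[1] << 8) | (cod[2] << 16);	/* 0x400104 or 0x402104 */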
@@ -1040,7 +1069,7 @@ static void update_class(struct hci_request *req)
1040 1069
1041static bool get_connectable(struct hci_dev *hdev) 1070static bool get_connectable(struct hci_dev *hdev)
1042{ 1071{
1043 struct pending_cmd *cmd; 1072 struct mgmt_pending_cmd *cmd;
1044 1073
1045 /* If there's a pending mgmt command the flag will not yet have 1074 /* If there's a pending mgmt command the flag will not yet have
1046 * its final value, so check for this first. 1075
@@ -1051,7 +1080,7 @@ static bool get_connectable(struct hci_dev *hdev)
1051 return cp->val; 1080 return cp->val;
1052 } 1081 }
1053 1082
1054 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags); 1083 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
1055} 1084}
1056 1085
1057static void disable_advertising(struct hci_request *req) 1086static void disable_advertising(struct hci_request *req)
@@ -1071,7 +1100,7 @@ static void enable_advertising(struct hci_request *req)
1071 if (hci_conn_num(hdev, LE_LINK) > 0) 1100 if (hci_conn_num(hdev, LE_LINK) > 0)
1072 return; 1101 return;
1073 1102
1074 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) 1103 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1075 disable_advertising(req); 1104 disable_advertising(req);
1076 1105
1077 /* Clear the HCI_LE_ADV bit temporarily so that the 1106 /* Clear the HCI_LE_ADV bit temporarily so that the
@@ -1079,9 +1108,12 @@ static void enable_advertising(struct hci_request *req)
1079 * and write a new random address. The flag will be set back on 1108 * and write a new random address. The flag will be set back on
1080 * as soon as the SET_ADV_ENABLE HCI command completes. 1109 * as soon as the SET_ADV_ENABLE HCI command completes.
1081 */ 1110 */
1082 clear_bit(HCI_LE_ADV, &hdev->dev_flags); 1111 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1083 1112
1084 connectable = get_connectable(hdev); 1113 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1114 connectable = true;
1115 else
1116 connectable = get_connectable(hdev);
1085 1117
1086 /* Set require_privacy to true only when non-connectable 1118 /* Set require_privacy to true only when non-connectable
1087 * advertising is used. In that case it is fine to use a 1119 * advertising is used. In that case it is fine to use a
@@ -1108,7 +1140,7 @@ static void service_cache_off(struct work_struct *work)
1108 service_cache.work); 1140 service_cache.work);
1109 struct hci_request req; 1141 struct hci_request req;
1110 1142
1111 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) 1143 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1112 return; 1144 return;
1113 1145
1114 hci_req_init(&req, hdev); 1146 hci_req_init(&req, hdev);
@@ -1131,9 +1163,9 @@ static void rpa_expired(struct work_struct *work)
1131 1163
1132 BT_DBG(""); 1164 BT_DBG("");
1133 1165
1134 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); 1166 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1135 1167
1136 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags)) 1168 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1137 return; 1169 return;
1138 1170
1139 /* The generation of a new RPA and programming it into the 1171 /* The generation of a new RPA and programming it into the
@@ -1146,7 +1178,7 @@ static void rpa_expired(struct work_struct *work)
1146 1178
1147static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev) 1179static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1148{ 1180{
1149 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags)) 1181 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1150 return; 1182 return;
1151 1183
1152 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off); 1184 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
@@ -1157,7 +1189,7 @@ static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1157 * for mgmt we require user-space to explicitly enable 1189 * for mgmt we require user-space to explicitly enable
1158 * it 1190 * it
1159 */ 1191 */
1160 clear_bit(HCI_BONDABLE, &hdev->dev_flags); 1192 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1161} 1193}
1162 1194
1163static int read_controller_info(struct sock *sk, struct hci_dev *hdev, 1195static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
@@ -1186,22 +1218,22 @@ static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1186 1218
1187 hci_dev_unlock(hdev); 1219 hci_dev_unlock(hdev);
1188 1220
1189 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp, 1221 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1190 sizeof(rp)); 1222 sizeof(rp));
1191} 1223}
1192 1224
1193static void mgmt_pending_free(struct pending_cmd *cmd) 1225static void mgmt_pending_free(struct mgmt_pending_cmd *cmd)
1194{ 1226{
1195 sock_put(cmd->sk); 1227 sock_put(cmd->sk);
1196 kfree(cmd->param); 1228 kfree(cmd->param);
1197 kfree(cmd); 1229 kfree(cmd);
1198} 1230}
1199 1231
1200static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, 1232static struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1201 struct hci_dev *hdev, void *data, 1233 struct hci_dev *hdev,
1202 u16 len) 1234 void *data, u16 len)
1203{ 1235{
1204 struct pending_cmd *cmd; 1236 struct mgmt_pending_cmd *cmd;
1205 1237
1206 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 1238 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1207 if (!cmd) 1239 if (!cmd)
@@ -1227,11 +1259,11 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1227} 1259}
1228 1260
1229static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, 1261static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1230 void (*cb)(struct pending_cmd *cmd, 1262 void (*cb)(struct mgmt_pending_cmd *cmd,
1231 void *data), 1263 void *data),
1232 void *data) 1264 void *data)
1233{ 1265{
1234 struct pending_cmd *cmd, *tmp; 1266 struct mgmt_pending_cmd *cmd, *tmp;
1235 1267
1236 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) { 1268 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1237 if (opcode > 0 && cmd->opcode != opcode) 1269 if (opcode > 0 && cmd->opcode != opcode)
@@ -1241,7 +1273,7 @@ static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1241 } 1273 }
1242} 1274}
1243 1275
1244static void mgmt_pending_remove(struct pending_cmd *cmd) 1276static void mgmt_pending_remove(struct mgmt_pending_cmd *cmd)
1245{ 1277{
1246 list_del(&cmd->list); 1278 list_del(&cmd->list);
1247 mgmt_pending_free(cmd); 1279 mgmt_pending_free(cmd);
@@ -1251,8 +1283,8 @@ static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1251{ 1283{
1252 __le32 settings = cpu_to_le32(get_current_settings(hdev)); 1284 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1253 1285
1254 return cmd_complete(sk, hdev->id, opcode, 0, &settings, 1286 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1255 sizeof(settings)); 1287 sizeof(settings));
1256} 1288}
1257 1289
1258static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode) 1290static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
@@ -1296,7 +1328,7 @@ static bool hci_stop_discovery(struct hci_request *req)
1296 1328
1297 default: 1329 default:
1298 /* Passive scanning */ 1330 /* Passive scanning */
1299 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) { 1331 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1300 hci_req_add_le_scan_disable(req); 1332 hci_req_add_le_scan_disable(req);
1301 return true; 1333 return true;
1302 } 1334 }
@@ -1322,7 +1354,7 @@ static int clean_up_hci_state(struct hci_dev *hdev)
1322 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); 1354 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1323 } 1355 }
1324 1356
1325 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) 1357 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1326 disable_advertising(&req); 1358 disable_advertising(&req);
1327 1359
1328 discov_stopped = hci_stop_discovery(&req); 1360 discov_stopped = hci_stop_discovery(&req);
@@ -1370,24 +1402,24 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1370 u16 len) 1402 u16 len)
1371{ 1403{
1372 struct mgmt_mode *cp = data; 1404 struct mgmt_mode *cp = data;
1373 struct pending_cmd *cmd; 1405 struct mgmt_pending_cmd *cmd;
1374 int err; 1406 int err;
1375 1407
1376 BT_DBG("request for %s", hdev->name); 1408 BT_DBG("request for %s", hdev->name);
1377 1409
1378 if (cp->val != 0x00 && cp->val != 0x01) 1410 if (cp->val != 0x00 && cp->val != 0x01)
1379 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED, 1411 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1380 MGMT_STATUS_INVALID_PARAMS); 1412 MGMT_STATUS_INVALID_PARAMS);
1381 1413
1382 hci_dev_lock(hdev); 1414 hci_dev_lock(hdev);
1383 1415
1384 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) { 1416 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1385 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED, 1417 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1386 MGMT_STATUS_BUSY); 1418 MGMT_STATUS_BUSY);
1387 goto failed; 1419 goto failed;
1388 } 1420 }
1389 1421
1390 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) { 1422 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1391 cancel_delayed_work(&hdev->power_off); 1423 cancel_delayed_work(&hdev->power_off);
1392 1424
1393 if (cp->val) { 1425 if (cp->val) {
@@ -1452,7 +1484,7 @@ struct cmd_lookup {
1452 u8 mgmt_status; 1484 u8 mgmt_status;
1453}; 1485};
1454 1486
1455static void settings_rsp(struct pending_cmd *cmd, void *data) 1487static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1456{ 1488{
1457 struct cmd_lookup *match = data; 1489 struct cmd_lookup *match = data;
1458 1490
@@ -1468,15 +1500,15 @@ static void settings_rsp(struct pending_cmd *cmd, void *data)
1468 mgmt_pending_free(cmd); 1500 mgmt_pending_free(cmd);
1469} 1501}
1470 1502
1471static void cmd_status_rsp(struct pending_cmd *cmd, void *data) 1503static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1472{ 1504{
1473 u8 *status = data; 1505 u8 *status = data;
1474 1506
1475 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status); 1507 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1476 mgmt_pending_remove(cmd); 1508 mgmt_pending_remove(cmd);
1477} 1509}
1478 1510
1479static void cmd_complete_rsp(struct pending_cmd *cmd, void *data) 1511static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1480{ 1512{
1481 if (cmd->cmd_complete) { 1513 if (cmd->cmd_complete) {
1482 u8 *status = data; 1514 u8 *status = data;
@@ -1490,23 +1522,23 @@ static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
1490 cmd_status_rsp(cmd, data); 1522 cmd_status_rsp(cmd, data);
1491} 1523}
1492 1524
1493static int generic_cmd_complete(struct pending_cmd *cmd, u8 status) 1525static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1494{ 1526{
1495 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, 1527 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1496 cmd->param, cmd->param_len); 1528 cmd->param, cmd->param_len);
1497} 1529}
1498 1530
1499static int addr_cmd_complete(struct pending_cmd *cmd, u8 status) 1531static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1500{ 1532{
1501 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, 1533 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1502 sizeof(struct mgmt_addr_info)); 1534 cmd->param, sizeof(struct mgmt_addr_info));
1503} 1535}
1504 1536
1505static u8 mgmt_bredr_support(struct hci_dev *hdev) 1537static u8 mgmt_bredr_support(struct hci_dev *hdev)
1506{ 1538{
1507 if (!lmp_bredr_capable(hdev)) 1539 if (!lmp_bredr_capable(hdev))
1508 return MGMT_STATUS_NOT_SUPPORTED; 1540 return MGMT_STATUS_NOT_SUPPORTED;
1509 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) 1541 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1510 return MGMT_STATUS_REJECTED; 1542 return MGMT_STATUS_REJECTED;
1511 else 1543 else
1512 return MGMT_STATUS_SUCCESS; 1544 return MGMT_STATUS_SUCCESS;
@@ -1516,7 +1548,7 @@ static u8 mgmt_le_support(struct hci_dev *hdev)
1516{ 1548{
1517 if (!lmp_le_capable(hdev)) 1549 if (!lmp_le_capable(hdev))
1518 return MGMT_STATUS_NOT_SUPPORTED; 1550 return MGMT_STATUS_NOT_SUPPORTED;
1519 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) 1551 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1520 return MGMT_STATUS_REJECTED; 1552 return MGMT_STATUS_REJECTED;
1521 else 1553 else
1522 return MGMT_STATUS_SUCCESS; 1554 return MGMT_STATUS_SUCCESS;
@@ -1525,7 +1557,7 @@ static u8 mgmt_le_support(struct hci_dev *hdev)
1525static void set_discoverable_complete(struct hci_dev *hdev, u8 status, 1557static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1526 u16 opcode) 1558 u16 opcode)
1527{ 1559{
1528 struct pending_cmd *cmd; 1560 struct mgmt_pending_cmd *cmd;
1529 struct mgmt_mode *cp; 1561 struct mgmt_mode *cp;
1530 struct hci_request req; 1562 struct hci_request req;
1531 bool changed; 1563 bool changed;
@@ -1540,15 +1572,14 @@ static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1540 1572
1541 if (status) { 1573 if (status) {
1542 u8 mgmt_err = mgmt_status(status); 1574 u8 mgmt_err = mgmt_status(status);
1543 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); 1575 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1544 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); 1576 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1545 goto remove_cmd; 1577 goto remove_cmd;
1546 } 1578 }
1547 1579
1548 cp = cmd->param; 1580 cp = cmd->param;
1549 if (cp->val) { 1581 if (cp->val) {
1550 changed = !test_and_set_bit(HCI_DISCOVERABLE, 1582 changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);
1551 &hdev->dev_flags);
1552 1583
1553 if (hdev->discov_timeout > 0) { 1584 if (hdev->discov_timeout > 0) {
1554 int to = msecs_to_jiffies(hdev->discov_timeout * 1000); 1585 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
@@ -1556,8 +1587,7 @@ static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1556 to); 1587 to);
1557 } 1588 }
1558 } else { 1589 } else {
1559 changed = test_and_clear_bit(HCI_DISCOVERABLE, 1590 changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
1560 &hdev->dev_flags);
1561 } 1591 }
1562 1592
1563 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev); 1593 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
@@ -1586,7 +1616,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1586 u16 len) 1616 u16 len)
1587{ 1617{
1588 struct mgmt_cp_set_discoverable *cp = data; 1618 struct mgmt_cp_set_discoverable *cp = data;
1589 struct pending_cmd *cmd; 1619 struct mgmt_pending_cmd *cmd;
1590 struct hci_request req; 1620 struct hci_request req;
1591 u16 timeout; 1621 u16 timeout;
1592 u8 scan; 1622 u8 scan;
@@ -1594,14 +1624,14 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1594 1624
1595 BT_DBG("request for %s", hdev->name); 1625 BT_DBG("request for %s", hdev->name);
1596 1626
1597 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) && 1627 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1598 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) 1628 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1599 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, 1629 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1600 MGMT_STATUS_REJECTED); 1630 MGMT_STATUS_REJECTED);
1601 1631
1602 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) 1632 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1603 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, 1633 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1604 MGMT_STATUS_INVALID_PARAMS); 1634 MGMT_STATUS_INVALID_PARAMS);
1605 1635
1606 timeout = __le16_to_cpu(cp->timeout); 1636 timeout = __le16_to_cpu(cp->timeout);
1607 1637
@@ -1610,27 +1640,27 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1610 */ 1640 */
1611 if ((cp->val == 0x00 && timeout > 0) || 1641 if ((cp->val == 0x00 && timeout > 0) ||
1612 (cp->val == 0x02 && timeout == 0)) 1642 (cp->val == 0x02 && timeout == 0))
1613 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, 1643 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1614 MGMT_STATUS_INVALID_PARAMS); 1644 MGMT_STATUS_INVALID_PARAMS);
1615 1645
1616 hci_dev_lock(hdev); 1646 hci_dev_lock(hdev);
1617 1647
1618 if (!hdev_is_powered(hdev) && timeout > 0) { 1648 if (!hdev_is_powered(hdev) && timeout > 0) {
1619 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, 1649 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1620 MGMT_STATUS_NOT_POWERED); 1650 MGMT_STATUS_NOT_POWERED);
1621 goto failed; 1651 goto failed;
1622 } 1652 }
1623 1653
1624 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || 1654 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1625 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { 1655 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1626 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, 1656 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1627 MGMT_STATUS_BUSY); 1657 MGMT_STATUS_BUSY);
1628 goto failed; 1658 goto failed;
1629 } 1659 }
1630 1660
1631 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) { 1661 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1632 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, 1662 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1633 MGMT_STATUS_REJECTED); 1663 MGMT_STATUS_REJECTED);
1634 goto failed; 1664 goto failed;
1635 } 1665 }
1636 1666
@@ -1641,8 +1671,8 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1641 * not a valid operation since it requires a timeout 1671 * not a valid operation since it requires a timeout
1642 * and so no need to check HCI_LIMITED_DISCOVERABLE. 1672 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1643 */ 1673 */
1644 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) { 1674 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1645 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags); 1675 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1646 changed = true; 1676 changed = true;
1647 } 1677 }
1648 1678
@@ -1660,9 +1690,9 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1660 * value with the new value. And if only the timeout gets updated, 1690 * value with the new value. And if only the timeout gets updated,
1661 * then no need for any HCI transactions. 1691 * then no need for any HCI transactions.
1662 */ 1692 */
1663 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) && 1693 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1664 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE, 1694 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1665 &hdev->dev_flags)) { 1695 HCI_LIMITED_DISCOVERABLE)) {
1666 cancel_delayed_work(&hdev->discov_off); 1696 cancel_delayed_work(&hdev->discov_off);
1667 hdev->discov_timeout = timeout; 1697 hdev->discov_timeout = timeout;
1668 1698
@@ -1691,16 +1721,16 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1691 1721
1692 /* Limited discoverable mode */ 1722 /* Limited discoverable mode */
1693 if (cp->val == 0x02) 1723 if (cp->val == 0x02)
1694 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); 1724 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1695 else 1725 else
1696 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); 1726 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1697 1727
1698 hci_req_init(&req, hdev); 1728 hci_req_init(&req, hdev);
1699 1729
1700 /* The procedure for LE-only controllers is much simpler - just 1730 /* The procedure for LE-only controllers is much simpler - just
1701 * update the advertising data. 1731 * update the advertising data.
1702 */ 1732 */
1703 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) 1733 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1704 goto update_ad; 1734 goto update_ad;
1705 1735
1706 scan = SCAN_PAGE; 1736 scan = SCAN_PAGE;
@@ -1730,7 +1760,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1730 1760
1731 scan |= SCAN_INQUIRY; 1761 scan |= SCAN_INQUIRY;
1732 } else { 1762 } else {
1733 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); 1763 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1734 } 1764 }
1735 1765
1736 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan); 1766 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
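The conversions above replace open-coded test_bit()/set_bit()/clear_bit()/change_bit() calls on &hdev->dev_flags with hci_dev_test_flag()-style accessors that take the device and the flag name. The sketch below is a plain userspace model of that accessor pattern over a single flags word; unlike the kernel bit helpers it is not atomic, and none of these names are the real hci_core definitions.

#include <stdbool.h>
#include <stdio.h>

enum dev_flag {
	FLAG_CONNECTABLE,
	FLAG_DISCOVERABLE,
	FLAG_LIMITED_DISCOVERABLE,
};

struct dev {
	unsigned long dev_flags;	/* one bit per enum dev_flag */
};

static bool dev_test_flag(const struct dev *d, enum dev_flag f)
{
	return d->dev_flags & (1UL << f);
}

static void dev_set_flag(struct dev *d, enum dev_flag f)
{
	d->dev_flags |= 1UL << f;
}

static void dev_clear_flag(struct dev *d, enum dev_flag f)
{
	d->dev_flags &= ~(1UL << f);
}

static bool dev_test_and_clear_flag(struct dev *d, enum dev_flag f)
{
	bool was_set = dev_test_flag(d, f);

	dev_clear_flag(d, f);
	return was_set;
}

int main(void)
{
	struct dev d = { 0 };

	dev_set_flag(&d, FLAG_CONNECTABLE);
	printf("connectable: %d\n", dev_test_flag(&d, FLAG_CONNECTABLE));
	printf("cleared, was set: %d\n",
	       dev_test_and_clear_flag(&d, FLAG_CONNECTABLE));
	printf("connectable now: %d\n", dev_test_flag(&d, FLAG_CONNECTABLE));
	return 0;
}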
@@ -1753,7 +1783,7 @@ static void write_fast_connectable(struct hci_request *req, bool enable)
1753 struct hci_cp_write_page_scan_activity acp; 1783 struct hci_cp_write_page_scan_activity acp;
1754 u8 type; 1784 u8 type;
1755 1785
1756 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) 1786 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1757 return; 1787 return;
1758 1788
1759 if (hdev->hci_ver < BLUETOOTH_VER_1_2) 1789 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
@@ -1785,7 +1815,7 @@ static void write_fast_connectable(struct hci_request *req, bool enable)
1785static void set_connectable_complete(struct hci_dev *hdev, u8 status, 1815static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1786 u16 opcode) 1816 u16 opcode)
1787{ 1817{
1788 struct pending_cmd *cmd; 1818 struct mgmt_pending_cmd *cmd;
1789 struct mgmt_mode *cp; 1819 struct mgmt_mode *cp;
1790 bool conn_changed, discov_changed; 1820 bool conn_changed, discov_changed;
1791 1821
@@ -1799,20 +1829,20 @@ static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1799 1829
1800 if (status) { 1830 if (status) {
1801 u8 mgmt_err = mgmt_status(status); 1831 u8 mgmt_err = mgmt_status(status);
1802 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); 1832 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1803 goto remove_cmd; 1833 goto remove_cmd;
1804 } 1834 }
1805 1835
1806 cp = cmd->param; 1836 cp = cmd->param;
1807 if (cp->val) { 1837 if (cp->val) {
1808 conn_changed = !test_and_set_bit(HCI_CONNECTABLE, 1838 conn_changed = !hci_dev_test_and_set_flag(hdev,
1809 &hdev->dev_flags); 1839 HCI_CONNECTABLE);
1810 discov_changed = false; 1840 discov_changed = false;
1811 } else { 1841 } else {
1812 conn_changed = test_and_clear_bit(HCI_CONNECTABLE, 1842 conn_changed = hci_dev_test_and_clear_flag(hdev,
1813 &hdev->dev_flags); 1843 HCI_CONNECTABLE);
1814 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE, 1844 discov_changed = hci_dev_test_and_clear_flag(hdev,
1815 &hdev->dev_flags); 1845 HCI_DISCOVERABLE);
1816 } 1846 }
1817 1847
1818 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev); 1848 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
@@ -1838,14 +1868,14 @@ static int set_connectable_update_settings(struct hci_dev *hdev,
1838 bool changed = false; 1868 bool changed = false;
1839 int err; 1869 int err;
1840 1870
1841 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) 1871 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1842 changed = true; 1872 changed = true;
1843 1873
1844 if (val) { 1874 if (val) {
1845 set_bit(HCI_CONNECTABLE, &hdev->dev_flags); 1875 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1846 } else { 1876 } else {
1847 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags); 1877 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1848 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags); 1878 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1849 } 1879 }
1850 1880
1851 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev); 1881 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
@@ -1865,21 +1895,21 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1865 u16 len) 1895 u16 len)
1866{ 1896{
1867 struct mgmt_mode *cp = data; 1897 struct mgmt_mode *cp = data;
1868 struct pending_cmd *cmd; 1898 struct mgmt_pending_cmd *cmd;
1869 struct hci_request req; 1899 struct hci_request req;
1870 u8 scan; 1900 u8 scan;
1871 int err; 1901 int err;
1872 1902
1873 BT_DBG("request for %s", hdev->name); 1903 BT_DBG("request for %s", hdev->name);
1874 1904
1875 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) && 1905 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1876 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) 1906 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1877 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, 1907 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1878 MGMT_STATUS_REJECTED); 1908 MGMT_STATUS_REJECTED);
1879 1909
1880 if (cp->val != 0x00 && cp->val != 0x01) 1910 if (cp->val != 0x00 && cp->val != 0x01)
1881 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, 1911 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1882 MGMT_STATUS_INVALID_PARAMS); 1912 MGMT_STATUS_INVALID_PARAMS);
1883 1913
1884 hci_dev_lock(hdev); 1914 hci_dev_lock(hdev);
1885 1915
@@ -1890,8 +1920,8 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1890 1920
1891 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || 1921 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1892 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { 1922 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1893 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, 1923 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1894 MGMT_STATUS_BUSY); 1924 MGMT_STATUS_BUSY);
1895 goto failed; 1925 goto failed;
1896 } 1926 }
1897 1927
@@ -1907,10 +1937,10 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1907 * by-product of disabling connectable, we need to update the 1937 * by-product of disabling connectable, we need to update the
1908 * advertising flags. 1938 * advertising flags.
1909 */ 1939 */
1910 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { 1940 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1911 if (!cp->val) { 1941 if (!cp->val) {
1912 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); 1942 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1913 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags); 1943 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1914 } 1944 }
1915 update_adv_data(&req); 1945 update_adv_data(&req);
1916 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) { 1946 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
@@ -1939,17 +1969,8 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1939 } 1969 }
1940 1970
1941no_scan_update: 1971no_scan_update:
1942 /* If we're going from non-connectable to connectable or
1943 * vice-versa when fast connectable is enabled ensure that fast
1944 * connectable gets disabled. write_fast_connectable won't do
1945 * anything if the page scan parameters are already what they
1946 * should be.
1947 */
1948 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1949 write_fast_connectable(&req, false);
1950
1951 /* Update the advertising parameters if necessary */ 1972 /* Update the advertising parameters if necessary */
1952 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) 1973 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
1953 enable_advertising(&req); 1974 enable_advertising(&req);
1954 1975
1955 err = hci_req_run(&req, set_connectable_complete); 1976 err = hci_req_run(&req, set_connectable_complete);
@@ -1976,15 +1997,15 @@ static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1976 BT_DBG("request for %s", hdev->name); 1997 BT_DBG("request for %s", hdev->name);
1977 1998
1978 if (cp->val != 0x00 && cp->val != 0x01) 1999 if (cp->val != 0x00 && cp->val != 0x01)
1979 return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE, 2000 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1980 MGMT_STATUS_INVALID_PARAMS); 2001 MGMT_STATUS_INVALID_PARAMS);
1981 2002
1982 hci_dev_lock(hdev); 2003 hci_dev_lock(hdev);
1983 2004
1984 if (cp->val) 2005 if (cp->val)
1985 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags); 2006 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1986 else 2007 else
1987 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags); 2008 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1988 2009
1989 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev); 2010 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1990 if (err < 0) 2011 if (err < 0)
@@ -2002,7 +2023,7 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2002 u16 len) 2023 u16 len)
2003{ 2024{
2004 struct mgmt_mode *cp = data; 2025 struct mgmt_mode *cp = data;
2005 struct pending_cmd *cmd; 2026 struct mgmt_pending_cmd *cmd;
2006 u8 val, status; 2027 u8 val, status;
2007 int err; 2028 int err;
2008 2029
@@ -2010,21 +2031,20 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2010 2031
2011 status = mgmt_bredr_support(hdev); 2032 status = mgmt_bredr_support(hdev);
2012 if (status) 2033 if (status)
2013 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, 2034 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2014 status); 2035 status);
2015 2036
2016 if (cp->val != 0x00 && cp->val != 0x01) 2037 if (cp->val != 0x00 && cp->val != 0x01)
2017 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, 2038 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2018 MGMT_STATUS_INVALID_PARAMS); 2039 MGMT_STATUS_INVALID_PARAMS);
2019 2040
2020 hci_dev_lock(hdev); 2041 hci_dev_lock(hdev);
2021 2042
2022 if (!hdev_is_powered(hdev)) { 2043 if (!hdev_is_powered(hdev)) {
2023 bool changed = false; 2044 bool changed = false;
2024 2045
2025 if (!!cp->val != test_bit(HCI_LINK_SECURITY, 2046 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
2026 &hdev->dev_flags)) { 2047 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
2027 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
2028 changed = true; 2048 changed = true;
2029 } 2049 }
2030 2050
@@ -2039,8 +2059,8 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2039 } 2059 }
2040 2060
2041 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) { 2061 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2042 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, 2062 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2043 MGMT_STATUS_BUSY); 2063 MGMT_STATUS_BUSY);
2044 goto failed; 2064 goto failed;
2045 } 2065 }
2046 2066
@@ -2071,7 +2091,7 @@ failed:
2071static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) 2091static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2072{ 2092{
2073 struct mgmt_mode *cp = data; 2093 struct mgmt_mode *cp = data;
2074 struct pending_cmd *cmd; 2094 struct mgmt_pending_cmd *cmd;
2075 u8 status; 2095 u8 status;
2076 int err; 2096 int err;
2077 2097
@@ -2079,15 +2099,15 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2079 2099
2080 status = mgmt_bredr_support(hdev); 2100 status = mgmt_bredr_support(hdev);
2081 if (status) 2101 if (status)
2082 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status); 2102 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2083 2103
2084 if (!lmp_ssp_capable(hdev)) 2104 if (!lmp_ssp_capable(hdev))
2085 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, 2105 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2086 MGMT_STATUS_NOT_SUPPORTED); 2106 MGMT_STATUS_NOT_SUPPORTED);
2087 2107
2088 if (cp->val != 0x00 && cp->val != 0x01) 2108 if (cp->val != 0x00 && cp->val != 0x01)
2089 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, 2109 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2090 MGMT_STATUS_INVALID_PARAMS); 2110 MGMT_STATUS_INVALID_PARAMS);
2091 2111
2092 hci_dev_lock(hdev); 2112 hci_dev_lock(hdev);
2093 2113
@@ -2095,16 +2115,16 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2095 bool changed; 2115 bool changed;
2096 2116
2097 if (cp->val) { 2117 if (cp->val) {
2098 changed = !test_and_set_bit(HCI_SSP_ENABLED, 2118 changed = !hci_dev_test_and_set_flag(hdev,
2099 &hdev->dev_flags); 2119 HCI_SSP_ENABLED);
2100 } else { 2120 } else {
2101 changed = test_and_clear_bit(HCI_SSP_ENABLED, 2121 changed = hci_dev_test_and_clear_flag(hdev,
2102 &hdev->dev_flags); 2122 HCI_SSP_ENABLED);
2103 if (!changed) 2123 if (!changed)
2104 changed = test_and_clear_bit(HCI_HS_ENABLED, 2124 changed = hci_dev_test_and_clear_flag(hdev,
2105 &hdev->dev_flags); 2125 HCI_HS_ENABLED);
2106 else 2126 else
2107 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags); 2127 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2108 } 2128 }
2109 2129
2110 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev); 2130 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
@@ -2118,12 +2138,12 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2118 } 2138 }
2119 2139
2120 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) { 2140 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
2121 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, 2141 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2122 MGMT_STATUS_BUSY); 2142 MGMT_STATUS_BUSY);
2123 goto failed; 2143 goto failed;
2124 } 2144 }
2125 2145
2126 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { 2146 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2127 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev); 2147 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2128 goto failed; 2148 goto failed;
2129 } 2149 }
@@ -2134,7 +2154,7 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2134 goto failed; 2154 goto failed;
2135 } 2155 }
2136 2156
2137 if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags)) 2157 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
2138 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, 2158 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2139 sizeof(cp->val), &cp->val); 2159 sizeof(cp->val), &cp->val);
2140 2160
@@ -2160,38 +2180,38 @@ static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2160 2180
2161 status = mgmt_bredr_support(hdev); 2181 status = mgmt_bredr_support(hdev);
2162 if (status) 2182 if (status)
2163 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status); 2183 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2164 2184
2165 if (!lmp_ssp_capable(hdev)) 2185 if (!lmp_ssp_capable(hdev))
2166 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, 2186 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2167 MGMT_STATUS_NOT_SUPPORTED); 2187 MGMT_STATUS_NOT_SUPPORTED);
2168 2188
2169 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) 2189 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2170 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, 2190 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2171 MGMT_STATUS_REJECTED); 2191 MGMT_STATUS_REJECTED);
2172 2192
2173 if (cp->val != 0x00 && cp->val != 0x01) 2193 if (cp->val != 0x00 && cp->val != 0x01)
2174 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, 2194 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2175 MGMT_STATUS_INVALID_PARAMS); 2195 MGMT_STATUS_INVALID_PARAMS);
2176 2196
2177 hci_dev_lock(hdev); 2197 hci_dev_lock(hdev);
2178 2198
2179 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) { 2199 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
2180 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS, 2200 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2181 MGMT_STATUS_BUSY); 2201 MGMT_STATUS_BUSY);
2182 goto unlock; 2202 goto unlock;
2183 } 2203 }
2184 2204
2185 if (cp->val) { 2205 if (cp->val) {
2186 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags); 2206 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2187 } else { 2207 } else {
2188 if (hdev_is_powered(hdev)) { 2208 if (hdev_is_powered(hdev)) {
2189 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS, 2209 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2190 MGMT_STATUS_REJECTED); 2210 MGMT_STATUS_REJECTED);
2191 goto unlock; 2211 goto unlock;
2192 } 2212 }
2193 2213
2194 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags); 2214 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2195 } 2215 }
2196 2216
2197 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev); 2217 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
@@ -2232,7 +2252,7 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2232 * has actually been enabled. During power on, the 2252 * has actually been enabled. During power on, the
2233 * update in powered_update_hci will take care of it. 2253 * update in powered_update_hci will take care of it.
2234 */ 2254 */
2235 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { 2255 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2236 struct hci_request req; 2256 struct hci_request req;
2237 2257
2238 hci_req_init(&req, hdev); 2258 hci_req_init(&req, hdev);
@@ -2250,7 +2270,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2250{ 2270{
2251 struct mgmt_mode *cp = data; 2271 struct mgmt_mode *cp = data;
2252 struct hci_cp_write_le_host_supported hci_cp; 2272 struct hci_cp_write_le_host_supported hci_cp;
2253 struct pending_cmd *cmd; 2273 struct mgmt_pending_cmd *cmd;
2254 struct hci_request req; 2274 struct hci_request req;
2255 int err; 2275 int err;
2256 u8 val, enabled; 2276 u8 val, enabled;
@@ -2258,17 +2278,17 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2258 BT_DBG("request for %s", hdev->name); 2278 BT_DBG("request for %s", hdev->name);
2259 2279
2260 if (!lmp_le_capable(hdev)) 2280 if (!lmp_le_capable(hdev))
2261 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE, 2281 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2262 MGMT_STATUS_NOT_SUPPORTED); 2282 MGMT_STATUS_NOT_SUPPORTED);
2263 2283
2264 if (cp->val != 0x00 && cp->val != 0x01) 2284 if (cp->val != 0x00 && cp->val != 0x01)
2265 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE, 2285 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2266 MGMT_STATUS_INVALID_PARAMS); 2286 MGMT_STATUS_INVALID_PARAMS);
2267 2287
2268 /* LE-only devices do not allow toggling LE on/off */ 2288 /* LE-only devices do not allow toggling LE on/off */
2269 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) 2289 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2270 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE, 2290 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2271 MGMT_STATUS_REJECTED); 2291 MGMT_STATUS_REJECTED);
2272 2292
2273 hci_dev_lock(hdev); 2293 hci_dev_lock(hdev);
2274 2294
@@ -2278,13 +2298,13 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2278 if (!hdev_is_powered(hdev) || val == enabled) { 2298 if (!hdev_is_powered(hdev) || val == enabled) {
2279 bool changed = false; 2299 bool changed = false;
2280 2300
2281 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { 2301 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2282 change_bit(HCI_LE_ENABLED, &hdev->dev_flags); 2302 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2283 changed = true; 2303 changed = true;
2284 } 2304 }
2285 2305
2286 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) { 2306 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2287 clear_bit(HCI_ADVERTISING, &hdev->dev_flags); 2307 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2288 changed = true; 2308 changed = true;
2289 } 2309 }
2290 2310
@@ -2300,8 +2320,8 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2300 2320
2301 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) || 2321 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2302 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) { 2322 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2303 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE, 2323 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2304 MGMT_STATUS_BUSY); 2324 MGMT_STATUS_BUSY);
2305 goto unlock; 2325 goto unlock;
2306 } 2326 }
2307 2327
@@ -2319,7 +2339,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2319 hci_cp.le = val; 2339 hci_cp.le = val;
2320 hci_cp.simul = 0x00; 2340 hci_cp.simul = 0x00;
2321 } else { 2341 } else {
2322 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) 2342 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2323 disable_advertising(&req); 2343 disable_advertising(&req);
2324 } 2344 }
2325 2345
@@ -2343,7 +2363,7 @@ unlock:
2343 */ 2363 */
2344static bool pending_eir_or_class(struct hci_dev *hdev) 2364static bool pending_eir_or_class(struct hci_dev *hdev)
2345{ 2365{
2346 struct pending_cmd *cmd; 2366 struct mgmt_pending_cmd *cmd;
2347 2367
2348 list_for_each_entry(cmd, &hdev->mgmt_pending, list) { 2368 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2349 switch (cmd->opcode) { 2369 switch (cmd->opcode) {
@@ -2379,7 +2399,7 @@ static u8 get_uuid_size(const u8 *uuid)
2379 2399
2380static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status) 2400static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2381{ 2401{
2382 struct pending_cmd *cmd; 2402 struct mgmt_pending_cmd *cmd;
2383 2403
2384 hci_dev_lock(hdev); 2404 hci_dev_lock(hdev);
2385 2405
@@ -2387,8 +2407,8 @@ static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2387 if (!cmd) 2407 if (!cmd)
2388 goto unlock; 2408 goto unlock;
2389 2409
2390 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status), 2410 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2391 hdev->dev_class, 3); 2411 mgmt_status(status), hdev->dev_class, 3);
2392 2412
2393 mgmt_pending_remove(cmd); 2413 mgmt_pending_remove(cmd);
2394 2414
@@ -2406,7 +2426,7 @@ static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2406static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) 2426static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2407{ 2427{
2408 struct mgmt_cp_add_uuid *cp = data; 2428 struct mgmt_cp_add_uuid *cp = data;
2409 struct pending_cmd *cmd; 2429 struct mgmt_pending_cmd *cmd;
2410 struct hci_request req; 2430 struct hci_request req;
2411 struct bt_uuid *uuid; 2431 struct bt_uuid *uuid;
2412 int err; 2432 int err;
@@ -2416,8 +2436,8 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2416 hci_dev_lock(hdev); 2436 hci_dev_lock(hdev);
2417 2437
2418 if (pending_eir_or_class(hdev)) { 2438 if (pending_eir_or_class(hdev)) {
2419 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID, 2439 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2420 MGMT_STATUS_BUSY); 2440 MGMT_STATUS_BUSY);
2421 goto failed; 2441 goto failed;
2422 } 2442 }
2423 2443
@@ -2443,8 +2463,8 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2443 if (err != -ENODATA) 2463 if (err != -ENODATA)
2444 goto failed; 2464 goto failed;
2445 2465
2446 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0, 2466 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2447 hdev->dev_class, 3); 2467 hdev->dev_class, 3);
2448 goto failed; 2468 goto failed;
2449 } 2469 }
2450 2470
@@ -2466,7 +2486,7 @@ static bool enable_service_cache(struct hci_dev *hdev)
2466 if (!hdev_is_powered(hdev)) 2486 if (!hdev_is_powered(hdev))
2467 return false; 2487 return false;
2468 2488
2469 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) { 2489 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2470 queue_delayed_work(hdev->workqueue, &hdev->service_cache, 2490 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2471 CACHE_TIMEOUT); 2491 CACHE_TIMEOUT);
2472 return true; 2492 return true;
@@ -2486,7 +2506,7 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2486 u16 len) 2506 u16 len)
2487{ 2507{
2488 struct mgmt_cp_remove_uuid *cp = data; 2508 struct mgmt_cp_remove_uuid *cp = data;
2489 struct pending_cmd *cmd; 2509 struct mgmt_pending_cmd *cmd;
2490 struct bt_uuid *match, *tmp; 2510 struct bt_uuid *match, *tmp;
2491 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 2511 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2492 struct hci_request req; 2512 struct hci_request req;
@@ -2497,8 +2517,8 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2497 hci_dev_lock(hdev); 2517 hci_dev_lock(hdev);
2498 2518
2499 if (pending_eir_or_class(hdev)) { 2519 if (pending_eir_or_class(hdev)) {
2500 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID, 2520 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2501 MGMT_STATUS_BUSY); 2521 MGMT_STATUS_BUSY);
2502 goto unlock; 2522 goto unlock;
2503 } 2523 }
2504 2524
@@ -2506,8 +2526,9 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2506 hci_uuids_clear(hdev); 2526 hci_uuids_clear(hdev);
2507 2527
2508 if (enable_service_cache(hdev)) { 2528 if (enable_service_cache(hdev)) {
2509 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 2529 err = mgmt_cmd_complete(sk, hdev->id,
2510 0, hdev->dev_class, 3); 2530 MGMT_OP_REMOVE_UUID,
2531 0, hdev->dev_class, 3);
2511 goto unlock; 2532 goto unlock;
2512 } 2533 }
2513 2534
@@ -2526,8 +2547,8 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2526 } 2547 }
2527 2548
2528 if (found == 0) { 2549 if (found == 0) {
2529 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID, 2550 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2530 MGMT_STATUS_INVALID_PARAMS); 2551 MGMT_STATUS_INVALID_PARAMS);
2531 goto unlock; 2552 goto unlock;
2532 } 2553 }
2533 2554
@@ -2542,8 +2563,8 @@ update_class:
2542 if (err != -ENODATA) 2563 if (err != -ENODATA)
2543 goto unlock; 2564 goto unlock;
2544 2565
2545 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0, 2566 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2546 hdev->dev_class, 3); 2567 hdev->dev_class, 3);
2547 goto unlock; 2568 goto unlock;
2548 } 2569 }
2549 2570
@@ -2571,27 +2592,27 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2571 u16 len) 2592 u16 len)
2572{ 2593{
2573 struct mgmt_cp_set_dev_class *cp = data; 2594 struct mgmt_cp_set_dev_class *cp = data;
2574 struct pending_cmd *cmd; 2595 struct mgmt_pending_cmd *cmd;
2575 struct hci_request req; 2596 struct hci_request req;
2576 int err; 2597 int err;
2577 2598
2578 BT_DBG("request for %s", hdev->name); 2599 BT_DBG("request for %s", hdev->name);
2579 2600
2580 if (!lmp_bredr_capable(hdev)) 2601 if (!lmp_bredr_capable(hdev))
2581 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 2602 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2582 MGMT_STATUS_NOT_SUPPORTED); 2603 MGMT_STATUS_NOT_SUPPORTED);
2583 2604
2584 hci_dev_lock(hdev); 2605 hci_dev_lock(hdev);
2585 2606
2586 if (pending_eir_or_class(hdev)) { 2607 if (pending_eir_or_class(hdev)) {
2587 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 2608 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2588 MGMT_STATUS_BUSY); 2609 MGMT_STATUS_BUSY);
2589 goto unlock; 2610 goto unlock;
2590 } 2611 }
2591 2612
2592 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) { 2613 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2593 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 2614 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2594 MGMT_STATUS_INVALID_PARAMS); 2615 MGMT_STATUS_INVALID_PARAMS);
2595 goto unlock; 2616 goto unlock;
2596 } 2617 }
2597 2618
@@ -2599,14 +2620,14 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2599 hdev->minor_class = cp->minor; 2620 hdev->minor_class = cp->minor;
2600 2621
2601 if (!hdev_is_powered(hdev)) { 2622 if (!hdev_is_powered(hdev)) {
2602 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0, 2623 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2603 hdev->dev_class, 3); 2624 hdev->dev_class, 3);
2604 goto unlock; 2625 goto unlock;
2605 } 2626 }
2606 2627
2607 hci_req_init(&req, hdev); 2628 hci_req_init(&req, hdev);
2608 2629
2609 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) { 2630 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2610 hci_dev_unlock(hdev); 2631 hci_dev_unlock(hdev);
2611 cancel_delayed_work_sync(&hdev->service_cache); 2632 cancel_delayed_work_sync(&hdev->service_cache);
2612 hci_dev_lock(hdev); 2633 hci_dev_lock(hdev);
@@ -2620,8 +2641,8 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2620 if (err != -ENODATA) 2641 if (err != -ENODATA)
2621 goto unlock; 2642 goto unlock;
2622 2643
2623 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0, 2644 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2624 hdev->dev_class, 3); 2645 hdev->dev_class, 3);
2625 goto unlock; 2646 goto unlock;
2626 } 2647 }
2627 2648
@@ -2651,15 +2672,15 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2651 BT_DBG("request for %s", hdev->name); 2672 BT_DBG("request for %s", hdev->name);
2652 2673
2653 if (!lmp_bredr_capable(hdev)) 2674 if (!lmp_bredr_capable(hdev))
2654 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 2675 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2655 MGMT_STATUS_NOT_SUPPORTED); 2676 MGMT_STATUS_NOT_SUPPORTED);
2656 2677
2657 key_count = __le16_to_cpu(cp->key_count); 2678 key_count = __le16_to_cpu(cp->key_count);
2658 if (key_count > max_key_count) { 2679 if (key_count > max_key_count) {
2659 BT_ERR("load_link_keys: too big key_count value %u", 2680 BT_ERR("load_link_keys: too big key_count value %u",
2660 key_count); 2681 key_count);
2661 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 2682 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2662 MGMT_STATUS_INVALID_PARAMS); 2683 MGMT_STATUS_INVALID_PARAMS);
2663 } 2684 }
2664 2685
2665 expected_len = sizeof(*cp) + key_count * 2686 expected_len = sizeof(*cp) + key_count *
@@ -2667,13 +2688,13 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2667 if (expected_len != len) { 2688 if (expected_len != len) {
2668 BT_ERR("load_link_keys: expected %u bytes, got %u bytes", 2689 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2669 expected_len, len); 2690 expected_len, len);
2670 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 2691 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2671 MGMT_STATUS_INVALID_PARAMS); 2692 MGMT_STATUS_INVALID_PARAMS);
2672 } 2693 }
2673 2694
2674 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01) 2695 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2675 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 2696 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2676 MGMT_STATUS_INVALID_PARAMS); 2697 MGMT_STATUS_INVALID_PARAMS);
2677 2698
2678 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys, 2699 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2679 key_count); 2700 key_count);
@@ -2682,8 +2703,9 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2682 struct mgmt_link_key_info *key = &cp->keys[i]; 2703 struct mgmt_link_key_info *key = &cp->keys[i];
2683 2704
2684 if (key->addr.type != BDADDR_BREDR || key->type > 0x08) 2705 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2685 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 2706 return mgmt_cmd_status(sk, hdev->id,
2686 MGMT_STATUS_INVALID_PARAMS); 2707 MGMT_OP_LOAD_LINK_KEYS,
2708 MGMT_STATUS_INVALID_PARAMS);
2687 } 2709 }
2688 2710
2689 hci_dev_lock(hdev); 2711 hci_dev_lock(hdev);
@@ -2691,11 +2713,10 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2691 hci_link_keys_clear(hdev); 2713 hci_link_keys_clear(hdev);
2692 2714
2693 if (cp->debug_keys) 2715 if (cp->debug_keys)
2694 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS, 2716 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2695 &hdev->dev_flags);
2696 else 2717 else
2697 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS, 2718 changed = hci_dev_test_and_clear_flag(hdev,
2698 &hdev->dev_flags); 2719 HCI_KEEP_DEBUG_KEYS);
2699 2720
2700 if (changed) 2721 if (changed)
2701 new_settings(hdev, NULL); 2722 new_settings(hdev, NULL);
@@ -2713,7 +2734,7 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2713 key->type, key->pin_len, NULL); 2734 key->type, key->pin_len, NULL);
2714 } 2735 }
2715 2736
2716 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0); 2737 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2717 2738
2718 hci_dev_unlock(hdev); 2739 hci_dev_unlock(hdev);
2719 2740
@@ -2738,7 +2759,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2738 struct mgmt_cp_unpair_device *cp = data; 2759 struct mgmt_cp_unpair_device *cp = data;
2739 struct mgmt_rp_unpair_device rp; 2760 struct mgmt_rp_unpair_device rp;
2740 struct hci_cp_disconnect dc; 2761 struct hci_cp_disconnect dc;
2741 struct pending_cmd *cmd; 2762 struct mgmt_pending_cmd *cmd;
2742 struct hci_conn *conn; 2763 struct hci_conn *conn;
2743 int err; 2764 int err;
2744 2765
@@ -2747,20 +2768,21 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2747 rp.addr.type = cp->addr.type; 2768 rp.addr.type = cp->addr.type;
2748 2769
2749 if (!bdaddr_type_is_valid(cp->addr.type)) 2770 if (!bdaddr_type_is_valid(cp->addr.type))
2750 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 2771 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2751 MGMT_STATUS_INVALID_PARAMS, 2772 MGMT_STATUS_INVALID_PARAMS,
2752 &rp, sizeof(rp)); 2773 &rp, sizeof(rp));
2753 2774
2754 if (cp->disconnect != 0x00 && cp->disconnect != 0x01) 2775 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2755 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 2776 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2756 MGMT_STATUS_INVALID_PARAMS, 2777 MGMT_STATUS_INVALID_PARAMS,
2757 &rp, sizeof(rp)); 2778 &rp, sizeof(rp));
2758 2779
2759 hci_dev_lock(hdev); 2780 hci_dev_lock(hdev);
2760 2781
2761 if (!hdev_is_powered(hdev)) { 2782 if (!hdev_is_powered(hdev)) {
2762 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 2783 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2763 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); 2784 MGMT_STATUS_NOT_POWERED, &rp,
2785 sizeof(rp));
2764 goto unlock; 2786 goto unlock;
2765 } 2787 }
2766 2788
@@ -2810,8 +2832,9 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2810 } 2832 }
2811 2833
2812 if (err < 0) { 2834 if (err < 0) {
2813 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 2835 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2814 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp)); 2836 MGMT_STATUS_NOT_PAIRED, &rp,
2837 sizeof(rp));
2815 goto unlock; 2838 goto unlock;
2816 } 2839 }
2817 2840
@@ -2819,8 +2842,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2819 * link is requested. 2842 * link is requested.
2820 */ 2843 */
2821 if (!conn) { 2844 if (!conn) {
2822 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0, 2845 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2823 &rp, sizeof(rp)); 2846 &rp, sizeof(rp));
2824 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk); 2847 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2825 goto unlock; 2848 goto unlock;
2826 } 2849 }
@@ -2850,7 +2873,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2850{ 2873{
2851 struct mgmt_cp_disconnect *cp = data; 2874 struct mgmt_cp_disconnect *cp = data;
2852 struct mgmt_rp_disconnect rp; 2875 struct mgmt_rp_disconnect rp;
2853 struct pending_cmd *cmd; 2876 struct mgmt_pending_cmd *cmd;
2854 struct hci_conn *conn; 2877 struct hci_conn *conn;
2855 int err; 2878 int err;
2856 2879
@@ -2861,21 +2884,22 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2861 rp.addr.type = cp->addr.type; 2884 rp.addr.type = cp->addr.type;
2862 2885
2863 if (!bdaddr_type_is_valid(cp->addr.type)) 2886 if (!bdaddr_type_is_valid(cp->addr.type))
2864 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, 2887 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2865 MGMT_STATUS_INVALID_PARAMS, 2888 MGMT_STATUS_INVALID_PARAMS,
2866 &rp, sizeof(rp)); 2889 &rp, sizeof(rp));
2867 2890
2868 hci_dev_lock(hdev); 2891 hci_dev_lock(hdev);
2869 2892
2870 if (!test_bit(HCI_UP, &hdev->flags)) { 2893 if (!test_bit(HCI_UP, &hdev->flags)) {
2871 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, 2894 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2872 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); 2895 MGMT_STATUS_NOT_POWERED, &rp,
2896 sizeof(rp));
2873 goto failed; 2897 goto failed;
2874 } 2898 }
2875 2899
2876 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) { 2900 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2877 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, 2901 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2878 MGMT_STATUS_BUSY, &rp, sizeof(rp)); 2902 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2879 goto failed; 2903 goto failed;
2880 } 2904 }
2881 2905
@@ -2886,8 +2910,9 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2886 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); 2910 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2887 2911
2888 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) { 2912 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2889 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, 2913 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2890 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp)); 2914 MGMT_STATUS_NOT_CONNECTED, &rp,
2915 sizeof(rp));
2891 goto failed; 2916 goto failed;
2892 } 2917 }
2893 2918
@@ -2941,8 +2966,8 @@ static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2941 hci_dev_lock(hdev); 2966 hci_dev_lock(hdev);
2942 2967
2943 if (!hdev_is_powered(hdev)) { 2968 if (!hdev_is_powered(hdev)) {
2944 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 2969 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2945 MGMT_STATUS_NOT_POWERED); 2970 MGMT_STATUS_NOT_POWERED);
2946 goto unlock; 2971 goto unlock;
2947 } 2972 }
2948 2973
@@ -2975,8 +3000,8 @@ static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2975 /* Recalculate length in case of filtered SCO connections, etc */ 3000 /* Recalculate length in case of filtered SCO connections, etc */
2976 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info)); 3001 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2977 3002
2978 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp, 3003 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2979 rp_len); 3004 rp_len);
2980 3005
2981 kfree(rp); 3006 kfree(rp);
2982 3007
@@ -2988,7 +3013,7 @@ unlock:
2988static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev, 3013static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2989 struct mgmt_cp_pin_code_neg_reply *cp) 3014 struct mgmt_cp_pin_code_neg_reply *cp)
2990{ 3015{
2991 struct pending_cmd *cmd; 3016 struct mgmt_pending_cmd *cmd;
2992 int err; 3017 int err;
2993 3018
2994 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp, 3019 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
@@ -3010,7 +3035,7 @@ static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3010 struct hci_conn *conn; 3035 struct hci_conn *conn;
3011 struct mgmt_cp_pin_code_reply *cp = data; 3036 struct mgmt_cp_pin_code_reply *cp = data;
3012 struct hci_cp_pin_code_reply reply; 3037 struct hci_cp_pin_code_reply reply;
3013 struct pending_cmd *cmd; 3038 struct mgmt_pending_cmd *cmd;
3014 int err; 3039 int err;
3015 3040
3016 BT_DBG(""); 3041 BT_DBG("");
@@ -3018,15 +3043,15 @@ static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3018 hci_dev_lock(hdev); 3043 hci_dev_lock(hdev);
3019 3044
3020 if (!hdev_is_powered(hdev)) { 3045 if (!hdev_is_powered(hdev)) {
3021 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, 3046 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3022 MGMT_STATUS_NOT_POWERED); 3047 MGMT_STATUS_NOT_POWERED);
3023 goto failed; 3048 goto failed;
3024 } 3049 }
3025 3050
3026 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); 3051 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3027 if (!conn) { 3052 if (!conn) {
3028 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, 3053 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3029 MGMT_STATUS_NOT_CONNECTED); 3054 MGMT_STATUS_NOT_CONNECTED);
3030 goto failed; 3055 goto failed;
3031 } 3056 }
3032 3057
@@ -3039,8 +3064,8 @@ static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3039 3064
3040 err = send_pin_code_neg_reply(sk, hdev, &ncp); 3065 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3041 if (err >= 0) 3066 if (err >= 0)
3042 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, 3067 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3043 MGMT_STATUS_INVALID_PARAMS); 3068 MGMT_STATUS_INVALID_PARAMS);
3044 3069
3045 goto failed; 3070 goto failed;
3046 } 3071 }
@@ -3074,8 +3099,8 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3074 BT_DBG(""); 3099 BT_DBG("");
3075 3100
3076 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY) 3101 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3077 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 3102 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3078 MGMT_STATUS_INVALID_PARAMS, NULL, 0); 3103 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3079 3104
3080 hci_dev_lock(hdev); 3105 hci_dev_lock(hdev);
3081 3106
@@ -3086,14 +3111,14 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3086 3111
3087 hci_dev_unlock(hdev); 3112 hci_dev_unlock(hdev);
3088 3113
3089 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL, 3114 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3090 0); 3115 NULL, 0);
3091} 3116}
3092 3117
3093static struct pending_cmd *find_pairing(struct hci_conn *conn) 3118static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3094{ 3119{
3095 struct hci_dev *hdev = conn->hdev; 3120 struct hci_dev *hdev = conn->hdev;
3096 struct pending_cmd *cmd; 3121 struct mgmt_pending_cmd *cmd;
3097 3122
3098 list_for_each_entry(cmd, &hdev->mgmt_pending, list) { 3123 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3099 if (cmd->opcode != MGMT_OP_PAIR_DEVICE) 3124 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
@@ -3108,7 +3133,7 @@ static struct pending_cmd *find_pairing(struct hci_conn *conn)
3108 return NULL; 3133 return NULL;
3109} 3134}
3110 3135
3111static int pairing_complete(struct pending_cmd *cmd, u8 status) 3136static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3112{ 3137{
3113 struct mgmt_rp_pair_device rp; 3138 struct mgmt_rp_pair_device rp;
3114 struct hci_conn *conn = cmd->user_data; 3139 struct hci_conn *conn = cmd->user_data;
@@ -3117,8 +3142,8 @@ static int pairing_complete(struct pending_cmd *cmd, u8 status)
3117 bacpy(&rp.addr.bdaddr, &conn->dst); 3142 bacpy(&rp.addr.bdaddr, &conn->dst);
3118 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type); 3143 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3119 3144
3120 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status, 3145 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3121 &rp, sizeof(rp)); 3146 status, &rp, sizeof(rp));
3122 3147
3123 /* So we don't get further callbacks for this connection */ 3148 /* So we don't get further callbacks for this connection */
3124 conn->connect_cfm_cb = NULL; 3149 conn->connect_cfm_cb = NULL;
@@ -3140,7 +3165,7 @@ static int pairing_complete(struct pending_cmd *cmd, u8 status)
3140void mgmt_smp_complete(struct hci_conn *conn, bool complete) 3165void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3141{ 3166{
3142 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED; 3167 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3143 struct pending_cmd *cmd; 3168 struct mgmt_pending_cmd *cmd;
3144 3169
3145 cmd = find_pairing(conn); 3170 cmd = find_pairing(conn);
3146 if (cmd) { 3171 if (cmd) {
@@ -3151,7 +3176,7 @@ void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3151 3176
3152static void pairing_complete_cb(struct hci_conn *conn, u8 status) 3177static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3153{ 3178{
3154 struct pending_cmd *cmd; 3179 struct mgmt_pending_cmd *cmd;
3155 3180
3156 BT_DBG("status %u", status); 3181 BT_DBG("status %u", status);
3157 3182
@@ -3167,7 +3192,7 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3167 3192
3168static void le_pairing_complete_cb(struct hci_conn *conn, u8 status) 3193static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3169{ 3194{
3170 struct pending_cmd *cmd; 3195 struct mgmt_pending_cmd *cmd;
3171 3196
3172 BT_DBG("status %u", status); 3197 BT_DBG("status %u", status);
3173 3198
@@ -3189,7 +3214,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3189{ 3214{
3190 struct mgmt_cp_pair_device *cp = data; 3215 struct mgmt_cp_pair_device *cp = data;
3191 struct mgmt_rp_pair_device rp; 3216 struct mgmt_rp_pair_device rp;
3192 struct pending_cmd *cmd; 3217 struct mgmt_pending_cmd *cmd;
3193 u8 sec_level, auth_type; 3218 u8 sec_level, auth_type;
3194 struct hci_conn *conn; 3219 struct hci_conn *conn;
3195 int err; 3220 int err;
@@ -3201,20 +3226,28 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3201 rp.addr.type = cp->addr.type; 3226 rp.addr.type = cp->addr.type;
3202 3227
3203 if (!bdaddr_type_is_valid(cp->addr.type)) 3228 if (!bdaddr_type_is_valid(cp->addr.type))
3204 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, 3229 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3205 MGMT_STATUS_INVALID_PARAMS, 3230 MGMT_STATUS_INVALID_PARAMS,
3206 &rp, sizeof(rp)); 3231 &rp, sizeof(rp));
3207 3232
3208 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY) 3233 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3209 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, 3234 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3210 MGMT_STATUS_INVALID_PARAMS, 3235 MGMT_STATUS_INVALID_PARAMS,
3211 &rp, sizeof(rp)); 3236 &rp, sizeof(rp));
3212 3237
3213 hci_dev_lock(hdev); 3238 hci_dev_lock(hdev);
3214 3239
3215 if (!hdev_is_powered(hdev)) { 3240 if (!hdev_is_powered(hdev)) {
3216 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, 3241 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3217 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); 3242 MGMT_STATUS_NOT_POWERED, &rp,
3243 sizeof(rp));
3244 goto unlock;
3245 }
3246
3247 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3248 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3249 MGMT_STATUS_ALREADY_PAIRED, &rp,
3250 sizeof(rp));
3218 goto unlock; 3251 goto unlock;
3219 } 3252 }
3220 3253
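The hunk above adds an early return when hci_bdaddr_is_paired() reports the address is already paired, next to the existing not-powered guard in pair_device(). A tiny standalone sketch of that guard ordering, with the paired lookup reduced to a boolean stand-in and the status values purely illustrative (not the real mgmt status codes):

#include <stdbool.h>
#include <stdio.h>

enum { STATUS_SUCCESS, STATUS_NOT_POWERED, STATUS_ALREADY_PAIRED };

struct dev {
	bool powered;
	bool peer_paired;	/* stands in for hci_bdaddr_is_paired() */
};

static int pair_device(const struct dev *d)
{
	if (!d->powered)
		return STATUS_NOT_POWERED;	/* reply and stop early */
	if (d->peer_paired)
		return STATUS_ALREADY_PAIRED;	/* the new early return */
	return STATUS_SUCCESS;			/* would go on to create the connection */
}

int main(void)
{
	struct dev off = { .powered = false, .peer_paired = false };
	struct dev paired = { .powered = true, .peer_paired = true };
	struct dev fresh = { .powered = true, .peer_paired = false };

	printf("%d %d %d\n", pair_device(&off), pair_device(&paired),
	       pair_device(&fresh));
	return 0;
}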
@@ -3262,16 +3295,15 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3262 else 3295 else
3263 status = MGMT_STATUS_CONNECT_FAILED; 3296 status = MGMT_STATUS_CONNECT_FAILED;
3264 3297
3265 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, 3298 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3266 status, &rp, 3299 status, &rp, sizeof(rp));
3267 sizeof(rp));
3268 goto unlock; 3300 goto unlock;
3269 } 3301 }
3270 3302
3271 if (conn->connect_cfm_cb) { 3303 if (conn->connect_cfm_cb) {
3272 hci_conn_drop(conn); 3304 hci_conn_drop(conn);
3273 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, 3305 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3274 MGMT_STATUS_BUSY, &rp, sizeof(rp)); 3306 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3275 goto unlock; 3307 goto unlock;
3276 } 3308 }
3277 3309
@@ -3315,7 +3347,7 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3315 u16 len) 3347 u16 len)
3316{ 3348{
3317 struct mgmt_addr_info *addr = data; 3349 struct mgmt_addr_info *addr = data;
3318 struct pending_cmd *cmd; 3350 struct mgmt_pending_cmd *cmd;
3319 struct hci_conn *conn; 3351 struct hci_conn *conn;
3320 int err; 3352 int err;
3321 3353
@@ -3324,31 +3356,31 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3324 hci_dev_lock(hdev); 3356 hci_dev_lock(hdev);
3325 3357
3326 if (!hdev_is_powered(hdev)) { 3358 if (!hdev_is_powered(hdev)) {
3327 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 3359 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3328 MGMT_STATUS_NOT_POWERED); 3360 MGMT_STATUS_NOT_POWERED);
3329 goto unlock; 3361 goto unlock;
3330 } 3362 }
3331 3363
3332 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev); 3364 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3333 if (!cmd) { 3365 if (!cmd) {
3334 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 3366 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3335 MGMT_STATUS_INVALID_PARAMS); 3367 MGMT_STATUS_INVALID_PARAMS);
3336 goto unlock; 3368 goto unlock;
3337 } 3369 }
3338 3370
3339 conn = cmd->user_data; 3371 conn = cmd->user_data;
3340 3372
3341 if (bacmp(&addr->bdaddr, &conn->dst) != 0) { 3373 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3342 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 3374 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3343 MGMT_STATUS_INVALID_PARAMS); 3375 MGMT_STATUS_INVALID_PARAMS);
3344 goto unlock; 3376 goto unlock;
3345 } 3377 }
3346 3378
3347 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED); 3379 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3348 mgmt_pending_remove(cmd); 3380 mgmt_pending_remove(cmd);
3349 3381
3350 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0, 3382 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3351 addr, sizeof(*addr)); 3383 addr, sizeof(*addr));
3352unlock: 3384unlock:
3353 hci_dev_unlock(hdev); 3385 hci_dev_unlock(hdev);
3354 return err; 3386 return err;
@@ -3358,16 +3390,16 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3358 struct mgmt_addr_info *addr, u16 mgmt_op, 3390 struct mgmt_addr_info *addr, u16 mgmt_op,
3359 u16 hci_op, __le32 passkey) 3391 u16 hci_op, __le32 passkey)
3360{ 3392{
3361 struct pending_cmd *cmd; 3393 struct mgmt_pending_cmd *cmd;
3362 struct hci_conn *conn; 3394 struct hci_conn *conn;
3363 int err; 3395 int err;
3364 3396
3365 hci_dev_lock(hdev); 3397 hci_dev_lock(hdev);
3366 3398
3367 if (!hdev_is_powered(hdev)) { 3399 if (!hdev_is_powered(hdev)) {
3368 err = cmd_complete(sk, hdev->id, mgmt_op, 3400 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3369 MGMT_STATUS_NOT_POWERED, addr, 3401 MGMT_STATUS_NOT_POWERED, addr,
3370 sizeof(*addr)); 3402 sizeof(*addr));
3371 goto done; 3403 goto done;
3372 } 3404 }
3373 3405
@@ -3377,22 +3409,22 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3377 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr); 3409 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3378 3410
3379 if (!conn) { 3411 if (!conn) {
3380 err = cmd_complete(sk, hdev->id, mgmt_op, 3412 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3381 MGMT_STATUS_NOT_CONNECTED, addr, 3413 MGMT_STATUS_NOT_CONNECTED, addr,
3382 sizeof(*addr)); 3414 sizeof(*addr));
3383 goto done; 3415 goto done;
3384 } 3416 }
3385 3417
3386 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) { 3418 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3387 err = smp_user_confirm_reply(conn, mgmt_op, passkey); 3419 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3388 if (!err) 3420 if (!err)
3389 err = cmd_complete(sk, hdev->id, mgmt_op, 3421 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3390 MGMT_STATUS_SUCCESS, addr, 3422 MGMT_STATUS_SUCCESS, addr,
3391 sizeof(*addr)); 3423 sizeof(*addr));
3392 else 3424 else
3393 err = cmd_complete(sk, hdev->id, mgmt_op, 3425 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3394 MGMT_STATUS_FAILED, addr, 3426 MGMT_STATUS_FAILED, addr,
3395 sizeof(*addr)); 3427 sizeof(*addr));
3396 3428
3397 goto done; 3429 goto done;
3398 } 3430 }
@@ -3444,8 +3476,8 @@ static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3444 BT_DBG(""); 3476 BT_DBG("");
3445 3477
3446 if (len != sizeof(*cp)) 3478 if (len != sizeof(*cp))
3447 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY, 3479 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3448 MGMT_STATUS_INVALID_PARAMS); 3480 MGMT_STATUS_INVALID_PARAMS);
3449 3481
3450 return user_pairing_resp(sk, hdev, &cp->addr, 3482 return user_pairing_resp(sk, hdev, &cp->addr,
3451 MGMT_OP_USER_CONFIRM_REPLY, 3483 MGMT_OP_USER_CONFIRM_REPLY,
@@ -3501,7 +3533,7 @@ static void update_name(struct hci_request *req)
3501static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode) 3533static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3502{ 3534{
3503 struct mgmt_cp_set_local_name *cp; 3535 struct mgmt_cp_set_local_name *cp;
3504 struct pending_cmd *cmd; 3536 struct mgmt_pending_cmd *cmd;
3505 3537
3506 BT_DBG("status 0x%02x", status); 3538 BT_DBG("status 0x%02x", status);
3507 3539
@@ -3514,11 +3546,11 @@ static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3514 cp = cmd->param; 3546 cp = cmd->param;
3515 3547
3516 if (status) 3548 if (status)
3517 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 3549 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3518 mgmt_status(status)); 3550 mgmt_status(status));
3519 else 3551 else
3520 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, 3552 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3521 cp, sizeof(*cp)); 3553 cp, sizeof(*cp));
3522 3554
3523 mgmt_pending_remove(cmd); 3555 mgmt_pending_remove(cmd);
3524 3556
@@ -3530,7 +3562,7 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3530 u16 len) 3562 u16 len)
3531{ 3563{
3532 struct mgmt_cp_set_local_name *cp = data; 3564 struct mgmt_cp_set_local_name *cp = data;
3533 struct pending_cmd *cmd; 3565 struct mgmt_pending_cmd *cmd;
3534 struct hci_request req; 3566 struct hci_request req;
3535 int err; 3567 int err;
3536 3568
@@ -3544,8 +3576,8 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3544 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) && 3576 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3545 !memcmp(hdev->short_name, cp->short_name, 3577 !memcmp(hdev->short_name, cp->short_name,
3546 sizeof(hdev->short_name))) { 3578 sizeof(hdev->short_name))) {
3547 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, 3579 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3548 data, len); 3580 data, len);
3549 goto failed; 3581 goto failed;
3550 } 3582 }
3551 3583
@@ -3554,8 +3586,8 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3554 if (!hdev_is_powered(hdev)) { 3586 if (!hdev_is_powered(hdev)) {
3555 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name)); 3587 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3556 3588
3557 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, 3589 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3558 data, len); 3590 data, len);
3559 if (err < 0) 3591 if (err < 0)
3560 goto failed; 3592 goto failed;
3561 3593
@@ -3598,7 +3630,7 @@ failed:
3598static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev, 3630static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3599 void *data, u16 data_len) 3631 void *data, u16 data_len)
3600{ 3632{
3601 struct pending_cmd *cmd; 3633 struct mgmt_pending_cmd *cmd;
3602 int err; 3634 int err;
3603 3635
3604 BT_DBG("%s", hdev->name); 3636 BT_DBG("%s", hdev->name);
@@ -3606,20 +3638,20 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3606 hci_dev_lock(hdev); 3638 hci_dev_lock(hdev);
3607 3639
3608 if (!hdev_is_powered(hdev)) { 3640 if (!hdev_is_powered(hdev)) {
3609 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 3641 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3610 MGMT_STATUS_NOT_POWERED); 3642 MGMT_STATUS_NOT_POWERED);
3611 goto unlock; 3643 goto unlock;
3612 } 3644 }
3613 3645
3614 if (!lmp_ssp_capable(hdev)) { 3646 if (!lmp_ssp_capable(hdev)) {
3615 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 3647 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3616 MGMT_STATUS_NOT_SUPPORTED); 3648 MGMT_STATUS_NOT_SUPPORTED);
3617 goto unlock; 3649 goto unlock;
3618 } 3650 }
3619 3651
3620 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) { 3652 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3621 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 3653 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3622 MGMT_STATUS_BUSY); 3654 MGMT_STATUS_BUSY);
3623 goto unlock; 3655 goto unlock;
3624 } 3656 }
3625 3657
@@ -3652,9 +3684,10 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3652 BT_DBG("%s ", hdev->name); 3684 BT_DBG("%s ", hdev->name);
3653 3685
3654 if (!bdaddr_type_is_valid(addr->type)) 3686 if (!bdaddr_type_is_valid(addr->type))
3655 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, 3687 return mgmt_cmd_complete(sk, hdev->id,
3656 MGMT_STATUS_INVALID_PARAMS, addr, 3688 MGMT_OP_ADD_REMOTE_OOB_DATA,
3657 sizeof(*addr)); 3689 MGMT_STATUS_INVALID_PARAMS,
3690 addr, sizeof(*addr));
3658 3691
3659 hci_dev_lock(hdev); 3692 hci_dev_lock(hdev);
3660 3693
@@ -3663,10 +3696,10 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3663 u8 status; 3696 u8 status;
3664 3697
3665 if (cp->addr.type != BDADDR_BREDR) { 3698 if (cp->addr.type != BDADDR_BREDR) {
3666 err = cmd_complete(sk, hdev->id, 3699 err = mgmt_cmd_complete(sk, hdev->id,
3667 MGMT_OP_ADD_REMOTE_OOB_DATA, 3700 MGMT_OP_ADD_REMOTE_OOB_DATA,
3668 MGMT_STATUS_INVALID_PARAMS, 3701 MGMT_STATUS_INVALID_PARAMS,
3669 &cp->addr, sizeof(cp->addr)); 3702 &cp->addr, sizeof(cp->addr));
3670 goto unlock; 3703 goto unlock;
3671 } 3704 }
3672 3705
@@ -3678,8 +3711,9 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3678 else 3711 else
3679 status = MGMT_STATUS_SUCCESS; 3712 status = MGMT_STATUS_SUCCESS;
3680 3713
3681 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, 3714 err = mgmt_cmd_complete(sk, hdev->id,
3682 status, &cp->addr, sizeof(cp->addr)); 3715 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3716 &cp->addr, sizeof(cp->addr));
3683 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) { 3717 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3684 struct mgmt_cp_add_remote_oob_ext_data *cp = data; 3718 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3685 u8 *rand192, *hash192, *rand256, *hash256; 3719 u8 *rand192, *hash192, *rand256, *hash256;
@@ -3691,10 +3725,10 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3691 */ 3725 */
3692 if (memcmp(cp->rand192, ZERO_KEY, 16) || 3726 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3693 memcmp(cp->hash192, ZERO_KEY, 16)) { 3727 memcmp(cp->hash192, ZERO_KEY, 16)) {
3694 err = cmd_complete(sk, hdev->id, 3728 err = mgmt_cmd_complete(sk, hdev->id,
3695 MGMT_OP_ADD_REMOTE_OOB_DATA, 3729 MGMT_OP_ADD_REMOTE_OOB_DATA,
3696 MGMT_STATUS_INVALID_PARAMS, 3730 MGMT_STATUS_INVALID_PARAMS,
3697 addr, sizeof(*addr)); 3731 addr, sizeof(*addr));
3698 goto unlock; 3732 goto unlock;
3699 } 3733 }
3700 3734
@@ -3734,12 +3768,13 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3734 else 3768 else
3735 status = MGMT_STATUS_SUCCESS; 3769 status = MGMT_STATUS_SUCCESS;
3736 3770
3737 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, 3771 err = mgmt_cmd_complete(sk, hdev->id,
3738 status, &cp->addr, sizeof(cp->addr)); 3772 MGMT_OP_ADD_REMOTE_OOB_DATA,
3773 status, &cp->addr, sizeof(cp->addr));
3739 } else { 3774 } else {
3740 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len); 3775 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3741 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, 3776 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3742 MGMT_STATUS_INVALID_PARAMS); 3777 MGMT_STATUS_INVALID_PARAMS);
3743 } 3778 }
3744 3779
3745unlock: 3780unlock:
@@ -3757,9 +3792,10 @@ static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3757 BT_DBG("%s", hdev->name); 3792 BT_DBG("%s", hdev->name);
3758 3793
3759 if (cp->addr.type != BDADDR_BREDR) 3794 if (cp->addr.type != BDADDR_BREDR)
3760 return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA, 3795 return mgmt_cmd_complete(sk, hdev->id,
3761 MGMT_STATUS_INVALID_PARAMS, 3796 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3762 &cp->addr, sizeof(cp->addr)); 3797 MGMT_STATUS_INVALID_PARAMS,
3798 &cp->addr, sizeof(cp->addr));
3763 3799
3764 hci_dev_lock(hdev); 3800 hci_dev_lock(hdev);
3765 3801
@@ -3776,8 +3812,8 @@ static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3776 status = MGMT_STATUS_SUCCESS; 3812 status = MGMT_STATUS_SUCCESS;
3777 3813
3778done: 3814done:
3779 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA, 3815 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3780 status, &cp->addr, sizeof(cp->addr)); 3816 status, &cp->addr, sizeof(cp->addr));
3781 3817
3782 hci_dev_unlock(hdev); 3818 hci_dev_unlock(hdev);
3783 return err; 3819 return err;
@@ -3820,12 +3856,12 @@ static bool trigger_discovery(struct hci_request *req, u8 *status)
3820 return false; 3856 return false;
3821 3857
3822 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED && 3858 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3823 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { 3859 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
3824 *status = MGMT_STATUS_NOT_SUPPORTED; 3860 *status = MGMT_STATUS_NOT_SUPPORTED;
3825 return false; 3861 return false;
3826 } 3862 }
3827 3863
3828 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) { 3864 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
3829 /* Don't let discovery abort an outgoing 3865 /* Don't let discovery abort an outgoing
3830 * connection attempt that's using directed 3866 * connection attempt that's using directed
3831 * advertising. 3867 * advertising.
@@ -3843,7 +3879,7 @@ static bool trigger_discovery(struct hci_request *req, u8 *status)
3843 * is running. Thus, we should temporarily stop it in order to 3879 * is running. Thus, we should temporarily stop it in order to
3844 * set the discovery scanning parameters. 3880 * set the discovery scanning parameters.
3845 */ 3881 */
3846 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) 3882 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
3847 hci_req_add_le_scan_disable(req); 3883 hci_req_add_le_scan_disable(req);
3848 3884
3849 memset(&param_cp, 0, sizeof(param_cp)); 3885 memset(&param_cp, 0, sizeof(param_cp));
@@ -3883,7 +3919,7 @@ static bool trigger_discovery(struct hci_request *req, u8 *status)
3883static void start_discovery_complete(struct hci_dev *hdev, u8 status, 3919static void start_discovery_complete(struct hci_dev *hdev, u8 status,
3884 u16 opcode) 3920 u16 opcode)
3885{ 3921{
3886 struct pending_cmd *cmd; 3922 struct mgmt_pending_cmd *cmd;
3887 unsigned long timeout; 3923 unsigned long timeout;
3888 3924
3889 BT_DBG("status %d", status); 3925 BT_DBG("status %d", status);
@@ -3933,8 +3969,7 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status,
3933 */ 3969 */
3934 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, 3970 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
3935 &hdev->quirks) && 3971 &hdev->quirks) &&
3936 (hdev->discovery.uuid_count > 0 || 3972 hdev->discovery.result_filtering) {
3937 hdev->discovery.rssi != HCI_RSSI_INVALID)) {
3938 hdev->discovery.scan_start = jiffies; 3973 hdev->discovery.scan_start = jiffies;
3939 hdev->discovery.scan_duration = timeout; 3974 hdev->discovery.scan_duration = timeout;
3940 } 3975 }
@@ -3951,7 +3986,7 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3951 void *data, u16 len) 3986 void *data, u16 len)
3952{ 3987{
3953 struct mgmt_cp_start_discovery *cp = data; 3988 struct mgmt_cp_start_discovery *cp = data;
3954 struct pending_cmd *cmd; 3989 struct mgmt_pending_cmd *cmd;
3955 struct hci_request req; 3990 struct hci_request req;
3956 u8 status; 3991 u8 status;
3957 int err; 3992 int err;
@@ -3961,17 +3996,17 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3961 hci_dev_lock(hdev); 3996 hci_dev_lock(hdev);
3962 3997
3963 if (!hdev_is_powered(hdev)) { 3998 if (!hdev_is_powered(hdev)) {
3964 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY, 3999 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3965 MGMT_STATUS_NOT_POWERED, 4000 MGMT_STATUS_NOT_POWERED,
3966 &cp->type, sizeof(cp->type)); 4001 &cp->type, sizeof(cp->type));
3967 goto failed; 4002 goto failed;
3968 } 4003 }
3969 4004
3970 if (hdev->discovery.state != DISCOVERY_STOPPED || 4005 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3971 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) { 4006 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
3972 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY, 4007 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3973 MGMT_STATUS_BUSY, &cp->type, 4008 MGMT_STATUS_BUSY, &cp->type,
3974 sizeof(cp->type)); 4009 sizeof(cp->type));
3975 goto failed; 4010 goto failed;
3976 } 4011 }
3977 4012
@@ -3994,8 +4029,8 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3994 hci_req_init(&req, hdev); 4029 hci_req_init(&req, hdev);
3995 4030
3996 if (!trigger_discovery(&req, &status)) { 4031 if (!trigger_discovery(&req, &status)) {
3997 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY, 4032 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3998 status, &cp->type, sizeof(cp->type)); 4033 status, &cp->type, sizeof(cp->type));
3999 mgmt_pending_remove(cmd); 4034 mgmt_pending_remove(cmd);
4000 goto failed; 4035 goto failed;
4001 } 4036 }
@@ -4013,17 +4048,18 @@ failed:
4013 return err; 4048 return err;
4014} 4049}
4015 4050
4016static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status) 4051static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4052 u8 status)
4017{ 4053{
4018 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, 4054 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
4019 cmd->param, 1); 4055 cmd->param, 1);
4020} 4056}
4021 4057
4022static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, 4058static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4023 void *data, u16 len) 4059 void *data, u16 len)
4024{ 4060{
4025 struct mgmt_cp_start_service_discovery *cp = data; 4061 struct mgmt_cp_start_service_discovery *cp = data;
4026 struct pending_cmd *cmd; 4062 struct mgmt_pending_cmd *cmd;
4027 struct hci_request req; 4063 struct hci_request req;
4028 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16); 4064 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4029 u16 uuid_count, expected_len; 4065 u16 uuid_count, expected_len;
@@ -4035,19 +4071,19 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4035 hci_dev_lock(hdev); 4071 hci_dev_lock(hdev);
4036 4072
4037 if (!hdev_is_powered(hdev)) { 4073 if (!hdev_is_powered(hdev)) {
4038 err = cmd_complete(sk, hdev->id, 4074 err = mgmt_cmd_complete(sk, hdev->id,
4039 MGMT_OP_START_SERVICE_DISCOVERY, 4075 MGMT_OP_START_SERVICE_DISCOVERY,
4040 MGMT_STATUS_NOT_POWERED, 4076 MGMT_STATUS_NOT_POWERED,
4041 &cp->type, sizeof(cp->type)); 4077 &cp->type, sizeof(cp->type));
4042 goto failed; 4078 goto failed;
4043 } 4079 }
4044 4080
4045 if (hdev->discovery.state != DISCOVERY_STOPPED || 4081 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4046 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) { 4082 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4047 err = cmd_complete(sk, hdev->id, 4083 err = mgmt_cmd_complete(sk, hdev->id,
4048 MGMT_OP_START_SERVICE_DISCOVERY, 4084 MGMT_OP_START_SERVICE_DISCOVERY,
4049 MGMT_STATUS_BUSY, &cp->type, 4085 MGMT_STATUS_BUSY, &cp->type,
4050 sizeof(cp->type)); 4086 sizeof(cp->type));
4051 goto failed; 4087 goto failed;
4052 } 4088 }
4053 4089
@@ -4055,10 +4091,10 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4055 if (uuid_count > max_uuid_count) { 4091 if (uuid_count > max_uuid_count) {
4056 BT_ERR("service_discovery: too big uuid_count value %u", 4092 BT_ERR("service_discovery: too big uuid_count value %u",
4057 uuid_count); 4093 uuid_count);
4058 err = cmd_complete(sk, hdev->id, 4094 err = mgmt_cmd_complete(sk, hdev->id,
4059 MGMT_OP_START_SERVICE_DISCOVERY, 4095 MGMT_OP_START_SERVICE_DISCOVERY,
4060 MGMT_STATUS_INVALID_PARAMS, &cp->type, 4096 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4061 sizeof(cp->type)); 4097 sizeof(cp->type));
4062 goto failed; 4098 goto failed;
4063 } 4099 }
4064 4100
@@ -4066,10 +4102,10 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4066 if (expected_len != len) { 4102 if (expected_len != len) {
4067 BT_ERR("service_discovery: expected %u bytes, got %u bytes", 4103 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4068 expected_len, len); 4104 expected_len, len);
4069 err = cmd_complete(sk, hdev->id, 4105 err = mgmt_cmd_complete(sk, hdev->id,
4070 MGMT_OP_START_SERVICE_DISCOVERY, 4106 MGMT_OP_START_SERVICE_DISCOVERY,
4071 MGMT_STATUS_INVALID_PARAMS, &cp->type, 4107 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4072 sizeof(cp->type)); 4108 sizeof(cp->type));
4073 goto failed; 4109 goto failed;
4074 } 4110 }
4075 4111
@@ -4087,6 +4123,7 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4087 */ 4123 */
4088 hci_discovery_filter_clear(hdev); 4124 hci_discovery_filter_clear(hdev);
4089 4125
4126 hdev->discovery.result_filtering = true;
4090 hdev->discovery.type = cp->type; 4127 hdev->discovery.type = cp->type;
4091 hdev->discovery.rssi = cp->rssi; 4128 hdev->discovery.rssi = cp->rssi;
4092 hdev->discovery.uuid_count = uuid_count; 4129 hdev->discovery.uuid_count = uuid_count;
@@ -4095,10 +4132,10 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4095 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16, 4132 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4096 GFP_KERNEL); 4133 GFP_KERNEL);
4097 if (!hdev->discovery.uuids) { 4134 if (!hdev->discovery.uuids) {
4098 err = cmd_complete(sk, hdev->id, 4135 err = mgmt_cmd_complete(sk, hdev->id,
4099 MGMT_OP_START_SERVICE_DISCOVERY, 4136 MGMT_OP_START_SERVICE_DISCOVERY,
4100 MGMT_STATUS_FAILED, 4137 MGMT_STATUS_FAILED,
4101 &cp->type, sizeof(cp->type)); 4138 &cp->type, sizeof(cp->type));
4102 mgmt_pending_remove(cmd); 4139 mgmt_pending_remove(cmd);
4103 goto failed; 4140 goto failed;
4104 } 4141 }
@@ -4107,9 +4144,9 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4107 hci_req_init(&req, hdev); 4144 hci_req_init(&req, hdev);
4108 4145
4109 if (!trigger_discovery(&req, &status)) { 4146 if (!trigger_discovery(&req, &status)) {
4110 err = cmd_complete(sk, hdev->id, 4147 err = mgmt_cmd_complete(sk, hdev->id,
4111 MGMT_OP_START_SERVICE_DISCOVERY, 4148 MGMT_OP_START_SERVICE_DISCOVERY,
4112 status, &cp->type, sizeof(cp->type)); 4149 status, &cp->type, sizeof(cp->type));
4113 mgmt_pending_remove(cmd); 4150 mgmt_pending_remove(cmd);
4114 goto failed; 4151 goto failed;
4115 } 4152 }
@@ -4129,7 +4166,7 @@ failed:
4129 4166
4130static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode) 4167static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4131{ 4168{
4132 struct pending_cmd *cmd; 4169 struct mgmt_pending_cmd *cmd;
4133 4170
4134 BT_DBG("status %d", status); 4171 BT_DBG("status %d", status);
4135 4172
@@ -4151,7 +4188,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4151 u16 len) 4188 u16 len)
4152{ 4189{
4153 struct mgmt_cp_stop_discovery *mgmt_cp = data; 4190 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4154 struct pending_cmd *cmd; 4191 struct mgmt_pending_cmd *cmd;
4155 struct hci_request req; 4192 struct hci_request req;
4156 int err; 4193 int err;
4157 4194
@@ -4160,16 +4197,16 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4160 hci_dev_lock(hdev); 4197 hci_dev_lock(hdev);
4161 4198
4162 if (!hci_discovery_active(hdev)) { 4199 if (!hci_discovery_active(hdev)) {
4163 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 4200 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4164 MGMT_STATUS_REJECTED, &mgmt_cp->type, 4201 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4165 sizeof(mgmt_cp->type)); 4202 sizeof(mgmt_cp->type));
4166 goto unlock; 4203 goto unlock;
4167 } 4204 }
4168 4205
4169 if (hdev->discovery.type != mgmt_cp->type) { 4206 if (hdev->discovery.type != mgmt_cp->type) {
4170 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 4207 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4171 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type, 4208 MGMT_STATUS_INVALID_PARAMS,
4172 sizeof(mgmt_cp->type)); 4209 &mgmt_cp->type, sizeof(mgmt_cp->type));
4173 goto unlock; 4210 goto unlock;
4174 } 4211 }
4175 4212
@@ -4195,8 +4232,8 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4195 4232
4196 /* If no HCI commands were sent we're done */ 4233 /* If no HCI commands were sent we're done */
4197 if (err == -ENODATA) { 4234 if (err == -ENODATA) {
4198 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0, 4235 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4199 &mgmt_cp->type, sizeof(mgmt_cp->type)); 4236 &mgmt_cp->type, sizeof(mgmt_cp->type));
4200 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 4237 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4201 } 4238 }
4202 4239
@@ -4217,17 +4254,17 @@ static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4217 hci_dev_lock(hdev); 4254 hci_dev_lock(hdev);
4218 4255
4219 if (!hci_discovery_active(hdev)) { 4256 if (!hci_discovery_active(hdev)) {
4220 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 4257 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4221 MGMT_STATUS_FAILED, &cp->addr, 4258 MGMT_STATUS_FAILED, &cp->addr,
4222 sizeof(cp->addr)); 4259 sizeof(cp->addr));
4223 goto failed; 4260 goto failed;
4224 } 4261 }
4225 4262
4226 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr); 4263 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4227 if (!e) { 4264 if (!e) {
4228 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 4265 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4229 MGMT_STATUS_INVALID_PARAMS, &cp->addr, 4266 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4230 sizeof(cp->addr)); 4267 sizeof(cp->addr));
4231 goto failed; 4268 goto failed;
4232 } 4269 }
4233 4270
@@ -4239,8 +4276,8 @@ static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4239 hci_inquiry_cache_update_resolve(hdev, e); 4276 hci_inquiry_cache_update_resolve(hdev, e);
4240 } 4277 }
4241 4278
4242 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr, 4279 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4243 sizeof(cp->addr)); 4280 &cp->addr, sizeof(cp->addr));
4244 4281
4245failed: 4282failed:
4246 hci_dev_unlock(hdev); 4283 hci_dev_unlock(hdev);
@@ -4257,9 +4294,9 @@ static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4257 BT_DBG("%s", hdev->name); 4294 BT_DBG("%s", hdev->name);
4258 4295
4259 if (!bdaddr_type_is_valid(cp->addr.type)) 4296 if (!bdaddr_type_is_valid(cp->addr.type))
4260 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, 4297 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4261 MGMT_STATUS_INVALID_PARAMS, 4298 MGMT_STATUS_INVALID_PARAMS,
4262 &cp->addr, sizeof(cp->addr)); 4299 &cp->addr, sizeof(cp->addr));
4263 4300
4264 hci_dev_lock(hdev); 4301 hci_dev_lock(hdev);
4265 4302
@@ -4275,8 +4312,8 @@ static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4275 status = MGMT_STATUS_SUCCESS; 4312 status = MGMT_STATUS_SUCCESS;
4276 4313
4277done: 4314done:
4278 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status, 4315 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4279 &cp->addr, sizeof(cp->addr)); 4316 &cp->addr, sizeof(cp->addr));
4280 4317
4281 hci_dev_unlock(hdev); 4318 hci_dev_unlock(hdev);
4282 4319
@@ -4293,9 +4330,9 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4293 BT_DBG("%s", hdev->name); 4330 BT_DBG("%s", hdev->name);
4294 4331
4295 if (!bdaddr_type_is_valid(cp->addr.type)) 4332 if (!bdaddr_type_is_valid(cp->addr.type))
4296 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, 4333 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4297 MGMT_STATUS_INVALID_PARAMS, 4334 MGMT_STATUS_INVALID_PARAMS,
4298 &cp->addr, sizeof(cp->addr)); 4335 &cp->addr, sizeof(cp->addr));
4299 4336
4300 hci_dev_lock(hdev); 4337 hci_dev_lock(hdev);
4301 4338
@@ -4311,8 +4348,8 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4311 status = MGMT_STATUS_SUCCESS; 4348 status = MGMT_STATUS_SUCCESS;
4312 4349
4313done: 4350done:
4314 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status, 4351 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4315 &cp->addr, sizeof(cp->addr)); 4352 &cp->addr, sizeof(cp->addr));
4316 4353
4317 hci_dev_unlock(hdev); 4354 hci_dev_unlock(hdev);
4318 4355
@@ -4332,8 +4369,8 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4332 source = __le16_to_cpu(cp->source); 4369 source = __le16_to_cpu(cp->source);
4333 4370
4334 if (source > 0x0002) 4371 if (source > 0x0002)
4335 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 4372 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4336 MGMT_STATUS_INVALID_PARAMS); 4373 MGMT_STATUS_INVALID_PARAMS);
4337 4374
4338 hci_dev_lock(hdev); 4375 hci_dev_lock(hdev);
4339 4376
@@ -4342,7 +4379,8 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4342 hdev->devid_product = __le16_to_cpu(cp->product); 4379 hdev->devid_product = __le16_to_cpu(cp->product);
4343 hdev->devid_version = __le16_to_cpu(cp->version); 4380 hdev->devid_version = __le16_to_cpu(cp->version);
4344 4381
4345 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0); 4382 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4383 NULL, 0);
4346 4384
4347 hci_req_init(&req, hdev); 4385 hci_req_init(&req, hdev);
4348 update_eir(&req); 4386 update_eir(&req);
@@ -4368,10 +4406,10 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4368 goto unlock; 4406 goto unlock;
4369 } 4407 }
4370 4408
4371 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) 4409 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4372 set_bit(HCI_ADVERTISING, &hdev->dev_flags); 4410 hci_dev_set_flag(hdev, HCI_ADVERTISING);
4373 else 4411 else
4374 clear_bit(HCI_ADVERTISING, &hdev->dev_flags); 4412 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4375 4413
4376 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp, 4414 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4377 &match); 4415 &match);
@@ -4389,41 +4427,48 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4389 u16 len) 4427 u16 len)
4390{ 4428{
4391 struct mgmt_mode *cp = data; 4429 struct mgmt_mode *cp = data;
4392 struct pending_cmd *cmd; 4430 struct mgmt_pending_cmd *cmd;
4393 struct hci_request req; 4431 struct hci_request req;
4394 u8 val, enabled, status; 4432 u8 val, status;
4395 int err; 4433 int err;
4396 4434
4397 BT_DBG("request for %s", hdev->name); 4435 BT_DBG("request for %s", hdev->name);
4398 4436
4399 status = mgmt_le_support(hdev); 4437 status = mgmt_le_support(hdev);
4400 if (status) 4438 if (status)
4401 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, 4439 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4402 status); 4440 status);
4403 4441
4404 if (cp->val != 0x00 && cp->val != 0x01) 4442 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4405 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, 4443 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4406 MGMT_STATUS_INVALID_PARAMS); 4444 MGMT_STATUS_INVALID_PARAMS);
4407 4445
4408 hci_dev_lock(hdev); 4446 hci_dev_lock(hdev);
4409 4447
4410 val = !!cp->val; 4448 val = !!cp->val;
4411 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
4412 4449
4413 /* The following conditions are ones which mean that we should 4450 /* The following conditions are ones which mean that we should
4414 * not do any HCI communication but directly send a mgmt 4451 * not do any HCI communication but directly send a mgmt
4415 * response to user space (after toggling the flag if 4452 * response to user space (after toggling the flag if
4416 * necessary). 4453 * necessary).
4417 */ 4454 */
4418 if (!hdev_is_powered(hdev) || val == enabled || 4455 if (!hdev_is_powered(hdev) ||
4456 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
4457 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
4419 hci_conn_num(hdev, LE_LINK) > 0 || 4458 hci_conn_num(hdev, LE_LINK) > 0 ||
4420 (test_bit(HCI_LE_SCAN, &hdev->dev_flags) && 4459 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4421 hdev->le_scan_type == LE_SCAN_ACTIVE)) { 4460 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4422 bool changed = false; 4461 bool changed;
4423 4462
4424 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) { 4463 if (cp->val) {
4425 change_bit(HCI_ADVERTISING, &hdev->dev_flags); 4464 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
4426 changed = true; 4465 if (cp->val == 0x02)
4466 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4467 else
4468 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4469 } else {
4470 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
4471 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4427 } 4472 }
4428 4473
4429 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev); 4474 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
@@ -4438,8 +4483,8 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4438 4483
4439 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) || 4484 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4440 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) { 4485 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4441 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, 4486 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4442 MGMT_STATUS_BUSY); 4487 MGMT_STATUS_BUSY);
4443 goto unlock; 4488 goto unlock;
4444 } 4489 }
4445 4490
@@ -4451,6 +4496,11 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4451 4496
4452 hci_req_init(&req, hdev); 4497 hci_req_init(&req, hdev);
4453 4498
4499 if (cp->val == 0x02)
4500 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4501 else
4502 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4503
4454 if (val) 4504 if (val)
4455 enable_advertising(&req); 4505 enable_advertising(&req);
4456 else 4506 else
@@ -4474,34 +4524,38 @@ static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4474 BT_DBG("%s", hdev->name); 4524 BT_DBG("%s", hdev->name);
4475 4525
4476 if (!lmp_le_capable(hdev)) 4526 if (!lmp_le_capable(hdev))
4477 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 4527 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4478 MGMT_STATUS_NOT_SUPPORTED); 4528 MGMT_STATUS_NOT_SUPPORTED);
4479 4529
4480 if (hdev_is_powered(hdev)) 4530 if (hdev_is_powered(hdev))
4481 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 4531 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4482 MGMT_STATUS_REJECTED); 4532 MGMT_STATUS_REJECTED);
4483 4533
4484 if (bacmp(&cp->bdaddr, BDADDR_ANY)) { 4534 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4485 if (!bacmp(&cp->bdaddr, BDADDR_NONE)) 4535 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4486 return cmd_status(sk, hdev->id, 4536 return mgmt_cmd_status(sk, hdev->id,
4487 MGMT_OP_SET_STATIC_ADDRESS, 4537 MGMT_OP_SET_STATIC_ADDRESS,
4488 MGMT_STATUS_INVALID_PARAMS); 4538 MGMT_STATUS_INVALID_PARAMS);
4489 4539
4490 /* Two most significant bits shall be set */ 4540 /* Two most significant bits shall be set */
4491 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0) 4541 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4492 return cmd_status(sk, hdev->id, 4542 return mgmt_cmd_status(sk, hdev->id,
4493 MGMT_OP_SET_STATIC_ADDRESS, 4543 MGMT_OP_SET_STATIC_ADDRESS,
4494 MGMT_STATUS_INVALID_PARAMS); 4544 MGMT_STATUS_INVALID_PARAMS);
4495 } 4545 }
4496 4546
4497 hci_dev_lock(hdev); 4547 hci_dev_lock(hdev);
4498 4548
4499 bacpy(&hdev->static_addr, &cp->bdaddr); 4549 bacpy(&hdev->static_addr, &cp->bdaddr);
4500 4550
4501 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0); 4551 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4552 if (err < 0)
4553 goto unlock;
4502 4554
4503 hci_dev_unlock(hdev); 4555 err = new_settings(hdev, sk);
4504 4556
4557unlock:
4558 hci_dev_unlock(hdev);
4505 return err; 4559 return err;
4506} 4560}
4507 4561
@@ -4515,36 +4569,37 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4515 BT_DBG("%s", hdev->name); 4569 BT_DBG("%s", hdev->name);
4516 4570
4517 if (!lmp_le_capable(hdev)) 4571 if (!lmp_le_capable(hdev))
4518 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 4572 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4519 MGMT_STATUS_NOT_SUPPORTED); 4573 MGMT_STATUS_NOT_SUPPORTED);
4520 4574
4521 interval = __le16_to_cpu(cp->interval); 4575 interval = __le16_to_cpu(cp->interval);
4522 4576
4523 if (interval < 0x0004 || interval > 0x4000) 4577 if (interval < 0x0004 || interval > 0x4000)
4524 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 4578 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4525 MGMT_STATUS_INVALID_PARAMS); 4579 MGMT_STATUS_INVALID_PARAMS);
4526 4580
4527 window = __le16_to_cpu(cp->window); 4581 window = __le16_to_cpu(cp->window);
4528 4582
4529 if (window < 0x0004 || window > 0x4000) 4583 if (window < 0x0004 || window > 0x4000)
4530 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 4584 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4531 MGMT_STATUS_INVALID_PARAMS); 4585 MGMT_STATUS_INVALID_PARAMS);
4532 4586
4533 if (window > interval) 4587 if (window > interval)
4534 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 4588 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4535 MGMT_STATUS_INVALID_PARAMS); 4589 MGMT_STATUS_INVALID_PARAMS);
4536 4590
4537 hci_dev_lock(hdev); 4591 hci_dev_lock(hdev);
4538 4592
4539 hdev->le_scan_interval = interval; 4593 hdev->le_scan_interval = interval;
4540 hdev->le_scan_window = window; 4594 hdev->le_scan_window = window;
4541 4595
4542 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0); 4596 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4597 NULL, 0);
4543 4598
4544 /* If background scan is running, restart it so new parameters are 4599 /* If background scan is running, restart it so new parameters are
4545 * loaded. 4600 * loaded.
4546 */ 4601 */
4547 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) && 4602 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4548 hdev->discovery.state == DISCOVERY_STOPPED) { 4603 hdev->discovery.state == DISCOVERY_STOPPED) {
4549 struct hci_request req; 4604 struct hci_request req;
4550 4605
@@ -4564,7 +4619,7 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4564static void fast_connectable_complete(struct hci_dev *hdev, u8 status, 4619static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4565 u16 opcode) 4620 u16 opcode)
4566{ 4621{
4567 struct pending_cmd *cmd; 4622 struct mgmt_pending_cmd *cmd;
4568 4623
4569 BT_DBG("status 0x%02x", status); 4624 BT_DBG("status 0x%02x", status);
4570 4625
@@ -4575,15 +4630,15 @@ static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4575 goto unlock; 4630 goto unlock;
4576 4631
4577 if (status) { 4632 if (status) {
4578 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 4633 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4579 mgmt_status(status)); 4634 mgmt_status(status));
4580 } else { 4635 } else {
4581 struct mgmt_mode *cp = cmd->param; 4636 struct mgmt_mode *cp = cmd->param;
4582 4637
4583 if (cp->val) 4638 if (cp->val)
4584 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags); 4639 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4585 else 4640 else
4586 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags); 4641 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4587 4642
4588 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev); 4643 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4589 new_settings(hdev, cmd->sk); 4644 new_settings(hdev, cmd->sk);
@@ -4599,40 +4654,40 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4599 void *data, u16 len) 4654 void *data, u16 len)
4600{ 4655{
4601 struct mgmt_mode *cp = data; 4656 struct mgmt_mode *cp = data;
4602 struct pending_cmd *cmd; 4657 struct mgmt_pending_cmd *cmd;
4603 struct hci_request req; 4658 struct hci_request req;
4604 int err; 4659 int err;
4605 4660
4606 BT_DBG("%s", hdev->name); 4661 BT_DBG("%s", hdev->name);
4607 4662
4608 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) || 4663 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
4609 hdev->hci_ver < BLUETOOTH_VER_1_2) 4664 hdev->hci_ver < BLUETOOTH_VER_1_2)
4610 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 4665 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4611 MGMT_STATUS_NOT_SUPPORTED); 4666 MGMT_STATUS_NOT_SUPPORTED);
4612 4667
4613 if (cp->val != 0x00 && cp->val != 0x01) 4668 if (cp->val != 0x00 && cp->val != 0x01)
4614 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 4669 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4615 MGMT_STATUS_INVALID_PARAMS); 4670 MGMT_STATUS_INVALID_PARAMS);
4616
4617 if (!hdev_is_powered(hdev))
4618 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4619 MGMT_STATUS_NOT_POWERED);
4620
4621 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4622 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4623 MGMT_STATUS_REJECTED);
4624 4671
4625 hci_dev_lock(hdev); 4672 hci_dev_lock(hdev);
4626 4673
4627 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) { 4674 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4628 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 4675 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4629 MGMT_STATUS_BUSY); 4676 MGMT_STATUS_BUSY);
4677 goto unlock;
4678 }
4679
4680 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
4681 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4682 hdev);
4630 goto unlock; 4683 goto unlock;
4631 } 4684 }
4632 4685
4633 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) { 4686 if (!hdev_is_powered(hdev)) {
4687 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
4634 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, 4688 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4635 hdev); 4689 hdev);
4690 new_settings(hdev, sk);
4636 goto unlock; 4691 goto unlock;
4637 } 4692 }
4638 4693
@@ -4649,8 +4704,8 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4649 4704
4650 err = hci_req_run(&req, fast_connectable_complete); 4705 err = hci_req_run(&req, fast_connectable_complete);
4651 if (err < 0) { 4706 if (err < 0) {
4652 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, 4707 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4653 MGMT_STATUS_FAILED); 4708 MGMT_STATUS_FAILED);
4654 mgmt_pending_remove(cmd); 4709 mgmt_pending_remove(cmd);
4655 } 4710 }
4656 4711
@@ -4662,7 +4717,7 @@ unlock:
4662 4717
4663static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode) 4718static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4664{ 4719{
4665 struct pending_cmd *cmd; 4720 struct mgmt_pending_cmd *cmd;
4666 4721
4667 BT_DBG("status 0x%02x", status); 4722 BT_DBG("status 0x%02x", status);
4668 4723
@@ -4678,9 +4733,9 @@ static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4678 /* We need to restore the flag if related HCI commands 4733 /* We need to restore the flag if related HCI commands
4679 * failed. 4734 * failed.
4680 */ 4735 */
4681 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags); 4736 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4682 4737
4683 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); 4738 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4684 } else { 4739 } else {
4685 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev); 4740 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4686 new_settings(hdev, cmd->sk); 4741 new_settings(hdev, cmd->sk);
@@ -4695,41 +4750,41 @@ unlock:
4695static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) 4750static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4696{ 4751{
4697 struct mgmt_mode *cp = data; 4752 struct mgmt_mode *cp = data;
4698 struct pending_cmd *cmd; 4753 struct mgmt_pending_cmd *cmd;
4699 struct hci_request req; 4754 struct hci_request req;
4700 int err; 4755 int err;
4701 4756
4702 BT_DBG("request for %s", hdev->name); 4757 BT_DBG("request for %s", hdev->name);
4703 4758
4704 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev)) 4759 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4705 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, 4760 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4706 MGMT_STATUS_NOT_SUPPORTED); 4761 MGMT_STATUS_NOT_SUPPORTED);
4707 4762
4708 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) 4763 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4709 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, 4764 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4710 MGMT_STATUS_REJECTED); 4765 MGMT_STATUS_REJECTED);
4711 4766
4712 if (cp->val != 0x00 && cp->val != 0x01) 4767 if (cp->val != 0x00 && cp->val != 0x01)
4713 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, 4768 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4714 MGMT_STATUS_INVALID_PARAMS); 4769 MGMT_STATUS_INVALID_PARAMS);
4715 4770
4716 hci_dev_lock(hdev); 4771 hci_dev_lock(hdev);
4717 4772
4718 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { 4773 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4719 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev); 4774 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4720 goto unlock; 4775 goto unlock;
4721 } 4776 }
4722 4777
4723 if (!hdev_is_powered(hdev)) { 4778 if (!hdev_is_powered(hdev)) {
4724 if (!cp->val) { 4779 if (!cp->val) {
4725 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags); 4780 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
4726 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags); 4781 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
4727 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags); 4782 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
4728 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags); 4783 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4729 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags); 4784 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
4730 } 4785 }
4731 4786
4732 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags); 4787 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
4733 4788
4734 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev); 4789 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4735 if (err < 0) 4790 if (err < 0)
@@ -4741,8 +4796,8 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4741 4796
4742 /* Reject disabling when powered on */ 4797 /* Reject disabling when powered on */
4743 if (!cp->val) { 4798 if (!cp->val) {
4744 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, 4799 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4745 MGMT_STATUS_REJECTED); 4800 MGMT_STATUS_REJECTED);
4746 goto unlock; 4801 goto unlock;
4747 } else { 4802 } else {
4748 /* When configuring a dual-mode controller to operate 4803 /* When configuring a dual-mode controller to operate
@@ -4759,18 +4814,18 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4759 * switching BR/EDR back on when secure connections has been 4814 * switching BR/EDR back on when secure connections has been
4760 * enabled is not a supported transaction. 4815 * enabled is not a supported transaction.
4761 */ 4816 */
4762 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) && 4817 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4763 (bacmp(&hdev->static_addr, BDADDR_ANY) || 4818 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
4764 test_bit(HCI_SC_ENABLED, &hdev->dev_flags))) { 4819 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
4765 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, 4820 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4766 MGMT_STATUS_REJECTED); 4821 MGMT_STATUS_REJECTED);
4767 goto unlock; 4822 goto unlock;
4768 } 4823 }
4769 } 4824 }
4770 4825
4771 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) { 4826 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4772 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, 4827 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4773 MGMT_STATUS_BUSY); 4828 MGMT_STATUS_BUSY);
4774 goto unlock; 4829 goto unlock;
4775 } 4830 }
4776 4831
@@ -4783,7 +4838,7 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4783 /* We need to flip the bit already here so that update_adv_data 4838 /* We need to flip the bit already here so that update_adv_data
4784 * generates the correct flags. 4839 * generates the correct flags.
4785 */ 4840 */
4786 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags); 4841 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
4787 4842
4788 hci_req_init(&req, hdev); 4843 hci_req_init(&req, hdev);
4789 4844
@@ -4806,7 +4861,7 @@ unlock:
4806 4861
4807static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) 4862static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4808{ 4863{
4809 struct pending_cmd *cmd; 4864 struct mgmt_pending_cmd *cmd;
4810 struct mgmt_mode *cp; 4865 struct mgmt_mode *cp;
4811 4866
4812 BT_DBG("%s status %u", hdev->name, status); 4867 BT_DBG("%s status %u", hdev->name, status);
@@ -4818,8 +4873,8 @@ static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4818 goto unlock; 4873 goto unlock;
4819 4874
4820 if (status) { 4875 if (status) {
4821 cmd_status(cmd->sk, cmd->index, cmd->opcode, 4876 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
4822 mgmt_status(status)); 4877 mgmt_status(status));
4823 goto remove; 4878 goto remove;
4824 } 4879 }
4825 4880
@@ -4827,16 +4882,16 @@ static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4827 4882
4828 switch (cp->val) { 4883 switch (cp->val) {
4829 case 0x00: 4884 case 0x00:
4830 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags); 4885 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
4831 clear_bit(HCI_SC_ONLY, &hdev->dev_flags); 4886 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4832 break; 4887 break;
4833 case 0x01: 4888 case 0x01:
4834 set_bit(HCI_SC_ENABLED, &hdev->dev_flags); 4889 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4835 clear_bit(HCI_SC_ONLY, &hdev->dev_flags); 4890 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4836 break; 4891 break;
4837 case 0x02: 4892 case 0x02:
4838 set_bit(HCI_SC_ENABLED, &hdev->dev_flags); 4893 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4839 set_bit(HCI_SC_ONLY, &hdev->dev_flags); 4894 hci_dev_set_flag(hdev, HCI_SC_ONLY);
4840 break; 4895 break;
4841 } 4896 }
4842 4897
@@ -4853,7 +4908,7 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4853 void *data, u16 len) 4908 void *data, u16 len)
4854{ 4909{
4855 struct mgmt_mode *cp = data; 4910 struct mgmt_mode *cp = data;
4856 struct pending_cmd *cmd; 4911 struct mgmt_pending_cmd *cmd;
4857 struct hci_request req; 4912 struct hci_request req;
4858 u8 val; 4913 u8 val;
4859 int err; 4914 int err;
@@ -4861,37 +4916,37 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4861 BT_DBG("request for %s", hdev->name); 4916 BT_DBG("request for %s", hdev->name);
4862 4917
4863 if (!lmp_sc_capable(hdev) && 4918 if (!lmp_sc_capable(hdev) &&
4864 !test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) 4919 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4865 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, 4920 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4866 MGMT_STATUS_NOT_SUPPORTED); 4921 MGMT_STATUS_NOT_SUPPORTED);
4867 4922
4868 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) && 4923 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4869 lmp_sc_capable(hdev) && 4924 lmp_sc_capable(hdev) &&
4870 !test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) 4925 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
4871 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, 4926 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4872 MGMT_STATUS_REJECTED); 4927 MGMT_STATUS_REJECTED);
4873 4928
4874 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) 4929 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4875 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, 4930 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4876 MGMT_STATUS_INVALID_PARAMS); 4931 MGMT_STATUS_INVALID_PARAMS);
4877 4932
4878 hci_dev_lock(hdev); 4933 hci_dev_lock(hdev);
4879 4934
4880 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) || 4935 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
4881 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { 4936 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4882 bool changed; 4937 bool changed;
4883 4938
4884 if (cp->val) { 4939 if (cp->val) {
4885 changed = !test_and_set_bit(HCI_SC_ENABLED, 4940 changed = !hci_dev_test_and_set_flag(hdev,
4886 &hdev->dev_flags); 4941 HCI_SC_ENABLED);
4887 if (cp->val == 0x02) 4942 if (cp->val == 0x02)
4888 set_bit(HCI_SC_ONLY, &hdev->dev_flags); 4943 hci_dev_set_flag(hdev, HCI_SC_ONLY);
4889 else 4944 else
4890 clear_bit(HCI_SC_ONLY, &hdev->dev_flags); 4945 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4891 } else { 4946 } else {
4892 changed = test_and_clear_bit(HCI_SC_ENABLED, 4947 changed = hci_dev_test_and_clear_flag(hdev,
4893 &hdev->dev_flags); 4948 HCI_SC_ENABLED);
4894 clear_bit(HCI_SC_ONLY, &hdev->dev_flags); 4949 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4895 } 4950 }
4896 4951
4897 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev); 4952 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
@@ -4905,15 +4960,15 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4905 } 4960 }
4906 4961
4907 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) { 4962 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4908 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, 4963 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4909 MGMT_STATUS_BUSY); 4964 MGMT_STATUS_BUSY);
4910 goto failed; 4965 goto failed;
4911 } 4966 }
4912 4967
4913 val = !!cp->val; 4968 val = !!cp->val;
4914 4969
4915 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) && 4970 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
4916 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) { 4971 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4917 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev); 4972 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4918 goto failed; 4973 goto failed;
4919 } 4974 }
@@ -4947,27 +5002,26 @@ static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4947 BT_DBG("request for %s", hdev->name); 5002 BT_DBG("request for %s", hdev->name);
4948 5003
4949 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) 5004 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4950 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS, 5005 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4951 MGMT_STATUS_INVALID_PARAMS); 5006 MGMT_STATUS_INVALID_PARAMS);
4952 5007
4953 hci_dev_lock(hdev); 5008 hci_dev_lock(hdev);
4954 5009
4955 if (cp->val) 5010 if (cp->val)
4956 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS, 5011 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
4957 &hdev->dev_flags);
4958 else 5012 else
4959 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS, 5013 changed = hci_dev_test_and_clear_flag(hdev,
4960 &hdev->dev_flags); 5014 HCI_KEEP_DEBUG_KEYS);
4961 5015
4962 if (cp->val == 0x02) 5016 if (cp->val == 0x02)
4963 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS, 5017 use_changed = !hci_dev_test_and_set_flag(hdev,
4964 &hdev->dev_flags); 5018 HCI_USE_DEBUG_KEYS);
4965 else 5019 else
4966 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS, 5020 use_changed = hci_dev_test_and_clear_flag(hdev,
4967 &hdev->dev_flags); 5021 HCI_USE_DEBUG_KEYS);
4968 5022
4969 if (hdev_is_powered(hdev) && use_changed && 5023 if (hdev_is_powered(hdev) && use_changed &&
4970 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { 5024 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
4971 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00; 5025 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4972 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, 5026 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4973 sizeof(mode), &mode); 5027 sizeof(mode), &mode);
@@ -4995,32 +5049,32 @@ static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4995 BT_DBG("request for %s", hdev->name); 5049 BT_DBG("request for %s", hdev->name);
4996 5050
4997 if (!lmp_le_capable(hdev)) 5051 if (!lmp_le_capable(hdev))
4998 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, 5052 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4999 MGMT_STATUS_NOT_SUPPORTED); 5053 MGMT_STATUS_NOT_SUPPORTED);
5000 5054
5001 if (cp->privacy != 0x00 && cp->privacy != 0x01) 5055 if (cp->privacy != 0x00 && cp->privacy != 0x01)
5002 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, 5056 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5003 MGMT_STATUS_INVALID_PARAMS); 5057 MGMT_STATUS_INVALID_PARAMS);
5004 5058
5005 if (hdev_is_powered(hdev)) 5059 if (hdev_is_powered(hdev))
5006 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, 5060 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5007 MGMT_STATUS_REJECTED); 5061 MGMT_STATUS_REJECTED);
5008 5062
5009 hci_dev_lock(hdev); 5063 hci_dev_lock(hdev);
5010 5064
5011 /* If user space supports this command it is also expected to 5065 /* If user space supports this command it is also expected to
5012 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag. 5066 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5013 */ 5067 */
5014 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags); 5068 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5015 5069
5016 if (cp->privacy) { 5070 if (cp->privacy) {
5017 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags); 5071 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5018 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk)); 5072 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5019 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); 5073 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5020 } else { 5074 } else {
5021 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags); 5075 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5022 memset(hdev->irk, 0, sizeof(hdev->irk)); 5076 memset(hdev->irk, 0, sizeof(hdev->irk));
5023 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); 5077 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5024 } 5078 }
5025 5079
5026 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev); 5080 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
@@ -5063,22 +5117,22 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5063 BT_DBG("request for %s", hdev->name); 5117 BT_DBG("request for %s", hdev->name);
5064 5118
5065 if (!lmp_le_capable(hdev)) 5119 if (!lmp_le_capable(hdev))
5066 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, 5120 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5067 MGMT_STATUS_NOT_SUPPORTED); 5121 MGMT_STATUS_NOT_SUPPORTED);
5068 5122
5069 irk_count = __le16_to_cpu(cp->irk_count); 5123 irk_count = __le16_to_cpu(cp->irk_count);
5070 if (irk_count > max_irk_count) { 5124 if (irk_count > max_irk_count) {
5071 BT_ERR("load_irks: too big irk_count value %u", irk_count); 5125 BT_ERR("load_irks: too big irk_count value %u", irk_count);
5072 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, 5126 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5073 MGMT_STATUS_INVALID_PARAMS); 5127 MGMT_STATUS_INVALID_PARAMS);
5074 } 5128 }
5075 5129
5076 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info); 5130 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
5077 if (expected_len != len) { 5131 if (expected_len != len) {
5078 BT_ERR("load_irks: expected %u bytes, got %u bytes", 5132 BT_ERR("load_irks: expected %u bytes, got %u bytes",
5079 expected_len, len); 5133 expected_len, len);
5080 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, 5134 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5081 MGMT_STATUS_INVALID_PARAMS); 5135 MGMT_STATUS_INVALID_PARAMS);
5082 } 5136 }
5083 5137
5084 BT_DBG("%s irk_count %u", hdev->name, irk_count); 5138 BT_DBG("%s irk_count %u", hdev->name, irk_count);
@@ -5087,9 +5141,9 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5087 struct mgmt_irk_info *key = &cp->irks[i]; 5141 struct mgmt_irk_info *key = &cp->irks[i];
5088 5142
5089 if (!irk_is_valid(key)) 5143 if (!irk_is_valid(key))
5090 return cmd_status(sk, hdev->id, 5144 return mgmt_cmd_status(sk, hdev->id,
5091 MGMT_OP_LOAD_IRKS, 5145 MGMT_OP_LOAD_IRKS,
5092 MGMT_STATUS_INVALID_PARAMS); 5146 MGMT_STATUS_INVALID_PARAMS);
5093 } 5147 }
5094 5148
5095 hci_dev_lock(hdev); 5149 hci_dev_lock(hdev);
@@ -5109,9 +5163,9 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5109 BDADDR_ANY); 5163 BDADDR_ANY);
5110 } 5164 }
5111 5165
5112 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags); 5166 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5113 5167
5114 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0); 5168 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5115 5169
5116 hci_dev_unlock(hdev); 5170 hci_dev_unlock(hdev);
5117 5171
@@ -5149,14 +5203,14 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5149 BT_DBG("request for %s", hdev->name); 5203 BT_DBG("request for %s", hdev->name);
5150 5204
5151 if (!lmp_le_capable(hdev)) 5205 if (!lmp_le_capable(hdev))
5152 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 5206 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5153 MGMT_STATUS_NOT_SUPPORTED); 5207 MGMT_STATUS_NOT_SUPPORTED);
5154 5208
5155 key_count = __le16_to_cpu(cp->key_count); 5209 key_count = __le16_to_cpu(cp->key_count);
5156 if (key_count > max_key_count) { 5210 if (key_count > max_key_count) {
5157 BT_ERR("load_ltks: too big key_count value %u", key_count); 5211 BT_ERR("load_ltks: too big key_count value %u", key_count);
5158 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 5212 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5159 MGMT_STATUS_INVALID_PARAMS); 5213 MGMT_STATUS_INVALID_PARAMS);
5160 } 5214 }
5161 5215
5162 expected_len = sizeof(*cp) + key_count * 5216 expected_len = sizeof(*cp) + key_count *
@@ -5164,8 +5218,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5164 if (expected_len != len) { 5218 if (expected_len != len) {
5165 BT_ERR("load_keys: expected %u bytes, got %u bytes", 5219 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5166 expected_len, len); 5220 expected_len, len);
5167 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 5221 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5168 MGMT_STATUS_INVALID_PARAMS); 5222 MGMT_STATUS_INVALID_PARAMS);
5169 } 5223 }
5170 5224
5171 BT_DBG("%s key_count %u", hdev->name, key_count); 5225 BT_DBG("%s key_count %u", hdev->name, key_count);
@@ -5174,9 +5228,9 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5174 struct mgmt_ltk_info *key = &cp->keys[i]; 5228 struct mgmt_ltk_info *key = &cp->keys[i];
5175 5229
5176 if (!ltk_is_valid(key)) 5230 if (!ltk_is_valid(key))
5177 return cmd_status(sk, hdev->id, 5231 return mgmt_cmd_status(sk, hdev->id,
5178 MGMT_OP_LOAD_LONG_TERM_KEYS, 5232 MGMT_OP_LOAD_LONG_TERM_KEYS,
5179 MGMT_STATUS_INVALID_PARAMS); 5233 MGMT_STATUS_INVALID_PARAMS);
5180 } 5234 }
5181 5235
5182 hci_dev_lock(hdev); 5236 hci_dev_lock(hdev);
@@ -5221,7 +5275,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5221 key->rand); 5275 key->rand);
5222 } 5276 }
5223 5277
5224 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0, 5278 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5225 NULL, 0); 5279 NULL, 0);
5226 5280
5227 hci_dev_unlock(hdev); 5281 hci_dev_unlock(hdev);
@@ -5229,7 +5283,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5229 return err; 5283 return err;
5230} 5284}
5231 5285
5232static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status) 5286static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5233{ 5287{
5234 struct hci_conn *conn = cmd->user_data; 5288 struct hci_conn *conn = cmd->user_data;
5235 struct mgmt_rp_get_conn_info rp; 5289 struct mgmt_rp_get_conn_info rp;
@@ -5247,8 +5301,8 @@ static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5247 rp.max_tx_power = HCI_TX_POWER_INVALID; 5301 rp.max_tx_power = HCI_TX_POWER_INVALID;
5248 } 5302 }
5249 5303
5250 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status, 5304 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5251 &rp, sizeof(rp)); 5305 status, &rp, sizeof(rp));
5252 5306
5253 hci_conn_drop(conn); 5307 hci_conn_drop(conn);
5254 hci_conn_put(conn); 5308 hci_conn_put(conn);
@@ -5260,7 +5314,7 @@ static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5260 u16 opcode) 5314 u16 opcode)
5261{ 5315{
5262 struct hci_cp_read_rssi *cp; 5316 struct hci_cp_read_rssi *cp;
5263 struct pending_cmd *cmd; 5317 struct mgmt_pending_cmd *cmd;
5264 struct hci_conn *conn; 5318 struct hci_conn *conn;
5265 u16 handle; 5319 u16 handle;
5266 u8 status; 5320 u8 status;
@@ -5325,15 +5379,16 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5325 rp.addr.type = cp->addr.type; 5379 rp.addr.type = cp->addr.type;
5326 5380
5327 if (!bdaddr_type_is_valid(cp->addr.type)) 5381 if (!bdaddr_type_is_valid(cp->addr.type))
5328 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, 5382 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5329 MGMT_STATUS_INVALID_PARAMS, 5383 MGMT_STATUS_INVALID_PARAMS,
5330 &rp, sizeof(rp)); 5384 &rp, sizeof(rp));
5331 5385
5332 hci_dev_lock(hdev); 5386 hci_dev_lock(hdev);
5333 5387
5334 if (!hdev_is_powered(hdev)) { 5388 if (!hdev_is_powered(hdev)) {
5335 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, 5389 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5336 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); 5390 MGMT_STATUS_NOT_POWERED, &rp,
5391 sizeof(rp));
5337 goto unlock; 5392 goto unlock;
5338 } 5393 }
5339 5394
@@ -5344,14 +5399,15 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5344 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); 5399 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5345 5400
5346 if (!conn || conn->state != BT_CONNECTED) { 5401 if (!conn || conn->state != BT_CONNECTED) {
5347 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, 5402 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5348 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp)); 5403 MGMT_STATUS_NOT_CONNECTED, &rp,
5404 sizeof(rp));
5349 goto unlock; 5405 goto unlock;
5350 } 5406 }
5351 5407
5352 if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) { 5408 if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5353 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, 5409 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5354 MGMT_STATUS_BUSY, &rp, sizeof(rp)); 5410 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5355 goto unlock; 5411 goto unlock;
5356 } 5412 }
5357 5413
@@ -5371,7 +5427,7 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5371 struct hci_request req; 5427 struct hci_request req;
5372 struct hci_cp_read_tx_power req_txp_cp; 5428 struct hci_cp_read_tx_power req_txp_cp;
5373 struct hci_cp_read_rssi req_rssi_cp; 5429 struct hci_cp_read_rssi req_rssi_cp;
5374 struct pending_cmd *cmd; 5430 struct mgmt_pending_cmd *cmd;
5375 5431
5376 hci_req_init(&req, hdev); 5432 hci_req_init(&req, hdev);
5377 req_rssi_cp.handle = cpu_to_le16(conn->handle); 5433 req_rssi_cp.handle = cpu_to_le16(conn->handle);
@@ -5419,8 +5475,8 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5419 rp.tx_power = conn->tx_power; 5475 rp.tx_power = conn->tx_power;
5420 rp.max_tx_power = conn->max_tx_power; 5476 rp.max_tx_power = conn->max_tx_power;
5421 5477
5422 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, 5478 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5423 MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); 5479 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5424 } 5480 }
5425 5481
5426unlock: 5482unlock:
@@ -5428,7 +5484,7 @@ unlock:
5428 return err; 5484 return err;
5429} 5485}
5430 5486
5431static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status) 5487static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5432{ 5488{
5433 struct hci_conn *conn = cmd->user_data; 5489 struct hci_conn *conn = cmd->user_data;
5434 struct mgmt_rp_get_clock_info rp; 5490 struct mgmt_rp_get_clock_info rp;
@@ -5453,8 +5509,8 @@ static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5453 } 5509 }
5454 5510
5455complete: 5511complete:
5456 err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, 5512 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5457 sizeof(rp)); 5513 sizeof(rp));
5458 5514
5459 if (conn) { 5515 if (conn) {
5460 hci_conn_drop(conn); 5516 hci_conn_drop(conn);
@@ -5467,7 +5523,7 @@ complete:
5467static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode) 5523static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5468{ 5524{
5469 struct hci_cp_read_clock *hci_cp; 5525 struct hci_cp_read_clock *hci_cp;
5470 struct pending_cmd *cmd; 5526 struct mgmt_pending_cmd *cmd;
5471 struct hci_conn *conn; 5527 struct hci_conn *conn;
5472 5528
5473 BT_DBG("%s status %u", hdev->name, status); 5529 BT_DBG("%s status %u", hdev->name, status);
@@ -5502,7 +5558,7 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5502 struct mgmt_cp_get_clock_info *cp = data; 5558 struct mgmt_cp_get_clock_info *cp = data;
5503 struct mgmt_rp_get_clock_info rp; 5559 struct mgmt_rp_get_clock_info rp;
5504 struct hci_cp_read_clock hci_cp; 5560 struct hci_cp_read_clock hci_cp;
5505 struct pending_cmd *cmd; 5561 struct mgmt_pending_cmd *cmd;
5506 struct hci_request req; 5562 struct hci_request req;
5507 struct hci_conn *conn; 5563 struct hci_conn *conn;
5508 int err; 5564 int err;
@@ -5514,15 +5570,16 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5514 rp.addr.type = cp->addr.type; 5570 rp.addr.type = cp->addr.type;
5515 5571
5516 if (cp->addr.type != BDADDR_BREDR) 5572 if (cp->addr.type != BDADDR_BREDR)
5517 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO, 5573 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5518 MGMT_STATUS_INVALID_PARAMS, 5574 MGMT_STATUS_INVALID_PARAMS,
5519 &rp, sizeof(rp)); 5575 &rp, sizeof(rp));
5520 5576
5521 hci_dev_lock(hdev); 5577 hci_dev_lock(hdev);
5522 5578
5523 if (!hdev_is_powered(hdev)) { 5579 if (!hdev_is_powered(hdev)) {
5524 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO, 5580 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5525 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); 5581 MGMT_STATUS_NOT_POWERED, &rp,
5582 sizeof(rp));
5526 goto unlock; 5583 goto unlock;
5527 } 5584 }
5528 5585
@@ -5530,10 +5587,10 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5530 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, 5587 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5531 &cp->addr.bdaddr); 5588 &cp->addr.bdaddr);
5532 if (!conn || conn->state != BT_CONNECTED) { 5589 if (!conn || conn->state != BT_CONNECTED) {
5533 err = cmd_complete(sk, hdev->id, 5590 err = mgmt_cmd_complete(sk, hdev->id,
5534 MGMT_OP_GET_CLOCK_INFO, 5591 MGMT_OP_GET_CLOCK_INFO,
5535 MGMT_STATUS_NOT_CONNECTED, 5592 MGMT_STATUS_NOT_CONNECTED,
5536 &rp, sizeof(rp)); 5593 &rp, sizeof(rp));
5537 goto unlock; 5594 goto unlock;
5538 } 5595 }
5539 } else { 5596 } else {
@@ -5644,7 +5701,7 @@ static void device_added(struct sock *sk, struct hci_dev *hdev,
5644 5701
5645static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode) 5702static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5646{ 5703{
5647 struct pending_cmd *cmd; 5704 struct mgmt_pending_cmd *cmd;
5648 5705
5649 BT_DBG("status 0x%02x", status); 5706 BT_DBG("status 0x%02x", status);
5650 5707
@@ -5665,7 +5722,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
5665 void *data, u16 len) 5722 void *data, u16 len)
5666{ 5723{
5667 struct mgmt_cp_add_device *cp = data; 5724 struct mgmt_cp_add_device *cp = data;
5668 struct pending_cmd *cmd; 5725 struct mgmt_pending_cmd *cmd;
5669 struct hci_request req; 5726 struct hci_request req;
5670 u8 auto_conn, addr_type; 5727 u8 auto_conn, addr_type;
5671 int err; 5728 int err;
@@ -5674,14 +5731,14 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
5674 5731
5675 if (!bdaddr_type_is_valid(cp->addr.type) || 5732 if (!bdaddr_type_is_valid(cp->addr.type) ||
5676 !bacmp(&cp->addr.bdaddr, BDADDR_ANY)) 5733 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5677 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, 5734 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5678 MGMT_STATUS_INVALID_PARAMS, 5735 MGMT_STATUS_INVALID_PARAMS,
5679 &cp->addr, sizeof(cp->addr)); 5736 &cp->addr, sizeof(cp->addr));
5680 5737
5681 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02) 5738 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5682 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, 5739 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5683 MGMT_STATUS_INVALID_PARAMS, 5740 MGMT_STATUS_INVALID_PARAMS,
5684 &cp->addr, sizeof(cp->addr)); 5741 &cp->addr, sizeof(cp->addr));
5685 5742
5686 hci_req_init(&req, hdev); 5743 hci_req_init(&req, hdev);
5687 5744
@@ -5767,7 +5824,7 @@ static void device_removed(struct sock *sk, struct hci_dev *hdev,
5767 5824
5768static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode) 5825static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5769{ 5826{
5770 struct pending_cmd *cmd; 5827 struct mgmt_pending_cmd *cmd;
5771 5828
5772 BT_DBG("status 0x%02x", status); 5829 BT_DBG("status 0x%02x", status);
5773 5830
@@ -5788,7 +5845,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
5788 void *data, u16 len) 5845 void *data, u16 len)
5789{ 5846{
5790 struct mgmt_cp_remove_device *cp = data; 5847 struct mgmt_cp_remove_device *cp = data;
5791 struct pending_cmd *cmd; 5848 struct mgmt_pending_cmd *cmd;
5792 struct hci_request req; 5849 struct hci_request req;
5793 int err; 5850 int err;
5794 5851
@@ -5921,15 +5978,15 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5921 int i; 5978 int i;
5922 5979
5923 if (!lmp_le_capable(hdev)) 5980 if (!lmp_le_capable(hdev))
5924 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 5981 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5925 MGMT_STATUS_NOT_SUPPORTED); 5982 MGMT_STATUS_NOT_SUPPORTED);
5926 5983
5927 param_count = __le16_to_cpu(cp->param_count); 5984 param_count = __le16_to_cpu(cp->param_count);
5928 if (param_count > max_param_count) { 5985 if (param_count > max_param_count) {
5929 BT_ERR("load_conn_param: too big param_count value %u", 5986 BT_ERR("load_conn_param: too big param_count value %u",
5930 param_count); 5987 param_count);
5931 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 5988 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5932 MGMT_STATUS_INVALID_PARAMS); 5989 MGMT_STATUS_INVALID_PARAMS);
5933 } 5990 }
5934 5991
5935 expected_len = sizeof(*cp) + param_count * 5992 expected_len = sizeof(*cp) + param_count *
@@ -5937,8 +5994,8 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5937 if (expected_len != len) { 5994 if (expected_len != len) {
5938 BT_ERR("load_conn_param: expected %u bytes, got %u bytes", 5995 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5939 expected_len, len); 5996 expected_len, len);
5940 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 5997 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5941 MGMT_STATUS_INVALID_PARAMS); 5998 MGMT_STATUS_INVALID_PARAMS);
5942 } 5999 }
5943 6000
5944 BT_DBG("%s param_count %u", hdev->name, param_count); 6001 BT_DBG("%s param_count %u", hdev->name, param_count);
@@ -5993,7 +6050,8 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5993 6050
5994 hci_dev_unlock(hdev); 6051 hci_dev_unlock(hdev);
5995 6052
5996 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0); 6053 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
6054 NULL, 0);
5997} 6055}
5998 6056
5999static int set_external_config(struct sock *sk, struct hci_dev *hdev, 6057static int set_external_config(struct sock *sk, struct hci_dev *hdev,
@@ -6006,25 +6064,23 @@ static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6006 BT_DBG("%s", hdev->name); 6064 BT_DBG("%s", hdev->name);
6007 6065
6008 if (hdev_is_powered(hdev)) 6066 if (hdev_is_powered(hdev))
6009 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG, 6067 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6010 MGMT_STATUS_REJECTED); 6068 MGMT_STATUS_REJECTED);
6011 6069
6012 if (cp->config != 0x00 && cp->config != 0x01) 6070 if (cp->config != 0x00 && cp->config != 0x01)
6013 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG, 6071 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6014 MGMT_STATUS_INVALID_PARAMS); 6072 MGMT_STATUS_INVALID_PARAMS);
6015 6073
6016 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks)) 6074 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6017 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG, 6075 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6018 MGMT_STATUS_NOT_SUPPORTED); 6076 MGMT_STATUS_NOT_SUPPORTED);
6019 6077
6020 hci_dev_lock(hdev); 6078 hci_dev_lock(hdev);
6021 6079
6022 if (cp->config) 6080 if (cp->config)
6023 changed = !test_and_set_bit(HCI_EXT_CONFIGURED, 6081 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6024 &hdev->dev_flags);
6025 else 6082 else
6026 changed = test_and_clear_bit(HCI_EXT_CONFIGURED, 6083 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6027 &hdev->dev_flags);
6028 6084
6029 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev); 6085 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6030 if (err < 0) 6086 if (err < 0)
@@ -6035,12 +6091,12 @@ static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6035 6091
6036 err = new_options(hdev, sk); 6092 err = new_options(hdev, sk);
6037 6093
6038 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) { 6094 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6039 mgmt_index_removed(hdev); 6095 mgmt_index_removed(hdev);
6040 6096
6041 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) { 6097 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6042 set_bit(HCI_CONFIG, &hdev->dev_flags); 6098 hci_dev_set_flag(hdev, HCI_CONFIG);
6043 set_bit(HCI_AUTO_OFF, &hdev->dev_flags); 6099 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6044 6100
6045 queue_work(hdev->req_workqueue, &hdev->power_on); 6101 queue_work(hdev->req_workqueue, &hdev->power_on);
6046 } else { 6102 } else {
@@ -6064,16 +6120,16 @@ static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6064 BT_DBG("%s", hdev->name); 6120 BT_DBG("%s", hdev->name);
6065 6121
6066 if (hdev_is_powered(hdev)) 6122 if (hdev_is_powered(hdev))
6067 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS, 6123 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6068 MGMT_STATUS_REJECTED); 6124 MGMT_STATUS_REJECTED);
6069 6125
6070 if (!bacmp(&cp->bdaddr, BDADDR_ANY)) 6126 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6071 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS, 6127 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6072 MGMT_STATUS_INVALID_PARAMS); 6128 MGMT_STATUS_INVALID_PARAMS);
6073 6129
6074 if (!hdev->set_bdaddr) 6130 if (!hdev->set_bdaddr)
6075 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS, 6131 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6076 MGMT_STATUS_NOT_SUPPORTED); 6132 MGMT_STATUS_NOT_SUPPORTED);
6077 6133
6078 hci_dev_lock(hdev); 6134 hci_dev_lock(hdev);
6079 6135
@@ -6087,16 +6143,16 @@ static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6087 if (!changed) 6143 if (!changed)
6088 goto unlock; 6144 goto unlock;
6089 6145
6090 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) 6146 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6091 err = new_options(hdev, sk); 6147 err = new_options(hdev, sk);
6092 6148
6093 if (is_configured(hdev)) { 6149 if (is_configured(hdev)) {
6094 mgmt_index_removed(hdev); 6150 mgmt_index_removed(hdev);
6095 6151
6096 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags); 6152 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6097 6153
6098 set_bit(HCI_CONFIG, &hdev->dev_flags); 6154 hci_dev_set_flag(hdev, HCI_CONFIG);
6099 set_bit(HCI_AUTO_OFF, &hdev->dev_flags); 6155 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6100 6156
6101 queue_work(hdev->req_workqueue, &hdev->power_on); 6157 queue_work(hdev->req_workqueue, &hdev->power_on);
6102 } 6158 }
@@ -6106,81 +6162,91 @@ unlock:
6106 return err; 6162 return err;
6107} 6163}
6108 6164
6109static const struct mgmt_handler { 6165static const struct hci_mgmt_handler mgmt_handlers[] = {
6110 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
6111 u16 data_len);
6112 bool var_len;
6113 size_t data_len;
6114} mgmt_handlers[] = {
6115 { NULL }, /* 0x0000 (no command) */ 6166 { NULL }, /* 0x0000 (no command) */
6116 { read_version, false, MGMT_READ_VERSION_SIZE }, 6167 { read_version, MGMT_READ_VERSION_SIZE,
6117 { read_commands, false, MGMT_READ_COMMANDS_SIZE }, 6168 HCI_MGMT_NO_HDEV },
6118 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE }, 6169 { read_commands, MGMT_READ_COMMANDS_SIZE,
6119 { read_controller_info, false, MGMT_READ_INFO_SIZE }, 6170 HCI_MGMT_NO_HDEV },
6120 { set_powered, false, MGMT_SETTING_SIZE }, 6171 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
6121 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE }, 6172 HCI_MGMT_NO_HDEV },
6122 { set_connectable, false, MGMT_SETTING_SIZE }, 6173 { read_controller_info, MGMT_READ_INFO_SIZE, 0 },
6123 { set_fast_connectable, false, MGMT_SETTING_SIZE }, 6174 { set_powered, MGMT_SETTING_SIZE, 0 },
6124 { set_bondable, false, MGMT_SETTING_SIZE }, 6175 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE, 0 },
6125 { set_link_security, false, MGMT_SETTING_SIZE }, 6176 { set_connectable, MGMT_SETTING_SIZE, 0 },
6126 { set_ssp, false, MGMT_SETTING_SIZE }, 6177 { set_fast_connectable, MGMT_SETTING_SIZE, 0 },
6127 { set_hs, false, MGMT_SETTING_SIZE }, 6178 { set_bondable, MGMT_SETTING_SIZE, 0 },
6128 { set_le, false, MGMT_SETTING_SIZE }, 6179 { set_link_security, MGMT_SETTING_SIZE, 0 },
6129 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE }, 6180 { set_ssp, MGMT_SETTING_SIZE, 0 },
6130 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE }, 6181 { set_hs, MGMT_SETTING_SIZE, 0 },
6131 { add_uuid, false, MGMT_ADD_UUID_SIZE }, 6182 { set_le, MGMT_SETTING_SIZE, 0 },
6132 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE }, 6183 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE, 0 },
6133 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE }, 6184 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE, 0 },
6134 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE }, 6185 { add_uuid, MGMT_ADD_UUID_SIZE, 0 },
6135 { disconnect, false, MGMT_DISCONNECT_SIZE }, 6186 { remove_uuid, MGMT_REMOVE_UUID_SIZE, 0 },
6136 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE }, 6187 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
6137 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE }, 6188 HCI_MGMT_VAR_LEN },
6138 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE }, 6189 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
6139 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE }, 6190 HCI_MGMT_VAR_LEN },
6140 { pair_device, false, MGMT_PAIR_DEVICE_SIZE }, 6191 { disconnect, MGMT_DISCONNECT_SIZE, 0 },
6141 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE }, 6192 { get_connections, MGMT_GET_CONNECTIONS_SIZE, 0 },
6142 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE }, 6193 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE, 0 },
6143 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE }, 6194 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE, 0 },
6144 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE }, 6195 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE, 0 },
6145 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE }, 6196 { pair_device, MGMT_PAIR_DEVICE_SIZE, 0 },
6146 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE }, 6197 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE, 0 },
6147 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE }, 6198 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE, 0 },
6148 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE }, 6199 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE, 0 },
6149 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE }, 6200 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE, 0 },
6150 { start_discovery, false, MGMT_START_DISCOVERY_SIZE }, 6201 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE, 0 },
6151 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE }, 6202 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE, 0 },
6152 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE }, 6203 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6153 { block_device, false, MGMT_BLOCK_DEVICE_SIZE }, 6204 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
6154 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE }, 6205 HCI_MGMT_VAR_LEN },
6155 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE }, 6206 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE, 0 },
6156 { set_advertising, false, MGMT_SETTING_SIZE }, 6207 { start_discovery, MGMT_START_DISCOVERY_SIZE, 0 },
6157 { set_bredr, false, MGMT_SETTING_SIZE }, 6208 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE, 0 },
6158 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE }, 6209 { confirm_name, MGMT_CONFIRM_NAME_SIZE, 0 },
6159 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE }, 6210 { block_device, MGMT_BLOCK_DEVICE_SIZE, 0 },
6160 { set_secure_conn, false, MGMT_SETTING_SIZE }, 6211 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE, 0 },
6161 { set_debug_keys, false, MGMT_SETTING_SIZE }, 6212 { set_device_id, MGMT_SET_DEVICE_ID_SIZE, 0 },
6162 { set_privacy, false, MGMT_SET_PRIVACY_SIZE }, 6213 { set_advertising, MGMT_SETTING_SIZE, 0 },
6163 { load_irks, true, MGMT_LOAD_IRKS_SIZE }, 6214 { set_bredr, MGMT_SETTING_SIZE, 0 },
6164 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE }, 6215 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE, 0 },
6165 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE }, 6216 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE, 0 },
6166 { add_device, false, MGMT_ADD_DEVICE_SIZE }, 6217 { set_secure_conn, MGMT_SETTING_SIZE, 0 },
6167 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE }, 6218 { set_debug_keys, MGMT_SETTING_SIZE, 0 },
6168 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE }, 6219 { set_privacy, MGMT_SET_PRIVACY_SIZE, 0 },
6169 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE }, 6220 { load_irks, MGMT_LOAD_IRKS_SIZE,
6170 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE }, 6221 HCI_MGMT_VAR_LEN },
6171 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE }, 6222 { get_conn_info, MGMT_GET_CONN_INFO_SIZE, 0 },
6172 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE }, 6223 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE, 0 },
6173 { start_service_discovery,true, MGMT_START_SERVICE_DISCOVERY_SIZE }, 6224 { add_device, MGMT_ADD_DEVICE_SIZE, 0 },
6225 { remove_device, MGMT_REMOVE_DEVICE_SIZE, 0 },
6226 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
6227 HCI_MGMT_VAR_LEN },
6228 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
6229 HCI_MGMT_NO_HDEV },
6230 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
6231 HCI_MGMT_UNCONFIGURED },
6232 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
6233 HCI_MGMT_UNCONFIGURED },
6234 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
6235 HCI_MGMT_UNCONFIGURED },
6236 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
6237 HCI_MGMT_VAR_LEN },
6174}; 6238};
6175 6239
6176int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) 6240int mgmt_control(struct hci_mgmt_chan *chan, struct sock *sk,
6241 struct msghdr *msg, size_t msglen)
6177{ 6242{
6178 void *buf; 6243 void *buf;
6179 u8 *cp; 6244 u8 *cp;
6180 struct mgmt_hdr *hdr; 6245 struct mgmt_hdr *hdr;
6181 u16 opcode, index, len; 6246 u16 opcode, index, len;
6182 struct hci_dev *hdev = NULL; 6247 struct hci_dev *hdev = NULL;
6183 const struct mgmt_handler *handler; 6248 const struct hci_mgmt_handler *handler;
6249 bool var_len, no_hdev;
6184 int err; 6250 int err;
6185 6251
6186 BT_DBG("got %zu bytes", msglen); 6252 BT_DBG("got %zu bytes", msglen);
@@ -6207,60 +6273,52 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
6207 goto done; 6273 goto done;
6208 } 6274 }
6209 6275
6276 if (opcode >= chan->handler_count ||
6277 chan->handlers[opcode].func == NULL) {
6278 BT_DBG("Unknown op %u", opcode);
6279 err = mgmt_cmd_status(sk, index, opcode,
6280 MGMT_STATUS_UNKNOWN_COMMAND);
6281 goto done;
6282 }
6283
6284 handler = &chan->handlers[opcode];
6285
6210 if (index != MGMT_INDEX_NONE) { 6286 if (index != MGMT_INDEX_NONE) {
6211 hdev = hci_dev_get(index); 6287 hdev = hci_dev_get(index);
6212 if (!hdev) { 6288 if (!hdev) {
6213 err = cmd_status(sk, index, opcode, 6289 err = mgmt_cmd_status(sk, index, opcode,
6214 MGMT_STATUS_INVALID_INDEX); 6290 MGMT_STATUS_INVALID_INDEX);
6215 goto done; 6291 goto done;
6216 } 6292 }
6217 6293
6218 if (test_bit(HCI_SETUP, &hdev->dev_flags) || 6294 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
6219 test_bit(HCI_CONFIG, &hdev->dev_flags) || 6295 hci_dev_test_flag(hdev, HCI_CONFIG) ||
6220 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { 6296 hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
6221 err = cmd_status(sk, index, opcode, 6297 err = mgmt_cmd_status(sk, index, opcode,
6222 MGMT_STATUS_INVALID_INDEX); 6298 MGMT_STATUS_INVALID_INDEX);
6223 goto done; 6299 goto done;
6224 } 6300 }
6225 6301
6226 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) && 6302 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
6227 opcode != MGMT_OP_READ_CONFIG_INFO && 6303 !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
6228 opcode != MGMT_OP_SET_EXTERNAL_CONFIG && 6304 err = mgmt_cmd_status(sk, index, opcode,
6229 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) { 6305 MGMT_STATUS_INVALID_INDEX);
6230 err = cmd_status(sk, index, opcode,
6231 MGMT_STATUS_INVALID_INDEX);
6232 goto done; 6306 goto done;
6233 } 6307 }
6234 } 6308 }
6235 6309
6236 if (opcode >= ARRAY_SIZE(mgmt_handlers) || 6310 no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
6237 mgmt_handlers[opcode].func == NULL) { 6311 if (no_hdev != !hdev) {
6238 BT_DBG("Unknown op %u", opcode); 6312 err = mgmt_cmd_status(sk, index, opcode,
6239 err = cmd_status(sk, index, opcode, 6313 MGMT_STATUS_INVALID_INDEX);
6240 MGMT_STATUS_UNKNOWN_COMMAND);
6241 goto done;
6242 }
6243
6244 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
6245 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
6246 err = cmd_status(sk, index, opcode,
6247 MGMT_STATUS_INVALID_INDEX);
6248 goto done; 6314 goto done;
6249 } 6315 }
6250 6316
6251 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST && 6317 var_len = (handler->flags & HCI_MGMT_VAR_LEN);
6252 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) { 6318 if ((var_len && len < handler->data_len) ||
6253 err = cmd_status(sk, index, opcode, 6319 (!var_len && len != handler->data_len)) {
6254 MGMT_STATUS_INVALID_INDEX); 6320 err = mgmt_cmd_status(sk, index, opcode,
6255 goto done; 6321 MGMT_STATUS_INVALID_PARAMS);
6256 }
6257
6258 handler = &mgmt_handlers[opcode];
6259
6260 if ((handler->var_len && len < handler->data_len) ||
6261 (!handler->var_len && len != handler->data_len)) {
6262 err = cmd_status(sk, index, opcode,
6263 MGMT_STATUS_INVALID_PARAMS);
6264 goto done; 6322 goto done;
6265 } 6323 }
6266 6324
@@ -6291,7 +6349,7 @@ void mgmt_index_added(struct hci_dev *hdev)
6291 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) 6349 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6292 return; 6350 return;
6293 6351
6294 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) 6352 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6295 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL); 6353 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6296 else 6354 else
6297 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL); 6355 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
@@ -6309,7 +6367,7 @@ void mgmt_index_removed(struct hci_dev *hdev)
6309 6367
6310 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status); 6368 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6311 6369
6312 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) 6370 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6313 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL); 6371 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6314 else 6372 else
6315 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL); 6373 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
@@ -6377,7 +6435,7 @@ static int powered_update_hci(struct hci_dev *hdev)
6377 6435
6378 hci_req_init(&req, hdev); 6436 hci_req_init(&req, hdev);
6379 6437
6380 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) && 6438 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
6381 !lmp_host_ssp_capable(hdev)) { 6439 !lmp_host_ssp_capable(hdev)) {
6382 u8 mode = 0x01; 6440 u8 mode = 0x01;
6383 6441
@@ -6391,7 +6449,7 @@ static int powered_update_hci(struct hci_dev *hdev)
6391 } 6449 }
6392 } 6450 }
6393 6451
6394 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) && 6452 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
6395 lmp_bredr_capable(hdev)) { 6453 lmp_bredr_capable(hdev)) {
6396 struct hci_cp_write_le_host_supported cp; 6454 struct hci_cp_write_le_host_supported cp;
6397 6455
@@ -6412,24 +6470,27 @@ static int powered_update_hci(struct hci_dev *hdev)
6412 * advertising data. This also applies to the case 6470 * advertising data. This also applies to the case
6413 * where BR/EDR was toggled during the AUTO_OFF phase. 6471 * where BR/EDR was toggled during the AUTO_OFF phase.
6414 */ 6472 */
6415 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { 6473 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
6416 update_adv_data(&req); 6474 update_adv_data(&req);
6417 update_scan_rsp_data(&req); 6475 update_scan_rsp_data(&req);
6418 } 6476 }
6419 6477
6420 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) 6478 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6421 enable_advertising(&req); 6479 enable_advertising(&req);
6422 6480
6423 restart_le_actions(&req); 6481 restart_le_actions(&req);
6424 } 6482 }
6425 6483
6426 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags); 6484 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
6427 if (link_sec != test_bit(HCI_AUTH, &hdev->flags)) 6485 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
6428 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE, 6486 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
6429 sizeof(link_sec), &link_sec); 6487 sizeof(link_sec), &link_sec);
6430 6488
6431 if (lmp_bredr_capable(hdev)) { 6489 if (lmp_bredr_capable(hdev)) {
6432 write_fast_connectable(&req, false); 6490 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
6491 write_fast_connectable(&req, true);
6492 else
6493 write_fast_connectable(&req, false);
6433 __hci_update_page_scan(&req); 6494 __hci_update_page_scan(&req);
6434 update_class(&req); 6495 update_class(&req);
6435 update_name(&req); 6496 update_name(&req);
@@ -6445,7 +6506,7 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
6445 u8 status, zero_cod[] = { 0, 0, 0 }; 6506 u8 status, zero_cod[] = { 0, 0, 0 };
6446 int err; 6507 int err;
6447 6508
6448 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 6509 if (!hci_dev_test_flag(hdev, HCI_MGMT))
6449 return 0; 6510 return 0;
6450 6511
6451 if (powered) { 6512 if (powered) {
@@ -6466,7 +6527,7 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
6466 * been triggered, potentially causing misleading DISCONNECTED 6527 * been triggered, potentially causing misleading DISCONNECTED
6467 * status responses. 6528 * status responses.
6468 */ 6529 */
6469 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) 6530 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
6470 status = MGMT_STATUS_INVALID_INDEX; 6531 status = MGMT_STATUS_INVALID_INDEX;
6471 else 6532 else
6472 status = MGMT_STATUS_NOT_POWERED; 6533 status = MGMT_STATUS_NOT_POWERED;
@@ -6488,7 +6549,7 @@ new_settings:
6488 6549
6489void mgmt_set_powered_failed(struct hci_dev *hdev, int err) 6550void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6490{ 6551{
6491 struct pending_cmd *cmd; 6552 struct mgmt_pending_cmd *cmd;
6492 u8 status; 6553 u8 status;
6493 6554
6494 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev); 6555 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
@@ -6500,7 +6561,7 @@ void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6500 else 6561 else
6501 status = MGMT_STATUS_FAILED; 6562 status = MGMT_STATUS_FAILED;
6502 6563
6503 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status); 6564 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6504 6565
6505 mgmt_pending_remove(cmd); 6566 mgmt_pending_remove(cmd);
6506} 6567}
@@ -6516,11 +6577,11 @@ void mgmt_discoverable_timeout(struct hci_dev *hdev)
6516 * of a timeout triggered from general discoverable, it is 6577 * of a timeout triggered from general discoverable, it is
6517 * safe to unconditionally clear the flag. 6578 * safe to unconditionally clear the flag.
6518 */ 6579 */
6519 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); 6580 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
6520 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags); 6581 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6521 6582
6522 hci_req_init(&req, hdev); 6583 hci_req_init(&req, hdev);
6523 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { 6584 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6524 u8 scan = SCAN_PAGE; 6585 u8 scan = SCAN_PAGE;
6525 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 6586 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
6526 sizeof(scan), &scan); 6587 sizeof(scan), &scan);
@@ -6739,7 +6800,7 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
6739 sizeof(*ev) + eir_len, NULL); 6800 sizeof(*ev) + eir_len, NULL);
6740} 6801}
6741 6802
6742static void disconnect_rsp(struct pending_cmd *cmd, void *data) 6803static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
6743{ 6804{
6744 struct sock **sk = data; 6805 struct sock **sk = data;
6745 6806
@@ -6751,7 +6812,7 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6751 mgmt_pending_remove(cmd); 6812 mgmt_pending_remove(cmd);
6752} 6813}
6753 6814
6754static void unpair_device_rsp(struct pending_cmd *cmd, void *data) 6815static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
6755{ 6816{
6756 struct hci_dev *hdev = data; 6817 struct hci_dev *hdev = data;
6757 struct mgmt_cp_unpair_device *cp = cmd->param; 6818 struct mgmt_cp_unpair_device *cp = cmd->param;
@@ -6764,7 +6825,7 @@ static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6764 6825
6765bool mgmt_powering_down(struct hci_dev *hdev) 6826bool mgmt_powering_down(struct hci_dev *hdev)
6766{ 6827{
6767 struct pending_cmd *cmd; 6828 struct mgmt_pending_cmd *cmd;
6768 struct mgmt_mode *cp; 6829 struct mgmt_mode *cp;
6769 6830
6770 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev); 6831 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
@@ -6819,7 +6880,7 @@ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6819{ 6880{
6820 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type); 6881 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6821 struct mgmt_cp_disconnect *cp; 6882 struct mgmt_cp_disconnect *cp;
6822 struct pending_cmd *cmd; 6883 struct mgmt_pending_cmd *cmd;
6823 6884
6824 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, 6885 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6825 hdev); 6886 hdev);
@@ -6874,7 +6935,7 @@ void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6874void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 6935void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6875 u8 status) 6936 u8 status)
6876{ 6937{
6877 struct pending_cmd *cmd; 6938 struct mgmt_pending_cmd *cmd;
6878 6939
6879 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev); 6940 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6880 if (!cmd) 6941 if (!cmd)
@@ -6887,7 +6948,7 @@ void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6887void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 6948void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6888 u8 status) 6949 u8 status)
6889{ 6950{
6890 struct pending_cmd *cmd; 6951 struct mgmt_pending_cmd *cmd;
6891 6952
6892 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev); 6953 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6893 if (!cmd) 6954 if (!cmd)
@@ -6932,7 +6993,7 @@ static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6932 u8 link_type, u8 addr_type, u8 status, 6993 u8 link_type, u8 addr_type, u8 status,
6933 u8 opcode) 6994 u8 opcode)
6934{ 6995{
6935 struct pending_cmd *cmd; 6996 struct mgmt_pending_cmd *cmd;
6936 6997
6937 cmd = mgmt_pending_find(opcode, hdev); 6998 cmd = mgmt_pending_find(opcode, hdev);
6938 if (!cmd) 6999 if (!cmd)
@@ -6993,7 +7054,7 @@ int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6993void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status) 7054void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
6994{ 7055{
6995 struct mgmt_ev_auth_failed ev; 7056 struct mgmt_ev_auth_failed ev;
6996 struct pending_cmd *cmd; 7057 struct mgmt_pending_cmd *cmd;
6997 u8 status = mgmt_status(hci_status); 7058 u8 status = mgmt_status(hci_status);
6998 7059
6999 bacpy(&ev.addr.bdaddr, &conn->dst); 7060 bacpy(&ev.addr.bdaddr, &conn->dst);
@@ -7024,11 +7085,9 @@ void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7024 } 7085 }
7025 7086
7026 if (test_bit(HCI_AUTH, &hdev->flags)) 7087 if (test_bit(HCI_AUTH, &hdev->flags))
7027 changed = !test_and_set_bit(HCI_LINK_SECURITY, 7088 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
7028 &hdev->dev_flags);
7029 else 7089 else
7030 changed = test_and_clear_bit(HCI_LINK_SECURITY, 7090 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
7031 &hdev->dev_flags);
7032 7091
7033 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp, 7092 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7034 &match); 7093 &match);
@@ -7064,9 +7123,9 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7064 if (status) { 7123 if (status) {
7065 u8 mgmt_err = mgmt_status(status); 7124 u8 mgmt_err = mgmt_status(status);
7066 7125
7067 if (enable && test_and_clear_bit(HCI_SSP_ENABLED, 7126 if (enable && hci_dev_test_and_clear_flag(hdev,
7068 &hdev->dev_flags)) { 7127 HCI_SSP_ENABLED)) {
7069 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags); 7128 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7070 new_settings(hdev, NULL); 7129 new_settings(hdev, NULL);
7071 } 7130 }
7072 7131
@@ -7076,14 +7135,14 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7076 } 7135 }
7077 7136
7078 if (enable) { 7137 if (enable) {
7079 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags); 7138 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
7080 } else { 7139 } else {
7081 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags); 7140 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
7082 if (!changed) 7141 if (!changed)
7083 changed = test_and_clear_bit(HCI_HS_ENABLED, 7142 changed = hci_dev_test_and_clear_flag(hdev,
7084 &hdev->dev_flags); 7143 HCI_HS_ENABLED);
7085 else 7144 else
7086 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags); 7145 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7087 } 7146 }
7088 7147
7089 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match); 7148 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
@@ -7096,8 +7155,8 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7096 7155
7097 hci_req_init(&req, hdev); 7156 hci_req_init(&req, hdev);
7098 7157
7099 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { 7158 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7100 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags)) 7159 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
7101 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE, 7160 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
7102 sizeof(enable), &enable); 7161 sizeof(enable), &enable);
7103 update_eir(&req); 7162 update_eir(&req);
@@ -7108,7 +7167,7 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7108 hci_req_run(&req, NULL); 7167 hci_req_run(&req, NULL);
7109} 7168}
7110 7169
7111static void sk_lookup(struct pending_cmd *cmd, void *data) 7170static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7112{ 7171{
7113 struct cmd_lookup *match = data; 7172 struct cmd_lookup *match = data;
7114 7173
@@ -7138,7 +7197,7 @@ void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7138void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status) 7197void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7139{ 7198{
7140 struct mgmt_cp_set_local_name ev; 7199 struct mgmt_cp_set_local_name ev;
7141 struct pending_cmd *cmd; 7200 struct mgmt_pending_cmd *cmd;
7142 7201
7143 if (status) 7202 if (status)
7144 return; 7203 return;
@@ -7166,7 +7225,7 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7166 u8 *rand192, u8 *hash256, u8 *rand256, 7225 u8 *rand192, u8 *hash256, u8 *rand256,
7167 u8 status) 7226 u8 status)
7168{ 7227{
7169 struct pending_cmd *cmd; 7228 struct mgmt_pending_cmd *cmd;
7170 7229
7171 BT_DBG("%s status %u", hdev->name, status); 7230 BT_DBG("%s status %u", hdev->name, status);
7172 7231
@@ -7175,8 +7234,8 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7175 return; 7234 return;
7176 7235
7177 if (status) { 7236 if (status) {
7178 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 7237 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
7179 mgmt_status(status)); 7238 mgmt_status(status));
7180 } else { 7239 } else {
7181 struct mgmt_rp_read_local_oob_data rp; 7240 struct mgmt_rp_read_local_oob_data rp;
7182 size_t rp_size = sizeof(rp); 7241 size_t rp_size = sizeof(rp);
@@ -7191,8 +7250,9 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7191 rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256); 7250 rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
7192 } 7251 }
7193 7252
7194 cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 0, 7253 mgmt_cmd_complete(cmd->sk, hdev->id,
7195 &rp, rp_size); 7254 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7255 &rp, rp_size);
7196 } 7256 }
7197 7257
7198 mgmt_pending_remove(cmd); 7258 mgmt_pending_remove(cmd);
@@ -7268,7 +7328,7 @@ static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7268static void restart_le_scan(struct hci_dev *hdev) 7328static void restart_le_scan(struct hci_dev *hdev)
7269{ 7329{
7270 /* If controller is not scanning we are done. */ 7330 /* If controller is not scanning we are done. */
7271 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags)) 7331 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
7272 return; 7332 return;
7273 7333
7274 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY, 7334 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
@@ -7280,14 +7340,58 @@ static void restart_le_scan(struct hci_dev *hdev)
7280 DISCOV_LE_RESTART_DELAY); 7340 DISCOV_LE_RESTART_DELAY);
7281} 7341}
7282 7342
7343static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
7344 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7345{
7346 /* If a RSSI threshold has been specified, and
7347 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
7348 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
7349 * is set, let it through for further processing, as we might need to
7350 * restart the scan.
7351 *
7352 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7353 * the results are also dropped.
7354 */
7355 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7356 (rssi == HCI_RSSI_INVALID ||
7357 (rssi < hdev->discovery.rssi &&
7358 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
7359 return false;
7360
7361 if (hdev->discovery.uuid_count != 0) {
7362 /* If a list of UUIDs is provided in filter, results with no
7363 * matching UUID should be dropped.
7364 */
7365 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
7366 hdev->discovery.uuids) &&
7367 !eir_has_uuids(scan_rsp, scan_rsp_len,
7368 hdev->discovery.uuid_count,
7369 hdev->discovery.uuids))
7370 return false;
7371 }
7372
7373 /* If duplicate filtering does not report RSSI changes, then restart
7374 * scanning to ensure updated result with updated RSSI values.
7375 */
7376 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
7377 restart_le_scan(hdev);
7378
7379 /* Validate RSSI value against the RSSI threshold once more. */
7380 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7381 rssi < hdev->discovery.rssi)
7382 return false;
7383 }
7384
7385 return true;
7386}
7387
7283void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 7388void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7284 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, 7389 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
7285 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len) 7390 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7286{ 7391{
7287 char buf[512]; 7392 char buf[512];
7288 struct mgmt_ev_device_found *ev = (void *) buf; 7393 struct mgmt_ev_device_found *ev = (void *)buf;
7289 size_t ev_size; 7394 size_t ev_size;
7290 bool match;
7291 7395
7292 /* Don't send events for a non-kernel initiated discovery. With 7396 /* Don't send events for a non-kernel initiated discovery. With
7293 * LE one exception is if we have pend_le_reports > 0 in which 7397 * LE one exception is if we have pend_le_reports > 0 in which
@@ -7300,21 +7404,12 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7300 return; 7404 return;
7301 } 7405 }
7302 7406
7303 /* When using service discovery with a RSSI threshold, then check 7407 if (hdev->discovery.result_filtering) {
7304 * if such a RSSI threshold is specified. If a RSSI threshold has 7408 /* We are using service discovery */
7305 * been specified, and HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, 7409 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
7306 * then all results with a RSSI smaller than the RSSI threshold will be 7410 scan_rsp_len))
7307 * dropped. If the quirk is set, let it through for further processing, 7411 return;
7308 * as we might need to restart the scan. 7412 }
7309 *
7310 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7311 * the results are also dropped.
7312 */
7313 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7314 (rssi == HCI_RSSI_INVALID ||
7315 (rssi < hdev->discovery.rssi &&
7316 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
7317 return;
7318 7413
7319 /* Make sure that the buffer is big enough. The 5 extra bytes 7414 /* Make sure that the buffer is big enough. The 5 extra bytes
7320 * are for the potential CoD field. 7415 * are for the potential CoD field.
@@ -7341,87 +7436,17 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7341 ev->rssi = rssi; 7436 ev->rssi = rssi;
7342 ev->flags = cpu_to_le32(flags); 7437 ev->flags = cpu_to_le32(flags);
7343 7438
7344 if (eir_len > 0) { 7439 if (eir_len > 0)
7345 /* When using service discovery and a list of UUID is
7346 * provided, results with no matching UUID should be
7347 * dropped. In case there is a match the result is
7348 * kept and checking possible scan response data
7349 * will be skipped.
7350 */
7351 if (hdev->discovery.uuid_count > 0) {
7352 match = eir_has_uuids(eir, eir_len,
7353 hdev->discovery.uuid_count,
7354 hdev->discovery.uuids);
7355 /* If duplicate filtering does not report RSSI changes,
7356 * then restart scanning to ensure updated result with
7357 * updated RSSI values.
7358 */
7359 if (match && test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
7360 &hdev->quirks))
7361 restart_le_scan(hdev);
7362 } else {
7363 match = true;
7364 }
7365
7366 if (!match && !scan_rsp_len)
7367 return;
7368
7369 /* Copy EIR or advertising data into event */ 7440 /* Copy EIR or advertising data into event */
7370 memcpy(ev->eir, eir, eir_len); 7441 memcpy(ev->eir, eir, eir_len);
7371 } else {
7372 /* When using service discovery and a list of UUID is
7373 * provided, results with empty EIR or advertising data
7374 * should be dropped since they do not match any UUID.
7375 */
7376 if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
7377 return;
7378
7379 match = false;
7380 }
7381 7442
7382 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV)) 7443 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
7383 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, 7444 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7384 dev_class, 3); 7445 dev_class, 3);
7385 7446
7386 if (scan_rsp_len > 0) { 7447 if (scan_rsp_len > 0)
7387 /* When using service discovery and a list of UUID is
7388 * provided, results with no matching UUID should be
7389 * dropped if there is no previous match from the
7390 * advertising data.
7391 */
7392 if (hdev->discovery.uuid_count > 0) {
7393 if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
7394 hdev->discovery.uuid_count,
7395 hdev->discovery.uuids))
7396 return;
7397
7398 /* If duplicate filtering does not report RSSI changes,
7399 * then restart scanning to ensure updated result with
7400 * updated RSSI values.
7401 */
7402 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
7403 &hdev->quirks))
7404 restart_le_scan(hdev);
7405 }
7406
7407 /* Append scan response data to event */ 7448 /* Append scan response data to event */
7408 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len); 7449 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7409 } else {
7410 /* When using service discovery and a list of UUID is
7411 * provided, results with empty scan response and no
7412 * previous matched advertising data should be dropped.
7413 */
7414 if (hdev->discovery.uuid_count > 0 && !match)
7415 return;
7416 }
7417
7418 /* Validate the reported RSSI value against the RSSI threshold once more
7419 * incase HCI_QUIRK_STRICT_DUPLICATE_FILTER forced a restart of LE
7420 * scanning.
7421 */
7422 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7423 rssi < hdev->discovery.rssi)
7424 return;
7425 7450
7426 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len); 7451 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7427 ev_size = sizeof(*ev) + eir_len + scan_rsp_len; 7452 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
@@ -7474,10 +7499,26 @@ void mgmt_reenable_advertising(struct hci_dev *hdev)
7474{ 7499{
7475 struct hci_request req; 7500 struct hci_request req;
7476 7501
7477 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags)) 7502 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
7478 return; 7503 return;
7479 7504
7480 hci_req_init(&req, hdev); 7505 hci_req_init(&req, hdev);
7481 enable_advertising(&req); 7506 enable_advertising(&req);
7482 hci_req_run(&req, adv_enable_complete); 7507 hci_req_run(&req, adv_enable_complete);
7483} 7508}
7509
7510static struct hci_mgmt_chan chan = {
7511 .channel = HCI_CHANNEL_CONTROL,
7512 .handler_count = ARRAY_SIZE(mgmt_handlers),
7513 .handlers = mgmt_handlers,
7514};
7515
7516int mgmt_init(void)
7517{
7518 return hci_mgmt_chan_register(&chan);
7519}
7520
7521void mgmt_exit(void)
7522{
7523 hci_mgmt_chan_unregister(&chan);
7524}
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 54279ac28120..4322c833e748 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -1231,7 +1231,7 @@ error:
1231 return err; 1231 return err;
1232} 1232}
1233 1233
1234void __exit sco_exit(void) 1234void sco_exit(void)
1235{ 1235{
1236 bt_procfs_cleanup(&init_net, "sco"); 1236 bt_procfs_cleanup(&init_net, "sco");
1237 1237
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index c91c19bfc0a8..9155840068cf 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -52,7 +52,7 @@
52 52
53#define SMP_TIMEOUT msecs_to_jiffies(30000) 53#define SMP_TIMEOUT msecs_to_jiffies(30000)
54 54
55#define AUTH_REQ_MASK(dev) (test_bit(HCI_SC_ENABLED, &(dev)->dev_flags) ? \ 55#define AUTH_REQ_MASK(dev) (hci_dev_test_flag(dev, HCI_SC_ENABLED) ? \
56 0x1f : 0x07) 56 0x1f : 0x07)
57#define KEY_DIST_MASK 0x07 57#define KEY_DIST_MASK 0x07
58 58
@@ -589,7 +589,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
589 struct hci_dev *hdev = hcon->hdev; 589 struct hci_dev *hdev = hcon->hdev;
590 u8 local_dist = 0, remote_dist = 0, oob_flag = SMP_OOB_NOT_PRESENT; 590 u8 local_dist = 0, remote_dist = 0, oob_flag = SMP_OOB_NOT_PRESENT;
591 591
592 if (test_bit(HCI_BONDABLE, &conn->hcon->hdev->dev_flags)) { 592 if (hci_dev_test_flag(hdev, HCI_BONDABLE)) {
593 local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; 593 local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
594 remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; 594 remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
595 authreq |= SMP_AUTH_BONDING; 595 authreq |= SMP_AUTH_BONDING;
@@ -597,18 +597,18 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
597 authreq &= ~SMP_AUTH_BONDING; 597 authreq &= ~SMP_AUTH_BONDING;
598 } 598 }
599 599
600 if (test_bit(HCI_RPA_RESOLVING, &hdev->dev_flags)) 600 if (hci_dev_test_flag(hdev, HCI_RPA_RESOLVING))
601 remote_dist |= SMP_DIST_ID_KEY; 601 remote_dist |= SMP_DIST_ID_KEY;
602 602
603 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) 603 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
604 local_dist |= SMP_DIST_ID_KEY; 604 local_dist |= SMP_DIST_ID_KEY;
605 605
606 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) && 606 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
607 (authreq & SMP_AUTH_SC)) { 607 (authreq & SMP_AUTH_SC)) {
608 struct oob_data *oob_data; 608 struct oob_data *oob_data;
609 u8 bdaddr_type; 609 u8 bdaddr_type;
610 610
611 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { 611 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
612 local_dist |= SMP_DIST_LINK_KEY; 612 local_dist |= SMP_DIST_LINK_KEY;
613 remote_dist |= SMP_DIST_LINK_KEY; 613 remote_dist |= SMP_DIST_LINK_KEY;
614 } 614 }
@@ -692,7 +692,7 @@ static void smp_chan_destroy(struct l2cap_conn *conn)
692 * support hasn't been explicitly enabled. 692 * support hasn't been explicitly enabled.
693 */ 693 */
694 if (smp->ltk && smp->ltk->type == SMP_LTK_P256_DEBUG && 694 if (smp->ltk && smp->ltk->type == SMP_LTK_P256_DEBUG &&
695 !test_bit(HCI_KEEP_DEBUG_KEYS, &hcon->hdev->dev_flags)) { 695 !hci_dev_test_flag(hcon->hdev, HCI_KEEP_DEBUG_KEYS)) {
696 list_del_rcu(&smp->ltk->list); 696 list_del_rcu(&smp->ltk->list);
697 kfree_rcu(smp->ltk, rcu); 697 kfree_rcu(smp->ltk, rcu);
698 smp->ltk = NULL; 698 smp->ltk = NULL;
@@ -1052,7 +1052,7 @@ static void smp_notify_keys(struct l2cap_conn *conn)
1052 /* Don't keep debug keys around if the relevant 1052 /* Don't keep debug keys around if the relevant
1053 * flag is not set. 1053 * flag is not set.
1054 */ 1054 */
1055 if (!test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags) && 1055 if (!hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS) &&
1056 key->type == HCI_LK_DEBUG_COMBINATION) { 1056 key->type == HCI_LK_DEBUG_COMBINATION) {
1057 list_del_rcu(&key->list); 1057 list_del_rcu(&key->list);
1058 kfree_rcu(key, rcu); 1058 kfree_rcu(key, rcu);
@@ -1604,15 +1604,15 @@ static void build_bredr_pairing_cmd(struct smp_chan *smp,
1604 struct hci_dev *hdev = conn->hcon->hdev; 1604 struct hci_dev *hdev = conn->hcon->hdev;
1605 u8 local_dist = 0, remote_dist = 0; 1605 u8 local_dist = 0, remote_dist = 0;
1606 1606
1607 if (test_bit(HCI_BONDABLE, &hdev->dev_flags)) { 1607 if (hci_dev_test_flag(hdev, HCI_BONDABLE)) {
1608 local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; 1608 local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
1609 remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; 1609 remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
1610 } 1610 }
1611 1611
1612 if (test_bit(HCI_RPA_RESOLVING, &hdev->dev_flags)) 1612 if (hci_dev_test_flag(hdev, HCI_RPA_RESOLVING))
1613 remote_dist |= SMP_DIST_ID_KEY; 1613 remote_dist |= SMP_DIST_ID_KEY;
1614 1614
1615 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) 1615 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
1616 local_dist |= SMP_DIST_ID_KEY; 1616 local_dist |= SMP_DIST_ID_KEY;
1617 1617
1618 if (!rsp) { 1618 if (!rsp) {
@@ -1664,11 +1664,11 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
1664 /* We didn't start the pairing, so match remote */ 1664 /* We didn't start the pairing, so match remote */
1665 auth = req->auth_req & AUTH_REQ_MASK(hdev); 1665 auth = req->auth_req & AUTH_REQ_MASK(hdev);
1666 1666
1667 if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) && 1667 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
1668 (auth & SMP_AUTH_BONDING)) 1668 (auth & SMP_AUTH_BONDING))
1669 return SMP_PAIRING_NOTSUPP; 1669 return SMP_PAIRING_NOTSUPP;
1670 1670
1671 if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && !(auth & SMP_AUTH_SC)) 1671 if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC))
1672 return SMP_AUTH_REQUIREMENTS; 1672 return SMP_AUTH_REQUIREMENTS;
1673 1673
1674 smp->preq[0] = SMP_CMD_PAIRING_REQ; 1674 smp->preq[0] = SMP_CMD_PAIRING_REQ;
@@ -1679,7 +1679,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
1679 if (conn->hcon->type == ACL_LINK) { 1679 if (conn->hcon->type == ACL_LINK) {
1680 /* We must have a BR/EDR SC link */ 1680 /* We must have a BR/EDR SC link */
1681 if (!test_bit(HCI_CONN_AES_CCM, &conn->hcon->flags) && 1681 if (!test_bit(HCI_CONN_AES_CCM, &conn->hcon->flags) &&
1682 !test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags)) 1682 !hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP))
1683 return SMP_CROSS_TRANSP_NOT_ALLOWED; 1683 return SMP_CROSS_TRANSP_NOT_ALLOWED;
1684 1684
1685 set_bit(SMP_FLAG_SC, &smp->flags); 1685 set_bit(SMP_FLAG_SC, &smp->flags);
@@ -1743,10 +1743,10 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
1743 smp->remote_key_dist &= ~SMP_SC_NO_DIST; 1743 smp->remote_key_dist &= ~SMP_SC_NO_DIST;
1744 /* Wait for Public Key from Initiating Device */ 1744 /* Wait for Public Key from Initiating Device */
1745 return 0; 1745 return 0;
1746 } else {
1747 SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
1748 } 1746 }
1749 1747
1748 SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
1749
1750 /* Request setup of TK */ 1750 /* Request setup of TK */
1751 ret = tk_request(conn, 0, auth, rsp.io_capability, req->io_capability); 1751 ret = tk_request(conn, 0, auth, rsp.io_capability, req->io_capability);
1752 if (ret) 1752 if (ret)
@@ -1761,7 +1761,7 @@ static u8 sc_send_public_key(struct smp_chan *smp)
1761 1761
1762 BT_DBG(""); 1762 BT_DBG("");
1763 1763
1764 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags)) { 1764 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) {
1765 BT_DBG("Using debug keys"); 1765 BT_DBG("Using debug keys");
1766 memcpy(smp->local_pk, debug_pk, 64); 1766 memcpy(smp->local_pk, debug_pk, 64);
1767 memcpy(smp->local_sk, debug_sk, 32); 1767 memcpy(smp->local_sk, debug_sk, 32);
@@ -1816,7 +1816,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
1816 1816
1817 auth = rsp->auth_req & AUTH_REQ_MASK(hdev); 1817 auth = rsp->auth_req & AUTH_REQ_MASK(hdev);
1818 1818
1819 if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && !(auth & SMP_AUTH_SC)) 1819 if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC))
1820 return SMP_AUTH_REQUIREMENTS; 1820 return SMP_AUTH_REQUIREMENTS;
1821 1821
1822 smp->prsp[0] = SMP_CMD_PAIRING_RSP; 1822 smp->prsp[0] = SMP_CMD_PAIRING_RSP;
@@ -1926,8 +1926,8 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
1926 1926
1927 if (test_bit(SMP_FLAG_TK_VALID, &smp->flags)) 1927 if (test_bit(SMP_FLAG_TK_VALID, &smp->flags))
1928 return smp_confirm(smp); 1928 return smp_confirm(smp);
1929 else 1929
1930 set_bit(SMP_FLAG_CFM_PENDING, &smp->flags); 1930 set_bit(SMP_FLAG_CFM_PENDING, &smp->flags);
1931 1931
1932 return 0; 1932 return 0;
1933} 1933}
@@ -2086,7 +2086,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
2086 2086
2087 auth = rp->auth_req & AUTH_REQ_MASK(hdev); 2087 auth = rp->auth_req & AUTH_REQ_MASK(hdev);
2088 2088
2089 if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && !(auth & SMP_AUTH_SC)) 2089 if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC))
2090 return SMP_AUTH_REQUIREMENTS; 2090 return SMP_AUTH_REQUIREMENTS;
2091 2091
2092 if (hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT) 2092 if (hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT)
@@ -2107,7 +2107,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
2107 if (!smp) 2107 if (!smp)
2108 return SMP_UNSPECIFIED; 2108 return SMP_UNSPECIFIED;
2109 2109
2110 if (!test_bit(HCI_BONDABLE, &hcon->hdev->dev_flags) && 2110 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
2111 (auth & SMP_AUTH_BONDING)) 2111 (auth & SMP_AUTH_BONDING))
2112 return SMP_PAIRING_NOTSUPP; 2112 return SMP_PAIRING_NOTSUPP;
2113 2113
@@ -2141,7 +2141,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
2141 2141
2142 chan = conn->smp; 2142 chan = conn->smp;
2143 2143
2144 if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) 2144 if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
2145 return 1; 2145 return 1;
2146 2146
2147 if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) 2147 if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK))
@@ -2170,7 +2170,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
2170 2170
2171 authreq = seclevel_to_authreq(sec_level); 2171 authreq = seclevel_to_authreq(sec_level);
2172 2172
2173 if (test_bit(HCI_SC_ENABLED, &hcon->hdev->dev_flags)) 2173 if (hci_dev_test_flag(hcon->hdev, HCI_SC_ENABLED))
2174 authreq |= SMP_AUTH_SC; 2174 authreq |= SMP_AUTH_SC;
2175 2175
2176 /* Require MITM if IO Capability allows or the security level 2176 /* Require MITM if IO Capability allows or the security level
@@ -2606,7 +2606,7 @@ static int smp_sig_channel(struct l2cap_chan *chan, struct sk_buff *skb)
2606 if (skb->len < 1) 2606 if (skb->len < 1)
2607 return -EILSEQ; 2607 return -EILSEQ;
2608 2608
2609 if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) { 2609 if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED)) {
2610 reason = SMP_PAIRING_NOTSUPP; 2610 reason = SMP_PAIRING_NOTSUPP;
2611 goto done; 2611 goto done;
2612 } 2612 }
@@ -2744,16 +2744,16 @@ static void bredr_pairing(struct l2cap_chan *chan)
2744 return; 2744 return;
2745 2745
2746 /* Secure Connections support must be enabled */ 2746 /* Secure Connections support must be enabled */
2747 if (!test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) 2747 if (!hci_dev_test_flag(hdev, HCI_SC_ENABLED))
2748 return; 2748 return;
2749 2749
2750 /* BR/EDR must use Secure Connections for SMP */ 2750 /* BR/EDR must use Secure Connections for SMP */
2751 if (!test_bit(HCI_CONN_AES_CCM, &hcon->flags) && 2751 if (!test_bit(HCI_CONN_AES_CCM, &hcon->flags) &&
2752 !test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags)) 2752 !hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP))
2753 return; 2753 return;
2754 2754
2755 /* If our LE support is not enabled don't do anything */ 2755 /* If our LE support is not enabled don't do anything */
2756 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) 2756 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2757 return; 2757 return;
2758 2758
2759 /* Don't bother if remote LE support is not enabled */ 2759 /* Don't bother if remote LE support is not enabled */
@@ -3003,7 +3003,7 @@ static ssize_t force_bredr_smp_read(struct file *file,
3003 struct hci_dev *hdev = file->private_data; 3003 struct hci_dev *hdev = file->private_data;
3004 char buf[3]; 3004 char buf[3];
3005 3005
3006 buf[0] = test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags) ? 'Y': 'N'; 3006 buf[0] = hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP) ? 'Y': 'N';
3007 buf[1] = '\n'; 3007 buf[1] = '\n';
3008 buf[2] = '\0'; 3008 buf[2] = '\0';
3009 return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 3009 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -3025,7 +3025,7 @@ static ssize_t force_bredr_smp_write(struct file *file,
3025 if (strtobool(buf, &enable)) 3025 if (strtobool(buf, &enable))
3026 return -EINVAL; 3026 return -EINVAL;
3027 3027
3028 if (enable == test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags)) 3028 if (enable == hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP))
3029 return -EALREADY; 3029 return -EALREADY;
3030 3030
3031 if (enable) { 3031 if (enable) {
@@ -3044,7 +3044,7 @@ static ssize_t force_bredr_smp_write(struct file *file,
3044 smp_del_chan(chan); 3044 smp_del_chan(chan);
3045 } 3045 }
3046 3046
3047 change_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags); 3047 hci_dev_change_flag(hdev, HCI_FORCE_BREDR_SMP);
3048 3048
3049 return count; 3049 return count;
3050} 3050}
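Throughout the mgmt.c and smp.c hunks, direct test_bit()/change_bit() calls on hdev->dev_flags (and the HCI_FORCE_BREDR_SMP bit that previously lived in dbg_flags) are funnelled through hci_dev_*_flag() helpers. A plausible shape for those helpers is sketched below, assuming dev_flags becomes a bitmap sized by the total number of HCI flags rather than a single unsigned long; the bitmap size macro name is an assumption, not quoted from the patch.

/* Sketch only: assumed definitions illustrating the new flag accessors. */
struct hci_dev {
	/* ... */
	DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);	/* was: unsigned long dev_flags */
	/* ... */
};

#define hci_dev_set_flag(hdev, nr)	set_bit((nr), (hdev)->dev_flags)
#define hci_dev_clear_flag(hdev, nr)	clear_bit((nr), (hdev)->dev_flags)
#define hci_dev_change_flag(hdev, nr)	change_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_flag(hdev, nr)	test_bit((nr), (hdev)->dev_flags)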
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index dfd3c6007f60..0ae5822ef944 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -113,7 +113,7 @@ static void lowpan_setup(struct net_device *dev)
113{ 113{
114 dev->addr_len = IEEE802154_ADDR_LEN; 114 dev->addr_len = IEEE802154_ADDR_LEN;
115 memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN); 115 memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
116 dev->type = ARPHRD_IEEE802154; 116 dev->type = ARPHRD_6LOWPAN;
117 /* Frame Control + Sequence Number + Address fields + Security Header */ 117 /* Frame Control + Sequence Number + Address fields + Security Header */
118 dev->hard_header_len = 2 + 1 + 20 + 14; 118 dev->hard_header_len = 2 + 1 + 20 + 14;
119 dev->needed_tailroom = 2; /* FCS */ 119 dev->needed_tailroom = 2; /* FCS */
diff --git a/net/ieee802154/core.c b/net/ieee802154/core.c
index 888d0991c761..2ee00e8a0308 100644
--- a/net/ieee802154/core.c
+++ b/net/ieee802154/core.c
@@ -25,6 +25,9 @@
25#include "sysfs.h" 25#include "sysfs.h"
26#include "core.h" 26#include "core.h"
27 27
28/* name for sysfs, %d is appended */
29#define PHY_NAME "phy"
30
28/* RCU-protected (and RTNL for writers) */ 31/* RCU-protected (and RTNL for writers) */
29LIST_HEAD(cfg802154_rdev_list); 32LIST_HEAD(cfg802154_rdev_list);
30int cfg802154_rdev_list_generation; 33int cfg802154_rdev_list_generation;
@@ -122,7 +125,7 @@ wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size)
122 125
123 INIT_LIST_HEAD(&rdev->wpan_dev_list); 126 INIT_LIST_HEAD(&rdev->wpan_dev_list);
124 device_initialize(&rdev->wpan_phy.dev); 127 device_initialize(&rdev->wpan_phy.dev);
125 dev_set_name(&rdev->wpan_phy.dev, "wpan-phy%d", rdev->wpan_phy_idx); 128 dev_set_name(&rdev->wpan_phy.dev, PHY_NAME "%d", rdev->wpan_phy_idx);
126 129
127 rdev->wpan_phy.dev.class = &wpan_phy_class; 130 rdev->wpan_phy.dev.class = &wpan_phy_class;
128 rdev->wpan_phy.dev.platform_data = rdev; 131 rdev->wpan_phy.dev.platform_data = rdev;
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index 9105265920fe..2b4955d7aae5 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -76,7 +76,6 @@ nla_put_failure:
76 nlmsg_free(msg); 76 nlmsg_free(msg);
77 return -ENOBUFS; 77 return -ENOBUFS;
78} 78}
79EXPORT_SYMBOL(ieee802154_nl_start_confirm);
80 79
81static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid, 80static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
82 u32 seq, int flags, struct net_device *dev) 81 u32 seq, int flags, struct net_device *dev)
diff --git a/net/ieee802154/sysfs.c b/net/ieee802154/sysfs.c
index dff55c2d87f3..133b4280660c 100644
--- a/net/ieee802154/sysfs.c
+++ b/net/ieee802154/sysfs.c
@@ -48,49 +48,6 @@ static ssize_t name_show(struct device *dev,
48} 48}
49static DEVICE_ATTR_RO(name); 49static DEVICE_ATTR_RO(name);
50 50
51#define MASTER_SHOW_COMPLEX(name, format_string, args...) \
52static ssize_t name ## _show(struct device *dev, \
53 struct device_attribute *attr, char *buf) \
54{ \
55 struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev); \
56 int ret; \
57 \
58 mutex_lock(&phy->pib_lock); \
59 ret = snprintf(buf, PAGE_SIZE, format_string "\n", args); \
60 mutex_unlock(&phy->pib_lock); \
61 return ret; \
62} \
63static DEVICE_ATTR_RO(name)
64
65#define MASTER_SHOW(field, format_string) \
66 MASTER_SHOW_COMPLEX(field, format_string, phy->field)
67
68MASTER_SHOW(current_channel, "%d");
69MASTER_SHOW(current_page, "%d");
70MASTER_SHOW(transmit_power, "%d +- 1 dB");
71MASTER_SHOW_COMPLEX(cca_mode, "%d", phy->cca.mode);
72
73static ssize_t channels_supported_show(struct device *dev,
74 struct device_attribute *attr,
75 char *buf)
76{
77 struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);
78 int ret;
79 int i, len = 0;
80
81 mutex_lock(&phy->pib_lock);
82 for (i = 0; i < 32; i++) {
83 ret = snprintf(buf + len, PAGE_SIZE - len,
84 "%#09x\n", phy->channels_supported[i]);
85 if (ret < 0)
86 break;
87 len += ret;
88 }
89 mutex_unlock(&phy->pib_lock);
90 return len;
91}
92static DEVICE_ATTR_RO(channels_supported);
93
94static void wpan_phy_release(struct device *dev) 51static void wpan_phy_release(struct device *dev)
95{ 52{
96 struct cfg802154_registered_device *rdev = dev_to_rdev(dev); 53 struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
@@ -101,12 +58,6 @@ static void wpan_phy_release(struct device *dev)
101static struct attribute *pmib_attrs[] = { 58static struct attribute *pmib_attrs[] = {
102 &dev_attr_index.attr, 59 &dev_attr_index.attr,
103 &dev_attr_name.attr, 60 &dev_attr_name.attr,
104 /* below will be removed soon */
105 &dev_attr_current_channel.attr,
106 &dev_attr_current_page.attr,
107 &dev_attr_channels_supported.attr,
108 &dev_attr_transmit_power.attr,
109 &dev_attr_cca_mode.attr,
110 NULL, 61 NULL,
111}; 62};
112ATTRIBUTE_GROUPS(pmib); 63ATTRIBUTE_GROUPS(pmib);
diff --git a/net/mac802154/util.c b/net/mac802154/util.c
index 5fc979027919..150bf807e572 100644
--- a/net/mac802154/util.c
+++ b/net/mac802154/util.c
@@ -65,8 +65,19 @@ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
65{ 65{
66 if (ifs_handling) { 66 if (ifs_handling) {
67 struct ieee802154_local *local = hw_to_local(hw); 67 struct ieee802154_local *local = hw_to_local(hw);
68 u8 max_sifs_size;
68 69
69 if (skb->len > 18) 70 /* If transceiver sets CRC on his own we need to use lifs
71 * threshold len above 16 otherwise 18, because it's not
72 * part of skb->len.
73 */
74 if (hw->flags & IEEE802154_HW_TX_OMIT_CKSUM)
75 max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE -
76 IEEE802154_FCS_LEN;
77 else
78 max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE;
79
80 if (skb->len > max_sifs_size)
70 hrtimer_start(&local->ifs_timer, 81 hrtimer_start(&local->ifs_timer,
71 ktime_set(0, hw->phy->lifs_period * NSEC_PER_USEC), 82 ktime_set(0, hw->phy->lifs_period * NSEC_PER_USEC),
72 HRTIMER_MODE_REL); 83 HRTIMER_MODE_REL);
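The interframe-spacing change above picks the long IFS timer whenever the frame exceeds the maximum SIFS frame size, lowering the comparison point when the hardware appends the FCS. A stand-alone sketch of that decision follows, using the standard IEEE 802.15.4 values (aMaxSIFSFrameSize = 18 octets, 2-octet FCS); the macro names are local to the example, not the kernel's.

#include <stdbool.h>

#define MAX_SIFS_FRAME_SIZE	18	/* aMaxSIFSFrameSize, IEEE 802.15.4 */
#define FCS_LEN			2	/* CRC appended to every frame */

/* Return true if a frame of the given length must use the long IFS.
 * hw_adds_fcs mirrors the IEEE802154_HW_TX_OMIT_CKSUM check above:
 * when the transceiver appends the CRC itself, frame_len does not
 * include it, so the threshold drops from 18 to 16 octets.
 */
static bool needs_lifs(unsigned int frame_len, bool hw_adds_fcs)
{
	unsigned int max_sifs = hw_adds_fcs ? MAX_SIFS_FRAME_SIZE - FCS_LEN
					    : MAX_SIFS_FRAME_SIZE;

	return frame_len > max_sifs;
}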