author     John W. Linville <linville@tuxdriver.com>    2013-11-05 15:50:22 -0500
committer  John W. Linville <linville@tuxdriver.com>    2013-11-05 15:50:22 -0500
commit     33b443422eda3828814e8c4b17cf4202ec6ac529 (patch)
tree       8bec6c2b851bc7924ff2ec26e08c5858cc4c51fd
parent     b476d3f143e8b213273834e92615370ca65ff126 (diff)
parent     d78a32a8fcf775111ccc9ba611a08ca5c29784b6 (diff)
Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next
-rw-r--r--  include/net/bluetooth/bluetooth.h  |    1
-rw-r--r--  include/net/bluetooth/hci.h        |   35
-rw-r--r--  include/net/bluetooth/hci_core.h   |   89
-rw-r--r--  include/net/bluetooth/l2cap.h      |   20
-rw-r--r--  net/bluetooth/a2mp.c               |    9
-rw-r--r--  net/bluetooth/af_bluetooth.c       |    9
-rw-r--r--  net/bluetooth/hci_conn.c           |   48
-rw-r--r--  net/bluetooth/hci_core.c           |  803
-rw-r--r--  net/bluetooth/hci_event.c          |   59
-rw-r--r--  net/bluetooth/hci_sock.c           |    4
-rw-r--r--  net/bluetooth/hci_sysfs.c          |  373
-rw-r--r--  net/bluetooth/l2cap_core.c         |  227
-rw-r--r--  net/bluetooth/l2cap_sock.c         |  120
-rw-r--r--  net/bluetooth/mgmt.c               |  637
-rw-r--r--  net/bluetooth/rfcomm/core.c        |   14
-rw-r--r--  net/bluetooth/rfcomm/sock.c        |   14
-rw-r--r--  net/bluetooth/sco.c                |   13
-rw-r--r--  net/bluetooth/smp.c                |    4
18 files changed, 1506 insertions(+), 973 deletions(-)
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index bf2ddffdae2d..a707a0209df4 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -282,6 +282,7 @@ struct bt_skb_cb {
282 __u8 incoming; 282 __u8 incoming;
283 __u16 expect; 283 __u16 expect;
284 __u8 force_active; 284 __u8 force_active;
285 struct l2cap_chan *chan;
285 struct l2cap_ctrl control; 286 struct l2cap_ctrl control;
286 struct hci_req_ctrl req; 287 struct hci_req_ctrl req;
287 bdaddr_t bdaddr; 288 bdaddr_t bdaddr;
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index b096f5f73789..1784c48699f0 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -115,6 +115,7 @@ enum {
115 HCI_PAIRABLE, 115 HCI_PAIRABLE,
116 HCI_SERVICE_CACHE, 116 HCI_SERVICE_CACHE,
117 HCI_DEBUG_KEYS, 117 HCI_DEBUG_KEYS,
118 HCI_DUT_MODE,
118 HCI_UNREGISTER, 119 HCI_UNREGISTER,
119 HCI_USER_CHANNEL, 120 HCI_USER_CHANNEL,
120 121
@@ -125,6 +126,7 @@ enum {
125 HCI_ADVERTISING, 126 HCI_ADVERTISING,
126 HCI_CONNECTABLE, 127 HCI_CONNECTABLE,
127 HCI_DISCOVERABLE, 128 HCI_DISCOVERABLE,
129 HCI_LIMITED_DISCOVERABLE,
128 HCI_LINK_SECURITY, 130 HCI_LINK_SECURITY,
129 HCI_PERIODIC_INQ, 131 HCI_PERIODIC_INQ,
130 HCI_FAST_CONNECTABLE, 132 HCI_FAST_CONNECTABLE,
@@ -823,6 +825,12 @@ struct hci_rp_read_num_supported_iac {
823 825
824#define HCI_OP_READ_CURRENT_IAC_LAP 0x0c39 826#define HCI_OP_READ_CURRENT_IAC_LAP 0x0c39
825 827
828#define HCI_OP_WRITE_CURRENT_IAC_LAP 0x0c3a
829struct hci_cp_write_current_iac_lap {
830 __u8 num_iac;
831 __u8 iac_lap[6];
832} __packed;
833
826#define HCI_OP_WRITE_INQUIRY_MODE 0x0c45 834#define HCI_OP_WRITE_INQUIRY_MODE 0x0c45
827 835
828#define HCI_MAX_EIR_LENGTH 240 836#define HCI_MAX_EIR_LENGTH 240
@@ -1036,6 +1044,10 @@ struct hci_rp_write_remote_amp_assoc {
1036 __u8 phy_handle; 1044 __u8 phy_handle;
1037} __packed; 1045} __packed;
1038 1046
1047#define HCI_OP_ENABLE_DUT_MODE 0x1803
1048
1049#define HCI_OP_WRITE_SSP_DEBUG_MODE 0x1804
1050
1039#define HCI_OP_LE_SET_EVENT_MASK 0x2001 1051#define HCI_OP_LE_SET_EVENT_MASK 0x2001
1040struct hci_cp_le_set_event_mask { 1052struct hci_cp_le_set_event_mask {
1041 __u8 mask[8]; 1053 __u8 mask[8];
@@ -1056,11 +1068,6 @@ struct hci_rp_le_read_local_features {
1056 1068
1057#define HCI_OP_LE_SET_RANDOM_ADDR 0x2005 1069#define HCI_OP_LE_SET_RANDOM_ADDR 0x2005
1058 1070
1059#define LE_ADV_IND 0x00
1060#define LE_ADV_DIRECT_IND 0x01
1061#define LE_ADV_SCAN_IND 0x02
1062#define LE_ADV_NONCONN_IND 0x03
1063
1064#define HCI_OP_LE_SET_ADV_PARAM 0x2006 1071#define HCI_OP_LE_SET_ADV_PARAM 0x2006
1065struct hci_cp_le_set_adv_param { 1072struct hci_cp_le_set_adv_param {
1066 __le16 min_interval; 1073 __le16 min_interval;
@@ -1087,6 +1094,12 @@ struct hci_cp_le_set_adv_data {
1087 __u8 data[HCI_MAX_AD_LENGTH]; 1094 __u8 data[HCI_MAX_AD_LENGTH];
1088} __packed; 1095} __packed;
1089 1096
1097#define HCI_OP_LE_SET_SCAN_RSP_DATA 0x2009
1098struct hci_cp_le_set_scan_rsp_data {
1099 __u8 length;
1100 __u8 data[HCI_MAX_AD_LENGTH];
1101} __packed;
1102
1090#define HCI_OP_LE_SET_ADV_ENABLE 0x200a 1103#define HCI_OP_LE_SET_ADV_ENABLE 0x200a
1091 1104
1092#define LE_SCAN_PASSIVE 0x00 1105#define LE_SCAN_PASSIVE 0x00
@@ -1567,11 +1580,11 @@ struct hci_ev_le_ltk_req {
1567} __packed; 1580} __packed;
1568 1581
1569/* Advertising report event types */ 1582/* Advertising report event types */
1570#define ADV_IND 0x00 1583#define LE_ADV_IND 0x00
1571#define ADV_DIRECT_IND 0x01 1584#define LE_ADV_DIRECT_IND 0x01
1572#define ADV_SCAN_IND 0x02 1585#define LE_ADV_SCAN_IND 0x02
1573#define ADV_NONCONN_IND 0x03 1586#define LE_ADV_NONCONN_IND 0x03
1574#define ADV_SCAN_RSP 0x04 1587#define LE_ADV_SCAN_RSP 0x04
1575 1588
1576#define ADDR_LE_DEV_PUBLIC 0x00 1589#define ADDR_LE_DEV_PUBLIC 0x00
1577#define ADDR_LE_DEV_RANDOM 0x01 1590#define ADDR_LE_DEV_RANDOM 0x01
@@ -1779,6 +1792,4 @@ struct hci_inquiry_req {
1779}; 1792};
1780#define IREQ_CACHE_FLUSH 0x0001 1793#define IREQ_CACHE_FLUSH 0x0001
1781 1794
1782extern bool enable_hs;
1783
1784#endif /* __HCI_H */ 1795#endif /* __HCI_H */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 4e208420d84c..8c0ab3d86f95 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -81,6 +81,7 @@ struct hci_conn_hash {
81struct bdaddr_list { 81struct bdaddr_list {
82 struct list_head list; 82 struct list_head list;
83 bdaddr_t bdaddr; 83 bdaddr_t bdaddr;
84 u8 bdaddr_type;
84}; 85};
85 86
86struct bt_uuid { 87struct bt_uuid {
@@ -141,6 +142,7 @@ struct hci_dev {
141 __u8 dev_type; 142 __u8 dev_type;
142 bdaddr_t bdaddr; 143 bdaddr_t bdaddr;
143 bdaddr_t static_addr; 144 bdaddr_t static_addr;
145 __u8 own_addr_type;
144 __u8 dev_name[HCI_MAX_NAME_LENGTH]; 146 __u8 dev_name[HCI_MAX_NAME_LENGTH];
145 __u8 short_name[HCI_MAX_SHORT_NAME_LENGTH]; 147 __u8 short_name[HCI_MAX_SHORT_NAME_LENGTH];
146 __u8 eir[HCI_MAX_EIR_LENGTH]; 148 __u8 eir[HCI_MAX_EIR_LENGTH];
@@ -167,6 +169,9 @@ struct hci_dev {
167 __u8 page_scan_type; 169 __u8 page_scan_type;
168 __u16 le_scan_interval; 170 __u16 le_scan_interval;
169 __u16 le_scan_window; 171 __u16 le_scan_window;
172 __u16 le_conn_min_interval;
173 __u16 le_conn_max_interval;
174 __u8 ssp_debug_mode;
170 175
171 __u16 devid_source; 176 __u16 devid_source;
172 __u16 devid_vendor; 177 __u16 devid_vendor;
@@ -283,6 +288,8 @@ struct hci_dev {
283 __s8 adv_tx_power; 288 __s8 adv_tx_power;
284 __u8 adv_data[HCI_MAX_AD_LENGTH]; 289 __u8 adv_data[HCI_MAX_AD_LENGTH];
285 __u8 adv_data_len; 290 __u8 adv_data_len;
291 __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
292 __u8 scan_rsp_data_len;
286 293
287 int (*open)(struct hci_dev *hdev); 294 int (*open)(struct hci_dev *hdev);
288 int (*close)(struct hci_dev *hdev); 295 int (*close)(struct hci_dev *hdev);
@@ -311,7 +318,6 @@ struct hci_conn {
311 __u8 attempt; 318 __u8 attempt;
312 __u8 dev_class[3]; 319 __u8 dev_class[3];
313 __u8 features[HCI_MAX_PAGES][8]; 320 __u8 features[HCI_MAX_PAGES][8];
314 __u16 interval;
315 __u16 pkt_type; 321 __u16 pkt_type;
316 __u16 link_policy; 322 __u16 link_policy;
317 __u32 link_mode; 323 __u32 link_mode;
@@ -339,8 +345,8 @@ struct hci_conn {
339 struct list_head chan_list; 345 struct list_head chan_list;
340 346
341 struct delayed_work disc_work; 347 struct delayed_work disc_work;
342 struct timer_list idle_timer; 348 struct delayed_work auto_accept_work;
343 struct timer_list auto_accept_timer; 349 struct delayed_work idle_work;
344 350
345 struct device dev; 351 struct device dev;
346 352
@@ -649,7 +655,7 @@ static inline void hci_conn_drop(struct hci_conn *conn)
649 switch (conn->type) { 655 switch (conn->type) {
650 case ACL_LINK: 656 case ACL_LINK:
651 case LE_LINK: 657 case LE_LINK:
652 del_timer(&conn->idle_timer); 658 cancel_delayed_work(&conn->idle_work);
653 if (conn->state == BT_CONNECTED) { 659 if (conn->state == BT_CONNECTED) {
654 timeo = conn->disc_timeout; 660 timeo = conn->disc_timeout;
655 if (!conn->out) 661 if (!conn->out)
@@ -730,7 +736,7 @@ int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
730int hci_inquiry(void __user *arg); 736int hci_inquiry(void __user *arg);
731 737
732struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, 738struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
733 bdaddr_t *bdaddr); 739 bdaddr_t *bdaddr, u8 type);
734int hci_blacklist_clear(struct hci_dev *hdev); 740int hci_blacklist_clear(struct hci_dev *hdev);
735int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); 741int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
736int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); 742int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
@@ -765,8 +771,6 @@ int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
765int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count); 771int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);
766 772
767void hci_init_sysfs(struct hci_dev *hdev); 773void hci_init_sysfs(struct hci_dev *hdev);
768int hci_add_sysfs(struct hci_dev *hdev);
769void hci_del_sysfs(struct hci_dev *hdev);
770void hci_conn_init_sysfs(struct hci_conn *conn); 774void hci_conn_init_sysfs(struct hci_conn *conn);
771void hci_conn_add_sysfs(struct hci_conn *conn); 775void hci_conn_add_sysfs(struct hci_conn *conn);
772void hci_conn_del_sysfs(struct hci_conn *conn); 776void hci_conn_del_sysfs(struct hci_conn *conn);
@@ -1009,34 +1013,6 @@ static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type)
1009 return false; 1013 return false;
1010} 1014}
1011 1015
1012static inline size_t eir_get_length(u8 *eir, size_t eir_len)
1013{
1014 size_t parsed = 0;
1015
1016 while (parsed < eir_len) {
1017 u8 field_len = eir[0];
1018
1019 if (field_len == 0)
1020 return parsed;
1021
1022 parsed += field_len + 1;
1023 eir += field_len + 1;
1024 }
1025
1026 return eir_len;
1027}
1028
1029static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
1030 u8 data_len)
1031{
1032 eir[eir_len++] = sizeof(type) + data_len;
1033 eir[eir_len++] = type;
1034 memcpy(&eir[eir_len], data, data_len);
1035 eir_len += data_len;
1036
1037 return eir_len;
1038}
1039
1040int hci_register_cb(struct hci_cb *hcb); 1016int hci_register_cb(struct hci_cb *hcb);
1041int hci_unregister_cb(struct hci_cb *hcb); 1017int hci_unregister_cb(struct hci_cb *hcb);
1042 1018
@@ -1100,11 +1076,12 @@ void mgmt_index_added(struct hci_dev *hdev);
1100void mgmt_index_removed(struct hci_dev *hdev); 1076void mgmt_index_removed(struct hci_dev *hdev);
1101void mgmt_set_powered_failed(struct hci_dev *hdev, int err); 1077void mgmt_set_powered_failed(struct hci_dev *hdev, int err);
1102int mgmt_powered(struct hci_dev *hdev, u8 powered); 1078int mgmt_powered(struct hci_dev *hdev, u8 powered);
1103int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable); 1079void mgmt_discoverable_timeout(struct hci_dev *hdev);
1104int mgmt_connectable(struct hci_dev *hdev, u8 connectable); 1080void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
1105int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status); 1081void mgmt_connectable(struct hci_dev *hdev, u8 connectable);
1106int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, 1082void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
1107 bool persistent); 1083void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
1084 bool persistent);
1108void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 1085void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
1109 u8 addr_type, u32 flags, u8 *name, u8 name_len, 1086 u8 addr_type, u32 flags, u8 *name, u8 name_len,
1110 u8 *dev_class); 1087 u8 *dev_class);
@@ -1114,11 +1091,11 @@ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
1114 u8 link_type, u8 addr_type, u8 status); 1091 u8 link_type, u8 addr_type, u8 status);
1115void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 1092void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
1116 u8 addr_type, u8 status); 1093 u8 addr_type, u8 status);
1117int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure); 1094void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
1118int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 1095void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
1119 u8 status); 1096 u8 status);
1120int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 1097void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
1121 u8 status); 1098 u8 status);
1122int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr, 1099int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
1123 u8 link_type, u8 addr_type, __le32 value, 1100 u8 link_type, u8 addr_type, __le32 value,
1124 u8 confirm_hint); 1101 u8 confirm_hint);
@@ -1135,15 +1112,15 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
1135int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr, 1112int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
1136 u8 link_type, u8 addr_type, u32 passkey, 1113 u8 link_type, u8 addr_type, u32 passkey,
1137 u8 entered); 1114 u8 entered);
1138int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 1115void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
1139 u8 addr_type, u8 status); 1116 u8 addr_type, u8 status);
1140int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status); 1117void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
1141int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status); 1118void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
1142int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, 1119void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
1143 u8 status); 1120 u8 status);
1144int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status); 1121void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
1145int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash, 1122void mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
1146 u8 *randomizer, u8 status); 1123 u8 *randomizer, u8 status);
1147void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 1124void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
1148 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, 1125 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
1149 u8 ssp, u8 *eir, u16 eir_len); 1126 u8 ssp, u8 *eir, u16 eir_len);
@@ -1152,7 +1129,7 @@ void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
1152void mgmt_discovering(struct hci_dev *hdev, u8 discovering); 1129void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
1153int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); 1130int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
1154int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); 1131int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
1155int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent); 1132void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent);
1156void mgmt_reenable_advertising(struct hci_dev *hdev); 1133void mgmt_reenable_advertising(struct hci_dev *hdev);
1157 1134
1158/* HCI info for socket */ 1135/* HCI info for socket */
@@ -1183,8 +1160,6 @@ struct hci_sec_filter {
1183#define hci_req_lock(d) mutex_lock(&d->req_lock) 1160#define hci_req_lock(d) mutex_lock(&d->req_lock)
1184#define hci_req_unlock(d) mutex_unlock(&d->req_lock) 1161#define hci_req_unlock(d) mutex_unlock(&d->req_lock)
1185 1162
1186void hci_update_ad(struct hci_request *req);
1187
1188void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, 1163void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
1189 u16 latency, u16 to_multiplier); 1164 u16 latency, u16 to_multiplier);
1190void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], 1165void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 07757a2af942..51329905bfaa 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -435,8 +435,6 @@ struct l2cap_seq_list {
435#define L2CAP_SEQ_LIST_TAIL 0x8000 435#define L2CAP_SEQ_LIST_TAIL 0x8000
436 436
437struct l2cap_chan { 437struct l2cap_chan {
438 struct sock *sk;
439
440 struct l2cap_conn *conn; 438 struct l2cap_conn *conn;
441 struct hci_conn *hs_hcon; 439 struct hci_conn *hs_hcon;
442 struct hci_chan *hs_hchan; 440 struct hci_chan *hs_hchan;
@@ -551,10 +549,12 @@ struct l2cap_ops {
551 void (*teardown) (struct l2cap_chan *chan, int err); 549 void (*teardown) (struct l2cap_chan *chan, int err);
552 void (*close) (struct l2cap_chan *chan); 550 void (*close) (struct l2cap_chan *chan);
553 void (*state_change) (struct l2cap_chan *chan, 551 void (*state_change) (struct l2cap_chan *chan,
554 int state); 552 int state, int err);
555 void (*ready) (struct l2cap_chan *chan); 553 void (*ready) (struct l2cap_chan *chan);
556 void (*defer) (struct l2cap_chan *chan); 554 void (*defer) (struct l2cap_chan *chan);
557 void (*resume) (struct l2cap_chan *chan); 555 void (*resume) (struct l2cap_chan *chan);
556 void (*set_shutdown) (struct l2cap_chan *chan);
557 long (*get_sndtimeo) (struct l2cap_chan *chan);
558 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan, 558 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
559 unsigned long len, int nb); 559 unsigned long len, int nb);
560}; 560};
@@ -795,6 +795,19 @@ static inline void l2cap_chan_no_defer(struct l2cap_chan *chan)
795{ 795{
796} 796}
797 797
798static inline void l2cap_chan_no_resume(struct l2cap_chan *chan)
799{
800}
801
802static inline void l2cap_chan_no_set_shutdown(struct l2cap_chan *chan)
803{
804}
805
806static inline long l2cap_chan_no_get_sndtimeo(struct l2cap_chan *chan)
807{
808 return 0;
809}
810
798extern bool disable_ertm; 811extern bool disable_ertm;
799 812
800int l2cap_init_sockets(void); 813int l2cap_init_sockets(void);
@@ -802,7 +815,6 @@ void l2cap_cleanup_sockets(void);
802bool l2cap_is_socket(struct socket *sock); 815bool l2cap_is_socket(struct socket *sock);
803 816
804void __l2cap_connect_rsp_defer(struct l2cap_chan *chan); 817void __l2cap_connect_rsp_defer(struct l2cap_chan *chan);
805int __l2cap_wait_ack(struct sock *sk);
806 818
807int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm); 819int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm);
808int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid); 820int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid);
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 60ca52819247..efcd108822c4 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -672,7 +672,8 @@ static void a2mp_chan_close_cb(struct l2cap_chan *chan)
672 l2cap_chan_put(chan); 672 l2cap_chan_put(chan);
673} 673}
674 674
675static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state) 675static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state,
676 int err)
676{ 677{
677 struct amp_mgr *mgr = chan->data; 678 struct amp_mgr *mgr = chan->data;
678 679
@@ -709,6 +710,9 @@ static struct l2cap_ops a2mp_chan_ops = {
709 .teardown = l2cap_chan_no_teardown, 710 .teardown = l2cap_chan_no_teardown,
710 .ready = l2cap_chan_no_ready, 711 .ready = l2cap_chan_no_ready,
711 .defer = l2cap_chan_no_defer, 712 .defer = l2cap_chan_no_defer,
713 .resume = l2cap_chan_no_resume,
714 .set_shutdown = l2cap_chan_no_set_shutdown,
715 .get_sndtimeo = l2cap_chan_no_get_sndtimeo,
712}; 716};
713 717
714static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked) 718static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked)
@@ -832,6 +836,9 @@ struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
832{ 836{
833 struct amp_mgr *mgr; 837 struct amp_mgr *mgr;
834 838
839 if (conn->hcon->type != ACL_LINK)
840 return NULL;
841
835 mgr = amp_mgr_create(conn, false); 842 mgr = amp_mgr_create(conn, false);
836 if (!mgr) { 843 if (!mgr) {
837 BT_ERR("Could not create AMP manager"); 844 BT_ERR("Could not create AMP manager");
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 1f1a1118f489..f6a1671ea2ff 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -25,6 +25,7 @@
25/* Bluetooth address family and sockets. */ 25/* Bluetooth address family and sockets. */
26 26
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/debugfs.h>
28#include <asm/ioctls.h> 29#include <asm/ioctls.h>
29 30
30#include <net/bluetooth/bluetooth.h> 31#include <net/bluetooth/bluetooth.h>
@@ -708,12 +709,17 @@ static struct net_proto_family bt_sock_family_ops = {
708 .create = bt_sock_create, 709 .create = bt_sock_create,
709}; 710};
710 711
712struct dentry *bt_debugfs;
713EXPORT_SYMBOL_GPL(bt_debugfs);
714
711static int __init bt_init(void) 715static int __init bt_init(void)
712{ 716{
713 int err; 717 int err;
714 718
715 BT_INFO("Core ver %s", VERSION); 719 BT_INFO("Core ver %s", VERSION);
716 720
721 bt_debugfs = debugfs_create_dir("bluetooth", NULL);
722
717 err = bt_sysfs_init(); 723 err = bt_sysfs_init();
718 if (err < 0) 724 if (err < 0)
719 return err; 725 return err;
@@ -754,7 +760,6 @@ error:
754 760
755static void __exit bt_exit(void) 761static void __exit bt_exit(void)
756{ 762{
757
758 sco_exit(); 763 sco_exit();
759 764
760 l2cap_exit(); 765 l2cap_exit();
@@ -764,6 +769,8 @@ static void __exit bt_exit(void)
764 sock_unregister(PF_BLUETOOTH); 769 sock_unregister(PF_BLUETOOTH);
765 770
766 bt_sysfs_cleanup(); 771 bt_sysfs_cleanup();
772
773 debugfs_remove_recursive(bt_debugfs);
767} 774}
768 775
769subsys_initcall(bt_init); 776subsys_initcall(bt_init);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index ff04b051792d..ba5366c320da 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -317,8 +317,10 @@ static void hci_conn_timeout(struct work_struct *work)
317} 317}
318 318
319/* Enter sniff mode */ 319/* Enter sniff mode */
320static void hci_conn_enter_sniff_mode(struct hci_conn *conn) 320static void hci_conn_idle(struct work_struct *work)
321{ 321{
322 struct hci_conn *conn = container_of(work, struct hci_conn,
323 idle_work.work);
322 struct hci_dev *hdev = conn->hdev; 324 struct hci_dev *hdev = conn->hdev;
323 325
324 BT_DBG("hcon %p mode %d", conn, conn->mode); 326 BT_DBG("hcon %p mode %d", conn, conn->mode);
@@ -352,21 +354,12 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
352 } 354 }
353} 355}
354 356
355static void hci_conn_idle(unsigned long arg) 357static void hci_conn_auto_accept(struct work_struct *work)
356{
357 struct hci_conn *conn = (void *) arg;
358
359 BT_DBG("hcon %p mode %d", conn, conn->mode);
360
361 hci_conn_enter_sniff_mode(conn);
362}
363
364static void hci_conn_auto_accept(unsigned long arg)
365{ 358{
366 struct hci_conn *conn = (void *) arg; 359 struct hci_conn *conn = container_of(work, struct hci_conn,
367 struct hci_dev *hdev = conn->hdev; 360 auto_accept_work.work);
368 361
369 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst), 362 hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
370 &conn->dst); 363 &conn->dst);
371} 364}
372 365
@@ -415,9 +408,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
415 INIT_LIST_HEAD(&conn->chan_list); 408 INIT_LIST_HEAD(&conn->chan_list);
416 409
417 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout); 410 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
418 setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn); 411 INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
419 setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept, 412 INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
420 (unsigned long) conn);
421 413
422 atomic_set(&conn->refcnt, 0); 414 atomic_set(&conn->refcnt, 0);
423 415
@@ -438,11 +430,9 @@ int hci_conn_del(struct hci_conn *conn)
438 430
439 BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle); 431 BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
440 432
441 del_timer(&conn->idle_timer);
442
443 cancel_delayed_work_sync(&conn->disc_work); 433 cancel_delayed_work_sync(&conn->disc_work);
444 434 cancel_delayed_work_sync(&conn->auto_accept_work);
445 del_timer(&conn->auto_accept_timer); 435 cancel_delayed_work_sync(&conn->idle_work);
446 436
447 if (conn->type == ACL_LINK) { 437 if (conn->type == ACL_LINK) {
448 struct hci_conn *sco = conn->link; 438 struct hci_conn *sco = conn->link;
@@ -568,11 +558,12 @@ static int hci_create_le_conn(struct hci_conn *conn)
568 bacpy(&cp.peer_addr, &conn->dst); 558 bacpy(&cp.peer_addr, &conn->dst);
569 cp.peer_addr_type = conn->dst_type; 559 cp.peer_addr_type = conn->dst_type;
570 cp.own_address_type = conn->src_type; 560 cp.own_address_type = conn->src_type;
571 cp.conn_interval_min = __constant_cpu_to_le16(0x0028); 561 cp.conn_interval_min = cpu_to_le16(hdev->le_conn_min_interval);
572 cp.conn_interval_max = __constant_cpu_to_le16(0x0038); 562 cp.conn_interval_max = cpu_to_le16(hdev->le_conn_max_interval);
573 cp.supervision_timeout = __constant_cpu_to_le16(0x002a); 563 cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
574 cp.min_ce_len = __constant_cpu_to_le16(0x0000); 564 cp.min_ce_len = __constant_cpu_to_le16(0x0000);
575 cp.max_ce_len = __constant_cpu_to_le16(0x0000); 565 cp.max_ce_len = __constant_cpu_to_le16(0x0000);
566
576 hci_req_add(&req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); 567 hci_req_add(&req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
577 568
578 err = hci_req_run(&req, create_le_conn_complete); 569 err = hci_req_run(&req, create_le_conn_complete);
@@ -625,12 +616,7 @@ static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
625 else 616 else
626 conn->dst_type = ADDR_LE_DEV_RANDOM; 617 conn->dst_type = ADDR_LE_DEV_RANDOM;
627 618
628 if (bacmp(&conn->src, BDADDR_ANY)) { 619 conn->src_type = hdev->own_addr_type;
629 conn->src_type = ADDR_LE_DEV_PUBLIC;
630 } else {
631 bacpy(&conn->src, &hdev->static_addr);
632 conn->src_type = ADDR_LE_DEV_RANDOM;
633 }
634 620
635 conn->state = BT_CONNECT; 621 conn->state = BT_CONNECT;
636 conn->out = true; 622 conn->out = true;
@@ -922,8 +908,8 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
922 908
923timer: 909timer:
924 if (hdev->idle_timeout > 0) 910 if (hdev->idle_timeout > 0)
925 mod_timer(&conn->idle_timer, 911 queue_delayed_work(hdev->workqueue, &conn->idle_work,
926 jiffies + msecs_to_jiffies(hdev->idle_timeout)); 912 msecs_to_jiffies(hdev->idle_timeout));
927} 913}
928 914
929/* Drop all connection on the device */ 915/* Drop all connection on the device */
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 7add9c96e32c..6ccc4eb9e55e 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -27,8 +27,9 @@
27 27
28#include <linux/export.h> 28#include <linux/export.h>
29#include <linux/idr.h> 29#include <linux/idr.h>
30
31#include <linux/rfkill.h> 30#include <linux/rfkill.h>
31#include <linux/debugfs.h>
32#include <asm/unaligned.h>
32 33
33#include <net/bluetooth/bluetooth.h> 34#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h> 35#include <net/bluetooth/hci_core.h>
@@ -55,6 +56,586 @@ static void hci_notify(struct hci_dev *hdev, int event)
55 hci_sock_dev_event(hdev, event); 56 hci_sock_dev_event(hdev, event);
56} 57}
57 58
59/* ---- HCI debugfs entries ---- */
60
61static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
62 size_t count, loff_t *ppos)
63{
64 struct hci_dev *hdev = file->private_data;
65 char buf[3];
66
67 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
68 buf[1] = '\n';
69 buf[2] = '\0';
70 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
71}
72
73static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
74 size_t count, loff_t *ppos)
75{
76 struct hci_dev *hdev = file->private_data;
77 struct sk_buff *skb;
78 char buf[32];
79 size_t buf_size = min(count, (sizeof(buf)-1));
80 bool enable;
81 int err;
82
83 if (!test_bit(HCI_UP, &hdev->flags))
84 return -ENETDOWN;
85
86 if (copy_from_user(buf, user_buf, buf_size))
87 return -EFAULT;
88
89 buf[buf_size] = '\0';
90 if (strtobool(buf, &enable))
91 return -EINVAL;
92
93 if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
94 return -EALREADY;
95
96 hci_req_lock(hdev);
97 if (enable)
98 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
99 HCI_CMD_TIMEOUT);
100 else
101 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
102 HCI_CMD_TIMEOUT);
103 hci_req_unlock(hdev);
104
105 if (IS_ERR(skb))
106 return PTR_ERR(skb);
107
108 err = -bt_to_errno(skb->data[0]);
109 kfree_skb(skb);
110
111 if (err < 0)
112 return err;
113
114 change_bit(HCI_DUT_MODE, &hdev->dev_flags);
115
116 return count;
117}
118
119static const struct file_operations dut_mode_fops = {
120 .open = simple_open,
121 .read = dut_mode_read,
122 .write = dut_mode_write,
123 .llseek = default_llseek,
124};
125
126static int features_show(struct seq_file *f, void *ptr)
127{
128 struct hci_dev *hdev = f->private;
129 u8 p;
130
131 hci_dev_lock(hdev);
132 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
133 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
134 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
135 hdev->features[p][0], hdev->features[p][1],
136 hdev->features[p][2], hdev->features[p][3],
137 hdev->features[p][4], hdev->features[p][5],
138 hdev->features[p][6], hdev->features[p][7]);
139 }
140 if (lmp_le_capable(hdev))
141 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
142 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
143 hdev->le_features[0], hdev->le_features[1],
144 hdev->le_features[2], hdev->le_features[3],
145 hdev->le_features[4], hdev->le_features[5],
146 hdev->le_features[6], hdev->le_features[7]);
147 hci_dev_unlock(hdev);
148
149 return 0;
150}
151
152static int features_open(struct inode *inode, struct file *file)
153{
154 return single_open(file, features_show, inode->i_private);
155}
156
157static const struct file_operations features_fops = {
158 .open = features_open,
159 .read = seq_read,
160 .llseek = seq_lseek,
161 .release = single_release,
162};
163
164static int blacklist_show(struct seq_file *f, void *p)
165{
166 struct hci_dev *hdev = f->private;
167 struct bdaddr_list *b;
168
169 hci_dev_lock(hdev);
170 list_for_each_entry(b, &hdev->blacklist, list)
171 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
172 hci_dev_unlock(hdev);
173
174 return 0;
175}
176
177static int blacklist_open(struct inode *inode, struct file *file)
178{
179 return single_open(file, blacklist_show, inode->i_private);
180}
181
182static const struct file_operations blacklist_fops = {
183 .open = blacklist_open,
184 .read = seq_read,
185 .llseek = seq_lseek,
186 .release = single_release,
187};
188
189static int uuids_show(struct seq_file *f, void *p)
190{
191 struct hci_dev *hdev = f->private;
192 struct bt_uuid *uuid;
193
194 hci_dev_lock(hdev);
195 list_for_each_entry(uuid, &hdev->uuids, list) {
196 u8 i, val[16];
197
198 /* The Bluetooth UUID values are stored in big endian,
199 * but with reversed byte order. So convert them into
200 * the right order for the %pUb modifier.
201 */
202 for (i = 0; i < 16; i++)
203 val[i] = uuid->uuid[15 - i];
204
205 seq_printf(f, "%pUb\n", val);
206 }
207 hci_dev_unlock(hdev);
208
209 return 0;
210}
211
212static int uuids_open(struct inode *inode, struct file *file)
213{
214 return single_open(file, uuids_show, inode->i_private);
215}
216
217static const struct file_operations uuids_fops = {
218 .open = uuids_open,
219 .read = seq_read,
220 .llseek = seq_lseek,
221 .release = single_release,
222};
223
224static int inquiry_cache_show(struct seq_file *f, void *p)
225{
226 struct hci_dev *hdev = f->private;
227 struct discovery_state *cache = &hdev->discovery;
228 struct inquiry_entry *e;
229
230 hci_dev_lock(hdev);
231
232 list_for_each_entry(e, &cache->all, all) {
233 struct inquiry_data *data = &e->data;
234 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
235 &data->bdaddr,
236 data->pscan_rep_mode, data->pscan_period_mode,
237 data->pscan_mode, data->dev_class[2],
238 data->dev_class[1], data->dev_class[0],
239 __le16_to_cpu(data->clock_offset),
240 data->rssi, data->ssp_mode, e->timestamp);
241 }
242
243 hci_dev_unlock(hdev);
244
245 return 0;
246}
247
248static int inquiry_cache_open(struct inode *inode, struct file *file)
249{
250 return single_open(file, inquiry_cache_show, inode->i_private);
251}
252
253static const struct file_operations inquiry_cache_fops = {
254 .open = inquiry_cache_open,
255 .read = seq_read,
256 .llseek = seq_lseek,
257 .release = single_release,
258};
259
260static int link_keys_show(struct seq_file *f, void *ptr)
261{
262 struct hci_dev *hdev = f->private;
263 struct list_head *p, *n;
264
265 hci_dev_lock(hdev);
266 list_for_each_safe(p, n, &hdev->link_keys) {
267 struct link_key *key = list_entry(p, struct link_key, list);
268 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
269 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
270 }
271 hci_dev_unlock(hdev);
272
273 return 0;
274}
275
276static int link_keys_open(struct inode *inode, struct file *file)
277{
278 return single_open(file, link_keys_show, inode->i_private);
279}
280
281static const struct file_operations link_keys_fops = {
282 .open = link_keys_open,
283 .read = seq_read,
284 .llseek = seq_lseek,
285 .release = single_release,
286};
287
288static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
289 size_t count, loff_t *ppos)
290{
291 struct hci_dev *hdev = file->private_data;
292 char buf[3];
293
294 buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
295 buf[1] = '\n';
296 buf[2] = '\0';
297 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
298}
299
300static const struct file_operations use_debug_keys_fops = {
301 .open = simple_open,
302 .read = use_debug_keys_read,
303 .llseek = default_llseek,
304};
305
306static int dev_class_show(struct seq_file *f, void *ptr)
307{
308 struct hci_dev *hdev = f->private;
309
310 hci_dev_lock(hdev);
311 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
312 hdev->dev_class[1], hdev->dev_class[0]);
313 hci_dev_unlock(hdev);
314
315 return 0;
316}
317
318static int dev_class_open(struct inode *inode, struct file *file)
319{
320 return single_open(file, dev_class_show, inode->i_private);
321}
322
323static const struct file_operations dev_class_fops = {
324 .open = dev_class_open,
325 .read = seq_read,
326 .llseek = seq_lseek,
327 .release = single_release,
328};
329
330static int voice_setting_get(void *data, u64 *val)
331{
332 struct hci_dev *hdev = data;
333
334 hci_dev_lock(hdev);
335 *val = hdev->voice_setting;
336 hci_dev_unlock(hdev);
337
338 return 0;
339}
340
341DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
342 NULL, "0x%4.4llx\n");
343
344static int auto_accept_delay_set(void *data, u64 val)
345{
346 struct hci_dev *hdev = data;
347
348 hci_dev_lock(hdev);
349 hdev->auto_accept_delay = val;
350 hci_dev_unlock(hdev);
351
352 return 0;
353}
354
355static int auto_accept_delay_get(void *data, u64 *val)
356{
357 struct hci_dev *hdev = data;
358
359 hci_dev_lock(hdev);
360 *val = hdev->auto_accept_delay;
361 hci_dev_unlock(hdev);
362
363 return 0;
364}
365
366DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
367 auto_accept_delay_set, "%llu\n");
368
369static int ssp_debug_mode_set(void *data, u64 val)
370{
371 struct hci_dev *hdev = data;
372 struct sk_buff *skb;
373 __u8 mode;
374 int err;
375
376 if (val != 0 && val != 1)
377 return -EINVAL;
378
379 if (!test_bit(HCI_UP, &hdev->flags))
380 return -ENETDOWN;
381
382 hci_req_lock(hdev);
383 mode = val;
384 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
385 &mode, HCI_CMD_TIMEOUT);
386 hci_req_unlock(hdev);
387
388 if (IS_ERR(skb))
389 return PTR_ERR(skb);
390
391 err = -bt_to_errno(skb->data[0]);
392 kfree_skb(skb);
393
394 if (err < 0)
395 return err;
396
397 hci_dev_lock(hdev);
398 hdev->ssp_debug_mode = val;
399 hci_dev_unlock(hdev);
400
401 return 0;
402}
403
404static int ssp_debug_mode_get(void *data, u64 *val)
405{
406 struct hci_dev *hdev = data;
407
408 hci_dev_lock(hdev);
409 *val = hdev->ssp_debug_mode;
410 hci_dev_unlock(hdev);
411
412 return 0;
413}
414
415DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
416 ssp_debug_mode_set, "%llu\n");
417
418static int idle_timeout_set(void *data, u64 val)
419{
420 struct hci_dev *hdev = data;
421
422 if (val != 0 && (val < 500 || val > 3600000))
423 return -EINVAL;
424
425 hci_dev_lock(hdev);
426 hdev->idle_timeout = val;
427 hci_dev_unlock(hdev);
428
429 return 0;
430}
431
432static int idle_timeout_get(void *data, u64 *val)
433{
434 struct hci_dev *hdev = data;
435
436 hci_dev_lock(hdev);
437 *val = hdev->idle_timeout;
438 hci_dev_unlock(hdev);
439
440 return 0;
441}
442
443DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
444 idle_timeout_set, "%llu\n");
445
446static int sniff_min_interval_set(void *data, u64 val)
447{
448 struct hci_dev *hdev = data;
449
450 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
451 return -EINVAL;
452
453 hci_dev_lock(hdev);
454 hdev->sniff_min_interval = val;
455 hci_dev_unlock(hdev);
456
457 return 0;
458}
459
460static int sniff_min_interval_get(void *data, u64 *val)
461{
462 struct hci_dev *hdev = data;
463
464 hci_dev_lock(hdev);
465 *val = hdev->sniff_min_interval;
466 hci_dev_unlock(hdev);
467
468 return 0;
469}
470
471DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
472 sniff_min_interval_set, "%llu\n");
473
474static int sniff_max_interval_set(void *data, u64 val)
475{
476 struct hci_dev *hdev = data;
477
478 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
479 return -EINVAL;
480
481 hci_dev_lock(hdev);
482 hdev->sniff_max_interval = val;
483 hci_dev_unlock(hdev);
484
485 return 0;
486}
487
488static int sniff_max_interval_get(void *data, u64 *val)
489{
490 struct hci_dev *hdev = data;
491
492 hci_dev_lock(hdev);
493 *val = hdev->sniff_max_interval;
494 hci_dev_unlock(hdev);
495
496 return 0;
497}
498
499DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
500 sniff_max_interval_set, "%llu\n");
501
502static int static_address_show(struct seq_file *f, void *p)
503{
504 struct hci_dev *hdev = f->private;
505
506 hci_dev_lock(hdev);
507 seq_printf(f, "%pMR\n", &hdev->static_addr);
508 hci_dev_unlock(hdev);
509
510 return 0;
511}
512
513static int static_address_open(struct inode *inode, struct file *file)
514{
515 return single_open(file, static_address_show, inode->i_private);
516}
517
518static const struct file_operations static_address_fops = {
519 .open = static_address_open,
520 .read = seq_read,
521 .llseek = seq_lseek,
522 .release = single_release,
523};
524
525static int own_address_type_set(void *data, u64 val)
526{
527 struct hci_dev *hdev = data;
528
529 if (val != 0 && val != 1)
530 return -EINVAL;
531
532 hci_dev_lock(hdev);
533 hdev->own_addr_type = val;
534 hci_dev_unlock(hdev);
535
536 return 0;
537}
538
539static int own_address_type_get(void *data, u64 *val)
540{
541 struct hci_dev *hdev = data;
542
543 hci_dev_lock(hdev);
544 *val = hdev->own_addr_type;
545 hci_dev_unlock(hdev);
546
547 return 0;
548}
549
550DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
551 own_address_type_set, "%llu\n");
552
553static int long_term_keys_show(struct seq_file *f, void *ptr)
554{
555 struct hci_dev *hdev = f->private;
556 struct list_head *p, *n;
557
558 hci_dev_lock(hdev);
559 list_for_each_safe(p, n, &hdev->link_keys) {
560 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
561 seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
562 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
563 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
564 8, ltk->rand, 16, ltk->val);
565 }
566 hci_dev_unlock(hdev);
567
568 return 0;
569}
570
571static int long_term_keys_open(struct inode *inode, struct file *file)
572{
573 return single_open(file, long_term_keys_show, inode->i_private);
574}
575
576static const struct file_operations long_term_keys_fops = {
577 .open = long_term_keys_open,
578 .read = seq_read,
579 .llseek = seq_lseek,
580 .release = single_release,
581};
582
583static int conn_min_interval_set(void *data, u64 val)
584{
585 struct hci_dev *hdev = data;
586
587 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
588 return -EINVAL;
589
590 hci_dev_lock(hdev);
591 hdev->le_conn_min_interval = val;
592 hci_dev_unlock(hdev);
593
594 return 0;
595}
596
597static int conn_min_interval_get(void *data, u64 *val)
598{
599 struct hci_dev *hdev = data;
600
601 hci_dev_lock(hdev);
602 *val = hdev->le_conn_min_interval;
603 hci_dev_unlock(hdev);
604
605 return 0;
606}
607
608DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
609 conn_min_interval_set, "%llu\n");
610
611static int conn_max_interval_set(void *data, u64 val)
612{
613 struct hci_dev *hdev = data;
614
615 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
616 return -EINVAL;
617
618 hci_dev_lock(hdev);
619 hdev->le_conn_max_interval = val;
620 hci_dev_unlock(hdev);
621
622 return 0;
623}
624
625static int conn_max_interval_get(void *data, u64 *val)
626{
627 struct hci_dev *hdev = data;
628
629 hci_dev_lock(hdev);
630 *val = hdev->le_conn_max_interval;
631 hci_dev_unlock(hdev);
632
633 return 0;
634}
635
636DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
637 conn_max_interval_set, "%llu\n");
638
58/* ---- HCI requests ---- */ 639/* ---- HCI requests ---- */
59 640
60static void hci_req_sync_complete(struct hci_dev *hdev, u8 result) 641static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
@@ -556,6 +1137,14 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
556 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); 1137 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
557 1138
558 if (lmp_ssp_capable(hdev)) { 1139 if (lmp_ssp_capable(hdev)) {
1140 /* When SSP is available, then the host features page
1141 * should also be available as well. However some
1142 * controllers list the max_page as 0 as long as SSP
1143 * has not been enabled. To achieve proper debugging
1144 * output, force the minimum max_page to 1 at least.
1145 */
1146 hdev->max_page = 0x01;
1147
559 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { 1148 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
560 u8 mode = 0x01; 1149 u8 mode = 0x01;
561 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, 1150 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
@@ -686,8 +1275,17 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
686 hci_setup_link_policy(req); 1275 hci_setup_link_policy(req);
687 1276
688 if (lmp_le_capable(hdev)) { 1277 if (lmp_le_capable(hdev)) {
1278 /* If the controller has a public BD_ADDR, then by
1279 * default use that one. If this is a LE only
1280 * controller without one, default to the random
1281 * address.
1282 */
1283 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1284 hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
1285 else
1286 hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
1287
689 hci_set_le_support(req); 1288 hci_set_le_support(req);
690 hci_update_ad(req);
691 } 1289 }
692 1290
693 /* Read features beyond page 1 if available */ 1291 /* Read features beyond page 1 if available */
@@ -721,6 +1319,14 @@ static int __hci_init(struct hci_dev *hdev)
721 if (err < 0) 1319 if (err < 0)
722 return err; 1320 return err;
723 1321
1322 /* The Device Under Test (DUT) mode is special and available for
1323 * all controller types. So just create it early on.
1324 */
1325 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1326 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1327 &dut_mode_fops);
1328 }
1329
724 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode 1330 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
725 * BR/EDR/LE type controllers. AMP controllers only need the 1331 * BR/EDR/LE type controllers. AMP controllers only need the
726 * first stage init. 1332 * first stage init.
@@ -736,7 +1342,71 @@ static int __hci_init(struct hci_dev *hdev)
736 if (err < 0) 1342 if (err < 0)
737 return err; 1343 return err;
738 1344
739 return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT); 1345 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1346 if (err < 0)
1347 return err;
1348
1349 /* Only create debugfs entries during the initial setup
1350 * phase and not every time the controller gets powered on.
1351 */
1352 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1353 return 0;
1354
1355 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1356 &features_fops);
1357 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1358 &hdev->manufacturer);
1359 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1360 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1361 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1362 &blacklist_fops);
1363 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1364
1365 if (lmp_bredr_capable(hdev)) {
1366 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1367 hdev, &inquiry_cache_fops);
1368 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1369 hdev, &link_keys_fops);
1370 debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
1371 hdev, &use_debug_keys_fops);
1372 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1373 hdev, &dev_class_fops);
1374 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1375 hdev, &voice_setting_fops);
1376 }
1377
1378 if (lmp_ssp_capable(hdev)) {
1379 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1380 hdev, &auto_accept_delay_fops);
1381 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1382 hdev, &ssp_debug_mode_fops);
1383 }
1384
1385 if (lmp_sniff_capable(hdev)) {
1386 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1387 hdev, &idle_timeout_fops);
1388 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1389 hdev, &sniff_min_interval_fops);
1390 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1391 hdev, &sniff_max_interval_fops);
1392 }
1393
1394 if (lmp_le_capable(hdev)) {
1395 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1396 &hdev->le_white_list_size);
1397 debugfs_create_file("static_address", 0444, hdev->debugfs,
1398 hdev, &static_address_fops);
1399 debugfs_create_file("own_address_type", 0644, hdev->debugfs,
1400 hdev, &own_address_type_fops);
1401 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1402 hdev, &long_term_keys_fops);
1403 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1404 hdev, &conn_min_interval_fops);
1405 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1406 hdev, &conn_max_interval_fops);
1407 }
1408
1409 return 0;
740} 1410}
741 1411
742static void hci_scan_req(struct hci_request *req, unsigned long opt) 1412static void hci_scan_req(struct hci_request *req, unsigned long opt)
@@ -1127,89 +1797,6 @@ done:
1127 return err; 1797 return err;
1128} 1798}
1129 1799
1130static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1131{
1132 u8 ad_len = 0, flags = 0;
1133 size_t name_len;
1134
1135 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1136 flags |= LE_AD_GENERAL;
1137
1138 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1139 if (lmp_le_br_capable(hdev))
1140 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1141 if (lmp_host_le_br_capable(hdev))
1142 flags |= LE_AD_SIM_LE_BREDR_HOST;
1143 } else {
1144 flags |= LE_AD_NO_BREDR;
1145 }
1146
1147 if (flags) {
1148 BT_DBG("adv flags 0x%02x", flags);
1149
1150 ptr[0] = 2;
1151 ptr[1] = EIR_FLAGS;
1152 ptr[2] = flags;
1153
1154 ad_len += 3;
1155 ptr += 3;
1156 }
1157
1158 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1159 ptr[0] = 2;
1160 ptr[1] = EIR_TX_POWER;
1161 ptr[2] = (u8) hdev->adv_tx_power;
1162
1163 ad_len += 3;
1164 ptr += 3;
1165 }
1166
1167 name_len = strlen(hdev->dev_name);
1168 if (name_len > 0) {
1169 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1170
1171 if (name_len > max_len) {
1172 name_len = max_len;
1173 ptr[1] = EIR_NAME_SHORT;
1174 } else
1175 ptr[1] = EIR_NAME_COMPLETE;
1176
1177 ptr[0] = name_len + 1;
1178
1179 memcpy(ptr + 2, hdev->dev_name, name_len);
1180
1181 ad_len += (name_len + 2);
1182 ptr += (name_len + 2);
1183 }
1184
1185 return ad_len;
1186}
1187
1188void hci_update_ad(struct hci_request *req)
1189{
1190 struct hci_dev *hdev = req->hdev;
1191 struct hci_cp_le_set_adv_data cp;
1192 u8 len;
1193
1194 if (!lmp_le_capable(hdev))
1195 return;
1196
1197 memset(&cp, 0, sizeof(cp));
1198
1199 len = create_ad(hdev, cp.data);
1200
1201 if (hdev->adv_data_len == len &&
1202 memcmp(cp.data, hdev->adv_data, len) == 0)
1203 return;
1204
1205 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1206 hdev->adv_data_len = len;
1207
1208 cp.length = len;
1209
1210 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1211}
1212
1213static int hci_dev_do_open(struct hci_dev *hdev) 1800static int hci_dev_do_open(struct hci_dev *hdev)
1214{ 1801{
1215 int ret = 0; 1802 int ret = 0;
@@ -1367,6 +1954,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
1367 cancel_delayed_work(&hdev->discov_off); 1954 cancel_delayed_work(&hdev->discov_off);
1368 hdev->discov_timeout = 0; 1955 hdev->discov_timeout = 0;
1369 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags); 1956 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1957 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1370 } 1958 }
1371 1959
1372 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) 1960 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
@@ -1789,19 +2377,12 @@ static void hci_power_off(struct work_struct *work)
1789static void hci_discov_off(struct work_struct *work) 2377static void hci_discov_off(struct work_struct *work)
1790{ 2378{
1791 struct hci_dev *hdev; 2379 struct hci_dev *hdev;
1792 u8 scan = SCAN_PAGE;
1793 2380
1794 hdev = container_of(work, struct hci_dev, discov_off.work); 2381 hdev = container_of(work, struct hci_dev, discov_off.work);
1795 2382
1796 BT_DBG("%s", hdev->name); 2383 BT_DBG("%s", hdev->name);
1797 2384
1798 hci_dev_lock(hdev); 2385 mgmt_discoverable_timeout(hdev);
1799
1800 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1801
1802 hdev->discov_timeout = 0;
1803
1804 hci_dev_unlock(hdev);
1805} 2386}
1806 2387
1807int hci_uuids_clear(struct hci_dev *hdev) 2388int hci_uuids_clear(struct hci_dev *hdev)
@@ -2124,13 +2705,15 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
2124 return 0; 2705 return 0;
2125} 2706}
2126 2707
2127struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) 2708struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2709 bdaddr_t *bdaddr, u8 type)
2128{ 2710{
2129 struct bdaddr_list *b; 2711 struct bdaddr_list *b;
2130 2712
2131 list_for_each_entry(b, &hdev->blacklist, list) 2713 list_for_each_entry(b, &hdev->blacklist, list) {
2132 if (bacmp(bdaddr, &b->bdaddr) == 0) 2714 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2133 return b; 2715 return b;
2716 }
2134 2717
2135 return NULL; 2718 return NULL;
2136} 2719}
@@ -2140,9 +2723,7 @@ int hci_blacklist_clear(struct hci_dev *hdev)
2140 struct list_head *p, *n; 2723 struct list_head *p, *n;
2141 2724
2142 list_for_each_safe(p, n, &hdev->blacklist) { 2725 list_for_each_safe(p, n, &hdev->blacklist) {
2143 struct bdaddr_list *b; 2726 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2144
2145 b = list_entry(p, struct bdaddr_list, list);
2146 2727
2147 list_del(p); 2728 list_del(p);
2148 kfree(b); 2729 kfree(b);
@@ -2155,10 +2736,10 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2155{ 2736{
2156 struct bdaddr_list *entry; 2737 struct bdaddr_list *entry;
2157 2738
2158 if (bacmp(bdaddr, BDADDR_ANY) == 0) 2739 if (!bacmp(bdaddr, BDADDR_ANY))
2159 return -EBADF; 2740 return -EBADF;
2160 2741
2161 if (hci_blacklist_lookup(hdev, bdaddr)) 2742 if (hci_blacklist_lookup(hdev, bdaddr, type))
2162 return -EEXIST; 2743 return -EEXIST;
2163 2744
2164 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL); 2745 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
@@ -2166,6 +2747,7 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2166 return -ENOMEM; 2747 return -ENOMEM;
2167 2748
2168 bacpy(&entry->bdaddr, bdaddr); 2749 bacpy(&entry->bdaddr, bdaddr);
2750 entry->bdaddr_type = type;
2169 2751
2170 list_add(&entry->list, &hdev->blacklist); 2752 list_add(&entry->list, &hdev->blacklist);
2171 2753
@@ -2176,10 +2758,10 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2176{ 2758{
2177 struct bdaddr_list *entry; 2759 struct bdaddr_list *entry;
2178 2760
2179 if (bacmp(bdaddr, BDADDR_ANY) == 0) 2761 if (!bacmp(bdaddr, BDADDR_ANY))
2180 return hci_blacklist_clear(hdev); 2762 return hci_blacklist_clear(hdev);
2181 2763
2182 entry = hci_blacklist_lookup(hdev, bdaddr); 2764 entry = hci_blacklist_lookup(hdev, bdaddr, type);
2183 if (!entry) 2765 if (!entry)
2184 return -ENOENT; 2766 return -ENOENT;
2185 2767
@@ -2287,6 +2869,8 @@ struct hci_dev *hci_alloc_dev(void)
2287 2869
2288 hdev->le_scan_interval = 0x0060; 2870 hdev->le_scan_interval = 0x0060;
2289 hdev->le_scan_window = 0x0030; 2871 hdev->le_scan_window = 0x0030;
2872 hdev->le_conn_min_interval = 0x0028;
2873 hdev->le_conn_max_interval = 0x0038;
2290 2874
2291 mutex_init(&hdev->lock); 2875 mutex_init(&hdev->lock);
2292 mutex_init(&hdev->req_lock); 2876 mutex_init(&hdev->req_lock);
@@ -2376,7 +2960,12 @@ int hci_register_dev(struct hci_dev *hdev)
2376 goto err; 2960 goto err;
2377 } 2961 }
2378 2962
2379 error = hci_add_sysfs(hdev); 2963 if (!IS_ERR_OR_NULL(bt_debugfs))
2964 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2965
2966 dev_set_name(&hdev->dev, "%s", hdev->name);
2967
2968 error = device_add(&hdev->dev);
2380 if (error < 0) 2969 if (error < 0)
2381 goto err_wqueue; 2970 goto err_wqueue;
2382 2971
@@ -2464,7 +3053,9 @@ void hci_unregister_dev(struct hci_dev *hdev)
2464 rfkill_destroy(hdev->rfkill); 3053 rfkill_destroy(hdev->rfkill);
2465 } 3054 }
2466 3055
2467 hci_del_sysfs(hdev); 3056 device_del(&hdev->dev);
3057
3058 debugfs_remove_recursive(hdev->debugfs);
2468 3059
2469 destroy_workqueue(hdev->workqueue); 3060 destroy_workqueue(hdev->workqueue);
2470 destroy_workqueue(hdev->req_workqueue); 3061 destroy_workqueue(hdev->req_workqueue);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 5391469ff1a5..5935f748c0f9 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -195,6 +195,11 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
195 195
196 memset(hdev->adv_data, 0, sizeof(hdev->adv_data)); 196 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
197 hdev->adv_data_len = 0; 197 hdev->adv_data_len = 0;
198
199 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
200 hdev->scan_rsp_data_len = 0;
201
202 hdev->ssp_debug_mode = 0;
198} 203}
199 204
200static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) 205static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -310,11 +315,6 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
310 set_bit(HCI_ISCAN, &hdev->flags); 315 set_bit(HCI_ISCAN, &hdev->flags);
311 if (!old_iscan) 316 if (!old_iscan)
312 mgmt_discoverable(hdev, 1); 317 mgmt_discoverable(hdev, 1);
313 if (hdev->discov_timeout > 0) {
314 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
315 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
316 to);
317 }
318 } else if (old_iscan) 318 } else if (old_iscan)
319 mgmt_discoverable(hdev, 0); 319 mgmt_discoverable(hdev, 0);
320 320
@@ -470,14 +470,13 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
470 if (rp->status) 470 if (rp->status)
471 return; 471 return;
472 472
473 hdev->hci_ver = rp->hci_ver; 473 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
474 hdev->hci_rev = __le16_to_cpu(rp->hci_rev); 474 hdev->hci_ver = rp->hci_ver;
475 hdev->lmp_ver = rp->lmp_ver; 475 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
476 hdev->manufacturer = __le16_to_cpu(rp->manufacturer); 476 hdev->lmp_ver = rp->lmp_ver;
477 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); 477 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
478 478 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
479 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name, 479 }
480 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
481} 480}
482 481
483static void hci_cc_read_local_commands(struct hci_dev *hdev, 482static void hci_cc_read_local_commands(struct hci_dev *hdev,
@@ -557,7 +556,8 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
557 if (rp->status) 556 if (rp->status)
558 return; 557 return;
559 558
560 hdev->max_page = rp->max_page; 559 if (hdev->max_page < rp->max_page)
560 hdev->max_page = rp->max_page;
561 561
562 if (rp->page < HCI_MAX_PAGES) 562 if (rp->page < HCI_MAX_PAGES)
563 memcpy(hdev->features[rp->page], rp->features, 8); 563 memcpy(hdev->features[rp->page], rp->features, 8);
@@ -939,14 +939,6 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
939 clear_bit(HCI_ADVERTISING, &hdev->dev_flags); 939 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
940 } 940 }
941 941
942 if (*sent && !test_bit(HCI_INIT, &hdev->flags)) {
943 struct hci_request req;
944
945 hci_req_init(&req, hdev);
946 hci_update_ad(&req);
947 hci_req_run(&req, NULL);
948 }
949
950 hci_dev_unlock(hdev); 942 hci_dev_unlock(hdev);
951} 943}
952 944
@@ -1702,7 +1694,7 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1702 &flags); 1694 &flags);
1703 1695
1704 if ((mask & HCI_LM_ACCEPT) && 1696 if ((mask & HCI_LM_ACCEPT) &&
1705 !hci_blacklist_lookup(hdev, &ev->bdaddr)) { 1697 !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
1706 /* Connection accepted */ 1698 /* Connection accepted */
1707 struct inquiry_entry *ie; 1699 struct inquiry_entry *ie;
1708 struct hci_conn *conn; 1700 struct hci_conn *conn;
@@ -2559,7 +2551,6 @@ static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2559 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2551 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2560 if (conn) { 2552 if (conn) {
2561 conn->mode = ev->mode; 2553 conn->mode = ev->mode;
2562 conn->interval = __le16_to_cpu(ev->interval);
2563 2554
2564 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, 2555 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2565 &conn->flags)) { 2556 &conn->flags)) {
@@ -2941,6 +2932,23 @@ unlock:
2941 hci_dev_unlock(hdev); 2932 hci_dev_unlock(hdev);
2942} 2933}
2943 2934
2935static inline size_t eir_get_length(u8 *eir, size_t eir_len)
2936{
2937 size_t parsed = 0;
2938
2939 while (parsed < eir_len) {
2940 u8 field_len = eir[0];
2941
2942 if (field_len == 0)
2943 return parsed;
2944
2945 parsed += field_len + 1;
2946 eir += field_len + 1;
2947 }
2948
2949 return eir_len;
2950}
2951
2944static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, 2952static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
2945 struct sk_buff *skb) 2953 struct sk_buff *skb)
2946{ 2954{
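
The eir_get_length() helper added above walks EIR/advertising data as a run of length-prefixed fields and returns the significant length, stopping at the first zero-length field. The same walk as a standalone program with a small test vector (0x01 is the Flags field type and 0x08 the shortened-name type; the vector itself is made up):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* EIR/AD data is a run of structures: [len][type][len - 1 bytes of data].
     * A zero length terminates the significant part early. */
    static size_t eir_get_length(const uint8_t *eir, size_t eir_len)
    {
        size_t parsed = 0;

        while (parsed < eir_len) {
            uint8_t field_len = eir[0];

            if (field_len == 0)
                return parsed;

            parsed += field_len + 1;
            eir += field_len + 1;
        }

        return eir_len;
    }

    int main(void)
    {
        /* Flags field (3 bytes) + shortened name "BT" (4 bytes), then padding. */
        uint8_t eir[10] = { 0x02, 0x01, 0x06, 0x03, 0x08, 'B', 'T', 0x00, 0x00, 0x00 };

        printf("significant length: %zu\n", eir_get_length(eir, sizeof(eir))); /* 7 */
        return 0;
    }
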
@@ -3181,7 +3189,8 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3181 3189
3182 if (hdev->auto_accept_delay > 0) { 3190 if (hdev->auto_accept_delay > 0) {
3183 int delay = msecs_to_jiffies(hdev->auto_accept_delay); 3191 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3184 mod_timer(&conn->auto_accept_timer, jiffies + delay); 3192 queue_delayed_work(conn->hdev->workqueue,
3193 &conn->auto_accept_work, delay);
3185 goto unlock; 3194 goto unlock;
3186 } 3195 }
3187 3196
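
The user-confirmation path above now arms conn->auto_accept_work on the hdev workqueue instead of calling mod_timer(), which moves the delayed auto-accept into process context. A hedged, non-compilable fragment of the surrounding pattern; the handler name hci_conn_auto_accept() and its initialisation site live in the hci_conn.c part of this series and are assumed here:

    /* At connection setup (hci_conn.c side of this series, assumed): */
    INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);

    /* When a user-confirmation request arrives and auto accept is configured: */
    queue_delayed_work(conn->hdev->workqueue, &conn->auto_accept_work,
                       msecs_to_jiffies(hdev->auto_accept_delay));

    /* On disconnect/cleanup, cancel_delayed_work_sync() takes over the role
     * that del_timer_sync() played for the old timer: */
    cancel_delayed_work_sync(&conn->auto_accept_work);
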
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 97f96ebdd56d..71f0be173080 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -481,7 +481,7 @@ static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
481 481
482 hci_dev_lock(hdev); 482 hci_dev_lock(hdev);
483 483
484 err = hci_blacklist_add(hdev, &bdaddr, 0); 484 err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
485 485
486 hci_dev_unlock(hdev); 486 hci_dev_unlock(hdev);
487 487
@@ -498,7 +498,7 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
498 498
499 hci_dev_lock(hdev); 499 hci_dev_lock(hdev);
500 500
501 err = hci_blacklist_del(hdev, &bdaddr, 0); 501 err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);
502 502
503 hci_dev_unlock(hdev); 503 hci_dev_unlock(hdev);
504 504
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index edf623a29043..0b61250cfdf9 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -1,17 +1,12 @@
1/* Bluetooth HCI driver model support. */ 1/* Bluetooth HCI driver model support. */
2 2
3#include <linux/debugfs.h>
4#include <linux/module.h> 3#include <linux/module.h>
5#include <asm/unaligned.h>
6 4
7#include <net/bluetooth/bluetooth.h> 5#include <net/bluetooth/bluetooth.h>
8#include <net/bluetooth/hci_core.h> 6#include <net/bluetooth/hci_core.h>
9 7
10static struct class *bt_class; 8static struct class *bt_class;
11 9
12struct dentry *bt_debugfs;
13EXPORT_SYMBOL_GPL(bt_debugfs);
14
15static inline char *link_typetostr(int type) 10static inline char *link_typetostr(int type)
16{ 11{
17 switch (type) { 12 switch (type) {
@@ -42,29 +37,15 @@ static ssize_t show_link_address(struct device *dev,
42 return sprintf(buf, "%pMR\n", &conn->dst); 37 return sprintf(buf, "%pMR\n", &conn->dst);
43} 38}
44 39
45static ssize_t show_link_features(struct device *dev,
46 struct device_attribute *attr, char *buf)
47{
48 struct hci_conn *conn = to_hci_conn(dev);
49
50 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
51 conn->features[0][0], conn->features[0][1],
52 conn->features[0][2], conn->features[0][3],
53 conn->features[0][4], conn->features[0][5],
54 conn->features[0][6], conn->features[0][7]);
55}
56
57#define LINK_ATTR(_name, _mode, _show, _store) \ 40#define LINK_ATTR(_name, _mode, _show, _store) \
58struct device_attribute link_attr_##_name = __ATTR(_name, _mode, _show, _store) 41struct device_attribute link_attr_##_name = __ATTR(_name, _mode, _show, _store)
59 42
60static LINK_ATTR(type, S_IRUGO, show_link_type, NULL); 43static LINK_ATTR(type, S_IRUGO, show_link_type, NULL);
61static LINK_ATTR(address, S_IRUGO, show_link_address, NULL); 44static LINK_ATTR(address, S_IRUGO, show_link_address, NULL);
62static LINK_ATTR(features, S_IRUGO, show_link_features, NULL);
63 45
64static struct attribute *bt_link_attrs[] = { 46static struct attribute *bt_link_attrs[] = {
65 &link_attr_type.attr, 47 &link_attr_type.attr,
66 &link_attr_address.attr, 48 &link_attr_address.attr,
67 &link_attr_features.attr,
68 NULL 49 NULL
69}; 50};
70 51
@@ -150,28 +131,6 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
150 hci_dev_put(hdev); 131 hci_dev_put(hdev);
151} 132}
152 133
153static inline char *host_bustostr(int bus)
154{
155 switch (bus) {
156 case HCI_VIRTUAL:
157 return "VIRTUAL";
158 case HCI_USB:
159 return "USB";
160 case HCI_PCCARD:
161 return "PCCARD";
162 case HCI_UART:
163 return "UART";
164 case HCI_RS232:
165 return "RS232";
166 case HCI_PCI:
167 return "PCI";
168 case HCI_SDIO:
169 return "SDIO";
170 default:
171 return "UNKNOWN";
172 }
173}
174
175static inline char *host_typetostr(int type) 134static inline char *host_typetostr(int type)
176{ 135{
177 switch (type) { 136 switch (type) {
@@ -184,13 +143,6 @@ static inline char *host_typetostr(int type)
184 } 143 }
185} 144}
186 145
187static ssize_t show_bus(struct device *dev,
188 struct device_attribute *attr, char *buf)
189{
190 struct hci_dev *hdev = to_hci_dev(dev);
191 return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
192}
193
194static ssize_t show_type(struct device *dev, 146static ssize_t show_type(struct device *dev,
195 struct device_attribute *attr, char *buf) 147 struct device_attribute *attr, char *buf)
196{ 148{
@@ -212,14 +164,6 @@ static ssize_t show_name(struct device *dev,
212 return sprintf(buf, "%s\n", name); 164 return sprintf(buf, "%s\n", name);
213} 165}
214 166
215static ssize_t show_class(struct device *dev,
216 struct device_attribute *attr, char *buf)
217{
218 struct hci_dev *hdev = to_hci_dev(dev);
219 return sprintf(buf, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
220 hdev->dev_class[1], hdev->dev_class[0]);
221}
222
223static ssize_t show_address(struct device *dev, 167static ssize_t show_address(struct device *dev,
224 struct device_attribute *attr, char *buf) 168 struct device_attribute *attr, char *buf)
225{ 169{
@@ -227,150 +171,14 @@ static ssize_t show_address(struct device *dev,
227 return sprintf(buf, "%pMR\n", &hdev->bdaddr); 171 return sprintf(buf, "%pMR\n", &hdev->bdaddr);
228} 172}
229 173
230static ssize_t show_features(struct device *dev,
231 struct device_attribute *attr, char *buf)
232{
233 struct hci_dev *hdev = to_hci_dev(dev);
234
235 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
236 hdev->features[0][0], hdev->features[0][1],
237 hdev->features[0][2], hdev->features[0][3],
238 hdev->features[0][4], hdev->features[0][5],
239 hdev->features[0][6], hdev->features[0][7]);
240}
241
242static ssize_t show_manufacturer(struct device *dev,
243 struct device_attribute *attr, char *buf)
244{
245 struct hci_dev *hdev = to_hci_dev(dev);
246 return sprintf(buf, "%d\n", hdev->manufacturer);
247}
248
249static ssize_t show_hci_version(struct device *dev,
250 struct device_attribute *attr, char *buf)
251{
252 struct hci_dev *hdev = to_hci_dev(dev);
253 return sprintf(buf, "%d\n", hdev->hci_ver);
254}
255
256static ssize_t show_hci_revision(struct device *dev,
257 struct device_attribute *attr, char *buf)
258{
259 struct hci_dev *hdev = to_hci_dev(dev);
260 return sprintf(buf, "%d\n", hdev->hci_rev);
261}
262
263static ssize_t show_idle_timeout(struct device *dev,
264 struct device_attribute *attr, char *buf)
265{
266 struct hci_dev *hdev = to_hci_dev(dev);
267 return sprintf(buf, "%d\n", hdev->idle_timeout);
268}
269
270static ssize_t store_idle_timeout(struct device *dev,
271 struct device_attribute *attr,
272 const char *buf, size_t count)
273{
274 struct hci_dev *hdev = to_hci_dev(dev);
275 unsigned int val;
276 int rv;
277
278 rv = kstrtouint(buf, 0, &val);
279 if (rv < 0)
280 return rv;
281
282 if (val != 0 && (val < 500 || val > 3600000))
283 return -EINVAL;
284
285 hdev->idle_timeout = val;
286
287 return count;
288}
289
290static ssize_t show_sniff_max_interval(struct device *dev,
291 struct device_attribute *attr, char *buf)
292{
293 struct hci_dev *hdev = to_hci_dev(dev);
294 return sprintf(buf, "%d\n", hdev->sniff_max_interval);
295}
296
297static ssize_t store_sniff_max_interval(struct device *dev,
298 struct device_attribute *attr,
299 const char *buf, size_t count)
300{
301 struct hci_dev *hdev = to_hci_dev(dev);
302 u16 val;
303 int rv;
304
305 rv = kstrtou16(buf, 0, &val);
306 if (rv < 0)
307 return rv;
308
309 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
310 return -EINVAL;
311
312 hdev->sniff_max_interval = val;
313
314 return count;
315}
316
317static ssize_t show_sniff_min_interval(struct device *dev,
318 struct device_attribute *attr, char *buf)
319{
320 struct hci_dev *hdev = to_hci_dev(dev);
321 return sprintf(buf, "%d\n", hdev->sniff_min_interval);
322}
323
324static ssize_t store_sniff_min_interval(struct device *dev,
325 struct device_attribute *attr,
326 const char *buf, size_t count)
327{
328 struct hci_dev *hdev = to_hci_dev(dev);
329 u16 val;
330 int rv;
331
332 rv = kstrtou16(buf, 0, &val);
333 if (rv < 0)
334 return rv;
335
336 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
337 return -EINVAL;
338
339 hdev->sniff_min_interval = val;
340
341 return count;
342}
343
344static DEVICE_ATTR(bus, S_IRUGO, show_bus, NULL);
345static DEVICE_ATTR(type, S_IRUGO, show_type, NULL); 174static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
346static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); 175static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
347static DEVICE_ATTR(class, S_IRUGO, show_class, NULL);
348static DEVICE_ATTR(address, S_IRUGO, show_address, NULL); 176static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
349static DEVICE_ATTR(features, S_IRUGO, show_features, NULL);
350static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL);
351static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
352static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
353
354static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
355 show_idle_timeout, store_idle_timeout);
356static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR,
357 show_sniff_max_interval, store_sniff_max_interval);
358static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
359 show_sniff_min_interval, store_sniff_min_interval);
360 177
361static struct attribute *bt_host_attrs[] = { 178static struct attribute *bt_host_attrs[] = {
362 &dev_attr_bus.attr,
363 &dev_attr_type.attr, 179 &dev_attr_type.attr,
364 &dev_attr_name.attr, 180 &dev_attr_name.attr,
365 &dev_attr_class.attr,
366 &dev_attr_address.attr, 181 &dev_attr_address.attr,
367 &dev_attr_features.attr,
368 &dev_attr_manufacturer.attr,
369 &dev_attr_hci_version.attr,
370 &dev_attr_hci_revision.attr,
371 &dev_attr_idle_timeout.attr,
372 &dev_attr_sniff_max_interval.attr,
373 &dev_attr_sniff_min_interval.attr,
374 NULL 182 NULL
375}; 183};
376 184
@@ -396,141 +204,6 @@ static struct device_type bt_host = {
396 .release = bt_host_release, 204 .release = bt_host_release,
397}; 205};
398 206
399static int inquiry_cache_show(struct seq_file *f, void *p)
400{
401 struct hci_dev *hdev = f->private;
402 struct discovery_state *cache = &hdev->discovery;
403 struct inquiry_entry *e;
404
405 hci_dev_lock(hdev);
406
407 list_for_each_entry(e, &cache->all, all) {
408 struct inquiry_data *data = &e->data;
409 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
410 &data->bdaddr,
411 data->pscan_rep_mode, data->pscan_period_mode,
412 data->pscan_mode, data->dev_class[2],
413 data->dev_class[1], data->dev_class[0],
414 __le16_to_cpu(data->clock_offset),
415 data->rssi, data->ssp_mode, e->timestamp);
416 }
417
418 hci_dev_unlock(hdev);
419
420 return 0;
421}
422
423static int inquiry_cache_open(struct inode *inode, struct file *file)
424{
425 return single_open(file, inquiry_cache_show, inode->i_private);
426}
427
428static const struct file_operations inquiry_cache_fops = {
429 .open = inquiry_cache_open,
430 .read = seq_read,
431 .llseek = seq_lseek,
432 .release = single_release,
433};
434
435static int blacklist_show(struct seq_file *f, void *p)
436{
437 struct hci_dev *hdev = f->private;
438 struct bdaddr_list *b;
439
440 hci_dev_lock(hdev);
441
442 list_for_each_entry(b, &hdev->blacklist, list)
443 seq_printf(f, "%pMR\n", &b->bdaddr);
444
445 hci_dev_unlock(hdev);
446
447 return 0;
448}
449
450static int blacklist_open(struct inode *inode, struct file *file)
451{
452 return single_open(file, blacklist_show, inode->i_private);
453}
454
455static const struct file_operations blacklist_fops = {
456 .open = blacklist_open,
457 .read = seq_read,
458 .llseek = seq_lseek,
459 .release = single_release,
460};
461
462static void print_bt_uuid(struct seq_file *f, u8 *uuid)
463{
464 u32 data0, data5;
465 u16 data1, data2, data3, data4;
466
467 data5 = get_unaligned_le32(uuid);
468 data4 = get_unaligned_le16(uuid + 4);
469 data3 = get_unaligned_le16(uuid + 6);
470 data2 = get_unaligned_le16(uuid + 8);
471 data1 = get_unaligned_le16(uuid + 10);
472 data0 = get_unaligned_le32(uuid + 12);
473
474 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
475 data0, data1, data2, data3, data4, data5);
476}
477
478static int uuids_show(struct seq_file *f, void *p)
479{
480 struct hci_dev *hdev = f->private;
481 struct bt_uuid *uuid;
482
483 hci_dev_lock(hdev);
484
485 list_for_each_entry(uuid, &hdev->uuids, list)
486 print_bt_uuid(f, uuid->uuid);
487
488 hci_dev_unlock(hdev);
489
490 return 0;
491}
492
493static int uuids_open(struct inode *inode, struct file *file)
494{
495 return single_open(file, uuids_show, inode->i_private);
496}
497
498static const struct file_operations uuids_fops = {
499 .open = uuids_open,
500 .read = seq_read,
501 .llseek = seq_lseek,
502 .release = single_release,
503};
504
505static int auto_accept_delay_set(void *data, u64 val)
506{
507 struct hci_dev *hdev = data;
508
509 hci_dev_lock(hdev);
510
511 hdev->auto_accept_delay = val;
512
513 hci_dev_unlock(hdev);
514
515 return 0;
516}
517
518static int auto_accept_delay_get(void *data, u64 *val)
519{
520 struct hci_dev *hdev = data;
521
522 hci_dev_lock(hdev);
523
524 *val = hdev->auto_accept_delay;
525
526 hci_dev_unlock(hdev);
527
528 return 0;
529}
530
531DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
532 auto_accept_delay_set, "%llu\n");
533
534void hci_init_sysfs(struct hci_dev *hdev) 207void hci_init_sysfs(struct hci_dev *hdev)
535{ 208{
536 struct device *dev = &hdev->dev; 209 struct device *dev = &hdev->dev;
@@ -542,52 +215,8 @@ void hci_init_sysfs(struct hci_dev *hdev)
542 device_initialize(dev); 215 device_initialize(dev);
543} 216}
544 217
545int hci_add_sysfs(struct hci_dev *hdev)
546{
547 struct device *dev = &hdev->dev;
548 int err;
549
550 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
551
552 dev_set_name(dev, "%s", hdev->name);
553
554 err = device_add(dev);
555 if (err < 0)
556 return err;
557
558 if (!bt_debugfs)
559 return 0;
560
561 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
562 if (!hdev->debugfs)
563 return 0;
564
565 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
566 hdev, &inquiry_cache_fops);
567
568 debugfs_create_file("blacklist", 0444, hdev->debugfs,
569 hdev, &blacklist_fops);
570
571 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
572
573 debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev,
574 &auto_accept_delay_fops);
575 return 0;
576}
577
578void hci_del_sysfs(struct hci_dev *hdev)
579{
580 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
581
582 debugfs_remove_recursive(hdev->debugfs);
583
584 device_del(&hdev->dev);
585}
586
587int __init bt_sysfs_init(void) 218int __init bt_sysfs_init(void)
588{ 219{
589 bt_debugfs = debugfs_create_dir("bluetooth", NULL);
590
591 bt_class = class_create(THIS_MODULE, "bluetooth"); 220 bt_class = class_create(THIS_MODULE, "bluetooth");
592 221
593 return PTR_ERR_OR_ZERO(bt_class); 222 return PTR_ERR_OR_ZERO(bt_class);
@@ -596,6 +225,4 @@ int __init bt_sysfs_init(void)
596void bt_sysfs_cleanup(void) 225void bt_sysfs_cleanup(void)
597{ 226{
598 class_destroy(bt_class); 227 class_destroy(bt_class);
599
600 debugfs_remove_recursive(bt_debugfs);
601} 228}
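
hci_sysfs.c is reduced to the bare type/name/address attributes plus the link objects; the informational and tuning attributes removed here (bus, class, features, manufacturer, hci_version, hci_revision, idle_timeout, the sniff intervals) and the inquiry_cache/blacklist/uuids/auto_accept_delay files reappear as per-device debugfs entries created from hci_core.c elsewhere in this merge. For reference, the debugfs idiom for a simple numeric attribute looks like the block below; it reuses the getter/setter just removed and is a sketch rather than the exact in-tree placement:

    static int auto_accept_delay_set(void *data, u64 val)
    {
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);
        return 0;
    }

    static int auto_accept_delay_get(void *data, u64 *val)
    {
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);
        return 0;
    }

    DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                            auto_accept_delay_set, "%llu\n");

    /* ... in hci_register_dev(), once hdev->debugfs exists: */
    debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs, hdev,
                        &auto_accept_delay_fops);
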
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 0c3446da1ec9..0cef67707838 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -223,38 +223,25 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
223 return 0; 223 return 0;
224} 224}
225 225
226static void __l2cap_state_change(struct l2cap_chan *chan, int state) 226static void l2cap_state_change(struct l2cap_chan *chan, int state)
227{ 227{
228 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state), 228 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
229 state_to_string(state)); 229 state_to_string(state));
230 230
231 chan->state = state; 231 chan->state = state;
232 chan->ops->state_change(chan, state); 232 chan->ops->state_change(chan, state, 0);
233} 233}
234 234
235static void l2cap_state_change(struct l2cap_chan *chan, int state) 235static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
236 int state, int err)
236{ 237{
237 struct sock *sk = chan->sk; 238 chan->state = state;
238 239 chan->ops->state_change(chan, chan->state, err);
239 lock_sock(sk);
240 __l2cap_state_change(chan, state);
241 release_sock(sk);
242}
243
244static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
245{
246 struct sock *sk = chan->sk;
247
248 sk->sk_err = err;
249} 240}
250 241
251static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err) 242static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
252{ 243{
253 struct sock *sk = chan->sk; 244 chan->ops->state_change(chan, chan->state, err);
254
255 lock_sock(sk);
256 __l2cap_chan_set_err(chan, err);
257 release_sock(sk);
258} 245}
259 246
260static void __set_retrans_timer(struct l2cap_chan *chan) 247static void __set_retrans_timer(struct l2cap_chan *chan)
@@ -645,8 +632,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
645 case BT_CONFIG: 632 case BT_CONFIG:
646 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && 633 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
647 conn->hcon->type == ACL_LINK) { 634 conn->hcon->type == ACL_LINK) {
648 struct sock *sk = chan->sk; 635 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
649 __set_chan_timer(chan, sk->sk_sndtimeo);
650 l2cap_send_disconn_req(chan, reason); 636 l2cap_send_disconn_req(chan, reason);
651 } else 637 } else
652 l2cap_chan_del(chan, reason); 638 l2cap_chan_del(chan, reason);
@@ -1230,7 +1216,6 @@ static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1230 1216
1231static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err) 1217static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1232{ 1218{
1233 struct sock *sk = chan->sk;
1234 struct l2cap_conn *conn = chan->conn; 1219 struct l2cap_conn *conn = chan->conn;
1235 struct l2cap_disconn_req req; 1220 struct l2cap_disconn_req req;
1236 1221
@@ -1253,10 +1238,7 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1253 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ, 1238 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1254 sizeof(req), &req); 1239 sizeof(req), &req);
1255 1240
1256 lock_sock(sk); 1241 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1257 __l2cap_state_change(chan, BT_DISCONN);
1258 __l2cap_chan_set_err(chan, err);
1259 release_sock(sk);
1260} 1242}
1261 1243
1262/* ---- L2CAP connections ---- */ 1244/* ---- L2CAP connections ---- */
@@ -1300,20 +1282,16 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
1300 rsp.dcid = cpu_to_le16(chan->scid); 1282 rsp.dcid = cpu_to_le16(chan->scid);
1301 1283
1302 if (l2cap_chan_check_security(chan)) { 1284 if (l2cap_chan_check_security(chan)) {
1303 struct sock *sk = chan->sk;
1304
1305 lock_sock(sk);
1306 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { 1285 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1307 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND); 1286 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1308 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND); 1287 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1309 chan->ops->defer(chan); 1288 chan->ops->defer(chan);
1310 1289
1311 } else { 1290 } else {
1312 __l2cap_state_change(chan, BT_CONFIG); 1291 l2cap_state_change(chan, BT_CONFIG);
1313 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS); 1292 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1314 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); 1293 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1315 } 1294 }
1316 release_sock(sk);
1317 } else { 1295 } else {
1318 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND); 1296 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1319 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND); 1297 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
@@ -1383,14 +1361,15 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1383 1361
1384static void l2cap_le_conn_ready(struct l2cap_conn *conn) 1362static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1385{ 1363{
1386 struct sock *parent; 1364 struct hci_conn *hcon = conn->hcon;
1387 struct l2cap_chan *chan, *pchan; 1365 struct l2cap_chan *chan, *pchan;
1366 u8 dst_type;
1388 1367
1389 BT_DBG(""); 1368 BT_DBG("");
1390 1369
1391 /* Check if we have socket listening on cid */ 1370 /* Check if we have socket listening on cid */
1392 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT, 1371 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1393 &conn->hcon->src, &conn->hcon->dst); 1372 &hcon->src, &hcon->dst);
1394 if (!pchan) 1373 if (!pchan)
1395 return; 1374 return;
1396 1375
@@ -1398,9 +1377,13 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1398 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT)) 1377 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1399 return; 1378 return;
1400 1379
1401 parent = pchan->sk; 1380 dst_type = bdaddr_type(hcon, hcon->dst_type);
1381
1382 /* If device is blocked, do not create a channel for it */
1383 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
1384 return;
1402 1385
1403 lock_sock(parent); 1386 l2cap_chan_lock(pchan);
1404 1387
1405 chan = pchan->ops->new_connection(pchan); 1388 chan = pchan->ops->new_connection(pchan);
1406 if (!chan) 1389 if (!chan)
@@ -1408,15 +1391,15 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1408 1391
1409 chan->dcid = L2CAP_CID_ATT; 1392 chan->dcid = L2CAP_CID_ATT;
1410 1393
1411 bacpy(&chan->src, &conn->hcon->src); 1394 bacpy(&chan->src, &hcon->src);
1412 bacpy(&chan->dst, &conn->hcon->dst); 1395 bacpy(&chan->dst, &hcon->dst);
1413 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type); 1396 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1414 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type); 1397 chan->dst_type = dst_type;
1415 1398
1416 __l2cap_chan_add(conn, chan); 1399 __l2cap_chan_add(conn, chan);
1417 1400
1418clean: 1401clean:
1419 release_sock(parent); 1402 l2cap_chan_unlock(pchan);
1420} 1403}
1421 1404
1422static void l2cap_conn_ready(struct l2cap_conn *conn) 1405static void l2cap_conn_ready(struct l2cap_conn *conn)
@@ -1451,12 +1434,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
1451 l2cap_chan_ready(chan); 1434 l2cap_chan_ready(chan);
1452 1435
1453 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { 1436 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1454 struct sock *sk = chan->sk; 1437 l2cap_chan_ready(chan);
1455 __clear_chan_timer(chan);
1456 lock_sock(sk);
1457 __l2cap_state_change(chan, BT_CONNECTED);
1458 sk->sk_state_change(sk);
1459 release_sock(sk);
1460 1438
1461 } else if (chan->state == BT_CONNECT) { 1439 } else if (chan->state == BT_CONNECT) {
1462 l2cap_do_start(chan); 1440 l2cap_do_start(chan);
@@ -1764,7 +1742,6 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1764int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, 1742int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1765 bdaddr_t *dst, u8 dst_type) 1743 bdaddr_t *dst, u8 dst_type)
1766{ 1744{
1767 struct sock *sk = chan->sk;
1768 struct l2cap_conn *conn; 1745 struct l2cap_conn *conn;
1769 struct hci_conn *hcon; 1746 struct hci_conn *hcon;
1770 struct hci_dev *hdev; 1747 struct hci_dev *hdev;
@@ -1876,7 +1853,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1876 hci_conn_drop(hcon); 1853 hci_conn_drop(hcon);
1877 1854
1878 l2cap_state_change(chan, BT_CONNECT); 1855 l2cap_state_change(chan, BT_CONNECT);
1879 __set_chan_timer(chan, sk->sk_sndtimeo); 1856 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
1880 1857
1881 if (hcon->state == BT_CONNECTED) { 1858 if (hcon->state == BT_CONNECTED) {
1882 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { 1859 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
@@ -1896,38 +1873,6 @@ done:
1896 return err; 1873 return err;
1897} 1874}
1898 1875
1899int __l2cap_wait_ack(struct sock *sk)
1900{
1901 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1902 DECLARE_WAITQUEUE(wait, current);
1903 int err = 0;
1904 int timeo = HZ/5;
1905
1906 add_wait_queue(sk_sleep(sk), &wait);
1907 set_current_state(TASK_INTERRUPTIBLE);
1908 while (chan->unacked_frames > 0 && chan->conn) {
1909 if (!timeo)
1910 timeo = HZ/5;
1911
1912 if (signal_pending(current)) {
1913 err = sock_intr_errno(timeo);
1914 break;
1915 }
1916
1917 release_sock(sk);
1918 timeo = schedule_timeout(timeo);
1919 lock_sock(sk);
1920 set_current_state(TASK_INTERRUPTIBLE);
1921
1922 err = sock_error(sk);
1923 if (err)
1924 break;
1925 }
1926 set_current_state(TASK_RUNNING);
1927 remove_wait_queue(sk_sleep(sk), &wait);
1928 return err;
1929}
1930
1931static void l2cap_monitor_timeout(struct work_struct *work) 1876static void l2cap_monitor_timeout(struct work_struct *work)
1932{ 1877{
1933 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 1878 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
@@ -2868,17 +2813,16 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2868 mutex_lock(&conn->chan_lock); 2813 mutex_lock(&conn->chan_lock);
2869 2814
2870 list_for_each_entry(chan, &conn->chan_l, list) { 2815 list_for_each_entry(chan, &conn->chan_l, list) {
2871 struct sock *sk = chan->sk;
2872 if (chan->chan_type != L2CAP_CHAN_RAW) 2816 if (chan->chan_type != L2CAP_CHAN_RAW)
2873 continue; 2817 continue;
2874 2818
2875 /* Don't send frame to the socket it came from */ 2819 /* Don't send frame to the channel it came from */
2876 if (skb->sk == sk) 2820 if (bt_cb(skb)->chan == chan)
2877 continue; 2821 continue;
2822
2878 nskb = skb_clone(skb, GFP_KERNEL); 2823 nskb = skb_clone(skb, GFP_KERNEL);
2879 if (!nskb) 2824 if (!nskb)
2880 continue; 2825 continue;
2881
2882 if (chan->ops->recv(chan, nskb)) 2826 if (chan->ops->recv(chan, nskb))
2883 kfree_skb(nskb); 2827 kfree_skb(nskb);
2884 } 2828 }
@@ -3757,7 +3701,6 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3757 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data; 3701 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3758 struct l2cap_conn_rsp rsp; 3702 struct l2cap_conn_rsp rsp;
3759 struct l2cap_chan *chan = NULL, *pchan; 3703 struct l2cap_chan *chan = NULL, *pchan;
3760 struct sock *parent, *sk = NULL;
3761 int result, status = L2CAP_CS_NO_INFO; 3704 int result, status = L2CAP_CS_NO_INFO;
3762 3705
3763 u16 dcid = 0, scid = __le16_to_cpu(req->scid); 3706 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
@@ -3773,10 +3716,8 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3773 goto sendresp; 3716 goto sendresp;
3774 } 3717 }
3775 3718
3776 parent = pchan->sk;
3777
3778 mutex_lock(&conn->chan_lock); 3719 mutex_lock(&conn->chan_lock);
3779 lock_sock(parent); 3720 l2cap_chan_lock(pchan);
3780 3721
3781 /* Check if the ACL is secure enough (if not SDP) */ 3722 /* Check if the ACL is secure enough (if not SDP) */
3782 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) && 3723 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
@@ -3796,8 +3737,6 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3796 if (!chan) 3737 if (!chan)
3797 goto response; 3738 goto response;
3798 3739
3799 sk = chan->sk;
3800
3801 /* For certain devices (ex: HID mouse), support for authentication, 3740 /* For certain devices (ex: HID mouse), support for authentication,
3802 * pairing and bonding is optional. For such devices, inorder to avoid 3741 * pairing and bonding is optional. For such devices, inorder to avoid
3803 * the ACL alive for too long after L2CAP disconnection, reset the ACL 3742 * the ACL alive for too long after L2CAP disconnection, reset the ACL
@@ -3817,14 +3756,14 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3817 3756
3818 dcid = chan->scid; 3757 dcid = chan->scid;
3819 3758
3820 __set_chan_timer(chan, sk->sk_sndtimeo); 3759 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3821 3760
3822 chan->ident = cmd->ident; 3761 chan->ident = cmd->ident;
3823 3762
3824 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) { 3763 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3825 if (l2cap_chan_check_security(chan)) { 3764 if (l2cap_chan_check_security(chan)) {
3826 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { 3765 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3827 __l2cap_state_change(chan, BT_CONNECT2); 3766 l2cap_state_change(chan, BT_CONNECT2);
3828 result = L2CAP_CR_PEND; 3767 result = L2CAP_CR_PEND;
3829 status = L2CAP_CS_AUTHOR_PEND; 3768 status = L2CAP_CS_AUTHOR_PEND;
3830 chan->ops->defer(chan); 3769 chan->ops->defer(chan);
@@ -3834,27 +3773,27 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3834 * physical link is up. 3773 * physical link is up.
3835 */ 3774 */
3836 if (amp_id == AMP_ID_BREDR) { 3775 if (amp_id == AMP_ID_BREDR) {
3837 __l2cap_state_change(chan, BT_CONFIG); 3776 l2cap_state_change(chan, BT_CONFIG);
3838 result = L2CAP_CR_SUCCESS; 3777 result = L2CAP_CR_SUCCESS;
3839 } else { 3778 } else {
3840 __l2cap_state_change(chan, BT_CONNECT2); 3779 l2cap_state_change(chan, BT_CONNECT2);
3841 result = L2CAP_CR_PEND; 3780 result = L2CAP_CR_PEND;
3842 } 3781 }
3843 status = L2CAP_CS_NO_INFO; 3782 status = L2CAP_CS_NO_INFO;
3844 } 3783 }
3845 } else { 3784 } else {
3846 __l2cap_state_change(chan, BT_CONNECT2); 3785 l2cap_state_change(chan, BT_CONNECT2);
3847 result = L2CAP_CR_PEND; 3786 result = L2CAP_CR_PEND;
3848 status = L2CAP_CS_AUTHEN_PEND; 3787 status = L2CAP_CS_AUTHEN_PEND;
3849 } 3788 }
3850 } else { 3789 } else {
3851 __l2cap_state_change(chan, BT_CONNECT2); 3790 l2cap_state_change(chan, BT_CONNECT2);
3852 result = L2CAP_CR_PEND; 3791 result = L2CAP_CR_PEND;
3853 status = L2CAP_CS_NO_INFO; 3792 status = L2CAP_CS_NO_INFO;
3854 } 3793 }
3855 3794
3856response: 3795response:
3857 release_sock(parent); 3796 l2cap_chan_unlock(pchan);
3858 mutex_unlock(&conn->chan_lock); 3797 mutex_unlock(&conn->chan_lock);
3859 3798
3860sendresp: 3799sendresp:
@@ -4010,6 +3949,18 @@ static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4010 L2CAP_CONF_SUCCESS, flags), data); 3949 L2CAP_CONF_SUCCESS, flags), data);
4011} 3950}
4012 3951
3952static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3953 u16 scid, u16 dcid)
3954{
3955 struct l2cap_cmd_rej_cid rej;
3956
3957 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3958 rej.scid = __cpu_to_le16(scid);
3959 rej.dcid = __cpu_to_le16(dcid);
3960
3961 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3962}
3963
4013static inline int l2cap_config_req(struct l2cap_conn *conn, 3964static inline int l2cap_config_req(struct l2cap_conn *conn,
4014 struct l2cap_cmd_hdr *cmd, u16 cmd_len, 3965 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4015 u8 *data) 3966 u8 *data)
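
cmd_reject_invalid_cid() gives l2cap_core.c a single place to build the L2CAP Command Reject with reason "invalid CID"; the config, disconnect and AMP create-channel handlers below now send it for unknown channels instead of propagating -EBADSLT. A standalone sketch of the six-byte payload it produces, with field order mirroring the kernel struct and little-endian packing done by an illustrative helper:

    #include <stdint.h>
    #include <stdio.h>

    #define L2CAP_REJ_INVALID_CID 0x0002

    /* Payload of an L2CAP Command Reject with reason "invalid CID": the reason
     * code followed by the two CIDs, all little-endian on the wire. */
    struct cmd_rej_cid {
        uint16_t reason;
        uint16_t scid;
        uint16_t dcid;
    };

    static size_t pack_le16(uint8_t *buf, uint16_t v)
    {
        buf[0] = v & 0xff;
        buf[1] = v >> 8;
        return 2;
    }

    static size_t pack_cmd_rej_cid(uint8_t *buf, const struct cmd_rej_cid *rej)
    {
        size_t off = 0;

        off += pack_le16(buf + off, rej->reason);
        off += pack_le16(buf + off, rej->scid);
        off += pack_le16(buf + off, rej->dcid);
        return off;
    }

    int main(void)
    {
        struct cmd_rej_cid rej = { L2CAP_REJ_INVALID_CID, 0x0040, 0x0000 };
        uint8_t buf[6];
        size_t len = pack_cmd_rej_cid(buf, &rej);

        for (size_t i = 0; i < len; i++)
            printf("%02x ", buf[i]);
        printf("\n"); /* 02 00 40 00 00 00 */
        return 0;
    }
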
@@ -4029,18 +3980,14 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
4029 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags); 3980 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4030 3981
4031 chan = l2cap_get_chan_by_scid(conn, dcid); 3982 chan = l2cap_get_chan_by_scid(conn, dcid);
4032 if (!chan) 3983 if (!chan) {
4033 return -EBADSLT; 3984 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
3985 return 0;
3986 }
4034 3987
4035 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { 3988 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4036 struct l2cap_cmd_rej_cid rej; 3989 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4037 3990 chan->dcid);
4038 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4039 rej.scid = cpu_to_le16(chan->scid);
4040 rej.dcid = cpu_to_le16(chan->dcid);
4041
4042 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
4043 sizeof(rej), &rej);
4044 goto unlock; 3991 goto unlock;
4045 } 3992 }
4046 3993
@@ -4243,7 +4190,6 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4243 struct l2cap_disconn_rsp rsp; 4190 struct l2cap_disconn_rsp rsp;
4244 u16 dcid, scid; 4191 u16 dcid, scid;
4245 struct l2cap_chan *chan; 4192 struct l2cap_chan *chan;
4246 struct sock *sk;
4247 4193
4248 if (cmd_len != sizeof(*req)) 4194 if (cmd_len != sizeof(*req))
4249 return -EPROTO; 4195 return -EPROTO;
@@ -4258,20 +4204,17 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4258 chan = __l2cap_get_chan_by_scid(conn, dcid); 4204 chan = __l2cap_get_chan_by_scid(conn, dcid);
4259 if (!chan) { 4205 if (!chan) {
4260 mutex_unlock(&conn->chan_lock); 4206 mutex_unlock(&conn->chan_lock);
4261 return -EBADSLT; 4207 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4208 return 0;
4262 } 4209 }
4263 4210
4264 l2cap_chan_lock(chan); 4211 l2cap_chan_lock(chan);
4265 4212
4266 sk = chan->sk;
4267
4268 rsp.dcid = cpu_to_le16(chan->scid); 4213 rsp.dcid = cpu_to_le16(chan->scid);
4269 rsp.scid = cpu_to_le16(chan->dcid); 4214 rsp.scid = cpu_to_le16(chan->dcid);
4270 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp); 4215 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4271 4216
4272 lock_sock(sk); 4217 chan->ops->set_shutdown(chan);
4273 sk->sk_shutdown = SHUTDOWN_MASK;
4274 release_sock(sk);
4275 4218
4276 l2cap_chan_hold(chan); 4219 l2cap_chan_hold(chan);
4277 l2cap_chan_del(chan, ECONNRESET); 4220 l2cap_chan_del(chan, ECONNRESET);
@@ -4491,7 +4434,9 @@ static int l2cap_create_channel_req(struct l2cap_conn *conn,
4491 &conn->hcon->dst); 4434 &conn->hcon->dst);
4492 if (!hs_hcon) { 4435 if (!hs_hcon) {
4493 hci_dev_put(hdev); 4436 hci_dev_put(hdev);
4494 return -EBADSLT; 4437 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4438 chan->dcid);
4439 return 0;
4495 } 4440 }
4496 4441
4497 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon); 4442 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
@@ -4769,7 +4714,7 @@ static void l2cap_do_create(struct l2cap_chan *chan, int result,
4769 sizeof(rsp), &rsp); 4714 sizeof(rsp), &rsp);
4770 4715
4771 if (result == L2CAP_CR_SUCCESS) { 4716 if (result == L2CAP_CR_SUCCESS) {
4772 __l2cap_state_change(chan, BT_CONFIG); 4717 l2cap_state_change(chan, BT_CONFIG);
4773 set_bit(CONF_REQ_SENT, &chan->conf_state); 4718 set_bit(CONF_REQ_SENT, &chan->conf_state);
4774 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn), 4719 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4775 L2CAP_CONF_REQ, 4720 L2CAP_CONF_REQ,
@@ -5347,20 +5292,6 @@ static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5347 } 5292 }
5348} 5293}
5349 5294
5350static __le16 l2cap_err_to_reason(int err)
5351{
5352 switch (err) {
5353 case -EBADSLT:
5354 return __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
5355 case -EMSGSIZE:
5356 return __constant_cpu_to_le16(L2CAP_REJ_MTU_EXCEEDED);
5357 case -EINVAL:
5358 case -EPROTO:
5359 default:
5360 return __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5361 }
5362}
5363
5364static inline void l2cap_le_sig_channel(struct l2cap_conn *conn, 5295static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5365 struct sk_buff *skb) 5296 struct sk_buff *skb)
5366{ 5297{
@@ -5393,7 +5324,7 @@ static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5393 5324
5394 BT_ERR("Wrong link type (%d)", err); 5325 BT_ERR("Wrong link type (%d)", err);
5395 5326
5396 rej.reason = l2cap_err_to_reason(err); 5327 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5397 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ, 5328 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5398 sizeof(rej), &rej); 5329 sizeof(rej), &rej);
5399 } 5330 }
@@ -5438,7 +5369,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5438 5369
5439 BT_ERR("Wrong link type (%d)", err); 5370 BT_ERR("Wrong link type (%d)", err);
5440 5371
5441 rej.reason = l2cap_err_to_reason(err); 5372 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5442 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, 5373 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5443 sizeof(rej), &rej); 5374 sizeof(rej), &rej);
5444 } 5375 }
@@ -6446,8 +6377,7 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6446 if (hcon->type != ACL_LINK) 6377 if (hcon->type != ACL_LINK)
6447 goto drop; 6378 goto drop;
6448 6379
6449 chan = l2cap_global_chan_by_psm(0, psm, &conn->hcon->src, 6380 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst);
6450 &conn->hcon->dst);
6451 if (!chan) 6381 if (!chan)
6452 goto drop; 6382 goto drop;
6453 6383
@@ -6460,7 +6390,7 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6460 goto drop; 6390 goto drop;
6461 6391
6462 /* Store remote BD_ADDR and PSM for msg_name */ 6392 /* Store remote BD_ADDR and PSM for msg_name */
6463 bacpy(&bt_cb(skb)->bdaddr, &conn->hcon->dst); 6393 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6464 bt_cb(skb)->psm = psm; 6394 bt_cb(skb)->psm = psm;
6465 6395
6466 if (!chan->ops->recv(chan, skb)) 6396 if (!chan->ops->recv(chan, skb))
@@ -6480,12 +6410,15 @@ static void l2cap_att_channel(struct l2cap_conn *conn,
6480 goto drop; 6410 goto drop;
6481 6411
6482 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT, 6412 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6483 &conn->hcon->src, &conn->hcon->dst); 6413 &hcon->src, &hcon->dst);
6484 if (!chan) 6414 if (!chan)
6485 goto drop; 6415 goto drop;
6486 6416
6487 BT_DBG("chan %p, len %d", chan, skb->len); 6417 BT_DBG("chan %p, len %d", chan, skb->len);
6488 6418
6419 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
6420 goto drop;
6421
6489 if (chan->imtu < skb->len) 6422 if (chan->imtu < skb->len)
6490 goto drop; 6423 goto drop;
6491 6424
@@ -6682,31 +6615,26 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6682 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); 6615 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6683 } 6616 }
6684 } else if (chan->state == BT_CONNECT2) { 6617 } else if (chan->state == BT_CONNECT2) {
6685 struct sock *sk = chan->sk;
6686 struct l2cap_conn_rsp rsp; 6618 struct l2cap_conn_rsp rsp;
6687 __u16 res, stat; 6619 __u16 res, stat;
6688 6620
6689 lock_sock(sk);
6690
6691 if (!status) { 6621 if (!status) {
6692 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { 6622 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6693 res = L2CAP_CR_PEND; 6623 res = L2CAP_CR_PEND;
6694 stat = L2CAP_CS_AUTHOR_PEND; 6624 stat = L2CAP_CS_AUTHOR_PEND;
6695 chan->ops->defer(chan); 6625 chan->ops->defer(chan);
6696 } else { 6626 } else {
6697 __l2cap_state_change(chan, BT_CONFIG); 6627 l2cap_state_change(chan, BT_CONFIG);
6698 res = L2CAP_CR_SUCCESS; 6628 res = L2CAP_CR_SUCCESS;
6699 stat = L2CAP_CS_NO_INFO; 6629 stat = L2CAP_CS_NO_INFO;
6700 } 6630 }
6701 } else { 6631 } else {
6702 __l2cap_state_change(chan, BT_DISCONN); 6632 l2cap_state_change(chan, BT_DISCONN);
6703 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); 6633 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6704 res = L2CAP_CR_SEC_BLOCK; 6634 res = L2CAP_CR_SEC_BLOCK;
6705 stat = L2CAP_CS_NO_INFO; 6635 stat = L2CAP_CS_NO_INFO;
6706 } 6636 }
6707 6637
6708 release_sock(sk);
6709
6710 rsp.scid = cpu_to_le16(chan->dcid); 6638 rsp.scid = cpu_to_le16(chan->dcid);
6711 rsp.dcid = cpu_to_le16(chan->scid); 6639 rsp.dcid = cpu_to_le16(chan->scid);
6712 rsp.result = cpu_to_le16(res); 6640 rsp.result = cpu_to_le16(res);
@@ -6880,12 +6808,11 @@ int __init l2cap_init(void)
6880 if (err < 0) 6808 if (err < 0)
6881 return err; 6809 return err;
6882 6810
6883 if (bt_debugfs) { 6811 if (IS_ERR_OR_NULL(bt_debugfs))
6884 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs, 6812 return 0;
6885 NULL, &l2cap_debugfs_fops); 6813
6886 if (!l2cap_debugfs) 6814 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6887 BT_ERR("Failed to create L2CAP debug file"); 6815 NULL, &l2cap_debugfs_fops);
6888 }
6889 6816
6890 return 0; 6817 return 0;
6891} 6818}
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 5ffd75e20bde..7cc24d263caa 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -72,6 +72,15 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
72 if (!bdaddr_type_is_valid(la.l2_bdaddr_type)) 72 if (!bdaddr_type_is_valid(la.l2_bdaddr_type))
73 return -EINVAL; 73 return -EINVAL;
74 74
75 if (bdaddr_type_is_le(la.l2_bdaddr_type)) {
76 /* Connection oriented channels are not supported on LE */
77 if (la.l2_psm)
78 return -EINVAL;
79 /* We only allow ATT user space socket */
80 if (la.l2_cid != __constant_cpu_to_le16(L2CAP_CID_ATT))
81 return -EINVAL;
82 }
83
75 lock_sock(sk); 84 lock_sock(sk);
76 85
77 if (sk->sk_state != BT_OPEN) { 86 if (sk->sk_state != BT_OPEN) {
@@ -150,12 +159,44 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
150 if (!bdaddr_type_is_valid(la.l2_bdaddr_type)) 159 if (!bdaddr_type_is_valid(la.l2_bdaddr_type))
151 return -EINVAL; 160 return -EINVAL;
152 161
153 if (chan->src_type == BDADDR_BREDR && la.l2_bdaddr_type != BDADDR_BREDR) 162 /* Check that the socket wasn't bound to something that
154 return -EINVAL; 163 * conflicts with the address given to connect(). If chan->src
164 * is BDADDR_ANY it means bind() was never used, in which case
165 * chan->src_type and la.l2_bdaddr_type do not need to match.
166 */
167 if (chan->src_type == BDADDR_BREDR && bacmp(&chan->src, BDADDR_ANY) &&
168 bdaddr_type_is_le(la.l2_bdaddr_type)) {
169 /* Old user space versions will try to incorrectly bind
170 * the ATT socket using BDADDR_BREDR. We need to accept
171 * this and fix up the source address type only when
172 * both the source CID and destination CID indicate
173 * ATT. Anything else is an invalid combination.
174 */
175 if (chan->scid != L2CAP_CID_ATT ||
176 la.l2_cid != __constant_cpu_to_le16(L2CAP_CID_ATT))
177 return -EINVAL;
178
179 /* We don't have the hdev available here to make a
180 * better decision on random vs public, but since all
181 * user space versions that exhibit this issue anyway do
182 * not support random local addresses assuming public
183 * here is good enough.
184 */
185 chan->src_type = BDADDR_LE_PUBLIC;
186 }
155 187
156 if (chan->src_type != BDADDR_BREDR && la.l2_bdaddr_type == BDADDR_BREDR) 188 if (chan->src_type != BDADDR_BREDR && la.l2_bdaddr_type == BDADDR_BREDR)
157 return -EINVAL; 189 return -EINVAL;
158 190
191 if (bdaddr_type_is_le(la.l2_bdaddr_type)) {
192 /* Connection oriented channels are not supported on LE */
193 if (la.l2_psm)
194 return -EINVAL;
195 /* We only allow ATT user space socket */
196 if (la.l2_cid != __constant_cpu_to_le16(L2CAP_CID_ATT))
197 return -EINVAL;
198 }
199
159 err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid), 200 err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
160 &la.l2_bdaddr, la.l2_bdaddr_type); 201 &la.l2_bdaddr, la.l2_bdaddr_type);
161 if (err) 202 if (err)
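
Taken together, the bind() and connect() checks above restrict LE sockets to the ATT fixed channel: an LE address type with a PSM is rejected, only CID 4 (L2CAP_CID_ATT) is allowed, and connect() additionally repairs sockets that older user space bound with BDADDR_BREDR. A compact userspace model of the validation rule; check_le_sockaddr() is an invented name and the return value merely stands in for -EINVAL:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BDADDR_BREDR     0x00
    #define BDADDR_LE_PUBLIC 0x01
    #define BDADDR_LE_RANDOM 0x02

    #define L2CAP_CID_ATT 0x0004

    static bool bdaddr_type_is_le(uint8_t type)
    {
        return type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM;
    }

    /* Returns 0 if the (psm, cid, address type) combination is acceptable,
     * -1 (standing in for -EINVAL) otherwise. */
    static int check_le_sockaddr(uint16_t psm, uint16_t cid, uint8_t bdaddr_type)
    {
        if (!bdaddr_type_is_le(bdaddr_type))
            return 0;
        if (psm)                        /* no connection-oriented channels on LE here */
            return -1;
        if (cid != L2CAP_CID_ATT)       /* only the ATT fixed channel */
            return -1;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_le_sockaddr(0, L2CAP_CID_ATT, BDADDR_LE_PUBLIC)); /*  0 */
        printf("%d\n", check_le_sockaddr(0x1001, 0, BDADDR_LE_PUBLIC));        /* -1 */
        printf("%d\n", check_le_sockaddr(0x1001, 0, BDADDR_BREDR));            /*  0 */
        return 0;
    }
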
@@ -879,6 +920,38 @@ static void l2cap_sock_kill(struct sock *sk)
879 sock_put(sk); 920 sock_put(sk);
880} 921}
881 922
923static int __l2cap_wait_ack(struct sock *sk)
924{
925 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
926 DECLARE_WAITQUEUE(wait, current);
927 int err = 0;
928 int timeo = HZ/5;
929
930 add_wait_queue(sk_sleep(sk), &wait);
931 set_current_state(TASK_INTERRUPTIBLE);
932 while (chan->unacked_frames > 0 && chan->conn) {
933 if (!timeo)
934 timeo = HZ/5;
935
936 if (signal_pending(current)) {
937 err = sock_intr_errno(timeo);
938 break;
939 }
940
941 release_sock(sk);
942 timeo = schedule_timeout(timeo);
943 lock_sock(sk);
944 set_current_state(TASK_INTERRUPTIBLE);
945
946 err = sock_error(sk);
947 if (err)
948 break;
949 }
950 set_current_state(TASK_RUNNING);
951 remove_wait_queue(sk_sleep(sk), &wait);
952 return err;
953}
954
882static int l2cap_sock_shutdown(struct socket *sock, int how) 955static int l2cap_sock_shutdown(struct socket *sock, int how)
883{ 956{
884 struct sock *sk = sock->sk; 957 struct sock *sk = sock->sk;
@@ -969,6 +1042,8 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
969{ 1042{
970 struct sock *sk, *parent = chan->data; 1043 struct sock *sk, *parent = chan->data;
971 1044
1045 lock_sock(parent);
1046
972 /* Check for backlog size */ 1047 /* Check for backlog size */
973 if (sk_acceptq_is_full(parent)) { 1048 if (sk_acceptq_is_full(parent)) {
974 BT_DBG("backlog full %d", parent->sk_ack_backlog); 1049 BT_DBG("backlog full %d", parent->sk_ack_backlog);
@@ -986,6 +1061,8 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
986 1061
987 bt_accept_enqueue(parent, sk); 1062 bt_accept_enqueue(parent, sk);
988 1063
1064 release_sock(parent);
1065
989 return l2cap_pi(sk)->chan; 1066 return l2cap_pi(sk)->chan;
990} 1067}
991 1068
@@ -1072,26 +1149,33 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
1072 release_sock(sk); 1149 release_sock(sk);
1073} 1150}
1074 1151
1075static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state) 1152static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state,
1153 int err)
1076{ 1154{
1077 struct sock *sk = chan->data; 1155 struct sock *sk = chan->data;
1078 1156
1079 sk->sk_state = state; 1157 sk->sk_state = state;
1158
1159 if (err)
1160 sk->sk_err = err;
1080} 1161}
1081 1162
1082static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan, 1163static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
1083 unsigned long len, int nb) 1164 unsigned long len, int nb)
1084{ 1165{
1166 struct sock *sk = chan->data;
1085 struct sk_buff *skb; 1167 struct sk_buff *skb;
1086 int err; 1168 int err;
1087 1169
1088 l2cap_chan_unlock(chan); 1170 l2cap_chan_unlock(chan);
1089 skb = bt_skb_send_alloc(chan->sk, len, nb, &err); 1171 skb = bt_skb_send_alloc(sk, len, nb, &err);
1090 l2cap_chan_lock(chan); 1172 l2cap_chan_lock(chan);
1091 1173
1092 if (!skb) 1174 if (!skb)
1093 return ERR_PTR(err); 1175 return ERR_PTR(err);
1094 1176
1177 bt_cb(skb)->chan = chan;
1178
1095 return skb; 1179 return skb;
1096} 1180}
1097 1181
@@ -1117,11 +1201,15 @@ static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
1117 1201
1118static void l2cap_sock_defer_cb(struct l2cap_chan *chan) 1202static void l2cap_sock_defer_cb(struct l2cap_chan *chan)
1119{ 1203{
1120 struct sock *sk = chan->data; 1204 struct sock *parent, *sk = chan->data;
1121 struct sock *parent = bt_sk(sk)->parent; 1205
1206 lock_sock(sk);
1122 1207
1208 parent = bt_sk(sk)->parent;
1123 if (parent) 1209 if (parent)
1124 parent->sk_data_ready(parent, 0); 1210 parent->sk_data_ready(parent, 0);
1211
1212 release_sock(sk);
1125} 1213}
1126 1214
1127static void l2cap_sock_resume_cb(struct l2cap_chan *chan) 1215static void l2cap_sock_resume_cb(struct l2cap_chan *chan)
@@ -1132,6 +1220,22 @@ static void l2cap_sock_resume_cb(struct l2cap_chan *chan)
1132 sk->sk_state_change(sk); 1220 sk->sk_state_change(sk);
1133} 1221}
1134 1222
1223static void l2cap_sock_set_shutdown_cb(struct l2cap_chan *chan)
1224{
1225 struct sock *sk = chan->data;
1226
1227 lock_sock(sk);
1228 sk->sk_shutdown = SHUTDOWN_MASK;
1229 release_sock(sk);
1230}
1231
1232static long l2cap_sock_get_sndtimeo_cb(struct l2cap_chan *chan)
1233{
1234 struct sock *sk = chan->data;
1235
1236 return sk->sk_sndtimeo;
1237}
1238
1135static struct l2cap_ops l2cap_chan_ops = { 1239static struct l2cap_ops l2cap_chan_ops = {
1136 .name = "L2CAP Socket Interface", 1240 .name = "L2CAP Socket Interface",
1137 .new_connection = l2cap_sock_new_connection_cb, 1241 .new_connection = l2cap_sock_new_connection_cb,
@@ -1142,6 +1246,8 @@ static struct l2cap_ops l2cap_chan_ops = {
1142 .ready = l2cap_sock_ready_cb, 1246 .ready = l2cap_sock_ready_cb,
1143 .defer = l2cap_sock_defer_cb, 1247 .defer = l2cap_sock_defer_cb,
1144 .resume = l2cap_sock_resume_cb, 1248 .resume = l2cap_sock_resume_cb,
1249 .set_shutdown = l2cap_sock_set_shutdown_cb,
1250 .get_sndtimeo = l2cap_sock_get_sndtimeo_cb,
1145 .alloc_skb = l2cap_sock_alloc_skb_cb, 1251 .alloc_skb = l2cap_sock_alloc_skb_cb,
1146}; 1252};
1147 1253
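
The two new entries in l2cap_chan_ops complete the pattern this series relies on: l2cap_core.c no longer reads sk->sk_sndtimeo or writes sk->sk_shutdown itself, it asks the channel owner through .get_sndtimeo and .set_shutdown. A small standalone illustration of that ops-table indirection; all type and function names here are invented for the example:

    #include <stdio.h>

    struct chan;

    /* Owner-provided callbacks; the core only ever goes through this table. */
    struct chan_ops {
        long (*get_sndtimeo)(struct chan *c);
        void (*set_shutdown)(struct chan *c);
    };

    struct chan {
        const struct chan_ops *ops;
        void *data;                     /* owner state, e.g. a socket */
    };

    struct fake_sock { long sndtimeo; int shutdown; };

    static long sock_get_sndtimeo(struct chan *c)
    {
        return ((struct fake_sock *)c->data)->sndtimeo;
    }

    static void sock_set_shutdown(struct chan *c)
    {
        ((struct fake_sock *)c->data)->shutdown = 1;
    }

    static const struct chan_ops sock_ops = {
        .get_sndtimeo = sock_get_sndtimeo,
        .set_shutdown = sock_set_shutdown,
    };

    int main(void)
    {
        struct fake_sock sk = { .sndtimeo = 5000, .shutdown = 0 };
        struct chan c = { .ops = &sock_ops, .data = &sk };

        printf("timeout %ld, shutdown %d\n", c.ops->get_sndtimeo(&c), sk.shutdown);
        c.ops->set_shutdown(&c);
        printf("shutdown %d\n", sk.shutdown);
        return 0;
    }
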
@@ -1268,8 +1374,6 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
1268 1374
1269 l2cap_chan_hold(chan); 1375 l2cap_chan_hold(chan);
1270 1376
1271 chan->sk = sk;
1272
1273 l2cap_pi(sk)->chan = chan; 1377 l2cap_pi(sk)->chan = chan;
1274 1378
1275 return sk; 1379 return sk;
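
Dropping chan->sk works because every skb allocated through l2cap_sock_alloc_skb_cb() is now tagged with its originating channel in bt_cb(skb)->chan, which is what l2cap_raw_recv() compares against when suppressing loopback delivery. A toy model of that tag-and-skip idea, with invented types:

    #include <stdio.h>

    struct channel { const char *name; };

    struct buffer {
        struct channel *chan;           /* who created this buffer */
        const char *payload;
    };

    /* Broadcast to every raw channel except the one the buffer came from. */
    static void broadcast(struct buffer *buf, struct channel **chans, int n)
    {
        for (int i = 0; i < n; i++) {
            if (chans[i] == buf->chan)
                continue;               /* don't loop the frame back */
            printf("deliver '%s' to %s\n", buf->payload, chans[i]->name);
        }
    }

    int main(void)
    {
        struct channel a = { "chan-a" }, b = { "chan-b" };
        struct channel *all[] = { &a, &b };
        struct buffer buf = { .chan = &a, .payload = "hello" };

        broadcast(&buf, all, 2);        /* only chan-b receives it */
        return 0;
    }
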
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 861e389f4b4c..074d83690a41 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -536,6 +536,156 @@ static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
536 return ptr; 536 return ptr;
537} 537}
538 538
539static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
540{
541 struct pending_cmd *cmd;
542
543 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
544 if (cmd->opcode == opcode)
545 return cmd;
546 }
547
548 return NULL;
549}
550
551static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
552{
553 u8 ad_len = 0;
554 size_t name_len;
555
556 name_len = strlen(hdev->dev_name);
557 if (name_len > 0) {
558 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
559
560 if (name_len > max_len) {
561 name_len = max_len;
562 ptr[1] = EIR_NAME_SHORT;
563 } else
564 ptr[1] = EIR_NAME_COMPLETE;
565
566 ptr[0] = name_len + 1;
567
568 memcpy(ptr + 2, hdev->dev_name, name_len);
569
570 ad_len += (name_len + 2);
571 ptr += (name_len + 2);
572 }
573
574 return ad_len;
575}
576
577static void update_scan_rsp_data(struct hci_request *req)
578{
579 struct hci_dev *hdev = req->hdev;
580 struct hci_cp_le_set_scan_rsp_data cp;
581 u8 len;
582
583 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
584 return;
585
586 memset(&cp, 0, sizeof(cp));
587
588 len = create_scan_rsp_data(hdev, cp.data);
589
590 if (hdev->scan_rsp_data_len == len &&
591 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
592 return;
593
594 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
595 hdev->scan_rsp_data_len = len;
596
597 cp.length = len;
598
599 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
600}
601
602static u8 get_adv_discov_flags(struct hci_dev *hdev)
603{
604 struct pending_cmd *cmd;
605
606 /* If there's a pending mgmt command the flags will not yet have
607 * their final values, so check for this first.
608 */
609 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
610 if (cmd) {
611 struct mgmt_mode *cp = cmd->param;
612 if (cp->val == 0x01)
613 return LE_AD_GENERAL;
614 else if (cp->val == 0x02)
615 return LE_AD_LIMITED;
616 } else {
617 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
618 return LE_AD_LIMITED;
619 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
620 return LE_AD_GENERAL;
621 }
622
623 return 0;
624}
625
626static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
627{
628 u8 ad_len = 0, flags = 0;
629
630 flags |= get_adv_discov_flags(hdev);
631
632 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
633 if (lmp_le_br_capable(hdev))
634 flags |= LE_AD_SIM_LE_BREDR_CTRL;
635 if (lmp_host_le_br_capable(hdev))
636 flags |= LE_AD_SIM_LE_BREDR_HOST;
637 } else {
638 flags |= LE_AD_NO_BREDR;
639 }
640
641 if (flags) {
642 BT_DBG("adv flags 0x%02x", flags);
643
644 ptr[0] = 2;
645 ptr[1] = EIR_FLAGS;
646 ptr[2] = flags;
647
648 ad_len += 3;
649 ptr += 3;
650 }
651
652 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
653 ptr[0] = 2;
654 ptr[1] = EIR_TX_POWER;
655 ptr[2] = (u8) hdev->adv_tx_power;
656
657 ad_len += 3;
658 ptr += 3;
659 }
660
661 return ad_len;
662}
663
664static void update_adv_data(struct hci_request *req)
665{
666 struct hci_dev *hdev = req->hdev;
667 struct hci_cp_le_set_adv_data cp;
668 u8 len;
669
670 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
671 return;
672
673 memset(&cp, 0, sizeof(cp));
674
675 len = create_adv_data(hdev, cp.data);
676
677 if (hdev->adv_data_len == len &&
678 memcmp(cp.data, hdev->adv_data, len) == 0)
679 return;
680
681 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
682 hdev->adv_data_len = len;
683
684 cp.length = len;
685
686 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
687}
688
539static void create_eir(struct hci_dev *hdev, u8 *data) 689static void create_eir(struct hci_dev *hdev, u8 *data)
540{ 690{
541 u8 *ptr = data; 691 u8 *ptr = data;
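
update_scan_rsp_data() and update_adv_data() above follow the same shape: build the payload, compare it with the cached copy in hdev, and only queue the HCI command when something changed. The scan response carries the device name (shortened when it will not fit), while the advertising data carries the discoverability flags and TX power; both payloads are limited to 31 bytes. A standalone sketch of the name-field construction; MAX_AD_LENGTH and the two AD type values mirror the kernel constants, append_name() is illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_AD_LENGTH         31    /* LE advertising / scan response payload */
    #define AD_TYPE_NAME_SHORT    0x08
    #define AD_TYPE_NAME_COMPLETE 0x09

    /* Append the local name as one AD structure, shortening it if it would not
     * fit in the remaining space. Returns the new total length. */
    static uint8_t append_name(uint8_t *buf, uint8_t used, const char *name)
    {
        size_t name_len = strlen(name);
        size_t max_len = MAX_AD_LENGTH - used - 2;   /* room after len + type */

        if (name_len > max_len) {
            name_len = max_len;
            buf[used + 1] = AD_TYPE_NAME_SHORT;
        } else {
            buf[used + 1] = AD_TYPE_NAME_COMPLETE;
        }

        buf[used] = name_len + 1;                    /* length covers the type byte */
        memcpy(buf + used + 2, name, name_len);

        return used + name_len + 2;
    }

    int main(void)
    {
        uint8_t scan_rsp[MAX_AD_LENGTH] = { 0 };
        uint8_t len = append_name(scan_rsp, 0, "BlueZ 5.x test device");

        printf("scan_rsp length: %u\n", len);
        return 0;
    }
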
@@ -634,6 +784,9 @@ static void update_class(struct hci_request *req)
634 if (!hdev_is_powered(hdev)) 784 if (!hdev_is_powered(hdev))
635 return; 785 return;
636 786
787 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
788 return;
789
637 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) 790 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
638 return; 791 return;
639 792
@@ -641,6 +794,9 @@ static void update_class(struct hci_request *req)
641 cod[1] = hdev->major_class; 794 cod[1] = hdev->major_class;
642 cod[2] = get_service_classes(hdev); 795 cod[2] = get_service_classes(hdev);
643 796
797 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
798 cod[1] |= 0x20;
799
644 if (memcmp(cod, hdev->dev_class, 3) == 0) 800 if (memcmp(cod, hdev->dev_class, 3) == 0)
645 return; 801 return;
646 802
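The 0x20 above is the Limited Discoverable Mode service-class bit: the Class of Device is a 24-bit value stored least significant octet first, so bit 13 of the CoD lands in bit 5 of cod[1]. A sketch of the same operation with an assumed helper and constant name:

	/* Bit 13 of the Class of Device = Limited Discoverable Mode service
	 * class; with cod[0] holding the least significant octet it is
	 * bit 5 of cod[1], i.e. 0x20.
	 */
	#define COD_LIMITED_DISCOVERABLE	0x20

	static void cod_set_limited(u8 cod[3], bool limited)
	{
		if (limited)
			cod[1] |= COD_LIMITED_DISCOVERABLE;
		else
			cod[1] &= ~COD_LIMITED_DISCOVERABLE;
	}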
@@ -765,18 +921,6 @@ static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
765 } 921 }
766} 922}
767 923
768static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
769{
770 struct pending_cmd *cmd;
771
772 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
773 if (cmd->opcode == opcode)
774 return cmd;
775 }
776
777 return NULL;
778}
779
780static void mgmt_pending_remove(struct pending_cmd *cmd) 924static void mgmt_pending_remove(struct pending_cmd *cmd)
781{ 925{
782 list_del(&cmd->list); 926 list_del(&cmd->list);
@@ -939,6 +1083,7 @@ static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
939{ 1083{
940 struct pending_cmd *cmd; 1084 struct pending_cmd *cmd;
941 struct mgmt_mode *cp; 1085 struct mgmt_mode *cp;
1086 struct hci_request req;
942 bool changed; 1087 bool changed;
943 1088
944 BT_DBG("status 0x%02x", status); 1089 BT_DBG("status 0x%02x", status);
@@ -952,22 +1097,38 @@ static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
952 if (status) { 1097 if (status) {
953 u8 mgmt_err = mgmt_status(status); 1098 u8 mgmt_err = mgmt_status(status);
954 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); 1099 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1100 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
955 goto remove_cmd; 1101 goto remove_cmd;
956 } 1102 }
957 1103
958 cp = cmd->param; 1104 cp = cmd->param;
959 if (cp->val) 1105 if (cp->val) {
960 changed = !test_and_set_bit(HCI_DISCOVERABLE, 1106 changed = !test_and_set_bit(HCI_DISCOVERABLE,
961 &hdev->dev_flags); 1107 &hdev->dev_flags);
962 else 1108
1109 if (hdev->discov_timeout > 0) {
1110 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1111 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1112 to);
1113 }
1114 } else {
963 changed = test_and_clear_bit(HCI_DISCOVERABLE, 1115 changed = test_and_clear_bit(HCI_DISCOVERABLE,
964 &hdev->dev_flags); 1116 &hdev->dev_flags);
1117 }
965 1118
966 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev); 1119 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
967 1120
968 if (changed) 1121 if (changed)
969 new_settings(hdev, cmd->sk); 1122 new_settings(hdev, cmd->sk);
970 1123
1124 /* When the discoverable mode gets changed, make sure
1125 * that class of device has the limited discoverable
1126 * bit correctly set.
1127 */
1128 hci_req_init(&req, hdev);
1129 update_class(&req);
1130 hci_req_run(&req, NULL);
1131
971remove_cmd: 1132remove_cmd:
972 mgmt_pending_remove(cmd); 1133 mgmt_pending_remove(cmd);
973 1134
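The timeout armed in the completion handler is ordinary delayed work: the mgmt timeout is carried in seconds and converted to jiffies before queuing. A sketch of that pattern, with the helper name assumed:

	/* Arm (or re-arm) the discoverable timeout; hdev->discov_timeout is
	 * in seconds, queue_delayed_work() wants jiffies.
	 */
	static void arm_discov_timeout(struct hci_dev *hdev)
	{
		unsigned long to;

		if (!hdev->discov_timeout)
			return;

		to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->discov_off, to);
	}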
@@ -982,22 +1143,27 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
982 struct pending_cmd *cmd; 1143 struct pending_cmd *cmd;
983 struct hci_request req; 1144 struct hci_request req;
984 u16 timeout; 1145 u16 timeout;
985 u8 scan, status; 1146 u8 scan;
986 int err; 1147 int err;
987 1148
988 BT_DBG("request for %s", hdev->name); 1149 BT_DBG("request for %s", hdev->name);
989 1150
990 status = mgmt_bredr_support(hdev); 1151 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
991 if (status) 1152 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
992 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, 1153 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
993 status); 1154 MGMT_STATUS_REJECTED);
994 1155
995 if (cp->val != 0x00 && cp->val != 0x01) 1156 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
996 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, 1157 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
997 MGMT_STATUS_INVALID_PARAMS); 1158 MGMT_STATUS_INVALID_PARAMS);
998 1159
999 timeout = __le16_to_cpu(cp->timeout); 1160 timeout = __le16_to_cpu(cp->timeout);
1000 if (!cp->val && timeout > 0) 1161
1162 /* Disabling discoverable requires that no timeout is set,
1163 * and enabling limited discoverable requires a timeout.
1164 */
1165 if ((cp->val == 0x00 && timeout > 0) ||
1166 (cp->val == 0x02 && timeout == 0))
1001 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, 1167 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1002 MGMT_STATUS_INVALID_PARAMS); 1168 MGMT_STATUS_INVALID_PARAMS);
1003 1169
@@ -1025,6 +1191,10 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1025 if (!hdev_is_powered(hdev)) { 1191 if (!hdev_is_powered(hdev)) {
1026 bool changed = false; 1192 bool changed = false;
1027 1193
1194 /* Setting limited discoverable when powered off is
1195 * not a valid operation since it requires a timeout
1196 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1197 */
1028 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) { 1198 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1029 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags); 1199 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1030 changed = true; 1200 changed = true;
@@ -1040,16 +1210,20 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1040 goto failed; 1210 goto failed;
1041 } 1211 }
1042 1212
1043 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) { 1213 /* If the current mode is the same, then just update the timeout
1044 if (hdev->discov_timeout > 0) { 1214 * value with the new value. And if only the timeout gets updated,
1045 cancel_delayed_work(&hdev->discov_off); 1215 * then no need for any HCI transactions.
1046 hdev->discov_timeout = 0; 1216 */
1047 } 1217 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1218 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1219 &hdev->dev_flags)) {
1220 cancel_delayed_work(&hdev->discov_off);
1221 hdev->discov_timeout = timeout;
1048 1222
1049 if (cp->val && timeout > 0) { 1223 if (cp->val && hdev->discov_timeout > 0) {
1050 hdev->discov_timeout = timeout; 1224 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1051 queue_delayed_work(hdev->workqueue, &hdev->discov_off, 1225 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1052 msecs_to_jiffies(hdev->discov_timeout * 1000)); 1226 to);
1053 } 1227 }
1054 1228
1055 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev); 1229 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
@@ -1062,24 +1236,66 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1062 goto failed; 1236 goto failed;
1063 } 1237 }
1064 1238
1239 /* Cancel any potential discoverable timeout that might be
1240 * still active and store new timeout value. The arming of
1241 * the timeout happens in the complete handler.
1242 */
1243 cancel_delayed_work(&hdev->discov_off);
1244 hdev->discov_timeout = timeout;
1245
1246 /* Limited discoverable mode */
1247 if (cp->val == 0x02)
1248 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1249 else
1250 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1251
1065 hci_req_init(&req, hdev); 1252 hci_req_init(&req, hdev);
1066 1253
1254 /* The procedure for LE-only controllers is much simpler - just
1255 * update the advertising data.
1256 */
1257 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1258 goto update_ad;
1259
1067 scan = SCAN_PAGE; 1260 scan = SCAN_PAGE;
1068 1261
1069 if (cp->val) 1262 if (cp->val) {
1263 struct hci_cp_write_current_iac_lap hci_cp;
1264
1265 if (cp->val == 0x02) {
1266 /* Limited discoverable mode */
1267 hci_cp.num_iac = 2;
1268 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1269 hci_cp.iac_lap[1] = 0x8b;
1270 hci_cp.iac_lap[2] = 0x9e;
1271 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1272 hci_cp.iac_lap[4] = 0x8b;
1273 hci_cp.iac_lap[5] = 0x9e;
1274 } else {
1275 /* General discoverable mode */
1276 hci_cp.num_iac = 1;
1277 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1278 hci_cp.iac_lap[1] = 0x8b;
1279 hci_cp.iac_lap[2] = 0x9e;
1280 }
1281
1282 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1283 (hci_cp.num_iac * 3) + 1, &hci_cp);
1284
1070 scan |= SCAN_INQUIRY; 1285 scan |= SCAN_INQUIRY;
1071 else 1286 } else {
1072 cancel_delayed_work(&hdev->discov_off); 1287 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1288 }
1073 1289
1074 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); 1290 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1291
1292update_ad:
1293 update_adv_data(&req);
1075 1294
1076 err = hci_req_run(&req, set_discoverable_complete); 1295 err = hci_req_run(&req, set_discoverable_complete);
1077 if (err < 0) 1296 if (err < 0)
1078 mgmt_pending_remove(cmd); 1297 mgmt_pending_remove(cmd);
1079 1298
1080 if (cp->val)
1081 hdev->discov_timeout = timeout;
1082
1083failed: 1299failed:
1084 hci_dev_unlock(hdev); 1300 hci_dev_unlock(hdev);
1085 return err; 1301 return err;
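The inquiry access codes written above are 3-octet LAPs stored least significant octet first: GIAC is 0x9E8B33 and LIAC is 0x9E8B00, which is why the byte sequences read 0x33 0x8b 0x9e and 0x00 0x8b 0x9e. A sketch of the encoding with an assumed helper name:

	/* Store a 24-bit inquiry access code LAP little-endian, as expected
	 * by hci_cp_write_current_iac_lap above.
	 */
	static void put_iac_lap(u8 lap[3], u32 iac)
	{
		lap[0] = iac & 0xff;		/* LSB first */
		lap[1] = (iac >> 8) & 0xff;
		lap[2] = (iac >> 16) & 0xff;
	}

	/* Limited discoverable mode as above:
	 *   put_iac_lap(&hci_cp.iac_lap[0], 0x9e8b00);    LIAC
	 *   put_iac_lap(&hci_cp.iac_lap[3], 0x9e8b33);    GIAC
	 */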
@@ -1091,6 +1307,9 @@ static void write_fast_connectable(struct hci_request *req, bool enable)
1091 struct hci_cp_write_page_scan_activity acp; 1307 struct hci_cp_write_page_scan_activity acp;
1092 u8 type; 1308 u8 type;
1093 1309
1310 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1311 return;
1312
1094 if (hdev->hci_ver < BLUETOOTH_VER_1_2) 1313 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1095 return; 1314 return;
1096 1315
@@ -1146,10 +1365,7 @@ static void enable_advertising(struct hci_request *req)
1146 cp.min_interval = __constant_cpu_to_le16(0x0800); 1365 cp.min_interval = __constant_cpu_to_le16(0x0800);
1147 cp.max_interval = __constant_cpu_to_le16(0x0800); 1366 cp.max_interval = __constant_cpu_to_le16(0x0800);
1148 cp.type = get_adv_type(hdev); 1367 cp.type = get_adv_type(hdev);
1149 if (bacmp(&hdev->bdaddr, BDADDR_ANY)) 1368 cp.own_address_type = hdev->own_addr_type;
1150 cp.own_address_type = ADDR_LE_DEV_PUBLIC;
1151 else
1152 cp.own_address_type = ADDR_LE_DEV_RANDOM;
1153 cp.channel_map = 0x07; 1369 cp.channel_map = 0x07;
1154 1370
1155 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp); 1371 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
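The removed branch chose the own address type from whether the controller has a public bdaddr; the patch reads hdev->own_addr_type instead, which presumably captures that decision once at setup time. A sketch of the selection the old code performed inline (helper name assumed):

	/* Sketch of the address-type choice that the removed lines made
	 * inline; the patch centralizes it in hdev->own_addr_type.
	 */
	static u8 pick_own_addr_type(struct hci_dev *hdev)
	{
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			return ADDR_LE_DEV_PUBLIC;	/* controller has a public address */

		return ADDR_LE_DEV_RANDOM;		/* fall back to a random address */
	}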
@@ -1202,6 +1418,32 @@ unlock:
1202 hci_dev_unlock(hdev); 1418 hci_dev_unlock(hdev);
1203} 1419}
1204 1420
1421static int set_connectable_update_settings(struct hci_dev *hdev,
1422 struct sock *sk, u8 val)
1423{
1424 bool changed = false;
1425 int err;
1426
1427 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1428 changed = true;
1429
1430 if (val) {
1431 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1432 } else {
1433 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1434 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1435 }
1436
1437 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1438 if (err < 0)
1439 return err;
1440
1441 if (changed)
1442 return new_settings(hdev, sk);
1443
1444 return 0;
1445}
1446
1205static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, 1447static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1206 u16 len) 1448 u16 len)
1207{ 1449{
@@ -1225,25 +1467,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1225 hci_dev_lock(hdev); 1467 hci_dev_lock(hdev);
1226 1468
1227 if (!hdev_is_powered(hdev)) { 1469 if (!hdev_is_powered(hdev)) {
1228 bool changed = false; 1470 err = set_connectable_update_settings(hdev, sk, cp->val);
1229
1230 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1231 changed = true;
1232
1233 if (cp->val) {
1234 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1235 } else {
1236 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1237 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1238 }
1239
1240 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1241 if (err < 0)
1242 goto failed;
1243
1244 if (changed)
1245 err = new_settings(hdev, sk);
1246
1247 goto failed; 1471 goto failed;
1248 } 1472 }
1249 1473
@@ -1262,16 +1486,24 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1262 1486
1263 hci_req_init(&req, hdev); 1487 hci_req_init(&req, hdev);
1264 1488
1265 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) && 1489 /* If BR/EDR is not enabled and we disable advertising as a
1266 cp->val != test_bit(HCI_PSCAN, &hdev->flags)) { 1490 * by-product of disabling connectable, we need to update the
1267 1491 * advertising flags.
1492 */
1493 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1494 if (!cp->val) {
1495 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1496 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1497 }
1498 update_adv_data(&req);
1499 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1268 if (cp->val) { 1500 if (cp->val) {
1269 scan = SCAN_PAGE; 1501 scan = SCAN_PAGE;
1270 } else { 1502 } else {
1271 scan = 0; 1503 scan = 0;
1272 1504
1273 if (test_bit(HCI_ISCAN, &hdev->flags) && 1505 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1274 hdev->discov_timeout > 0) 1506 hdev->discov_timeout > 0)
1275 cancel_delayed_work(&hdev->discov_off); 1507 cancel_delayed_work(&hdev->discov_off);
1276 } 1508 }
1277 1509
@@ -1297,8 +1529,8 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1297 if (err < 0) { 1529 if (err < 0) {
1298 mgmt_pending_remove(cmd); 1530 mgmt_pending_remove(cmd);
1299 if (err == -ENODATA) 1531 if (err == -ENODATA)
1300 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, 1532 err = set_connectable_update_settings(hdev, sk,
1301 hdev); 1533 cp->val);
1302 goto failed; 1534 goto failed;
1303 } 1535 }
1304 1536
@@ -1556,6 +1788,24 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status)
1556 1788
1557 if (match.sk) 1789 if (match.sk)
1558 sock_put(match.sk); 1790 sock_put(match.sk);
1791
1792 /* Make sure the controller has a good default for
1793 * advertising data. Restrict the update to when LE
1794 * has actually been enabled. During power on, the
1795 * update in powered_update_hci will take care of it.
1796 */
1797 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1798 struct hci_request req;
1799
1800 hci_dev_lock(hdev);
1801
1802 hci_req_init(&req, hdev);
1803 update_adv_data(&req);
1804 update_scan_rsp_data(&req);
1805 hci_req_run(&req, NULL);
1806
1807 hci_dev_unlock(hdev);
1808 }
1559} 1809}
1560 1810
1561static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) 1811static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
@@ -1623,18 +1873,18 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1623 goto unlock; 1873 goto unlock;
1624 } 1874 }
1625 1875
1876 hci_req_init(&req, hdev);
1877
1626 memset(&hci_cp, 0, sizeof(hci_cp)); 1878 memset(&hci_cp, 0, sizeof(hci_cp));
1627 1879
1628 if (val) { 1880 if (val) {
1629 hci_cp.le = val; 1881 hci_cp.le = val;
1630 hci_cp.simul = lmp_le_br_capable(hdev); 1882 hci_cp.simul = lmp_le_br_capable(hdev);
1883 } else {
1884 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1885 disable_advertising(&req);
1631 } 1886 }
1632 1887
1633 hci_req_init(&req, hdev);
1634
1635 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) && !val)
1636 disable_advertising(&req);
1637
1638 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp), 1888 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1639 &hci_cp); 1889 &hci_cp);
1640 1890
@@ -2772,8 +3022,11 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2772 update_eir(&req); 3022 update_eir(&req);
2773 } 3023 }
2774 3024
3025 /* The name is stored in the scan response data and so
 3026 * no need to update the advertising data here.

3027 */
2775 if (lmp_le_capable(hdev)) 3028 if (lmp_le_capable(hdev))
2776 hci_update_ad(&req); 3029 update_scan_rsp_data(&req);
2777 3030
2778 err = hci_req_run(&req, set_name_complete); 3031 err = hci_req_run(&req, set_name_complete);
2779 if (err < 0) 3032 if (err < 0)
@@ -3038,10 +3291,7 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3038 param_cp.type = LE_SCAN_ACTIVE; 3291 param_cp.type = LE_SCAN_ACTIVE;
3039 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT); 3292 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3040 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN); 3293 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3041 if (bacmp(&hdev->bdaddr, BDADDR_ANY)) 3294 param_cp.own_address_type = hdev->own_addr_type;
3042 param_cp.own_address_type = ADDR_LE_DEV_PUBLIC;
3043 else
3044 param_cp.own_address_type = ADDR_LE_DEV_RANDOM;
3045 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), 3295 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3046 &param_cp); 3296 &param_cp);
3047 3297
@@ -3725,7 +3975,7 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
3725 goto unlock; 3975 goto unlock;
3726 } 3976 }
3727 3977
3728 /* We need to flip the bit already here so that hci_update_ad 3978 /* We need to flip the bit already here so that update_adv_data
3729 * generates the correct flags. 3979 * generates the correct flags.
3730 */ 3980 */
3731 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags); 3981 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
@@ -3735,7 +3985,10 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
3735 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) 3985 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3736 set_bredr_scan(&req); 3986 set_bredr_scan(&req);
3737 3987
3738 hci_update_ad(&req); 3988 /* Since only the advertising data flags will change, there
3989 * is no need to update the scan response data.
3990 */
3991 update_adv_data(&req);
3739 3992
3740 err = hci_req_run(&req, set_bredr_complete); 3993 err = hci_req_run(&req, set_bredr_complete);
3741 if (err < 0) 3994 if (err < 0)
@@ -4036,9 +4289,6 @@ static int powered_update_hci(struct hci_dev *hdev)
4036 cp.simul != lmp_host_le_br_capable(hdev)) 4289 cp.simul != lmp_host_le_br_capable(hdev))
4037 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, 4290 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4038 sizeof(cp), &cp); 4291 sizeof(cp), &cp);
4039
4040 /* In case BR/EDR was toggled during the AUTO_OFF phase */
4041 hci_update_ad(&req);
4042 } 4292 }
4043 4293
4044 if (lmp_le_capable(hdev)) { 4294 if (lmp_le_capable(hdev)) {
@@ -4047,6 +4297,15 @@ static int powered_update_hci(struct hci_dev *hdev)
4047 hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6, 4297 hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
4048 &hdev->static_addr); 4298 &hdev->static_addr);
4049 4299
4300 /* Make sure the controller has a good default for
4301 * advertising data. This also applies to the case
4302 * where BR/EDR was toggled during the AUTO_OFF phase.
4303 */
4304 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
4305 update_adv_data(&req);
4306 update_scan_rsp_data(&req);
4307 }
4308
4050 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) 4309 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
4051 enable_advertising(&req); 4310 enable_advertising(&req);
4052 } 4311 }
@@ -4121,59 +4380,91 @@ void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4121 mgmt_pending_remove(cmd); 4380 mgmt_pending_remove(cmd);
4122} 4381}
4123 4382
4124int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable) 4383void mgmt_discoverable_timeout(struct hci_dev *hdev)
4125{ 4384{
4126 bool changed = false; 4385 struct hci_request req;
4127 int err = 0; 4386
4387 hci_dev_lock(hdev);
4388
4389 /* When discoverable timeout triggers, then just make sure
4390 * the limited discoverable flag is cleared. Even in the case
4391 * of a timeout triggered from general discoverable, it is
4392 * safe to unconditionally clear the flag.
4393 */
4394 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4395 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4396
4397 hci_req_init(&req, hdev);
4398 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4399 u8 scan = SCAN_PAGE;
4400 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
4401 sizeof(scan), &scan);
4402 }
4403 update_class(&req);
4404 update_adv_data(&req);
4405 hci_req_run(&req, NULL);
4406
4407 hdev->discov_timeout = 0;
4408
4409 new_settings(hdev, NULL);
4410
4411 hci_dev_unlock(hdev);
4412}
4413
4414void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
4415{
4416 bool changed;
4128 4417
4129 /* Nothing needed here if there's a pending command since that 4418 /* Nothing needed here if there's a pending command since that
4130 * commands request completion callback takes care of everything 4419 * commands request completion callback takes care of everything
4131 * necessary. 4420 * necessary.
4132 */ 4421 */
4133 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev)) 4422 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
4134 return 0; 4423 return;
4135 4424
4136 if (discoverable) { 4425 if (discoverable) {
4137 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) 4426 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4138 changed = true;
4139 } else { 4427 } else {
4140 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) 4428 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4141 changed = true; 4429 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4142 } 4430 }
4143 4431
4144 if (changed) 4432 if (changed) {
4145 err = new_settings(hdev, NULL); 4433 struct hci_request req;
4146 4434
4147 return err; 4435 /* In case this change in discoverable was triggered by
4436 * a disabling of connectable there could be a need to
4437 * update the advertising flags.
4438 */
4439 hci_req_init(&req, hdev);
4440 update_adv_data(&req);
4441 hci_req_run(&req, NULL);
4442
4443 new_settings(hdev, NULL);
4444 }
4148} 4445}
4149 4446
4150int mgmt_connectable(struct hci_dev *hdev, u8 connectable) 4447void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4151{ 4448{
4152 bool changed = false; 4449 bool changed;
4153 int err = 0;
4154 4450
4155 /* Nothing needed here if there's a pending command since that 4451 /* Nothing needed here if there's a pending command since that
4156 * commands request completion callback takes care of everything 4452 * commands request completion callback takes care of everything
4157 * necessary. 4453 * necessary.
4158 */ 4454 */
4159 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) 4455 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4160 return 0; 4456 return;
4161 4457
4162 if (connectable) { 4458 if (connectable)
4163 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags)) 4459 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4164 changed = true; 4460 else
4165 } else { 4461 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4166 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4167 changed = true;
4168 }
4169 4462
4170 if (changed) 4463 if (changed)
4171 err = new_settings(hdev, NULL); 4464 new_settings(hdev, NULL);
4172
4173 return err;
4174} 4465}
4175 4466
4176int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status) 4467void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4177{ 4468{
4178 u8 mgmt_err = mgmt_status(status); 4469 u8 mgmt_err = mgmt_status(status);
4179 4470
@@ -4184,12 +4475,10 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4184 if (scan & SCAN_INQUIRY) 4475 if (scan & SCAN_INQUIRY)
4185 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, 4476 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4186 cmd_status_rsp, &mgmt_err); 4477 cmd_status_rsp, &mgmt_err);
4187
4188 return 0;
4189} 4478}
4190 4479
4191int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, 4480void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4192 bool persistent) 4481 bool persistent)
4193{ 4482{
4194 struct mgmt_ev_new_link_key ev; 4483 struct mgmt_ev_new_link_key ev;
4195 4484
@@ -4202,10 +4491,10 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4202 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE); 4491 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
4203 ev.key.pin_len = key->pin_len; 4492 ev.key.pin_len = key->pin_len;
4204 4493
4205 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL); 4494 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
4206} 4495}
4207 4496
4208int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent) 4497void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
4209{ 4498{
4210 struct mgmt_ev_new_long_term_key ev; 4499 struct mgmt_ev_new_long_term_key ev;
4211 4500
@@ -4224,8 +4513,18 @@ int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
4224 memcpy(ev.key.rand, key->rand, sizeof(key->rand)); 4513 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
4225 memcpy(ev.key.val, key->val, sizeof(key->val)); 4514 memcpy(ev.key.val, key->val, sizeof(key->val));
4226 4515
4227 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), 4516 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
4228 NULL); 4517}
4518
4519static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
4520 u8 data_len)
4521{
4522 eir[eir_len++] = sizeof(type) + data_len;
4523 eir[eir_len++] = type;
4524 memcpy(&eir[eir_len], data, data_len);
4525 eir_len += data_len;
4526
4527 return eir_len;
4229} 4528}
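eir_append_data() returns the running length, so callers can chain appends into one buffer. A usage sketch (buffer contents chosen only for illustration):

	/* Usage sketch for eir_append_data(): chain two AD/EIR structures,
	 * carrying the running length forward.
	 */
	static u16 build_example_eir(u8 *eir)
	{
		const u8 name[] = "example";
		u8 flags = LE_AD_GENERAL;
		u16 eir_len = 0;

		eir_len = eir_append_data(eir, eir_len, EIR_FLAGS, &flags, 1);
		eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
					  (u8 *) name, sizeof(name) - 1);

		return eir_len;
	}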
4230 4529
4231void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 4530void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
@@ -4345,7 +4644,7 @@ void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4345 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL); 4644 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
4346} 4645}
4347 4646
4348int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure) 4647void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
4349{ 4648{
4350 struct mgmt_ev_pin_code_request ev; 4649 struct mgmt_ev_pin_code_request ev;
4351 4650
@@ -4353,52 +4652,45 @@ int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
4353 ev.addr.type = BDADDR_BREDR; 4652 ev.addr.type = BDADDR_BREDR;
4354 ev.secure = secure; 4653 ev.secure = secure;
4355 4654
4356 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), 4655 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
4357 NULL);
4358} 4656}
4359 4657
4360int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 4658void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4361 u8 status) 4659 u8 status)
4362{ 4660{
4363 struct pending_cmd *cmd; 4661 struct pending_cmd *cmd;
4364 struct mgmt_rp_pin_code_reply rp; 4662 struct mgmt_rp_pin_code_reply rp;
4365 int err;
4366 4663
4367 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev); 4664 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
4368 if (!cmd) 4665 if (!cmd)
4369 return -ENOENT; 4666 return;
4370 4667
4371 bacpy(&rp.addr.bdaddr, bdaddr); 4668 bacpy(&rp.addr.bdaddr, bdaddr);
4372 rp.addr.type = BDADDR_BREDR; 4669 rp.addr.type = BDADDR_BREDR;
4373 4670
4374 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, 4671 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
4375 mgmt_status(status), &rp, sizeof(rp)); 4672 mgmt_status(status), &rp, sizeof(rp));
4376 4673
4377 mgmt_pending_remove(cmd); 4674 mgmt_pending_remove(cmd);
4378
4379 return err;
4380} 4675}
4381 4676
4382int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 4677void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4383 u8 status) 4678 u8 status)
4384{ 4679{
4385 struct pending_cmd *cmd; 4680 struct pending_cmd *cmd;
4386 struct mgmt_rp_pin_code_reply rp; 4681 struct mgmt_rp_pin_code_reply rp;
4387 int err;
4388 4682
4389 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev); 4683 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
4390 if (!cmd) 4684 if (!cmd)
4391 return -ENOENT; 4685 return;
4392 4686
4393 bacpy(&rp.addr.bdaddr, bdaddr); 4687 bacpy(&rp.addr.bdaddr, bdaddr);
4394 rp.addr.type = BDADDR_BREDR; 4688 rp.addr.type = BDADDR_BREDR;
4395 4689
4396 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY, 4690 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
4397 mgmt_status(status), &rp, sizeof(rp)); 4691 mgmt_status(status), &rp, sizeof(rp));
4398 4692
4399 mgmt_pending_remove(cmd); 4693 mgmt_pending_remove(cmd);
4400
4401 return err;
4402} 4694}
4403 4695
4404int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr, 4696int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
@@ -4500,8 +4792,8 @@ int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
4500 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL); 4792 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
4501} 4793}
4502 4794
4503int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 4795void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4504 u8 addr_type, u8 status) 4796 u8 addr_type, u8 status)
4505{ 4797{
4506 struct mgmt_ev_auth_failed ev; 4798 struct mgmt_ev_auth_failed ev;
4507 4799
@@ -4509,40 +4801,36 @@ int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4509 ev.addr.type = link_to_bdaddr(link_type, addr_type); 4801 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4510 ev.status = mgmt_status(status); 4802 ev.status = mgmt_status(status);
4511 4803
4512 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL); 4804 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
4513} 4805}
4514 4806
4515int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status) 4807void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
4516{ 4808{
4517 struct cmd_lookup match = { NULL, hdev }; 4809 struct cmd_lookup match = { NULL, hdev };
4518 bool changed = false; 4810 bool changed;
4519 int err = 0;
4520 4811
4521 if (status) { 4812 if (status) {
4522 u8 mgmt_err = mgmt_status(status); 4813 u8 mgmt_err = mgmt_status(status);
4523 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, 4814 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
4524 cmd_status_rsp, &mgmt_err); 4815 cmd_status_rsp, &mgmt_err);
4525 return 0; 4816 return;
4526 } 4817 }
4527 4818
4528 if (test_bit(HCI_AUTH, &hdev->flags)) { 4819 if (test_bit(HCI_AUTH, &hdev->flags))
4529 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) 4820 changed = !test_and_set_bit(HCI_LINK_SECURITY,
4530 changed = true; 4821 &hdev->dev_flags);
4531 } else { 4822 else
4532 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) 4823 changed = test_and_clear_bit(HCI_LINK_SECURITY,
4533 changed = true; 4824 &hdev->dev_flags);
4534 }
4535 4825
4536 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp, 4826 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
4537 &match); 4827 &match);
4538 4828
4539 if (changed) 4829 if (changed)
4540 err = new_settings(hdev, match.sk); 4830 new_settings(hdev, match.sk);
4541 4831
4542 if (match.sk) 4832 if (match.sk)
4543 sock_put(match.sk); 4833 sock_put(match.sk);
4544
4545 return err;
4546} 4834}
4547 4835
4548static void clear_eir(struct hci_request *req) 4836static void clear_eir(struct hci_request *req)
@@ -4560,12 +4848,11 @@ static void clear_eir(struct hci_request *req)
4560 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); 4848 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
4561} 4849}
4562 4850
4563int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status) 4851void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4564{ 4852{
4565 struct cmd_lookup match = { NULL, hdev }; 4853 struct cmd_lookup match = { NULL, hdev };
4566 struct hci_request req; 4854 struct hci_request req;
4567 bool changed = false; 4855 bool changed = false;
4568 int err = 0;
4569 4856
4570 if (status) { 4857 if (status) {
4571 u8 mgmt_err = mgmt_status(status); 4858 u8 mgmt_err = mgmt_status(status);
@@ -4573,13 +4860,12 @@ int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4573 if (enable && test_and_clear_bit(HCI_SSP_ENABLED, 4860 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
4574 &hdev->dev_flags)) { 4861 &hdev->dev_flags)) {
4575 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags); 4862 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4576 err = new_settings(hdev, NULL); 4863 new_settings(hdev, NULL);
4577 } 4864 }
4578 4865
4579 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp, 4866 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
4580 &mgmt_err); 4867 &mgmt_err);
4581 4868 return;
4582 return err;
4583 } 4869 }
4584 4870
4585 if (enable) { 4871 if (enable) {
@@ -4596,7 +4882,7 @@ int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4596 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match); 4882 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
4597 4883
4598 if (changed) 4884 if (changed)
4599 err = new_settings(hdev, match.sk); 4885 new_settings(hdev, match.sk);
4600 4886
4601 if (match.sk) 4887 if (match.sk)
4602 sock_put(match.sk); 4888 sock_put(match.sk);
@@ -4609,8 +4895,6 @@ int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4609 clear_eir(&req); 4895 clear_eir(&req);
4610 4896
4611 hci_req_run(&req, NULL); 4897 hci_req_run(&req, NULL);
4612
4613 return err;
4614} 4898}
4615 4899
4616static void sk_lookup(struct pending_cmd *cmd, void *data) 4900static void sk_lookup(struct pending_cmd *cmd, void *data)
@@ -4623,33 +4907,30 @@ static void sk_lookup(struct pending_cmd *cmd, void *data)
4623 } 4907 }
4624} 4908}
4625 4909
4626int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, 4910void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
4627 u8 status) 4911 u8 status)
4628{ 4912{
4629 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) }; 4913 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
4630 int err = 0;
4631 4914
4632 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match); 4915 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
4633 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match); 4916 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
4634 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match); 4917 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
4635 4918
4636 if (!status) 4919 if (!status)
4637 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 4920 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
4638 3, NULL); 4921 NULL);
4639 4922
4640 if (match.sk) 4923 if (match.sk)
4641 sock_put(match.sk); 4924 sock_put(match.sk);
4642
4643 return err;
4644} 4925}
4645 4926
4646int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status) 4927void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
4647{ 4928{
4648 struct mgmt_cp_set_local_name ev; 4929 struct mgmt_cp_set_local_name ev;
4649 struct pending_cmd *cmd; 4930 struct pending_cmd *cmd;
4650 4931
4651 if (status) 4932 if (status)
4652 return 0; 4933 return;
4653 4934
4654 memset(&ev, 0, sizeof(ev)); 4935 memset(&ev, 0, sizeof(ev));
4655 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); 4936 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
@@ -4663,42 +4944,38 @@ int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
4663 * HCI dev don't send any mgmt signals. 4944 * HCI dev don't send any mgmt signals.
4664 */ 4945 */
4665 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) 4946 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4666 return 0; 4947 return;
4667 } 4948 }
4668 4949
4669 return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev), 4950 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
4670 cmd ? cmd->sk : NULL); 4951 cmd ? cmd->sk : NULL);
4671} 4952}
4672 4953
4673int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash, 4954void mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
4674 u8 *randomizer, u8 status) 4955 u8 *randomizer, u8 status)
4675{ 4956{
4676 struct pending_cmd *cmd; 4957 struct pending_cmd *cmd;
4677 int err;
4678 4958
4679 BT_DBG("%s status %u", hdev->name, status); 4959 BT_DBG("%s status %u", hdev->name, status);
4680 4960
4681 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev); 4961 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4682 if (!cmd) 4962 if (!cmd)
4683 return -ENOENT; 4963 return;
4684 4964
4685 if (status) { 4965 if (status) {
4686 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 4966 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4687 mgmt_status(status)); 4967 mgmt_status(status));
4688 } else { 4968 } else {
4689 struct mgmt_rp_read_local_oob_data rp; 4969 struct mgmt_rp_read_local_oob_data rp;
4690 4970
4691 memcpy(rp.hash, hash, sizeof(rp.hash)); 4971 memcpy(rp.hash, hash, sizeof(rp.hash));
4692 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer)); 4972 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
4693 4973
4694 err = cmd_complete(cmd->sk, hdev->id, 4974 cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4695 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp, 4975 0, &rp, sizeof(rp));
4696 sizeof(rp));
4697 } 4976 }
4698 4977
4699 mgmt_pending_remove(cmd); 4978 mgmt_pending_remove(cmd);
4700
4701 return err;
4702} 4979}
4703 4980
4704void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 4981void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 27e936a7ddd9..94d06cbfbc18 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -2154,13 +2154,6 @@ static int __init rfcomm_init(void)
2154 goto unregister; 2154 goto unregister;
2155 } 2155 }
2156 2156
2157 if (bt_debugfs) {
2158 rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444,
2159 bt_debugfs, NULL, &rfcomm_dlc_debugfs_fops);
2160 if (!rfcomm_dlc_debugfs)
2161 BT_ERR("Failed to create RFCOMM debug file");
2162 }
2163
2164 err = rfcomm_init_ttys(); 2157 err = rfcomm_init_ttys();
2165 if (err < 0) 2158 if (err < 0)
2166 goto stop; 2159 goto stop;
@@ -2171,6 +2164,13 @@ static int __init rfcomm_init(void)
2171 2164
2172 BT_INFO("RFCOMM ver %s", VERSION); 2165 BT_INFO("RFCOMM ver %s", VERSION);
2173 2166
2167 if (IS_ERR_OR_NULL(bt_debugfs))
2168 return 0;
2169
2170 rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444,
2171 bt_debugfs, NULL,
2172 &rfcomm_dlc_debugfs_fops);
2173
2174 return 0; 2174 return 0;
2175 2175
2176cleanup: 2176cleanup:
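The reworked init paths (here and in the rfcomm socket and SCO changes below) follow the same shape: print the success banner first, bail out quietly if the top-level bt_debugfs dentry is missing or an error pointer, and never treat debugfs file creation as fatal. A sketch of the pattern for a hypothetical module (the example names are not part of the patch):

	static struct dentry *example_debugfs;

	static void example_init_debugfs(const struct file_operations *fops)
	{
		/* Tolerate a missing/errored parent; debugfs is best effort */
		if (IS_ERR_OR_NULL(bt_debugfs))
			return;

		example_debugfs = debugfs_create_file("example", 0444,
						      bt_debugfs, NULL, fops);
	}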
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index df17276eb32b..c4d3d423f89b 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -1051,15 +1051,15 @@ int __init rfcomm_init_sockets(void)
1051 goto error; 1051 goto error;
1052 } 1052 }
1053 1053
1054 if (bt_debugfs) {
1055 rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
1056 bt_debugfs, NULL, &rfcomm_sock_debugfs_fops);
1057 if (!rfcomm_sock_debugfs)
1058 BT_ERR("Failed to create RFCOMM debug file");
1059 }
1060
1061 BT_INFO("RFCOMM socket layer initialized"); 1054 BT_INFO("RFCOMM socket layer initialized");
1062 1055
1056 if (IS_ERR_OR_NULL(bt_debugfs))
1057 return 0;
1058
1059 rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
1060 bt_debugfs, NULL,
1061 &rfcomm_sock_debugfs_fops);
1062
1063 return 0; 1063 return 0;
1064 1064
1065error: 1065error:
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index a92aebac56ca..12a0e51e21e1 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -1177,15 +1177,14 @@ int __init sco_init(void)
1177 goto error; 1177 goto error;
1178 } 1178 }
1179 1179
1180 if (bt_debugfs) {
1181 sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
1182 NULL, &sco_debugfs_fops);
1183 if (!sco_debugfs)
1184 BT_ERR("Failed to create SCO debug file");
1185 }
1186
1187 BT_INFO("SCO socket layer initialized"); 1180 BT_INFO("SCO socket layer initialized");
1188 1181
1182 if (IS_ERR_OR_NULL(bt_debugfs))
1183 return 0;
1184
1185 sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
1186 NULL, &sco_debugfs_fops);
1187
1189 return 0; 1188 return 0;
1190 1189
1191error: 1190error:
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 463e50c58716..85a2796cac61 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -856,7 +856,7 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
856 856
857 if (hcon->type != LE_LINK) { 857 if (hcon->type != LE_LINK) {
858 kfree_skb(skb); 858 kfree_skb(skb);
859 return -ENOTSUPP; 859 return 0;
860 } 860 }
861 861
862 if (skb->len < 1) { 862 if (skb->len < 1) {
@@ -864,7 +864,7 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
864 return -EILSEQ; 864 return -EILSEQ;
865 } 865 }
866 866
867 if (!test_bit(HCI_LE_ENABLED, &conn->hcon->hdev->dev_flags)) { 867 if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) {
868 err = -ENOTSUPP; 868 err = -ENOTSUPP;
869 reason = SMP_PAIRING_NOTSUPP; 869 reason = SMP_PAIRING_NOTSUPP;
870 goto done; 870 goto done;