path: root/net/bluetooth
Diffstat (limited to 'net/bluetooth')
-rw-r--r--  net/bluetooth/6lowpan.c          66
-rw-r--r--  net/bluetooth/Kconfig            27
-rw-r--r--  net/bluetooth/Makefile            4
-rw-r--r--  net/bluetooth/af_bluetooth.c      6
-rw-r--r--  net/bluetooth/bnep/core.c         7
-rw-r--r--  net/bluetooth/cmtp/capi.c         6
-rw-r--r--  net/bluetooth/hci_conn.c         21
-rw-r--r--  net/bluetooth/hci_core.c       1895
-rw-r--r--  net/bluetooth/hci_debugfs.c    1056
-rw-r--r--  net/bluetooth/hci_debugfs.h      26
-rw-r--r--  net/bluetooth/hci_event.c       248
-rw-r--r--  net/bluetooth/hci_request.c     556
-rw-r--r--  net/bluetooth/hci_request.h      54
-rw-r--r--  net/bluetooth/hci_sock.c        107
-rw-r--r--  net/bluetooth/l2cap_core.c       55
-rw-r--r--  net/bluetooth/l2cap_sock.c       11
-rw-r--r--  net/bluetooth/mgmt.c            617
-rw-r--r--  net/bluetooth/rfcomm/core.c       4
-rw-r--r--  net/bluetooth/rfcomm/sock.c      11
-rw-r--r--  net/bluetooth/sco.c              10
-rw-r--r--  net/bluetooth/selftest.c        244
-rw-r--r--  net/bluetooth/selftest.h         45
-rw-r--r--  net/bluetooth/smp.c             468
-rw-r--r--  net/bluetooth/smp.h              13
24 files changed, 3504 insertions, 2053 deletions
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index c989253737f0..1742b849fcff 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -31,7 +31,7 @@
 
 #define VERSION "0.1"
 
-static struct dentry *lowpan_psm_debugfs;
+static struct dentry *lowpan_enable_debugfs;
 static struct dentry *lowpan_control_debugfs;
 
 #define IFACE_NAME_TEMPLATE "bt%d"
@@ -55,11 +55,7 @@ struct skb_cb {
 static LIST_HEAD(bt_6lowpan_devices);
 static DEFINE_SPINLOCK(devices_lock);
 
-/* If psm is set to 0 (default value), then 6lowpan is disabled.
- * Other values are used to indicate a Protocol Service Multiplexer
- * value for 6lowpan.
- */
-static u16 psm_6lowpan;
+static bool enable_6lowpan;
 
 /* We are listening incoming connections via this channel
  */
@@ -761,7 +757,7 @@ static bool is_bt_6lowpan(struct hci_conn *hcon)
 	if (hcon->type != LE_LINK)
 		return false;
 
-	if (!psm_6lowpan)
+	if (!enable_6lowpan)
 		return false;
 
 	return true;
@@ -1085,7 +1081,7 @@ static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
 	if (!pchan)
 		return -EINVAL;
 
-	err = l2cap_chan_connect(pchan, cpu_to_le16(psm_6lowpan), 0,
+	err = l2cap_chan_connect(pchan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
 				 addr, dst_type);
 
 	BT_DBG("chan %p err %d", pchan, err);
@@ -1118,7 +1114,7 @@ static struct l2cap_chan *bt_6lowpan_listen(void)
 	struct l2cap_chan *pchan;
 	int err;
 
-	if (psm_6lowpan == 0)
+	if (!enable_6lowpan)
 		return NULL;
 
 	pchan = chan_get();
@@ -1130,10 +1126,9 @@ static struct l2cap_chan *bt_6lowpan_listen(void)
 
 	atomic_set(&pchan->nesting, L2CAP_NESTING_PARENT);
 
-	BT_DBG("psm 0x%04x chan %p src type %d", psm_6lowpan, pchan,
-	       pchan->src_type);
+	BT_DBG("chan %p src type %d", pchan, pchan->src_type);
 
-	err = l2cap_add_psm(pchan, addr, cpu_to_le16(psm_6lowpan));
+	err = l2cap_add_psm(pchan, addr, cpu_to_le16(L2CAP_PSM_IPSP));
 	if (err) {
 		l2cap_chan_put(pchan);
 		BT_ERR("psm cannot be added err %d", err);
@@ -1219,22 +1214,23 @@ static void disconnect_all_peers(void)
 	spin_unlock(&devices_lock);
 }
 
-struct set_psm {
+struct set_enable {
 	struct work_struct work;
-	u16 psm;
+	bool flag;
 };
 
-static void do_psm_set(struct work_struct *work)
+static void do_enable_set(struct work_struct *work)
 {
-	struct set_psm *set_psm = container_of(work, struct set_psm, work);
+	struct set_enable *set_enable = container_of(work,
+						     struct set_enable, work);
 
-	if (set_psm->psm == 0 || psm_6lowpan != set_psm->psm)
+	if (!set_enable->flag || enable_6lowpan != set_enable->flag)
 		/* Disconnect existing connections if 6lowpan is
-		 * disabled (psm = 0), or if psm changes.
+		 * disabled
 		 */
 		disconnect_all_peers();
 
-	psm_6lowpan = set_psm->psm;
+	enable_6lowpan = set_enable->flag;
 
 	if (listen_chan) {
 		l2cap_chan_close(listen_chan, 0);
@@ -1243,33 +1239,33 @@ static void do_psm_set(struct work_struct *work)
 
 	listen_chan = bt_6lowpan_listen();
 
-	kfree(set_psm);
+	kfree(set_enable);
 }
 
-static int lowpan_psm_set(void *data, u64 val)
+static int lowpan_enable_set(void *data, u64 val)
 {
-	struct set_psm *set_psm;
+	struct set_enable *set_enable;
 
-	set_psm = kzalloc(sizeof(*set_psm), GFP_KERNEL);
-	if (!set_psm)
+	set_enable = kzalloc(sizeof(*set_enable), GFP_KERNEL);
+	if (!set_enable)
 		return -ENOMEM;
 
-	set_psm->psm = val;
-	INIT_WORK(&set_psm->work, do_psm_set);
+	set_enable->flag = !!val;
+	INIT_WORK(&set_enable->work, do_enable_set);
 
-	schedule_work(&set_psm->work);
+	schedule_work(&set_enable->work);
 
 	return 0;
 }
 
-static int lowpan_psm_get(void *data, u64 *val)
+static int lowpan_enable_get(void *data, u64 *val)
 {
-	*val = psm_6lowpan;
+	*val = enable_6lowpan;
 	return 0;
 }
 
-DEFINE_SIMPLE_ATTRIBUTE(lowpan_psm_fops, lowpan_psm_get,
-			lowpan_psm_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(lowpan_enable_fops, lowpan_enable_get,
+			lowpan_enable_set, "%llu\n");
 
 static ssize_t lowpan_control_write(struct file *fp,
 				    const char __user *user_buffer,
@@ -1439,9 +1435,9 @@ static struct notifier_block bt_6lowpan_dev_notifier = {
 
 static int __init bt_6lowpan_init(void)
 {
-	lowpan_psm_debugfs = debugfs_create_file("6lowpan_psm", 0644,
+	lowpan_enable_debugfs = debugfs_create_file("6lowpan_enable", 0644,
						 bt_debugfs, NULL,
-						 &lowpan_psm_fops);
+						 &lowpan_enable_fops);
 	lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644,
						     bt_debugfs, NULL,
						     &lowpan_control_fops);
@@ -1451,7 +1447,7 @@ static int __init bt_6lowpan_init(void)
 
 static void __exit bt_6lowpan_exit(void)
 {
-	debugfs_remove(lowpan_psm_debugfs);
+	debugfs_remove(lowpan_enable_debugfs);
 	debugfs_remove(lowpan_control_debugfs);
 
 	if (listen_chan) {
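
The 6lowpan_enable toggle above uses the standard debugfs simple-attribute pattern: a u64 getter/setter pair wrapped by DEFINE_SIMPLE_ATTRIBUTE, with the actual enable/disable work deferred to a workqueue because it may tear down L2CAP channels. A minimal, self-contained sketch of that pattern (illustrative names, not the 6lowpan code itself):

#include <linux/debugfs.h>
#include <linux/fs.h>

static bool feature_enabled;

static int feature_enable_get(void *data, u64 *val)
{
	*val = feature_enabled;
	return 0;
}

static int feature_enable_set(void *data, u64 val)
{
	/* Treat any non-zero write as "enable". */
	feature_enabled = !!val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(feature_enable_fops, feature_enable_get,
			feature_enable_set, "%llu\n");

/* Registered once at init time, for example:
 *   debugfs_create_file("feature_enable", 0644, parent, NULL,
 *                       &feature_enable_fops);
 */

With the entry created under the "bluetooth" debugfs directory, the toggle is driven from userspace by writing 0 or 1 to it, e.g. /sys/kernel/debug/bluetooth/6lowpan_enable on a standard debugfs mount.
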
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 29bcafc41adf..7de74635a110 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -64,4 +64,31 @@ config BT_6LOWPAN
 	help
 	  IPv6 compression over Bluetooth Low Energy.
 
+config BT_SELFTEST
+	bool "Bluetooth self testing support"
+	depends on BT && DEBUG_KERNEL
+	help
+	  Run self tests when initializing the Bluetooth subsystem. This
+	  is a developer option and can cause significant delay when booting
+	  the system.
+
+	  When the Bluetooth subsystem is built as module, then the test
+	  cases are run first thing at module load time. When the Bluetooth
+	  subsystem is compiled into the kernel image, then the test cases
+	  are run late in the initcall hierarchy.
+
+config BT_SELFTEST_ECDH
+	bool "ECDH test cases"
+	depends on BT_LE && BT_SELFTEST
+	help
+	  Run test cases for ECDH cryptographic functionality used by the
+	  Bluetooth Low Energy Secure Connections feature.
+
+config BT_SELFTEST_SMP
+	bool "SMP test cases"
+	depends on BT_LE && BT_SELFTEST
+	help
+	  Run test cases for SMP cryptographic functionality, including both
+	  legacy SMP as well as the Secure Connections features.
+
 source "drivers/bluetooth/Kconfig"
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index a5432a6a0ae6..8e96e3072266 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -13,6 +13,8 @@ bluetooth_6lowpan-y := 6lowpan.o
 
 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
 	hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
-	a2mp.o amp.o ecc.o
+	a2mp.o amp.o ecc.o hci_request.o hci_debugfs.o
+
+bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o
 
 subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 012e3b03589d..ce22e0cfa923 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -31,6 +31,8 @@
 #include <net/bluetooth/bluetooth.h>
 #include <linux/proc_fs.h>
 
+#include "selftest.h"
+
 #define VERSION "2.20"
 
 /* Bluetooth sockets */
@@ -716,6 +718,10 @@ static int __init bt_init(void)
 
 	BT_INFO("Core ver %s", VERSION);
 
+	err = bt_selftest();
+	if (err < 0)
+		return err;
+
 	bt_debugfs = debugfs_create_dir("bluetooth", NULL);
 
 	err = bt_sysfs_init();
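
bt_init() now calls bt_selftest() unconditionally, while selftest.o is only compiled in when CONFIG_BT_SELFTEST is enabled (see the Makefile change above), so selftest.h presumably provides an inline no-op when the option is off. A plausible sketch of that kind of stub, following the usual kernel pattern (hypothetical, not the verbatim header contents):

#if IS_ENABLED(CONFIG_BT_SELFTEST)
int bt_selftest(void);
#else
static inline int bt_selftest(void)
{
	/* Self tests disabled in the configuration: nothing to run. */
	return 0;
}
#endif
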
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index ce82722d049b..05f57e491ccb 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -511,13 +511,12 @@ static int bnep_session(void *arg)
 
 static struct device *bnep_get_device(struct bnep_session *session)
 {
-	struct hci_conn *conn;
+	struct l2cap_conn *conn = l2cap_pi(session->sock->sk)->chan->conn;
 
-	conn = l2cap_pi(session->sock->sk)->chan->conn->hcon;
-	if (!conn)
+	if (!conn || !conn->hcon)
 		return NULL;
 
-	return &conn->dev;
+	return &conn->hcon->dev;
 }
 
 static struct device_type bnep_type = {
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 1ca8a87a0787..75bd2c42e3e7 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -253,8 +253,6 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s
 		if (skb->len < CAPI_MSG_BASELEN + 15)
 			break;
 
-		controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 10);
-
 		if (!info && ctrl) {
 			int len = min_t(uint, CAPI_MANUFACTURER_LEN,
 					skb->data[CAPI_MSG_BASELEN + 14]);
@@ -270,8 +268,6 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s
 		if (skb->len < CAPI_MSG_BASELEN + 32)
 			break;
 
-		controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 12);
-
 		if (!info && ctrl) {
 			ctrl->version.majorversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 16);
 			ctrl->version.minorversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 20);
@@ -285,8 +281,6 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s
 		if (skb->len < CAPI_MSG_BASELEN + 17)
 			break;
 
-		controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 12);
-
 		if (!info && ctrl) {
 			int len = min_t(uint, CAPI_SERIAL_LEN,
 					skb->data[CAPI_MSG_BASELEN + 16]);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index fe18825cc8a4..c9b8fa544785 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -25,11 +25,13 @@
 /* Bluetooth HCI connection handling. */
 
 #include <linux/export.h>
+#include <linux/debugfs.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
 
+#include "hci_request.h"
 #include "smp.h"
 #include "a2mp.h"
 
@@ -546,6 +548,8 @@ int hci_conn_del(struct hci_conn *conn)
 
 	hci_conn_del_sysfs(conn);
 
+	debugfs_remove_recursive(conn->debugfs);
+
 	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
 		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
 
@@ -629,7 +633,7 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
 	mgmt_reenable_advertising(hdev);
 }
 
-static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
+static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
 	struct hci_conn *conn;
 
@@ -1080,21 +1084,6 @@ int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
 }
 EXPORT_SYMBOL(hci_conn_check_secure);
 
-/* Change link key */
-int hci_conn_change_link_key(struct hci_conn *conn)
-{
-	BT_DBG("hcon %p", conn);
-
-	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
-		struct hci_cp_change_conn_link_key cp;
-		cp.handle = cpu_to_le16(conn->handle);
-		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
-			     sizeof(cp), &cp);
-	}
-
-	return 0;
-}
-
 /* Switch role */
 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
 {
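
hci_conn_del() now cleans up the connection's debugfs directory with debugfs_remove_recursive(), which removes a dentry together with everything created underneath it and is a no-op when passed NULL. A minimal sketch of the create/remove pairing this relies on (illustrative names, not the hci_conn code):

#include <linux/debugfs.h>

struct my_conn {
	struct dentry *debugfs;
};

static void my_conn_debugfs_add(struct my_conn *conn, struct dentry *parent)
{
	/* One directory per connection; per-connection files live below it. */
	conn->debugfs = debugfs_create_dir("conn", parent);
}

static void my_conn_debugfs_del(struct my_conn *conn)
{
	/* Removes the directory and all of its children; safe on NULL. */
	debugfs_remove_recursive(conn->debugfs);
	conn->debugfs = NULL;
}
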
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 5dcacf9607e4..3322d3f4c85a 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -37,6 +37,8 @@
 #include <net/bluetooth/l2cap.h>
 #include <net/bluetooth/mgmt.h>
 
+#include "hci_request.h"
+#include "hci_debugfs.h"
 #include "smp.h"
 
 static void hci_rx_work(struct work_struct *work);
@@ -137,941 +139,9 @@ static const struct file_operations dut_mode_fops = {
137 .llseek = default_llseek, 139 .llseek = default_llseek,
138}; 140};
139 141
140static int features_show(struct seq_file *f, void *ptr)
141{
142 struct hci_dev *hdev = f->private;
143 u8 p;
144
145 hci_dev_lock(hdev);
146 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
147 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
148 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149 hdev->features[p][0], hdev->features[p][1],
150 hdev->features[p][2], hdev->features[p][3],
151 hdev->features[p][4], hdev->features[p][5],
152 hdev->features[p][6], hdev->features[p][7]);
153 }
154 if (lmp_le_capable(hdev))
155 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157 hdev->le_features[0], hdev->le_features[1],
158 hdev->le_features[2], hdev->le_features[3],
159 hdev->le_features[4], hdev->le_features[5],
160 hdev->le_features[6], hdev->le_features[7]);
161 hci_dev_unlock(hdev);
162
163 return 0;
164}
165
166static int features_open(struct inode *inode, struct file *file)
167{
168 return single_open(file, features_show, inode->i_private);
169}
170
171static const struct file_operations features_fops = {
172 .open = features_open,
173 .read = seq_read,
174 .llseek = seq_lseek,
175 .release = single_release,
176};
177
178static int blacklist_show(struct seq_file *f, void *p)
179{
180 struct hci_dev *hdev = f->private;
181 struct bdaddr_list *b;
182
183 hci_dev_lock(hdev);
184 list_for_each_entry(b, &hdev->blacklist, list)
185 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
186 hci_dev_unlock(hdev);
187
188 return 0;
189}
190
191static int blacklist_open(struct inode *inode, struct file *file)
192{
193 return single_open(file, blacklist_show, inode->i_private);
194}
195
196static const struct file_operations blacklist_fops = {
197 .open = blacklist_open,
198 .read = seq_read,
199 .llseek = seq_lseek,
200 .release = single_release,
201};
202
203static int uuids_show(struct seq_file *f, void *p)
204{
205 struct hci_dev *hdev = f->private;
206 struct bt_uuid *uuid;
207
208 hci_dev_lock(hdev);
209 list_for_each_entry(uuid, &hdev->uuids, list) {
210 u8 i, val[16];
211
212 /* The Bluetooth UUID values are stored in big endian,
213 * but with reversed byte order. So convert them into
214 * the right order for the %pUb modifier.
215 */
216 for (i = 0; i < 16; i++)
217 val[i] = uuid->uuid[15 - i];
218
219 seq_printf(f, "%pUb\n", val);
220 }
221 hci_dev_unlock(hdev);
222
223 return 0;
224}
225
226static int uuids_open(struct inode *inode, struct file *file)
227{
228 return single_open(file, uuids_show, inode->i_private);
229}
230
231static const struct file_operations uuids_fops = {
232 .open = uuids_open,
233 .read = seq_read,
234 .llseek = seq_lseek,
235 .release = single_release,
236};
237
238static int inquiry_cache_show(struct seq_file *f, void *p)
239{
240 struct hci_dev *hdev = f->private;
241 struct discovery_state *cache = &hdev->discovery;
242 struct inquiry_entry *e;
243
244 hci_dev_lock(hdev);
245
246 list_for_each_entry(e, &cache->all, all) {
247 struct inquiry_data *data = &e->data;
248 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
249 &data->bdaddr,
250 data->pscan_rep_mode, data->pscan_period_mode,
251 data->pscan_mode, data->dev_class[2],
252 data->dev_class[1], data->dev_class[0],
253 __le16_to_cpu(data->clock_offset),
254 data->rssi, data->ssp_mode, e->timestamp);
255 }
256
257 hci_dev_unlock(hdev);
258
259 return 0;
260}
261
262static int inquiry_cache_open(struct inode *inode, struct file *file)
263{
264 return single_open(file, inquiry_cache_show, inode->i_private);
265}
266
267static const struct file_operations inquiry_cache_fops = {
268 .open = inquiry_cache_open,
269 .read = seq_read,
270 .llseek = seq_lseek,
271 .release = single_release,
272};
273
274static int link_keys_show(struct seq_file *f, void *ptr)
275{
276 struct hci_dev *hdev = f->private;
277 struct link_key *key;
278
279 rcu_read_lock();
280 list_for_each_entry_rcu(key, &hdev->link_keys, list)
281 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
282 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
283 rcu_read_unlock();
284
285 return 0;
286}
287
288static int link_keys_open(struct inode *inode, struct file *file)
289{
290 return single_open(file, link_keys_show, inode->i_private);
291}
292
293static const struct file_operations link_keys_fops = {
294 .open = link_keys_open,
295 .read = seq_read,
296 .llseek = seq_lseek,
297 .release = single_release,
298};
299
300static int dev_class_show(struct seq_file *f, void *ptr)
301{
302 struct hci_dev *hdev = f->private;
303
304 hci_dev_lock(hdev);
305 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
306 hdev->dev_class[1], hdev->dev_class[0]);
307 hci_dev_unlock(hdev);
308
309 return 0;
310}
311
312static int dev_class_open(struct inode *inode, struct file *file)
313{
314 return single_open(file, dev_class_show, inode->i_private);
315}
316
317static const struct file_operations dev_class_fops = {
318 .open = dev_class_open,
319 .read = seq_read,
320 .llseek = seq_lseek,
321 .release = single_release,
322};
323
324static int voice_setting_get(void *data, u64 *val)
325{
326 struct hci_dev *hdev = data;
327
328 hci_dev_lock(hdev);
329 *val = hdev->voice_setting;
330 hci_dev_unlock(hdev);
331
332 return 0;
333}
334
335DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
336 NULL, "0x%4.4llx\n");
337
338static int auto_accept_delay_set(void *data, u64 val)
339{
340 struct hci_dev *hdev = data;
341
342 hci_dev_lock(hdev);
343 hdev->auto_accept_delay = val;
344 hci_dev_unlock(hdev);
345
346 return 0;
347}
348
349static int auto_accept_delay_get(void *data, u64 *val)
350{
351 struct hci_dev *hdev = data;
352
353 hci_dev_lock(hdev);
354 *val = hdev->auto_accept_delay;
355 hci_dev_unlock(hdev);
356
357 return 0;
358}
359
360DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
361 auto_accept_delay_set, "%llu\n");
362
363static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
364 size_t count, loff_t *ppos)
365{
366 struct hci_dev *hdev = file->private_data;
367 char buf[3];
368
369 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
370 buf[1] = '\n';
371 buf[2] = '\0';
372 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
373}
374
375static ssize_t force_sc_support_write(struct file *file,
376 const char __user *user_buf,
377 size_t count, loff_t *ppos)
378{
379 struct hci_dev *hdev = file->private_data;
380 char buf[32];
381 size_t buf_size = min(count, (sizeof(buf)-1));
382 bool enable;
383
384 if (test_bit(HCI_UP, &hdev->flags))
385 return -EBUSY;
386
387 if (copy_from_user(buf, user_buf, buf_size))
388 return -EFAULT;
389
390 buf[buf_size] = '\0';
391 if (strtobool(buf, &enable))
392 return -EINVAL;
393
394 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
395 return -EALREADY;
396
397 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
398
399 return count;
400}
401
402static const struct file_operations force_sc_support_fops = {
403 .open = simple_open,
404 .read = force_sc_support_read,
405 .write = force_sc_support_write,
406 .llseek = default_llseek,
407};
408
409static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
410 size_t count, loff_t *ppos)
411{
412 struct hci_dev *hdev = file->private_data;
413 char buf[3];
414
415 buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y': 'N';
416 buf[1] = '\n';
417 buf[2] = '\0';
418 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
419}
420
421static ssize_t force_lesc_support_write(struct file *file,
422 const char __user *user_buf,
423 size_t count, loff_t *ppos)
424{
425 struct hci_dev *hdev = file->private_data;
426 char buf[32];
427 size_t buf_size = min(count, (sizeof(buf)-1));
428 bool enable;
429
430 if (copy_from_user(buf, user_buf, buf_size))
431 return -EFAULT;
432
433 buf[buf_size] = '\0';
434 if (strtobool(buf, &enable))
435 return -EINVAL;
436
437 if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
438 return -EALREADY;
439
440 change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);
441
442 return count;
443}
444
445static const struct file_operations force_lesc_support_fops = {
446 .open = simple_open,
447 .read = force_lesc_support_read,
448 .write = force_lesc_support_write,
449 .llseek = default_llseek,
450};
451
452static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
453 size_t count, loff_t *ppos)
454{
455 struct hci_dev *hdev = file->private_data;
456 char buf[3];
457
458 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
459 buf[1] = '\n';
460 buf[2] = '\0';
461 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
462}
463
464static const struct file_operations sc_only_mode_fops = {
465 .open = simple_open,
466 .read = sc_only_mode_read,
467 .llseek = default_llseek,
468};
469
470static int idle_timeout_set(void *data, u64 val)
471{
472 struct hci_dev *hdev = data;
473
474 if (val != 0 && (val < 500 || val > 3600000))
475 return -EINVAL;
476
477 hci_dev_lock(hdev);
478 hdev->idle_timeout = val;
479 hci_dev_unlock(hdev);
480
481 return 0;
482}
483
484static int idle_timeout_get(void *data, u64 *val)
485{
486 struct hci_dev *hdev = data;
487
488 hci_dev_lock(hdev);
489 *val = hdev->idle_timeout;
490 hci_dev_unlock(hdev);
491
492 return 0;
493}
494
495DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
496 idle_timeout_set, "%llu\n");
497
498static int rpa_timeout_set(void *data, u64 val)
499{
500 struct hci_dev *hdev = data;
501
502 /* Require the RPA timeout to be at least 30 seconds and at most
503 * 24 hours.
504 */
505 if (val < 30 || val > (60 * 60 * 24))
506 return -EINVAL;
507
508 hci_dev_lock(hdev);
509 hdev->rpa_timeout = val;
510 hci_dev_unlock(hdev);
511
512 return 0;
513}
514
515static int rpa_timeout_get(void *data, u64 *val)
516{
517 struct hci_dev *hdev = data;
518
519 hci_dev_lock(hdev);
520 *val = hdev->rpa_timeout;
521 hci_dev_unlock(hdev);
522
523 return 0;
524}
525
526DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
527 rpa_timeout_set, "%llu\n");
528
529static int sniff_min_interval_set(void *data, u64 val)
530{
531 struct hci_dev *hdev = data;
532
533 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
534 return -EINVAL;
535
536 hci_dev_lock(hdev);
537 hdev->sniff_min_interval = val;
538 hci_dev_unlock(hdev);
539
540 return 0;
541}
542
543static int sniff_min_interval_get(void *data, u64 *val)
544{
545 struct hci_dev *hdev = data;
546
547 hci_dev_lock(hdev);
548 *val = hdev->sniff_min_interval;
549 hci_dev_unlock(hdev);
550
551 return 0;
552}
553
554DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
555 sniff_min_interval_set, "%llu\n");
556
557static int sniff_max_interval_set(void *data, u64 val)
558{
559 struct hci_dev *hdev = data;
560
561 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
562 return -EINVAL;
563
564 hci_dev_lock(hdev);
565 hdev->sniff_max_interval = val;
566 hci_dev_unlock(hdev);
567
568 return 0;
569}
570
571static int sniff_max_interval_get(void *data, u64 *val)
572{
573 struct hci_dev *hdev = data;
574
575 hci_dev_lock(hdev);
576 *val = hdev->sniff_max_interval;
577 hci_dev_unlock(hdev);
578
579 return 0;
580}
581
582DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
583 sniff_max_interval_set, "%llu\n");
584
585static int conn_info_min_age_set(void *data, u64 val)
586{
587 struct hci_dev *hdev = data;
588
589 if (val == 0 || val > hdev->conn_info_max_age)
590 return -EINVAL;
591
592 hci_dev_lock(hdev);
593 hdev->conn_info_min_age = val;
594 hci_dev_unlock(hdev);
595
596 return 0;
597}
598
599static int conn_info_min_age_get(void *data, u64 *val)
600{
601 struct hci_dev *hdev = data;
602
603 hci_dev_lock(hdev);
604 *val = hdev->conn_info_min_age;
605 hci_dev_unlock(hdev);
606
607 return 0;
608}
609
610DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
611 conn_info_min_age_set, "%llu\n");
612
613static int conn_info_max_age_set(void *data, u64 val)
614{
615 struct hci_dev *hdev = data;
616
617 if (val == 0 || val < hdev->conn_info_min_age)
618 return -EINVAL;
619
620 hci_dev_lock(hdev);
621 hdev->conn_info_max_age = val;
622 hci_dev_unlock(hdev);
623
624 return 0;
625}
626
627static int conn_info_max_age_get(void *data, u64 *val)
628{
629 struct hci_dev *hdev = data;
630
631 hci_dev_lock(hdev);
632 *val = hdev->conn_info_max_age;
633 hci_dev_unlock(hdev);
634
635 return 0;
636}
637
638DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
639 conn_info_max_age_set, "%llu\n");
640
641static int identity_show(struct seq_file *f, void *p)
642{
643 struct hci_dev *hdev = f->private;
644 bdaddr_t addr;
645 u8 addr_type;
646
647 hci_dev_lock(hdev);
648
649 hci_copy_identity_address(hdev, &addr, &addr_type);
650
651 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
652 16, hdev->irk, &hdev->rpa);
653
654 hci_dev_unlock(hdev);
655
656 return 0;
657}
658
659static int identity_open(struct inode *inode, struct file *file)
660{
661 return single_open(file, identity_show, inode->i_private);
662}
663
664static const struct file_operations identity_fops = {
665 .open = identity_open,
666 .read = seq_read,
667 .llseek = seq_lseek,
668 .release = single_release,
669};
670
671static int random_address_show(struct seq_file *f, void *p)
672{
673 struct hci_dev *hdev = f->private;
674
675 hci_dev_lock(hdev);
676 seq_printf(f, "%pMR\n", &hdev->random_addr);
677 hci_dev_unlock(hdev);
678
679 return 0;
680}
681
682static int random_address_open(struct inode *inode, struct file *file)
683{
684 return single_open(file, random_address_show, inode->i_private);
685}
686
687static const struct file_operations random_address_fops = {
688 .open = random_address_open,
689 .read = seq_read,
690 .llseek = seq_lseek,
691 .release = single_release,
692};
693
694static int static_address_show(struct seq_file *f, void *p)
695{
696 struct hci_dev *hdev = f->private;
697
698 hci_dev_lock(hdev);
699 seq_printf(f, "%pMR\n", &hdev->static_addr);
700 hci_dev_unlock(hdev);
701
702 return 0;
703}
704
705static int static_address_open(struct inode *inode, struct file *file)
706{
707 return single_open(file, static_address_show, inode->i_private);
708}
709
710static const struct file_operations static_address_fops = {
711 .open = static_address_open,
712 .read = seq_read,
713 .llseek = seq_lseek,
714 .release = single_release,
715};
716
717static ssize_t force_static_address_read(struct file *file,
718 char __user *user_buf,
719 size_t count, loff_t *ppos)
720{
721 struct hci_dev *hdev = file->private_data;
722 char buf[3];
723
724 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
725 buf[1] = '\n';
726 buf[2] = '\0';
727 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
728}
729
730static ssize_t force_static_address_write(struct file *file,
731 const char __user *user_buf,
732 size_t count, loff_t *ppos)
733{
734 struct hci_dev *hdev = file->private_data;
735 char buf[32];
736 size_t buf_size = min(count, (sizeof(buf)-1));
737 bool enable;
738
739 if (test_bit(HCI_UP, &hdev->flags))
740 return -EBUSY;
741
742 if (copy_from_user(buf, user_buf, buf_size))
743 return -EFAULT;
744
745 buf[buf_size] = '\0';
746 if (strtobool(buf, &enable))
747 return -EINVAL;
748
749 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
750 return -EALREADY;
751
752 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
753
754 return count;
755}
756
757static const struct file_operations force_static_address_fops = {
758 .open = simple_open,
759 .read = force_static_address_read,
760 .write = force_static_address_write,
761 .llseek = default_llseek,
762};
763
764static int white_list_show(struct seq_file *f, void *ptr)
765{
766 struct hci_dev *hdev = f->private;
767 struct bdaddr_list *b;
768
769 hci_dev_lock(hdev);
770 list_for_each_entry(b, &hdev->le_white_list, list)
771 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
772 hci_dev_unlock(hdev);
773
774 return 0;
775}
776
777static int white_list_open(struct inode *inode, struct file *file)
778{
779 return single_open(file, white_list_show, inode->i_private);
780}
781
782static const struct file_operations white_list_fops = {
783 .open = white_list_open,
784 .read = seq_read,
785 .llseek = seq_lseek,
786 .release = single_release,
787};
788
789static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
790{
791 struct hci_dev *hdev = f->private;
792 struct smp_irk *irk;
793
794 rcu_read_lock();
795 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
796 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
797 &irk->bdaddr, irk->addr_type,
798 16, irk->val, &irk->rpa);
799 }
800 rcu_read_unlock();
801
802 return 0;
803}
804
805static int identity_resolving_keys_open(struct inode *inode, struct file *file)
806{
807 return single_open(file, identity_resolving_keys_show,
808 inode->i_private);
809}
810
811static const struct file_operations identity_resolving_keys_fops = {
812 .open = identity_resolving_keys_open,
813 .read = seq_read,
814 .llseek = seq_lseek,
815 .release = single_release,
816};
817
818static int long_term_keys_show(struct seq_file *f, void *ptr)
819{
820 struct hci_dev *hdev = f->private;
821 struct smp_ltk *ltk;
822
823 rcu_read_lock();
824 list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
825 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
826 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
827 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
828 __le64_to_cpu(ltk->rand), 16, ltk->val);
829 rcu_read_unlock();
830
831 return 0;
832}
833
834static int long_term_keys_open(struct inode *inode, struct file *file)
835{
836 return single_open(file, long_term_keys_show, inode->i_private);
837}
838
839static const struct file_operations long_term_keys_fops = {
840 .open = long_term_keys_open,
841 .read = seq_read,
842 .llseek = seq_lseek,
843 .release = single_release,
844};
845
846static int conn_min_interval_set(void *data, u64 val)
847{
848 struct hci_dev *hdev = data;
849
850 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
851 return -EINVAL;
852
853 hci_dev_lock(hdev);
854 hdev->le_conn_min_interval = val;
855 hci_dev_unlock(hdev);
856
857 return 0;
858}
859
860static int conn_min_interval_get(void *data, u64 *val)
861{
862 struct hci_dev *hdev = data;
863
864 hci_dev_lock(hdev);
865 *val = hdev->le_conn_min_interval;
866 hci_dev_unlock(hdev);
867
868 return 0;
869}
870
871DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
872 conn_min_interval_set, "%llu\n");
873
874static int conn_max_interval_set(void *data, u64 val)
875{
876 struct hci_dev *hdev = data;
877
878 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
879 return -EINVAL;
880
881 hci_dev_lock(hdev);
882 hdev->le_conn_max_interval = val;
883 hci_dev_unlock(hdev);
884
885 return 0;
886}
887
888static int conn_max_interval_get(void *data, u64 *val)
889{
890 struct hci_dev *hdev = data;
891
892 hci_dev_lock(hdev);
893 *val = hdev->le_conn_max_interval;
894 hci_dev_unlock(hdev);
895
896 return 0;
897}
898
899DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
900 conn_max_interval_set, "%llu\n");
901
902static int conn_latency_set(void *data, u64 val)
903{
904 struct hci_dev *hdev = data;
905
906 if (val > 0x01f3)
907 return -EINVAL;
908
909 hci_dev_lock(hdev);
910 hdev->le_conn_latency = val;
911 hci_dev_unlock(hdev);
912
913 return 0;
914}
915
916static int conn_latency_get(void *data, u64 *val)
917{
918 struct hci_dev *hdev = data;
919
920 hci_dev_lock(hdev);
921 *val = hdev->le_conn_latency;
922 hci_dev_unlock(hdev);
923
924 return 0;
925}
926
927DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
928 conn_latency_set, "%llu\n");
929
930static int supervision_timeout_set(void *data, u64 val)
931{
932 struct hci_dev *hdev = data;
933
934 if (val < 0x000a || val > 0x0c80)
935 return -EINVAL;
936
937 hci_dev_lock(hdev);
938 hdev->le_supv_timeout = val;
939 hci_dev_unlock(hdev);
940
941 return 0;
942}
943
944static int supervision_timeout_get(void *data, u64 *val)
945{
946 struct hci_dev *hdev = data;
947
948 hci_dev_lock(hdev);
949 *val = hdev->le_supv_timeout;
950 hci_dev_unlock(hdev);
951
952 return 0;
953}
954
955DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
956 supervision_timeout_set, "%llu\n");
957
958static int adv_channel_map_set(void *data, u64 val)
959{
960 struct hci_dev *hdev = data;
961
962 if (val < 0x01 || val > 0x07)
963 return -EINVAL;
964
965 hci_dev_lock(hdev);
966 hdev->le_adv_channel_map = val;
967 hci_dev_unlock(hdev);
968
969 return 0;
970}
971
972static int adv_channel_map_get(void *data, u64 *val)
973{
974 struct hci_dev *hdev = data;
975
976 hci_dev_lock(hdev);
977 *val = hdev->le_adv_channel_map;
978 hci_dev_unlock(hdev);
979
980 return 0;
981}
982
983DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
984 adv_channel_map_set, "%llu\n");
985
986static int adv_min_interval_set(void *data, u64 val)
987{
988 struct hci_dev *hdev = data;
989
990 if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
991 return -EINVAL;
992
993 hci_dev_lock(hdev);
994 hdev->le_adv_min_interval = val;
995 hci_dev_unlock(hdev);
996
997 return 0;
998}
999
1000static int adv_min_interval_get(void *data, u64 *val)
1001{
1002 struct hci_dev *hdev = data;
1003
1004 hci_dev_lock(hdev);
1005 *val = hdev->le_adv_min_interval;
1006 hci_dev_unlock(hdev);
1007
1008 return 0;
1009}
1010
1011DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
1012 adv_min_interval_set, "%llu\n");
1013
1014static int adv_max_interval_set(void *data, u64 val)
1015{
1016 struct hci_dev *hdev = data;
1017
1018 if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
1019 return -EINVAL;
1020
1021 hci_dev_lock(hdev);
1022 hdev->le_adv_max_interval = val;
1023 hci_dev_unlock(hdev);
1024
1025 return 0;
1026}
1027
1028static int adv_max_interval_get(void *data, u64 *val)
1029{
1030 struct hci_dev *hdev = data;
1031
1032 hci_dev_lock(hdev);
1033 *val = hdev->le_adv_max_interval;
1034 hci_dev_unlock(hdev);
1035
1036 return 0;
1037}
1038
1039DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
1040 adv_max_interval_set, "%llu\n");
1041
1042static int device_list_show(struct seq_file *f, void *ptr)
1043{
1044 struct hci_dev *hdev = f->private;
1045 struct hci_conn_params *p;
1046 struct bdaddr_list *b;
1047
1048 hci_dev_lock(hdev);
1049 list_for_each_entry(b, &hdev->whitelist, list)
1050 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
1051 list_for_each_entry(p, &hdev->le_conn_params, list) {
1052 seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
1053 p->auto_connect);
1054 }
1055 hci_dev_unlock(hdev);
1056
1057 return 0;
1058}
1059
1060static int device_list_open(struct inode *inode, struct file *file)
1061{
1062 return single_open(file, device_list_show, inode->i_private);
1063}
1064
1065static const struct file_operations device_list_fops = {
1066 .open = device_list_open,
1067 .read = seq_read,
1068 .llseek = seq_lseek,
1069 .release = single_release,
1070};
1071
 /* ---- HCI requests ---- */
 
-static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
+static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
 {
 	BT_DBG("%s result 0x%2.2x", hdev->name, result);
 
@@ -1427,43 +497,6 @@ static void le_setup(struct hci_request *req)
1427 set_bit(HCI_LE_ENABLED, &hdev->dev_flags); 497 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1428} 498}
1429 499
1430static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1431{
1432 if (lmp_ext_inq_capable(hdev))
1433 return 0x02;
1434
1435 if (lmp_inq_rssi_capable(hdev))
1436 return 0x01;
1437
1438 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1439 hdev->lmp_subver == 0x0757)
1440 return 0x01;
1441
1442 if (hdev->manufacturer == 15) {
1443 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1444 return 0x01;
1445 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1446 return 0x01;
1447 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1448 return 0x01;
1449 }
1450
1451 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1452 hdev->lmp_subver == 0x1805)
1453 return 0x01;
1454
1455 return 0x00;
1456}
1457
1458static void hci_setup_inquiry_mode(struct hci_request *req)
1459{
1460 u8 mode;
1461
1462 mode = hci_get_inquiry_mode(req->hdev);
1463
1464 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1465}
1466
1467static void hci_setup_event_mask(struct hci_request *req) 500static void hci_setup_event_mask(struct hci_request *req)
1468{ 501{
1469 struct hci_dev *hdev = req->hdev; 502 struct hci_dev *hdev = req->hdev;
@@ -1553,10 +586,16 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
 	if (lmp_le_capable(hdev))
 		le_setup(req);
 
-	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
-	 * local supported commands HCI command.
+	/* All Bluetooth 1.2 and later controllers should support the
+	 * HCI command for reading the local supported commands.
+	 *
+	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
+	 * but do not have support for this command. If that is the case,
+	 * the driver can quirk the behavior and skip reading the local
+	 * supported commands.
 	 */
-	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
+	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
+	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
 		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
 
 	if (lmp_ssp_capable(hdev)) {
@@ -1570,6 +609,7 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
 
 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
 		u8 mode = 0x01;
+
 		hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
 			    sizeof(mode), &mode);
 	} else {
@@ -1582,8 +622,18 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
 		}
 	}
 
-	if (lmp_inq_rssi_capable(hdev))
-		hci_setup_inquiry_mode(req);
+	if (lmp_inq_rssi_capable(hdev) ||
+	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
+		u8 mode;
+
+		/* If Extended Inquiry Result events are supported, then
+		 * they are clearly preferred over Inquiry Result with RSSI
+		 * events.
+		 */
+		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
+
+		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
+	}
 
 	if (lmp_inq_tx_pwr_capable(hdev))
 		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
@@ -1682,27 +732,12 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
 
 	hci_setup_event_mask(req);
 
-	/* Some Broadcom based Bluetooth controllers do not support the
-	 * Delete Stored Link Key command. They are clearly indicating its
-	 * absence in the bit mask of supported commands.
-	 *
-	 * Check the supported commands and only if the the command is marked
-	 * as supported send it. If not supported assume that the controller
-	 * does not have actual support for stored link keys which makes this
-	 * command redundant anyway.
-	 *
-	 * Some controllers indicate that they support handling deleting
-	 * stored link keys, but they don't. The quirk lets a driver
-	 * just disable this command.
-	 */
-	if (hdev->commands[6] & 0x80 &&
-	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
-		struct hci_cp_delete_stored_link_key cp;
-
-		bacpy(&cp.bdaddr, BDADDR_ANY);
-		cp.delete_all = 0x01;
-		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
-			    sizeof(cp), &cp);
-	}
+	if (hdev->commands[6] & 0x20) {
+		struct hci_cp_read_stored_link_key cp;
+
+		bacpy(&cp.bdaddr, BDADDR_ANY);
+		cp.read_all = 0x01;
+		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
+	}
 
 	if (hdev->commands[5] & 0x10)
@@ -1735,6 +770,12 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
 	 * Parameter Request
 	 */
 
+	/* If the controller supports the Data Length Extension
+	 * feature, enable the corresponding event.
+	 */
+	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
+		events[0] |= 0x40;	/* LE Data Length Change */
+
 	/* If the controller supports Extended Scanner Filter
 	 * Policies, enable the correspondig event.
 	 */
@@ -1765,6 +806,14 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
 			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
 		}
 
+		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
+			/* Read LE Maximum Data Length */
+			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
+
+			/* Read LE Suggested Default Data Length */
+			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
+		}
+
 		hci_set_le_support(req);
 	}
 
@@ -1782,6 +831,29 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt)
 {
 	struct hci_dev *hdev = req->hdev;
 
+	/* Some Broadcom based Bluetooth controllers do not support the
+	 * Delete Stored Link Key command. They are clearly indicating its
+	 * absence in the bit mask of supported commands.
+	 *
+	 * Check the supported commands and only if the the command is marked
+	 * as supported send it. If not supported assume that the controller
+	 * does not have actual support for stored link keys which makes this
+	 * command redundant anyway.
+	 *
+	 * Some controllers indicate that they support handling deleting
+	 * stored link keys, but they don't. The quirk lets a driver
+	 * just disable this command.
+	 */
+	if (hdev->commands[6] & 0x80 &&
+	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
+		struct hci_cp_delete_stored_link_key cp;
+
+		bacpy(&cp.bdaddr, BDADDR_ANY);
+		cp.delete_all = 0x01;
+		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
+			    sizeof(cp), &cp);
+	}
+
 	/* Set event mask page 2 if the HCI command for it is supported */
 	if (hdev->commands[22] & 0x04)
 		hci_set_event_mask_page_2(req);
@@ -1799,8 +871,10 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt)
 		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
 
 	/* Enable Secure Connections if supported and configured */
-	if (bredr_sc_enabled(hdev)) {
+	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
+	    bredr_sc_enabled(hdev)) {
 		u8 support = 0x01;
+
 		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
 			    sizeof(support), &support);
 	}
@@ -1841,110 +915,29 @@ static int __hci_init(struct hci_dev *hdev)
1841 if (err < 0) 915 if (err < 0)
1842 return err; 916 return err;
1843 917
1844 /* Only create debugfs entries during the initial setup 918 /* This function is only called when the controller is actually in
1845 * phase and not every time the controller gets powered on. 919 * configured state. When the controller is marked as unconfigured,
920 * this initialization procedure is not run.
921 *
922 * It means that it is possible that a controller runs through its
923 * setup phase and then discovers missing settings. If that is the
924 * case, then this function will not be called. It then will only
925 * be called during the config phase.
926 *
927 * So only when in setup phase or config phase, create the debugfs
928 * entries and register the SMP channels.
1846 */ 929 */
1847 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) 930 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
931 !test_bit(HCI_CONFIG, &hdev->dev_flags))
1848 return 0; 932 return 0;
1849 933
1850 debugfs_create_file("features", 0444, hdev->debugfs, hdev, 934 hci_debugfs_create_common(hdev);
1851 &features_fops);
1852 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1853 &hdev->manufacturer);
1854 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1855 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1856 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1857 &device_list_fops);
1858 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1859 &blacklist_fops);
1860 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1861
1862 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1863 &conn_info_min_age_fops);
1864 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1865 &conn_info_max_age_fops);
1866
1867 if (lmp_bredr_capable(hdev)) {
1868 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1869 hdev, &inquiry_cache_fops);
1870 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1871 hdev, &link_keys_fops);
1872 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1873 hdev, &dev_class_fops);
1874 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1875 hdev, &voice_setting_fops);
1876 }
1877 935
1878 if (lmp_ssp_capable(hdev)) { 936 if (lmp_bredr_capable(hdev))
1879 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs, 937 hci_debugfs_create_bredr(hdev);
1880 hdev, &auto_accept_delay_fops);
1881 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1882 hdev, &force_sc_support_fops);
1883 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1884 hdev, &sc_only_mode_fops);
1885 if (lmp_le_capable(hdev))
1886 debugfs_create_file("force_lesc_support", 0644,
1887 hdev->debugfs, hdev,
1888 &force_lesc_support_fops);
1889 }
1890
1891 if (lmp_sniff_capable(hdev)) {
1892 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1893 hdev, &idle_timeout_fops);
1894 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1895 hdev, &sniff_min_interval_fops);
1896 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1897 hdev, &sniff_max_interval_fops);
1898 }
1899 938
1900 if (lmp_le_capable(hdev)) { 939 if (lmp_le_capable(hdev))
1901 debugfs_create_file("identity", 0400, hdev->debugfs, 940 hci_debugfs_create_le(hdev);
1902 hdev, &identity_fops);
1903 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1904 hdev, &rpa_timeout_fops);
1905 debugfs_create_file("random_address", 0444, hdev->debugfs,
1906 hdev, &random_address_fops);
1907 debugfs_create_file("static_address", 0444, hdev->debugfs,
1908 hdev, &static_address_fops);
1909
1910 /* For controllers with a public address, provide a debug
1911 * option to force the usage of the configured static
1912 * address. By default the public address is used.
1913 */
1914 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1915 debugfs_create_file("force_static_address", 0644,
1916 hdev->debugfs, hdev,
1917 &force_static_address_fops);
1918
1919 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1920 &hdev->le_white_list_size);
1921 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1922 &white_list_fops);
1923 debugfs_create_file("identity_resolving_keys", 0400,
1924 hdev->debugfs, hdev,
1925 &identity_resolving_keys_fops);
1926 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1927 hdev, &long_term_keys_fops);
1928 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1929 hdev, &conn_min_interval_fops);
1930 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1931 hdev, &conn_max_interval_fops);
1932 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1933 hdev, &conn_latency_fops);
1934 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1935 hdev, &supervision_timeout_fops);
1936 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1937 hdev, &adv_channel_map_fops);
1938 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1939 hdev, &adv_min_interval_fops);
1940 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1941 hdev, &adv_max_interval_fops);
1942 debugfs_create_u16("discov_interleaved_timeout", 0644,
1943 hdev->debugfs,
1944 &hdev->discov_interleaved_timeout);
1945
1946 smp_register(hdev);
1947 }
1948 941
1949 return 0; 942 return 0;
1950} 943}
@@ -2624,6 +1617,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 	cancel_delayed_work(&hdev->service_cache);
 
 	cancel_delayed_work_sync(&hdev->le_scan_disable);
+	cancel_delayed_work_sync(&hdev->le_scan_restart);
 
 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
 		cancel_delayed_work_sync(&hdev->rpa_expired);
@@ -2635,6 +1629,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 
 	hci_dev_lock(hdev);
 
+	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+
 	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
 		if (hdev->dev_type == HCI_BREDR)
 			mgmt_powered(hdev, 0);
@@ -2645,6 +1641,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 	hci_conn_hash_flush(hdev);
 	hci_dev_unlock(hdev);
 
+	smp_unregister(hdev);
+
 	hci_notify(hdev, HCI_DEV_DOWN);
 
 	if (hdev->flush)
@@ -2724,32 +1722,14 @@ done:
 	return err;
 }
 
-int hci_dev_reset(__u16 dev)
+static int hci_dev_do_reset(struct hci_dev *hdev)
 {
-	struct hci_dev *hdev;
-	int ret = 0;
+	int ret;
 
-	hdev = hci_dev_get(dev);
-	if (!hdev)
-		return -ENODEV;
+	BT_DBG("%s %p", hdev->name, hdev);
 
 	hci_req_lock(hdev);
 
-	if (!test_bit(HCI_UP, &hdev->flags)) {
-		ret = -ENETDOWN;
-		goto done;
-	}
-
-	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
-		ret = -EBUSY;
-		goto done;
-	}
-
-	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
-		ret = -EOPNOTSUPP;
-		goto done;
-	}
-
 	/* Drop queues */
 	skb_queue_purge(&hdev->rx_q);
 	skb_queue_purge(&hdev->cmd_q);
@@ -2772,12 +1752,41 @@ int hci_dev_reset(__u16 dev)
 
 	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
 
-done:
 	hci_req_unlock(hdev);
-	hci_dev_put(hdev);
 	return ret;
 }
 
+int hci_dev_reset(__u16 dev)
+{
+	struct hci_dev *hdev;
+	int err;
+
+	hdev = hci_dev_get(dev);
+	if (!hdev)
+		return -ENODEV;
+
+	if (!test_bit(HCI_UP, &hdev->flags)) {
+		err = -ENETDOWN;
+		goto done;
+	}
+
+	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+		err = -EBUSY;
+		goto done;
+	}
+
+	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+		err = -EOPNOTSUPP;
+		goto done;
+	}
+
+	err = hci_dev_do_reset(hdev);
+
+done:
+	hci_dev_put(hdev);
+	return err;
+}
+
 int hci_dev_reset_stat(__u16 dev)
 {
 	struct hci_dev *hdev;
@@ -3143,6 +2152,24 @@ static void hci_power_off(struct work_struct *work)
3143 hci_dev_do_close(hdev); 2152 hci_dev_do_close(hdev);
3144} 2153}
3145 2154
2155static void hci_error_reset(struct work_struct *work)
2156{
2157 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2158
2159 BT_DBG("%s", hdev->name);
2160
2161 if (hdev->hw_error)
2162 hdev->hw_error(hdev, hdev->hw_error_code);
2163 else
2164 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2165 hdev->hw_error_code);
2166
2167 if (hci_dev_do_close(hdev))
2168 return;
2169
2170 hci_dev_do_open(hdev);
2171}
2172
3146static void hci_discov_off(struct work_struct *work) 2173static void hci_discov_off(struct work_struct *work)
3147{ 2174{
3148 struct hci_dev *hdev; 2175 struct hci_dev *hdev;
@@ -3555,9 +2582,15 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3555 if (hash192 && rand192) { 2582 if (hash192 && rand192) {
3556 memcpy(data->hash192, hash192, sizeof(data->hash192)); 2583 memcpy(data->hash192, hash192, sizeof(data->hash192));
3557 memcpy(data->rand192, rand192, sizeof(data->rand192)); 2584 memcpy(data->rand192, rand192, sizeof(data->rand192));
2585 if (hash256 && rand256)
2586 data->present = 0x03;
3558 } else { 2587 } else {
3559 memset(data->hash192, 0, sizeof(data->hash192)); 2588 memset(data->hash192, 0, sizeof(data->hash192));
3560 memset(data->rand192, 0, sizeof(data->rand192)); 2589 memset(data->rand192, 0, sizeof(data->rand192));
2590 if (hash256 && rand256)
2591 data->present = 0x02;
2592 else
2593 data->present = 0x00;
3561 } 2594 }
3562 2595
3563 if (hash256 && rand256) { 2596 if (hash256 && rand256) {
@@ -3566,6 +2599,8 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3566 } else { 2599 } else {
3567 memset(data->hash256, 0, sizeof(data->hash256)); 2600 memset(data->hash256, 0, sizeof(data->hash256));
3568 memset(data->rand256, 0, sizeof(data->rand256)); 2601 memset(data->rand256, 0, sizeof(data->rand256));
2602 if (hash192 && rand192)
2603 data->present = 0x01;
3569 } 2604 }
3570 2605
3571 BT_DBG("%s for %pMR", hdev->name, bdaddr); 2606 BT_DBG("%s for %pMR", hdev->name, bdaddr);
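
The hunks above maintain data->present as a small bitmask of which OOB value pairs are actually stored: bit 0 for the P-192 hash/randomizer pair and bit 1 for the P-256 pair. A minimal userspace sketch of that encoding (the oob_present_mask() helper and the standalone program are illustrative, not kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit 0: P-192 hash/rand pair stored, bit 1: P-256 pair stored. */
static uint8_t oob_present_mask(bool have192, bool have256)
{
        uint8_t present = 0x00;

        if (have192)
                present |= 0x01;
        if (have256)
                present |= 0x02;

        return present;
}

int main(void)
{
        printf("192 only: 0x%02x\n", oob_present_mask(true, false));  /* 0x01 */
        printf("256 only: 0x%02x\n", oob_present_mask(false, true));  /* 0x02 */
        printf("both:     0x%02x\n", oob_present_mask(true, true));   /* 0x03 */
        printf("neither:  0x%02x\n", oob_present_mask(false, false)); /* 0x00 */
        return 0;
}
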
@@ -3659,23 +2694,6 @@ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3659 return NULL; 2694 return NULL;
3660} 2695}
3661 2696
3662static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3663{
3664 struct hci_conn *conn;
3665
3666 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3667 if (!conn)
3668 return false;
3669
3670 if (conn->dst_type != type)
3671 return false;
3672
3673 if (conn->state != BT_CONNECTED)
3674 return false;
3675
3676 return true;
3677}
3678
3679/* This function requires the caller holds hdev->lock */ 2697/* This function requires the caller holds hdev->lock */
3680struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, 2698struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3681 bdaddr_t *addr, u8 addr_type) 2699 bdaddr_t *addr, u8 addr_type)
@@ -3731,47 +2749,6 @@ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3731 return params; 2749 return params;
3732} 2750}
3733 2751
3734/* This function requires the caller holds hdev->lock */
3735int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3736 u8 auto_connect)
3737{
3738 struct hci_conn_params *params;
3739
3740 params = hci_conn_params_add(hdev, addr, addr_type);
3741 if (!params)
3742 return -EIO;
3743
3744 if (params->auto_connect == auto_connect)
3745 return 0;
3746
3747 list_del_init(&params->action);
3748
3749 switch (auto_connect) {
3750 case HCI_AUTO_CONN_DISABLED:
3751 case HCI_AUTO_CONN_LINK_LOSS:
3752 hci_update_background_scan(hdev);
3753 break;
3754 case HCI_AUTO_CONN_REPORT:
3755 list_add(&params->action, &hdev->pend_le_reports);
3756 hci_update_background_scan(hdev);
3757 break;
3758 case HCI_AUTO_CONN_DIRECT:
3759 case HCI_AUTO_CONN_ALWAYS:
3760 if (!is_connected(hdev, addr, addr_type)) {
3761 list_add(&params->action, &hdev->pend_le_conns);
3762 hci_update_background_scan(hdev);
3763 }
3764 break;
3765 }
3766
3767 params->auto_connect = auto_connect;
3768
3769 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3770 auto_connect);
3771
3772 return 0;
3773}
3774
3775static void hci_conn_params_free(struct hci_conn_params *params) 2752static void hci_conn_params_free(struct hci_conn_params *params)
3776{ 2753{
3777 if (params->conn) { 2754 if (params->conn) {
@@ -3828,7 +2805,7 @@ void hci_conn_params_clear_all(struct hci_dev *hdev)
3828 BT_DBG("All LE connection parameters were removed"); 2805 BT_DBG("All LE connection parameters were removed");
3829} 2806}
3830 2807
3831static void inquiry_complete(struct hci_dev *hdev, u8 status) 2808static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3832{ 2809{
3833 if (status) { 2810 if (status) {
3834 BT_ERR("Failed to start inquiry: status %d", status); 2811 BT_ERR("Failed to start inquiry: status %d", status);
@@ -3840,7 +2817,8 @@ static void inquiry_complete(struct hci_dev *hdev, u8 status)
3840 } 2817 }
3841} 2818}
3842 2819
3843static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status) 2820static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2821 u16 opcode)
3844{ 2822{
3845 /* General inquiry access code (GIAC) */ 2823 /* General inquiry access code (GIAC) */
3846 u8 lap[3] = { 0x33, 0x8b, 0x9e }; 2824 u8 lap[3] = { 0x33, 0x8b, 0x9e };
@@ -3853,6 +2831,8 @@ static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3853 return; 2831 return;
3854 } 2832 }
3855 2833
2834 hdev->discovery.scan_start = 0;
2835
3856 switch (hdev->discovery.type) { 2836 switch (hdev->discovery.type) {
3857 case DISCOV_TYPE_LE: 2837 case DISCOV_TYPE_LE:
3858 hci_dev_lock(hdev); 2838 hci_dev_lock(hdev);
@@ -3892,6 +2872,8 @@ static void le_scan_disable_work(struct work_struct *work)
3892 2872
3893 BT_DBG("%s", hdev->name); 2873 BT_DBG("%s", hdev->name);
3894 2874
2875 cancel_delayed_work_sync(&hdev->le_scan_restart);
2876
3895 hci_req_init(&req, hdev); 2877 hci_req_init(&req, hdev);
3896 2878
3897 hci_req_add_le_scan_disable(&req); 2879 hci_req_add_le_scan_disable(&req);
@@ -3901,110 +2883,72 @@ static void le_scan_disable_work(struct work_struct *work)
3901 BT_ERR("Disable LE scanning request failed: err %d", err); 2883 BT_ERR("Disable LE scanning request failed: err %d", err);
3902} 2884}
3903 2885
3904static void set_random_addr(struct hci_request *req, bdaddr_t *rpa) 2886static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
2887 u16 opcode)
3905{ 2888{
3906 struct hci_dev *hdev = req->hdev; 2889 unsigned long timeout, duration, scan_start, now;
3907 2890
3908 /* If we're advertising or initiating an LE connection we can't 2891 BT_DBG("%s", hdev->name);
3909 * go ahead and change the random address at this time. This is 2892
3910 * because the eventual initiator address used for the 2893 if (status) {
3911 * subsequently created connection will be undefined (some 2894 BT_ERR("Failed to restart LE scan: status %d", status);
3912 * controllers use the new address and others the one we had
3913 * when the operation started).
3914 *
3915 * In this kind of scenario skip the update and let the random
3916 * address be updated at the next cycle.
3917 */
3918 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3919 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3920 BT_DBG("Deferring random address update");
3921 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
3922 return; 2895 return;
3923 } 2896 }
3924 2897
3925 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa); 2898 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3926} 2899 !hdev->discovery.scan_start)
3927 2900 return;
3928int hci_update_random_address(struct hci_request *req, bool require_privacy,
3929 u8 *own_addr_type)
3930{
3931 struct hci_dev *hdev = req->hdev;
3932 int err;
3933 2901
3934 /* If privacy is enabled use a resolvable private address. If 2902 /* When the scan was started, hdev->le_scan_disable has been queued
3935 	 * current RPA has expired or there is something other than	2903	 * after duration from scan_start. During scan restart this job
3936 * the current RPA in use, then generate a new one. 2904 * has been canceled, and we need to queue it again after proper
2905 * timeout, to make sure that scan does not run indefinitely.
3937 */ 2906 */
3938 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) { 2907 duration = hdev->discovery.scan_duration;
3939 int to; 2908 scan_start = hdev->discovery.scan_start;
3940 2909 now = jiffies;
3941 *own_addr_type = ADDR_LE_DEV_RANDOM; 2910 if (now - scan_start <= duration) {
3942 2911 int elapsed;
3943 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) && 2912
3944 !bacmp(&hdev->random_addr, &hdev->rpa)) 2913 if (now >= scan_start)
3945 return 0; 2914 elapsed = now - scan_start;
3946 2915 else
3947 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); 2916 elapsed = ULONG_MAX - scan_start + now;
3948 if (err < 0) {
3949 BT_ERR("%s failed to generate new RPA", hdev->name);
3950 return err;
3951 }
3952
3953 set_random_addr(req, &hdev->rpa);
3954
3955 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3956 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3957 2917
3958 return 0; 2918 timeout = duration - elapsed;
2919 } else {
2920 timeout = 0;
3959 } 2921 }
2922 queue_delayed_work(hdev->workqueue,
2923 &hdev->le_scan_disable, timeout);
2924}
3960 2925
3961 	 /* In case of required privacy without a resolvable private address,	2926static void le_scan_restart_work(struct work_struct *work)
3962 	 * use a non-resolvable private address. This is useful for active	2927{
3963 * scanning and non-connectable advertising. 2928 struct hci_dev *hdev = container_of(work, struct hci_dev,
3964 */ 2929 le_scan_restart.work);
3965 if (require_privacy) { 2930 struct hci_request req;
3966 bdaddr_t nrpa; 2931 struct hci_cp_le_set_scan_enable cp;
2932 int err;
3967 2933
3968 while (true) { 2934 BT_DBG("%s", hdev->name);
3969 /* The non-resolvable private address is generated
3970 * from random six bytes with the two most significant
3971 * bits cleared.
3972 */
3973 get_random_bytes(&nrpa, 6);
3974 nrpa.b[5] &= 0x3f;
3975 2935
3976 /* The non-resolvable private address shall not be 2936 /* If controller is not scanning we are done. */
3977 * equal to the public address. 2937 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3978 */ 2938 return;
3979 if (bacmp(&hdev->bdaddr, &nrpa))
3980 break;
3981 }
3982 2939
3983 *own_addr_type = ADDR_LE_DEV_RANDOM; 2940 hci_req_init(&req, hdev);
3984 set_random_addr(req, &nrpa);
3985 return 0;
3986 }
3987 2941
3988 /* If forcing static address is in use or there is no public 2942 hci_req_add_le_scan_disable(&req);
3989 * address use the static address as random address (but skip
3990 * the HCI command if the current random address is already the
3991 * static one.
3992 */
3993 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3994 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3995 *own_addr_type = ADDR_LE_DEV_RANDOM;
3996 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3997 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3998 &hdev->static_addr);
3999 return 0;
4000 }
4001 2943
4002 /* Neither privacy nor static address is being used so use a 2944 memset(&cp, 0, sizeof(cp));
4003 * public address. 2945 cp.enable = LE_SCAN_ENABLE;
4004 */ 2946 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
4005 *own_addr_type = ADDR_LE_DEV_PUBLIC; 2947 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
4006 2948
4007 return 0; 2949 err = hci_req_run(&req, le_scan_restart_work_complete);
2950 if (err)
2951 BT_ERR("Restart LE scan request failed: err %d", err);
4008} 2952}
4009 2953
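
le_scan_restart_work_complete() above re-arms the le_scan_disable work for whatever part of the original scan duration is still left, treating a wrapped tick counter the same way the kernel handles jiffies. A self-contained sketch of the same arithmetic (remaining_timeout() and the plain counter are illustrative, not the kernel's jiffies/workqueue API):

#include <limits.h>
#include <stdio.h>

/* Remaining time of a scan of 'duration' ticks started at 'scan_start',
 * given the current tick counter 'now'.  The counter may wrap around,
 * mirroring the kernel's jiffies handling in the hunk above.
 */
static unsigned long remaining_timeout(unsigned long now,
                                       unsigned long scan_start,
                                       unsigned long duration)
{
        unsigned long elapsed;

        if (now - scan_start > duration)        /* unsigned wrap-safe check */
                return 0;

        if (now >= scan_start)
                elapsed = now - scan_start;
        else
                elapsed = ULONG_MAX - scan_start + now;

        return duration - elapsed;
}

int main(void)
{
        /* Plain case: 300 ticks into a 1000-tick scan, 700 left. */
        printf("%lu\n", remaining_timeout(1300, 1000, 1000));
        /* Wrapped case: counter rolled over shortly after the start. */
        printf("%lu\n", remaining_timeout(50, ULONG_MAX - 100, 1000));
        return 0;
}
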
4010/* Copy the Identity Address of the controller. 2954/* Copy the Identity Address of the controller.
@@ -4015,12 +2959,18 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
4015 * 2959 *
4016 * For debugging purposes it is possible to force controllers with a 2960 * For debugging purposes it is possible to force controllers with a
4017 * public address to use the static random address instead. 2961 * public address to use the static random address instead.
2962 *
2963 * In case BR/EDR has been disabled on a dual-mode controller and
2964 * userspace has configured a static address, then that address
2965 * becomes the identity address instead of the public BR/EDR address.
4018 */ 2966 */
4019void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, 2967void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
4020 u8 *bdaddr_type) 2968 u8 *bdaddr_type)
4021{ 2969{
4022 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) || 2970 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
4023 !bacmp(&hdev->bdaddr, BDADDR_ANY)) { 2971 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2972 (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
2973 bacmp(&hdev->static_addr, BDADDR_ANY))) {
4024 bacpy(bdaddr, &hdev->static_addr); 2974 bacpy(bdaddr, &hdev->static_addr);
4025 *bdaddr_type = ADDR_LE_DEV_RANDOM; 2975 *bdaddr_type = ADDR_LE_DEV_RANDOM;
4026 } else { 2976 } else {
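
With the extended condition above, the static random address is reported as the identity address not only when it is forced for debugging or when the controller has no public address, but also when BR/EDR is disabled on a dual-mode controller and a static address has been configured. A compact sketch of that selection (the enum and the pick_identity_type() helper are illustrative names):

#include <stdbool.h>
#include <stdio.h>

enum addr_type { ADDR_PUBLIC, ADDR_RANDOM };

/* Mirror of the three conditions that make hci_copy_identity_address()
 * hand out the static random address instead of the public one.
 */
static enum addr_type pick_identity_type(bool force_static,
                                         bool has_public_addr,
                                         bool bredr_enabled,
                                         bool has_static_addr)
{
        if (force_static || !has_public_addr ||
            (!bredr_enabled && has_static_addr))
                return ADDR_RANDOM;

        return ADDR_PUBLIC;
}

int main(void)
{
        /* Dual-mode controller, BR/EDR switched off, static address set:
         * the static random address becomes the identity address.
         */
        printf("%s\n", pick_identity_type(false, true, false, true) ==
                       ADDR_RANDOM ? "static random" : "public");
        return 0;
}
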
@@ -4059,6 +3009,12 @@ struct hci_dev *hci_alloc_dev(void)
4059 hdev->le_conn_max_interval = 0x0038; 3009 hdev->le_conn_max_interval = 0x0038;
4060 hdev->le_conn_latency = 0x0000; 3010 hdev->le_conn_latency = 0x0000;
4061 hdev->le_supv_timeout = 0x002a; 3011 hdev->le_supv_timeout = 0x002a;
3012 hdev->le_def_tx_len = 0x001b;
3013 hdev->le_def_tx_time = 0x0148;
3014 hdev->le_max_tx_len = 0x001b;
3015 hdev->le_max_tx_time = 0x0148;
3016 hdev->le_max_rx_len = 0x001b;
3017 hdev->le_max_rx_time = 0x0148;
4062 3018
4063 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; 3019 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
4064 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; 3020 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
@@ -4086,10 +3042,12 @@ struct hci_dev *hci_alloc_dev(void)
4086 INIT_WORK(&hdev->cmd_work, hci_cmd_work); 3042 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4087 INIT_WORK(&hdev->tx_work, hci_tx_work); 3043 INIT_WORK(&hdev->tx_work, hci_tx_work);
4088 INIT_WORK(&hdev->power_on, hci_power_on); 3044 INIT_WORK(&hdev->power_on, hci_power_on);
3045 INIT_WORK(&hdev->error_reset, hci_error_reset);
4089 3046
4090 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); 3047 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4091 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off); 3048 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4092 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work); 3049 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3050 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
4093 3051
4094 skb_queue_head_init(&hdev->rx_q); 3052 skb_queue_head_init(&hdev->rx_q);
4095 skb_queue_head_init(&hdev->cmd_q); 3053 skb_queue_head_init(&hdev->cmd_q);
@@ -4259,8 +3217,6 @@ void hci_unregister_dev(struct hci_dev *hdev)
4259 rfkill_destroy(hdev->rfkill); 3217 rfkill_destroy(hdev->rfkill);
4260 } 3218 }
4261 3219
4262 smp_unregister(hdev);
4263
4264 device_del(&hdev->dev); 3220 device_del(&hdev->dev);
4265 3221
4266 debugfs_remove_recursive(hdev->debugfs); 3222 debugfs_remove_recursive(hdev->debugfs);
@@ -4539,76 +3495,11 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4539 } 3495 }
4540} 3496}
4541 3497
4542void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4543{
4544 skb_queue_head_init(&req->cmd_q);
4545 req->hdev = hdev;
4546 req->err = 0;
4547}
4548
4549int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4550{
4551 struct hci_dev *hdev = req->hdev;
4552 struct sk_buff *skb;
4553 unsigned long flags;
4554
4555 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4556
4557 /* If an error occurred during request building, remove all HCI
4558 * commands queued on the HCI request queue.
4559 */
4560 if (req->err) {
4561 skb_queue_purge(&req->cmd_q);
4562 return req->err;
4563 }
4564
4565 /* Do not allow empty requests */
4566 if (skb_queue_empty(&req->cmd_q))
4567 return -ENODATA;
4568
4569 skb = skb_peek_tail(&req->cmd_q);
4570 bt_cb(skb)->req.complete = complete;
4571
4572 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4573 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4574 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4575
4576 queue_work(hdev->workqueue, &hdev->cmd_work);
4577
4578 return 0;
4579}
4580
4581bool hci_req_pending(struct hci_dev *hdev) 3498bool hci_req_pending(struct hci_dev *hdev)
4582{ 3499{
4583 return (hdev->req_status == HCI_REQ_PEND); 3500 return (hdev->req_status == HCI_REQ_PEND);
4584} 3501}
4585 3502
4586static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4587 u32 plen, const void *param)
4588{
4589 int len = HCI_COMMAND_HDR_SIZE + plen;
4590 struct hci_command_hdr *hdr;
4591 struct sk_buff *skb;
4592
4593 skb = bt_skb_alloc(len, GFP_ATOMIC);
4594 if (!skb)
4595 return NULL;
4596
4597 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4598 hdr->opcode = cpu_to_le16(opcode);
4599 hdr->plen = plen;
4600
4601 if (plen)
4602 memcpy(skb_put(skb, plen), param, plen);
4603
4604 BT_DBG("skb len %d", skb->len);
4605
4606 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4607 bt_cb(skb)->opcode = opcode;
4608
4609 return skb;
4610}
4611
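
hci_prepare_cmd(), removed here as part of the move to hci_request.c, serializes a command as a three-byte header, the opcode as a little-endian 16-bit value plus a one-byte parameter length, followed by the parameters themselves. A small userspace sketch of that wire layout (build_hci_cmd() and the fixed buffer are illustrative, not the kernel skb helpers):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Serialize one HCI command: opcode (LE16), parameter length, parameters. */
static size_t build_hci_cmd(uint8_t *buf, uint16_t opcode,
                            const void *param, uint8_t plen)
{
        buf[0] = opcode & 0xff;         /* opcode, little endian */
        buf[1] = opcode >> 8;
        buf[2] = plen;                  /* parameter total length */
        if (plen)
                memcpy(buf + 3, param, plen);
        return (size_t)3 + plen;
}

int main(void)
{
        uint8_t buf[32];
        /* HCI Reset (0x0c03) carries no parameters. */
        size_t len = build_hci_cmd(buf, 0x0c03, NULL, 0);

        for (size_t i = 0; i < len; i++)
                printf("%02x ", buf[i]);        /* prints: 03 0c 00 */
        printf("\n");
        return 0;
}
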
4612/* Send HCI command */ 3503/* Send HCI command */
4613int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, 3504int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4614 const void *param) 3505 const void *param)
@@ -4634,43 +3525,6 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4634 return 0; 3525 return 0;
4635} 3526}
4636 3527
4637/* Queue a command to an asynchronous HCI request */
4638void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4639 const void *param, u8 event)
4640{
4641 struct hci_dev *hdev = req->hdev;
4642 struct sk_buff *skb;
4643
4644 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4645
4646 /* If an error occurred during request building, there is no point in
4647 * queueing the HCI command. We can simply return.
4648 */
4649 if (req->err)
4650 return;
4651
4652 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4653 if (!skb) {
4654 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4655 hdev->name, opcode);
4656 req->err = -ENOMEM;
4657 return;
4658 }
4659
4660 if (skb_queue_empty(&req->cmd_q))
4661 bt_cb(skb)->req.start = true;
4662
4663 bt_cb(skb)->req.event = event;
4664
4665 skb_queue_tail(&req->cmd_q, skb);
4666}
4667
4668void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4669 const void *param)
4670{
4671 hci_req_add_ev(req, opcode, plen, param, 0);
4672}
4673
4674/* Get data from the previously sent command */ 3528/* Get data from the previously sent command */
4675void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) 3529void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4676{ 3530{
@@ -5429,7 +4283,7 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5429 4283
5430call_complete: 4284call_complete:
5431 if (req_complete) 4285 if (req_complete)
5432 req_complete(hdev, status); 4286 req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
5433} 4287}
5434 4288
5435static void hci_rx_work(struct work_struct *work) 4289static void hci_rx_work(struct work_struct *work)
@@ -5518,302 +4372,3 @@ static void hci_cmd_work(struct work_struct *work)
5518 } 4372 }
5519 } 4373 }
5520} 4374}
5521
5522void hci_req_add_le_scan_disable(struct hci_request *req)
5523{
5524 struct hci_cp_le_set_scan_enable cp;
5525
5526 memset(&cp, 0, sizeof(cp));
5527 cp.enable = LE_SCAN_DISABLE;
5528 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5529}
5530
5531static void add_to_white_list(struct hci_request *req,
5532 struct hci_conn_params *params)
5533{
5534 struct hci_cp_le_add_to_white_list cp;
5535
5536 cp.bdaddr_type = params->addr_type;
5537 bacpy(&cp.bdaddr, &params->addr);
5538
5539 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5540}
5541
5542static u8 update_white_list(struct hci_request *req)
5543{
5544 struct hci_dev *hdev = req->hdev;
5545 struct hci_conn_params *params;
5546 struct bdaddr_list *b;
5547 uint8_t white_list_entries = 0;
5548
5549 /* Go through the current white list programmed into the
5550 * controller one by one and check if that address is still
5551 * in the list of pending connections or list of devices to
5552 * report. If not present in either list, then queue the
5553 * command to remove it from the controller.
5554 */
5555 list_for_each_entry(b, &hdev->le_white_list, list) {
5556 struct hci_cp_le_del_from_white_list cp;
5557
5558 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5559 &b->bdaddr, b->bdaddr_type) ||
5560 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5561 &b->bdaddr, b->bdaddr_type)) {
5562 white_list_entries++;
5563 continue;
5564 }
5565
5566 cp.bdaddr_type = b->bdaddr_type;
5567 bacpy(&cp.bdaddr, &b->bdaddr);
5568
5569 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5570 sizeof(cp), &cp);
5571 }
5572
5573 /* Since all no longer valid white list entries have been
5574 * removed, walk through the list of pending connections
5575 * and ensure that any new device gets programmed into
5576 * the controller.
5577 *
5578 * If the list of the devices is larger than the list of
5579 * available white list entries in the controller, then
5581	 * just abort and return the filter policy value to not use the
5581 * white list.
5582 */
5583 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5584 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5585 &params->addr, params->addr_type))
5586 continue;
5587
5588 if (white_list_entries >= hdev->le_white_list_size) {
5589 /* Select filter policy to accept all advertising */
5590 return 0x00;
5591 }
5592
5593 if (hci_find_irk_by_addr(hdev, &params->addr,
5594 params->addr_type)) {
5595 /* White list can not be used with RPAs */
5596 return 0x00;
5597 }
5598
5599 white_list_entries++;
5600 add_to_white_list(req, params);
5601 }
5602
5603 /* After adding all new pending connections, walk through
5604 * the list of pending reports and also add these to the
5605 * white list if there is still space.
5606 */
5607 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5608 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5609 &params->addr, params->addr_type))
5610 continue;
5611
5612 if (white_list_entries >= hdev->le_white_list_size) {
5613 /* Select filter policy to accept all advertising */
5614 return 0x00;
5615 }
5616
5617 if (hci_find_irk_by_addr(hdev, &params->addr,
5618 params->addr_type)) {
5619 /* White list can not be used with RPAs */
5620 return 0x00;
5621 }
5622
5623 white_list_entries++;
5624 add_to_white_list(req, params);
5625 }
5626
5627 /* Select filter policy to use white list */
5628 return 0x01;
5629}
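
update_white_list(), also on its way out of hci_core.c, falls back to filter policy 0x00 (accept all advertising) as soon as one pending device cannot be programmed into the white list, either because the controller's list is full or because the device is only known by an RPA; only when everything fits does it return 0x01. A reduced sketch of that decision (the struct and choose_filter_policy() are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pending_dev {
        bool uses_rpa;  /* device only known by a resolvable private address */
};

/* 0x00: accept all advertising, 0x01: use the controller white list. */
static uint8_t choose_filter_policy(const struct pending_dev *devs,
                                    unsigned int count,
                                    unsigned int white_list_size)
{
        unsigned int entries = 0;

        for (unsigned int i = 0; i < count; i++) {
                if (entries >= white_list_size)
                        return 0x00;    /* does not fit, scan without white list */
                if (devs[i].uses_rpa)
                        return 0x00;    /* white list cannot match RPAs */
                entries++;
        }

        return 0x01;
}

int main(void)
{
        struct pending_dev devs[] = { { false }, { false }, { true } };

        printf("0x%02x\n", choose_filter_policy(devs, 2, 8)); /* 0x01 */
        printf("0x%02x\n", choose_filter_policy(devs, 3, 8)); /* 0x00, RPA present */
        return 0;
}
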
5630
5631void hci_req_add_le_passive_scan(struct hci_request *req)
5632{
5633 struct hci_cp_le_set_scan_param param_cp;
5634 struct hci_cp_le_set_scan_enable enable_cp;
5635 struct hci_dev *hdev = req->hdev;
5636 u8 own_addr_type;
5637 u8 filter_policy;
5638
5639	 /* Set require_privacy to false since no SCAN_REQ are sent
5640	 * during passive scanning. Not using a non-resolvable address
5641 * here is important so that peer devices using direct
5642 * advertising with our address will be correctly reported
5643 * by the controller.
5644 */
5645 if (hci_update_random_address(req, false, &own_addr_type))
5646 return;
5647
5648 /* Adding or removing entries from the white list must
5649 * happen before enabling scanning. The controller does
5650 * not allow white list modification while scanning.
5651 */
5652 filter_policy = update_white_list(req);
5653
5654 /* When the controller is using random resolvable addresses and
5655 * with that having LE privacy enabled, then controllers with
5656 * Extended Scanner Filter Policies support can now enable support
5657 * for handling directed advertising.
5658 *
5659	 * So instead of using filter policies 0x00 (no whitelist)
5660 * and 0x01 (whitelist enabled) use the new filter policies
5661 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
5662 */
5663 if (test_bit(HCI_PRIVACY, &hdev->dev_flags) &&
5664 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
5665 filter_policy |= 0x02;
5666
5667 memset(&param_cp, 0, sizeof(param_cp));
5668 param_cp.type = LE_SCAN_PASSIVE;
5669 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5670 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5671 param_cp.own_address_type = own_addr_type;
5672 param_cp.filter_policy = filter_policy;
5673 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5674 &param_cp);
5675
5676 memset(&enable_cp, 0, sizeof(enable_cp));
5677 enable_cp.enable = LE_SCAN_ENABLE;
5678 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5679 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5680 &enable_cp);
5681}
5682
5683static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5684{
5685 if (status)
5686 BT_DBG("HCI request failed to update background scanning: "
5687 "status 0x%2.2x", status);
5688}
5689
5690/* This function controls the background scanning based on hdev->pend_le_conns
5691 * list. If there are pending LE connections we start the background scanning,
5692 * otherwise we stop it.
5693 *
5694 * This function requires the caller holds hdev->lock.
5695 */
5696void hci_update_background_scan(struct hci_dev *hdev)
5697{
5698 struct hci_request req;
5699 struct hci_conn *conn;
5700 int err;
5701
5702 if (!test_bit(HCI_UP, &hdev->flags) ||
5703 test_bit(HCI_INIT, &hdev->flags) ||
5704 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5705 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5706 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5707 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5708 return;
5709
5710 /* No point in doing scanning if LE support hasn't been enabled */
5711 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5712 return;
5713
5714 /* If discovery is active don't interfere with it */
5715 if (hdev->discovery.state != DISCOVERY_STOPPED)
5716 return;
5717
5718 /* Reset RSSI and UUID filters when starting background scanning
5719 * since these filters are meant for service discovery only.
5720 *
5721 * The Start Discovery and Start Service Discovery operations
5722 * ensure to set proper values for RSSI threshold and UUID
5723 * filter list. So it is safe to just reset them here.
5724 */
5725 hci_discovery_filter_clear(hdev);
5726
5727 hci_req_init(&req, hdev);
5728
5729 if (list_empty(&hdev->pend_le_conns) &&
5730 list_empty(&hdev->pend_le_reports)) {
5731	 /* If there are no pending LE connections or devices
5732 * to be scanned for, we should stop the background
5733 * scanning.
5734 */
5735
5736 /* If controller is not scanning we are done. */
5737 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5738 return;
5739
5740 hci_req_add_le_scan_disable(&req);
5741
5742 BT_DBG("%s stopping background scanning", hdev->name);
5743 } else {
5744 /* If there is at least one pending LE connection, we should
5745 * keep the background scan running.
5746 */
5747
5748 /* If controller is connecting, we should not start scanning
5749 * since some controllers are not able to scan and connect at
5750 * the same time.
5751 */
5752 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5753 if (conn)
5754 return;
5755
5756 /* If controller is currently scanning, we stop it to ensure we
5757 * don't miss any advertising (due to duplicates filter).
5758 */
5759 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5760 hci_req_add_le_scan_disable(&req);
5761
5762 hci_req_add_le_passive_scan(&req);
5763
5764 BT_DBG("%s starting background scanning", hdev->name);
5765 }
5766
5767 err = hci_req_run(&req, update_background_scan_complete);
5768 if (err)
5769 BT_ERR("Failed to run HCI request: err %d", err);
5770}
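
hci_update_background_scan() boils down to a small decision: stop passive scanning when there is nothing left to look for, leave the controller alone while it is connecting, and otherwise (re)start the passive scan, disabling a scan that is already running first so the duplicates filter gets reset. A condensed sketch of that decision table (the enum and decide_background_scan() are illustrative; the early policy checks at the top of the function are omitted):

#include <stdbool.h>
#include <stdio.h>

enum bg_scan_action {
        BG_SCAN_NONE,           /* nothing to queue */
        BG_SCAN_STOP,           /* queue scan disable */
        BG_SCAN_START,          /* queue scan parameters + enable */
        BG_SCAN_RESTART,        /* queue disable, then parameters + enable */
};

static enum bg_scan_action decide_background_scan(bool have_pending,
                                                  bool scanning,
                                                  bool connecting)
{
        if (!have_pending)
                return scanning ? BG_SCAN_STOP : BG_SCAN_NONE;

        if (connecting)
                return BG_SCAN_NONE;    /* some controllers cannot scan and connect */

        return scanning ? BG_SCAN_RESTART : BG_SCAN_START;
}

int main(void)
{
        printf("%d\n", decide_background_scan(true, true, false));  /* 3: restart */
        printf("%d\n", decide_background_scan(false, true, false)); /* 1: stop */
        return 0;
}
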
5771
5772static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5773{
5774 struct bdaddr_list *b;
5775
5776 list_for_each_entry(b, &hdev->whitelist, list) {
5777 struct hci_conn *conn;
5778
5779 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5780 if (!conn)
5781 return true;
5782
5783 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5784 return true;
5785 }
5786
5787 return false;
5788}
5789
5790void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5791{
5792 u8 scan;
5793
5794 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5795 return;
5796
5797 if (!hdev_is_powered(hdev))
5798 return;
5799
5800 if (mgmt_powering_down(hdev))
5801 return;
5802
5803 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
5804 disconnected_whitelist_entries(hdev))
5805 scan = SCAN_PAGE;
5806 else
5807 scan = SCAN_DISABLED;
5808
5809 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5810 return;
5811
5812 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5813 scan |= SCAN_INQUIRY;
5814
5815 if (req)
5816 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5817 else
5818 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5819}
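
hci_update_page_scan() composes the Write Scan Enable parameter from two bits: page scan when the device is connectable or a whitelisted device is currently disconnected, plus inquiry scan when it is discoverable. A small sketch of that composition, leaving out the early return taken when the page scan bit already matches (scan_enable_value() is an illustrative name; the 0x01/0x02 values follow the Write Scan Enable encoding):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SCAN_DISABLED   0x00
#define SCAN_INQUIRY    0x01    /* inquiry scan enabled */
#define SCAN_PAGE       0x02    /* page scan enabled */

static uint8_t scan_enable_value(bool connectable, bool pending_whitelist,
                                 bool discoverable)
{
        uint8_t scan;

        scan = (connectable || pending_whitelist) ? SCAN_PAGE : SCAN_DISABLED;
        if (discoverable)
                scan |= SCAN_INQUIRY;

        return scan;
}

int main(void)
{
        printf("0x%02x\n", scan_enable_value(true, false, true));   /* 0x03 */
        printf("0x%02x\n", scan_enable_value(false, false, false)); /* 0x00 */
        return 0;
}
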
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
new file mode 100644
index 000000000000..65261e5d4b84
--- /dev/null
+++ b/net/bluetooth/hci_debugfs.c
@@ -0,0 +1,1056 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2014 Intel Corporation
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
22*/
23
24#include <linux/debugfs.h>
25
26#include <net/bluetooth/bluetooth.h>
27#include <net/bluetooth/hci_core.h>
28
29#include "hci_debugfs.h"
30
31static int features_show(struct seq_file *f, void *ptr)
32{
33 struct hci_dev *hdev = f->private;
34 u8 p;
35
36 hci_dev_lock(hdev);
37 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
38 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
39 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
40 hdev->features[p][0], hdev->features[p][1],
41 hdev->features[p][2], hdev->features[p][3],
42 hdev->features[p][4], hdev->features[p][5],
43 hdev->features[p][6], hdev->features[p][7]);
44 }
45 if (lmp_le_capable(hdev))
46 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
47 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
48 hdev->le_features[0], hdev->le_features[1],
49 hdev->le_features[2], hdev->le_features[3],
50 hdev->le_features[4], hdev->le_features[5],
51 hdev->le_features[6], hdev->le_features[7]);
52 hci_dev_unlock(hdev);
53
54 return 0;
55}
56
57static int features_open(struct inode *inode, struct file *file)
58{
59 return single_open(file, features_show, inode->i_private);
60}
61
62static const struct file_operations features_fops = {
63 .open = features_open,
64 .read = seq_read,
65 .llseek = seq_lseek,
66 .release = single_release,
67};
68
69static int device_list_show(struct seq_file *f, void *ptr)
70{
71 struct hci_dev *hdev = f->private;
72 struct hci_conn_params *p;
73 struct bdaddr_list *b;
74
75 hci_dev_lock(hdev);
76 list_for_each_entry(b, &hdev->whitelist, list)
77 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
78 list_for_each_entry(p, &hdev->le_conn_params, list) {
79 seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
80 p->auto_connect);
81 }
82 hci_dev_unlock(hdev);
83
84 return 0;
85}
86
87static int device_list_open(struct inode *inode, struct file *file)
88{
89 return single_open(file, device_list_show, inode->i_private);
90}
91
92static const struct file_operations device_list_fops = {
93 .open = device_list_open,
94 .read = seq_read,
95 .llseek = seq_lseek,
96 .release = single_release,
97};
98
99static int blacklist_show(struct seq_file *f, void *p)
100{
101 struct hci_dev *hdev = f->private;
102 struct bdaddr_list *b;
103
104 hci_dev_lock(hdev);
105 list_for_each_entry(b, &hdev->blacklist, list)
106 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
107 hci_dev_unlock(hdev);
108
109 return 0;
110}
111
112static int blacklist_open(struct inode *inode, struct file *file)
113{
114 return single_open(file, blacklist_show, inode->i_private);
115}
116
117static const struct file_operations blacklist_fops = {
118 .open = blacklist_open,
119 .read = seq_read,
120 .llseek = seq_lseek,
121 .release = single_release,
122};
123
124static int uuids_show(struct seq_file *f, void *p)
125{
126 struct hci_dev *hdev = f->private;
127 struct bt_uuid *uuid;
128
129 hci_dev_lock(hdev);
130 list_for_each_entry(uuid, &hdev->uuids, list) {
131 u8 i, val[16];
132
133 /* The Bluetooth UUID values are stored in big endian,
134 * but with reversed byte order. So convert them into
135 * the right order for the %pUb modifier.
136 */
137 for (i = 0; i < 16; i++)
138 val[i] = uuid->uuid[15 - i];
139
140 seq_printf(f, "%pUb\n", val);
141 }
142 hci_dev_unlock(hdev);
143
144 return 0;
145}
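
uuids_show() above reverses the 16 stored bytes before handing them to the %pUb modifier, because the UUID is kept in memory with its byte order reversed relative to the textual big-endian form. A userspace sketch of the same reversal (print_uuid() and the explicit hex formatting are illustrative; the kernel just relies on %pUb):

#include <stdint.h>
#include <stdio.h>

/* Reverse the stored byte order and print as the usual 8-4-4-4-12 form. */
static void print_uuid(const uint8_t stored[16])
{
        uint8_t val[16];

        for (int i = 0; i < 16; i++)
                val[i] = stored[15 - i];

        printf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
               "%02x%02x%02x%02x%02x%02x\n",
               val[0], val[1], val[2], val[3], val[4], val[5],
               val[6], val[7], val[8], val[9], val[10], val[11],
               val[12], val[13], val[14], val[15]);
}

int main(void)
{
        /* Serial Port Profile UUID (0x1101 on the Bluetooth base UUID),
         * stored with reversed byte order as in hdev->uuids.
         */
        const uint8_t spp[16] = {
                0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
                0x00, 0x10, 0x00, 0x00, 0x01, 0x11, 0x00, 0x00,
        };

        print_uuid(spp);        /* 00001101-0000-1000-8000-00805f9b34fb */
        return 0;
}
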
146
147static int uuids_open(struct inode *inode, struct file *file)
148{
149 return single_open(file, uuids_show, inode->i_private);
150}
151
152static const struct file_operations uuids_fops = {
153 .open = uuids_open,
154 .read = seq_read,
155 .llseek = seq_lseek,
156 .release = single_release,
157};
158
159static int remote_oob_show(struct seq_file *f, void *ptr)
160{
161 struct hci_dev *hdev = f->private;
162 struct oob_data *data;
163
164 hci_dev_lock(hdev);
165 list_for_each_entry(data, &hdev->remote_oob_data, list) {
166 seq_printf(f, "%pMR (type %u) %u %*phN %*phN %*phN %*phN\n",
167 &data->bdaddr, data->bdaddr_type, data->present,
168 16, data->hash192, 16, data->rand192,
169		   16, data->hash256, 16, data->rand256);
170 }
171 hci_dev_unlock(hdev);
172
173 return 0;
174}
175
176static int remote_oob_open(struct inode *inode, struct file *file)
177{
178 return single_open(file, remote_oob_show, inode->i_private);
179}
180
181static const struct file_operations remote_oob_fops = {
182 .open = remote_oob_open,
183 .read = seq_read,
184 .llseek = seq_lseek,
185 .release = single_release,
186};
187
188static int conn_info_min_age_set(void *data, u64 val)
189{
190 struct hci_dev *hdev = data;
191
192 if (val == 0 || val > hdev->conn_info_max_age)
193 return -EINVAL;
194
195 hci_dev_lock(hdev);
196 hdev->conn_info_min_age = val;
197 hci_dev_unlock(hdev);
198
199 return 0;
200}
201
202static int conn_info_min_age_get(void *data, u64 *val)
203{
204 struct hci_dev *hdev = data;
205
206 hci_dev_lock(hdev);
207 *val = hdev->conn_info_min_age;
208 hci_dev_unlock(hdev);
209
210 return 0;
211}
212
213DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
214 conn_info_min_age_set, "%llu\n");
215
216static int conn_info_max_age_set(void *data, u64 val)
217{
218 struct hci_dev *hdev = data;
219
220 if (val == 0 || val < hdev->conn_info_min_age)
221 return -EINVAL;
222
223 hci_dev_lock(hdev);
224 hdev->conn_info_max_age = val;
225 hci_dev_unlock(hdev);
226
227 return 0;
228}
229
230static int conn_info_max_age_get(void *data, u64 *val)
231{
232 struct hci_dev *hdev = data;
233
234 hci_dev_lock(hdev);
235 *val = hdev->conn_info_max_age;
236 hci_dev_unlock(hdev);
237
238 return 0;
239}
240
241DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
242 conn_info_max_age_set, "%llu\n");
243
244static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
245 size_t count, loff_t *ppos)
246{
247 struct hci_dev *hdev = file->private_data;
248 char buf[3];
249
250 buf[0] = test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
251 buf[1] = '\n';
252 buf[2] = '\0';
253 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
254}
255
256static const struct file_operations use_debug_keys_fops = {
257 .open = simple_open,
258 .read = use_debug_keys_read,
259 .llseek = default_llseek,
260};
261
262static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
263 size_t count, loff_t *ppos)
264{
265 struct hci_dev *hdev = file->private_data;
266 char buf[3];
267
268 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
269 buf[1] = '\n';
270 buf[2] = '\0';
271 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
272}
273
274static const struct file_operations sc_only_mode_fops = {
275 .open = simple_open,
276 .read = sc_only_mode_read,
277 .llseek = default_llseek,
278};
279
280void hci_debugfs_create_common(struct hci_dev *hdev)
281{
282 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
283 &features_fops);
284 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
285 &hdev->manufacturer);
286 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
287 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
288 debugfs_create_u8("hardware_error", 0444, hdev->debugfs,
289 &hdev->hw_error_code);
290
291 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
292 &device_list_fops);
293 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
294 &blacklist_fops);
295 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
296 debugfs_create_file("remote_oob", 0400, hdev->debugfs, hdev,
297 &remote_oob_fops);
298
299 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
300 &conn_info_min_age_fops);
301 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
302 &conn_info_max_age_fops);
303
304 if (lmp_ssp_capable(hdev) || lmp_le_capable(hdev))
305 debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
306 hdev, &use_debug_keys_fops);
307
308 if (lmp_sc_capable(hdev) || lmp_le_capable(hdev))
309 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
310 hdev, &sc_only_mode_fops);
311}
312
313static int inquiry_cache_show(struct seq_file *f, void *p)
314{
315 struct hci_dev *hdev = f->private;
316 struct discovery_state *cache = &hdev->discovery;
317 struct inquiry_entry *e;
318
319 hci_dev_lock(hdev);
320
321 list_for_each_entry(e, &cache->all, all) {
322 struct inquiry_data *data = &e->data;
323 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
324 &data->bdaddr,
325 data->pscan_rep_mode, data->pscan_period_mode,
326 data->pscan_mode, data->dev_class[2],
327 data->dev_class[1], data->dev_class[0],
328 __le16_to_cpu(data->clock_offset),
329 data->rssi, data->ssp_mode, e->timestamp);
330 }
331
332 hci_dev_unlock(hdev);
333
334 return 0;
335}
336
337static int inquiry_cache_open(struct inode *inode, struct file *file)
338{
339 return single_open(file, inquiry_cache_show, inode->i_private);
340}
341
342static const struct file_operations inquiry_cache_fops = {
343 .open = inquiry_cache_open,
344 .read = seq_read,
345 .llseek = seq_lseek,
346 .release = single_release,
347};
348
349static int link_keys_show(struct seq_file *f, void *ptr)
350{
351 struct hci_dev *hdev = f->private;
352 struct link_key *key;
353
354 rcu_read_lock();
355 list_for_each_entry_rcu(key, &hdev->link_keys, list)
356 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
357 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
358 rcu_read_unlock();
359
360 return 0;
361}
362
363static int link_keys_open(struct inode *inode, struct file *file)
364{
365 return single_open(file, link_keys_show, inode->i_private);
366}
367
368static const struct file_operations link_keys_fops = {
369 .open = link_keys_open,
370 .read = seq_read,
371 .llseek = seq_lseek,
372 .release = single_release,
373};
374
375static int dev_class_show(struct seq_file *f, void *ptr)
376{
377 struct hci_dev *hdev = f->private;
378
379 hci_dev_lock(hdev);
380 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
381 hdev->dev_class[1], hdev->dev_class[0]);
382 hci_dev_unlock(hdev);
383
384 return 0;
385}
386
387static int dev_class_open(struct inode *inode, struct file *file)
388{
389 return single_open(file, dev_class_show, inode->i_private);
390}
391
392static const struct file_operations dev_class_fops = {
393 .open = dev_class_open,
394 .read = seq_read,
395 .llseek = seq_lseek,
396 .release = single_release,
397};
398
399static int voice_setting_get(void *data, u64 *val)
400{
401 struct hci_dev *hdev = data;
402
403 hci_dev_lock(hdev);
404 *val = hdev->voice_setting;
405 hci_dev_unlock(hdev);
406
407 return 0;
408}
409
410DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
411 NULL, "0x%4.4llx\n");
412
413static ssize_t ssp_debug_mode_read(struct file *file, char __user *user_buf,
414 size_t count, loff_t *ppos)
415{
416 struct hci_dev *hdev = file->private_data;
417 char buf[3];
418
419 buf[0] = hdev->ssp_debug_mode ? 'Y': 'N';
420 buf[1] = '\n';
421 buf[2] = '\0';
422 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
423}
424
425static const struct file_operations ssp_debug_mode_fops = {
426 .open = simple_open,
427 .read = ssp_debug_mode_read,
428 .llseek = default_llseek,
429};
430
431static int auto_accept_delay_set(void *data, u64 val)
432{
433 struct hci_dev *hdev = data;
434
435 hci_dev_lock(hdev);
436 hdev->auto_accept_delay = val;
437 hci_dev_unlock(hdev);
438
439 return 0;
440}
441
442static int auto_accept_delay_get(void *data, u64 *val)
443{
444 struct hci_dev *hdev = data;
445
446 hci_dev_lock(hdev);
447 *val = hdev->auto_accept_delay;
448 hci_dev_unlock(hdev);
449
450 return 0;
451}
452
453DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
454 auto_accept_delay_set, "%llu\n");
455
456static int idle_timeout_set(void *data, u64 val)
457{
458 struct hci_dev *hdev = data;
459
460 if (val != 0 && (val < 500 || val > 3600000))
461 return -EINVAL;
462
463 hci_dev_lock(hdev);
464 hdev->idle_timeout = val;
465 hci_dev_unlock(hdev);
466
467 return 0;
468}
469
470static int idle_timeout_get(void *data, u64 *val)
471{
472 struct hci_dev *hdev = data;
473
474 hci_dev_lock(hdev);
475 *val = hdev->idle_timeout;
476 hci_dev_unlock(hdev);
477
478 return 0;
479}
480
481DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
482 idle_timeout_set, "%llu\n");
483
484static int sniff_min_interval_set(void *data, u64 val)
485{
486 struct hci_dev *hdev = data;
487
488 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
489 return -EINVAL;
490
491 hci_dev_lock(hdev);
492 hdev->sniff_min_interval = val;
493 hci_dev_unlock(hdev);
494
495 return 0;
496}
497
498static int sniff_min_interval_get(void *data, u64 *val)
499{
500 struct hci_dev *hdev = data;
501
502 hci_dev_lock(hdev);
503 *val = hdev->sniff_min_interval;
504 hci_dev_unlock(hdev);
505
506 return 0;
507}
508
509DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
510 sniff_min_interval_set, "%llu\n");
511
512static int sniff_max_interval_set(void *data, u64 val)
513{
514 struct hci_dev *hdev = data;
515
516 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
517 return -EINVAL;
518
519 hci_dev_lock(hdev);
520 hdev->sniff_max_interval = val;
521 hci_dev_unlock(hdev);
522
523 return 0;
524}
525
526static int sniff_max_interval_get(void *data, u64 *val)
527{
528 struct hci_dev *hdev = data;
529
530 hci_dev_lock(hdev);
531 *val = hdev->sniff_max_interval;
532 hci_dev_unlock(hdev);
533
534 return 0;
535}
536
537DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
538 sniff_max_interval_set, "%llu\n");
539
540void hci_debugfs_create_bredr(struct hci_dev *hdev)
541{
542 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs, hdev,
543 &inquiry_cache_fops);
544 debugfs_create_file("link_keys", 0400, hdev->debugfs, hdev,
545 &link_keys_fops);
546 debugfs_create_file("dev_class", 0444, hdev->debugfs, hdev,
547 &dev_class_fops);
548 debugfs_create_file("voice_setting", 0444, hdev->debugfs, hdev,
549 &voice_setting_fops);
550
551 if (lmp_ssp_capable(hdev)) {
552 debugfs_create_file("ssp_debug_mode", 0444, hdev->debugfs,
553 hdev, &ssp_debug_mode_fops);
554 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
555 hdev, &auto_accept_delay_fops);
556 }
557
558 if (lmp_sniff_capable(hdev)) {
559 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
560 hdev, &idle_timeout_fops);
561 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
562 hdev, &sniff_min_interval_fops);
563 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
564 hdev, &sniff_max_interval_fops);
565 }
566}
567
568static int identity_show(struct seq_file *f, void *p)
569{
570 struct hci_dev *hdev = f->private;
571 bdaddr_t addr;
572 u8 addr_type;
573
574 hci_dev_lock(hdev);
575
576 hci_copy_identity_address(hdev, &addr, &addr_type);
577
578 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
579 16, hdev->irk, &hdev->rpa);
580
581 hci_dev_unlock(hdev);
582
583 return 0;
584}
585
586static int identity_open(struct inode *inode, struct file *file)
587{
588 return single_open(file, identity_show, inode->i_private);
589}
590
591static const struct file_operations identity_fops = {
592 .open = identity_open,
593 .read = seq_read,
594 .llseek = seq_lseek,
595 .release = single_release,
596};
597
598static int rpa_timeout_set(void *data, u64 val)
599{
600 struct hci_dev *hdev = data;
601
602 /* Require the RPA timeout to be at least 30 seconds and at most
603 * 24 hours.
604 */
605 if (val < 30 || val > (60 * 60 * 24))
606 return -EINVAL;
607
608 hci_dev_lock(hdev);
609 hdev->rpa_timeout = val;
610 hci_dev_unlock(hdev);
611
612 return 0;
613}
614
615static int rpa_timeout_get(void *data, u64 *val)
616{
617 struct hci_dev *hdev = data;
618
619 hci_dev_lock(hdev);
620 *val = hdev->rpa_timeout;
621 hci_dev_unlock(hdev);
622
623 return 0;
624}
625
626DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
627 rpa_timeout_set, "%llu\n");
628
629static int random_address_show(struct seq_file *f, void *p)
630{
631 struct hci_dev *hdev = f->private;
632
633 hci_dev_lock(hdev);
634 seq_printf(f, "%pMR\n", &hdev->random_addr);
635 hci_dev_unlock(hdev);
636
637 return 0;
638}
639
640static int random_address_open(struct inode *inode, struct file *file)
641{
642 return single_open(file, random_address_show, inode->i_private);
643}
644
645static const struct file_operations random_address_fops = {
646 .open = random_address_open,
647 .read = seq_read,
648 .llseek = seq_lseek,
649 .release = single_release,
650};
651
652static int static_address_show(struct seq_file *f, void *p)
653{
654 struct hci_dev *hdev = f->private;
655
656 hci_dev_lock(hdev);
657 seq_printf(f, "%pMR\n", &hdev->static_addr);
658 hci_dev_unlock(hdev);
659
660 return 0;
661}
662
663static int static_address_open(struct inode *inode, struct file *file)
664{
665 return single_open(file, static_address_show, inode->i_private);
666}
667
668static const struct file_operations static_address_fops = {
669 .open = static_address_open,
670 .read = seq_read,
671 .llseek = seq_lseek,
672 .release = single_release,
673};
674
675static ssize_t force_static_address_read(struct file *file,
676 char __user *user_buf,
677 size_t count, loff_t *ppos)
678{
679 struct hci_dev *hdev = file->private_data;
680 char buf[3];
681
682 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
683 buf[1] = '\n';
684 buf[2] = '\0';
685 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
686}
687
688static ssize_t force_static_address_write(struct file *file,
689 const char __user *user_buf,
690 size_t count, loff_t *ppos)
691{
692 struct hci_dev *hdev = file->private_data;
693 char buf[32];
694 size_t buf_size = min(count, (sizeof(buf)-1));
695 bool enable;
696
697 if (test_bit(HCI_UP, &hdev->flags))
698 return -EBUSY;
699
700 if (copy_from_user(buf, user_buf, buf_size))
701 return -EFAULT;
702
703 buf[buf_size] = '\0';
704 if (strtobool(buf, &enable))
705 return -EINVAL;
706
707 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
708 return -EALREADY;
709
710 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
711
712 return count;
713}
714
715static const struct file_operations force_static_address_fops = {
716 .open = simple_open,
717 .read = force_static_address_read,
718 .write = force_static_address_write,
719 .llseek = default_llseek,
720};
721
722static int white_list_show(struct seq_file *f, void *ptr)
723{
724 struct hci_dev *hdev = f->private;
725 struct bdaddr_list *b;
726
727 hci_dev_lock(hdev);
728 list_for_each_entry(b, &hdev->le_white_list, list)
729 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
730 hci_dev_unlock(hdev);
731
732 return 0;
733}
734
735static int white_list_open(struct inode *inode, struct file *file)
736{
737 return single_open(file, white_list_show, inode->i_private);
738}
739
740static const struct file_operations white_list_fops = {
741 .open = white_list_open,
742 .read = seq_read,
743 .llseek = seq_lseek,
744 .release = single_release,
745};
746
747static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
748{
749 struct hci_dev *hdev = f->private;
750 struct smp_irk *irk;
751
752 rcu_read_lock();
753 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
754 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
755 &irk->bdaddr, irk->addr_type,
756 16, irk->val, &irk->rpa);
757 }
758 rcu_read_unlock();
759
760 return 0;
761}
762
763static int identity_resolving_keys_open(struct inode *inode, struct file *file)
764{
765 return single_open(file, identity_resolving_keys_show,
766 inode->i_private);
767}
768
769static const struct file_operations identity_resolving_keys_fops = {
770 .open = identity_resolving_keys_open,
771 .read = seq_read,
772 .llseek = seq_lseek,
773 .release = single_release,
774};
775
776static int long_term_keys_show(struct seq_file *f, void *ptr)
777{
778 struct hci_dev *hdev = f->private;
779 struct smp_ltk *ltk;
780
781 rcu_read_lock();
782 list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
783 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
784 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
785 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
786 __le64_to_cpu(ltk->rand), 16, ltk->val);
787 rcu_read_unlock();
788
789 return 0;
790}
791
792static int long_term_keys_open(struct inode *inode, struct file *file)
793{
794 return single_open(file, long_term_keys_show, inode->i_private);
795}
796
797static const struct file_operations long_term_keys_fops = {
798 .open = long_term_keys_open,
799 .read = seq_read,
800 .llseek = seq_lseek,
801 .release = single_release,
802};
803
804static int conn_min_interval_set(void *data, u64 val)
805{
806 struct hci_dev *hdev = data;
807
808 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
809 return -EINVAL;
810
811 hci_dev_lock(hdev);
812 hdev->le_conn_min_interval = val;
813 hci_dev_unlock(hdev);
814
815 return 0;
816}
817
818static int conn_min_interval_get(void *data, u64 *val)
819{
820 struct hci_dev *hdev = data;
821
822 hci_dev_lock(hdev);
823 *val = hdev->le_conn_min_interval;
824 hci_dev_unlock(hdev);
825
826 return 0;
827}
828
829DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
830 conn_min_interval_set, "%llu\n");
831
832static int conn_max_interval_set(void *data, u64 val)
833{
834 struct hci_dev *hdev = data;
835
836 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
837 return -EINVAL;
838
839 hci_dev_lock(hdev);
840 hdev->le_conn_max_interval = val;
841 hci_dev_unlock(hdev);
842
843 return 0;
844}
845
846static int conn_max_interval_get(void *data, u64 *val)
847{
848 struct hci_dev *hdev = data;
849
850 hci_dev_lock(hdev);
851 *val = hdev->le_conn_max_interval;
852 hci_dev_unlock(hdev);
853
854 return 0;
855}
856
857DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
858 conn_max_interval_set, "%llu\n");
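
The interval limits enforced by the two setters above are in the controller's native units of 1.25 ms, so the accepted range 0x0006-0x0c80 corresponds to 7.5 ms-4 s, and the 0x0038 default from hci_alloc_dev() comes out at 70 ms. A quick conversion sketch (interval_units_to_us() is an illustrative helper):

#include <stdint.h>
#include <stdio.h>

/* LE connection interval values are in units of 1.25 ms. */
static unsigned int interval_units_to_us(uint16_t val)
{
        return (unsigned int)val * 1250;
}

int main(void)
{
        printf("0x0006 -> %u us\n", interval_units_to_us(0x0006)); /* 7500, lower bound */
        printf("0x0038 -> %u us\n", interval_units_to_us(0x0038)); /* 70000, default max */
        printf("0x0c80 -> %u us\n", interval_units_to_us(0x0c80)); /* 4000000, upper bound */
        return 0;
}
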
859
860static int conn_latency_set(void *data, u64 val)
861{
862 struct hci_dev *hdev = data;
863
864 if (val > 0x01f3)
865 return -EINVAL;
866
867 hci_dev_lock(hdev);
868 hdev->le_conn_latency = val;
869 hci_dev_unlock(hdev);
870
871 return 0;
872}
873
874static int conn_latency_get(void *data, u64 *val)
875{
876 struct hci_dev *hdev = data;
877
878 hci_dev_lock(hdev);
879 *val = hdev->le_conn_latency;
880 hci_dev_unlock(hdev);
881
882 return 0;
883}
884
885DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
886 conn_latency_set, "%llu\n");
887
888static int supervision_timeout_set(void *data, u64 val)
889{
890 struct hci_dev *hdev = data;
891
892 if (val < 0x000a || val > 0x0c80)
893 return -EINVAL;
894
895 hci_dev_lock(hdev);
896 hdev->le_supv_timeout = val;
897 hci_dev_unlock(hdev);
898
899 return 0;
900}
901
902static int supervision_timeout_get(void *data, u64 *val)
903{
904 struct hci_dev *hdev = data;
905
906 hci_dev_lock(hdev);
907 *val = hdev->le_supv_timeout;
908 hci_dev_unlock(hdev);
909
910 return 0;
911}
912
913DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
914 supervision_timeout_set, "%llu\n");
915
916static int adv_channel_map_set(void *data, u64 val)
917{
918 struct hci_dev *hdev = data;
919
920 if (val < 0x01 || val > 0x07)
921 return -EINVAL;
922
923 hci_dev_lock(hdev);
924 hdev->le_adv_channel_map = val;
925 hci_dev_unlock(hdev);
926
927 return 0;
928}
929
930static int adv_channel_map_get(void *data, u64 *val)
931{
932 struct hci_dev *hdev = data;
933
934 hci_dev_lock(hdev);
935 *val = hdev->le_adv_channel_map;
936 hci_dev_unlock(hdev);
937
938 return 0;
939}
940
941DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
942 adv_channel_map_set, "%llu\n");
943
944static int adv_min_interval_set(void *data, u64 val)
945{
946 struct hci_dev *hdev = data;
947
948 if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
949 return -EINVAL;
950
951 hci_dev_lock(hdev);
952 hdev->le_adv_min_interval = val;
953 hci_dev_unlock(hdev);
954
955 return 0;
956}
957
958static int adv_min_interval_get(void *data, u64 *val)
959{
960 struct hci_dev *hdev = data;
961
962 hci_dev_lock(hdev);
963 *val = hdev->le_adv_min_interval;
964 hci_dev_unlock(hdev);
965
966 return 0;
967}
968
969DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
970 adv_min_interval_set, "%llu\n");
971
972static int adv_max_interval_set(void *data, u64 val)
973{
974 struct hci_dev *hdev = data;
975
976 if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
977 return -EINVAL;
978
979 hci_dev_lock(hdev);
980 hdev->le_adv_max_interval = val;
981 hci_dev_unlock(hdev);
982
983 return 0;
984}
985
986static int adv_max_interval_get(void *data, u64 *val)
987{
988 struct hci_dev *hdev = data;
989
990 hci_dev_lock(hdev);
991 *val = hdev->le_adv_max_interval;
992 hci_dev_unlock(hdev);
993
994 return 0;
995}
996
997DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
998 adv_max_interval_set, "%llu\n");
999
1000void hci_debugfs_create_le(struct hci_dev *hdev)
1001{
1002 debugfs_create_file("identity", 0400, hdev->debugfs, hdev,
1003 &identity_fops);
1004 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs, hdev,
1005 &rpa_timeout_fops);
1006 debugfs_create_file("random_address", 0444, hdev->debugfs, hdev,
1007 &random_address_fops);
1008 debugfs_create_file("static_address", 0444, hdev->debugfs, hdev,
1009 &static_address_fops);
1010
1011 /* For controllers with a public address, provide a debug
1012 * option to force the usage of the configured static
1013 * address. By default the public address is used.
1014 */
1015 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1016 debugfs_create_file("force_static_address", 0644,
1017 hdev->debugfs, hdev,
1018 &force_static_address_fops);
1019
1020 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1021 &hdev->le_white_list_size);
1022 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1023 &white_list_fops);
1024 debugfs_create_file("identity_resolving_keys", 0400, hdev->debugfs,
1025 hdev, &identity_resolving_keys_fops);
1026 debugfs_create_file("long_term_keys", 0400, hdev->debugfs, hdev,
1027 &long_term_keys_fops);
1028 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs, hdev,
1029 &conn_min_interval_fops);
1030 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs, hdev,
1031 &conn_max_interval_fops);
1032 debugfs_create_file("conn_latency", 0644, hdev->debugfs, hdev,
1033 &conn_latency_fops);
1034 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs, hdev,
1035 &supervision_timeout_fops);
1036 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs, hdev,
1037 &adv_channel_map_fops);
1038 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs, hdev,
1039 &adv_min_interval_fops);
1040 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs, hdev,
1041 &adv_max_interval_fops);
1042 debugfs_create_u16("discov_interleaved_timeout", 0644, hdev->debugfs,
1043 &hdev->discov_interleaved_timeout);
1044}
1045
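All of the entries created above end up under the controller's debugfs directory. Assuming debugfs is mounted at /sys/kernel/debug and the controller is hci0 (both assumptions, not guaranteed by the patch), a small userspace sketch that reads and then updates one of the %llu attributes:

#include <stdio.h>

int main(void)
{
        /* Assumed path; adjust for the mount point and controller index */
        const char *path = "/sys/kernel/debug/bluetooth/hci0/adv_min_interval";
        unsigned long long val;
        FILE *f;

        f = fopen(path, "r");
        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fscanf(f, "%llu", &val) == 1)
                printf("adv_min_interval: %llu\n", val);
        fclose(f);

        /* Writing requires root and a value that adv_min_interval_set() accepts */
        f = fopen(path, "w");
        if (f) {
                fputs("160\n", f);      /* 0x00a0 = 100 ms in 0.625 ms units */
                fclose(f);
        }
        return 0;
}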
1046void hci_debugfs_create_conn(struct hci_conn *conn)
1047{
1048 struct hci_dev *hdev = conn->hdev;
1049 char name[6];
1050
1051 if (IS_ERR_OR_NULL(hdev->debugfs))
1052 return;
1053
1054 snprintf(name, sizeof(name), "%u", conn->handle);
1055 conn->debugfs = debugfs_create_dir(name, hdev->debugfs);
1056}
diff --git a/net/bluetooth/hci_debugfs.h b/net/bluetooth/hci_debugfs.h
new file mode 100644
index 000000000000..fb68efe083c5
--- /dev/null
+++ b/net/bluetooth/hci_debugfs.h
@@ -0,0 +1,26 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2014 Intel Corporation
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License version 2 as
7 published by the Free Software Foundation;
8
9 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
10 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
11 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
12 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
13 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17
18 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
19 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
20 SOFTWARE IS DISCLAIMED.
21*/
22
23void hci_debugfs_create_common(struct hci_dev *hdev);
24void hci_debugfs_create_bredr(struct hci_dev *hdev);
25void hci_debugfs_create_le(struct hci_dev *hdev);
26void hci_debugfs_create_conn(struct hci_conn *conn);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 3f2e8b830cbd..a3fb094822b6 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -30,10 +30,15 @@
30#include <net/bluetooth/hci_core.h> 30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/mgmt.h> 31#include <net/bluetooth/mgmt.h>
32 32
33#include "hci_request.h"
34#include "hci_debugfs.h"
33#include "a2mp.h" 35#include "a2mp.h"
34#include "amp.h" 36#include "amp.h"
35#include "smp.h" 37#include "smp.h"
36 38
39#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
41
37/* Handle HCI Event packets */ 42/* Handle HCI Event packets */
38 43
39static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) 44static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
@@ -195,7 +200,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
195 /* Reset all non-persistent flags */ 200 /* Reset all non-persistent flags */
196 hdev->dev_flags &= ~HCI_PERSISTENT_MASK; 201 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
197 202
198 hdev->discovery.state = DISCOVERY_STOPPED; 203 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
204
199 hdev->inq_tx_power = HCI_TX_POWER_INVALID; 205 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
200 hdev->adv_tx_power = HCI_TX_POWER_INVALID; 206 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
201 207
@@ -212,6 +218,40 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
212 hci_bdaddr_list_clear(&hdev->le_white_list); 218 hci_bdaddr_list_clear(&hdev->le_white_list);
213} 219}
214 220
221static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
222 struct sk_buff *skb)
223{
224 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
225 struct hci_cp_read_stored_link_key *sent;
226
227 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
228
229 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
230 if (!sent)
231 return;
232
233 if (!rp->status && sent->read_all == 0x01) {
234 hdev->stored_max_keys = rp->max_keys;
235 hdev->stored_num_keys = rp->num_keys;
236 }
237}
238
239static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
240 struct sk_buff *skb)
241{
242 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
243
244 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
245
246 if (rp->status)
247 return;
248
249 if (rp->num_keys <= hdev->stored_num_keys)
250 hdev->stored_num_keys -= rp->num_keys;
251 else
252 hdev->stored_num_keys = 0;
253}
254
215static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) 255static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
216{ 256{
217 __u8 status = *((__u8 *) skb->data); 257 __u8 status = *((__u8 *) skb->data);
@@ -489,9 +529,7 @@ static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
489 hdev->features[1][0] &= ~LMP_HOST_SC; 529 hdev->features[1][0] &= ~LMP_HOST_SC;
490 } 530 }
491 531
492 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 532 if (!test_bit(HCI_MGMT, &hdev->dev_flags) && !status) {
493 mgmt_sc_enable_complete(hdev, sent->support, status);
494 else if (!status) {
495 if (sent->support) 533 if (sent->support)
496 set_bit(HCI_SC_ENABLED, &hdev->dev_flags); 534 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
497 else 535 else
@@ -1282,6 +1320,55 @@ static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1282 memcpy(hdev->le_states, rp->le_states, 8); 1320 memcpy(hdev->le_states, rp->le_states, 8);
1283} 1321}
1284 1322
1323static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1324 struct sk_buff *skb)
1325{
1326 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1327
1328 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1329
1330 if (rp->status)
1331 return;
1332
1333 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1334 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1335}
1336
1337static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1338 struct sk_buff *skb)
1339{
1340 struct hci_cp_le_write_def_data_len *sent;
1341 __u8 status = *((__u8 *) skb->data);
1342
1343 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1344
1345 if (status)
1346 return;
1347
1348 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1349 if (!sent)
1350 return;
1351
1352 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1353 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1354}
1355
1356static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1357 struct sk_buff *skb)
1358{
1359 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1360
1361 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1362
1363 if (rp->status)
1364 return;
1365
1366 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1367 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1368 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1369 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1370}
1371
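The three handlers above simply copy little-endian 16-bit fields out of the command complete payload. A standalone sketch parsing such a Read Maximum Data Length reply from a raw byte buffer; the field order (status, tx_len, tx_time, rx_len, rx_time) is inferred from the struct accesses above and the sample values are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Read a little-endian u16 from a byte buffer */
static uint16_t get_le16(const uint8_t *p)
{
        return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
        /* Assumed layout: status, tx_len, tx_time, rx_len, rx_time */
        const uint8_t reply[] = { 0x00, 0xfb, 0x00, 0x90, 0x42,
                                  0xfb, 0x00, 0x90, 0x42 };

        if (reply[0] != 0x00) {
                printf("command failed: 0x%02x\n", reply[0]);
                return 1;
        }

        printf("max tx: %u octets / %u us\n",
               get_le16(&reply[1]), get_le16(&reply[3]));
        printf("max rx: %u octets / %u us\n",
               get_le16(&reply[5]), get_le16(&reply[7]));
        return 0;
}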
1285static void hci_cc_write_le_host_supported(struct hci_dev *hdev, 1372static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1286 struct sk_buff *skb) 1373 struct sk_buff *skb)
1287{ 1374{
@@ -1402,6 +1489,21 @@ unlock:
1402 hci_dev_unlock(hdev); 1489 hci_dev_unlock(hdev);
1403} 1490}
1404 1491
1492static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1493{
1494 u8 status = *((u8 *) skb->data);
1495 u8 *mode;
1496
1497 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1498
1499 if (status)
1500 return;
1501
1502 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1503 if (mode)
1504 hdev->ssp_debug_mode = *mode;
1505}
1506
1405static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 1507static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1406{ 1508{
1407 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1509 BT_DBG("%s status 0x%2.2x", hdev->name, status);
@@ -2115,6 +2217,7 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2115 } else 2217 } else
2116 conn->state = BT_CONNECTED; 2218 conn->state = BT_CONNECTED;
2117 2219
2220 hci_debugfs_create_conn(conn);
2118 hci_conn_add_sysfs(conn); 2221 hci_conn_add_sysfs(conn);
2119 2222
2120 if (test_bit(HCI_AUTH, &hdev->flags)) 2223 if (test_bit(HCI_AUTH, &hdev->flags))
@@ -2130,7 +2233,7 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2130 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, 2233 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2131 sizeof(cp), &cp); 2234 sizeof(cp), &cp);
2132 2235
2133 hci_update_page_scan(hdev, NULL); 2236 hci_update_page_scan(hdev);
2134 } 2237 }
2135 2238
2136 /* Set packet type for incoming connection */ 2239 /* Set packet type for incoming connection */
@@ -2316,7 +2419,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2316 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) 2419 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2317 hci_remove_link_key(hdev, &conn->dst); 2420 hci_remove_link_key(hdev, &conn->dst);
2318 2421
2319 hci_update_page_scan(hdev, NULL); 2422 hci_update_page_scan(hdev);
2320 } 2423 }
2321 2424
2322 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 2425 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
@@ -2583,7 +2686,8 @@ static void hci_remote_features_evt(struct hci_dev *hdev,
2583 if (conn->state != BT_CONFIG) 2686 if (conn->state != BT_CONFIG)
2584 goto unlock; 2687 goto unlock;
2585 2688
2586 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) { 2689 if (!ev->status && lmp_ext_feat_capable(hdev) &&
2690 lmp_ext_feat_capable(conn)) {
2587 struct hci_cp_read_remote_ext_features cp; 2691 struct hci_cp_read_remote_ext_features cp;
2588 cp.handle = ev->handle; 2692 cp.handle = ev->handle;
2589 cp.page = 0x01; 2693 cp.page = 0x01;
@@ -2662,6 +2766,14 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2662 hci_cc_reset(hdev, skb); 2766 hci_cc_reset(hdev, skb);
2663 break; 2767 break;
2664 2768
2769 case HCI_OP_READ_STORED_LINK_KEY:
2770 hci_cc_read_stored_link_key(hdev, skb);
2771 break;
2772
2773 case HCI_OP_DELETE_STORED_LINK_KEY:
2774 hci_cc_delete_stored_link_key(hdev, skb);
2775 break;
2776
2665 case HCI_OP_WRITE_LOCAL_NAME: 2777 case HCI_OP_WRITE_LOCAL_NAME:
2666 hci_cc_write_local_name(hdev, skb); 2778 hci_cc_write_local_name(hdev, skb);
2667 break; 2779 break;
@@ -2854,6 +2966,18 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2854 hci_cc_le_read_supported_states(hdev, skb); 2966 hci_cc_le_read_supported_states(hdev, skb);
2855 break; 2967 break;
2856 2968
2969 case HCI_OP_LE_READ_DEF_DATA_LEN:
2970 hci_cc_le_read_def_data_len(hdev, skb);
2971 break;
2972
2973 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
2974 hci_cc_le_write_def_data_len(hdev, skb);
2975 break;
2976
2977 case HCI_OP_LE_READ_MAX_DATA_LEN:
2978 hci_cc_le_read_max_data_len(hdev, skb);
2979 break;
2980
2857 case HCI_OP_WRITE_LE_HOST_SUPPORTED: 2981 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2858 hci_cc_write_le_host_supported(hdev, skb); 2982 hci_cc_write_le_host_supported(hdev, skb);
2859 break; 2983 break;
@@ -2874,6 +2998,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2874 hci_cc_read_tx_power(hdev, skb); 2998 hci_cc_read_tx_power(hdev, skb);
2875 break; 2999 break;
2876 3000
3001 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3002 hci_cc_write_ssp_debug_mode(hdev, skb);
3003 break;
3004
2877 default: 3005 default:
2878 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); 3006 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2879 break; 3007 break;
@@ -2992,7 +3120,9 @@ static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
2992{ 3120{
2993 struct hci_ev_hardware_error *ev = (void *) skb->data; 3121 struct hci_ev_hardware_error *ev = (void *) skb->data;
2994 3122
2995 BT_ERR("%s hardware error 0x%2.2x", hdev->name, ev->code); 3123 hdev->hw_error_code = ev->code;
3124
3125 queue_work(hdev->req_workqueue, &hdev->error_reset);
2996} 3126}
2997 3127
2998static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 3128static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -3584,6 +3714,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3584 conn->handle = __le16_to_cpu(ev->handle); 3714 conn->handle = __le16_to_cpu(ev->handle);
3585 conn->state = BT_CONNECTED; 3715 conn->state = BT_CONNECTED;
3586 3716
3717 hci_debugfs_create_conn(conn);
3587 hci_conn_add_sysfs(conn); 3718 hci_conn_add_sysfs(conn);
3588 break; 3719 break;
3589 3720
@@ -3750,6 +3881,52 @@ static u8 hci_get_auth_req(struct hci_conn *conn)
3750 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01); 3881 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3751} 3882}
3752 3883
3884static u8 bredr_oob_data_present(struct hci_conn *conn)
3885{
3886 struct hci_dev *hdev = conn->hdev;
3887 struct oob_data *data;
3888
3889 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
3890 if (!data)
3891 return 0x00;
3892
3893 if (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) {
3894 if (bredr_sc_enabled(hdev)) {
3895 /* When Secure Connections is enabled, then just
3896 * return the present value stored with the OOB
3897 * data. The stored value contains the right present
3898 * information. However it can only be trusted when
3899 * not in Secure Connection Only mode.
3900 */
3901 if (!test_bit(HCI_SC_ONLY, &hdev->dev_flags))
3902 return data->present;
3903
3904 /* When Secure Connections Only mode is enabled, then
3905 * the P-256 values are required. If they are not
3906 * available, then do not declare that OOB data is
3907 * present.
3908 */
3909 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
3910 !memcmp(data->hash256, ZERO_KEY, 16))
3911 return 0x00;
3912
3913 return 0x02;
3914 }
3915
3916 /* When Secure Connections is not enabled or actually
 3917 * not supported by the hardware, then check whether the
3918 * P-192 data values are present.
3919 */
3920 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
3921 !memcmp(data->hash192, ZERO_KEY, 16))
3922 return 0x00;
3923
3924 return 0x01;
3925 }
3926
3927 return 0x00;
3928}
3929
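The helper above maps the stored OOB data onto the values the IO Capability Reply can report: 0x00 (no OOB data), 0x01 (P-192 data present) or 0x02 (P-256 data present), with the stored present value passed through when Secure Connections is enabled but Secure Connections Only mode is not. A simplified userspace restatement of that decision, with the hash/randomizer checks and the outgoing/remote-OOB check reduced to booleans (a sketch, not the kernel logic verbatim):

#include <stdbool.h>
#include <stdio.h>

/* Decide what to report in the IO Capability Reply, following the
 * structure of bredr_oob_data_present() above.
 */
static unsigned char oob_present(bool have_data, bool sc_enabled,
                                 bool sc_only, bool p192_valid,
                                 bool p256_valid,
                                 unsigned char stored_present)
{
        if (!have_data)
                return 0x00;

        if (sc_enabled) {
                if (!sc_only)
                        return stored_present;   /* trust the stored value */
                return p256_valid ? 0x02 : 0x00; /* SC Only needs P-256 */
        }

        return p192_valid ? 0x01 : 0x00;         /* legacy pairing needs P-192 */
}

int main(void)
{
        printf("SC Only, valid P-256: 0x%02x\n",
               oob_present(true, true, true, false, true, 0x03));
        printf("no SC, valid P-192:   0x%02x\n",
               oob_present(true, false, false, true, false, 0x01));
        return 0;
}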
3753static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 3930static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3754{ 3931{
3755 struct hci_ev_io_capa_request *ev = (void *) skb->data; 3932 struct hci_ev_io_capa_request *ev = (void *) skb->data;
@@ -3801,12 +3978,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3801 conn->auth_type &= HCI_AT_NO_BONDING_MITM; 3978 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
3802 3979
3803 cp.authentication = conn->auth_type; 3980 cp.authentication = conn->auth_type;
3804 3981 cp.oob_data = bredr_oob_data_present(conn);
3805 if (hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR) &&
3806 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3807 cp.oob_data = 0x01;
3808 else
3809 cp.oob_data = 0x00;
3810 3982
3811 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, 3983 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3812 sizeof(cp), &cp); 3984 sizeof(cp), &cp);
@@ -4058,33 +4230,39 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4058 goto unlock; 4230 goto unlock;
4059 4231
4060 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR); 4232 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4061 if (data) { 4233 if (!data) {
4062 if (bredr_sc_enabled(hdev)) { 4234 struct hci_cp_remote_oob_data_neg_reply cp;
4063 struct hci_cp_remote_oob_ext_data_reply cp;
4064
4065 bacpy(&cp.bdaddr, &ev->bdaddr);
4066 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4067 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4068 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4069 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4070 4235
4071 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY, 4236 bacpy(&cp.bdaddr, &ev->bdaddr);
4072 sizeof(cp), &cp); 4237 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4073 } else { 4238 sizeof(cp), &cp);
4074 struct hci_cp_remote_oob_data_reply cp; 4239 goto unlock;
4240 }
4075 4241
4076 bacpy(&cp.bdaddr, &ev->bdaddr); 4242 if (bredr_sc_enabled(hdev)) {
4077 memcpy(cp.hash, data->hash192, sizeof(cp.hash)); 4243 struct hci_cp_remote_oob_ext_data_reply cp;
4078 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4079 4244
4080 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, 4245 bacpy(&cp.bdaddr, &ev->bdaddr);
4081 sizeof(cp), &cp); 4246 if (test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4247 memset(cp.hash192, 0, sizeof(cp.hash192));
4248 memset(cp.rand192, 0, sizeof(cp.rand192));
4249 } else {
4250 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4251 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4082 } 4252 }
4253 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4254 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4255
4256 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4257 sizeof(cp), &cp);
4083 } else { 4258 } else {
4084 struct hci_cp_remote_oob_data_neg_reply cp; 4259 struct hci_cp_remote_oob_data_reply cp;
4085 4260
4086 bacpy(&cp.bdaddr, &ev->bdaddr); 4261 bacpy(&cp.bdaddr, &ev->bdaddr);
4087 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, 4262 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4263 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4264
4265 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4088 sizeof(cp), &cp); 4266 sizeof(cp), &cp);
4089 } 4267 }
4090 4268
@@ -4124,6 +4302,7 @@ static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4124 hcon->disc_timeout = HCI_DISCONN_TIMEOUT; 4302 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4125 hci_conn_drop(hcon); 4303 hci_conn_drop(hcon);
4126 4304
4305 hci_debugfs_create_conn(hcon);
4127 hci_conn_add_sysfs(hcon); 4306 hci_conn_add_sysfs(hcon);
4128 4307
4129 amp_physical_cfm(bredr_hcon, hcon); 4308 amp_physical_cfm(bredr_hcon, hcon);
@@ -4330,6 +4509,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4330 conn->le_conn_latency = le16_to_cpu(ev->latency); 4509 conn->le_conn_latency = le16_to_cpu(ev->latency);
4331 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout); 4510 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4332 4511
4512 hci_debugfs_create_conn(conn);
4333 hci_conn_add_sysfs(conn); 4513 hci_conn_add_sysfs(conn);
4334 4514
4335 hci_proto_connect_cfm(conn, ev->status); 4515 hci_proto_connect_cfm(conn, ev->status);
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
new file mode 100644
index 000000000000..b59f92c6df0c
--- /dev/null
+++ b/net/bluetooth/hci_request.c
@@ -0,0 +1,556 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2014 Intel Corporation
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
22*/
23
24#include <net/bluetooth/bluetooth.h>
25#include <net/bluetooth/hci_core.h>
26
27#include "smp.h"
28#include "hci_request.h"
29
30void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
31{
32 skb_queue_head_init(&req->cmd_q);
33 req->hdev = hdev;
34 req->err = 0;
35}
36
37int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
38{
39 struct hci_dev *hdev = req->hdev;
40 struct sk_buff *skb;
41 unsigned long flags;
42
43 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
44
45 /* If an error occurred during request building, remove all HCI
46 * commands queued on the HCI request queue.
47 */
48 if (req->err) {
49 skb_queue_purge(&req->cmd_q);
50 return req->err;
51 }
52
53 /* Do not allow empty requests */
54 if (skb_queue_empty(&req->cmd_q))
55 return -ENODATA;
56
57 skb = skb_peek_tail(&req->cmd_q);
58 bt_cb(skb)->req.complete = complete;
59
60 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
61 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
62 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
63
64 queue_work(hdev->workqueue, &hdev->cmd_work);
65
66 return 0;
67}
68
69struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
70 const void *param)
71{
72 int len = HCI_COMMAND_HDR_SIZE + plen;
73 struct hci_command_hdr *hdr;
74 struct sk_buff *skb;
75
76 skb = bt_skb_alloc(len, GFP_ATOMIC);
77 if (!skb)
78 return NULL;
79
80 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
81 hdr->opcode = cpu_to_le16(opcode);
82 hdr->plen = plen;
83
84 if (plen)
85 memcpy(skb_put(skb, plen), param, plen);
86
87 BT_DBG("skb len %d", skb->len);
88
89 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
90 bt_cb(skb)->opcode = opcode;
91
92 return skb;
93}
94
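hci_prepare_cmd() lays a command out as a 16-bit little-endian opcode, a one-byte parameter length and the parameters themselves. A standalone sketch that builds the same layout into a flat buffer; the opcode and parameter used here are examples only:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CMD_HDR_SIZE 3  /* opcode (le16) + plen (u8) */

/* Serialize an HCI command the same way hci_prepare_cmd() does */
static size_t build_hci_cmd(uint8_t *buf, uint16_t opcode,
                            const void *param, uint8_t plen)
{
        buf[0] = opcode & 0xff;         /* little-endian opcode */
        buf[1] = opcode >> 8;
        buf[2] = plen;
        if (plen)
                memcpy(buf + CMD_HDR_SIZE, param, plen);
        return CMD_HDR_SIZE + plen;
}

int main(void)
{
        uint8_t buf[32];
        uint8_t scan = 0x02;    /* example parameter value */
        size_t len, i;

        /* 0x0c1a is Write Scan Enable (OGF 0x03, OCF 0x001a) */
        len = build_hci_cmd(buf, 0x0c1a, &scan, sizeof(scan));

        for (i = 0; i < len; i++)
                printf("%02x ", buf[i]);
        printf("\n");
        return 0;
}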
95/* Queue a command to an asynchronous HCI request */
96void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
97 const void *param, u8 event)
98{
99 struct hci_dev *hdev = req->hdev;
100 struct sk_buff *skb;
101
102 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
103
104 /* If an error occurred during request building, there is no point in
105 * queueing the HCI command. We can simply return.
106 */
107 if (req->err)
108 return;
109
110 skb = hci_prepare_cmd(hdev, opcode, plen, param);
111 if (!skb) {
112 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
113 hdev->name, opcode);
114 req->err = -ENOMEM;
115 return;
116 }
117
118 if (skb_queue_empty(&req->cmd_q))
119 bt_cb(skb)->req.start = true;
120
121 bt_cb(skb)->req.event = event;
122
123 skb_queue_tail(&req->cmd_q, skb);
124}
125
126void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
127 const void *param)
128{
129 hci_req_add_ev(req, opcode, plen, param, 0);
130}
131
132void hci_req_add_le_scan_disable(struct hci_request *req)
133{
134 struct hci_cp_le_set_scan_enable cp;
135
136 memset(&cp, 0, sizeof(cp));
137 cp.enable = LE_SCAN_DISABLE;
138 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
139}
140
141static void add_to_white_list(struct hci_request *req,
142 struct hci_conn_params *params)
143{
144 struct hci_cp_le_add_to_white_list cp;
145
146 cp.bdaddr_type = params->addr_type;
147 bacpy(&cp.bdaddr, &params->addr);
148
149 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
150}
151
152static u8 update_white_list(struct hci_request *req)
153{
154 struct hci_dev *hdev = req->hdev;
155 struct hci_conn_params *params;
156 struct bdaddr_list *b;
157 uint8_t white_list_entries = 0;
158
159 /* Go through the current white list programmed into the
160 * controller one by one and check if that address is still
161 * in the list of pending connections or list of devices to
162 * report. If not present in either list, then queue the
163 * command to remove it from the controller.
164 */
165 list_for_each_entry(b, &hdev->le_white_list, list) {
166 struct hci_cp_le_del_from_white_list cp;
167
168 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
169 &b->bdaddr, b->bdaddr_type) ||
170 hci_pend_le_action_lookup(&hdev->pend_le_reports,
171 &b->bdaddr, b->bdaddr_type)) {
172 white_list_entries++;
173 continue;
174 }
175
176 cp.bdaddr_type = b->bdaddr_type;
177 bacpy(&cp.bdaddr, &b->bdaddr);
178
179 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
180 sizeof(cp), &cp);
181 }
182
 183 /* Since all white list entries that are no longer valid have
 184 * been removed, walk through the list of pending connections
185 * and ensure that any new device gets programmed into
186 * the controller.
187 *
 188 * If the list of devices is larger than the number of available
 189 * white list entries in the controller, then just abort and
 190 * return the filter policy value that does not use the
 191 * white list.
192 */
193 list_for_each_entry(params, &hdev->pend_le_conns, action) {
194 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
195 &params->addr, params->addr_type))
196 continue;
197
198 if (white_list_entries >= hdev->le_white_list_size) {
199 /* Select filter policy to accept all advertising */
200 return 0x00;
201 }
202
203 if (hci_find_irk_by_addr(hdev, &params->addr,
204 params->addr_type)) {
205 /* White list can not be used with RPAs */
206 return 0x00;
207 }
208
209 white_list_entries++;
210 add_to_white_list(req, params);
211 }
212
213 /* After adding all new pending connections, walk through
214 * the list of pending reports and also add these to the
215 * white list if there is still space.
216 */
217 list_for_each_entry(params, &hdev->pend_le_reports, action) {
218 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
219 &params->addr, params->addr_type))
220 continue;
221
222 if (white_list_entries >= hdev->le_white_list_size) {
223 /* Select filter policy to accept all advertising */
224 return 0x00;
225 }
226
227 if (hci_find_irk_by_addr(hdev, &params->addr,
228 params->addr_type)) {
229 /* White list can not be used with RPAs */
230 return 0x00;
231 }
232
233 white_list_entries++;
234 add_to_white_list(req, params);
235 }
236
237 /* Select filter policy to use white list */
238 return 0x01;
239}
240
241void hci_req_add_le_passive_scan(struct hci_request *req)
242{
243 struct hci_cp_le_set_scan_param param_cp;
244 struct hci_cp_le_set_scan_enable enable_cp;
245 struct hci_dev *hdev = req->hdev;
246 u8 own_addr_type;
247 u8 filter_policy;
248
 249 /* Set require_privacy to false since no SCAN_REQ PDUs are sent
 250 * during passive scanning. Not using a non-resolvable address
 251 * here is important so that peer devices using directed
252 * advertising with our address will be correctly reported
253 * by the controller.
254 */
255 if (hci_update_random_address(req, false, &own_addr_type))
256 return;
257
258 /* Adding or removing entries from the white list must
259 * happen before enabling scanning. The controller does
260 * not allow white list modification while scanning.
261 */
262 filter_policy = update_white_list(req);
263
 264 /* When the controller is using resolvable random addresses, i.e.
 265 * LE privacy is enabled, controllers that support Extended
 266 * Scanner Filter Policies can additionally handle directed
 267 * advertising.
268 *
 269 * So instead of using filter policies 0x00 (no whitelist)
270 * and 0x01 (whitelist enabled) use the new filter policies
271 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
272 */
273 if (test_bit(HCI_PRIVACY, &hdev->dev_flags) &&
274 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
275 filter_policy |= 0x02;
276
277 memset(&param_cp, 0, sizeof(param_cp));
278 param_cp.type = LE_SCAN_PASSIVE;
279 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
280 param_cp.window = cpu_to_le16(hdev->le_scan_window);
281 param_cp.own_address_type = own_addr_type;
282 param_cp.filter_policy = filter_policy;
283 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
284 &param_cp);
285
286 memset(&enable_cp, 0, sizeof(enable_cp));
287 enable_cp.enable = LE_SCAN_ENABLE;
288 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
289 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
290 &enable_cp);
291}
292
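Taken together, update_white_list() and the privacy check above select one of four scan filter policies: 0x00 (accept all advertising), 0x01 (white list only), or, on controllers with Extended Scanner Filter Policies, 0x02/0x03 which additionally accept directed advertising sent to a resolvable private address. A compact restatement of that selection (a sketch of the decision only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Mirror the filter policy selection of update_white_list() plus the
 * privacy/extended-policy adjustment in hci_req_add_le_passive_scan().
 */
static unsigned char select_filter_policy(bool white_list_usable,
                                          bool privacy_enabled,
                                          bool ext_scan_policy_supported)
{
        unsigned char policy = white_list_usable ? 0x01 : 0x00;

        if (privacy_enabled && ext_scan_policy_supported)
                policy |= 0x02;

        return policy;
}

int main(void)
{
        printf("no white list, privacy + extended policies: 0x%02x\n",
               select_filter_policy(false, true, true));
        printf("white list usable, no privacy:              0x%02x\n",
               select_filter_policy(true, false, false));
        return 0;
}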
293static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
294{
295 struct hci_dev *hdev = req->hdev;
296
297 /* If we're advertising or initiating an LE connection we can't
298 * go ahead and change the random address at this time. This is
299 * because the eventual initiator address used for the
300 * subsequently created connection will be undefined (some
301 * controllers use the new address and others the one we had
302 * when the operation started).
303 *
304 * In this kind of scenario skip the update and let the random
305 * address be updated at the next cycle.
306 */
307 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
308 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
309 BT_DBG("Deferring random address update");
310 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
311 return;
312 }
313
314 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
315}
316
317int hci_update_random_address(struct hci_request *req, bool require_privacy,
318 u8 *own_addr_type)
319{
320 struct hci_dev *hdev = req->hdev;
321 int err;
322
323 /* If privacy is enabled use a resolvable private address. If
 324 * the current RPA has expired or an address other than the
 325 * current RPA is in use, then generate a new one.
326 */
327 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
328 int to;
329
330 *own_addr_type = ADDR_LE_DEV_RANDOM;
331
332 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
333 !bacmp(&hdev->random_addr, &hdev->rpa))
334 return 0;
335
336 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
337 if (err < 0) {
338 BT_ERR("%s failed to generate new RPA", hdev->name);
339 return err;
340 }
341
342 set_random_addr(req, &hdev->rpa);
343
344 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
345 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
346
347 return 0;
348 }
349
 350 /* If privacy is required but no resolvable private address is
 351 * available, use a non-resolvable private address. This is
 352 * useful for active scanning and non-connectable advertising.
353 */
354 if (require_privacy) {
355 bdaddr_t nrpa;
356
357 while (true) {
358 /* The non-resolvable private address is generated
 359 * from six random bytes with the two most significant
360 * bits cleared.
361 */
362 get_random_bytes(&nrpa, 6);
363 nrpa.b[5] &= 0x3f;
364
365 /* The non-resolvable private address shall not be
366 * equal to the public address.
367 */
368 if (bacmp(&hdev->bdaddr, &nrpa))
369 break;
370 }
371
372 *own_addr_type = ADDR_LE_DEV_RANDOM;
373 set_random_addr(req, &nrpa);
374 return 0;
375 }
376
377 /* If forcing static address is in use or there is no public
378 * address use the static address as random address (but skip
379 * the HCI command if the current random address is already the
 380 * static one).
381 *
382 * In case BR/EDR has been disabled on a dual-mode controller
383 * and a static address has been configured, then use that
384 * address instead of the public BR/EDR address.
385 */
386 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
387 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
388 (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
389 bacmp(&hdev->static_addr, BDADDR_ANY))) {
390 *own_addr_type = ADDR_LE_DEV_RANDOM;
391 if (bacmp(&hdev->static_addr, &hdev->random_addr))
392 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
393 &hdev->static_addr);
394 return 0;
395 }
396
397 /* Neither privacy nor static address is being used so use a
398 * public address.
399 */
400 *own_addr_type = ADDR_LE_DEV_PUBLIC;
401
402 return 0;
403}
404
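The non-resolvable private address generated above is simply six random bytes with the two most significant bits of the top byte cleared, re-drawn if it happens to equal the public address. A standalone sketch of the same construction, using /dev/urandom in place of get_random_bytes():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sketch_bdaddr {
        uint8_t b[6];   /* b[5] is the most significant byte */
};

/* Generate a non-resolvable private address as in
 * hci_update_random_address(): random bytes, two MSBs cleared,
 * and never equal to the public address.
 */
static int make_nrpa(struct sketch_bdaddr *nrpa,
                     const struct sketch_bdaddr *public_addr)
{
        FILE *f = fopen("/dev/urandom", "rb");

        if (!f)
                return -1;

        do {
                if (fread(nrpa->b, 1, 6, f) != 6) {
                        fclose(f);
                        return -1;
                }
                nrpa->b[5] &= 0x3f;     /* clear the two most significant bits */
        } while (!memcmp(nrpa->b, public_addr->b, 6));

        fclose(f);
        return 0;
}

int main(void)
{
        struct sketch_bdaddr pub = { { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 } };
        struct sketch_bdaddr nrpa;

        if (make_nrpa(&nrpa, &pub) == 0)
                printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                       nrpa.b[5], nrpa.b[4], nrpa.b[3],
                       nrpa.b[2], nrpa.b[1], nrpa.b[0]);
        return 0;
}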
405static bool disconnected_whitelist_entries(struct hci_dev *hdev)
406{
407 struct bdaddr_list *b;
408
409 list_for_each_entry(b, &hdev->whitelist, list) {
410 struct hci_conn *conn;
411
412 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
413 if (!conn)
414 return true;
415
416 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
417 return true;
418 }
419
420 return false;
421}
422
423void __hci_update_page_scan(struct hci_request *req)
424{
425 struct hci_dev *hdev = req->hdev;
426 u8 scan;
427
428 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
429 return;
430
431 if (!hdev_is_powered(hdev))
432 return;
433
434 if (mgmt_powering_down(hdev))
435 return;
436
437 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
438 disconnected_whitelist_entries(hdev))
439 scan = SCAN_PAGE;
440 else
441 scan = SCAN_DISABLED;
442
443 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
444 return;
445
446 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
447 scan |= SCAN_INQUIRY;
448
449 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
450}
451
452void hci_update_page_scan(struct hci_dev *hdev)
453{
454 struct hci_request req;
455
456 hci_req_init(&req, hdev);
457 __hci_update_page_scan(&req);
458 hci_req_run(&req, NULL);
459}
460
461/* This function controls the background scanning based on hdev->pend_le_conns
 462 * list. If there are pending LE connections we start the background scanning,
463 * otherwise we stop it.
464 *
 465 * This function requires that the caller holds hdev->lock.
466 */
467void __hci_update_background_scan(struct hci_request *req)
468{
469 struct hci_dev *hdev = req->hdev;
470 struct hci_conn *conn;
471
472 if (!test_bit(HCI_UP, &hdev->flags) ||
473 test_bit(HCI_INIT, &hdev->flags) ||
474 test_bit(HCI_SETUP, &hdev->dev_flags) ||
475 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
476 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
477 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
478 return;
479
480 /* No point in doing scanning if LE support hasn't been enabled */
481 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
482 return;
483
484 /* If discovery is active don't interfere with it */
485 if (hdev->discovery.state != DISCOVERY_STOPPED)
486 return;
487
488 /* Reset RSSI and UUID filters when starting background scanning
489 * since these filters are meant for service discovery only.
490 *
491 * The Start Discovery and Start Service Discovery operations
 492 * ensure that proper values are set for the RSSI threshold and
 493 * UUID filter list, so it is safe to just reset them here.
494 */
495 hci_discovery_filter_clear(hdev);
496
497 if (list_empty(&hdev->pend_le_conns) &&
498 list_empty(&hdev->pend_le_reports)) {
 499 * If there are no pending LE connections or devices
500 * to be scanned for, we should stop the background
501 * scanning.
502 */
503
 504 /* If the controller is not scanning, we are done. */
505 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
506 return;
507
508 hci_req_add_le_scan_disable(req);
509
510 BT_DBG("%s stopping background scanning", hdev->name);
511 } else {
512 /* If there is at least one pending LE connection, we should
513 * keep the background scan running.
514 */
515
516 /* If controller is connecting, we should not start scanning
517 * since some controllers are not able to scan and connect at
518 * the same time.
519 */
520 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
521 if (conn)
522 return;
523
 524 /* If the controller is currently scanning, we stop it to ensure we
 525 * don't miss any advertising (due to the duplicates filter).
526 */
527 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
528 hci_req_add_le_scan_disable(req);
529
530 hci_req_add_le_passive_scan(req);
531
532 BT_DBG("%s starting background scanning", hdev->name);
533 }
534}
535
536static void update_background_scan_complete(struct hci_dev *hdev, u8 status,
537 u16 opcode)
538{
539 if (status)
540 BT_DBG("HCI request failed to update background scanning: "
541 "status 0x%2.2x", status);
542}
543
544void hci_update_background_scan(struct hci_dev *hdev)
545{
546 int err;
547 struct hci_request req;
548
549 hci_req_init(&req, hdev);
550
551 __hci_update_background_scan(&req);
552
553 err = hci_req_run(&req, update_background_scan_complete);
554 if (err && err != -ENODATA)
555 BT_ERR("Failed to run HCI request: err %d", err);
556}
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
new file mode 100644
index 000000000000..adf074d33544
--- /dev/null
+++ b/net/bluetooth/hci_request.h
@@ -0,0 +1,54 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2014 Intel Corporation
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License version 2 as
7 published by the Free Software Foundation;
8
9 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
10 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
11 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
12 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
13 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17
18 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
19 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
20 SOFTWARE IS DISCLAIMED.
21*/
22
23struct hci_request {
24 struct hci_dev *hdev;
25 struct sk_buff_head cmd_q;
26
27 /* If something goes wrong when building the HCI request, the error
28 * value is stored in this field.
29 */
30 int err;
31};
32
33void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
34int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
35void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
36 const void *param);
37void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
38 const void *param, u8 event);
39void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status);
40
41struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
42 const void *param);
43
44void hci_req_add_le_scan_disable(struct hci_request *req);
45void hci_req_add_le_passive_scan(struct hci_request *req);
46
47void hci_update_page_scan(struct hci_dev *hdev);
48void __hci_update_page_scan(struct hci_request *req);
49
50int hci_update_random_address(struct hci_request *req, bool require_privacy,
51 u8 *own_addr_type);
52
53void hci_update_background_scan(struct hci_dev *hdev);
54void __hci_update_background_scan(struct hci_request *req);
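The header sums up the request API carved out by this patch. The typical usage pattern, as exemplified by hci_update_page_scan() and hci_update_background_scan() above, is: initialize a request, queue one or more commands, then run it with an optional completion callback. A kernel-style sketch of a caller (the function names and the command used are placeholders, not part of the patch):

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "hci_request.h"

static void example_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        if (status)
                BT_DBG("request failed: status 0x%2.2x opcode 0x%4.4x",
                       status, opcode);
}

static void example_request(struct hci_dev *hdev)
{
        struct hci_request req;
        u8 scan = SCAN_PAGE;    /* placeholder parameter */
        int err;

        hci_req_init(&req, hdev);
        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

        err = hci_req_run(&req, example_complete);
        if (err && err != -ENODATA)
                BT_ERR("Failed to run HCI request: err %d", err);
}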
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 2c245fdf319a..1d65c5be7c82 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -216,11 +216,39 @@ void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
216 read_unlock(&hci_sk_list.lock); 216 read_unlock(&hci_sk_list.lock);
217} 217}
218 218
219static void queue_monitor_skb(struct sk_buff *skb)
220{
221 struct sock *sk;
222
223 BT_DBG("len %d", skb->len);
224
225 read_lock(&hci_sk_list.lock);
226
227 sk_for_each(sk, &hci_sk_list.head) {
228 struct sk_buff *nskb;
229
230 if (sk->sk_state != BT_BOUND)
231 continue;
232
233 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
234 continue;
235
236 nskb = skb_clone(skb, GFP_ATOMIC);
237 if (!nskb)
238 continue;
239
240 if (sock_queue_rcv_skb(sk, nskb))
241 kfree_skb(nskb);
242 }
243
244 read_unlock(&hci_sk_list.lock);
245}
246
219/* Send frame to monitor socket */ 247/* Send frame to monitor socket */
220void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) 248void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
221{ 249{
222 struct sock *sk;
223 struct sk_buff *skb_copy = NULL; 250 struct sk_buff *skb_copy = NULL;
251 struct hci_mon_hdr *hdr;
224 __le16 opcode; 252 __le16 opcode;
225 253
226 if (!atomic_read(&monitor_promisc)) 254 if (!atomic_read(&monitor_promisc))
@@ -251,74 +279,21 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
251 return; 279 return;
252 } 280 }
253 281
254 read_lock(&hci_sk_list.lock); 282 /* Create a private copy with headroom */
255 283 skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
256 sk_for_each(sk, &hci_sk_list.head) { 284 if (!skb_copy)
257 struct sk_buff *nskb; 285 return;
258
259 if (sk->sk_state != BT_BOUND)
260 continue;
261
262 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
263 continue;
264
265 if (!skb_copy) {
266 struct hci_mon_hdr *hdr;
267
268 /* Create a private copy with headroom */
269 skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE,
270 GFP_ATOMIC, true);
271 if (!skb_copy)
272 continue;
273
274 /* Put header before the data */
275 hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
276 hdr->opcode = opcode;
277 hdr->index = cpu_to_le16(hdev->id);
278 hdr->len = cpu_to_le16(skb->len);
279 }
280
281 nskb = skb_clone(skb_copy, GFP_ATOMIC);
282 if (!nskb)
283 continue;
284
285 if (sock_queue_rcv_skb(sk, nskb))
286 kfree_skb(nskb);
287 }
288 286
289 read_unlock(&hci_sk_list.lock); 287 /* Put header before the data */
288 hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
289 hdr->opcode = opcode;
290 hdr->index = cpu_to_le16(hdev->id);
291 hdr->len = cpu_to_le16(skb->len);
290 292
293 queue_monitor_skb(skb_copy);
291 kfree_skb(skb_copy); 294 kfree_skb(skb_copy);
292} 295}
293 296
294static void send_monitor_event(struct sk_buff *skb)
295{
296 struct sock *sk;
297
298 BT_DBG("len %d", skb->len);
299
300 read_lock(&hci_sk_list.lock);
301
302 sk_for_each(sk, &hci_sk_list.head) {
303 struct sk_buff *nskb;
304
305 if (sk->sk_state != BT_BOUND)
306 continue;
307
308 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
309 continue;
310
311 nskb = skb_clone(skb, GFP_ATOMIC);
312 if (!nskb)
313 continue;
314
315 if (sock_queue_rcv_skb(sk, nskb))
316 kfree_skb(nskb);
317 }
318
319 read_unlock(&hci_sk_list.lock);
320}
321
322static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event) 297static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
323{ 298{
324 struct hci_mon_hdr *hdr; 299 struct hci_mon_hdr *hdr;
@@ -422,7 +397,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
422 397
423 skb = create_monitor_event(hdev, event); 398 skb = create_monitor_event(hdev, event);
424 if (skb) { 399 if (skb) {
425 send_monitor_event(skb); 400 queue_monitor_skb(skb);
426 kfree_skb(skb); 401 kfree_skb(skb);
427 } 402 }
428 } 403 }
@@ -1230,6 +1205,8 @@ int __init hci_sock_init(void)
1230{ 1205{
1231 int err; 1206 int err;
1232 1207
1208 BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1209
1233 err = proto_register(&hci_sk_proto, 0); 1210 err = proto_register(&hci_sk_proto, 0);
1234 if (err < 0) 1211 if (err < 0)
1235 return err; 1212 return err;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index d04dc0095736..6ba33f9631e8 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -63,10 +63,10 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
63static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, 63static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
64 struct sk_buff_head *skbs, u8 event); 64 struct sk_buff_head *skbs, u8 event);
65 65
66static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type) 66static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
67{ 67{
68 if (hcon->type == LE_LINK) { 68 if (link_type == LE_LINK) {
69 if (type == ADDR_LE_DEV_PUBLIC) 69 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
70 return BDADDR_LE_PUBLIC; 70 return BDADDR_LE_PUBLIC;
71 else 71 else
72 return BDADDR_LE_RANDOM; 72 return BDADDR_LE_RANDOM;
@@ -75,6 +75,16 @@ static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
75 return BDADDR_BREDR; 75 return BDADDR_BREDR;
76} 76}
77 77
78static inline u8 bdaddr_src_type(struct hci_conn *hcon)
79{
80 return bdaddr_type(hcon->type, hcon->src_type);
81}
82
83static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
84{
85 return bdaddr_type(hcon->type, hcon->dst_type);
86}
87
78/* ---- L2CAP channels ---- */ 88/* ---- L2CAP channels ---- */
79 89
80static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, 90static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
@@ -646,7 +656,7 @@ static void l2cap_conn_update_id_addr(struct work_struct *work)
646 list_for_each_entry(chan, &conn->chan_l, list) { 656 list_for_each_entry(chan, &conn->chan_l, list) {
647 l2cap_chan_lock(chan); 657 l2cap_chan_lock(chan);
648 bacpy(&chan->dst, &hcon->dst); 658 bacpy(&chan->dst, &hcon->dst);
649 chan->dst_type = bdaddr_type(hcon, hcon->dst_type); 659 chan->dst_type = bdaddr_dst_type(hcon);
650 l2cap_chan_unlock(chan); 660 l2cap_chan_unlock(chan);
651 } 661 }
652 662
@@ -3790,8 +3800,8 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3790 3800
3791 bacpy(&chan->src, &conn->hcon->src); 3801 bacpy(&chan->src, &conn->hcon->src);
3792 bacpy(&chan->dst, &conn->hcon->dst); 3802 bacpy(&chan->dst, &conn->hcon->dst);
3793 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type); 3803 chan->src_type = bdaddr_src_type(conn->hcon);
3794 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type); 3804 chan->dst_type = bdaddr_dst_type(conn->hcon);
3795 chan->psm = psm; 3805 chan->psm = psm;
3796 chan->dcid = scid; 3806 chan->dcid = scid;
3797 chan->local_amp_id = amp_id; 3807 chan->local_amp_id = amp_id;
@@ -5441,8 +5451,8 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
5441 5451
5442 bacpy(&chan->src, &conn->hcon->src); 5452 bacpy(&chan->src, &conn->hcon->src);
5443 bacpy(&chan->dst, &conn->hcon->dst); 5453 bacpy(&chan->dst, &conn->hcon->dst);
5444 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type); 5454 chan->src_type = bdaddr_src_type(conn->hcon);
5445 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type); 5455 chan->dst_type = bdaddr_dst_type(conn->hcon);
5446 chan->psm = psm; 5456 chan->psm = psm;
5447 chan->dcid = scid; 5457 chan->dcid = scid;
5448 chan->omtu = mtu; 5458 chan->omtu = mtu;
@@ -6881,7 +6891,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6881 */ 6891 */
6882 if (hcon->type == LE_LINK && 6892 if (hcon->type == LE_LINK &&
6883 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst, 6893 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6884 bdaddr_type(hcon, hcon->dst_type))) { 6894 bdaddr_dst_type(hcon))) {
6885 kfree_skb(skb); 6895 kfree_skb(skb);
6886 return; 6896 return;
6887 } 6897 }
@@ -6968,7 +6978,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6968 6978
6969 if (test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags) && 6979 if (test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags) &&
6970 (bredr_sc_enabled(hcon->hdev) || 6980 (bredr_sc_enabled(hcon->hdev) ||
6971 test_bit(HCI_FORCE_LESC, &hcon->hdev->dbg_flags))) 6981 test_bit(HCI_FORCE_BREDR_SMP, &hcon->hdev->dbg_flags)))
6972 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR; 6982 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
6973 6983
6974 mutex_init(&conn->ident_lock); 6984 mutex_init(&conn->ident_lock);
@@ -7123,7 +7133,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7123 7133
7124 /* Update source addr of the socket */ 7134 /* Update source addr of the socket */
7125 bacpy(&chan->src, &hcon->src); 7135 bacpy(&chan->src, &hcon->src);
7126 chan->src_type = bdaddr_type(hcon, hcon->src_type); 7136 chan->src_type = bdaddr_src_type(hcon);
7127 7137
7128 __l2cap_chan_add(conn, chan); 7138 __l2cap_chan_add(conn, chan);
7129 7139
@@ -7197,8 +7207,10 @@ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7197 * global list (by passing NULL as first parameter). 7207 * global list (by passing NULL as first parameter).
7198 */ 7208 */
7199static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c, 7209static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7200 bdaddr_t *src, u8 link_type) 7210 struct hci_conn *hcon)
7201{ 7211{
7212 u8 src_type = bdaddr_src_type(hcon);
7213
7202 read_lock(&chan_list_lock); 7214 read_lock(&chan_list_lock);
7203 7215
7204 if (c) 7216 if (c)
@@ -7211,11 +7223,9 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7211 continue; 7223 continue;
7212 if (c->state != BT_LISTEN) 7224 if (c->state != BT_LISTEN)
7213 continue; 7225 continue;
7214 if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY)) 7226 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7215 continue; 7227 continue;
7216 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR) 7228 if (src_type != c->src_type)
7217 continue;
7218 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
7219 continue; 7229 continue;
7220 7230
7221 l2cap_chan_hold(c); 7231 l2cap_chan_hold(c);
@@ -7246,7 +7256,7 @@ void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7246 if (!conn) 7256 if (!conn)
7247 return; 7257 return;
7248 7258
7249 dst_type = bdaddr_type(hcon, hcon->dst_type); 7259 dst_type = bdaddr_dst_type(hcon);
7250 7260
7251 /* If device is blocked, do not create channels for it */ 7261 /* If device is blocked, do not create channels for it */
7252 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type)) 7262 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
@@ -7257,7 +7267,7 @@ void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7257 * we left off, because the list lock would prevent calling the 7267 * we left off, because the list lock would prevent calling the
7258 * potentially sleeping l2cap_chan_lock() function. 7268 * potentially sleeping l2cap_chan_lock() function.
7259 */ 7269 */
7260 pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type); 7270 pchan = l2cap_global_fixed_chan(NULL, hcon);
7261 while (pchan) { 7271 while (pchan) {
7262 struct l2cap_chan *chan, *next; 7272 struct l2cap_chan *chan, *next;
7263 7273
@@ -7270,7 +7280,7 @@ void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7270 if (chan) { 7280 if (chan) {
7271 bacpy(&chan->src, &hcon->src); 7281 bacpy(&chan->src, &hcon->src);
7272 bacpy(&chan->dst, &hcon->dst); 7282 bacpy(&chan->dst, &hcon->dst);
7273 chan->src_type = bdaddr_type(hcon, hcon->src_type); 7283 chan->src_type = bdaddr_src_type(hcon);
7274 chan->dst_type = dst_type; 7284 chan->dst_type = dst_type;
7275 7285
7276 __l2cap_chan_add(conn, chan); 7286 __l2cap_chan_add(conn, chan);
@@ -7278,8 +7288,7 @@ void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7278 7288
7279 l2cap_chan_unlock(pchan); 7289 l2cap_chan_unlock(pchan);
7280next: 7290next:
7281 next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr, 7291 next = l2cap_global_fixed_chan(pchan, hcon);
7282 hcon->type);
7283 l2cap_chan_put(pchan); 7292 l2cap_chan_put(pchan);
7284 pchan = next; 7293 pchan = next;
7285 } 7294 }
@@ -7527,8 +7536,8 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
7527 read_lock(&chan_list_lock); 7536 read_lock(&chan_list_lock);
7528 7537
7529 list_for_each_entry(c, &chan_list, global_l) { 7538 list_for_each_entry(c, &chan_list, global_l) {
7530 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n", 7539 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7531 &c->src, &c->dst, 7540 &c->src, c->src_type, &c->dst, c->dst_type,
7532 c->state, __le16_to_cpu(c->psm), 7541 c->state, __le16_to_cpu(c->psm),
7533 c->scid, c->dcid, c->imtu, c->omtu, 7542 c->scid, c->dcid, c->imtu, c->omtu,
7534 c->sec_level, c->mode); 7543 c->sec_level, c->mode);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index f65caf41953f..60694f0f4c73 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -302,7 +302,7 @@ done:
302static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, 302static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
303 int flags) 303 int flags)
304{ 304{
305 DECLARE_WAITQUEUE(wait, current); 305 DEFINE_WAIT_FUNC(wait, woken_wake_function);
306 struct sock *sk = sock->sk, *nsk; 306 struct sock *sk = sock->sk, *nsk;
307 long timeo; 307 long timeo;
308 int err = 0; 308 int err = 0;
@@ -316,8 +316,6 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
316 /* Wait for an incoming connection. (wake-one). */ 316 /* Wait for an incoming connection. (wake-one). */
317 add_wait_queue_exclusive(sk_sleep(sk), &wait); 317 add_wait_queue_exclusive(sk_sleep(sk), &wait);
318 while (1) { 318 while (1) {
319 set_current_state(TASK_INTERRUPTIBLE);
320
321 if (sk->sk_state != BT_LISTEN) { 319 if (sk->sk_state != BT_LISTEN) {
322 err = -EBADFD; 320 err = -EBADFD;
323 break; 321 break;
@@ -338,10 +336,11 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
338 } 336 }
339 337
340 release_sock(sk); 338 release_sock(sk);
341 timeo = schedule_timeout(timeo); 339
340 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
341
342 lock_sock_nested(sk, L2CAP_NESTING_PARENT); 342 lock_sock_nested(sk, L2CAP_NESTING_PARENT);
343 } 343 }
344 __set_current_state(TASK_RUNNING);
345 remove_wait_queue(sk_sleep(sk), &wait); 344 remove_wait_queue(sk_sleep(sk), &wait);
346 345
347 if (err) 346 if (err)
@@ -1614,6 +1613,8 @@ int __init l2cap_init_sockets(void)
1614{ 1613{
1615 int err; 1614 int err;
1616 1615
1616 BUILD_BUG_ON(sizeof(struct sockaddr_l2) > sizeof(struct sockaddr));
1617
1617 err = proto_register(&l2cap_proto, 0); 1618 err = proto_register(&l2cap_proto, 0);
1618 if (err < 0) 1619 if (err < 0)
1619 return err; 1620 return err;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 693ce8bcd06e..9ec5390c85eb 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -32,6 +32,7 @@
32#include <net/bluetooth/l2cap.h> 32#include <net/bluetooth/l2cap.h>
33#include <net/bluetooth/mgmt.h> 33#include <net/bluetooth/mgmt.h>
34 34
35#include "hci_request.h"
35#include "smp.h" 36#include "smp.h"
36 37
37#define MGMT_VERSION 1 38#define MGMT_VERSION 1
@@ -130,6 +131,9 @@ static const u16 mgmt_events[] = {
130 131
131#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000) 132#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
132 133
134#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
135 "\x00\x00\x00\x00\x00\x00\x00\x00"
136
133struct pending_cmd { 137struct pending_cmd {
134 struct list_head list; 138 struct list_head list;
135 u16 opcode; 139 u16 opcode;
@@ -138,7 +142,7 @@ struct pending_cmd {
138 size_t param_len; 142 size_t param_len;
139 struct sock *sk; 143 struct sock *sk;
140 void *user_data; 144 void *user_data;
141 void (*cmd_complete)(struct pending_cmd *cmd, u8 status); 145 int (*cmd_complete)(struct pending_cmd *cmd, u8 status);
142}; 146};
143 147
144/* HCI to MGMT error code conversion table */ 148/* HCI to MGMT error code conversion table */
@@ -569,8 +573,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)
569 settings |= MGMT_SETTING_HS; 573 settings |= MGMT_SETTING_HS;
570 } 574 }
571 575
572 if (lmp_sc_capable(hdev) || 576 if (lmp_sc_capable(hdev))
573 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
574 settings |= MGMT_SETTING_SECURE_CONN; 577 settings |= MGMT_SETTING_SECURE_CONN;
575 } 578 }
576 579
@@ -1251,7 +1254,7 @@ static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1251 sizeof(settings)); 1254 sizeof(settings));
1252} 1255}
1253 1256
1254static void clean_up_hci_complete(struct hci_dev *hdev, u8 status) 1257static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1255{ 1258{
1256 BT_DBG("%s status 0x%02x", hdev->name, status); 1259 BT_DBG("%s status 0x%02x", hdev->name, status);
1257 1260
@@ -1486,16 +1489,16 @@ static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
1486 cmd_status_rsp(cmd, data); 1489 cmd_status_rsp(cmd, data);
1487} 1490}
1488 1491
1489static void generic_cmd_complete(struct pending_cmd *cmd, u8 status) 1492static int generic_cmd_complete(struct pending_cmd *cmd, u8 status)
1490{ 1493{
1491 cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, 1494 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1492 cmd->param_len); 1495 cmd->param, cmd->param_len);
1493} 1496}
1494 1497
1495static void addr_cmd_complete(struct pending_cmd *cmd, u8 status) 1498static int addr_cmd_complete(struct pending_cmd *cmd, u8 status)
1496{ 1499{
1497 cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, 1500 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
1498 sizeof(struct mgmt_addr_info)); 1501 sizeof(struct mgmt_addr_info));
1499} 1502}
1500 1503
1501static u8 mgmt_bredr_support(struct hci_dev *hdev) 1504static u8 mgmt_bredr_support(struct hci_dev *hdev)
@@ -1518,7 +1521,8 @@ static u8 mgmt_le_support(struct hci_dev *hdev)
1518 return MGMT_STATUS_SUCCESS; 1521 return MGMT_STATUS_SUCCESS;
1519} 1522}
1520 1523
1521static void set_discoverable_complete(struct hci_dev *hdev, u8 status) 1524static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1525 u16 opcode)
1522{ 1526{
1523 struct pending_cmd *cmd; 1527 struct pending_cmd *cmd;
1524 struct mgmt_mode *cp; 1528 struct mgmt_mode *cp;
@@ -1566,7 +1570,7 @@ static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1566 * entries. 1570 * entries.
1567 */ 1571 */
1568 hci_req_init(&req, hdev); 1572 hci_req_init(&req, hdev);
1569 hci_update_page_scan(hdev, &req); 1573 __hci_update_page_scan(&req);
1570 update_class(&req); 1574 update_class(&req);
1571 hci_req_run(&req, NULL); 1575 hci_req_run(&req, NULL);
1572 1576
@@ -1777,7 +1781,8 @@ static void write_fast_connectable(struct hci_request *req, bool enable)
1777 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type); 1781 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1778} 1782}
1779 1783
1780static void set_connectable_complete(struct hci_dev *hdev, u8 status) 1784static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1785 u16 opcode)
1781{ 1786{
1782 struct pending_cmd *cmd; 1787 struct pending_cmd *cmd;
1783 struct mgmt_mode *cp; 1788 struct mgmt_mode *cp;
@@ -1813,7 +1818,7 @@ static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1813 1818
1814 if (conn_changed || discov_changed) { 1819 if (conn_changed || discov_changed) {
1815 new_settings(hdev, cmd->sk); 1820 new_settings(hdev, cmd->sk);
1816 hci_update_page_scan(hdev, NULL); 1821 hci_update_page_scan(hdev);
1817 if (discov_changed) 1822 if (discov_changed)
1818 mgmt_update_adv_data(hdev); 1823 mgmt_update_adv_data(hdev);
1819 hci_update_background_scan(hdev); 1824 hci_update_background_scan(hdev);
@@ -1847,7 +1852,7 @@ static int set_connectable_update_settings(struct hci_dev *hdev,
1847 return err; 1852 return err;
1848 1853
1849 if (changed) { 1854 if (changed) {
1850 hci_update_page_scan(hdev, NULL); 1855 hci_update_page_scan(hdev);
1851 hci_update_background_scan(hdev); 1856 hci_update_background_scan(hdev);
1852 return new_settings(hdev, sk); 1857 return new_settings(hdev, sk);
1853 } 1858 }
@@ -2195,7 +2200,7 @@ unlock:
2195 return err; 2200 return err;
2196} 2201}
2197 2202
2198static void le_enable_complete(struct hci_dev *hdev, u8 status) 2203static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2199{ 2204{
2200 struct cmd_lookup match = { NULL, hdev }; 2205 struct cmd_lookup match = { NULL, hdev };
2201 2206
@@ -2227,9 +2232,8 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status)
2227 hci_req_init(&req, hdev); 2232 hci_req_init(&req, hdev);
2228 update_adv_data(&req); 2233 update_adv_data(&req);
2229 update_scan_rsp_data(&req); 2234 update_scan_rsp_data(&req);
2235 __hci_update_background_scan(&req);
2230 hci_req_run(&req, NULL); 2236 hci_req_run(&req, NULL);
2231
2232 hci_update_background_scan(hdev);
2233 } 2237 }
2234 2238
2235unlock: 2239unlock:
@@ -2386,7 +2390,7 @@ unlock:
2386 hci_dev_unlock(hdev); 2390 hci_dev_unlock(hdev);
2387} 2391}
2388 2392
2389static void add_uuid_complete(struct hci_dev *hdev, u8 status) 2393static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2390{ 2394{
2391 BT_DBG("status 0x%02x", status); 2395 BT_DBG("status 0x%02x", status);
2392 2396
@@ -2465,7 +2469,7 @@ static bool enable_service_cache(struct hci_dev *hdev)
2465 return false; 2469 return false;
2466} 2470}
2467 2471
2468static void remove_uuid_complete(struct hci_dev *hdev, u8 status) 2472static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2469{ 2473{
2470 BT_DBG("status 0x%02x", status); 2474 BT_DBG("status 0x%02x", status);
2471 2475
@@ -2550,7 +2554,7 @@ unlock:
2550 return err; 2554 return err;
2551} 2555}
2552 2556
2553static void set_class_complete(struct hci_dev *hdev, u8 status) 2557static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2554{ 2558{
2555 BT_DBG("status 0x%02x", status); 2559 BT_DBG("status 0x%02x", status);
2556 2560
@@ -3098,16 +3102,17 @@ static struct pending_cmd *find_pairing(struct hci_conn *conn)
3098 return NULL; 3102 return NULL;
3099} 3103}
3100 3104
3101static void pairing_complete(struct pending_cmd *cmd, u8 status) 3105static int pairing_complete(struct pending_cmd *cmd, u8 status)
3102{ 3106{
3103 struct mgmt_rp_pair_device rp; 3107 struct mgmt_rp_pair_device rp;
3104 struct hci_conn *conn = cmd->user_data; 3108 struct hci_conn *conn = cmd->user_data;
3109 int err;
3105 3110
3106 bacpy(&rp.addr.bdaddr, &conn->dst); 3111 bacpy(&rp.addr.bdaddr, &conn->dst);
3107 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type); 3112 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3108 3113
3109 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status, 3114 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3110 &rp, sizeof(rp)); 3115 &rp, sizeof(rp));
3111 3116
3112 /* So we don't get further callbacks for this connection */ 3117 /* So we don't get further callbacks for this connection */
3113 conn->connect_cfm_cb = NULL; 3118 conn->connect_cfm_cb = NULL;
@@ -3122,6 +3127,8 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
3122 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags); 3127 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3123 3128
3124 hci_conn_put(conn); 3129 hci_conn_put(conn);
3130
3131 return err;
3125} 3132}
3126 3133
3127void mgmt_smp_complete(struct hci_conn *conn, bool complete) 3134void mgmt_smp_complete(struct hci_conn *conn, bool complete)
@@ -3481,7 +3488,7 @@ static void update_name(struct hci_request *req)
3481 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp); 3488 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3482} 3489}
3483 3490
3484static void set_name_complete(struct hci_dev *hdev, u8 status) 3491static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3485{ 3492{
3486 struct mgmt_cp_set_local_name *cp; 3493 struct mgmt_cp_set_local_name *cp;
3487 struct pending_cmd *cmd; 3494 struct pending_cmd *cmd;
@@ -3629,10 +3636,16 @@ unlock:
3629static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev, 3636static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3630 void *data, u16 len) 3637 void *data, u16 len)
3631{ 3638{
3639 struct mgmt_addr_info *addr = data;
3632 int err; 3640 int err;
3633 3641
3634 BT_DBG("%s ", hdev->name); 3642 BT_DBG("%s ", hdev->name);
3635 3643
3644 if (!bdaddr_type_is_valid(addr->type))
3645 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3646 MGMT_STATUS_INVALID_PARAMS, addr,
3647 sizeof(*addr));
3648
3636 hci_dev_lock(hdev); 3649 hci_dev_lock(hdev);
3637 3650
3638 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) { 3651 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
@@ -3659,28 +3672,53 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3659 status, &cp->addr, sizeof(cp->addr)); 3672 status, &cp->addr, sizeof(cp->addr));
3660 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) { 3673 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3661 struct mgmt_cp_add_remote_oob_ext_data *cp = data; 3674 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3662 u8 *rand192, *hash192; 3675 u8 *rand192, *hash192, *rand256, *hash256;
3663 u8 status; 3676 u8 status;
3664 3677
3665 if (cp->addr.type != BDADDR_BREDR) {
3666 err = cmd_complete(sk, hdev->id,
3667 MGMT_OP_ADD_REMOTE_OOB_DATA,
3668 MGMT_STATUS_INVALID_PARAMS,
3669 &cp->addr, sizeof(cp->addr));
3670 goto unlock;
3671 }
3672
3673 if (bdaddr_type_is_le(cp->addr.type)) { 3678 if (bdaddr_type_is_le(cp->addr.type)) {
3679 /* Enforce zero-valued 192-bit parameters as
3680 * long as legacy SMP OOB isn't implemented.
3681 */
3682 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3683 memcmp(cp->hash192, ZERO_KEY, 16)) {
3684 err = cmd_complete(sk, hdev->id,
3685 MGMT_OP_ADD_REMOTE_OOB_DATA,
3686 MGMT_STATUS_INVALID_PARAMS,
3687 addr, sizeof(*addr));
3688 goto unlock;
3689 }
3690
3674 rand192 = NULL; 3691 rand192 = NULL;
3675 hash192 = NULL; 3692 hash192 = NULL;
3676 } else { 3693 } else {
3677 rand192 = cp->rand192; 3694 /* In case one of the P-192 values is set to zero,
3678 hash192 = cp->hash192; 3695 * then just disable OOB data for P-192.
3696 */
3697 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3698 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3699 rand192 = NULL;
3700 hash192 = NULL;
3701 } else {
3702 rand192 = cp->rand192;
3703 hash192 = cp->hash192;
3704 }
3705 }
3706
3707 /* In case one of the P-256 values is set to zero, then just
3708 * disable OOB data for P-256.
3709 */
3710 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3711 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3712 rand256 = NULL;
3713 hash256 = NULL;
3714 } else {
3715 rand256 = cp->rand256;
3716 hash256 = cp->hash256;
3679 } 3717 }
3680 3718
3681 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, 3719 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3682 cp->addr.type, hash192, rand192, 3720 cp->addr.type, hash192, rand192,
3683 cp->hash256, cp->rand256); 3721 hash256, rand256);
3684 if (err < 0) 3722 if (err < 0)
3685 status = MGMT_STATUS_FAILED; 3723 status = MGMT_STATUS_FAILED;
3686 else 3724 else
@@ -3832,7 +3870,8 @@ static bool trigger_discovery(struct hci_request *req, u8 *status)
3832 return true; 3870 return true;
3833} 3871}
3834 3872
3835static void start_discovery_complete(struct hci_dev *hdev, u8 status) 3873static void start_discovery_complete(struct hci_dev *hdev, u8 status,
3874 u16 opcode)
3836{ 3875{
3837 struct pending_cmd *cmd; 3876 struct pending_cmd *cmd;
3838 unsigned long timeout; 3877 unsigned long timeout;
@@ -3857,6 +3896,9 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3857 3896
3858 hci_discovery_set_state(hdev, DISCOVERY_FINDING); 3897 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3859 3898
 3899 /* If the scan involves an LE scan, pick a proper timeout to schedule
 3900 * hdev->le_scan_disable, which will stop it.
3901 */
3860 switch (hdev->discovery.type) { 3902 switch (hdev->discovery.type) {
3861 case DISCOV_TYPE_LE: 3903 case DISCOV_TYPE_LE:
3862 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); 3904 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
@@ -3873,9 +3915,23 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3873 break; 3915 break;
3874 } 3916 }
3875 3917
3876 if (timeout) 3918 if (timeout) {
3919 /* When service discovery is used and the controller has
3920 * a strict duplicate filter, it is important to remember
3921 * the start and duration of the scan. This is required
3922 * for restarting scanning during the discovery phase.
3923 */
3924 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
3925 &hdev->quirks) &&
3926 (hdev->discovery.uuid_count > 0 ||
3927 hdev->discovery.rssi != HCI_RSSI_INVALID)) {
3928 hdev->discovery.scan_start = jiffies;
3929 hdev->discovery.scan_duration = timeout;
3930 }
3931
3877 queue_delayed_work(hdev->workqueue, 3932 queue_delayed_work(hdev->workqueue,
3878 &hdev->le_scan_disable, timeout); 3933 &hdev->le_scan_disable, timeout);
3934 }
3879 3935
3880unlock: 3936unlock:
3881 hci_dev_unlock(hdev); 3937 hci_dev_unlock(hdev);
@@ -3947,9 +4003,10 @@ failed:
3947 return err; 4003 return err;
3948} 4004}
3949 4005
3950static void service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status) 4006static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
3951{ 4007{
3952 cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, 1); 4008 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
4009 cmd->param, 1);
3953} 4010}
3954 4011
3955static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, 4012static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
@@ -4060,7 +4117,7 @@ failed:
4060 return err; 4117 return err;
4061} 4118}
4062 4119
4063static void stop_discovery_complete(struct hci_dev *hdev, u8 status) 4120static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4064{ 4121{
4065 struct pending_cmd *cmd; 4122 struct pending_cmd *cmd;
4066 4123
@@ -4286,7 +4343,8 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4286 return err; 4343 return err;
4287} 4344}
4288 4345
4289static void set_advertising_complete(struct hci_dev *hdev, u8 status) 4346static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4347 u16 opcode)
4290{ 4348{
4291 struct cmd_lookup match = { NULL, hdev }; 4349 struct cmd_lookup match = { NULL, hdev };
4292 4350
@@ -4493,7 +4551,8 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4493 return err; 4551 return err;
4494} 4552}
4495 4553
4496static void fast_connectable_complete(struct hci_dev *hdev, u8 status) 4554static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4555 u16 opcode)
4497{ 4556{
4498 struct pending_cmd *cmd; 4557 struct pending_cmd *cmd;
4499 4558
@@ -4591,7 +4650,7 @@ unlock:
4591 return err; 4650 return err;
4592} 4651}
4593 4652
4594static void set_bredr_complete(struct hci_dev *hdev, u8 status) 4653static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4595{ 4654{
4596 struct pending_cmd *cmd; 4655 struct pending_cmd *cmd;
4597 4656
@@ -4675,6 +4734,28 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4675 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, 4734 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4676 MGMT_STATUS_REJECTED); 4735 MGMT_STATUS_REJECTED);
4677 goto unlock; 4736 goto unlock;
4737 } else {
4738 /* When configuring a dual-mode controller to operate
4739 * with LE only and using a static address, then switching
4740 * BR/EDR back on is not allowed.
4741 *
4742 * Dual-mode controllers shall operate with the public
4743 * address as its identity address for BR/EDR and LE. So
4744 * reject the attempt to create an invalid configuration.
4745 *
 4746 * The same restrictions apply when secure connections
 4747 * have been enabled. For BR/EDR this is a controller feature
 4748 * while for LE it is a host stack feature. This means that
 4749 * switching BR/EDR back on when secure connections have been
 4750 * enabled is not a supported transaction.
4751 */
4752 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
4753 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
4754 test_bit(HCI_SC_ENABLED, &hdev->dev_flags))) {
4755 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4756 MGMT_STATUS_REJECTED);
4757 goto unlock;
4758 }
4678 } 4759 }
4679 4760
4680 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) { 4761 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
@@ -4697,7 +4778,7 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4697 hci_req_init(&req, hdev); 4778 hci_req_init(&req, hdev);
4698 4779
4699 write_fast_connectable(&req, false); 4780 write_fast_connectable(&req, false);
4700 hci_update_page_scan(hdev, &req); 4781 __hci_update_page_scan(&req);
4701 4782
4702 /* Since only the advertising data flags will change, there 4783 /* Since only the advertising data flags will change, there
4703 * is no need to update the scan response data. 4784 * is no need to update the scan response data.
@@ -4713,30 +4794,80 @@ unlock:
4713 return err; 4794 return err;
4714} 4795}
4715 4796
4797static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4798{
4799 struct pending_cmd *cmd;
4800 struct mgmt_mode *cp;
4801
4802 BT_DBG("%s status %u", hdev->name, status);
4803
4804 hci_dev_lock(hdev);
4805
4806 cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
4807 if (!cmd)
4808 goto unlock;
4809
4810 if (status) {
4811 cmd_status(cmd->sk, cmd->index, cmd->opcode,
4812 mgmt_status(status));
4813 goto remove;
4814 }
4815
4816 cp = cmd->param;
4817
4818 switch (cp->val) {
4819 case 0x00:
4820 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
4821 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4822 break;
4823 case 0x01:
4824 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
4825 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4826 break;
4827 case 0x02:
4828 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
4829 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4830 break;
4831 }
4832
4833 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
4834 new_settings(hdev, cmd->sk);
4835
4836remove:
4837 mgmt_pending_remove(cmd);
4838unlock:
4839 hci_dev_unlock(hdev);
4840}
4841
4716static int set_secure_conn(struct sock *sk, struct hci_dev *hdev, 4842static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4717 void *data, u16 len) 4843 void *data, u16 len)
4718{ 4844{
4719 struct mgmt_mode *cp = data; 4845 struct mgmt_mode *cp = data;
4720 struct pending_cmd *cmd; 4846 struct pending_cmd *cmd;
4847 struct hci_request req;
4721 u8 val; 4848 u8 val;
4722 int err; 4849 int err;
4723 4850
4724 BT_DBG("request for %s", hdev->name); 4851 BT_DBG("request for %s", hdev->name);
4725 4852
4726 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) && 4853 if (!lmp_sc_capable(hdev) &&
4727 !lmp_sc_capable(hdev) && !test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) 4854 !test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4728 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, 4855 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4729 MGMT_STATUS_NOT_SUPPORTED); 4856 MGMT_STATUS_NOT_SUPPORTED);
4730 4857
4858 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
4859 lmp_sc_capable(hdev) &&
4860 !test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4861 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4862 MGMT_STATUS_REJECTED);
4863
4731 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) 4864 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4732 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, 4865 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4733 MGMT_STATUS_INVALID_PARAMS); 4866 MGMT_STATUS_INVALID_PARAMS);
4734 4867
4735 hci_dev_lock(hdev); 4868 hci_dev_lock(hdev);
4736 4869
4737 if (!hdev_is_powered(hdev) || 4870 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
4738 (!lmp_sc_capable(hdev) &&
4739 !test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) ||
4740 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { 4871 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4741 bool changed; 4872 bool changed;
4742 4873
@@ -4783,17 +4914,14 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4783 goto failed; 4914 goto failed;
4784 } 4915 }
4785 4916
4786 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val); 4917 hci_req_init(&req, hdev);
4918 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4919 err = hci_req_run(&req, sc_enable_complete);
4787 if (err < 0) { 4920 if (err < 0) {
4788 mgmt_pending_remove(cmd); 4921 mgmt_pending_remove(cmd);
4789 goto failed; 4922 goto failed;
4790 } 4923 }
4791 4924
4792 if (cp->val == 0x02)
4793 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4794 else
4795 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4796
4797failed: 4925failed:
4798 hci_dev_unlock(hdev); 4926 hci_dev_unlock(hdev);
4799 return err; 4927 return err;
@@ -5091,10 +5219,11 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5091 return err; 5219 return err;
5092} 5220}
5093 5221
5094static void conn_info_cmd_complete(struct pending_cmd *cmd, u8 status) 5222static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5095{ 5223{
5096 struct hci_conn *conn = cmd->user_data; 5224 struct hci_conn *conn = cmd->user_data;
5097 struct mgmt_rp_get_conn_info rp; 5225 struct mgmt_rp_get_conn_info rp;
5226 int err;
5098 5227
5099 memcpy(&rp.addr, cmd->param, sizeof(rp.addr)); 5228 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5100 5229
@@ -5108,14 +5237,17 @@ static void conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5108 rp.max_tx_power = HCI_TX_POWER_INVALID; 5237 rp.max_tx_power = HCI_TX_POWER_INVALID;
5109 } 5238 }
5110 5239
5111 cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status, 5240 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
5112 &rp, sizeof(rp)); 5241 &rp, sizeof(rp));
5113 5242
5114 hci_conn_drop(conn); 5243 hci_conn_drop(conn);
5115 hci_conn_put(conn); 5244 hci_conn_put(conn);
5245
5246 return err;
5116} 5247}
5117 5248
5118static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status) 5249static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5250 u16 opcode)
5119{ 5251{
5120 struct hci_cp_read_rssi *cp; 5252 struct hci_cp_read_rssi *cp;
5121 struct pending_cmd *cmd; 5253 struct pending_cmd *cmd;
@@ -5286,11 +5418,12 @@ unlock:
5286 return err; 5418 return err;
5287} 5419}
5288 5420
5289static void clock_info_cmd_complete(struct pending_cmd *cmd, u8 status) 5421static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5290{ 5422{
5291 struct hci_conn *conn = cmd->user_data; 5423 struct hci_conn *conn = cmd->user_data;
5292 struct mgmt_rp_get_clock_info rp; 5424 struct mgmt_rp_get_clock_info rp;
5293 struct hci_dev *hdev; 5425 struct hci_dev *hdev;
5426 int err;
5294 5427
5295 memset(&rp, 0, sizeof(rp)); 5428 memset(&rp, 0, sizeof(rp));
5296 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr)); 5429 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
@@ -5310,15 +5443,18 @@ static void clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5310 } 5443 }
5311 5444
5312complete: 5445complete:
5313 cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, sizeof(rp)); 5446 err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5447 sizeof(rp));
5314 5448
5315 if (conn) { 5449 if (conn) {
5316 hci_conn_drop(conn); 5450 hci_conn_drop(conn);
5317 hci_conn_put(conn); 5451 hci_conn_put(conn);
5318 } 5452 }
5453
5454 return err;
5319} 5455}
5320 5456
5321static void get_clock_info_complete(struct hci_dev *hdev, u8 status) 5457static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5322{ 5458{
5323 struct hci_cp_read_clock *hci_cp; 5459 struct hci_cp_read_clock *hci_cp;
5324 struct pending_cmd *cmd; 5460 struct pending_cmd *cmd;
@@ -5425,6 +5561,65 @@ unlock:
5425 return err; 5561 return err;
5426} 5562}
5427 5563
5564static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5565{
5566 struct hci_conn *conn;
5567
5568 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5569 if (!conn)
5570 return false;
5571
5572 if (conn->dst_type != type)
5573 return false;
5574
5575 if (conn->state != BT_CONNECTED)
5576 return false;
5577
5578 return true;
5579}
5580
5581/* This function requires the caller holds hdev->lock */
5582static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5583 u8 addr_type, u8 auto_connect)
5584{
5585 struct hci_dev *hdev = req->hdev;
5586 struct hci_conn_params *params;
5587
5588 params = hci_conn_params_add(hdev, addr, addr_type);
5589 if (!params)
5590 return -EIO;
5591
5592 if (params->auto_connect == auto_connect)
5593 return 0;
5594
5595 list_del_init(&params->action);
5596
5597 switch (auto_connect) {
5598 case HCI_AUTO_CONN_DISABLED:
5599 case HCI_AUTO_CONN_LINK_LOSS:
5600 __hci_update_background_scan(req);
5601 break;
5602 case HCI_AUTO_CONN_REPORT:
5603 list_add(&params->action, &hdev->pend_le_reports);
5604 __hci_update_background_scan(req);
5605 break;
5606 case HCI_AUTO_CONN_DIRECT:
5607 case HCI_AUTO_CONN_ALWAYS:
5608 if (!is_connected(hdev, addr, addr_type)) {
5609 list_add(&params->action, &hdev->pend_le_conns);
5610 __hci_update_background_scan(req);
5611 }
5612 break;
5613 }
5614
5615 params->auto_connect = auto_connect;
5616
5617 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
5618 auto_connect);
5619
5620 return 0;
5621}
5622
5428static void device_added(struct sock *sk, struct hci_dev *hdev, 5623static void device_added(struct sock *sk, struct hci_dev *hdev,
5429 bdaddr_t *bdaddr, u8 type, u8 action) 5624 bdaddr_t *bdaddr, u8 type, u8 action)
5430{ 5625{
@@ -5437,10 +5632,31 @@ static void device_added(struct sock *sk, struct hci_dev *hdev,
5437 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk); 5632 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5438} 5633}
5439 5634
5635static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5636{
5637 struct pending_cmd *cmd;
5638
5639 BT_DBG("status 0x%02x", status);
5640
5641 hci_dev_lock(hdev);
5642
5643 cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
5644 if (!cmd)
5645 goto unlock;
5646
5647 cmd->cmd_complete(cmd, mgmt_status(status));
5648 mgmt_pending_remove(cmd);
5649
5650unlock:
5651 hci_dev_unlock(hdev);
5652}
5653
5440static int add_device(struct sock *sk, struct hci_dev *hdev, 5654static int add_device(struct sock *sk, struct hci_dev *hdev,
5441 void *data, u16 len) 5655 void *data, u16 len)
5442{ 5656{
5443 struct mgmt_cp_add_device *cp = data; 5657 struct mgmt_cp_add_device *cp = data;
5658 struct pending_cmd *cmd;
5659 struct hci_request req;
5444 u8 auto_conn, addr_type; 5660 u8 auto_conn, addr_type;
5445 int err; 5661 int err;
5446 5662
@@ -5457,14 +5673,24 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
5457 MGMT_STATUS_INVALID_PARAMS, 5673 MGMT_STATUS_INVALID_PARAMS,
5458 &cp->addr, sizeof(cp->addr)); 5674 &cp->addr, sizeof(cp->addr));
5459 5675
5676 hci_req_init(&req, hdev);
5677
5460 hci_dev_lock(hdev); 5678 hci_dev_lock(hdev);
5461 5679
5680 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
5681 if (!cmd) {
5682 err = -ENOMEM;
5683 goto unlock;
5684 }
5685
5686 cmd->cmd_complete = addr_cmd_complete;
5687
5462 if (cp->addr.type == BDADDR_BREDR) { 5688 if (cp->addr.type == BDADDR_BREDR) {
5463 /* Only incoming connections action is supported for now */ 5689 /* Only incoming connections action is supported for now */
5464 if (cp->action != 0x01) { 5690 if (cp->action != 0x01) {
5465 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, 5691 err = cmd->cmd_complete(cmd,
5466 MGMT_STATUS_INVALID_PARAMS, 5692 MGMT_STATUS_INVALID_PARAMS);
5467 &cp->addr, sizeof(cp->addr)); 5693 mgmt_pending_remove(cmd);
5468 goto unlock; 5694 goto unlock;
5469 } 5695 }
5470 5696
@@ -5473,7 +5699,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
5473 if (err) 5699 if (err)
5474 goto unlock; 5700 goto unlock;
5475 5701
5476 hci_update_page_scan(hdev, NULL); 5702 __hci_update_page_scan(&req);
5477 5703
5478 goto added; 5704 goto added;
5479 } 5705 }
@@ -5493,19 +5719,25 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
5493 /* If the connection parameters don't exist for this device, 5719 /* If the connection parameters don't exist for this device,
5494 * they will be created and configured with defaults. 5720 * they will be created and configured with defaults.
5495 */ 5721 */
5496 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type, 5722 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
5497 auto_conn) < 0) { 5723 auto_conn) < 0) {
5498 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, 5724 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
5499 MGMT_STATUS_FAILED, 5725 mgmt_pending_remove(cmd);
5500 &cp->addr, sizeof(cp->addr));
5501 goto unlock; 5726 goto unlock;
5502 } 5727 }
5503 5728
5504added: 5729added:
5505 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action); 5730 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5506 5731
5507 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, 5732 err = hci_req_run(&req, add_device_complete);
5508 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr)); 5733 if (err < 0) {
5734 /* ENODATA means no HCI commands were needed (e.g. if
5735 * the adapter is powered off).
5736 */
5737 if (err == -ENODATA)
5738 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5739 mgmt_pending_remove(cmd);
5740 }
5509 5741
5510unlock: 5742unlock:
5511 hci_dev_unlock(hdev); 5743 hci_dev_unlock(hdev);
@@ -5523,24 +5755,55 @@ static void device_removed(struct sock *sk, struct hci_dev *hdev,
5523 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk); 5755 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5524} 5756}
5525 5757
5758static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5759{
5760 struct pending_cmd *cmd;
5761
5762 BT_DBG("status 0x%02x", status);
5763
5764 hci_dev_lock(hdev);
5765
5766 cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
5767 if (!cmd)
5768 goto unlock;
5769
5770 cmd->cmd_complete(cmd, mgmt_status(status));
5771 mgmt_pending_remove(cmd);
5772
5773unlock:
5774 hci_dev_unlock(hdev);
5775}
5776
5526static int remove_device(struct sock *sk, struct hci_dev *hdev, 5777static int remove_device(struct sock *sk, struct hci_dev *hdev,
5527 void *data, u16 len) 5778 void *data, u16 len)
5528{ 5779{
5529 struct mgmt_cp_remove_device *cp = data; 5780 struct mgmt_cp_remove_device *cp = data;
5781 struct pending_cmd *cmd;
5782 struct hci_request req;
5530 int err; 5783 int err;
5531 5784
5532 BT_DBG("%s", hdev->name); 5785 BT_DBG("%s", hdev->name);
5533 5786
5787 hci_req_init(&req, hdev);
5788
5534 hci_dev_lock(hdev); 5789 hci_dev_lock(hdev);
5535 5790
5791 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
5792 if (!cmd) {
5793 err = -ENOMEM;
5794 goto unlock;
5795 }
5796
5797 cmd->cmd_complete = addr_cmd_complete;
5798
5536 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) { 5799 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5537 struct hci_conn_params *params; 5800 struct hci_conn_params *params;
5538 u8 addr_type; 5801 u8 addr_type;
5539 5802
5540 if (!bdaddr_type_is_valid(cp->addr.type)) { 5803 if (!bdaddr_type_is_valid(cp->addr.type)) {
5541 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE, 5804 err = cmd->cmd_complete(cmd,
5542 MGMT_STATUS_INVALID_PARAMS, 5805 MGMT_STATUS_INVALID_PARAMS);
5543 &cp->addr, sizeof(cp->addr)); 5806 mgmt_pending_remove(cmd);
5544 goto unlock; 5807 goto unlock;
5545 } 5808 }
5546 5809
@@ -5549,14 +5812,13 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
5549 &cp->addr.bdaddr, 5812 &cp->addr.bdaddr,
5550 cp->addr.type); 5813 cp->addr.type);
5551 if (err) { 5814 if (err) {
5552 err = cmd_complete(sk, hdev->id, 5815 err = cmd->cmd_complete(cmd,
5553 MGMT_OP_REMOVE_DEVICE, 5816 MGMT_STATUS_INVALID_PARAMS);
5554 MGMT_STATUS_INVALID_PARAMS, 5817 mgmt_pending_remove(cmd);
5555 &cp->addr, sizeof(cp->addr));
5556 goto unlock; 5818 goto unlock;
5557 } 5819 }
5558 5820
5559 hci_update_page_scan(hdev, NULL); 5821 __hci_update_page_scan(&req);
5560 5822
5561 device_removed(sk, hdev, &cp->addr.bdaddr, 5823 device_removed(sk, hdev, &cp->addr.bdaddr,
5562 cp->addr.type); 5824 cp->addr.type);
@@ -5571,23 +5833,23 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
5571 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, 5833 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5572 addr_type); 5834 addr_type);
5573 if (!params) { 5835 if (!params) {
5574 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE, 5836 err = cmd->cmd_complete(cmd,
5575 MGMT_STATUS_INVALID_PARAMS, 5837 MGMT_STATUS_INVALID_PARAMS);
5576 &cp->addr, sizeof(cp->addr)); 5838 mgmt_pending_remove(cmd);
5577 goto unlock; 5839 goto unlock;
5578 } 5840 }
5579 5841
5580 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) { 5842 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5581 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE, 5843 err = cmd->cmd_complete(cmd,
5582 MGMT_STATUS_INVALID_PARAMS, 5844 MGMT_STATUS_INVALID_PARAMS);
5583 &cp->addr, sizeof(cp->addr)); 5845 mgmt_pending_remove(cmd);
5584 goto unlock; 5846 goto unlock;
5585 } 5847 }
5586 5848
5587 list_del(&params->action); 5849 list_del(&params->action);
5588 list_del(&params->list); 5850 list_del(&params->list);
5589 kfree(params); 5851 kfree(params);
5590 hci_update_background_scan(hdev); 5852 __hci_update_background_scan(&req);
5591 5853
5592 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type); 5854 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
5593 } else { 5855 } else {
@@ -5595,9 +5857,9 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
5595 struct bdaddr_list *b, *btmp; 5857 struct bdaddr_list *b, *btmp;
5596 5858
5597 if (cp->addr.type) { 5859 if (cp->addr.type) {
5598 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE, 5860 err = cmd->cmd_complete(cmd,
5599 MGMT_STATUS_INVALID_PARAMS, 5861 MGMT_STATUS_INVALID_PARAMS);
5600 &cp->addr, sizeof(cp->addr)); 5862 mgmt_pending_remove(cmd);
5601 goto unlock; 5863 goto unlock;
5602 } 5864 }
5603 5865
@@ -5607,7 +5869,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
5607 kfree(b); 5869 kfree(b);
5608 } 5870 }
5609 5871
5610 hci_update_page_scan(hdev, NULL); 5872 __hci_update_page_scan(&req);
5611 5873
5612 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) { 5874 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5613 if (p->auto_connect == HCI_AUTO_CONN_DISABLED) 5875 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
@@ -5620,12 +5882,19 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
5620 5882
5621 BT_DBG("All LE connection parameters were removed"); 5883 BT_DBG("All LE connection parameters were removed");
5622 5884
5623 hci_update_background_scan(hdev); 5885 __hci_update_background_scan(&req);
5624 } 5886 }
5625 5887
5626complete: 5888complete:
5627 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE, 5889 err = hci_req_run(&req, remove_device_complete);
5628 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr)); 5890 if (err < 0) {
5891 /* ENODATA means no HCI commands were needed (e.g. if
5892 * the adapter is powered off).
5893 */
5894 if (err == -ENODATA)
5895 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
5896 mgmt_pending_remove(cmd);
5897 }
5629 5898
5630unlock: 5899unlock:
5631 hci_dev_unlock(hdev); 5900 hci_dev_unlock(hdev);
@@ -6037,8 +6306,9 @@ void mgmt_index_removed(struct hci_dev *hdev)
6037} 6306}
6038 6307
6039/* This function requires the caller holds hdev->lock */ 6308/* This function requires the caller holds hdev->lock */
6040static void restart_le_actions(struct hci_dev *hdev) 6309static void restart_le_actions(struct hci_request *req)
6041{ 6310{
6311 struct hci_dev *hdev = req->hdev;
6042 struct hci_conn_params *p; 6312 struct hci_conn_params *p;
6043 6313
6044 list_for_each_entry(p, &hdev->le_conn_params, list) { 6314 list_for_each_entry(p, &hdev->le_conn_params, list) {
@@ -6060,18 +6330,25 @@ static void restart_le_actions(struct hci_dev *hdev)
6060 } 6330 }
6061 } 6331 }
6062 6332
6063 hci_update_background_scan(hdev); 6333 __hci_update_background_scan(req);
6064} 6334}
6065 6335
6066static void powered_complete(struct hci_dev *hdev, u8 status) 6336static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6067{ 6337{
6068 struct cmd_lookup match = { NULL, hdev }; 6338 struct cmd_lookup match = { NULL, hdev };
6069 6339
6070 BT_DBG("status 0x%02x", status); 6340 BT_DBG("status 0x%02x", status);
6071 6341
6072 hci_dev_lock(hdev); 6342 if (!status) {
6343 /* Register the available SMP channels (BR/EDR and LE) only
6344 * when successfully powering on the controller. This late
6345 * registration is required so that LE SMP can clearly
6346 * decide if the public address or static address is used.
6347 */
6348 smp_register(hdev);
6349 }
6073 6350
6074 restart_le_actions(hdev); 6351 hci_dev_lock(hdev);
6075 6352
6076 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); 6353 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6077 6354
@@ -6092,14 +6369,16 @@ static int powered_update_hci(struct hci_dev *hdev)
6092 6369
6093 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) && 6370 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
6094 !lmp_host_ssp_capable(hdev)) { 6371 !lmp_host_ssp_capable(hdev)) {
6095 u8 ssp = 1; 6372 u8 mode = 0x01;
6096 6373
6097 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp); 6374 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
6098 } 6375
6376 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
6377 u8 support = 0x01;
6099 6378
6100 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) { 6379 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
6101 u8 sc = 0x01; 6380 sizeof(support), &support);
6102 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, sizeof(sc), &sc); 6381 }
6103 } 6382 }
6104 6383
6105 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) && 6384 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
@@ -6130,6 +6409,8 @@ static int powered_update_hci(struct hci_dev *hdev)
6130 6409
6131 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) 6410 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6132 enable_advertising(&req); 6411 enable_advertising(&req);
6412
6413 restart_le_actions(&req);
6133 } 6414 }
6134 6415
6135 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags); 6416 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
@@ -6139,7 +6420,7 @@ static int powered_update_hci(struct hci_dev *hdev)
6139 6420
6140 if (lmp_bredr_capable(hdev)) { 6421 if (lmp_bredr_capable(hdev)) {
6141 write_fast_connectable(&req, false); 6422 write_fast_connectable(&req, false);
6142 hci_update_page_scan(hdev, &req); 6423 __hci_update_page_scan(&req);
6143 update_class(&req); 6424 update_class(&req);
6144 update_name(&req); 6425 update_name(&req);
6145 update_eir(&req); 6426 update_eir(&req);
@@ -6817,43 +7098,6 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6817 hci_req_run(&req, NULL); 7098 hci_req_run(&req, NULL);
6818} 7099}
6819 7100
6820void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6821{
6822 struct cmd_lookup match = { NULL, hdev };
6823 bool changed = false;
6824
6825 if (status) {
6826 u8 mgmt_err = mgmt_status(status);
6827
6828 if (enable) {
6829 if (test_and_clear_bit(HCI_SC_ENABLED,
6830 &hdev->dev_flags))
6831 new_settings(hdev, NULL);
6832 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6833 }
6834
6835 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6836 cmd_status_rsp, &mgmt_err);
6837 return;
6838 }
6839
6840 if (enable) {
6841 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6842 } else {
6843 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6844 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6845 }
6846
6847 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6848 settings_rsp, &match);
6849
6850 if (changed)
6851 new_settings(hdev, match.sk);
6852
6853 if (match.sk)
6854 sock_put(match.sk);
6855}
6856
6857static void sk_lookup(struct pending_cmd *cmd, void *data) 7101static void sk_lookup(struct pending_cmd *cmd, void *data)
6858{ 7102{
6859 struct cmd_lookup *match = data; 7103 struct cmd_lookup *match = data;
@@ -6924,28 +7168,21 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
6924 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 7168 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
6925 mgmt_status(status)); 7169 mgmt_status(status));
6926 } else { 7170 } else {
6927 if (bredr_sc_enabled(hdev) && hash256 && rand256) { 7171 struct mgmt_rp_read_local_oob_data rp;
6928 struct mgmt_rp_read_local_oob_ext_data rp; 7172 size_t rp_size = sizeof(rp);
6929 7173
6930 memcpy(rp.hash192, hash192, sizeof(rp.hash192)); 7174 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
6931 memcpy(rp.rand192, rand192, sizeof(rp.rand192)); 7175 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
6932 7176
7177 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
6933 memcpy(rp.hash256, hash256, sizeof(rp.hash256)); 7178 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
6934 memcpy(rp.rand256, rand256, sizeof(rp.rand256)); 7179 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
6935
6936 cmd_complete(cmd->sk, hdev->id,
6937 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6938 &rp, sizeof(rp));
6939 } else { 7180 } else {
6940 struct mgmt_rp_read_local_oob_data rp; 7181 rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
6941
6942 memcpy(rp.hash, hash192, sizeof(rp.hash));
6943 memcpy(rp.rand, rand192, sizeof(rp.rand));
6944
6945 cmd_complete(cmd->sk, hdev->id,
6946 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6947 &rp, sizeof(rp));
6948 } 7182 }
7183
7184 cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7185 &rp, rp_size);
6949 } 7186 }
6950 7187
6951 mgmt_pending_remove(cmd); 7188 mgmt_pending_remove(cmd);
@@ -7018,6 +7255,21 @@ static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7018 return false; 7255 return false;
7019} 7256}
7020 7257
7258static void restart_le_scan(struct hci_dev *hdev)
7259{
7260 /* If controller is not scanning we are done. */
7261 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
7262 return;
7263
7264 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
7265 hdev->discovery.scan_start +
7266 hdev->discovery.scan_duration))
7267 return;
7268
7269 queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
7270 DISCOV_LE_RESTART_DELAY);
7271}
7272
7021void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 7273void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7022 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, 7274 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
7023 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len) 7275 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
@@ -7040,14 +7292,18 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7040 7292
7041 /* When using service discovery with a RSSI threshold, then check 7293 /* When using service discovery with a RSSI threshold, then check
7042 * if such a RSSI threshold is specified. If a RSSI threshold has 7294 * if such a RSSI threshold is specified. If a RSSI threshold has
7043 * been specified, then all results with a RSSI smaller than the 7295 * been specified, and HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set,
7044 * RSSI threshold will be dropped. 7296 * then all results with a RSSI smaller than the RSSI threshold will be
7297 * dropped. If the quirk is set, let it through for further processing,
7298 * as we might need to restart the scan.
7045 * 7299 *
7046 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry, 7300 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7047 * the results are also dropped. 7301 * the results are also dropped.
7048 */ 7302 */
7049 if (hdev->discovery.rssi != HCI_RSSI_INVALID && 7303 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7050 (rssi < hdev->discovery.rssi || rssi == HCI_RSSI_INVALID)) 7304 (rssi == HCI_RSSI_INVALID ||
7305 (rssi < hdev->discovery.rssi &&
7306 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
7051 return; 7307 return;
7052 7308
7053 /* Make sure that the buffer is big enough. The 5 extra bytes 7309 /* Make sure that the buffer is big enough. The 5 extra bytes
@@ -7066,7 +7322,8 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7066 * However when using service discovery, the value 127 will be 7322 * However when using service discovery, the value 127 will be
7067 * returned when the RSSI is not available. 7323 * returned when the RSSI is not available.
7068 */ 7324 */
7069 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi) 7325 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
7326 link_type == ACL_LINK)
7070 rssi = 0; 7327 rssi = 0;
7071 7328
7072 bacpy(&ev->addr.bdaddr, bdaddr); 7329 bacpy(&ev->addr.bdaddr, bdaddr);
@@ -7081,12 +7338,20 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7081 * kept and checking possible scan response data 7338 * kept and checking possible scan response data
7082 * will be skipped. 7339 * will be skipped.
7083 */ 7340 */
7084 if (hdev->discovery.uuid_count > 0) 7341 if (hdev->discovery.uuid_count > 0) {
7085 match = eir_has_uuids(eir, eir_len, 7342 match = eir_has_uuids(eir, eir_len,
7086 hdev->discovery.uuid_count, 7343 hdev->discovery.uuid_count,
7087 hdev->discovery.uuids); 7344 hdev->discovery.uuids);
7088 else 7345 /* If duplicate filtering does not report RSSI changes,
 7346 * then restart scanning to ensure an updated result with
7347 * updated RSSI values.
7348 */
7349 if (match && test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
7350 &hdev->quirks))
7351 restart_le_scan(hdev);
7352 } else {
7089 match = true; 7353 match = true;
7354 }
7090 7355
7091 if (!match && !scan_rsp_len) 7356 if (!match && !scan_rsp_len)
7092 return; 7357 return;
@@ -7119,6 +7384,14 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7119 hdev->discovery.uuid_count, 7384 hdev->discovery.uuid_count,
7120 hdev->discovery.uuids)) 7385 hdev->discovery.uuids))
7121 return; 7386 return;
7387
7388 /* If duplicate filtering does not report RSSI changes,
 7389 * then restart scanning to ensure an updated result with
7390 * updated RSSI values.
7391 */
7392 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
7393 &hdev->quirks))
7394 restart_le_scan(hdev);
7122 } 7395 }
7123 7396
7124 /* Append scan response data to event */ 7397 /* Append scan response data to event */
@@ -7132,6 +7405,14 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7132 return; 7405 return;
7133 } 7406 }
7134 7407
7408 /* Validate the reported RSSI value against the RSSI threshold once more
 7409 * in case HCI_QUIRK_STRICT_DUPLICATE_FILTER forced a restart of LE
7410 * scanning.
7411 */
7412 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7413 rssi < hdev->discovery.rssi)
7414 return;
7415
7135 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len); 7416 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7136 ev_size = sizeof(*ev) + eir_len + scan_rsp_len; 7417 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7137 7418
@@ -7174,7 +7455,7 @@ void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7174 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL); 7455 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7175} 7456}
7176 7457
7177static void adv_enable_complete(struct hci_dev *hdev, u8 status) 7458static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7178{ 7459{
7179 BT_DBG("%s status %u", hdev->name, status); 7460 BT_DBG("%s status %u", hdev->name, status);
7180} 7461}
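Most of the mgmt.c changes above follow a single pattern: instead of sending an HCI command directly or updating scan state synchronously, a handler now builds an hci_request, queues the commands it needs, and runs the request with a completion callback that also receives the completing opcode. The sketch below only illustrates that pattern; the handler and callback names are hypothetical and not part of the diff, and the -ENODATA handling mirrors what add_device() and remove_device() do when no commands end up queued (for example when the adapter is powered off).

	static void my_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
	{
		/* Runs once all queued commands have completed (or failed). */
		BT_DBG("%s status 0x%02x opcode 0x%04x", hdev->name, status, opcode);
	}

	static int my_enable(struct hci_dev *hdev, u8 val)
	{
		struct hci_request req;
		int err;

		hci_req_init(&req, hdev);
		hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, sizeof(val), &val);

		err = hci_req_run(&req, my_enable_complete);

		/* -ENODATA means no HCI commands were queued at all, e.g. the
		 * adapter is powered off; the callers above treat this case
		 * as immediate success.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}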
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 73f8c75abe6e..4fea24275b17 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -771,7 +771,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
771 771
772 bacpy(&addr.l2_bdaddr, dst); 772 bacpy(&addr.l2_bdaddr, dst);
773 addr.l2_family = AF_BLUETOOTH; 773 addr.l2_family = AF_BLUETOOTH;
774 addr.l2_psm = cpu_to_le16(RFCOMM_PSM); 774 addr.l2_psm = cpu_to_le16(L2CAP_PSM_RFCOMM);
775 addr.l2_cid = 0; 775 addr.l2_cid = 0;
776 addr.l2_bdaddr_type = BDADDR_BREDR; 776 addr.l2_bdaddr_type = BDADDR_BREDR;
777 *err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK); 777 *err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK);
@@ -2038,7 +2038,7 @@ static int rfcomm_add_listener(bdaddr_t *ba)
2038 /* Bind socket */ 2038 /* Bind socket */
2039 bacpy(&addr.l2_bdaddr, ba); 2039 bacpy(&addr.l2_bdaddr, ba);
2040 addr.l2_family = AF_BLUETOOTH; 2040 addr.l2_family = AF_BLUETOOTH;
2041 addr.l2_psm = cpu_to_le16(RFCOMM_PSM); 2041 addr.l2_psm = cpu_to_le16(L2CAP_PSM_RFCOMM);
2042 addr.l2_cid = 0; 2042 addr.l2_cid = 0;
2043 addr.l2_bdaddr_type = BDADDR_BREDR; 2043 addr.l2_bdaddr_type = BDADDR_BREDR;
2044 err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr)); 2044 err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 2348176401a0..3c6d2c8ac1a4 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -468,7 +468,7 @@ done:
468 468
469static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags) 469static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags)
470{ 470{
471 DECLARE_WAITQUEUE(wait, current); 471 DEFINE_WAIT_FUNC(wait, woken_wake_function);
472 struct sock *sk = sock->sk, *nsk; 472 struct sock *sk = sock->sk, *nsk;
473 long timeo; 473 long timeo;
474 int err = 0; 474 int err = 0;
@@ -487,8 +487,6 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
487 /* Wait for an incoming connection. (wake-one). */ 487 /* Wait for an incoming connection. (wake-one). */
488 add_wait_queue_exclusive(sk_sleep(sk), &wait); 488 add_wait_queue_exclusive(sk_sleep(sk), &wait);
489 while (1) { 489 while (1) {
490 set_current_state(TASK_INTERRUPTIBLE);
491
492 if (sk->sk_state != BT_LISTEN) { 490 if (sk->sk_state != BT_LISTEN) {
493 err = -EBADFD; 491 err = -EBADFD;
494 break; 492 break;
@@ -509,10 +507,11 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
509 } 507 }
510 508
511 release_sock(sk); 509 release_sock(sk);
512 timeo = schedule_timeout(timeo); 510
511 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
512
513 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); 513 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
514 } 514 }
515 __set_current_state(TASK_RUNNING);
516 remove_wait_queue(sk_sleep(sk), &wait); 515 remove_wait_queue(sk_sleep(sk), &wait);
517 516
518 if (err) 517 if (err)
@@ -1058,6 +1057,8 @@ int __init rfcomm_init_sockets(void)
1058{ 1057{
1059 int err; 1058 int err;
1060 1059
1060 BUILD_BUG_ON(sizeof(struct sockaddr_rc) > sizeof(struct sockaddr));
1061
1061 err = proto_register(&rfcomm_proto, 0); 1062 err = proto_register(&rfcomm_proto, 0);
1062 if (err < 0) 1063 if (err < 0)
1063 return err; 1064 return err;
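The rfcomm accept change above, and the matching sco change below, replace the open-coded schedule_timeout() sleep, which needed explicit set_current_state() handling, with the woken-wake API, where wait_woken() manages the task state itself. The condensed sketch below simply stitches the pieces of the diff back together for reference; bt_accept_dequeue() is the shared helper both socket layers use to pull an accepted child socket.

	struct sock *nsk;
	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	int err = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	/* Wait for an incoming connection (wake-one semantics). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (1) {
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		nsk = bt_accept_dequeue(sk, newsock);
		if (nsk)
			break;

		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);

		/* wait_woken() sets and clears the task state internally, so
		 * the explicit set_current_state() calls are no longer needed.
		 */
		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);

		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	}
	remove_wait_queue(sk_sleep(sk), &wait);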
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 30e5ea3f1ad3..76321b546e84 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -618,7 +618,7 @@ done:
618 618
619static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags) 619static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags)
620{ 620{
621 DECLARE_WAITQUEUE(wait, current); 621 DEFINE_WAIT_FUNC(wait, woken_wake_function);
622 struct sock *sk = sock->sk, *ch; 622 struct sock *sk = sock->sk, *ch;
623 long timeo; 623 long timeo;
624 int err = 0; 624 int err = 0;
@@ -632,8 +632,6 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
632 /* Wait for an incoming connection. (wake-one). */ 632 /* Wait for an incoming connection. (wake-one). */
633 add_wait_queue_exclusive(sk_sleep(sk), &wait); 633 add_wait_queue_exclusive(sk_sleep(sk), &wait);
634 while (1) { 634 while (1) {
635 set_current_state(TASK_INTERRUPTIBLE);
636
637 if (sk->sk_state != BT_LISTEN) { 635 if (sk->sk_state != BT_LISTEN) {
638 err = -EBADFD; 636 err = -EBADFD;
639 break; 637 break;
@@ -654,10 +652,10 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
654 } 652 }
655 653
656 release_sock(sk); 654 release_sock(sk);
657 timeo = schedule_timeout(timeo); 655
656 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
658 lock_sock(sk); 657 lock_sock(sk);
659 } 658 }
660 __set_current_state(TASK_RUNNING);
661 remove_wait_queue(sk_sleep(sk), &wait); 659 remove_wait_queue(sk_sleep(sk), &wait);
662 660
663 if (err) 661 if (err)
@@ -1184,6 +1182,8 @@ int __init sco_init(void)
1184{ 1182{
1185 int err; 1183 int err;
1186 1184
1185 BUILD_BUG_ON(sizeof(struct sockaddr_sco) > sizeof(struct sockaddr));
1186
1187 err = proto_register(&sco_proto, 0); 1187 err = proto_register(&sco_proto, 0);
1188 if (err < 0) 1188 if (err < 0)
1189 return err; 1189 return err;
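Both socket init paths also gain a compile-time guard that the protocol-specific address structure still fits into a generic struct sockaddr, so copies into a plain sockaddr can never overflow. The same guard works for any address family; a minimal, self-contained sketch follows, where struct sockaddr_example and example_init_sockets() are hypothetical names used only for illustration.

	#include <linux/bug.h>
	#include <linux/init.h>
	#include <linux/socket.h>
	#include <linux/types.h>

	struct sockaddr_example {
		sa_family_t family;
		u8 data[12];
	};

	static int __init example_init_sockets(void)
	{
		/* Breaks the build if the address struct ever outgrows the
		 * generic struct sockaddr it gets copied into.
		 */
		BUILD_BUG_ON(sizeof(struct sockaddr_example) > sizeof(struct sockaddr));

		return 0;
	}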
diff --git a/net/bluetooth/selftest.c b/net/bluetooth/selftest.c
new file mode 100644
index 000000000000..378f4064952c
--- /dev/null
+++ b/net/bluetooth/selftest.c
@@ -0,0 +1,244 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2014 Intel Corporation
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
22*/
23
24#include <net/bluetooth/bluetooth.h>
25#include <net/bluetooth/hci_core.h>
26
27#include "ecc.h"
28#include "smp.h"
29#include "selftest.h"
30
31#if IS_ENABLED(CONFIG_BT_SELFTEST_ECDH)
32
33static const u8 priv_a_1[32] __initconst = {
34 0xbd, 0x1a, 0x3c, 0xcd, 0xa6, 0xb8, 0x99, 0x58,
35 0x99, 0xb7, 0x40, 0xeb, 0x7b, 0x60, 0xff, 0x4a,
36 0x50, 0x3f, 0x10, 0xd2, 0xe3, 0xb3, 0xc9, 0x74,
37 0x38, 0x5f, 0xc5, 0xa3, 0xd4, 0xf6, 0x49, 0x3f,
38};
39static const u8 priv_b_1[32] __initconst = {
40 0xfd, 0xc5, 0x7f, 0xf4, 0x49, 0xdd, 0x4f, 0x6b,
41 0xfb, 0x7c, 0x9d, 0xf1, 0xc2, 0x9a, 0xcb, 0x59,
42 0x2a, 0xe7, 0xd4, 0xee, 0xfb, 0xfc, 0x0a, 0x90,
43 0x9a, 0xbb, 0xf6, 0x32, 0x3d, 0x8b, 0x18, 0x55,
44};
45static const u8 pub_a_1[64] __initconst = {
46 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc,
47 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef,
48 0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e,
49 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20,
50
51 0x8b, 0xd2, 0x89, 0x15, 0xd0, 0x8e, 0x1c, 0x74,
52 0x24, 0x30, 0xed, 0x8f, 0xc2, 0x45, 0x63, 0x76,
53 0x5c, 0x15, 0x52, 0x5a, 0xbf, 0x9a, 0x32, 0x63,
54 0x6d, 0xeb, 0x2a, 0x65, 0x49, 0x9c, 0x80, 0xdc,
55};
56static const u8 pub_b_1[64] __initconst = {
57 0x90, 0xa1, 0xaa, 0x2f, 0xb2, 0x77, 0x90, 0x55,
58 0x9f, 0xa6, 0x15, 0x86, 0xfd, 0x8a, 0xb5, 0x47,
59 0x00, 0x4c, 0x9e, 0xf1, 0x84, 0x22, 0x59, 0x09,
60 0x96, 0x1d, 0xaf, 0x1f, 0xf0, 0xf0, 0xa1, 0x1e,
61
62 0x4a, 0x21, 0xb1, 0x15, 0xf9, 0xaf, 0x89, 0x5f,
63 0x76, 0x36, 0x8e, 0xe2, 0x30, 0x11, 0x2d, 0x47,
64 0x60, 0x51, 0xb8, 0x9a, 0x3a, 0x70, 0x56, 0x73,
65 0x37, 0xad, 0x9d, 0x42, 0x3e, 0xf3, 0x55, 0x4c,
66};
67static const u8 dhkey_1[32] __initconst = {
68 0x98, 0xa6, 0xbf, 0x73, 0xf3, 0x34, 0x8d, 0x86,
69 0xf1, 0x66, 0xf8, 0xb4, 0x13, 0x6b, 0x79, 0x99,
70 0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34,
71 0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec,
72};
73
74static const u8 priv_a_2[32] __initconst = {
75 0x63, 0x76, 0x45, 0xd0, 0xf7, 0x73, 0xac, 0xb7,
76 0xff, 0xdd, 0x03, 0x72, 0xb9, 0x72, 0x85, 0xb4,
77 0x41, 0xb6, 0x5d, 0x0c, 0x5d, 0x54, 0x84, 0x60,
78 0x1a, 0xa3, 0x9a, 0x3c, 0x69, 0x16, 0xa5, 0x06,
79};
80static const u8 priv_b_2[32] __initconst = {
81 0xba, 0x30, 0x55, 0x50, 0x19, 0xa2, 0xca, 0xa3,
82 0xa5, 0x29, 0x08, 0xc6, 0xb5, 0x03, 0x88, 0x7e,
83 0x03, 0x2b, 0x50, 0x73, 0xd4, 0x2e, 0x50, 0x97,
84 0x64, 0xcd, 0x72, 0x0d, 0x67, 0xa0, 0x9a, 0x52,
85};
86static const u8 pub_a_2[64] __initconst = {
87 0xdd, 0x78, 0x5c, 0x74, 0x03, 0x9b, 0x7e, 0x98,
88 0xcb, 0x94, 0x87, 0x4a, 0xad, 0xfa, 0xf8, 0xd5,
89 0x43, 0x3e, 0x5c, 0xaf, 0xea, 0xb5, 0x4c, 0xf4,
90 0x9e, 0x80, 0x79, 0x57, 0x7b, 0xa4, 0x31, 0x2c,
91
92 0x4f, 0x5d, 0x71, 0x43, 0x77, 0x43, 0xf8, 0xea,
93 0xd4, 0x3e, 0xbd, 0x17, 0x91, 0x10, 0x21, 0xd0,
94 0x1f, 0x87, 0x43, 0x8e, 0x40, 0xe2, 0x52, 0xcd,
95 0xbe, 0xdf, 0x98, 0x38, 0x18, 0x12, 0x95, 0x91,
96};
97static const u8 pub_b_2[64] __initconst = {
98 0xcc, 0x00, 0x65, 0xe1, 0xf5, 0x6c, 0x0d, 0xcf,
99 0xec, 0x96, 0x47, 0x20, 0x66, 0xc9, 0xdb, 0x84,
100 0x81, 0x75, 0xa8, 0x4d, 0xc0, 0xdf, 0xc7, 0x9d,
101 0x1b, 0x3f, 0x3d, 0xf2, 0x3f, 0xe4, 0x65, 0xf4,
102
103 0x79, 0xb2, 0xec, 0xd8, 0xca, 0x55, 0xa1, 0xa8,
104 0x43, 0x4d, 0x6b, 0xca, 0x10, 0xb0, 0xc2, 0x01,
105 0xc2, 0x33, 0x4e, 0x16, 0x24, 0xc4, 0xef, 0xee,
106 0x99, 0xd8, 0xbb, 0xbc, 0x48, 0xd0, 0x01, 0x02,
107};
108static const u8 dhkey_2[32] __initconst = {
109 0x69, 0xeb, 0x21, 0x32, 0xf2, 0xc6, 0x05, 0x41,
110 0x60, 0x19, 0xcd, 0x5e, 0x94, 0xe1, 0xe6, 0x5f,
111 0x33, 0x07, 0xe3, 0x38, 0x4b, 0x68, 0xe5, 0x62,
112 0x3f, 0x88, 0x6d, 0x2f, 0x3a, 0x84, 0x85, 0xab,
113};
114
115static const u8 priv_a_3[32] __initconst = {
116 0xbd, 0x1a, 0x3c, 0xcd, 0xa6, 0xb8, 0x99, 0x58,
117 0x99, 0xb7, 0x40, 0xeb, 0x7b, 0x60, 0xff, 0x4a,
118 0x50, 0x3f, 0x10, 0xd2, 0xe3, 0xb3, 0xc9, 0x74,
119 0x38, 0x5f, 0xc5, 0xa3, 0xd4, 0xf6, 0x49, 0x3f,
120};
121static const u8 pub_a_3[64] __initconst = {
122 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc,
123 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef,
124 0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e,
125 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20,
126
127 0x8b, 0xd2, 0x89, 0x15, 0xd0, 0x8e, 0x1c, 0x74,
128 0x24, 0x30, 0xed, 0x8f, 0xc2, 0x45, 0x63, 0x76,
129 0x5c, 0x15, 0x52, 0x5a, 0xbf, 0x9a, 0x32, 0x63,
130 0x6d, 0xeb, 0x2a, 0x65, 0x49, 0x9c, 0x80, 0xdc,
131};
132static const u8 dhkey_3[32] __initconst = {
133 0x2d, 0xab, 0x00, 0x48, 0xcb, 0xb3, 0x7b, 0xda,
134 0x55, 0x7b, 0x8b, 0x72, 0xa8, 0x57, 0x87, 0xc3,
135 0x87, 0x27, 0x99, 0x32, 0xfc, 0x79, 0x5f, 0xae,
136 0x7c, 0x1c, 0xf9, 0x49, 0xe6, 0xd7, 0xaa, 0x70,
137};
138
139static int __init test_ecdh_sample(const u8 priv_a[32], const u8 priv_b[32],
140 const u8 pub_a[64], const u8 pub_b[64],
141 const u8 dhkey[32])
142{
143 u8 dhkey_a[32], dhkey_b[32];
144
145 ecdh_shared_secret(pub_b, priv_a, dhkey_a);
146 ecdh_shared_secret(pub_a, priv_b, dhkey_b);
147
148 if (memcmp(dhkey_a, dhkey, 32))
149 return -EINVAL;
150
151 if (memcmp(dhkey_b, dhkey, 32))
152 return -EINVAL;
153
154 return 0;
155}
156
157static int __init test_ecdh(void)
158{
159 ktime_t calltime, delta, rettime;
160 unsigned long long duration;
161 int err;
162
163 calltime = ktime_get();
164
165 err = test_ecdh_sample(priv_a_1, priv_b_1, pub_a_1, pub_b_1, dhkey_1);
166 if (err) {
167 BT_ERR("ECDH sample 1 failed");
168 return err;
169 }
170
171 err = test_ecdh_sample(priv_a_2, priv_b_2, pub_a_2, pub_b_2, dhkey_2);
172 if (err) {
173 BT_ERR("ECDH sample 2 failed");
174 return err;
175 }
176
177 err = test_ecdh_sample(priv_a_3, priv_a_3, pub_a_3, pub_a_3, dhkey_3);
178 if (err) {
179 BT_ERR("ECDH sample 3 failed");
180 return err;
181 }
182
183 rettime = ktime_get();
184 delta = ktime_sub(rettime, calltime);
185 duration = (unsigned long long) ktime_to_ns(delta) >> 10;
186
187 BT_INFO("ECDH test passed in %llu usecs", duration);
188
189 return 0;
190}
191
192#else
193
194static inline int test_ecdh(void)
195{
196 return 0;
197}
198
199#endif
200
201static int __init run_selftest(void)
202{
203 int err;
204
205 BT_INFO("Starting self testing");
206
207 err = test_ecdh();
208 if (err)
209 goto done;
210
211 err = bt_selftest_smp();
212
213done:
214 BT_INFO("Finished self testing");
215
216 return err;
217}
218
219#if IS_MODULE(CONFIG_BT)
220
221/* This is run when CONFIG_BT_SELFTEST=y and CONFIG_BT=m and is just a
222 * wrapper that allows running the self tests at module init time.
223 *
224 * If CONFIG_BT_SELFTEST=n, then this code is not compiled at all.
225 */
226int __init bt_selftest(void)
227{
228 return run_selftest();
229}
230
231#else
232
233/* This is run when CONFIG_BT_SELFTEST=y and CONFIG_BT=y and is invoked
234 * via late_initcall() as the last item in the initialization sequence.
235 *
236 * If CONFIG_BT_SELFTEST=n, then this code is not compiled at all.
237 */
238static int __init bt_selftest_init(void)
239{
240 return run_selftest();
241}
242late_initcall(bt_selftest_init);
243
244#endif
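
The run_selftest() entry point above only matters if something calls bt_selftest(). A minimal sketch of the expected call site, assuming it is wired into bt_init() in af_bluetooth.c (that file is touched elsewhere in this series but not shown in this hunk): when CONFIG_BT=m this runs the tests at module load time, while for CONFIG_BT=y bt_selftest() is a stub and the late_initcall() above does the work.

#include "selftest.h"

static int __init bt_init(void)
{
	int err;

	/* Run the crypto self tests before bringing up the core */
	err = bt_selftest();
	if (err < 0)
		return err;

	/* ... remainder of the Bluetooth core initialization ... */

	return 0;
}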
diff --git a/net/bluetooth/selftest.h b/net/bluetooth/selftest.h
new file mode 100644
index 000000000000..2aa0a346a913
--- /dev/null
+++ b/net/bluetooth/selftest.h
@@ -0,0 +1,45 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2014 Intel Corporation
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License version 2 as
7 published by the Free Software Foundation;
8
9 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
10 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
11 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
12 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
13 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17
18 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
19 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
20 SOFTWARE IS DISCLAIMED.
21*/
22
23#if IS_ENABLED(CONFIG_BT_SELFTEST) && IS_MODULE(CONFIG_BT)
24
25/* When CONFIG_BT_SELFTEST=y and CONFIG_BT=m, then the self testing
26 * is run at module loading time.
27 */
28int bt_selftest(void);
29
30#else
31
32/* When CONFIG_BT_SELFTEST=y and CONFIG_BT=y, then the self testing
33 * is run via late_initcall() to make sure that subsys_initcall() of
34 * the Bluetooth subsystem and device_initcall() of the Crypto subsystem
35 * do not clash.
36 *
37 * When CONFIG_BT_SELFTEST=n, then this turns into an empty call that
38 * has no impact.
39 */
40static inline int bt_selftest(void)
41{
42 return 0;
43}
44
45#endif
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index b67749bb55bf..c09a821f381d 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -20,6 +20,7 @@
20 SOFTWARE IS DISCLAIMED. 20 SOFTWARE IS DISCLAIMED.
21*/ 21*/
22 22
23#include <linux/debugfs.h>
23#include <linux/crypto.h> 24#include <linux/crypto.h>
24#include <linux/scatterlist.h> 25#include <linux/scatterlist.h>
25#include <crypto/b128ops.h> 26#include <crypto/b128ops.h>
@@ -223,8 +224,9 @@ static int smp_f4(struct crypto_hash *tfm_cmac, const u8 u[32], const u8 v[32],
223 return err; 224 return err;
224} 225}
225 226
226static int smp_f5(struct crypto_hash *tfm_cmac, u8 w[32], u8 n1[16], u8 n2[16], 227static int smp_f5(struct crypto_hash *tfm_cmac, const u8 w[32],
227 u8 a1[7], u8 a2[7], u8 mackey[16], u8 ltk[16]) 228 const u8 n1[16], const u8 n2[16], const u8 a1[7],
229 const u8 a2[7], u8 mackey[16], u8 ltk[16])
228{ 230{
229 /* The btle, salt and length "magic" values are as defined in 231 /* The btle, salt and length "magic" values are as defined in
230 * the SMP section of the Bluetooth core specification. In ASCII 232 * the SMP section of the Bluetooth core specification. In ASCII
@@ -276,7 +278,7 @@ static int smp_f5(struct crypto_hash *tfm_cmac, u8 w[32], u8 n1[16], u8 n2[16],
276} 278}
277 279
278static int smp_f6(struct crypto_hash *tfm_cmac, const u8 w[16], 280static int smp_f6(struct crypto_hash *tfm_cmac, const u8 w[16],
279 const u8 n1[16], u8 n2[16], const u8 r[16], 281 const u8 n1[16], const u8 n2[16], const u8 r[16],
280 const u8 io_cap[3], const u8 a1[7], const u8 a2[7], 282 const u8 io_cap[3], const u8 a1[7], const u8 a2[7],
281 u8 res[16]) 283 u8 res[16])
282{ 284{
@@ -298,7 +300,7 @@ static int smp_f6(struct crypto_hash *tfm_cmac, const u8 w[16],
298 if (err) 300 if (err)
299 return err; 301 return err;
300 302
301 BT_DBG("res %16phN", res); 303 SMP_DBG("res %16phN", res);
302 304
303 return err; 305 return err;
304} 306}
@@ -618,7 +620,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
618 620
619 oob_data = hci_find_remote_oob_data(hdev, &hcon->dst, 621 oob_data = hci_find_remote_oob_data(hdev, &hcon->dst,
620 bdaddr_type); 622 bdaddr_type);
621 if (oob_data) { 623 if (oob_data && oob_data->present) {
622 set_bit(SMP_FLAG_OOB, &smp->flags); 624 set_bit(SMP_FLAG_OOB, &smp->flags);
623 oob_flag = SMP_OOB_PRESENT; 625 oob_flag = SMP_OOB_PRESENT;
624 memcpy(smp->rr, oob_data->rand256, 16); 626 memcpy(smp->rr, oob_data->rand256, 16);
@@ -1674,7 +1676,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
1674 if (conn->hcon->type == ACL_LINK) { 1676 if (conn->hcon->type == ACL_LINK) {
1675 /* We must have a BR/EDR SC link */ 1677 /* We must have a BR/EDR SC link */
1676 if (!test_bit(HCI_CONN_AES_CCM, &conn->hcon->flags) && 1678 if (!test_bit(HCI_CONN_AES_CCM, &conn->hcon->flags) &&
1677 !test_bit(HCI_FORCE_LESC, &hdev->dbg_flags)) 1679 !test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags))
1678 return SMP_CROSS_TRANSP_NOT_ALLOWED; 1680 return SMP_CROSS_TRANSP_NOT_ALLOWED;
1679 1681
1680 set_bit(SMP_FLAG_SC, &smp->flags); 1682 set_bit(SMP_FLAG_SC, &smp->flags);
@@ -2303,8 +2305,12 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
2303 * implementations are not known of and in order to not over 2305 * implementations are not known of and in order to not over
2304 * complicate our implementation, simply pretend that we never 2306 * complicate our implementation, simply pretend that we never
2305 * received an IRK for such a device. 2307 * received an IRK for such a device.
2308 *
2309 * The Identity Address must also be a Static Random or Public
2310 * Address, which hci_is_identity_address() checks for.
2306 */ 2311 */
2307 if (!bacmp(&info->bdaddr, BDADDR_ANY)) { 2312 if (!bacmp(&info->bdaddr, BDADDR_ANY) ||
2313 !hci_is_identity_address(&info->bdaddr, info->addr_type)) {
2308 BT_ERR("Ignoring IRK with no identity address"); 2314 BT_ERR("Ignoring IRK with no identity address");
2309 goto distribute; 2315 goto distribute;
2310 } 2316 }
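
The new hci_is_identity_address() check above rejects IRKs whose Identity Address is neither a Public Address nor a Static Random Address. A minimal sketch of that predicate, assuming the usual encoding where a static random address has the two most significant bits of its top byte set (the real helper lives in the HCI core headers, outside this diff):

static bool is_identity_address(const bdaddr_t *addr, u8 addr_type)
{
	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return true;

	/* Static random: the two top bits of the most significant byte
	 * (addr->b[5], since bdaddr_t is stored little endian) must be 1.
	 */
	return (addr->b[5] & 0xc0) == 0xc0;
}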
@@ -2737,7 +2743,7 @@ static void bredr_pairing(struct l2cap_chan *chan)
2737 2743
2738 /* BR/EDR must use Secure Connections for SMP */ 2744 /* BR/EDR must use Secure Connections for SMP */
2739 if (!test_bit(HCI_CONN_AES_CCM, &hcon->flags) && 2745 if (!test_bit(HCI_CONN_AES_CCM, &hcon->flags) &&
2740 !test_bit(HCI_FORCE_LESC, &hdev->dbg_flags)) 2746 !test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags))
2741 return; 2747 return;
2742 2748
2743 /* If our LE support is not enabled don't do anything */ 2749 /* If our LE support is not enabled don't do anything */
@@ -2944,11 +2950,30 @@ create_chan:
2944 2950
2945 l2cap_chan_set_defaults(chan); 2951 l2cap_chan_set_defaults(chan);
2946 2952
2947 bacpy(&chan->src, &hdev->bdaddr); 2953 if (cid == L2CAP_CID_SMP) {
2948 if (cid == L2CAP_CID_SMP) 2954 /* If usage of static address is forced or if the device
2949 chan->src_type = BDADDR_LE_PUBLIC; 2955 * does not have a public address, then listen on the static
2950 else 2956 * address.
2957 *
2958 * In case BR/EDR has been disabled on a dual-mode controller
2959 * and a static address has been configured, then listen on
2960 * the static address instead.
2961 */
2962 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
2963 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2964 (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
2965 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2966 bacpy(&chan->src, &hdev->static_addr);
2967 chan->src_type = BDADDR_LE_RANDOM;
2968 } else {
2969 bacpy(&chan->src, &hdev->bdaddr);
2970 chan->src_type = BDADDR_LE_PUBLIC;
2971 }
2972 } else {
2973 bacpy(&chan->src, &hdev->bdaddr);
2951 chan->src_type = BDADDR_BREDR; 2974 chan->src_type = BDADDR_BREDR;
2975 }
2976
2952 chan->state = BT_LISTEN; 2977 chan->state = BT_LISTEN;
2953 chan->mode = L2CAP_MODE_BASIC; 2978 chan->mode = L2CAP_MODE_BASIC;
2954 chan->imtu = L2CAP_DEFAULT_MTU; 2979 chan->imtu = L2CAP_DEFAULT_MTU;
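
For readability, the listen-address policy introduced above can be restated as a predicate. This is only an illustrative sketch with the same conditions as the inline test; the in-tree code keeps the check open-coded:

static bool smp_listen_on_static_addr(struct hci_dev *hdev)
{
	/* Static address usage forced via debugfs */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return true;

	/* LE-only controller without a public address */
	if (!bacmp(&hdev->bdaddr, BDADDR_ANY))
		return true;

	/* Dual-mode controller with BR/EDR disabled and a static
	 * address configured
	 */
	return !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
	       bacmp(&hdev->static_addr, BDADDR_ANY);
}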
@@ -2975,21 +3000,108 @@ static void smp_del_chan(struct l2cap_chan *chan)
2975 l2cap_chan_put(chan); 3000 l2cap_chan_put(chan);
2976} 3001}
2977 3002
3003static ssize_t force_bredr_smp_read(struct file *file,
3004 char __user *user_buf,
3005 size_t count, loff_t *ppos)
3006{
3007 struct hci_dev *hdev = file->private_data;
3008 char buf[3];
3009
3010 buf[0] = test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags) ? 'Y': 'N';
3011 buf[1] = '\n';
3012 buf[2] = '\0';
3013 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
3014}
3015
3016static ssize_t force_bredr_smp_write(struct file *file,
3017 const char __user *user_buf,
3018 size_t count, loff_t *ppos)
3019{
3020 struct hci_dev *hdev = file->private_data;
3021 char buf[32];
3022 size_t buf_size = min(count, (sizeof(buf)-1));
3023 bool enable;
3024
3025 if (copy_from_user(buf, user_buf, buf_size))
3026 return -EFAULT;
3027
3028 buf[buf_size] = '\0';
3029 if (strtobool(buf, &enable))
3030 return -EINVAL;
3031
3032 if (enable == test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags))
3033 return -EALREADY;
3034
3035 if (enable) {
3036 struct l2cap_chan *chan;
3037
3038 chan = smp_add_cid(hdev, L2CAP_CID_SMP_BREDR);
3039 if (IS_ERR(chan))
3040 return PTR_ERR(chan);
3041
3042 hdev->smp_bredr_data = chan;
3043 } else {
3044 struct l2cap_chan *chan;
3045
3046 chan = hdev->smp_bredr_data;
3047 hdev->smp_bredr_data = NULL;
3048 smp_del_chan(chan);
3049 }
3050
3051 change_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags);
3052
3053 return count;
3054}
3055
3056static const struct file_operations force_bredr_smp_fops = {
3057 .open = simple_open,
3058 .read = force_bredr_smp_read,
3059 .write = force_bredr_smp_write,
3060 .llseek = default_llseek,
3061};
3062
2978int smp_register(struct hci_dev *hdev) 3063int smp_register(struct hci_dev *hdev)
2979{ 3064{
2980 struct l2cap_chan *chan; 3065 struct l2cap_chan *chan;
2981 3066
2982 BT_DBG("%s", hdev->name); 3067 BT_DBG("%s", hdev->name);
2983 3068
3069 /* If the controller does not support Low Energy operation, then
3070 * there is also no need to register any SMP channel.
3071 */
3072 if (!lmp_le_capable(hdev))
3073 return 0;
3074
3075 if (WARN_ON(hdev->smp_data)) {
3076 chan = hdev->smp_data;
3077 hdev->smp_data = NULL;
3078 smp_del_chan(chan);
3079 }
3080
2984 chan = smp_add_cid(hdev, L2CAP_CID_SMP); 3081 chan = smp_add_cid(hdev, L2CAP_CID_SMP);
2985 if (IS_ERR(chan)) 3082 if (IS_ERR(chan))
2986 return PTR_ERR(chan); 3083 return PTR_ERR(chan);
2987 3084
2988 hdev->smp_data = chan; 3085 hdev->smp_data = chan;
2989 3086
2990 if (!lmp_sc_capable(hdev) && 3087 /* If the controller does not support BR/EDR Secure Connections
2991 !test_bit(HCI_FORCE_LESC, &hdev->dbg_flags)) 3088 * feature, then the BR/EDR SMP channel shall not be present.
3089 *
3090 * To test this with Bluetooth 4.0 controllers, create a debugfs
3091 * switch that allows forcing BR/EDR SMP support and accepting
3092 * cross-transport pairing on non-AES encrypted connections.
3093 */
3094 if (!lmp_sc_capable(hdev)) {
3095 debugfs_create_file("force_bredr_smp", 0644, hdev->debugfs,
3096 hdev, &force_bredr_smp_fops);
2992 return 0; 3097 return 0;
3098 }
3099
3100 if (WARN_ON(hdev->smp_bredr_data)) {
3101 chan = hdev->smp_bredr_data;
3102 hdev->smp_bredr_data = NULL;
3103 smp_del_chan(chan);
3104 }
2993 3105
2994 chan = smp_add_cid(hdev, L2CAP_CID_SMP_BREDR); 3106 chan = smp_add_cid(hdev, L2CAP_CID_SMP_BREDR);
2995 if (IS_ERR(chan)) { 3107 if (IS_ERR(chan)) {
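
The force_bredr_smp debugfs entry added above is meant to be flipped from user space. A small sketch of doing that from a C test tool, assuming debugfs is mounted at /sys/kernel/debug and the controller is hci0 (hdev->debugfs maps to /sys/kernel/debug/bluetooth/hciX):

#include <fcntl.h>
#include <unistd.h>

static int set_force_bredr_smp(int enable)
{
	const char *path =
		"/sys/kernel/debug/bluetooth/hci0/force_bredr_smp";
	int fd, ret;

	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;

	/* strtobool() in the write handler accepts Y/N (and 1/0) */
	ret = write(fd, enable ? "Y" : "N", 1) == 1 ? 0 : -1;
	close(fd);

	return ret;
}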
@@ -3021,3 +3133,331 @@ void smp_unregister(struct hci_dev *hdev)
3021 smp_del_chan(chan); 3133 smp_del_chan(chan);
3022 } 3134 }
3023} 3135}
3136
3137#if IS_ENABLED(CONFIG_BT_SELFTEST_SMP)
3138
3139static int __init test_ah(struct crypto_blkcipher *tfm_aes)
3140{
3141 const u8 irk[16] = {
3142 0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34,
3143 0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec };
3144 const u8 r[3] = { 0x94, 0x81, 0x70 };
3145 const u8 exp[3] = { 0xaa, 0xfb, 0x0d };
3146 u8 res[3];
3147 int err;
3148
3149 err = smp_ah(tfm_aes, irk, r, res);
3150 if (err)
3151 return err;
3152
3153 if (memcmp(res, exp, 3))
3154 return -EINVAL;
3155
3156 return 0;
3157}
3158
3159static int __init test_c1(struct crypto_blkcipher *tfm_aes)
3160{
3161 const u8 k[16] = {
3162 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3163 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
3164 const u8 r[16] = {
3165 0xe0, 0x2e, 0x70, 0xc6, 0x4e, 0x27, 0x88, 0x63,
3166 0x0e, 0x6f, 0xad, 0x56, 0x21, 0xd5, 0x83, 0x57 };
3167 const u8 preq[7] = { 0x01, 0x01, 0x00, 0x00, 0x10, 0x07, 0x07 };
3168 const u8 pres[7] = { 0x02, 0x03, 0x00, 0x00, 0x08, 0x00, 0x05 };
3169 const u8 _iat = 0x01;
3170 const u8 _rat = 0x00;
3171 const bdaddr_t ra = { { 0xb6, 0xb5, 0xb4, 0xb3, 0xb2, 0xb1 } };
3172 const bdaddr_t ia = { { 0xa6, 0xa5, 0xa4, 0xa3, 0xa2, 0xa1 } };
3173 const u8 exp[16] = {
3174 0x86, 0x3b, 0xf1, 0xbe, 0xc5, 0x4d, 0xa7, 0xd2,
3175 0xea, 0x88, 0x89, 0x87, 0xef, 0x3f, 0x1e, 0x1e };
3176 u8 res[16];
3177 int err;
3178
3179 err = smp_c1(tfm_aes, k, r, preq, pres, _iat, &ia, _rat, &ra, res);
3180 if (err)
3181 return err;
3182
3183 if (memcmp(res, exp, 16))
3184 return -EINVAL;
3185
3186 return 0;
3187}
3188
3189static int __init test_s1(struct crypto_blkcipher *tfm_aes)
3190{
3191 const u8 k[16] = {
3192 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3193 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
3194 const u8 r1[16] = {
3195 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 };
3196 const u8 r2[16] = {
3197 0x00, 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99 };
3198 const u8 exp[16] = {
3199 0x62, 0xa0, 0x6d, 0x79, 0xae, 0x16, 0x42, 0x5b,
3200 0x9b, 0xf4, 0xb0, 0xe8, 0xf0, 0xe1, 0x1f, 0x9a };
3201 u8 res[16];
3202 int err;
3203
3204 err = smp_s1(tfm_aes, k, r1, r2, res);
3205 if (err)
3206 return err;
3207
3208 if (memcmp(res, exp, 16))
3209 return -EINVAL;
3210
3211 return 0;
3212}
3213
3214static int __init test_f4(struct crypto_hash *tfm_cmac)
3215{
3216 const u8 u[32] = {
3217 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc,
3218 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef,
3219 0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e,
3220 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20 };
3221 const u8 v[32] = {
3222 0xfd, 0xc5, 0x7f, 0xf4, 0x49, 0xdd, 0x4f, 0x6b,
3223 0xfb, 0x7c, 0x9d, 0xf1, 0xc2, 0x9a, 0xcb, 0x59,
3224 0x2a, 0xe7, 0xd4, 0xee, 0xfb, 0xfc, 0x0a, 0x90,
3225 0x9a, 0xbb, 0xf6, 0x32, 0x3d, 0x8b, 0x18, 0x55 };
3226 const u8 x[16] = {
3227 0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 0xff,
3228 0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 };
3229 const u8 z = 0x00;
3230 const u8 exp[16] = {
3231 0x2d, 0x87, 0x74, 0xa9, 0xbe, 0xa1, 0xed, 0xf1,
3232 0x1c, 0xbd, 0xa9, 0x07, 0xf1, 0x16, 0xc9, 0xf2 };
3233 u8 res[16];
3234 int err;
3235
3236 err = smp_f4(tfm_cmac, u, v, x, z, res);
3237 if (err)
3238 return err;
3239
3240 if (memcmp(res, exp, 16))
3241 return -EINVAL;
3242
3243 return 0;
3244}
3245
3246static int __init test_f5(struct crypto_hash *tfm_cmac)
3247{
3248 const u8 w[32] = {
3249 0x98, 0xa6, 0xbf, 0x73, 0xf3, 0x34, 0x8d, 0x86,
3250 0xf1, 0x66, 0xf8, 0xb4, 0x13, 0x6b, 0x79, 0x99,
3251 0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34,
3252 0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec };
3253 const u8 n1[16] = {
3254 0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 0xff,
3255 0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 };
3256 const u8 n2[16] = {
3257 0xcf, 0xc4, 0x3d, 0xff, 0xf7, 0x83, 0x65, 0x21,
3258 0x6e, 0x5f, 0xa7, 0x25, 0xcc, 0xe7, 0xe8, 0xa6 };
3259 const u8 a1[7] = { 0xce, 0xbf, 0x37, 0x37, 0x12, 0x56, 0x00 };
3260 const u8 a2[7] = { 0xc1, 0xcf, 0x2d, 0x70, 0x13, 0xa7, 0x00 };
3261 const u8 exp_ltk[16] = {
3262 0x38, 0x0a, 0x75, 0x94, 0xb5, 0x22, 0x05, 0x98,
3263 0x23, 0xcd, 0xd7, 0x69, 0x11, 0x79, 0x86, 0x69 };
3264 const u8 exp_mackey[16] = {
3265 0x20, 0x6e, 0x63, 0xce, 0x20, 0x6a, 0x3f, 0xfd,
3266 0x02, 0x4a, 0x08, 0xa1, 0x76, 0xf1, 0x65, 0x29 };
3267 u8 mackey[16], ltk[16];
3268 int err;
3269
3270 err = smp_f5(tfm_cmac, w, n1, n2, a1, a2, mackey, ltk);
3271 if (err)
3272 return err;
3273
3274 if (memcmp(mackey, exp_mackey, 16))
3275 return -EINVAL;
3276
3277 if (memcmp(ltk, exp_ltk, 16))
3278 return -EINVAL;
3279
3280 return 0;
3281}
3282
3283static int __init test_f6(struct crypto_hash *tfm_cmac)
3284{
3285 const u8 w[16] = {
3286 0x20, 0x6e, 0x63, 0xce, 0x20, 0x6a, 0x3f, 0xfd,
3287 0x02, 0x4a, 0x08, 0xa1, 0x76, 0xf1, 0x65, 0x29 };
3288 const u8 n1[16] = {
3289 0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 0xff,
3290 0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 };
3291 const u8 n2[16] = {
3292 0xcf, 0xc4, 0x3d, 0xff, 0xf7, 0x83, 0x65, 0x21,
3293 0x6e, 0x5f, 0xa7, 0x25, 0xcc, 0xe7, 0xe8, 0xa6 };
3294 const u8 r[16] = {
3295 0xc8, 0x0f, 0x2d, 0x0c, 0xd2, 0x42, 0xda, 0x08,
3296 0x54, 0xbb, 0x53, 0xb4, 0x3b, 0x34, 0xa3, 0x12 };
3297 const u8 io_cap[3] = { 0x02, 0x01, 0x01 };
3298 const u8 a1[7] = { 0xce, 0xbf, 0x37, 0x37, 0x12, 0x56, 0x00 };
3299 const u8 a2[7] = { 0xc1, 0xcf, 0x2d, 0x70, 0x13, 0xa7, 0x00 };
3300 const u8 exp[16] = {
3301 0x61, 0x8f, 0x95, 0xda, 0x09, 0x0b, 0x6c, 0xd2,
3302 0xc5, 0xe8, 0xd0, 0x9c, 0x98, 0x73, 0xc4, 0xe3 };
3303 u8 res[16];
3304 int err;
3305
3306 err = smp_f6(tfm_cmac, w, n1, n2, r, io_cap, a1, a2, res);
3307 if (err)
3308 return err;
3309
3310 if (memcmp(res, exp, 16))
3311 return -EINVAL;
3312
3313 return 0;
3314}
3315
3316static int __init test_g2(struct crypto_hash *tfm_cmac)
3317{
3318 const u8 u[32] = {
3319 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc,
3320 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef,
3321 0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e,
3322 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20 };
3323 const u8 v[32] = {
3324 0xfd, 0xc5, 0x7f, 0xf4, 0x49, 0xdd, 0x4f, 0x6b,
3325 0xfb, 0x7c, 0x9d, 0xf1, 0xc2, 0x9a, 0xcb, 0x59,
3326 0x2a, 0xe7, 0xd4, 0xee, 0xfb, 0xfc, 0x0a, 0x90,
3327 0x9a, 0xbb, 0xf6, 0x32, 0x3d, 0x8b, 0x18, 0x55 };
3328 const u8 x[16] = {
3329 0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 0xff,
3330 0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 };
3331 const u8 y[16] = {
3332 0xcf, 0xc4, 0x3d, 0xff, 0xf7, 0x83, 0x65, 0x21,
3333 0x6e, 0x5f, 0xa7, 0x25, 0xcc, 0xe7, 0xe8, 0xa6 };
3334 const u32 exp_val = 0x2f9ed5ba % 1000000;
3335 u32 val;
3336 int err;
3337
3338 err = smp_g2(tfm_cmac, u, v, x, y, &val);
3339 if (err)
3340 return err;
3341
3342 if (val != exp_val)
3343 return -EINVAL;
3344
3345 return 0;
3346}
3347
3348static int __init test_h6(struct crypto_hash *tfm_cmac)
3349{
3350 const u8 w[16] = {
3351 0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34,
3352 0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec };
3353 const u8 key_id[4] = { 0x72, 0x62, 0x65, 0x6c };
3354 const u8 exp[16] = {
3355 0x99, 0x63, 0xb1, 0x80, 0xe2, 0xa9, 0xd3, 0xe8,
3356 0x1c, 0xc9, 0x6d, 0xe7, 0x02, 0xe1, 0x9a, 0x2d };
3357 u8 res[16];
3358 int err;
3359
3360 err = smp_h6(tfm_cmac, w, key_id, res);
3361 if (err)
3362 return err;
3363
3364 if (memcmp(res, exp, 16))
3365 return -EINVAL;
3366
3367 return 0;
3368}
3369
3370static int __init run_selftests(struct crypto_blkcipher *tfm_aes,
3371 struct crypto_hash *tfm_cmac)
3372{
3373 ktime_t calltime, delta, rettime;
3374 unsigned long long duration;
3375 int err;
3376
3377 calltime = ktime_get();
3378
3379 err = test_ah(tfm_aes);
3380 if (err) {
3381 BT_ERR("smp_ah test failed");
3382 return err;
3383 }
3384
3385 err = test_c1(tfm_aes);
3386 if (err) {
3387 BT_ERR("smp_c1 test failed");
3388 return err;
3389 }
3390
3391 err = test_s1(tfm_aes);
3392 if (err) {
3393 BT_ERR("smp_s1 test failed");
3394 return err;
3395 }
3396
3397 err = test_f4(tfm_cmac);
3398 if (err) {
3399 BT_ERR("smp_f4 test failed");
3400 return err;
3401 }
3402
3403 err = test_f5(tfm_cmac);
3404 if (err) {
3405 BT_ERR("smp_f5 test failed");
3406 return err;
3407 }
3408
3409 err = test_f6(tfm_cmac);
3410 if (err) {
3411 BT_ERR("smp_f6 test failed");
3412 return err;
3413 }
3414
3415 err = test_g2(tfm_cmac);
3416 if (err) {
3417 BT_ERR("smp_g2 test failed");
3418 return err;
3419 }
3420
3421 err = test_h6(tfm_cmac);
3422 if (err) {
3423 BT_ERR("smp_h6 test failed");
3424 return err;
3425 }
3426
3427 rettime = ktime_get();
3428 delta = ktime_sub(rettime, calltime);
3429 duration = (unsigned long long) ktime_to_ns(delta) >> 10;
3430
3431 BT_INFO("SMP test passed in %llu usecs", duration);
3432
3433 return 0;
3434}
3435
3436int __init bt_selftest_smp(void)
3437{
3438 struct crypto_blkcipher *tfm_aes;
3439 struct crypto_hash *tfm_cmac;
3440 int err;
3441
3442 tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
3443 if (IS_ERR(tfm_aes)) {
3444 BT_ERR("Unable to create ECB crypto context");
3445 return PTR_ERR(tfm_aes);
3446 }
3447
3448 tfm_cmac = crypto_alloc_hash("cmac(aes)", 0, CRYPTO_ALG_ASYNC);
3449 if (IS_ERR(tfm_cmac)) {
3450 BT_ERR("Unable to create CMAC crypto context");
3451 crypto_free_blkcipher(tfm_aes);
3452 return PTR_ERR(tfm_cmac);
3453 }
3454
3455 err = run_selftests(tfm_aes, tfm_cmac);
3456
3457 crypto_free_hash(tfm_cmac);
3458 crypto_free_blkcipher(tfm_aes);
3459
3460 return err;
3461}
3462
3463#endif
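
All of the f4/f5/f6/g2/h6 checks above run over an AES-CMAC transform allocated as "cmac(aes)". A minimal sketch, assuming the legacy crypto_hash API used in this file, of computing one CMAC over a buffer; note that the in-tree helper in smp.c additionally reverses the byte order of key, message and result, since SMP values are little endian while the CMAC specification works big endian:

static int cmac_aes(struct crypto_hash *tfm, const u8 key[16],
		    const u8 *msg, size_t len, u8 mac[16])
{
	struct hash_desc desc = { .tfm = tfm, .flags = 0 };
	struct scatterlist sg;
	int err;

	err = crypto_hash_setkey(tfm, key, 16);
	if (err)
		return err;

	sg_init_one(&sg, msg, len);

	return crypto_hash_digest(&desc, &sg, len, mac);
}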
diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
index 3296bf42ae80..60c5b73fcb4b 100644
--- a/net/bluetooth/smp.h
+++ b/net/bluetooth/smp.h
@@ -192,4 +192,17 @@ int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa);
192int smp_register(struct hci_dev *hdev); 192int smp_register(struct hci_dev *hdev);
193void smp_unregister(struct hci_dev *hdev); 193void smp_unregister(struct hci_dev *hdev);
194 194
195#if IS_ENABLED(CONFIG_BT_SELFTEST_SMP)
196
197int bt_selftest_smp(void);
198
199#else
200
201static inline int bt_selftest_smp(void)
202{
203 return 0;
204}
205
206#endif
207
195#endif /* __SMP_H */ 208#endif /* __SMP_H */