aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-05-24 14:54:29 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-05-24 14:54:29 -0400
commit28f3d717618156c0dcd2f497d791b578a7931d87 (patch)
tree37b11581b51929b5473541e53bd242b3e1a9f666 /net
parent654443e20dfc0617231f28a07c96a979ee1a0239 (diff)
parent1ca7ee30630e1022dbcf1b51be20580815ffab73 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull more networking updates from David Miller: "Ok, everything from here on out will be bug fixes." 1) One final sync of wireless and bluetooth stuff from John Linville. These changes have all been in his tree for more than a week, and therefore have had the necessary -next exposure. John was just away on a trip and didn't have a chance to send the pull request until a day or two ago. 2) Put back some defines in user exposed header file areas that were removed during the tokenring purge. From Stephen Hemminger and Paul Gortmaker. 3) A bug fix for UDP hash table allocation got lost in the pile due to one of those "you got it.. no I've got it.." situations. :-) From Tim Bird. 4) SKB coalescing in TCP needs to have stricter checks, otherwise we'll try to coalesce overlapping frags and crash. Fix from Eric Dumazet. 5) RCU routing table lookups can race with free_fib_info(), causing crashes when we deref the device pointers in the route. Fix by releasing the net device in the RCU callback. From Yanmin Zhang. 
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (293 commits) tcp: take care of overlaps in tcp_try_coalesce() ipv4: fix the rcu race between free_fib_info and ip_route_output_slow mm: add a low limit to alloc_large_system_hash ipx: restore token ring define to include/linux/ipx.h if: restore token ring ARP type to header xen: do not disable netfront in dom0 phy/micrel: Fix ID of KSZ9021 mISDN: Add X-Tensions USB ISDN TA XC-525 gianfar:don't add FCB length to hard_header_len Bluetooth: Report proper error number in disconnection Bluetooth: Create flags for bt_sk() Bluetooth: report the right security level in getsockopt Bluetooth: Lock the L2CAP channel when sending Bluetooth: Restore locking semantics when looking up L2CAP channels Bluetooth: Fix a redundant and problematic incoming MTU check Bluetooth: Add support for Foxconn/Hon Hai AR5BBU22 0489:E03C Bluetooth: Fix EIR data generation for mgmt_device_found Bluetooth: Fix Inquiry with RSSI event mask Bluetooth: improve readability of l2cap_seq_list code Bluetooth: Fix skb length calculation ...
Diffstat (limited to 'net')
-rw-r--r--net/bluetooth/af_bluetooth.c8
-rw-r--r--net/bluetooth/bnep/core.c2
-rw-r--r--net/bluetooth/hci_conn.c56
-rw-r--r--net/bluetooth/hci_core.c267
-rw-r--r--net/bluetooth/hci_event.c75
-rw-r--r--net/bluetooth/hci_sysfs.c5
-rw-r--r--net/bluetooth/l2cap_core.c762
-rw-r--r--net/bluetooth/l2cap_sock.c76
-rw-r--r--net/bluetooth/mgmt.c286
-rw-r--r--net/bluetooth/rfcomm/sock.c14
-rw-r--r--net/bluetooth/sco.c75
-rw-r--r--net/bluetooth/smp.c2
-rw-r--r--net/ipv4/fib_semantics.c12
-rw-r--r--net/ipv4/route.c1
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/tcp_input.c5
-rw-r--r--net/ipv4/udp.c30
-rw-r--r--net/mac80211/agg-tx.c10
-rw-r--r--net/mac80211/debugfs_netdev.c2
-rw-r--r--net/mac80211/ibss.c5
-rw-r--r--net/mac80211/iface.c4
-rw-r--r--net/mac80211/main.c3
-rw-r--r--net/mac80211/mesh.c6
-rw-r--r--net/mac80211/mesh_hwmp.c5
-rw-r--r--net/mac80211/mesh_plink.c65
-rw-r--r--net/mac80211/rx.c6
-rw-r--r--net/mac80211/wep.c15
-rw-r--r--net/mac80211/wpa.c10
-rw-r--r--net/nfc/core.c112
-rw-r--r--net/nfc/hci/Kconfig1
-rw-r--r--net/nfc/hci/core.c78
-rw-r--r--net/nfc/hci/shdlc.c12
-rw-r--r--net/nfc/llcp/commands.c4
-rw-r--r--net/nfc/llcp/llcp.c7
-rw-r--r--net/nfc/llcp/sock.c57
-rw-r--r--net/nfc/nci/core.c27
-rw-r--r--net/nfc/nci/data.c8
-rw-r--r--net/nfc/nci/lib.c1
-rw-r--r--net/nfc/nci/ntf.c2
-rw-r--r--net/nfc/netlink.c6
-rw-r--r--net/nfc/nfc.h2
-rw-r--r--net/wireless/chan.c2
-rw-r--r--net/wireless/core.c4
-rw-r--r--net/wireless/core.h2
-rw-r--r--net/wireless/nl80211.c69
-rw-r--r--net/wireless/util.c2
46 files changed, 1350 insertions, 855 deletions
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 6fb68a9743af..46e7f86acfc9 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -210,7 +210,7 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
210 } 210 }
211 211
212 if (sk->sk_state == BT_CONNECTED || !newsock || 212 if (sk->sk_state == BT_CONNECTED || !newsock ||
213 bt_sk(parent)->defer_setup) { 213 test_bit(BT_DEFER_SETUP, &bt_sk(parent)->flags)) {
214 bt_accept_unlink(sk); 214 bt_accept_unlink(sk);
215 if (newsock) 215 if (newsock)
216 sock_graft(sk, newsock); 216 sock_graft(sk, newsock);
@@ -410,8 +410,8 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
410 list_for_each_safe(p, n, &bt_sk(parent)->accept_q) { 410 list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
411 sk = (struct sock *) list_entry(p, struct bt_sock, accept_q); 411 sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
412 if (sk->sk_state == BT_CONNECTED || 412 if (sk->sk_state == BT_CONNECTED ||
413 (bt_sk(parent)->defer_setup && 413 (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
414 sk->sk_state == BT_CONNECT2)) 414 sk->sk_state == BT_CONNECT2))
415 return POLLIN | POLLRDNORM; 415 return POLLIN | POLLRDNORM;
416 } 416 }
417 417
@@ -450,7 +450,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wa
450 sk->sk_state == BT_CONFIG) 450 sk->sk_state == BT_CONFIG)
451 return mask; 451 return mask;
452 452
453 if (!bt_sk(sk)->suspended && sock_writeable(sk)) 453 if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
454 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 454 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
455 else 455 else
456 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 456 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 88884d1d95fd..031d7d656754 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -340,7 +340,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
340 } 340 }
341 341
342 /* Strip 802.1p header */ 342 /* Strip 802.1p header */
343 if (ntohs(s->eh.h_proto) == 0x8100) { 343 if (ntohs(s->eh.h_proto) == ETH_P_8021Q) {
344 if (!skb_pull(skb, 4)) 344 if (!skb_pull(skb, 4))
345 goto badframe; 345 goto badframe;
346 s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2)); 346 s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 5238b6b3ea6a..3f18a6ed9731 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -223,36 +223,6 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
223} 223}
224EXPORT_SYMBOL(hci_le_start_enc); 224EXPORT_SYMBOL(hci_le_start_enc);
225 225
226void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
227{
228 struct hci_dev *hdev = conn->hdev;
229 struct hci_cp_le_ltk_reply cp;
230
231 BT_DBG("%p", conn);
232
233 memset(&cp, 0, sizeof(cp));
234
235 cp.handle = cpu_to_le16(conn->handle);
236 memcpy(cp.ltk, ltk, sizeof(ltk));
237
238 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
239}
240EXPORT_SYMBOL(hci_le_ltk_reply);
241
242void hci_le_ltk_neg_reply(struct hci_conn *conn)
243{
244 struct hci_dev *hdev = conn->hdev;
245 struct hci_cp_le_ltk_neg_reply cp;
246
247 BT_DBG("%p", conn);
248
249 memset(&cp, 0, sizeof(cp));
250
251 cp.handle = cpu_to_le16(conn->handle);
252
253 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(cp), &cp);
254}
255
256/* Device _must_ be locked */ 226/* Device _must_ be locked */
257void hci_sco_setup(struct hci_conn *conn, __u8 status) 227void hci_sco_setup(struct hci_conn *conn, __u8 status)
258{ 228{
@@ -513,7 +483,8 @@ EXPORT_SYMBOL(hci_get_route);
513 483
514/* Create SCO, ACL or LE connection. 484/* Create SCO, ACL or LE connection.
515 * Device _must_ be locked */ 485 * Device _must_ be locked */
516struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type) 486struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
487 __u8 dst_type, __u8 sec_level, __u8 auth_type)
517{ 488{
518 struct hci_conn *acl; 489 struct hci_conn *acl;
519 struct hci_conn *sco; 490 struct hci_conn *sco;
@@ -522,23 +493,18 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
522 BT_DBG("%s dst %s", hdev->name, batostr(dst)); 493 BT_DBG("%s dst %s", hdev->name, batostr(dst));
523 494
524 if (type == LE_LINK) { 495 if (type == LE_LINK) {
525 struct adv_entry *entry;
526
527 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); 496 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
528 if (le) 497 if (!le) {
529 return ERR_PTR(-EBUSY); 498 le = hci_conn_add(hdev, LE_LINK, dst);
530 499 if (!le)
531 entry = hci_find_adv_entry(hdev, dst); 500 return ERR_PTR(-ENOMEM);
532 if (!entry)
533 return ERR_PTR(-EHOSTUNREACH);
534 501
535 le = hci_conn_add(hdev, LE_LINK, dst); 502 le->dst_type = bdaddr_to_le(dst_type);
536 if (!le) 503 hci_le_connect(le);
537 return ERR_PTR(-ENOMEM); 504 }
538
539 le->dst_type = entry->bdaddr_type;
540 505
541 hci_le_connect(le); 506 le->pending_sec_level = sec_level;
507 le->auth_type = auth_type;
542 508
543 hci_conn_hold(le); 509 hci_conn_hold(le);
544 510
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index d6dc44cd15b0..411ace8e647b 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -83,6 +83,7 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
83 */ 83 */
84 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) { 84 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
85 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data; 85 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
86 u16 opcode = __le16_to_cpu(sent->opcode);
86 struct sk_buff *skb; 87 struct sk_buff *skb;
87 88
88 /* Some CSR based controllers generate a spontaneous 89 /* Some CSR based controllers generate a spontaneous
@@ -92,7 +93,7 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
92 * command. 93 * command.
93 */ 94 */
94 95
95 if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET) 96 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
96 return; 97 return;
97 98
98 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC); 99 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
@@ -251,6 +252,9 @@ static void amp_init(struct hci_dev *hdev)
251 252
252 /* Read Local Version */ 253 /* Read Local Version */
253 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL); 254 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
255
256 /* Read Local AMP Info */
257 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
254} 258}
255 259
256static void hci_init_req(struct hci_dev *hdev, unsigned long opt) 260static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
@@ -384,7 +388,6 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
384 case DISCOVERY_STOPPED: 388 case DISCOVERY_STOPPED:
385 if (hdev->discovery.state != DISCOVERY_STARTING) 389 if (hdev->discovery.state != DISCOVERY_STARTING)
386 mgmt_discovering(hdev, 0); 390 mgmt_discovering(hdev, 0);
387 hdev->discovery.type = 0;
388 break; 391 break;
389 case DISCOVERY_STARTING: 392 case DISCOVERY_STARTING:
390 break; 393 break;
@@ -1089,32 +1092,6 @@ static const struct rfkill_ops hci_rfkill_ops = {
1089 .set_block = hci_rfkill_set_block, 1092 .set_block = hci_rfkill_set_block,
1090}; 1093};
1091 1094
1092/* Alloc HCI device */
1093struct hci_dev *hci_alloc_dev(void)
1094{
1095 struct hci_dev *hdev;
1096
1097 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1098 if (!hdev)
1099 return NULL;
1100
1101 hci_init_sysfs(hdev);
1102 skb_queue_head_init(&hdev->driver_init);
1103
1104 return hdev;
1105}
1106EXPORT_SYMBOL(hci_alloc_dev);
1107
1108/* Free HCI device */
1109void hci_free_dev(struct hci_dev *hdev)
1110{
1111 skb_queue_purge(&hdev->driver_init);
1112
1113 /* will free via device release */
1114 put_device(&hdev->dev);
1115}
1116EXPORT_SYMBOL(hci_free_dev);
1117
1118static void hci_power_on(struct work_struct *work) 1095static void hci_power_on(struct work_struct *work)
1119{ 1096{
1120 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on); 1097 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
@@ -1336,7 +1313,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1336} 1313}
1337 1314
1338int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type, 1315int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1339 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16 1316 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1340 ediv, u8 rand[8]) 1317 ediv, u8 rand[8])
1341{ 1318{
1342 struct smp_ltk *key, *old_key; 1319 struct smp_ltk *key, *old_key;
@@ -1544,75 +1521,6 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1544 return mgmt_device_unblocked(hdev, bdaddr, type); 1521 return mgmt_device_unblocked(hdev, bdaddr, type);
1545} 1522}
1546 1523
1547static void hci_clear_adv_cache(struct work_struct *work)
1548{
1549 struct hci_dev *hdev = container_of(work, struct hci_dev,
1550 adv_work.work);
1551
1552 hci_dev_lock(hdev);
1553
1554 hci_adv_entries_clear(hdev);
1555
1556 hci_dev_unlock(hdev);
1557}
1558
1559int hci_adv_entries_clear(struct hci_dev *hdev)
1560{
1561 struct adv_entry *entry, *tmp;
1562
1563 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1564 list_del(&entry->list);
1565 kfree(entry);
1566 }
1567
1568 BT_DBG("%s adv cache cleared", hdev->name);
1569
1570 return 0;
1571}
1572
1573struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1574{
1575 struct adv_entry *entry;
1576
1577 list_for_each_entry(entry, &hdev->adv_entries, list)
1578 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1579 return entry;
1580
1581 return NULL;
1582}
1583
1584static inline int is_connectable_adv(u8 evt_type)
1585{
1586 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1587 return 1;
1588
1589 return 0;
1590}
1591
1592int hci_add_adv_entry(struct hci_dev *hdev,
1593 struct hci_ev_le_advertising_info *ev) { struct adv_entry *entry; if (!is_connectable_adv(ev->evt_type))
1594 return -EINVAL;
1595
1596 /* Only new entries should be added to adv_entries. So, if
1597 * bdaddr was found, don't add it. */
1598 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1599 return 0;
1600
1601 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1602 if (!entry)
1603 return -ENOMEM;
1604
1605 bacpy(&entry->bdaddr, &ev->bdaddr);
1606 entry->bdaddr_type = ev->bdaddr_type;
1607
1608 list_add(&entry->list, &hdev->adv_entries);
1609
1610 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1611 batostr(&entry->bdaddr), entry->bdaddr_type);
1612
1613 return 0;
1614}
1615
1616static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt) 1524static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1617{ 1525{
1618 struct le_scan_params *param = (struct le_scan_params *) opt; 1526 struct le_scan_params *param = (struct le_scan_params *) opt;
@@ -1670,6 +1578,24 @@ static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1670 return 0; 1578 return 0;
1671} 1579}
1672 1580
1581int hci_cancel_le_scan(struct hci_dev *hdev)
1582{
1583 BT_DBG("%s", hdev->name);
1584
1585 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1586 return -EALREADY;
1587
1588 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1589 struct hci_cp_le_set_scan_enable cp;
1590
1591 /* Send HCI command to disable LE Scan */
1592 memset(&cp, 0, sizeof(cp));
1593 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1594 }
1595
1596 return 0;
1597}
1598
1673static void le_scan_disable_work(struct work_struct *work) 1599static void le_scan_disable_work(struct work_struct *work)
1674{ 1600{
1675 struct hci_dev *hdev = container_of(work, struct hci_dev, 1601 struct hci_dev *hdev = container_of(work, struct hci_dev,
@@ -1714,95 +1640,103 @@ int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1714 return 0; 1640 return 0;
1715} 1641}
1716 1642
1717/* Register HCI device */ 1643/* Alloc HCI device */
1718int hci_register_dev(struct hci_dev *hdev) 1644struct hci_dev *hci_alloc_dev(void)
1719{ 1645{
1720 struct list_head *head = &hci_dev_list, *p; 1646 struct hci_dev *hdev;
1721 int i, id, error;
1722
1723 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1724
1725 if (!hdev->open || !hdev->close)
1726 return -EINVAL;
1727
1728 /* Do not allow HCI_AMP devices to register at index 0,
1729 * so the index can be used as the AMP controller ID.
1730 */
1731 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1732
1733 write_lock(&hci_dev_list_lock);
1734
1735 /* Find first available device id */
1736 list_for_each(p, &hci_dev_list) {
1737 if (list_entry(p, struct hci_dev, list)->id != id)
1738 break;
1739 head = p; id++;
1740 }
1741
1742 sprintf(hdev->name, "hci%d", id);
1743 hdev->id = id;
1744 list_add_tail(&hdev->list, head);
1745 1647
1746 mutex_init(&hdev->lock); 1648 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1649 if (!hdev)
1650 return NULL;
1747 1651
1748 hdev->flags = 0;
1749 hdev->dev_flags = 0;
1750 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); 1652 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1751 hdev->esco_type = (ESCO_HV1); 1653 hdev->esco_type = (ESCO_HV1);
1752 hdev->link_mode = (HCI_LM_ACCEPT); 1654 hdev->link_mode = (HCI_LM_ACCEPT);
1753 hdev->io_capability = 0x03; /* No Input No Output */ 1655 hdev->io_capability = 0x03; /* No Input No Output */
1754 1656
1755 hdev->idle_timeout = 0;
1756 hdev->sniff_max_interval = 800; 1657 hdev->sniff_max_interval = 800;
1757 hdev->sniff_min_interval = 80; 1658 hdev->sniff_min_interval = 80;
1758 1659
1660 mutex_init(&hdev->lock);
1661 mutex_init(&hdev->req_lock);
1662
1663 INIT_LIST_HEAD(&hdev->mgmt_pending);
1664 INIT_LIST_HEAD(&hdev->blacklist);
1665 INIT_LIST_HEAD(&hdev->uuids);
1666 INIT_LIST_HEAD(&hdev->link_keys);
1667 INIT_LIST_HEAD(&hdev->long_term_keys);
1668 INIT_LIST_HEAD(&hdev->remote_oob_data);
1669
1759 INIT_WORK(&hdev->rx_work, hci_rx_work); 1670 INIT_WORK(&hdev->rx_work, hci_rx_work);
1760 INIT_WORK(&hdev->cmd_work, hci_cmd_work); 1671 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1761 INIT_WORK(&hdev->tx_work, hci_tx_work); 1672 INIT_WORK(&hdev->tx_work, hci_tx_work);
1673 INIT_WORK(&hdev->power_on, hci_power_on);
1674 INIT_WORK(&hdev->le_scan, le_scan_work);
1762 1675
1676 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1677 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1678 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1763 1679
1680 skb_queue_head_init(&hdev->driver_init);
1764 skb_queue_head_init(&hdev->rx_q); 1681 skb_queue_head_init(&hdev->rx_q);
1765 skb_queue_head_init(&hdev->cmd_q); 1682 skb_queue_head_init(&hdev->cmd_q);
1766 skb_queue_head_init(&hdev->raw_q); 1683 skb_queue_head_init(&hdev->raw_q);
1767 1684
1768 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1769
1770 for (i = 0; i < NUM_REASSEMBLY; i++)
1771 hdev->reassembly[i] = NULL;
1772
1773 init_waitqueue_head(&hdev->req_wait_q); 1685 init_waitqueue_head(&hdev->req_wait_q);
1774 mutex_init(&hdev->req_lock);
1775 1686
1776 discovery_init(hdev); 1687 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1777 1688
1689 hci_init_sysfs(hdev);
1690 discovery_init(hdev);
1778 hci_conn_hash_init(hdev); 1691 hci_conn_hash_init(hdev);
1779 1692
1780 INIT_LIST_HEAD(&hdev->mgmt_pending); 1693 return hdev;
1781 1694}
1782 INIT_LIST_HEAD(&hdev->blacklist); 1695EXPORT_SYMBOL(hci_alloc_dev);
1783 1696
1784 INIT_LIST_HEAD(&hdev->uuids); 1697/* Free HCI device */
1698void hci_free_dev(struct hci_dev *hdev)
1699{
1700 skb_queue_purge(&hdev->driver_init);
1785 1701
1786 INIT_LIST_HEAD(&hdev->link_keys); 1702 /* will free via device release */
1787 INIT_LIST_HEAD(&hdev->long_term_keys); 1703 put_device(&hdev->dev);
1704}
1705EXPORT_SYMBOL(hci_free_dev);
1788 1706
1789 INIT_LIST_HEAD(&hdev->remote_oob_data); 1707/* Register HCI device */
1708int hci_register_dev(struct hci_dev *hdev)
1709{
1710 struct list_head *head, *p;
1711 int id, error;
1790 1712
1791 INIT_LIST_HEAD(&hdev->adv_entries); 1713 if (!hdev->open || !hdev->close)
1714 return -EINVAL;
1792 1715
1793 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache); 1716 write_lock(&hci_dev_list_lock);
1794 INIT_WORK(&hdev->power_on, hci_power_on);
1795 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1796 1717
1797 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off); 1718 /* Do not allow HCI_AMP devices to register at index 0,
1719 * so the index can be used as the AMP controller ID.
1720 */
1721 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1722 head = &hci_dev_list;
1798 1723
1799 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 1724 /* Find first available device id */
1725 list_for_each(p, &hci_dev_list) {
1726 int nid = list_entry(p, struct hci_dev, list)->id;
1727 if (nid > id)
1728 break;
1729 if (nid == id)
1730 id++;
1731 head = p;
1732 }
1800 1733
1801 atomic_set(&hdev->promisc, 0); 1734 sprintf(hdev->name, "hci%d", id);
1735 hdev->id = id;
1802 1736
1803 INIT_WORK(&hdev->le_scan, le_scan_work); 1737 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1804 1738
1805 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work); 1739 list_add(&hdev->list, head);
1806 1740
1807 write_unlock(&hci_dev_list_lock); 1741 write_unlock(&hci_dev_list_lock);
1808 1742
@@ -1884,8 +1818,6 @@ void hci_unregister_dev(struct hci_dev *hdev)
1884 1818
1885 hci_del_sysfs(hdev); 1819 hci_del_sysfs(hdev);
1886 1820
1887 cancel_delayed_work_sync(&hdev->adv_work);
1888
1889 destroy_workqueue(hdev->workqueue); 1821 destroy_workqueue(hdev->workqueue);
1890 1822
1891 hci_dev_lock(hdev); 1823 hci_dev_lock(hdev);
@@ -1894,7 +1826,6 @@ void hci_unregister_dev(struct hci_dev *hdev)
1894 hci_link_keys_clear(hdev); 1826 hci_link_keys_clear(hdev);
1895 hci_smp_ltks_clear(hdev); 1827 hci_smp_ltks_clear(hdev);
1896 hci_remote_oob_data_clear(hdev); 1828 hci_remote_oob_data_clear(hdev);
1897 hci_adv_entries_clear(hdev);
1898 hci_dev_unlock(hdev); 1829 hci_dev_unlock(hdev);
1899 1830
1900 hci_dev_put(hdev); 1831 hci_dev_put(hdev);
@@ -2231,6 +2162,12 @@ static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2231 struct hci_dev *hdev = conn->hdev; 2162 struct hci_dev *hdev = conn->hdev;
2232 struct sk_buff *list; 2163 struct sk_buff *list;
2233 2164
2165 skb->len = skb_headlen(skb);
2166 skb->data_len = 0;
2167
2168 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2169 hci_add_acl_hdr(skb, conn->handle, flags);
2170
2234 list = skb_shinfo(skb)->frag_list; 2171 list = skb_shinfo(skb)->frag_list;
2235 if (!list) { 2172 if (!list) {
2236 /* Non fragmented */ 2173 /* Non fragmented */
@@ -2274,8 +2211,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2274 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags); 2211 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2275 2212
2276 skb->dev = (void *) hdev; 2213 skb->dev = (void *) hdev;
2277 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2278 hci_add_acl_hdr(skb, conn->handle, flags);
2279 2214
2280 hci_queue_acl(conn, &chan->data_q, skb, flags); 2215 hci_queue_acl(conn, &chan->data_q, skb, flags);
2281 2216
@@ -2313,7 +2248,7 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
2313{ 2248{
2314 struct hci_conn_hash *h = &hdev->conn_hash; 2249 struct hci_conn_hash *h = &hdev->conn_hash;
2315 struct hci_conn *conn = NULL, *c; 2250 struct hci_conn *conn = NULL, *c;
2316 int num = 0, min = ~0; 2251 unsigned int num = 0, min = ~0;
2317 2252
2318 /* We don't have to lock device here. Connections are always 2253 /* We don't have to lock device here. Connections are always
2319 * added and removed with TX task disabled. */ 2254 * added and removed with TX task disabled. */
@@ -2394,7 +2329,7 @@ static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2394{ 2329{
2395 struct hci_conn_hash *h = &hdev->conn_hash; 2330 struct hci_conn_hash *h = &hdev->conn_hash;
2396 struct hci_chan *chan = NULL; 2331 struct hci_chan *chan = NULL;
2397 int num = 0, min = ~0, cur_prio = 0; 2332 unsigned int num = 0, min = ~0, cur_prio = 0;
2398 struct hci_conn *conn; 2333 struct hci_conn *conn;
2399 int cnt, q, conn_num = 0; 2334 int cnt, q, conn_num = 0;
2400 2335
@@ -2945,7 +2880,19 @@ int hci_cancel_inquiry(struct hci_dev *hdev)
2945 BT_DBG("%s", hdev->name); 2880 BT_DBG("%s", hdev->name);
2946 2881
2947 if (!test_bit(HCI_INQUIRY, &hdev->flags)) 2882 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2948 return -EPERM; 2883 return -EALREADY;
2949 2884
2950 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL); 2885 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2951} 2886}
2887
2888u8 bdaddr_to_le(u8 bdaddr_type)
2889{
2890 switch (bdaddr_type) {
2891 case BDADDR_LE_PUBLIC:
2892 return ADDR_LE_DEV_PUBLIC;
2893
2894 default:
2895 /* Fallback to LE Random address type */
2896 return ADDR_LE_DEV_RANDOM;
2897 }
2898}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 1266f78fa8e3..4eefb7f65cf6 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -69,6 +69,18 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
69 hci_conn_check_pending(hdev); 69 hci_conn_check_pending(hdev);
70} 70}
71 71
72static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
73{
74 __u8 status = *((__u8 *) skb->data);
75
76 BT_DBG("%s status 0x%x", hdev->name, status);
77
78 if (status)
79 return;
80
81 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
82}
83
72static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) 84static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
73{ 85{
74 __u8 status = *((__u8 *) skb->data); 86 __u8 status = *((__u8 *) skb->data);
@@ -78,6 +90,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
78 if (status) 90 if (status)
79 return; 91 return;
80 92
93 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
94
81 hci_conn_check_pending(hdev); 95 hci_conn_check_pending(hdev);
82} 96}
83 97
@@ -192,7 +206,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
192 hci_req_complete(hdev, HCI_OP_RESET, status); 206 hci_req_complete(hdev, HCI_OP_RESET, status);
193 207
194 /* Reset all non-persistent flags */ 208 /* Reset all non-persistent flags */
195 hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS)); 209 hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
210 BIT(HCI_PERIODIC_INQ));
196 211
197 hdev->discovery.state = DISCOVERY_STOPPED; 212 hdev->discovery.state = DISCOVERY_STOPPED;
198} 213}
@@ -505,7 +520,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
505 events[5] |= 0x10; /* Synchronous Connection Changed */ 520 events[5] |= 0x10; /* Synchronous Connection Changed */
506 521
507 if (hdev->features[3] & LMP_RSSI_INQ) 522 if (hdev->features[3] & LMP_RSSI_INQ)
508 events[4] |= 0x04; /* Inquiry Result with RSSI */ 523 events[4] |= 0x02; /* Inquiry Result with RSSI */
509 524
510 if (hdev->features[5] & LMP_SNIFF_SUBR) 525 if (hdev->features[5] & LMP_SNIFF_SUBR)
511 events[5] |= 0x20; /* Sniff Subrating */ 526 events[5] |= 0x20; /* Sniff Subrating */
@@ -615,6 +630,7 @@ done:
615 630
616static void hci_setup_link_policy(struct hci_dev *hdev) 631static void hci_setup_link_policy(struct hci_dev *hdev)
617{ 632{
633 struct hci_cp_write_def_link_policy cp;
618 u16 link_policy = 0; 634 u16 link_policy = 0;
619 635
620 if (hdev->features[0] & LMP_RSWITCH) 636 if (hdev->features[0] & LMP_RSWITCH)
@@ -626,9 +642,8 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
626 if (hdev->features[1] & LMP_PARK) 642 if (hdev->features[1] & LMP_PARK)
627 link_policy |= HCI_LP_PARK; 643 link_policy |= HCI_LP_PARK;
628 644
629 link_policy = cpu_to_le16(link_policy); 645 cp.policy = cpu_to_le16(link_policy);
630 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(link_policy), 646 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
631 &link_policy);
632} 647}
633 648
634static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb) 649static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
@@ -710,7 +725,7 @@ static void hci_set_le_support(struct hci_dev *hdev)
710 725
711 memset(&cp, 0, sizeof(cp)); 726 memset(&cp, 0, sizeof(cp));
712 727
713 if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { 728 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
714 cp.le = 1; 729 cp.le = 1;
715 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR); 730 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
716 } 731 }
@@ -887,11 +902,14 @@ static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
887static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, 902static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
888 struct sk_buff *skb) 903 struct sk_buff *skb)
889{ 904{
890 __u8 status = *((__u8 *) skb->data); 905 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
891 906
892 BT_DBG("%s status 0x%x", hdev->name, status); 907 BT_DBG("%s status 0x%x", hdev->name, rp->status);
908
909 if (!rp->status)
910 hdev->inq_tx_power = rp->tx_power;
893 911
894 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status); 912 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
895} 913}
896 914
897static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb) 915static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1082,23 +1100,23 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1082 1100
1083 set_bit(HCI_LE_SCAN, &hdev->dev_flags); 1101 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1084 1102
1085 cancel_delayed_work_sync(&hdev->adv_work);
1086
1087 hci_dev_lock(hdev); 1103 hci_dev_lock(hdev);
1088 hci_adv_entries_clear(hdev);
1089 hci_discovery_set_state(hdev, DISCOVERY_FINDING); 1104 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1090 hci_dev_unlock(hdev); 1105 hci_dev_unlock(hdev);
1091 break; 1106 break;
1092 1107
1093 case LE_SCANNING_DISABLED: 1108 case LE_SCANNING_DISABLED:
1094 if (status) 1109 if (status) {
1110 hci_dev_lock(hdev);
1111 mgmt_stop_discovery_failed(hdev, status);
1112 hci_dev_unlock(hdev);
1095 return; 1113 return;
1114 }
1096 1115
1097 clear_bit(HCI_LE_SCAN, &hdev->dev_flags); 1116 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1098 1117
1099 schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT); 1118 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
1100 1119 hdev->discovery.state == DISCOVERY_FINDING) {
1101 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
1102 mgmt_interleaved_discovery(hdev); 1120 mgmt_interleaved_discovery(hdev);
1103 } else { 1121 } else {
1104 hci_dev_lock(hdev); 1122 hci_dev_lock(hdev);
@@ -1625,6 +1643,8 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1625 if (status) { 1643 if (status) {
1626 if (conn && conn->state == BT_CONNECT) { 1644 if (conn && conn->state == BT_CONNECT) {
1627 conn->state = BT_CLOSED; 1645 conn->state = BT_CLOSED;
1646 mgmt_connect_failed(hdev, &cp->peer_addr, conn->type,
1647 conn->dst_type, status);
1628 hci_proto_connect_cfm(conn, status); 1648 hci_proto_connect_cfm(conn, status);
1629 hci_conn_del(conn); 1649 hci_conn_del(conn);
1630 } 1650 }
@@ -1699,6 +1719,9 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
1699 if (!num_rsp) 1719 if (!num_rsp)
1700 return; 1720 return;
1701 1721
1722 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1723 return;
1724
1702 hci_dev_lock(hdev); 1725 hci_dev_lock(hdev);
1703 1726
1704 for (; num_rsp; num_rsp--, info++) { 1727 for (; num_rsp; num_rsp--, info++) {
@@ -2040,7 +2063,7 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
2040 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 2063 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2041 2064
2042 if (ev->status && conn->state == BT_CONNECTED) { 2065 if (ev->status && conn->state == BT_CONNECTED) {
2043 hci_acl_disconn(conn, 0x13); 2066 hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
2044 hci_conn_put(conn); 2067 hci_conn_put(conn);
2045 goto unlock; 2068 goto unlock;
2046 } 2069 }
@@ -2154,6 +2177,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
2154 hci_cc_inquiry_cancel(hdev, skb); 2177 hci_cc_inquiry_cancel(hdev, skb);
2155 break; 2178 break;
2156 2179
2180 case HCI_OP_PERIODIC_INQ:
2181 hci_cc_periodic_inq(hdev, skb);
2182 break;
2183
2157 case HCI_OP_EXIT_PERIODIC_INQ: 2184 case HCI_OP_EXIT_PERIODIC_INQ:
2158 hci_cc_exit_periodic_inq(hdev, skb); 2185 hci_cc_exit_periodic_inq(hdev, skb);
2159 break; 2186 break;
@@ -2806,6 +2833,9 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
2806 if (!num_rsp) 2833 if (!num_rsp)
2807 return; 2834 return;
2808 2835
2836 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2837 return;
2838
2809 hci_dev_lock(hdev); 2839 hci_dev_lock(hdev);
2810 2840
2811 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { 2841 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
@@ -2971,12 +3001,16 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
2971 struct inquiry_data data; 3001 struct inquiry_data data;
2972 struct extended_inquiry_info *info = (void *) (skb->data + 1); 3002 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2973 int num_rsp = *((__u8 *) skb->data); 3003 int num_rsp = *((__u8 *) skb->data);
3004 size_t eir_len;
2974 3005
2975 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 3006 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2976 3007
2977 if (!num_rsp) 3008 if (!num_rsp)
2978 return; 3009 return;
2979 3010
3011 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3012 return;
3013
2980 hci_dev_lock(hdev); 3014 hci_dev_lock(hdev);
2981 3015
2982 for (; num_rsp; num_rsp--, info++) { 3016 for (; num_rsp; num_rsp--, info++) {
@@ -3000,9 +3034,10 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
3000 3034
3001 name_known = hci_inquiry_cache_update(hdev, &data, name_known, 3035 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3002 &ssp); 3036 &ssp);
3037 eir_len = eir_get_length(info->data, sizeof(info->data));
3003 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3038 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3004 info->dev_class, info->rssi, !name_known, 3039 info->dev_class, info->rssi, !name_known,
3005 ssp, info->data, sizeof(info->data)); 3040 ssp, info->data, eir_len);
3006 } 3041 }
3007 3042
3008 hci_dev_unlock(hdev); 3043 hci_dev_unlock(hdev);
@@ -3322,8 +3357,6 @@ static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3322 while (num_reports--) { 3357 while (num_reports--) {
3323 struct hci_ev_le_advertising_info *ev = ptr; 3358 struct hci_ev_le_advertising_info *ev = ptr;
3324 3359
3325 hci_add_adv_entry(hdev, ev);
3326
3327 rssi = ev->data[ev->length]; 3360 rssi = ev->data[ev->length];
3328 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type, 3361 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3329 NULL, rssi, 0, 1, ev->data, ev->length); 3362 NULL, rssi, 0, 1, ev->data, ev->length);
@@ -3343,7 +3376,7 @@ static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3343 struct hci_conn *conn; 3376 struct hci_conn *conn;
3344 struct smp_ltk *ltk; 3377 struct smp_ltk *ltk;
3345 3378
3346 BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle)); 3379 BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
3347 3380
3348 hci_dev_lock(hdev); 3381 hci_dev_lock(hdev);
3349 3382
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index bc154298979a..937f3187eafa 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -444,8 +444,8 @@ static const struct file_operations blacklist_fops = {
444 444
445static void print_bt_uuid(struct seq_file *f, u8 *uuid) 445static void print_bt_uuid(struct seq_file *f, u8 *uuid)
446{ 446{
447 u32 data0, data4; 447 __be32 data0, data4;
448 u16 data1, data2, data3, data5; 448 __be16 data1, data2, data3, data5;
449 449
450 memcpy(&data0, &uuid[0], 4); 450 memcpy(&data0, &uuid[0], 4);
451 memcpy(&data1, &uuid[4], 2); 451 memcpy(&data1, &uuid[4], 2);
@@ -533,7 +533,6 @@ int hci_add_sysfs(struct hci_dev *hdev)
533 533
534 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 534 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
535 535
536 dev->parent = hdev->parent;
537 dev_set_name(dev, "%s", hdev->name); 536 dev_set_name(dev, "%s", hdev->name);
538 537
539 err = device_add(dev); 538 err = device_add(dev);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 6f9c25b633a6..24f144b72a96 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -4,6 +4,7 @@
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org> 4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc. 5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems 6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
7 8
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 10
@@ -70,7 +71,7 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
70 void *data); 71 void *data);
71static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data); 72static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72static void l2cap_send_disconn_req(struct l2cap_conn *conn, 73static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err); 74 struct l2cap_chan *chan, int err);
74 75
75/* ---- L2CAP channels ---- */ 76/* ---- L2CAP channels ---- */
76 77
@@ -97,13 +98,15 @@ static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16
97} 98}
98 99
99/* Find channel with given SCID. 100/* Find channel with given SCID.
100 * Returns locked socket */ 101 * Returns locked channel. */
101static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) 102static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
102{ 103{
103 struct l2cap_chan *c; 104 struct l2cap_chan *c;
104 105
105 mutex_lock(&conn->chan_lock); 106 mutex_lock(&conn->chan_lock);
106 c = __l2cap_get_chan_by_scid(conn, cid); 107 c = __l2cap_get_chan_by_scid(conn, cid);
108 if (c)
109 l2cap_chan_lock(c);
107 mutex_unlock(&conn->chan_lock); 110 mutex_unlock(&conn->chan_lock);
108 111
109 return c; 112 return c;
@@ -120,17 +123,6 @@ static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8
120 return NULL; 123 return NULL;
121} 124}
122 125
123static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
124{
125 struct l2cap_chan *c;
126
127 mutex_lock(&conn->chan_lock);
128 c = __l2cap_get_chan_by_ident(conn, ident);
129 mutex_unlock(&conn->chan_lock);
130
131 return c;
132}
133
134static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src) 126static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
135{ 127{
136 struct l2cap_chan *c; 128 struct l2cap_chan *c;
@@ -232,6 +224,124 @@ static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
232 release_sock(sk); 224 release_sock(sk);
233} 225}
234 226
227/* ---- L2CAP sequence number lists ---- */
228
229/* For ERTM, ordered lists of sequence numbers must be tracked for
230 * SREJ requests that are received and for frames that are to be
231 * retransmitted. These seq_list functions implement a singly-linked
232 * list in an array, where membership in the list can also be checked
233 * in constant time. Items can also be added to the tail of the list
234 * and removed from the head in constant time, without further memory
235 * allocs or frees.
236 */
237
238static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
239{
240 size_t alloc_size, i;
241
242 /* Allocated size is a power of 2 to map sequence numbers
243 * (which may be up to 14 bits) in to a smaller array that is
244 * sized for the negotiated ERTM transmit windows.
245 */
246 alloc_size = roundup_pow_of_two(size);
247
248 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
249 if (!seq_list->list)
250 return -ENOMEM;
251
252 seq_list->mask = alloc_size - 1;
253 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
254 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
255 for (i = 0; i < alloc_size; i++)
256 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
257
258 return 0;
259}
260
261static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
262{
263 kfree(seq_list->list);
264}
265
266static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
267 u16 seq)
268{
269 /* Constant-time check for list membership */
270 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
271}
272
273static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
274{
275 u16 mask = seq_list->mask;
276
277 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
278 /* In case someone tries to pop the head of an empty list */
279 return L2CAP_SEQ_LIST_CLEAR;
280 } else if (seq_list->head == seq) {
281 /* Head can be removed in constant time */
282 seq_list->head = seq_list->list[seq & mask];
283 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
284
285 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
286 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
287 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
288 }
289 } else {
290 /* Walk the list to find the sequence number */
291 u16 prev = seq_list->head;
292 while (seq_list->list[prev & mask] != seq) {
293 prev = seq_list->list[prev & mask];
294 if (prev == L2CAP_SEQ_LIST_TAIL)
295 return L2CAP_SEQ_LIST_CLEAR;
296 }
297
298 /* Unlink the number from the list and clear it */
299 seq_list->list[prev & mask] = seq_list->list[seq & mask];
300 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
301 if (seq_list->tail == seq)
302 seq_list->tail = prev;
303 }
304 return seq;
305}
306
307static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
308{
309 /* Remove the head in constant time */
310 return l2cap_seq_list_remove(seq_list, seq_list->head);
311}
312
313static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
314{
315 u16 i;
316
317 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
318 return;
319
320 for (i = 0; i <= seq_list->mask; i++)
321 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
322
323 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
324 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
325}
326
327static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
328{
329 u16 mask = seq_list->mask;
330
331 /* All appends happen in constant time */
332
333 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
334 return;
335
336 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
337 seq_list->head = seq;
338 else
339 seq_list->list[seq_list->tail & mask] = seq;
340
341 seq_list->tail = seq;
342 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
343}
344
235static void l2cap_chan_timeout(struct work_struct *work) 345static void l2cap_chan_timeout(struct work_struct *work)
236{ 346{
237 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 347 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
@@ -262,7 +372,7 @@ static void l2cap_chan_timeout(struct work_struct *work)
262 l2cap_chan_put(chan); 372 l2cap_chan_put(chan);
263} 373}
264 374
265struct l2cap_chan *l2cap_chan_create(struct sock *sk) 375struct l2cap_chan *l2cap_chan_create(void)
266{ 376{
267 struct l2cap_chan *chan; 377 struct l2cap_chan *chan;
268 378
@@ -272,8 +382,6 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
272 382
273 mutex_init(&chan->lock); 383 mutex_init(&chan->lock);
274 384
275 chan->sk = sk;
276
277 write_lock(&chan_list_lock); 385 write_lock(&chan_list_lock);
278 list_add(&chan->global_l, &chan_list); 386 list_add(&chan->global_l, &chan_list);
279 write_unlock(&chan_list_lock); 387 write_unlock(&chan_list_lock);
@@ -284,7 +392,7 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
284 392
285 atomic_set(&chan->refcnt, 1); 393 atomic_set(&chan->refcnt, 1);
286 394
287 BT_DBG("sk %p chan %p", sk, chan); 395 BT_DBG("chan %p", chan);
288 396
289 return chan; 397 return chan;
290} 398}
@@ -298,10 +406,21 @@ void l2cap_chan_destroy(struct l2cap_chan *chan)
298 l2cap_chan_put(chan); 406 l2cap_chan_put(chan);
299} 407}
300 408
301void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) 409void l2cap_chan_set_defaults(struct l2cap_chan *chan)
410{
411 chan->fcs = L2CAP_FCS_CRC16;
412 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
413 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
414 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
415 chan->sec_level = BT_SECURITY_LOW;
416
417 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
418}
419
420static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
302{ 421{
303 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, 422 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
304 chan->psm, chan->dcid); 423 __le16_to_cpu(chan->psm), chan->dcid);
305 424
306 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; 425 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
307 426
@@ -347,7 +466,7 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
347 list_add(&chan->list, &conn->chan_l); 466 list_add(&chan->list, &conn->chan_l);
348} 467}
349 468
350void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) 469static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
351{ 470{
352 mutex_lock(&conn->chan_lock); 471 mutex_lock(&conn->chan_lock);
353 __l2cap_chan_add(conn, chan); 472 __l2cap_chan_add(conn, chan);
@@ -405,6 +524,8 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
405 524
406 skb_queue_purge(&chan->srej_q); 525 skb_queue_purge(&chan->srej_q);
407 526
527 l2cap_seq_list_free(&chan->srej_list);
528 l2cap_seq_list_free(&chan->retrans_list);
408 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { 529 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
409 list_del(&l->list); 530 list_del(&l->list);
410 kfree(l); 531 kfree(l);
@@ -453,7 +574,6 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
453 case BT_CONFIG: 574 case BT_CONFIG:
454 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && 575 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
455 conn->hcon->type == ACL_LINK) { 576 conn->hcon->type == ACL_LINK) {
456 __clear_chan_timer(chan);
457 __set_chan_timer(chan, sk->sk_sndtimeo); 577 __set_chan_timer(chan, sk->sk_sndtimeo);
458 l2cap_send_disconn_req(conn, chan, reason); 578 l2cap_send_disconn_req(conn, chan, reason);
459 } else 579 } else
@@ -466,7 +586,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
466 struct l2cap_conn_rsp rsp; 586 struct l2cap_conn_rsp rsp;
467 __u16 result; 587 __u16 result;
468 588
469 if (bt_sk(sk)->defer_setup) 589 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
470 result = L2CAP_CR_SEC_BLOCK; 590 result = L2CAP_CR_SEC_BLOCK;
471 else 591 else
472 result = L2CAP_CR_BAD_PSM; 592 result = L2CAP_CR_BAD_PSM;
@@ -599,6 +719,117 @@ static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
599 hci_send_acl(chan->conn->hchan, skb, flags); 719 hci_send_acl(chan->conn->hchan, skb, flags);
600} 720}
601 721
722static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
723{
724 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
725 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
726
727 if (enh & L2CAP_CTRL_FRAME_TYPE) {
728 /* S-Frame */
729 control->sframe = 1;
730 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
731 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
732
733 control->sar = 0;
734 control->txseq = 0;
735 } else {
736 /* I-Frame */
737 control->sframe = 0;
738 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
739 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
740
741 control->poll = 0;
742 control->super = 0;
743 }
744}
745
746static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
747{
748 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
749 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
750
751 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
752 /* S-Frame */
753 control->sframe = 1;
754 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
755 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
756
757 control->sar = 0;
758 control->txseq = 0;
759 } else {
760 /* I-Frame */
761 control->sframe = 0;
762 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
763 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
764
765 control->poll = 0;
766 control->super = 0;
767 }
768}
769
770static inline void __unpack_control(struct l2cap_chan *chan,
771 struct sk_buff *skb)
772{
773 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
774 __unpack_extended_control(get_unaligned_le32(skb->data),
775 &bt_cb(skb)->control);
776 } else {
777 __unpack_enhanced_control(get_unaligned_le16(skb->data),
778 &bt_cb(skb)->control);
779 }
780}
781
782static u32 __pack_extended_control(struct l2cap_ctrl *control)
783{
784 u32 packed;
785
786 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
787 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
788
789 if (control->sframe) {
790 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
791 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
792 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
793 } else {
794 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
795 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
796 }
797
798 return packed;
799}
800
801static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
802{
803 u16 packed;
804
805 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
806 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
807
808 if (control->sframe) {
809 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
810 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
811 packed |= L2CAP_CTRL_FRAME_TYPE;
812 } else {
813 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
814 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
815 }
816
817 return packed;
818}
819
820static inline void __pack_control(struct l2cap_chan *chan,
821 struct l2cap_ctrl *control,
822 struct sk_buff *skb)
823{
824 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
825 put_unaligned_le32(__pack_extended_control(control),
826 skb->data + L2CAP_HDR_SIZE);
827 } else {
828 put_unaligned_le16(__pack_enhanced_control(control),
829 skb->data + L2CAP_HDR_SIZE);
830 }
831}
832
602static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control) 833static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
603{ 834{
604 struct sk_buff *skb; 835 struct sk_buff *skb;
@@ -681,10 +912,38 @@ static void l2cap_send_conn_req(struct l2cap_chan *chan)
681 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req); 912 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
682} 913}
683 914
915static void l2cap_chan_ready(struct l2cap_chan *chan)
916{
917 struct sock *sk = chan->sk;
918 struct sock *parent;
919
920 lock_sock(sk);
921
922 parent = bt_sk(sk)->parent;
923
924 BT_DBG("sk %p, parent %p", sk, parent);
925
926 chan->conf_state = 0;
927 __clear_chan_timer(chan);
928
929 __l2cap_state_change(chan, BT_CONNECTED);
930 sk->sk_state_change(sk);
931
932 if (parent)
933 parent->sk_data_ready(parent, 0);
934
935 release_sock(sk);
936}
937
684static void l2cap_do_start(struct l2cap_chan *chan) 938static void l2cap_do_start(struct l2cap_chan *chan)
685{ 939{
686 struct l2cap_conn *conn = chan->conn; 940 struct l2cap_conn *conn = chan->conn;
687 941
942 if (conn->hcon->type == LE_LINK) {
943 l2cap_chan_ready(chan);
944 return;
945 }
946
688 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) { 947 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
689 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) 948 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
690 return; 949 return;
@@ -791,7 +1050,8 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
791 1050
792 if (l2cap_chan_check_security(chan)) { 1051 if (l2cap_chan_check_security(chan)) {
793 lock_sock(sk); 1052 lock_sock(sk);
794 if (bt_sk(sk)->defer_setup) { 1053 if (test_bit(BT_SK_DEFER_SETUP,
1054 &bt_sk(sk)->flags)) {
795 struct sock *parent = bt_sk(sk)->parent; 1055 struct sock *parent = bt_sk(sk)->parent;
796 rsp.result = cpu_to_le16(L2CAP_CR_PEND); 1056 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
797 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND); 1057 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
@@ -830,10 +1090,12 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
830 mutex_unlock(&conn->chan_lock); 1090 mutex_unlock(&conn->chan_lock);
831} 1091}
832 1092
833/* Find socket with cid and source bdaddr. 1093/* Find socket with cid and source/destination bdaddr.
834 * Returns closest match, locked. 1094 * Returns closest match, locked.
835 */ 1095 */
836static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src) 1096static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1097 bdaddr_t *src,
1098 bdaddr_t *dst)
837{ 1099{
838 struct l2cap_chan *c, *c1 = NULL; 1100 struct l2cap_chan *c, *c1 = NULL;
839 1101
@@ -846,14 +1108,22 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdadd
846 continue; 1108 continue;
847 1109
848 if (c->scid == cid) { 1110 if (c->scid == cid) {
1111 int src_match, dst_match;
1112 int src_any, dst_any;
1113
849 /* Exact match. */ 1114 /* Exact match. */
850 if (!bacmp(&bt_sk(sk)->src, src)) { 1115 src_match = !bacmp(&bt_sk(sk)->src, src);
1116 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1117 if (src_match && dst_match) {
851 read_unlock(&chan_list_lock); 1118 read_unlock(&chan_list_lock);
852 return c; 1119 return c;
853 } 1120 }
854 1121
855 /* Closest match */ 1122 /* Closest match */
856 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) 1123 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1124 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1125 if ((src_match && dst_any) || (src_any && dst_match) ||
1126 (src_any && dst_any))
857 c1 = c; 1127 c1 = c;
858 } 1128 }
859 } 1129 }
@@ -872,7 +1142,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
872 1142
873 /* Check if we have socket listening on cid */ 1143 /* Check if we have socket listening on cid */
874 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA, 1144 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
875 conn->src); 1145 conn->src, conn->dst);
876 if (!pchan) 1146 if (!pchan)
877 return; 1147 return;
878 1148
@@ -910,29 +1180,6 @@ clean:
910 release_sock(parent); 1180 release_sock(parent);
911} 1181}
912 1182
913static void l2cap_chan_ready(struct l2cap_chan *chan)
914{
915 struct sock *sk = chan->sk;
916 struct sock *parent;
917
918 lock_sock(sk);
919
920 parent = bt_sk(sk)->parent;
921
922 BT_DBG("sk %p, parent %p", sk, parent);
923
924 chan->conf_state = 0;
925 __clear_chan_timer(chan);
926
927 __l2cap_state_change(chan, BT_CONNECTED);
928 sk->sk_state_change(sk);
929
930 if (parent)
931 parent->sk_data_ready(parent, 0);
932
933 release_sock(sk);
934}
935
936static void l2cap_conn_ready(struct l2cap_conn *conn) 1183static void l2cap_conn_ready(struct l2cap_conn *conn)
937{ 1184{
938 struct l2cap_chan *chan; 1185 struct l2cap_chan *chan;
@@ -1016,6 +1263,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
1016 1263
1017 /* Kill channels */ 1264 /* Kill channels */
1018 list_for_each_entry_safe(chan, l, &conn->chan_l, list) { 1265 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1266 l2cap_chan_hold(chan);
1019 l2cap_chan_lock(chan); 1267 l2cap_chan_lock(chan);
1020 1268
1021 l2cap_chan_del(chan, err); 1269 l2cap_chan_del(chan, err);
@@ -1023,6 +1271,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
1023 l2cap_chan_unlock(chan); 1271 l2cap_chan_unlock(chan);
1024 1272
1025 chan->ops->close(chan->data); 1273 chan->ops->close(chan->data);
1274 l2cap_chan_put(chan);
1026 } 1275 }
1027 1276
1028 mutex_unlock(&conn->chan_lock); 1277 mutex_unlock(&conn->chan_lock);
@@ -1100,10 +1349,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1100 1349
1101/* ---- Socket interface ---- */ 1350/* ---- Socket interface ---- */
1102 1351
1103/* Find socket with psm and source bdaddr. 1352/* Find socket with psm and source / destination bdaddr.
1104 * Returns closest match. 1353 * Returns closest match.
1105 */ 1354 */
1106static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src) 1355static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1356 bdaddr_t *src,
1357 bdaddr_t *dst)
1107{ 1358{
1108 struct l2cap_chan *c, *c1 = NULL; 1359 struct l2cap_chan *c, *c1 = NULL;
1109 1360
@@ -1116,14 +1367,22 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
1116 continue; 1367 continue;
1117 1368
1118 if (c->psm == psm) { 1369 if (c->psm == psm) {
1370 int src_match, dst_match;
1371 int src_any, dst_any;
1372
1119 /* Exact match. */ 1373 /* Exact match. */
1120 if (!bacmp(&bt_sk(sk)->src, src)) { 1374 src_match = !bacmp(&bt_sk(sk)->src, src);
1375 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1376 if (src_match && dst_match) {
1121 read_unlock(&chan_list_lock); 1377 read_unlock(&chan_list_lock);
1122 return c; 1378 return c;
1123 } 1379 }
1124 1380
1125 /* Closest match */ 1381 /* Closest match */
1126 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) 1382 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1383 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1384 if ((src_match && dst_any) || (src_any && dst_match) ||
1385 (src_any && dst_any))
1127 c1 = c; 1386 c1 = c;
1128 } 1387 }
1129 } 1388 }
@@ -1133,7 +1392,8 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
1133 return c1; 1392 return c1;
1134} 1393}
1135 1394
1136int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst) 1395int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1396 bdaddr_t *dst, u8 dst_type)
1137{ 1397{
1138 struct sock *sk = chan->sk; 1398 struct sock *sk = chan->sk;
1139 bdaddr_t *src = &bt_sk(sk)->src; 1399 bdaddr_t *src = &bt_sk(sk)->src;
@@ -1143,8 +1403,8 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
1143 __u8 auth_type; 1403 __u8 auth_type;
1144 int err; 1404 int err;
1145 1405
1146 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), 1406 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1147 chan->psm); 1407 dst_type, __le16_to_cpu(chan->psm));
1148 1408
1149 hdev = hci_get_route(dst, src); 1409 hdev = hci_get_route(dst, src);
1150 if (!hdev) 1410 if (!hdev)
@@ -1218,11 +1478,11 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
1218 auth_type = l2cap_get_auth_type(chan); 1478 auth_type = l2cap_get_auth_type(chan);
1219 1479
1220 if (chan->dcid == L2CAP_CID_LE_DATA) 1480 if (chan->dcid == L2CAP_CID_LE_DATA)
1221 hcon = hci_connect(hdev, LE_LINK, dst, 1481 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1222 chan->sec_level, auth_type); 1482 chan->sec_level, auth_type);
1223 else 1483 else
1224 hcon = hci_connect(hdev, ACL_LINK, dst, 1484 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1225 chan->sec_level, auth_type); 1485 chan->sec_level, auth_type);
1226 1486
1227 if (IS_ERR(hcon)) { 1487 if (IS_ERR(hcon)) {
1228 err = PTR_ERR(hcon); 1488 err = PTR_ERR(hcon);
@@ -1236,6 +1496,18 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
1236 goto done; 1496 goto done;
1237 } 1497 }
1238 1498
1499 if (hcon->type == LE_LINK) {
1500 err = 0;
1501
1502 if (!list_empty(&conn->chan_l)) {
1503 err = -EBUSY;
1504 hci_conn_put(hcon);
1505 }
1506
1507 if (err)
1508 goto done;
1509 }
1510
1239 /* Update source addr of the socket */ 1511 /* Update source addr of the socket */
1240 bacpy(src, conn->src); 1512 bacpy(src, conn->src);
1241 1513
@@ -1346,7 +1618,7 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1346 1618
1347 while ((skb = skb_peek(&chan->tx_q)) && 1619 while ((skb = skb_peek(&chan->tx_q)) &&
1348 chan->unacked_frames) { 1620 chan->unacked_frames) {
1349 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq) 1621 if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
1350 break; 1622 break;
1351 1623
1352 skb = skb_dequeue(&chan->tx_q); 1624 skb = skb_dequeue(&chan->tx_q);
@@ -1368,6 +1640,7 @@ static void l2cap_streaming_send(struct l2cap_chan *chan)
1368 while ((skb = skb_dequeue(&chan->tx_q))) { 1640 while ((skb = skb_dequeue(&chan->tx_q))) {
1369 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE); 1641 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1370 control |= __set_txseq(chan, chan->next_tx_seq); 1642 control |= __set_txseq(chan, chan->next_tx_seq);
1643 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
1371 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE); 1644 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1372 1645
1373 if (chan->fcs == L2CAP_FCS_CRC16) { 1646 if (chan->fcs == L2CAP_FCS_CRC16) {
@@ -1393,21 +1666,21 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1393 if (!skb) 1666 if (!skb)
1394 return; 1667 return;
1395 1668
1396 while (bt_cb(skb)->tx_seq != tx_seq) { 1669 while (bt_cb(skb)->control.txseq != tx_seq) {
1397 if (skb_queue_is_last(&chan->tx_q, skb)) 1670 if (skb_queue_is_last(&chan->tx_q, skb))
1398 return; 1671 return;
1399 1672
1400 skb = skb_queue_next(&chan->tx_q, skb); 1673 skb = skb_queue_next(&chan->tx_q, skb);
1401 } 1674 }
1402 1675
1403 if (chan->remote_max_tx && 1676 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1404 bt_cb(skb)->retries == chan->remote_max_tx) { 1677 chan->remote_max_tx) {
1405 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); 1678 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1406 return; 1679 return;
1407 } 1680 }
1408 1681
1409 tx_skb = skb_clone(skb, GFP_ATOMIC); 1682 tx_skb = skb_clone(skb, GFP_ATOMIC);
1410 bt_cb(skb)->retries++; 1683 bt_cb(skb)->control.retries++;
1411 1684
1412 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); 1685 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1413 control &= __get_sar_mask(chan); 1686 control &= __get_sar_mask(chan);
@@ -1440,17 +1713,20 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
1440 if (chan->state != BT_CONNECTED) 1713 if (chan->state != BT_CONNECTED)
1441 return -ENOTCONN; 1714 return -ENOTCONN;
1442 1715
1716 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1717 return 0;
1718
1443 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) { 1719 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1444 1720
1445 if (chan->remote_max_tx && 1721 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1446 bt_cb(skb)->retries == chan->remote_max_tx) { 1722 chan->remote_max_tx) {
1447 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); 1723 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1448 break; 1724 break;
1449 } 1725 }
1450 1726
1451 tx_skb = skb_clone(skb, GFP_ATOMIC); 1727 tx_skb = skb_clone(skb, GFP_ATOMIC);
1452 1728
1453 bt_cb(skb)->retries++; 1729 bt_cb(skb)->control.retries++;
1454 1730
1455 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); 1731 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1456 control &= __get_sar_mask(chan); 1732 control &= __get_sar_mask(chan);
@@ -1460,6 +1736,7 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
1460 1736
1461 control |= __set_reqseq(chan, chan->buffer_seq); 1737 control |= __set_reqseq(chan, chan->buffer_seq);
1462 control |= __set_txseq(chan, chan->next_tx_seq); 1738 control |= __set_txseq(chan, chan->next_tx_seq);
1739 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
1463 1740
1464 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); 1741 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1465 1742
@@ -1474,11 +1751,11 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
1474 1751
1475 __set_retrans_timer(chan); 1752 __set_retrans_timer(chan);
1476 1753
1477 bt_cb(skb)->tx_seq = chan->next_tx_seq; 1754 bt_cb(skb)->control.txseq = chan->next_tx_seq;
1478 1755
1479 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); 1756 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1480 1757
1481 if (bt_cb(skb)->retries == 1) { 1758 if (bt_cb(skb)->control.retries == 1) {
1482 chan->unacked_frames++; 1759 chan->unacked_frames++;
1483 1760
1484 if (!nsent++) 1761 if (!nsent++)
@@ -1554,7 +1831,7 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1554{ 1831{
1555 struct l2cap_conn *conn = chan->conn; 1832 struct l2cap_conn *conn = chan->conn;
1556 struct sk_buff **frag; 1833 struct sk_buff **frag;
1557 int err, sent = 0; 1834 int sent = 0;
1558 1835
1559 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) 1836 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1560 return -EFAULT; 1837 return -EFAULT;
@@ -1565,14 +1842,17 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1565 /* Continuation fragments (no L2CAP header) */ 1842 /* Continuation fragments (no L2CAP header) */
1566 frag = &skb_shinfo(skb)->frag_list; 1843 frag = &skb_shinfo(skb)->frag_list;
1567 while (len) { 1844 while (len) {
1845 struct sk_buff *tmp;
1846
1568 count = min_t(unsigned int, conn->mtu, len); 1847 count = min_t(unsigned int, conn->mtu, len);
1569 1848
1570 *frag = chan->ops->alloc_skb(chan, count, 1849 tmp = chan->ops->alloc_skb(chan, count,
1571 msg->msg_flags & MSG_DONTWAIT, 1850 msg->msg_flags & MSG_DONTWAIT);
1572 &err); 1851 if (IS_ERR(tmp))
1852 return PTR_ERR(tmp);
1853
1854 *frag = tmp;
1573 1855
1574 if (!*frag)
1575 return err;
1576 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) 1856 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1577 return -EFAULT; 1857 return -EFAULT;
1578 1858
@@ -1581,6 +1861,9 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1581 sent += count; 1861 sent += count;
1582 len -= count; 1862 len -= count;
1583 1863
1864 skb->len += (*frag)->len;
1865 skb->data_len += (*frag)->len;
1866
1584 frag = &(*frag)->next; 1867 frag = &(*frag)->next;
1585 } 1868 }
1586 1869
@@ -1601,18 +1884,17 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1601 count = min_t(unsigned int, (conn->mtu - hlen), len); 1884 count = min_t(unsigned int, (conn->mtu - hlen), len);
1602 1885
1603 skb = chan->ops->alloc_skb(chan, count + hlen, 1886 skb = chan->ops->alloc_skb(chan, count + hlen,
1604 msg->msg_flags & MSG_DONTWAIT, &err); 1887 msg->msg_flags & MSG_DONTWAIT);
1605 1888 if (IS_ERR(skb))
1606 if (!skb) 1889 return skb;
1607 return ERR_PTR(err);
1608 1890
1609 skb->priority = priority; 1891 skb->priority = priority;
1610 1892
1611 /* Create L2CAP header */ 1893 /* Create L2CAP header */
1612 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1894 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1613 lh->cid = cpu_to_le16(chan->dcid); 1895 lh->cid = cpu_to_le16(chan->dcid);
1614 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1896 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1615 put_unaligned_le16(chan->psm, skb_put(skb, 2)); 1897 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1616 1898
1617 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); 1899 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1618 if (unlikely(err < 0)) { 1900 if (unlikely(err < 0)) {
@@ -1628,25 +1910,24 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1628{ 1910{
1629 struct l2cap_conn *conn = chan->conn; 1911 struct l2cap_conn *conn = chan->conn;
1630 struct sk_buff *skb; 1912 struct sk_buff *skb;
1631 int err, count, hlen = L2CAP_HDR_SIZE; 1913 int err, count;
1632 struct l2cap_hdr *lh; 1914 struct l2cap_hdr *lh;
1633 1915
1634 BT_DBG("chan %p len %d", chan, (int)len); 1916 BT_DBG("chan %p len %d", chan, (int)len);
1635 1917
1636 count = min_t(unsigned int, (conn->mtu - hlen), len); 1918 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1637 1919
1638 skb = chan->ops->alloc_skb(chan, count + hlen, 1920 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1639 msg->msg_flags & MSG_DONTWAIT, &err); 1921 msg->msg_flags & MSG_DONTWAIT);
1640 1922 if (IS_ERR(skb))
1641 if (!skb) 1923 return skb;
1642 return ERR_PTR(err);
1643 1924
1644 skb->priority = priority; 1925 skb->priority = priority;
1645 1926
1646 /* Create L2CAP header */ 1927 /* Create L2CAP header */
1647 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1928 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1648 lh->cid = cpu_to_le16(chan->dcid); 1929 lh->cid = cpu_to_le16(chan->dcid);
1649 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1930 lh->len = cpu_to_le16(len);
1650 1931
1651 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); 1932 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1652 if (unlikely(err < 0)) { 1933 if (unlikely(err < 0)) {
@@ -1658,7 +1939,7 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1658 1939
1659static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, 1940static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1660 struct msghdr *msg, size_t len, 1941 struct msghdr *msg, size_t len,
1661 u32 control, u16 sdulen) 1942 u16 sdulen)
1662{ 1943{
1663 struct l2cap_conn *conn = chan->conn; 1944 struct l2cap_conn *conn = chan->conn;
1664 struct sk_buff *skb; 1945 struct sk_buff *skb;
@@ -1684,17 +1965,16 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1684 count = min_t(unsigned int, (conn->mtu - hlen), len); 1965 count = min_t(unsigned int, (conn->mtu - hlen), len);
1685 1966
1686 skb = chan->ops->alloc_skb(chan, count + hlen, 1967 skb = chan->ops->alloc_skb(chan, count + hlen,
1687 msg->msg_flags & MSG_DONTWAIT, &err); 1968 msg->msg_flags & MSG_DONTWAIT);
1688 1969 if (IS_ERR(skb))
1689 if (!skb) 1970 return skb;
1690 return ERR_PTR(err);
1691 1971
1692 /* Create L2CAP header */ 1972 /* Create L2CAP header */
1693 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1973 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1694 lh->cid = cpu_to_le16(chan->dcid); 1974 lh->cid = cpu_to_le16(chan->dcid);
1695 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1975 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1696 1976
1697 __put_control(chan, control, skb_put(skb, __ctrl_size(chan))); 1977 __put_control(chan, 0, skb_put(skb, __ctrl_size(chan)));
1698 1978
1699 if (sdulen) 1979 if (sdulen)
1700 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); 1980 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
@@ -1708,61 +1988,82 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1708 if (chan->fcs == L2CAP_FCS_CRC16) 1988 if (chan->fcs == L2CAP_FCS_CRC16)
1709 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE)); 1989 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1710 1990
1711 bt_cb(skb)->retries = 0; 1991 bt_cb(skb)->control.retries = 0;
1712 return skb; 1992 return skb;
1713} 1993}
1714 1994
1715static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) 1995static int l2cap_segment_sdu(struct l2cap_chan *chan,
1996 struct sk_buff_head *seg_queue,
1997 struct msghdr *msg, size_t len)
1716{ 1998{
1717 struct sk_buff *skb; 1999 struct sk_buff *skb;
1718 struct sk_buff_head sar_queue; 2000 u16 sdu_len;
1719 u32 control; 2001 size_t pdu_len;
1720 size_t size = 0; 2002 int err = 0;
2003 u8 sar;
1721 2004
1722 skb_queue_head_init(&sar_queue); 2005 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
1723 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1724 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1725 if (IS_ERR(skb))
1726 return PTR_ERR(skb);
1727 2006
1728 __skb_queue_tail(&sar_queue, skb); 2007 /* It is critical that ERTM PDUs fit in a single HCI fragment,
1729 len -= chan->remote_mps; 2008 * so fragmented skbs are not used. The HCI layer's handling
1730 size += chan->remote_mps; 2009 * of fragmented skbs is not compatible with ERTM's queueing.
2010 */
1731 2011
1732 while (len > 0) { 2012 /* PDU size is derived from the HCI MTU */
1733 size_t buflen; 2013 pdu_len = chan->conn->mtu;
1734 2014
1735 if (len > chan->remote_mps) { 2015 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
1736 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE); 2016
1737 buflen = chan->remote_mps; 2017 /* Adjust for largest possible L2CAP overhead. */
1738 } else { 2018 pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
1739 control = __set_ctrl_sar(chan, L2CAP_SAR_END); 2019
1740 buflen = len; 2020 /* Remote device may have requested smaller PDUs */
1741 } 2021 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2022
2023 if (len <= pdu_len) {
2024 sar = L2CAP_SAR_UNSEGMENTED;
2025 sdu_len = 0;
2026 pdu_len = len;
2027 } else {
2028 sar = L2CAP_SAR_START;
2029 sdu_len = len;
2030 pdu_len -= L2CAP_SDULEN_SIZE;
2031 }
2032
2033 while (len > 0) {
2034 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
1742 2035
1743 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1744 if (IS_ERR(skb)) { 2036 if (IS_ERR(skb)) {
1745 skb_queue_purge(&sar_queue); 2037 __skb_queue_purge(seg_queue);
1746 return PTR_ERR(skb); 2038 return PTR_ERR(skb);
1747 } 2039 }
1748 2040
1749 __skb_queue_tail(&sar_queue, skb); 2041 bt_cb(skb)->control.sar = sar;
1750 len -= buflen; 2042 __skb_queue_tail(seg_queue, skb);
1751 size += buflen; 2043
2044 len -= pdu_len;
2045 if (sdu_len) {
2046 sdu_len = 0;
2047 pdu_len += L2CAP_SDULEN_SIZE;
2048 }
2049
2050 if (len <= pdu_len) {
2051 sar = L2CAP_SAR_END;
2052 pdu_len = len;
2053 } else {
2054 sar = L2CAP_SAR_CONTINUE;
2055 }
1752 } 2056 }
1753 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1754 if (chan->tx_send_head == NULL)
1755 chan->tx_send_head = sar_queue.next;
1756 2057
1757 return size; 2058 return err;
1758} 2059}
1759 2060
1760int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, 2061int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1761 u32 priority) 2062 u32 priority)
1762{ 2063{
1763 struct sk_buff *skb; 2064 struct sk_buff *skb;
1764 u32 control;
1765 int err; 2065 int err;
2066 struct sk_buff_head seg_queue;
1766 2067
1767 /* Connectionless channel */ 2068 /* Connectionless channel */
1768 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { 2069 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
@@ -1791,42 +2092,47 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1791 2092
1792 case L2CAP_MODE_ERTM: 2093 case L2CAP_MODE_ERTM:
1793 case L2CAP_MODE_STREAMING: 2094 case L2CAP_MODE_STREAMING:
1794 /* Entire SDU fits into one PDU */ 2095 /* Check outgoing MTU */
1795 if (len <= chan->remote_mps) { 2096 if (len > chan->omtu) {
1796 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED); 2097 err = -EMSGSIZE;
1797 skb = l2cap_create_iframe_pdu(chan, msg, len, control, 2098 break;
1798 0); 2099 }
1799 if (IS_ERR(skb))
1800 return PTR_ERR(skb);
1801 2100
1802 __skb_queue_tail(&chan->tx_q, skb); 2101 __skb_queue_head_init(&seg_queue);
1803 2102
1804 if (chan->tx_send_head == NULL) 2103 /* Do segmentation before calling in to the state machine,
1805 chan->tx_send_head = skb; 2104 * since it's possible to block while waiting for memory
2105 * allocation.
2106 */
2107 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
1806 2108
1807 } else { 2109 /* The channel could have been closed while segmenting,
1808 /* Segment SDU into multiples PDUs */ 2110 * check that it is still connected.
1809 err = l2cap_sar_segment_sdu(chan, msg, len); 2111 */
1810 if (err < 0) 2112 if (chan->state != BT_CONNECTED) {
1811 return err; 2113 __skb_queue_purge(&seg_queue);
2114 err = -ENOTCONN;
1812 } 2115 }
1813 2116
1814 if (chan->mode == L2CAP_MODE_STREAMING) { 2117 if (err)
1815 l2cap_streaming_send(chan);
1816 err = len;
1817 break; 2118 break;
1818 }
1819 2119
1820 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && 2120 if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL)
1821 test_bit(CONN_WAIT_F, &chan->conn_state)) { 2121 chan->tx_send_head = seg_queue.next;
1822 err = len; 2122 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
1823 break; 2123
1824 } 2124 if (chan->mode == L2CAP_MODE_ERTM)
2125 err = l2cap_ertm_send(chan);
2126 else
2127 l2cap_streaming_send(chan);
1825 2128
1826 err = l2cap_ertm_send(chan);
1827 if (err >= 0) 2129 if (err >= 0)
1828 err = len; 2130 err = len;
1829 2131
2132 /* If the skbs were not queued for sending, they'll still be in
2133 * seg_queue and need to be purged.
2134 */
2135 __skb_queue_purge(&seg_queue);
1830 break; 2136 break;
1831 2137
1832 default: 2138 default:
@@ -2040,13 +2346,29 @@ static void l2cap_ack_timeout(struct work_struct *work)
2040 l2cap_chan_put(chan); 2346 l2cap_chan_put(chan);
2041} 2347}
2042 2348
2043static inline void l2cap_ertm_init(struct l2cap_chan *chan) 2349static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2044{ 2350{
2351 int err;
2352
2353 chan->next_tx_seq = 0;
2354 chan->expected_tx_seq = 0;
2045 chan->expected_ack_seq = 0; 2355 chan->expected_ack_seq = 0;
2046 chan->unacked_frames = 0; 2356 chan->unacked_frames = 0;
2047 chan->buffer_seq = 0; 2357 chan->buffer_seq = 0;
2048 chan->num_acked = 0; 2358 chan->num_acked = 0;
2049 chan->frames_sent = 0; 2359 chan->frames_sent = 0;
2360 chan->last_acked_seq = 0;
2361 chan->sdu = NULL;
2362 chan->sdu_last_frag = NULL;
2363 chan->sdu_len = 0;
2364
2365 skb_queue_head_init(&chan->tx_q);
2366
2367 if (chan->mode != L2CAP_MODE_ERTM)
2368 return 0;
2369
2370 chan->rx_state = L2CAP_RX_STATE_RECV;
2371 chan->tx_state = L2CAP_TX_STATE_XMIT;
2050 2372
2051 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout); 2373 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2052 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout); 2374 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
@@ -2055,6 +2377,11 @@ static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2055 skb_queue_head_init(&chan->srej_q); 2377 skb_queue_head_init(&chan->srej_q);
2056 2378
2057 INIT_LIST_HEAD(&chan->srej_l); 2379 INIT_LIST_HEAD(&chan->srej_l);
2380 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2381 if (err < 0)
2382 return err;
2383
2384 return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2058} 2385}
2059 2386
2060static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) 2387static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -2378,9 +2705,9 @@ done:
2378 chan->remote_mps = size; 2705 chan->remote_mps = size;
2379 2706
2380 rfc.retrans_timeout = 2707 rfc.retrans_timeout =
2381 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO); 2708 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2382 rfc.monitor_timeout = 2709 rfc.monitor_timeout =
2383 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO); 2710 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2384 2711
2385 set_bit(CONF_MODE_DONE, &chan->conf_state); 2712 set_bit(CONF_MODE_DONE, &chan->conf_state);
2386 2713
@@ -2644,10 +2971,10 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2644 u16 dcid = 0, scid = __le16_to_cpu(req->scid); 2971 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2645 __le16 psm = req->psm; 2972 __le16 psm = req->psm;
2646 2973
2647 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid); 2974 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
2648 2975
2649 /* Check if we have socket listening on psm */ 2976 /* Check if we have socket listening on psm */
2650 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src); 2977 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
2651 if (!pchan) { 2978 if (!pchan) {
2652 result = L2CAP_CR_BAD_PSM; 2979 result = L2CAP_CR_BAD_PSM;
2653 goto sendresp; 2980 goto sendresp;
@@ -2706,7 +3033,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2706 3033
2707 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) { 3034 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2708 if (l2cap_chan_check_security(chan)) { 3035 if (l2cap_chan_check_security(chan)) {
2709 if (bt_sk(sk)->defer_setup) { 3036 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
2710 __l2cap_state_change(chan, BT_CONNECT2); 3037 __l2cap_state_change(chan, BT_CONNECT2);
2711 result = L2CAP_CR_PEND; 3038 result = L2CAP_CR_PEND;
2712 status = L2CAP_CS_AUTHOR_PEND; 3039 status = L2CAP_CS_AUTHOR_PEND;
@@ -2848,7 +3175,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2848 u16 dcid, flags; 3175 u16 dcid, flags;
2849 u8 rsp[64]; 3176 u8 rsp[64];
2850 struct l2cap_chan *chan; 3177 struct l2cap_chan *chan;
2851 int len; 3178 int len, err = 0;
2852 3179
2853 dcid = __le16_to_cpu(req->dcid); 3180 dcid = __le16_to_cpu(req->dcid);
2854 flags = __le16_to_cpu(req->flags); 3181 flags = __le16_to_cpu(req->flags);
@@ -2859,8 +3186,6 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2859 if (!chan) 3186 if (!chan)
2860 return -ENOENT; 3187 return -ENOENT;
2861 3188
2862 l2cap_chan_lock(chan);
2863
2864 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { 3189 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2865 struct l2cap_cmd_rej_cid rej; 3190 struct l2cap_cmd_rej_cid rej;
2866 3191
@@ -2915,13 +3240,15 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2915 3240
2916 l2cap_state_change(chan, BT_CONNECTED); 3241 l2cap_state_change(chan, BT_CONNECTED);
2917 3242
2918 chan->next_tx_seq = 0; 3243 if (chan->mode == L2CAP_MODE_ERTM ||
2919 chan->expected_tx_seq = 0; 3244 chan->mode == L2CAP_MODE_STREAMING)
2920 skb_queue_head_init(&chan->tx_q); 3245 err = l2cap_ertm_init(chan);
2921 if (chan->mode == L2CAP_MODE_ERTM) 3246
2922 l2cap_ertm_init(chan); 3247 if (err < 0)
3248 l2cap_send_disconn_req(chan->conn, chan, -err);
3249 else
3250 l2cap_chan_ready(chan);
2923 3251
2924 l2cap_chan_ready(chan);
2925 goto unlock; 3252 goto unlock;
2926 } 3253 }
2927 3254
@@ -2949,7 +3276,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2949 3276
2950unlock: 3277unlock:
2951 l2cap_chan_unlock(chan); 3278 l2cap_chan_unlock(chan);
2952 return 0; 3279 return err;
2953} 3280}
2954 3281
2955static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 3282static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
@@ -2957,21 +3284,20 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2957 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data; 3284 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2958 u16 scid, flags, result; 3285 u16 scid, flags, result;
2959 struct l2cap_chan *chan; 3286 struct l2cap_chan *chan;
2960 int len = cmd->len - sizeof(*rsp); 3287 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3288 int err = 0;
2961 3289
2962 scid = __le16_to_cpu(rsp->scid); 3290 scid = __le16_to_cpu(rsp->scid);
2963 flags = __le16_to_cpu(rsp->flags); 3291 flags = __le16_to_cpu(rsp->flags);
2964 result = __le16_to_cpu(rsp->result); 3292 result = __le16_to_cpu(rsp->result);
2965 3293
2966 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", 3294 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
2967 scid, flags, result); 3295 result, len);
2968 3296
2969 chan = l2cap_get_chan_by_scid(conn, scid); 3297 chan = l2cap_get_chan_by_scid(conn, scid);
2970 if (!chan) 3298 if (!chan)
2971 return 0; 3299 return 0;
2972 3300
2973 l2cap_chan_lock(chan);
2974
2975 switch (result) { 3301 switch (result) {
2976 case L2CAP_CONF_SUCCESS: 3302 case L2CAP_CONF_SUCCESS:
2977 l2cap_conf_rfc_get(chan, rsp->data, len); 3303 l2cap_conf_rfc_get(chan, rsp->data, len);
@@ -3045,18 +3371,19 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3045 set_default_fcs(chan); 3371 set_default_fcs(chan);
3046 3372
3047 l2cap_state_change(chan, BT_CONNECTED); 3373 l2cap_state_change(chan, BT_CONNECTED);
3048 chan->next_tx_seq = 0; 3374 if (chan->mode == L2CAP_MODE_ERTM ||
3049 chan->expected_tx_seq = 0; 3375 chan->mode == L2CAP_MODE_STREAMING)
3050 skb_queue_head_init(&chan->tx_q); 3376 err = l2cap_ertm_init(chan);
3051 if (chan->mode == L2CAP_MODE_ERTM)
3052 l2cap_ertm_init(chan);
3053 3377
3054 l2cap_chan_ready(chan); 3378 if (err < 0)
3379 l2cap_send_disconn_req(chan->conn, chan, -err);
3380 else
3381 l2cap_chan_ready(chan);
3055 } 3382 }
3056 3383
3057done: 3384done:
3058 l2cap_chan_unlock(chan); 3385 l2cap_chan_unlock(chan);
3059 return 0; 3386 return err;
3060} 3387}
3061 3388
3062static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 3389static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
@@ -3092,11 +3419,13 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
3092 sk->sk_shutdown = SHUTDOWN_MASK; 3419 sk->sk_shutdown = SHUTDOWN_MASK;
3093 release_sock(sk); 3420 release_sock(sk);
3094 3421
3422 l2cap_chan_hold(chan);
3095 l2cap_chan_del(chan, ECONNRESET); 3423 l2cap_chan_del(chan, ECONNRESET);
3096 3424
3097 l2cap_chan_unlock(chan); 3425 l2cap_chan_unlock(chan);
3098 3426
3099 chan->ops->close(chan->data); 3427 chan->ops->close(chan->data);
3428 l2cap_chan_put(chan);
3100 3429
3101 mutex_unlock(&conn->chan_lock); 3430 mutex_unlock(&conn->chan_lock);
3102 3431
@@ -3124,11 +3453,13 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
3124 3453
3125 l2cap_chan_lock(chan); 3454 l2cap_chan_lock(chan);
3126 3455
3456 l2cap_chan_hold(chan);
3127 l2cap_chan_del(chan, 0); 3457 l2cap_chan_del(chan, 0);
3128 3458
3129 l2cap_chan_unlock(chan); 3459 l2cap_chan_unlock(chan);
3130 3460
3131 chan->ops->close(chan->data); 3461 chan->ops->close(chan->data);
3462 l2cap_chan_put(chan);
3132 3463
3133 mutex_unlock(&conn->chan_lock); 3464 mutex_unlock(&conn->chan_lock);
3134 3465
@@ -3265,8 +3596,8 @@ static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3265 /* Placeholder: Always reject */ 3596 /* Placeholder: Always reject */
3266 rsp.dcid = 0; 3597 rsp.dcid = 0;
3267 rsp.scid = cpu_to_le16(scid); 3598 rsp.scid = cpu_to_le16(scid);
3268 rsp.result = L2CAP_CR_NO_MEM; 3599 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3269 rsp.status = L2CAP_CS_NO_INFO; 3600 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3270 3601
3271 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP, 3602 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3272 sizeof(rsp), &rsp); 3603 sizeof(rsp), &rsp);
@@ -3665,19 +3996,19 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb,
3665 struct sk_buff *next_skb; 3996 struct sk_buff *next_skb;
3666 int tx_seq_offset, next_tx_seq_offset; 3997 int tx_seq_offset, next_tx_seq_offset;
3667 3998
3668 bt_cb(skb)->tx_seq = tx_seq; 3999 bt_cb(skb)->control.txseq = tx_seq;
3669 bt_cb(skb)->sar = sar; 4000 bt_cb(skb)->control.sar = sar;
3670 4001
3671 next_skb = skb_peek(&chan->srej_q); 4002 next_skb = skb_peek(&chan->srej_q);
3672 4003
3673 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); 4004 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3674 4005
3675 while (next_skb) { 4006 while (next_skb) {
3676 if (bt_cb(next_skb)->tx_seq == tx_seq) 4007 if (bt_cb(next_skb)->control.txseq == tx_seq)
3677 return -EINVAL; 4008 return -EINVAL;
3678 4009
3679 next_tx_seq_offset = __seq_offset(chan, 4010 next_tx_seq_offset = __seq_offset(chan,
3680 bt_cb(next_skb)->tx_seq, chan->buffer_seq); 4011 bt_cb(next_skb)->control.txseq, chan->buffer_seq);
3681 4012
3682 if (next_tx_seq_offset > tx_seq_offset) { 4013 if (next_tx_seq_offset > tx_seq_offset) {
3683 __skb_queue_before(&chan->srej_q, next_skb, skb); 4014 __skb_queue_before(&chan->srej_q, next_skb, skb);
@@ -3800,6 +4131,7 @@ static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3800 BT_DBG("chan %p, Enter local busy", chan); 4131 BT_DBG("chan %p, Enter local busy", chan);
3801 4132
3802 set_bit(CONN_LOCAL_BUSY, &chan->conn_state); 4133 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4134 l2cap_seq_list_clear(&chan->srej_list);
3803 4135
3804 __set_ack_timer(chan); 4136 __set_ack_timer(chan);
3805} 4137}
@@ -3848,11 +4180,11 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3848 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 4180 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3849 int err; 4181 int err;
3850 4182
3851 if (bt_cb(skb)->tx_seq != tx_seq) 4183 if (bt_cb(skb)->control.txseq != tx_seq)
3852 break; 4184 break;
3853 4185
3854 skb = skb_dequeue(&chan->srej_q); 4186 skb = skb_dequeue(&chan->srej_q);
3855 control = __set_ctrl_sar(chan, bt_cb(skb)->sar); 4187 control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
3856 err = l2cap_reassemble_sdu(chan, skb, control); 4188 err = l2cap_reassemble_sdu(chan, skb, control);
3857 4189
3858 if (err < 0) { 4190 if (err < 0) {
@@ -3892,6 +4224,7 @@ static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3892 while (tx_seq != chan->expected_tx_seq) { 4224 while (tx_seq != chan->expected_tx_seq) {
3893 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); 4225 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3894 control |= __set_reqseq(chan, chan->expected_tx_seq); 4226 control |= __set_reqseq(chan, chan->expected_tx_seq);
4227 l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
3895 l2cap_send_sframe(chan, control); 4228 l2cap_send_sframe(chan, control);
3896 4229
3897 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); 4230 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
@@ -4022,8 +4355,8 @@ expected:
4022 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); 4355 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4023 4356
4024 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 4357 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4025 bt_cb(skb)->tx_seq = tx_seq; 4358 bt_cb(skb)->control.txseq = tx_seq;
4026 bt_cb(skb)->sar = sar; 4359 bt_cb(skb)->control.sar = sar;
4027 __skb_queue_tail(&chan->srej_q, skb); 4360 __skb_queue_tail(&chan->srej_q, skb);
4028 return 0; 4361 return 0;
4029 } 4362 }
@@ -4220,6 +4553,8 @@ static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4220 u16 req_seq; 4553 u16 req_seq;
4221 int len, next_tx_seq_offset, req_seq_offset; 4554 int len, next_tx_seq_offset, req_seq_offset;
4222 4555
4556 __unpack_control(chan, skb);
4557
4223 control = __get_control(chan, skb->data); 4558 control = __get_control(chan, skb->data);
4224 skb_pull(skb, __ctrl_size(chan)); 4559 skb_pull(skb, __ctrl_size(chan));
4225 len = skb->len; 4560 len = skb->len;
@@ -4295,8 +4630,6 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
4295 return 0; 4630 return 0;
4296 } 4631 }
4297 4632
4298 l2cap_chan_lock(chan);
4299
4300 BT_DBG("chan %p, len %d", chan, skb->len); 4633 BT_DBG("chan %p, len %d", chan, skb->len);
4301 4634
4302 if (chan->state != BT_CONNECTED) 4635 if (chan->state != BT_CONNECTED)
@@ -4375,7 +4708,7 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
4375{ 4708{
4376 struct l2cap_chan *chan; 4709 struct l2cap_chan *chan;
4377 4710
4378 chan = l2cap_global_chan_by_psm(0, psm, conn->src); 4711 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
4379 if (!chan) 4712 if (!chan)
4380 goto drop; 4713 goto drop;
4381 4714
@@ -4396,11 +4729,12 @@ drop:
4396 return 0; 4729 return 0;
4397} 4730}
4398 4731
4399static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb) 4732static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4733 struct sk_buff *skb)
4400{ 4734{
4401 struct l2cap_chan *chan; 4735 struct l2cap_chan *chan;
4402 4736
4403 chan = l2cap_global_chan_by_scid(0, cid, conn->src); 4737 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
4404 if (!chan) 4738 if (!chan)
4405 goto drop; 4739 goto drop;
4406 4740
@@ -4445,7 +4779,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4445 break; 4779 break;
4446 4780
4447 case L2CAP_CID_CONN_LESS: 4781 case L2CAP_CID_CONN_LESS:
4448 psm = get_unaligned_le16(skb->data); 4782 psm = get_unaligned((__le16 *) skb->data);
4449 skb_pull(skb, 2); 4783 skb_pull(skb, 2);
4450 l2cap_conless_channel(conn, psm, skb); 4784 l2cap_conless_channel(conn, psm, skb);
4451 break; 4785 break;
@@ -4540,7 +4874,6 @@ static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4540 4874
4541 if (encrypt == 0x00) { 4875 if (encrypt == 0x00) {
4542 if (chan->sec_level == BT_SECURITY_MEDIUM) { 4876 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4543 __clear_chan_timer(chan);
4544 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT); 4877 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4545 } else if (chan->sec_level == BT_SECURITY_HIGH) 4878 } else if (chan->sec_level == BT_SECURITY_HIGH)
4546 l2cap_chan_close(chan, ECONNREFUSED); 4879 l2cap_chan_close(chan, ECONNREFUSED);
@@ -4561,7 +4894,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4561 BT_DBG("conn %p", conn); 4894 BT_DBG("conn %p", conn);
4562 4895
4563 if (hcon->type == LE_LINK) { 4896 if (hcon->type == LE_LINK) {
4564 smp_distribute_keys(conn, 0); 4897 if (!status && encrypt)
4898 smp_distribute_keys(conn, 0);
4565 cancel_delayed_work(&conn->security_timer); 4899 cancel_delayed_work(&conn->security_timer);
4566 } 4900 }
4567 4901
@@ -4591,7 +4925,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4591 chan->state == BT_CONFIG)) { 4925 chan->state == BT_CONFIG)) {
4592 struct sock *sk = chan->sk; 4926 struct sock *sk = chan->sk;
4593 4927
4594 bt_sk(sk)->suspended = false; 4928 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
4595 sk->sk_state_change(sk); 4929 sk->sk_state_change(sk);
4596 4930
4597 l2cap_check_encryption(chan, encrypt); 4931 l2cap_check_encryption(chan, encrypt);
@@ -4603,7 +4937,6 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4603 if (!status) { 4937 if (!status) {
4604 l2cap_send_conn_req(chan); 4938 l2cap_send_conn_req(chan);
4605 } else { 4939 } else {
4606 __clear_chan_timer(chan);
4607 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); 4940 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4608 } 4941 }
4609 } else if (chan->state == BT_CONNECT2) { 4942 } else if (chan->state == BT_CONNECT2) {
@@ -4614,7 +4947,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4614 lock_sock(sk); 4947 lock_sock(sk);
4615 4948
4616 if (!status) { 4949 if (!status) {
4617 if (bt_sk(sk)->defer_setup) { 4950 if (test_bit(BT_SK_DEFER_SETUP,
4951 &bt_sk(sk)->flags)) {
4618 struct sock *parent = bt_sk(sk)->parent; 4952 struct sock *parent = bt_sk(sk)->parent;
4619 res = L2CAP_CR_PEND; 4953 res = L2CAP_CR_PEND;
4620 stat = L2CAP_CS_AUTHOR_PEND; 4954 stat = L2CAP_CS_AUTHOR_PEND;
@@ -4664,8 +4998,6 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4664 4998
4665 if (!(flags & ACL_CONT)) { 4999 if (!(flags & ACL_CONT)) {
4666 struct l2cap_hdr *hdr; 5000 struct l2cap_hdr *hdr;
4667 struct l2cap_chan *chan;
4668 u16 cid;
4669 int len; 5001 int len;
4670 5002
4671 if (conn->rx_len) { 5003 if (conn->rx_len) {
@@ -4685,7 +5017,6 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4685 5017
4686 hdr = (struct l2cap_hdr *) skb->data; 5018 hdr = (struct l2cap_hdr *) skb->data;
4687 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE; 5019 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4688 cid = __le16_to_cpu(hdr->cid);
4689 5020
4690 if (len == skb->len) { 5021 if (len == skb->len) {
4691 /* Complete frame received */ 5022 /* Complete frame received */
@@ -4702,23 +5033,6 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4702 goto drop; 5033 goto drop;
4703 } 5034 }
4704 5035
4705 chan = l2cap_get_chan_by_scid(conn, cid);
4706
4707 if (chan && chan->sk) {
4708 struct sock *sk = chan->sk;
4709 lock_sock(sk);
4710
4711 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4712 BT_ERR("Frame exceeding recv MTU (len %d, "
4713 "MTU %d)", len,
4714 chan->imtu);
4715 release_sock(sk);
4716 l2cap_conn_unreliable(conn, ECOMM);
4717 goto drop;
4718 }
4719 release_sock(sk);
4720 }
4721
4722 /* Allocate skb for the complete frame (with header) */ 5036 /* Allocate skb for the complete frame (with header) */
4723 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC); 5037 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4724 if (!conn->rx_skb) 5038 if (!conn->rx_skb)
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 04e7c172d49c..3bb1611b9d48 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -124,7 +124,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
124 return -EINVAL; 124 return -EINVAL;
125 125
126 err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid), 126 err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
127 &la.l2_bdaddr); 127 &la.l2_bdaddr, la.l2_bdaddr_type);
128 if (err) 128 if (err)
129 return err; 129 return err;
130 130
@@ -148,12 +148,16 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
148 148
149 lock_sock(sk); 149 lock_sock(sk);
150 150
151 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM) 151 if (sk->sk_state != BT_BOUND) {
152 || sk->sk_state != BT_BOUND) {
153 err = -EBADFD; 152 err = -EBADFD;
154 goto done; 153 goto done;
155 } 154 }
156 155
156 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) {
157 err = -EINVAL;
158 goto done;
159 }
160
157 switch (chan->mode) { 161 switch (chan->mode) {
158 case L2CAP_MODE_BASIC: 162 case L2CAP_MODE_BASIC:
159 break; 163 break;
@@ -320,8 +324,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
320 324
321 case L2CAP_CONNINFO: 325 case L2CAP_CONNINFO:
322 if (sk->sk_state != BT_CONNECTED && 326 if (sk->sk_state != BT_CONNECTED &&
323 !(sk->sk_state == BT_CONNECT2 && 327 !(sk->sk_state == BT_CONNECT2 &&
324 bt_sk(sk)->defer_setup)) { 328 test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
325 err = -ENOTCONN; 329 err = -ENOTCONN;
326 break; 330 break;
327 } 331 }
@@ -375,7 +379,10 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
375 } 379 }
376 380
377 memset(&sec, 0, sizeof(sec)); 381 memset(&sec, 0, sizeof(sec));
378 sec.level = chan->sec_level; 382 if (chan->conn)
383 sec.level = chan->conn->hcon->sec_level;
384 else
385 sec.level = chan->sec_level;
379 386
380 if (sk->sk_state == BT_CONNECTED) 387 if (sk->sk_state == BT_CONNECTED)
381 sec.key_size = chan->conn->hcon->enc_key_size; 388 sec.key_size = chan->conn->hcon->enc_key_size;
@@ -392,7 +399,8 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
392 break; 399 break;
393 } 400 }
394 401
395 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval)) 402 if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
403 (u32 __user *) optval))
396 err = -EFAULT; 404 err = -EFAULT;
397 405
398 break; 406 break;
@@ -594,10 +602,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
594 602
595 /* or for ACL link */ 603 /* or for ACL link */
596 } else if ((sk->sk_state == BT_CONNECT2 && 604 } else if ((sk->sk_state == BT_CONNECT2 &&
597 bt_sk(sk)->defer_setup) || 605 test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) ||
598 sk->sk_state == BT_CONNECTED) { 606 sk->sk_state == BT_CONNECTED) {
599 if (!l2cap_chan_check_security(chan)) 607 if (!l2cap_chan_check_security(chan))
600 bt_sk(sk)->suspended = true; 608 set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
601 else 609 else
602 sk->sk_state_change(sk); 610 sk->sk_state_change(sk);
603 } else { 611 } else {
@@ -616,7 +624,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
616 break; 624 break;
617 } 625 }
618 626
619 bt_sk(sk)->defer_setup = opt; 627 if (opt)
628 set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
629 else
630 clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
620 break; 631 break;
621 632
622 case BT_FLUSHABLE: 633 case BT_FLUSHABLE:
@@ -716,16 +727,13 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
716 if (msg->msg_flags & MSG_OOB) 727 if (msg->msg_flags & MSG_OOB)
717 return -EOPNOTSUPP; 728 return -EOPNOTSUPP;
718 729
719 lock_sock(sk); 730 if (sk->sk_state != BT_CONNECTED)
720
721 if (sk->sk_state != BT_CONNECTED) {
722 release_sock(sk);
723 return -ENOTCONN; 731 return -ENOTCONN;
724 }
725 732
733 l2cap_chan_lock(chan);
726 err = l2cap_chan_send(chan, msg, len, sk->sk_priority); 734 err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
735 l2cap_chan_unlock(chan);
727 736
728 release_sock(sk);
729 return err; 737 return err;
730} 738}
731 739
@@ -737,7 +745,8 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
737 745
738 lock_sock(sk); 746 lock_sock(sk);
739 747
740 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) { 748 if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP,
749 &bt_sk(sk)->flags)) {
741 sk->sk_state = BT_CONFIG; 750 sk->sk_state = BT_CONFIG;
742 pi->chan->state = BT_CONFIG; 751 pi->chan->state = BT_CONFIG;
743 752
@@ -931,12 +940,19 @@ static void l2cap_sock_state_change_cb(void *data, int state)
931} 940}
932 941
933static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan, 942static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
934 unsigned long len, int nb, 943 unsigned long len, int nb)
935 int *err)
936{ 944{
937 struct sock *sk = chan->sk; 945 struct sk_buff *skb;
946 int err;
947
948 l2cap_chan_unlock(chan);
949 skb = bt_skb_send_alloc(chan->sk, len, nb, &err);
950 l2cap_chan_lock(chan);
951
952 if (!skb)
953 return ERR_PTR(err);
938 954
939 return bt_skb_send_alloc(sk, len, nb, err); 955 return skb;
940} 956}
941 957
942static struct l2cap_ops l2cap_chan_ops = { 958static struct l2cap_ops l2cap_chan_ops = {
@@ -952,6 +968,7 @@ static void l2cap_sock_destruct(struct sock *sk)
952{ 968{
953 BT_DBG("sk %p", sk); 969 BT_DBG("sk %p", sk);
954 970
971 l2cap_chan_put(l2cap_pi(sk)->chan);
955 if (l2cap_pi(sk)->rx_busy_skb) { 972 if (l2cap_pi(sk)->rx_busy_skb) {
956 kfree_skb(l2cap_pi(sk)->rx_busy_skb); 973 kfree_skb(l2cap_pi(sk)->rx_busy_skb);
957 l2cap_pi(sk)->rx_busy_skb = NULL; 974 l2cap_pi(sk)->rx_busy_skb = NULL;
@@ -972,7 +989,7 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
972 struct l2cap_chan *pchan = l2cap_pi(parent)->chan; 989 struct l2cap_chan *pchan = l2cap_pi(parent)->chan;
973 990
974 sk->sk_type = parent->sk_type; 991 sk->sk_type = parent->sk_type;
975 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup; 992 bt_sk(sk)->flags = bt_sk(parent)->flags;
976 993
977 chan->chan_type = pchan->chan_type; 994 chan->chan_type = pchan->chan_type;
978 chan->imtu = pchan->imtu; 995 chan->imtu = pchan->imtu;
@@ -1010,13 +1027,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
1010 } else { 1027 } else {
1011 chan->mode = L2CAP_MODE_BASIC; 1028 chan->mode = L2CAP_MODE_BASIC;
1012 } 1029 }
1013 chan->max_tx = L2CAP_DEFAULT_MAX_TX; 1030
1014 chan->fcs = L2CAP_FCS_CRC16; 1031 l2cap_chan_set_defaults(chan);
1015 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
1016 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
1017 chan->sec_level = BT_SECURITY_LOW;
1018 chan->flags = 0;
1019 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1020 } 1032 }
1021 1033
1022 /* Default config options */ 1034 /* Default config options */
@@ -1052,12 +1064,16 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
1052 sk->sk_protocol = proto; 1064 sk->sk_protocol = proto;
1053 sk->sk_state = BT_OPEN; 1065 sk->sk_state = BT_OPEN;
1054 1066
1055 chan = l2cap_chan_create(sk); 1067 chan = l2cap_chan_create();
1056 if (!chan) { 1068 if (!chan) {
1057 l2cap_sock_kill(sk); 1069 l2cap_sock_kill(sk);
1058 return NULL; 1070 return NULL;
1059 } 1071 }
1060 1072
1073 l2cap_chan_hold(chan);
1074
1075 chan->sk = sk;
1076
1061 l2cap_pi(sk)->chan = chan; 1077 l2cap_pi(sk)->chan = chan;
1062 1078
1063 return sk; 1079 return sk;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 4bb03b111122..25d220776079 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -35,10 +35,9 @@
35#include <net/bluetooth/smp.h> 35#include <net/bluetooth/smp.h>
36 36
37bool enable_hs; 37bool enable_hs;
38bool enable_le;
39 38
40#define MGMT_VERSION 1 39#define MGMT_VERSION 1
41#define MGMT_REVISION 0 40#define MGMT_REVISION 1
42 41
43static const u16 mgmt_commands[] = { 42static const u16 mgmt_commands[] = {
44 MGMT_OP_READ_INDEX_LIST, 43 MGMT_OP_READ_INDEX_LIST,
@@ -78,6 +77,7 @@ static const u16 mgmt_commands[] = {
78 MGMT_OP_CONFIRM_NAME, 77 MGMT_OP_CONFIRM_NAME,
79 MGMT_OP_BLOCK_DEVICE, 78 MGMT_OP_BLOCK_DEVICE,
80 MGMT_OP_UNBLOCK_DEVICE, 79 MGMT_OP_UNBLOCK_DEVICE,
80 MGMT_OP_SET_DEVICE_ID,
81}; 81};
82 82
83static const u16 mgmt_events[] = { 83static const u16 mgmt_events[] = {
@@ -224,7 +224,7 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
224 224
225 ev = (void *) skb_put(skb, sizeof(*ev)); 225 ev = (void *) skb_put(skb, sizeof(*ev));
226 ev->status = status; 226 ev->status = status;
227 put_unaligned_le16(cmd, &ev->opcode); 227 ev->opcode = cpu_to_le16(cmd);
228 228
229 err = sock_queue_rcv_skb(sk, skb); 229 err = sock_queue_rcv_skb(sk, skb);
230 if (err < 0) 230 if (err < 0)
@@ -254,7 +254,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
254 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len); 254 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
255 255
256 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len); 256 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
257 put_unaligned_le16(cmd, &ev->opcode); 257 ev->opcode = cpu_to_le16(cmd);
258 ev->status = status; 258 ev->status = status;
259 259
260 if (rp) 260 if (rp)
@@ -275,7 +275,7 @@ static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
275 BT_DBG("sock %p", sk); 275 BT_DBG("sock %p", sk);
276 276
277 rp.version = MGMT_VERSION; 277 rp.version = MGMT_VERSION;
278 put_unaligned_le16(MGMT_REVISION, &rp.revision); 278 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
279 279
280 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp, 280 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
281 sizeof(rp)); 281 sizeof(rp));
@@ -285,9 +285,9 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
285 u16 data_len) 285 u16 data_len)
286{ 286{
287 struct mgmt_rp_read_commands *rp; 287 struct mgmt_rp_read_commands *rp;
288 u16 num_commands = ARRAY_SIZE(mgmt_commands); 288 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
289 u16 num_events = ARRAY_SIZE(mgmt_events); 289 const u16 num_events = ARRAY_SIZE(mgmt_events);
290 u16 *opcode; 290 __le16 *opcode;
291 size_t rp_size; 291 size_t rp_size;
292 int i, err; 292 int i, err;
293 293
@@ -299,8 +299,8 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
299 if (!rp) 299 if (!rp)
300 return -ENOMEM; 300 return -ENOMEM;
301 301
302 put_unaligned_le16(num_commands, &rp->num_commands); 302 rp->num_commands = __constant_cpu_to_le16(num_commands);
303 put_unaligned_le16(num_events, &rp->num_events); 303 rp->num_events = __constant_cpu_to_le16(num_events);
304 304
305 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++) 305 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
306 put_unaligned_le16(mgmt_commands[i], opcode); 306 put_unaligned_le16(mgmt_commands[i], opcode);
@@ -341,14 +341,14 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
341 return -ENOMEM; 341 return -ENOMEM;
342 } 342 }
343 343
344 put_unaligned_le16(count, &rp->num_controllers); 344 rp->num_controllers = cpu_to_le16(count);
345 345
346 i = 0; 346 i = 0;
347 list_for_each_entry(d, &hci_dev_list, list) { 347 list_for_each_entry(d, &hci_dev_list, list) {
348 if (test_bit(HCI_SETUP, &d->dev_flags)) 348 if (test_bit(HCI_SETUP, &d->dev_flags))
349 continue; 349 continue;
350 350
351 put_unaligned_le16(d->id, &rp->index[i++]); 351 rp->index[i++] = cpu_to_le16(d->id);
352 BT_DBG("Added hci%u", d->id); 352 BT_DBG("Added hci%u", d->id);
353 } 353 }
354 354
@@ -383,10 +383,8 @@ static u32 get_supported_settings(struct hci_dev *hdev)
383 if (enable_hs) 383 if (enable_hs)
384 settings |= MGMT_SETTING_HS; 384 settings |= MGMT_SETTING_HS;
385 385
386 if (enable_le) { 386 if (hdev->features[4] & LMP_LE)
387 if (hdev->features[4] & LMP_LE) 387 settings |= MGMT_SETTING_LE;
388 settings |= MGMT_SETTING_LE;
389 }
390 388
391 return settings; 389 return settings;
392} 390}
@@ -442,9 +440,7 @@ static u16 get_uuid16(u8 *uuid128)
442 return 0; 440 return 0;
443 } 441 }
444 442
445 memcpy(&val, &uuid128[12], 4); 443 val = get_unaligned_le32(&uuid128[12]);
446
447 val = le32_to_cpu(val);
448 if (val > 0xffff) 444 if (val > 0xffff)
449 return 0; 445 return 0;
450 446
@@ -479,6 +475,28 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
479 ptr += (name_len + 2); 475 ptr += (name_len + 2);
480 } 476 }
481 477
478 if (hdev->inq_tx_power) {
479 ptr[0] = 2;
480 ptr[1] = EIR_TX_POWER;
481 ptr[2] = (u8) hdev->inq_tx_power;
482
483 eir_len += 3;
484 ptr += 3;
485 }
486
487 if (hdev->devid_source > 0) {
488 ptr[0] = 9;
489 ptr[1] = EIR_DEVICE_ID;
490
491 put_unaligned_le16(hdev->devid_source, ptr + 2);
492 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
493 put_unaligned_le16(hdev->devid_product, ptr + 6);
494 put_unaligned_le16(hdev->devid_version, ptr + 8);
495
496 eir_len += 10;
497 ptr += 10;
498 }
499
482 memset(uuid16_list, 0, sizeof(uuid16_list)); 500 memset(uuid16_list, 0, sizeof(uuid16_list));
483 501
484 /* Group all UUID16 types */ 502 /* Group all UUID16 types */
@@ -642,8 +660,7 @@ static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
642 bacpy(&rp.bdaddr, &hdev->bdaddr); 660 bacpy(&rp.bdaddr, &hdev->bdaddr);
643 661
644 rp.version = hdev->hci_ver; 662 rp.version = hdev->hci_ver;
645 663 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
646 put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
647 664
648 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev)); 665 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
649 rp.current_settings = cpu_to_le32(get_current_settings(hdev)); 666 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
@@ -840,7 +857,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
840 857
841 BT_DBG("request for %s", hdev->name); 858 BT_DBG("request for %s", hdev->name);
842 859
843 timeout = get_unaligned_le16(&cp->timeout); 860 timeout = __le16_to_cpu(cp->timeout);
844 if (!cp->val && timeout > 0) 861 if (!cp->val && timeout > 0)
845 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, 862 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
846 MGMT_STATUS_INVALID_PARAMS); 863 MGMT_STATUS_INVALID_PARAMS);
@@ -1122,8 +1139,8 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1122 } 1139 }
1123 1140
1124 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) { 1141 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1125 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, 1142 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1126 MGMT_STATUS_BUSY); 1143 MGMT_STATUS_BUSY);
1127 goto failed; 1144 goto failed;
1128 } 1145 }
1129 1146
@@ -1179,7 +1196,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1179 1196
1180 hci_dev_lock(hdev); 1197 hci_dev_lock(hdev);
1181 1198
1182 if (!enable_le || !(hdev->features[4] & LMP_LE)) { 1199 if (!(hdev->features[4] & LMP_LE)) {
1183 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE, 1200 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1184 MGMT_STATUS_NOT_SUPPORTED); 1201 MGMT_STATUS_NOT_SUPPORTED);
1185 goto unlock; 1202 goto unlock;
@@ -1227,10 +1244,8 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1227 1244
1228 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp), 1245 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1229 &hci_cp); 1246 &hci_cp);
1230 if (err < 0) { 1247 if (err < 0)
1231 mgmt_pending_remove(cmd); 1248 mgmt_pending_remove(cmd);
1232 goto unlock;
1233 }
1234 1249
1235unlock: 1250unlock:
1236 hci_dev_unlock(hdev); 1251 hci_dev_unlock(hdev);
@@ -1280,10 +1295,8 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1280 } 1295 }
1281 1296
1282 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len); 1297 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1283 if (!cmd) { 1298 if (!cmd)
1284 err = -ENOMEM; 1299 err = -ENOMEM;
1285 goto failed;
1286 }
1287 1300
1288failed: 1301failed:
1289 hci_dev_unlock(hdev); 1302 hci_dev_unlock(hdev);
@@ -1368,10 +1381,8 @@ update_class:
1368 } 1381 }
1369 1382
1370 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len); 1383 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1371 if (!cmd) { 1384 if (!cmd)
1372 err = -ENOMEM; 1385 err = -ENOMEM;
1373 goto unlock;
1374 }
1375 1386
1376unlock: 1387unlock:
1377 hci_dev_unlock(hdev); 1388 hci_dev_unlock(hdev);
@@ -1422,10 +1433,8 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1422 } 1433 }
1423 1434
1424 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len); 1435 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1425 if (!cmd) { 1436 if (!cmd)
1426 err = -ENOMEM; 1437 err = -ENOMEM;
1427 goto unlock;
1428 }
1429 1438
1430unlock: 1439unlock:
1431 hci_dev_unlock(hdev); 1440 hci_dev_unlock(hdev);
@@ -1439,7 +1448,7 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1439 u16 key_count, expected_len; 1448 u16 key_count, expected_len;
1440 int i; 1449 int i;
1441 1450
1442 key_count = get_unaligned_le16(&cp->key_count); 1451 key_count = __le16_to_cpu(cp->key_count);
1443 1452
1444 expected_len = sizeof(*cp) + key_count * 1453 expected_len = sizeof(*cp) + key_count *
1445 sizeof(struct mgmt_link_key_info); 1454 sizeof(struct mgmt_link_key_info);
@@ -1512,7 +1521,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1512 goto unlock; 1521 goto unlock;
1513 } 1522 }
1514 1523
1515 if (cp->addr.type == MGMT_ADDR_BREDR) 1524 if (cp->addr.type == BDADDR_BREDR)
1516 err = hci_remove_link_key(hdev, &cp->addr.bdaddr); 1525 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1517 else 1526 else
1518 err = hci_remove_ltk(hdev, &cp->addr.bdaddr); 1527 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
@@ -1524,7 +1533,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1524 } 1533 }
1525 1534
1526 if (cp->disconnect) { 1535 if (cp->disconnect) {
1527 if (cp->addr.type == MGMT_ADDR_BREDR) 1536 if (cp->addr.type == BDADDR_BREDR)
1528 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, 1537 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1529 &cp->addr.bdaddr); 1538 &cp->addr.bdaddr);
1530 else 1539 else
@@ -1548,7 +1557,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1548 goto unlock; 1557 goto unlock;
1549 } 1558 }
1550 1559
1551 put_unaligned_le16(conn->handle, &dc.handle); 1560 dc.handle = cpu_to_le16(conn->handle);
1552 dc.reason = 0x13; /* Remote User Terminated Connection */ 1561 dc.reason = 0x13; /* Remote User Terminated Connection */
1553 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc); 1562 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1554 if (err < 0) 1563 if (err < 0)
@@ -1584,7 +1593,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1584 goto failed; 1593 goto failed;
1585 } 1594 }
1586 1595
1587 if (cp->addr.type == MGMT_ADDR_BREDR) 1596 if (cp->addr.type == BDADDR_BREDR)
1588 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); 1597 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
1589 else 1598 else
1590 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); 1599 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
@@ -1601,7 +1610,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1601 goto failed; 1610 goto failed;
1602 } 1611 }
1603 1612
1604 put_unaligned_le16(conn->handle, &dc.handle); 1613 dc.handle = cpu_to_le16(conn->handle);
1605 dc.reason = 0x13; /* Remote User Terminated Connection */ 1614 dc.reason = 0x13; /* Remote User Terminated Connection */
1606 1615
1607 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc); 1616 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
@@ -1613,22 +1622,22 @@ failed:
1613 return err; 1622 return err;
1614} 1623}
1615 1624
1616static u8 link_to_mgmt(u8 link_type, u8 addr_type) 1625static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1617{ 1626{
1618 switch (link_type) { 1627 switch (link_type) {
1619 case LE_LINK: 1628 case LE_LINK:
1620 switch (addr_type) { 1629 switch (addr_type) {
1621 case ADDR_LE_DEV_PUBLIC: 1630 case ADDR_LE_DEV_PUBLIC:
1622 return MGMT_ADDR_LE_PUBLIC; 1631 return BDADDR_LE_PUBLIC;
1623 case ADDR_LE_DEV_RANDOM: 1632
1624 return MGMT_ADDR_LE_RANDOM;
1625 default: 1633 default:
1626 return MGMT_ADDR_INVALID; 1634 /* Fallback to LE Random address type */
1635 return BDADDR_LE_RANDOM;
1627 } 1636 }
1628 case ACL_LINK: 1637
1629 return MGMT_ADDR_BREDR;
1630 default: 1638 default:
1631 return MGMT_ADDR_INVALID; 1639 /* Fallback to BR/EDR type */
1640 return BDADDR_BREDR;
1632 } 1641 }
1633} 1642}
1634 1643
@@ -1669,13 +1678,13 @@ static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1669 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags)) 1678 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1670 continue; 1679 continue;
1671 bacpy(&rp->addr[i].bdaddr, &c->dst); 1680 bacpy(&rp->addr[i].bdaddr, &c->dst);
1672 rp->addr[i].type = link_to_mgmt(c->type, c->dst_type); 1681 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
1673 if (rp->addr[i].type == MGMT_ADDR_INVALID) 1682 if (c->type == SCO_LINK || c->type == ESCO_LINK)
1674 continue; 1683 continue;
1675 i++; 1684 i++;
1676 } 1685 }
1677 1686
1678 put_unaligned_le16(i, &rp->conn_count); 1687 rp->conn_count = cpu_to_le16(i);
1679 1688
1680 /* Recalculate length in case of filtered SCO connections, etc */ 1689 /* Recalculate length in case of filtered SCO connections, etc */
1681 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info)); 1690 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
@@ -1836,7 +1845,7 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
1836 struct hci_conn *conn = cmd->user_data; 1845 struct hci_conn *conn = cmd->user_data;
1837 1846
1838 bacpy(&rp.addr.bdaddr, &conn->dst); 1847 bacpy(&rp.addr.bdaddr, &conn->dst);
1839 rp.addr.type = link_to_mgmt(conn->type, conn->dst_type); 1848 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
1840 1849
1841 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status, 1850 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
1842 &rp, sizeof(rp)); 1851 &rp, sizeof(rp));
@@ -1890,12 +1899,12 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1890 else 1899 else
1891 auth_type = HCI_AT_DEDICATED_BONDING_MITM; 1900 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1892 1901
1893 if (cp->addr.type == MGMT_ADDR_BREDR) 1902 if (cp->addr.type == BDADDR_BREDR)
1894 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, sec_level, 1903 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
1895 auth_type); 1904 cp->addr.type, sec_level, auth_type);
1896 else 1905 else
1897 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, sec_level, 1906 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
1898 auth_type); 1907 cp->addr.type, sec_level, auth_type);
1899 1908
1900 memset(&rp, 0, sizeof(rp)); 1909 memset(&rp, 0, sizeof(rp));
1901 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); 1910 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
@@ -1923,7 +1932,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1923 } 1932 }
1924 1933
1925 /* For LE, just connecting isn't a proof that the pairing finished */ 1934 /* For LE, just connecting isn't a proof that the pairing finished */
1926 if (cp->addr.type == MGMT_ADDR_BREDR) 1935 if (cp->addr.type == BDADDR_BREDR)
1927 conn->connect_cfm_cb = pairing_complete_cb; 1936 conn->connect_cfm_cb = pairing_complete_cb;
1928 1937
1929 conn->security_cfm_cb = pairing_complete_cb; 1938 conn->security_cfm_cb = pairing_complete_cb;
@@ -2000,7 +2009,7 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2000 goto done; 2009 goto done;
2001 } 2010 }
2002 2011
2003 if (type == MGMT_ADDR_BREDR) 2012 if (type == BDADDR_BREDR)
2004 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr); 2013 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
2005 else 2014 else
2006 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr); 2015 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
@@ -2011,7 +2020,7 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2011 goto done; 2020 goto done;
2012 } 2021 }
2013 2022
2014 if (type == MGMT_ADDR_LE_PUBLIC || type == MGMT_ADDR_LE_RANDOM) { 2023 if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) {
2015 /* Continue with pairing via SMP */ 2024 /* Continue with pairing via SMP */
2016 err = smp_user_confirm_reply(conn, mgmt_op, passkey); 2025 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2017 2026
@@ -2295,6 +2304,12 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2295 goto failed; 2304 goto failed;
2296 } 2305 }
2297 2306
2307 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2308 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2309 MGMT_STATUS_BUSY);
2310 goto failed;
2311 }
2312
2298 if (hdev->discovery.state != DISCOVERY_STOPPED) { 2313 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2299 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY, 2314 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2300 MGMT_STATUS_BUSY); 2315 MGMT_STATUS_BUSY);
@@ -2381,27 +2396,39 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2381 goto unlock; 2396 goto unlock;
2382 } 2397 }
2383 2398
2384 if (hdev->discovery.state == DISCOVERY_FINDING) { 2399 switch (hdev->discovery.state) {
2385 err = hci_cancel_inquiry(hdev); 2400 case DISCOVERY_FINDING:
2386 if (err < 0) 2401 if (test_bit(HCI_INQUIRY, &hdev->flags))
2387 mgmt_pending_remove(cmd); 2402 err = hci_cancel_inquiry(hdev);
2388 else 2403 else
2389 hci_discovery_set_state(hdev, DISCOVERY_STOPPING); 2404 err = hci_cancel_le_scan(hdev);
2390 goto unlock;
2391 }
2392 2405
2393 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_PENDING); 2406 break;
2394 if (!e) { 2407
2395 mgmt_pending_remove(cmd); 2408 case DISCOVERY_RESOLVING:
2396 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0, 2409 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2397 &mgmt_cp->type, sizeof(mgmt_cp->type)); 2410 NAME_PENDING);
2398 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2411 if (!e) {
2399 goto unlock; 2412 mgmt_pending_remove(cmd);
2413 err = cmd_complete(sk, hdev->id,
2414 MGMT_OP_STOP_DISCOVERY, 0,
2415 &mgmt_cp->type,
2416 sizeof(mgmt_cp->type));
2417 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2418 goto unlock;
2419 }
2420
2421 bacpy(&cp.bdaddr, &e->data.bdaddr);
2422 err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
2423 sizeof(cp), &cp);
2424
2425 break;
2426
2427 default:
2428 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2429 err = -EFAULT;
2400 } 2430 }
2401 2431
2402 bacpy(&cp.bdaddr, &e->data.bdaddr);
2403 err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2404 &cp);
2405 if (err < 0) 2432 if (err < 0)
2406 mgmt_pending_remove(cmd); 2433 mgmt_pending_remove(cmd);
2407 else 2434 else
@@ -2501,6 +2528,37 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
2501 return err; 2528 return err;
2502} 2529}
2503 2530
2531static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
2532 u16 len)
2533{
2534 struct mgmt_cp_set_device_id *cp = data;
2535 int err;
2536 __u16 source;
2537
2538 BT_DBG("%s", hdev->name);
2539
2540 source = __le16_to_cpu(cp->source);
2541
2542 if (source > 0x0002)
2543 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
2544 MGMT_STATUS_INVALID_PARAMS);
2545
2546 hci_dev_lock(hdev);
2547
2548 hdev->devid_source = source;
2549 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
2550 hdev->devid_product = __le16_to_cpu(cp->product);
2551 hdev->devid_version = __le16_to_cpu(cp->version);
2552
2553 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
2554
2555 update_eir(hdev);
2556
2557 hci_dev_unlock(hdev);
2558
2559 return err;
2560}
2561
2504static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev, 2562static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2505 void *data, u16 len) 2563 void *data, u16 len)
2506{ 2564{
@@ -2565,7 +2623,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2565 u16 key_count, expected_len; 2623 u16 key_count, expected_len;
2566 int i; 2624 int i;
2567 2625
2568 key_count = get_unaligned_le16(&cp->key_count); 2626 key_count = __le16_to_cpu(cp->key_count);
2569 2627
2570 expected_len = sizeof(*cp) + key_count * 2628 expected_len = sizeof(*cp) + key_count *
2571 sizeof(struct mgmt_ltk_info); 2629 sizeof(struct mgmt_ltk_info);
@@ -2591,7 +2649,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2591 else 2649 else
2592 type = HCI_SMP_LTK_SLAVE; 2650 type = HCI_SMP_LTK_SLAVE;
2593 2651
2594 hci_add_ltk(hdev, &key->addr.bdaddr, key->addr.type, 2652 hci_add_ltk(hdev, &key->addr.bdaddr,
2653 bdaddr_to_le(key->addr.type),
2595 type, 0, key->authenticated, key->val, 2654 type, 0, key->authenticated, key->val,
2596 key->enc_size, key->ediv, key->rand); 2655 key->enc_size, key->ediv, key->rand);
2597 } 2656 }
@@ -2601,7 +2660,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2601 return 0; 2660 return 0;
2602} 2661}
2603 2662
2604struct mgmt_handler { 2663static const struct mgmt_handler {
2605 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data, 2664 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
2606 u16 data_len); 2665 u16 data_len);
2607 bool var_len; 2666 bool var_len;
@@ -2647,6 +2706,7 @@ struct mgmt_handler {
2647 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE }, 2706 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
2648 { block_device, false, MGMT_BLOCK_DEVICE_SIZE }, 2707 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
2649 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE }, 2708 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
2709 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
2650}; 2710};
2651 2711
2652 2712
@@ -2657,7 +2717,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2657 struct mgmt_hdr *hdr; 2717 struct mgmt_hdr *hdr;
2658 u16 opcode, index, len; 2718 u16 opcode, index, len;
2659 struct hci_dev *hdev = NULL; 2719 struct hci_dev *hdev = NULL;
2660 struct mgmt_handler *handler; 2720 const struct mgmt_handler *handler;
2661 int err; 2721 int err;
2662 2722
2663 BT_DBG("got %zu bytes", msglen); 2723 BT_DBG("got %zu bytes", msglen);
@@ -2675,9 +2735,9 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2675 } 2735 }
2676 2736
2677 hdr = buf; 2737 hdr = buf;
2678 opcode = get_unaligned_le16(&hdr->opcode); 2738 opcode = __le16_to_cpu(hdr->opcode);
2679 index = get_unaligned_le16(&hdr->index); 2739 index = __le16_to_cpu(hdr->index);
2680 len = get_unaligned_le16(&hdr->len); 2740 len = __le16_to_cpu(hdr->len);
2681 2741
2682 if (len != msglen - sizeof(*hdr)) { 2742 if (len != msglen - sizeof(*hdr)) {
2683 err = -EINVAL; 2743 err = -EINVAL;
@@ -2884,7 +2944,8 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
2884 return 0; 2944 return 0;
2885} 2945}
2886 2946
2887int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent) 2947int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
2948 bool persistent)
2888{ 2949{
2889 struct mgmt_ev_new_link_key ev; 2950 struct mgmt_ev_new_link_key ev;
2890 2951
@@ -2892,7 +2953,7 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persisten
2892 2953
2893 ev.store_hint = persistent; 2954 ev.store_hint = persistent;
2894 bacpy(&ev.key.addr.bdaddr, &key->bdaddr); 2955 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
2895 ev.key.addr.type = MGMT_ADDR_BREDR; 2956 ev.key.addr.type = BDADDR_BREDR;
2896 ev.key.type = key->type; 2957 ev.key.type = key->type;
2897 memcpy(ev.key.val, key->val, 16); 2958 memcpy(ev.key.val, key->val, 16);
2898 ev.key.pin_len = key->pin_len; 2959 ev.key.pin_len = key->pin_len;
@@ -2908,7 +2969,7 @@ int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
2908 2969
2909 ev.store_hint = persistent; 2970 ev.store_hint = persistent;
2910 bacpy(&ev.key.addr.bdaddr, &key->bdaddr); 2971 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
2911 ev.key.addr.type = key->bdaddr_type; 2972 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
2912 ev.key.authenticated = key->authenticated; 2973 ev.key.authenticated = key->authenticated;
2913 ev.key.enc_size = key->enc_size; 2974 ev.key.enc_size = key->enc_size;
2914 ev.key.ediv = key->ediv; 2975 ev.key.ediv = key->ediv;
@@ -2932,7 +2993,7 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
2932 u16 eir_len = 0; 2993 u16 eir_len = 0;
2933 2994
2934 bacpy(&ev->addr.bdaddr, bdaddr); 2995 bacpy(&ev->addr.bdaddr, bdaddr);
2935 ev->addr.type = link_to_mgmt(link_type, addr_type); 2996 ev->addr.type = link_to_bdaddr(link_type, addr_type);
2936 2997
2937 ev->flags = __cpu_to_le32(flags); 2998 ev->flags = __cpu_to_le32(flags);
2938 2999
@@ -2944,7 +3005,7 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
2944 eir_len = eir_append_data(ev->eir, eir_len, 3005 eir_len = eir_append_data(ev->eir, eir_len,
2945 EIR_CLASS_OF_DEV, dev_class, 3); 3006 EIR_CLASS_OF_DEV, dev_class, 3);
2946 3007
2947 put_unaligned_le16(eir_len, &ev->eir_len); 3008 ev->eir_len = cpu_to_le16(eir_len);
2948 3009
2949 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf, 3010 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
2950 sizeof(*ev) + eir_len, NULL); 3011 sizeof(*ev) + eir_len, NULL);
@@ -2995,13 +3056,13 @@ int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
2995 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk); 3056 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
2996 3057
2997 bacpy(&ev.bdaddr, bdaddr); 3058 bacpy(&ev.bdaddr, bdaddr);
2998 ev.type = link_to_mgmt(link_type, addr_type); 3059 ev.type = link_to_bdaddr(link_type, addr_type);
2999 3060
3000 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), 3061 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3001 sk); 3062 sk);
3002 3063
3003 if (sk) 3064 if (sk)
3004 sock_put(sk); 3065 sock_put(sk);
3005 3066
3006 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, 3067 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3007 hdev); 3068 hdev);
@@ -3021,7 +3082,7 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3021 return -ENOENT; 3082 return -ENOENT;
3022 3083
3023 bacpy(&rp.addr.bdaddr, bdaddr); 3084 bacpy(&rp.addr.bdaddr, bdaddr);
3024 rp.addr.type = link_to_mgmt(link_type, addr_type); 3085 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3025 3086
3026 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 3087 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3027 mgmt_status(status), &rp, sizeof(rp)); 3088 mgmt_status(status), &rp, sizeof(rp));
@@ -3039,7 +3100,7 @@ int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3039 struct mgmt_ev_connect_failed ev; 3100 struct mgmt_ev_connect_failed ev;
3040 3101
3041 bacpy(&ev.addr.bdaddr, bdaddr); 3102 bacpy(&ev.addr.bdaddr, bdaddr);
3042 ev.addr.type = link_to_mgmt(link_type, addr_type); 3103 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3043 ev.status = mgmt_status(status); 3104 ev.status = mgmt_status(status);
3044 3105
3045 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL); 3106 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
@@ -3050,7 +3111,7 @@ int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3050 struct mgmt_ev_pin_code_request ev; 3111 struct mgmt_ev_pin_code_request ev;
3051 3112
3052 bacpy(&ev.addr.bdaddr, bdaddr); 3113 bacpy(&ev.addr.bdaddr, bdaddr);
3053 ev.addr.type = MGMT_ADDR_BREDR; 3114 ev.addr.type = BDADDR_BREDR;
3054 ev.secure = secure; 3115 ev.secure = secure;
3055 3116
3056 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), 3117 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
@@ -3069,7 +3130,7 @@ int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3069 return -ENOENT; 3130 return -ENOENT;
3070 3131
3071 bacpy(&rp.addr.bdaddr, bdaddr); 3132 bacpy(&rp.addr.bdaddr, bdaddr);
3072 rp.addr.type = MGMT_ADDR_BREDR; 3133 rp.addr.type = BDADDR_BREDR;
3073 3134
3074 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, 3135 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3075 mgmt_status(status), &rp, sizeof(rp)); 3136 mgmt_status(status), &rp, sizeof(rp));
@@ -3091,7 +3152,7 @@ int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3091 return -ENOENT; 3152 return -ENOENT;
3092 3153
3093 bacpy(&rp.addr.bdaddr, bdaddr); 3154 bacpy(&rp.addr.bdaddr, bdaddr);
3094 rp.addr.type = MGMT_ADDR_BREDR; 3155 rp.addr.type = BDADDR_BREDR;
3095 3156
3096 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY, 3157 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3097 mgmt_status(status), &rp, sizeof(rp)); 3158 mgmt_status(status), &rp, sizeof(rp));
@@ -3110,9 +3171,9 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3110 BT_DBG("%s", hdev->name); 3171 BT_DBG("%s", hdev->name);
3111 3172
3112 bacpy(&ev.addr.bdaddr, bdaddr); 3173 bacpy(&ev.addr.bdaddr, bdaddr);
3113 ev.addr.type = link_to_mgmt(link_type, addr_type); 3174 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3114 ev.confirm_hint = confirm_hint; 3175 ev.confirm_hint = confirm_hint;
3115 put_unaligned_le32(value, &ev.value); 3176 ev.value = value;
3116 3177
3117 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev), 3178 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
3118 NULL); 3179 NULL);
@@ -3126,7 +3187,7 @@ int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3126 BT_DBG("%s", hdev->name); 3187 BT_DBG("%s", hdev->name);
3127 3188
3128 bacpy(&ev.addr.bdaddr, bdaddr); 3189 bacpy(&ev.addr.bdaddr, bdaddr);
3129 ev.addr.type = link_to_mgmt(link_type, addr_type); 3190 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3130 3191
3131 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev), 3192 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
3132 NULL); 3193 NULL);
@@ -3145,7 +3206,7 @@ static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3145 return -ENOENT; 3206 return -ENOENT;
3146 3207
3147 bacpy(&rp.addr.bdaddr, bdaddr); 3208 bacpy(&rp.addr.bdaddr, bdaddr);
3148 rp.addr.type = link_to_mgmt(link_type, addr_type); 3209 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3149 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status), 3210 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3150 &rp, sizeof(rp)); 3211 &rp, sizeof(rp));
3151 3212
@@ -3188,7 +3249,7 @@ int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3188 struct mgmt_ev_auth_failed ev; 3249 struct mgmt_ev_auth_failed ev;
3189 3250
3190 bacpy(&ev.addr.bdaddr, bdaddr); 3251 bacpy(&ev.addr.bdaddr, bdaddr);
3191 ev.addr.type = link_to_mgmt(link_type, addr_type); 3252 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3192 ev.status = mgmt_status(status); 3253 ev.status = mgmt_status(status);
3193 3254
3194 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL); 3255 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
@@ -3413,10 +3474,10 @@ int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
3413 3474
3414 if (enable && test_and_clear_bit(HCI_LE_ENABLED, 3475 if (enable && test_and_clear_bit(HCI_LE_ENABLED,
3415 &hdev->dev_flags)) 3476 &hdev->dev_flags))
3416 err = new_settings(hdev, NULL); 3477 err = new_settings(hdev, NULL);
3417 3478
3418 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, 3479 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
3419 cmd_status_rsp, &mgmt_err); 3480 &mgmt_err);
3420 3481
3421 return err; 3482 return err;
3422 } 3483 }
@@ -3455,7 +3516,7 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3455 memset(buf, 0, sizeof(buf)); 3516 memset(buf, 0, sizeof(buf));
3456 3517
3457 bacpy(&ev->addr.bdaddr, bdaddr); 3518 bacpy(&ev->addr.bdaddr, bdaddr);
3458 ev->addr.type = link_to_mgmt(link_type, addr_type); 3519 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3459 ev->rssi = rssi; 3520 ev->rssi = rssi;
3460 if (cfm_name) 3521 if (cfm_name)
3461 ev->flags[0] |= MGMT_DEV_FOUND_CONFIRM_NAME; 3522 ev->flags[0] |= MGMT_DEV_FOUND_CONFIRM_NAME;
@@ -3469,7 +3530,7 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3469 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, 3530 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
3470 dev_class, 3); 3531 dev_class, 3);
3471 3532
3472 put_unaligned_le16(eir_len, &ev->eir_len); 3533 ev->eir_len = cpu_to_le16(eir_len);
3473 3534
3474 ev_size = sizeof(*ev) + eir_len; 3535 ev_size = sizeof(*ev) + eir_len;
3475 3536
@@ -3488,13 +3549,13 @@ int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3488 memset(buf, 0, sizeof(buf)); 3549 memset(buf, 0, sizeof(buf));
3489 3550
3490 bacpy(&ev->addr.bdaddr, bdaddr); 3551 bacpy(&ev->addr.bdaddr, bdaddr);
3491 ev->addr.type = link_to_mgmt(link_type, addr_type); 3552 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3492 ev->rssi = rssi; 3553 ev->rssi = rssi;
3493 3554
3494 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name, 3555 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
3495 name_len); 3556 name_len);
3496 3557
3497 put_unaligned_le16(eir_len, &ev->eir_len); 3558 ev->eir_len = cpu_to_le16(eir_len);
3498 3559
3499 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, 3560 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
3500 sizeof(*ev) + eir_len, NULL); 3561 sizeof(*ev) + eir_len, NULL);
@@ -3594,6 +3655,3 @@ int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3594 3655
3595module_param(enable_hs, bool, 0644); 3656module_param(enable_hs, bool, 0644);
3596MODULE_PARM_DESC(enable_hs, "Enable High Speed support"); 3657MODULE_PARM_DESC(enable_hs, "Enable High Speed support");
3597
3598module_param(enable_le, bool, 0644);
3599MODULE_PARM_DESC(enable_le, "Enable Low Energy support");
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index a55a43e9f70e..e8707debb864 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -260,7 +260,8 @@ static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
260 260
261 if (parent) { 261 if (parent) {
262 sk->sk_type = parent->sk_type; 262 sk->sk_type = parent->sk_type;
263 pi->dlc->defer_setup = bt_sk(parent)->defer_setup; 263 pi->dlc->defer_setup = test_bit(BT_SK_DEFER_SETUP,
264 &bt_sk(parent)->flags);
264 265
265 pi->sec_level = rfcomm_pi(parent)->sec_level; 266 pi->sec_level = rfcomm_pi(parent)->sec_level;
266 pi->role_switch = rfcomm_pi(parent)->role_switch; 267 pi->role_switch = rfcomm_pi(parent)->role_switch;
@@ -731,7 +732,11 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
731 break; 732 break;
732 } 733 }
733 734
734 bt_sk(sk)->defer_setup = opt; 735 if (opt)
736 set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
737 else
738 clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
739
735 break; 740 break;
736 741
737 default: 742 default:
@@ -849,7 +854,8 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
849 break; 854 break;
850 } 855 }
851 856
852 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval)) 857 if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
858 (u32 __user *) optval))
853 err = -EFAULT; 859 err = -EFAULT;
854 860
855 break; 861 break;
@@ -972,7 +978,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
972done: 978done:
973 bh_unlock_sock(parent); 979 bh_unlock_sock(parent);
974 980
975 if (bt_sk(parent)->defer_setup) 981 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
976 parent->sk_state_change(parent); 982 parent->sk_state_change(parent);
977 983
978 return result; 984 return result;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index f6ab12907963..cbdd313659a7 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -61,8 +61,6 @@ static struct bt_sock_list sco_sk_list = {
61static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent); 61static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent);
62static void sco_chan_del(struct sock *sk, int err); 62static void sco_chan_del(struct sock *sk, int err);
63 63
64static int sco_conn_del(struct hci_conn *conn, int err);
65
66static void sco_sock_close(struct sock *sk); 64static void sco_sock_close(struct sock *sk);
67static void sco_sock_kill(struct sock *sk); 65static void sco_sock_kill(struct sock *sk);
68 66
@@ -95,12 +93,12 @@ static void sco_sock_clear_timer(struct sock *sk)
95} 93}
96 94
97/* ---- SCO connections ---- */ 95/* ---- SCO connections ---- */
98static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status) 96static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
99{ 97{
100 struct hci_dev *hdev = hcon->hdev; 98 struct hci_dev *hdev = hcon->hdev;
101 struct sco_conn *conn = hcon->sco_data; 99 struct sco_conn *conn = hcon->sco_data;
102 100
103 if (conn || status) 101 if (conn)
104 return conn; 102 return conn;
105 103
106 conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC); 104 conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
@@ -195,13 +193,14 @@ static int sco_connect(struct sock *sk)
195 else 193 else
196 type = SCO_LINK; 194 type = SCO_LINK;
197 195
198 hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING); 196 hcon = hci_connect(hdev, type, dst, BDADDR_BREDR, BT_SECURITY_LOW,
197 HCI_AT_NO_BONDING);
199 if (IS_ERR(hcon)) { 198 if (IS_ERR(hcon)) {
200 err = PTR_ERR(hcon); 199 err = PTR_ERR(hcon);
201 goto done; 200 goto done;
202 } 201 }
203 202
204 conn = sco_conn_add(hcon, 0); 203 conn = sco_conn_add(hcon);
205 if (!conn) { 204 if (!conn) {
206 hci_conn_put(hcon); 205 hci_conn_put(hcon);
207 err = -ENOMEM; 206 err = -ENOMEM;
@@ -233,7 +232,7 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
233{ 232{
234 struct sco_conn *conn = sco_pi(sk)->conn; 233 struct sco_conn *conn = sco_pi(sk)->conn;
235 struct sk_buff *skb; 234 struct sk_buff *skb;
236 int err, count; 235 int err;
237 236
238 /* Check outgoing MTU */ 237 /* Check outgoing MTU */
239 if (len > conn->mtu) 238 if (len > conn->mtu)
@@ -241,20 +240,18 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
241 240
242 BT_DBG("sk %p len %d", sk, len); 241 BT_DBG("sk %p len %d", sk, len);
243 242
244 count = min_t(unsigned int, conn->mtu, len); 243 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
245 skb = bt_skb_send_alloc(sk, count,
246 msg->msg_flags & MSG_DONTWAIT, &err);
247 if (!skb) 244 if (!skb)
248 return err; 245 return err;
249 246
250 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) { 247 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
251 kfree_skb(skb); 248 kfree_skb(skb);
252 return -EFAULT; 249 return -EFAULT;
253 } 250 }
254 251
255 hci_send_sco(conn->hcon, skb); 252 hci_send_sco(conn->hcon, skb);
256 253
257 return count; 254 return len;
258} 255}
259 256
260static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb) 257static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
@@ -277,17 +274,20 @@ drop:
277} 274}
278 275
279/* -------- Socket interface ---------- */ 276/* -------- Socket interface ---------- */
280static struct sock *__sco_get_sock_by_addr(bdaddr_t *ba) 277static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
281{ 278{
282 struct sock *sk;
283 struct hlist_node *node; 279 struct hlist_node *node;
280 struct sock *sk;
281
282 sk_for_each(sk, node, &sco_sk_list.head) {
283 if (sk->sk_state != BT_LISTEN)
284 continue;
284 285
285 sk_for_each(sk, node, &sco_sk_list.head)
286 if (!bacmp(&bt_sk(sk)->src, ba)) 286 if (!bacmp(&bt_sk(sk)->src, ba))
287 goto found; 287 return sk;
288 sk = NULL; 288 }
289found: 289
290 return sk; 290 return NULL;
291} 291}
292 292
293/* Find socket listening on source bdaddr. 293/* Find socket listening on source bdaddr.
@@ -466,7 +466,6 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
466{ 466{
467 struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; 467 struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
468 struct sock *sk = sock->sk; 468 struct sock *sk = sock->sk;
469 bdaddr_t *src = &sa->sco_bdaddr;
470 int err = 0; 469 int err = 0;
471 470
472 BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr)); 471 BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr));
@@ -481,17 +480,14 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
481 goto done; 480 goto done;
482 } 481 }
483 482
484 write_lock(&sco_sk_list.lock); 483 if (sk->sk_type != SOCK_SEQPACKET) {
485 484 err = -EINVAL;
486 if (bacmp(src, BDADDR_ANY) && __sco_get_sock_by_addr(src)) { 485 goto done;
487 err = -EADDRINUSE;
488 } else {
489 /* Save source address */
490 bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
491 sk->sk_state = BT_BOUND;
492 } 486 }
493 487
494 write_unlock(&sco_sk_list.lock); 488 bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
489
490 sk->sk_state = BT_BOUND;
495 491
496done: 492done:
497 release_sock(sk); 493 release_sock(sk);
@@ -537,21 +533,38 @@ done:
537static int sco_sock_listen(struct socket *sock, int backlog) 533static int sco_sock_listen(struct socket *sock, int backlog)
538{ 534{
539 struct sock *sk = sock->sk; 535 struct sock *sk = sock->sk;
536 bdaddr_t *src = &bt_sk(sk)->src;
540 int err = 0; 537 int err = 0;
541 538
542 BT_DBG("sk %p backlog %d", sk, backlog); 539 BT_DBG("sk %p backlog %d", sk, backlog);
543 540
544 lock_sock(sk); 541 lock_sock(sk);
545 542
546 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) { 543 if (sk->sk_state != BT_BOUND) {
547 err = -EBADFD; 544 err = -EBADFD;
548 goto done; 545 goto done;
549 } 546 }
550 547
548 if (sk->sk_type != SOCK_SEQPACKET) {
549 err = -EINVAL;
550 goto done;
551 }
552
553 write_lock(&sco_sk_list.lock);
554
555 if (__sco_get_sock_listen_by_addr(src)) {
556 err = -EADDRINUSE;
557 goto unlock;
558 }
559
551 sk->sk_max_ack_backlog = backlog; 560 sk->sk_max_ack_backlog = backlog;
552 sk->sk_ack_backlog = 0; 561 sk->sk_ack_backlog = 0;
562
553 sk->sk_state = BT_LISTEN; 563 sk->sk_state = BT_LISTEN;
554 564
565unlock:
566 write_unlock(&sco_sk_list.lock);
567
555done: 568done:
556 release_sock(sk); 569 release_sock(sk);
557 return err; 570 return err;
@@ -923,7 +936,7 @@ int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
923 if (!status) { 936 if (!status) {
924 struct sco_conn *conn; 937 struct sco_conn *conn;
925 938
926 conn = sco_conn_add(hcon, status); 939 conn = sco_conn_add(hcon);
927 if (conn) 940 if (conn)
928 sco_conn_ready(conn); 941 sco_conn_ready(conn);
929 } else 942 } else
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index deb119875fd9..6fc7c4708f3e 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -956,7 +956,7 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
956 HCI_SMP_LTK_SLAVE, 1, authenticated, 956 HCI_SMP_LTK_SLAVE, 1, authenticated,
957 enc.ltk, smp->enc_key_size, ediv, ident.rand); 957 enc.ltk, smp->enc_key_size, ediv, ident.rand);
958 958
959 ident.ediv = cpu_to_le16(ediv); 959 ident.ediv = ediv;
960 960
961 smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident); 961 smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident);
962 962
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index a8bdf7405433..e5b7182fa099 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -145,6 +145,12 @@ static void free_fib_info_rcu(struct rcu_head *head)
145{ 145{
146 struct fib_info *fi = container_of(head, struct fib_info, rcu); 146 struct fib_info *fi = container_of(head, struct fib_info, rcu);
147 147
148 change_nexthops(fi) {
149 if (nexthop_nh->nh_dev)
150 dev_put(nexthop_nh->nh_dev);
151 } endfor_nexthops(fi);
152
153 release_net(fi->fib_net);
148 if (fi->fib_metrics != (u32 *) dst_default_metrics) 154 if (fi->fib_metrics != (u32 *) dst_default_metrics)
149 kfree(fi->fib_metrics); 155 kfree(fi->fib_metrics);
150 kfree(fi); 156 kfree(fi);
@@ -156,13 +162,7 @@ void free_fib_info(struct fib_info *fi)
156 pr_warn("Freeing alive fib_info %p\n", fi); 162 pr_warn("Freeing alive fib_info %p\n", fi);
157 return; 163 return;
158 } 164 }
159 change_nexthops(fi) {
160 if (nexthop_nh->nh_dev)
161 dev_put(nexthop_nh->nh_dev);
162 nexthop_nh->nh_dev = NULL;
163 } endfor_nexthops(fi);
164 fib_info_cnt--; 165 fib_info_cnt--;
165 release_net(fi->fib_net);
166 call_rcu(&fi->rcu, free_fib_info_rcu); 166 call_rcu(&fi->rcu, free_fib_info_rcu);
167} 167}
168 168
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ffcb3b016843..98b30d08efe9 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3452,6 +3452,7 @@ int __init ip_rt_init(void)
3452 0, 3452 0,
3453 &rt_hash_log, 3453 &rt_hash_log,
3454 &rt_hash_mask, 3454 &rt_hash_mask,
3455 0,
3455 rhash_entries ? 0 : 512 * 1024); 3456 rhash_entries ? 0 : 512 * 1024);
3456 memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket)); 3457 memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3457 rt_hash_lock_init(); 3458 rt_hash_lock_init();
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index bb485fcb077e..3ba605f60e4e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3514,6 +3514,7 @@ void __init tcp_init(void)
3514 0, 3514 0,
3515 NULL, 3515 NULL,
3516 &tcp_hashinfo.ehash_mask, 3516 &tcp_hashinfo.ehash_mask,
3517 0,
3517 thash_entries ? 0 : 512 * 1024); 3518 thash_entries ? 0 : 512 * 1024);
3518 for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) { 3519 for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) {
3519 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); 3520 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
@@ -3530,6 +3531,7 @@ void __init tcp_init(void)
3530 0, 3531 0,
3531 &tcp_hashinfo.bhash_size, 3532 &tcp_hashinfo.bhash_size,
3532 NULL, 3533 NULL,
3534 0,
3533 64 * 1024); 3535 64 * 1024);
3534 tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size; 3536 tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
3535 for (i = 0; i < tcp_hashinfo.bhash_size; i++) { 3537 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index cfa2aa128342..b224eb8bce8b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4555,6 +4555,11 @@ static bool tcp_try_coalesce(struct sock *sk,
4555 4555
4556 if (tcp_hdr(from)->fin) 4556 if (tcp_hdr(from)->fin)
4557 return false; 4557 return false;
4558
4559 /* Its possible this segment overlaps with prior segment in queue */
4560 if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
4561 return false;
4562
4558 if (!skb_try_coalesce(to, from, fragstolen, &delta)) 4563 if (!skb_try_coalesce(to, from, fragstolen, &delta))
4559 return false; 4564 return false;
4560 4565
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 609397ee78fb..eaca73644e79 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2192,26 +2192,16 @@ void __init udp_table_init(struct udp_table *table, const char *name)
2192{ 2192{
2193 unsigned int i; 2193 unsigned int i;
2194 2194
2195 if (!CONFIG_BASE_SMALL) 2195 table->hash = alloc_large_system_hash(name,
2196 table->hash = alloc_large_system_hash(name, 2196 2 * sizeof(struct udp_hslot),
2197 2 * sizeof(struct udp_hslot), 2197 uhash_entries,
2198 uhash_entries, 2198 21, /* one slot per 2 MB */
2199 21, /* one slot per 2 MB */ 2199 0,
2200 0, 2200 &table->log,
2201 &table->log, 2201 &table->mask,
2202 &table->mask, 2202 UDP_HTABLE_SIZE_MIN,
2203 64 * 1024); 2203 64 * 1024);
2204 /* 2204
2205 * Make sure hash table has the minimum size
2206 */
2207 if (CONFIG_BASE_SMALL || table->mask < UDP_HTABLE_SIZE_MIN - 1) {
2208 table->hash = kmalloc(UDP_HTABLE_SIZE_MIN *
2209 2 * sizeof(struct udp_hslot), GFP_KERNEL);
2210 if (!table->hash)
2211 panic(name);
2212 table->log = ilog2(UDP_HTABLE_SIZE_MIN);
2213 table->mask = UDP_HTABLE_SIZE_MIN - 1;
2214 }
2215 table->hash2 = table->hash + (table->mask + 1); 2205 table->hash2 = table->hash + (table->mask + 1);
2216 for (i = 0; i <= table->mask; i++) { 2206 for (i = 0; i <= table->mask; i++) {
2217 INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i); 2207 INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 5b7053c58732..7cf07158805c 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -421,16 +421,22 @@ static void sta_tx_agg_session_timer_expired(unsigned long data)
421 struct tid_ampdu_tx *tid_tx; 421 struct tid_ampdu_tx *tid_tx;
422 unsigned long timeout; 422 unsigned long timeout;
423 423
424 tid_tx = rcu_dereference_protected_tid_tx(sta, *ptid); 424 rcu_read_lock();
425 if (!tid_tx) 425 tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[*ptid]);
426 if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
427 rcu_read_unlock();
426 return; 428 return;
429 }
427 430
428 timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout); 431 timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
429 if (time_is_after_jiffies(timeout)) { 432 if (time_is_after_jiffies(timeout)) {
430 mod_timer(&tid_tx->session_timer, timeout); 433 mod_timer(&tid_tx->session_timer, timeout);
434 rcu_read_unlock();
431 return; 435 return;
432 } 436 }
433 437
438 rcu_read_unlock();
439
434#ifdef CONFIG_MAC80211_HT_DEBUG 440#ifdef CONFIG_MAC80211_HT_DEBUG
435 printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid); 441 printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid);
436#endif 442#endif
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index ea0122dbd2b3..7ed433c66d68 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -509,6 +509,7 @@ IEEE80211_IF_FILE(dot11MeshHWMPRannInterval,
509 u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC); 509 u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC);
510IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC); 510IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC);
511IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC); 511IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC);
512IEEE80211_IF_FILE(ht_opmode, u.mesh.mshcfg.ht_opmode, DEC);
512#endif 513#endif
513 514
514#define DEBUGFS_ADD_MODE(name, mode) \ 515#define DEBUGFS_ADD_MODE(name, mode) \
@@ -608,6 +609,7 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
608 MESHPARAMS_ADD(dot11MeshHWMPRannInterval); 609 MESHPARAMS_ADD(dot11MeshHWMPRannInterval);
609 MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol); 610 MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol);
610 MESHPARAMS_ADD(rssi_threshold); 611 MESHPARAMS_ADD(rssi_threshold);
612 MESHPARAMS_ADD(ht_opmode);
611#undef MESHPARAMS_ADD 613#undef MESHPARAMS_ADD
612} 614}
613#endif 615#endif
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 3ad33a824624..33d9d0c3e3d0 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -163,6 +163,11 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
163 sizeof(struct ieee80211_ht_operation)); 163 sizeof(struct ieee80211_ht_operation));
164 pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, 164 pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
165 sband->ht_cap.cap); 165 sband->ht_cap.cap);
166 /*
167 * Note: According to 802.11n-2009 9.13.3.1, HT Protection
168 * field and RIFS Mode are reserved in IBSS mode, therefore
169 * keep them at 0
170 */
166 pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap, 171 pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap,
167 chan, channel_type, 0); 172 chan, channel_type, 0);
168 } 173 }
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 856237c5c1f8..d4c19a7773db 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -206,8 +206,10 @@ static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
206 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 206 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
207 if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) 207 if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
208 sdata->vif.hw_queue[i] = IEEE80211_INVAL_HW_QUEUE; 208 sdata->vif.hw_queue[i] = IEEE80211_INVAL_HW_QUEUE;
209 else 209 else if (local->hw.queues >= IEEE80211_NUM_ACS)
210 sdata->vif.hw_queue[i] = i; 210 sdata->vif.hw_queue[i] = i;
211 else
212 sdata->vif.hw_queue[i] = 0;
211 } 213 }
212 sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE; 214 sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
213} 215}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index b70f7f09da61..f5548e953259 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -596,6 +596,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
596 local->hw.offchannel_tx_hw_queue = IEEE80211_INVAL_HW_QUEUE; 596 local->hw.offchannel_tx_hw_queue = IEEE80211_INVAL_HW_QUEUE;
597 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; 597 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
598 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; 598 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
599 local->hw.radiotap_mcs_details = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
600 IEEE80211_RADIOTAP_MCS_HAVE_GI |
601 IEEE80211_RADIOTAP_MCS_HAVE_BW;
599 local->user_power_level = -1; 602 local->user_power_level = -1;
600 wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask; 603 wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask;
601 604
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 0675a2fec6a6..2913113c5833 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -109,8 +109,10 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
109 109
110 /* Disallow HT40+/- mismatch */ 110 /* Disallow HT40+/- mismatch */
111 if (ie->ht_operation && 111 if (ie->ht_operation &&
112 local->_oper_channel_type > NL80211_CHAN_HT20 && 112 (local->_oper_channel_type == NL80211_CHAN_HT40MINUS ||
113 sta_channel_type > NL80211_CHAN_HT20 && 113 local->_oper_channel_type == NL80211_CHAN_HT40PLUS) &&
114 (sta_channel_type == NL80211_CHAN_HT40MINUS ||
115 sta_channel_type == NL80211_CHAN_HT40PLUS) &&
114 local->_oper_channel_type != sta_channel_type) 116 local->_oper_channel_type != sta_channel_type)
115 goto mismatch; 117 goto mismatch;
116 118
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 27e0c2f06795..9b59658e8650 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -603,7 +603,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
603 hopcount, ttl, cpu_to_le32(lifetime), 603 hopcount, ttl, cpu_to_le32(lifetime),
604 cpu_to_le32(metric), cpu_to_le32(preq_id), 604 cpu_to_le32(metric), cpu_to_le32(preq_id),
605 sdata); 605 sdata);
606 ifmsh->mshstats.fwded_mcast++; 606 if (!is_multicast_ether_addr(da))
607 ifmsh->mshstats.fwded_unicast++;
608 else
609 ifmsh->mshstats.fwded_mcast++;
607 ifmsh->mshstats.fwded_frames++; 610 ifmsh->mshstats.fwded_frames++;
608 } 611 }
609} 612}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 8cc8461b48a0..60ef235c9d9b 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -105,15 +105,15 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
105 return sta; 105 return sta;
106} 106}
107 107
108/** mesh_set_ht_prot_mode - set correct HT protection mode 108/*
109 * mesh_set_ht_prot_mode - set correct HT protection mode
109 * 110 *
110 * Section 9.23.3.5 of IEEE 80211s standard describes the protection rules for 111 * Section 9.23.3.5 of IEEE 80211-2012 describes the protection rules for HT
111 * HT mesh STA in a MBSS. Three HT protection modes are supported for now, 112 * mesh STA in a MBSS. Three HT protection modes are supported for now, non-HT
112 * non-HT mixed mode, 20MHz-protection and no-protection mode. non-HT mixed 113 * mixed mode, 20MHz-protection and no-protection mode. non-HT mixed mode is
113 * mode is selected if any non-HT peers are present in our MBSS. 114 * selected if any non-HT peers are present in our MBSS. 20MHz-protection mode
114 * 20MHz-protection mode is selected if all peers in our 20/40MHz MBSS support 115 * is selected if all peers in our 20/40MHz MBSS support HT and atleast one
115 * HT and atleast one HT20 peer is present. Otherwise no-protection mode is 116 * HT20 peer is present. Otherwise no-protection mode is selected.
116 * selected.
117 */ 117 */
118static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata) 118static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
119{ 119{
@@ -128,21 +128,22 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
128 128
129 rcu_read_lock(); 129 rcu_read_lock();
130 list_for_each_entry_rcu(sta, &local->sta_list, list) { 130 list_for_each_entry_rcu(sta, &local->sta_list, list) {
131 if (sdata == sta->sdata && 131 if (sdata != sta->sdata ||
132 sta->plink_state == NL80211_PLINK_ESTAB) { 132 sta->plink_state != NL80211_PLINK_ESTAB)
133 switch (sta->ch_type) { 133 continue;
134 case NL80211_CHAN_NO_HT: 134
135 mpl_dbg("mesh_plink %pM: nonHT sta (%pM) is present", 135 switch (sta->ch_type) {
136 sdata->vif.addr, sta->sta.addr); 136 case NL80211_CHAN_NO_HT:
137 non_ht_sta = true; 137 mpl_dbg("mesh_plink %pM: nonHT sta (%pM) is present",
138 goto out; 138 sdata->vif.addr, sta->sta.addr);
139 case NL80211_CHAN_HT20: 139 non_ht_sta = true;
140 mpl_dbg("mesh_plink %pM: HT20 sta (%pM) is present", 140 goto out;
141 sdata->vif.addr, sta->sta.addr); 141 case NL80211_CHAN_HT20:
142 ht20_sta = true; 142 mpl_dbg("mesh_plink %pM: HT20 sta (%pM) is present",
143 default: 143 sdata->vif.addr, sta->sta.addr);
144 break; 144 ht20_sta = true;
145 } 145 default:
146 break;
146 } 147 }
147 } 148 }
148out: 149out:
@@ -346,6 +347,15 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
346 347
347 sta = sta_info_get(sdata, addr); 348 sta = sta_info_get(sdata, addr);
348 if (!sta) { 349 if (!sta) {
350 /* Userspace handles peer allocation when security is enabled */
351 if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
352 cfg80211_notify_new_peer_candidate(sdata->dev, addr,
353 elems->ie_start,
354 elems->total_len,
355 GFP_ATOMIC);
356 return NULL;
357 }
358
349 sta = mesh_plink_alloc(sdata, addr); 359 sta = mesh_plink_alloc(sdata, addr);
350 if (!sta) 360 if (!sta)
351 return NULL; 361 return NULL;
@@ -387,15 +397,6 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
387{ 397{
388 struct sta_info *sta; 398 struct sta_info *sta;
389 399
390 /* Userspace handles peer allocation when security is enabled */
391 if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
392 cfg80211_notify_new_peer_candidate(sdata->dev, hw_addr,
393 elems->ie_start,
394 elems->total_len,
395 GFP_KERNEL);
396 return;
397 }
398
399 rcu_read_lock(); 400 rcu_read_lock();
400 sta = mesh_peer_init(sdata, hw_addr, elems); 401 sta = mesh_peer_init(sdata, hw_addr, elems);
401 if (!sta) 402 if (!sta)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 8257a09eeed4..7bcecf73aafb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -204,14 +204,14 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
204 204
205 if (status->flag & RX_FLAG_HT) { 205 if (status->flag & RX_FLAG_HT) {
206 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS); 206 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
207 *pos++ = IEEE80211_RADIOTAP_MCS_HAVE_MCS | 207 *pos++ = local->hw.radiotap_mcs_details;
208 IEEE80211_RADIOTAP_MCS_HAVE_GI |
209 IEEE80211_RADIOTAP_MCS_HAVE_BW;
210 *pos = 0; 208 *pos = 0;
211 if (status->flag & RX_FLAG_SHORT_GI) 209 if (status->flag & RX_FLAG_SHORT_GI)
212 *pos |= IEEE80211_RADIOTAP_MCS_SGI; 210 *pos |= IEEE80211_RADIOTAP_MCS_SGI;
213 if (status->flag & RX_FLAG_40MHZ) 211 if (status->flag & RX_FLAG_40MHZ)
214 *pos |= IEEE80211_RADIOTAP_MCS_BW_40; 212 *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
213 if (status->flag & RX_FLAG_HT_GF)
214 *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
215 pos++; 215 pos++;
216 *pos++ = status->rate_idx; 216 *pos++ = status->rate_idx;
217 } 217 }
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 7aa31bbfaa3b..c04d401dae92 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -92,6 +92,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
92 int keylen, int keyidx) 92 int keylen, int keyidx)
93{ 93{
94 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 94 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
95 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
95 unsigned int hdrlen; 96 unsigned int hdrlen;
96 u8 *newhdr; 97 u8 *newhdr;
97 98
@@ -104,6 +105,13 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
104 hdrlen = ieee80211_hdrlen(hdr->frame_control); 105 hdrlen = ieee80211_hdrlen(hdr->frame_control);
105 newhdr = skb_push(skb, WEP_IV_LEN); 106 newhdr = skb_push(skb, WEP_IV_LEN);
106 memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen); 107 memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen);
108
109 /* the HW only needs room for the IV, but not the actual IV */
110 if (info->control.hw_key &&
111 (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
112 return newhdr + hdrlen;
113
114 skb_set_network_header(skb, skb_network_offset(skb) + WEP_IV_LEN);
107 ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen); 115 ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen);
108 return newhdr + hdrlen; 116 return newhdr + hdrlen;
109} 117}
@@ -313,14 +321,15 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
313static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) 321static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
314{ 322{
315 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 323 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
324 struct ieee80211_key_conf *hw_key = info->control.hw_key;
316 325
317 if (!info->control.hw_key) { 326 if (!hw_key) {
318 if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key, 327 if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key,
319 tx->key->conf.keylen, 328 tx->key->conf.keylen,
320 tx->key->conf.keyidx)) 329 tx->key->conf.keyidx))
321 return -1; 330 return -1;
322 } else if (info->control.hw_key->flags & 331 } else if ((hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
323 IEEE80211_KEY_FLAG_GENERATE_IV) { 332 (hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
324 if (!ieee80211_wep_add_iv(tx->local, skb, 333 if (!ieee80211_wep_add_iv(tx->local, skb,
325 tx->key->conf.keylen, 334 tx->key->conf.keylen,
326 tx->key->conf.keyidx)) 335 tx->key->conf.keyidx))
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 0ae23c60968c..bdb53aba888e 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -183,7 +183,8 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
183 u8 *pos; 183 u8 *pos;
184 184
185 if (info->control.hw_key && 185 if (info->control.hw_key &&
186 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { 186 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
187 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
187 /* hwaccel - with no need for software-generated IV */ 188 /* hwaccel - with no need for software-generated IV */
188 return 0; 189 return 0;
189 } 190 }
@@ -202,8 +203,14 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
202 203
203 pos = skb_push(skb, TKIP_IV_LEN); 204 pos = skb_push(skb, TKIP_IV_LEN);
204 memmove(pos, pos + TKIP_IV_LEN, hdrlen); 205 memmove(pos, pos + TKIP_IV_LEN, hdrlen);
206 skb_set_network_header(skb, skb_network_offset(skb) + TKIP_IV_LEN);
205 pos += hdrlen; 207 pos += hdrlen;
206 208
209 /* the HW only needs room for the IV, but not the actual IV */
210 if (info->control.hw_key &&
211 (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
212 return 0;
213
207 /* Increase IV for the frame */ 214 /* Increase IV for the frame */
208 spin_lock_irqsave(&key->u.tkip.txlock, flags); 215 spin_lock_irqsave(&key->u.tkip.txlock, flags);
209 key->u.tkip.tx.iv16++; 216 key->u.tkip.tx.iv16++;
@@ -422,6 +429,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
422 429
423 pos = skb_push(skb, CCMP_HDR_LEN); 430 pos = skb_push(skb, CCMP_HDR_LEN);
424 memmove(pos, pos + CCMP_HDR_LEN, hdrlen); 431 memmove(pos, pos + CCMP_HDR_LEN, hdrlen);
432 skb_set_network_header(skb, skb_network_offset(skb) + CCMP_HDR_LEN);
425 433
426 /* the HW only needs room for the IV, but not the actual IV */ 434 /* the HW only needs room for the IV, but not the actual IV */
427 if (info->control.hw_key && 435 if (info->control.hw_key &&
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 3192c3f589ee..9f6ce011d35d 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -97,7 +97,7 @@ int nfc_dev_down(struct nfc_dev *dev)
97 goto error; 97 goto error;
98 } 98 }
99 99
100 if (dev->polling || dev->activated_target_idx != NFC_TARGET_IDX_NONE) { 100 if (dev->polling || dev->active_target) {
101 rc = -EBUSY; 101 rc = -EBUSY;
102 goto error; 102 goto error;
103 } 103 }
@@ -183,11 +183,27 @@ error:
183 return rc; 183 return rc;
184} 184}
185 185
186static struct nfc_target *nfc_find_target(struct nfc_dev *dev, u32 target_idx)
187{
188 int i;
189
190 if (dev->n_targets == 0)
191 return NULL;
192
193 for (i = 0; i < dev->n_targets ; i++) {
194 if (dev->targets[i].idx == target_idx)
195 return &dev->targets[i];
196 }
197
198 return NULL;
199}
200
186int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode) 201int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
187{ 202{
188 int rc = 0; 203 int rc = 0;
189 u8 *gb; 204 u8 *gb;
190 size_t gb_len; 205 size_t gb_len;
206 struct nfc_target *target;
191 207
192 pr_debug("dev_name=%s comm %d\n", dev_name(&dev->dev), comm_mode); 208 pr_debug("dev_name=%s comm %d\n", dev_name(&dev->dev), comm_mode);
193 209
@@ -212,9 +228,15 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
212 goto error; 228 goto error;
213 } 229 }
214 230
215 rc = dev->ops->dep_link_up(dev, target_index, comm_mode, gb, gb_len); 231 target = nfc_find_target(dev, target_index);
232 if (target == NULL) {
233 rc = -ENOTCONN;
234 goto error;
235 }
236
237 rc = dev->ops->dep_link_up(dev, target, comm_mode, gb, gb_len);
216 if (!rc) 238 if (!rc)
217 dev->activated_target_idx = target_index; 239 dev->active_target = target;
218 240
219error: 241error:
220 device_unlock(&dev->dev); 242 device_unlock(&dev->dev);
@@ -250,7 +272,7 @@ int nfc_dep_link_down(struct nfc_dev *dev)
250 rc = dev->ops->dep_link_down(dev); 272 rc = dev->ops->dep_link_down(dev);
251 if (!rc) { 273 if (!rc) {
252 dev->dep_link_up = false; 274 dev->dep_link_up = false;
253 dev->activated_target_idx = NFC_TARGET_IDX_NONE; 275 dev->active_target = NULL;
254 nfc_llcp_mac_is_down(dev); 276 nfc_llcp_mac_is_down(dev);
255 nfc_genl_dep_link_down_event(dev); 277 nfc_genl_dep_link_down_event(dev);
256 } 278 }
@@ -282,6 +304,7 @@ EXPORT_SYMBOL(nfc_dep_link_is_up);
282int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol) 304int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
283{ 305{
284 int rc; 306 int rc;
307 struct nfc_target *target;
285 308
286 pr_debug("dev_name=%s target_idx=%u protocol=%u\n", 309 pr_debug("dev_name=%s target_idx=%u protocol=%u\n",
287 dev_name(&dev->dev), target_idx, protocol); 310 dev_name(&dev->dev), target_idx, protocol);
@@ -293,9 +316,20 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
293 goto error; 316 goto error;
294 } 317 }
295 318
296 rc = dev->ops->activate_target(dev, target_idx, protocol); 319 if (dev->active_target) {
320 rc = -EBUSY;
321 goto error;
322 }
323
324 target = nfc_find_target(dev, target_idx);
325 if (target == NULL) {
326 rc = -ENOTCONN;
327 goto error;
328 }
329
330 rc = dev->ops->activate_target(dev, target, protocol);
297 if (!rc) { 331 if (!rc) {
298 dev->activated_target_idx = target_idx; 332 dev->active_target = target;
299 333
300 if (dev->ops->check_presence) 334 if (dev->ops->check_presence)
301 mod_timer(&dev->check_pres_timer, jiffies + 335 mod_timer(&dev->check_pres_timer, jiffies +
@@ -327,11 +361,21 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx)
327 goto error; 361 goto error;
328 } 362 }
329 363
364 if (dev->active_target == NULL) {
365 rc = -ENOTCONN;
366 goto error;
367 }
368
369 if (dev->active_target->idx != target_idx) {
370 rc = -ENOTCONN;
371 goto error;
372 }
373
330 if (dev->ops->check_presence) 374 if (dev->ops->check_presence)
331 del_timer_sync(&dev->check_pres_timer); 375 del_timer_sync(&dev->check_pres_timer);
332 376
333 dev->ops->deactivate_target(dev, target_idx); 377 dev->ops->deactivate_target(dev, dev->active_target);
334 dev->activated_target_idx = NFC_TARGET_IDX_NONE; 378 dev->active_target = NULL;
335 379
336error: 380error:
337 device_unlock(&dev->dev); 381 device_unlock(&dev->dev);
@@ -365,13 +409,13 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
365 goto error; 409 goto error;
366 } 410 }
367 411
368 if (dev->activated_target_idx == NFC_TARGET_IDX_NONE) { 412 if (dev->active_target == NULL) {
369 rc = -ENOTCONN; 413 rc = -ENOTCONN;
370 kfree_skb(skb); 414 kfree_skb(skb);
371 goto error; 415 goto error;
372 } 416 }
373 417
374 if (target_idx != dev->activated_target_idx) { 418 if (dev->active_target->idx != target_idx) {
375 rc = -EADDRNOTAVAIL; 419 rc = -EADDRNOTAVAIL;
376 kfree_skb(skb); 420 kfree_skb(skb);
377 goto error; 421 goto error;
@@ -380,7 +424,8 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
380 if (dev->ops->check_presence) 424 if (dev->ops->check_presence)
381 del_timer_sync(&dev->check_pres_timer); 425 del_timer_sync(&dev->check_pres_timer);
382 426
383 rc = dev->ops->data_exchange(dev, target_idx, skb, cb, cb_context); 427 rc = dev->ops->data_exchange(dev, dev->active_target, skb, cb,
428 cb_context);
384 429
385 if (!rc && dev->ops->check_presence) 430 if (!rc && dev->ops->check_presence)
386 mod_timer(&dev->check_pres_timer, jiffies + 431 mod_timer(&dev->check_pres_timer, jiffies +
@@ -456,6 +501,9 @@ EXPORT_SYMBOL(nfc_alloc_recv_skb);
456 * The device driver must call this function when one or many nfc targets 501 * The device driver must call this function when one or many nfc targets
457 * are found. After calling this function, the device driver must stop 502 * are found. After calling this function, the device driver must stop
458 * polling for targets. 503 * polling for targets.
504 * IMPORTANT: this function must not be called from an atomic context.
505 * In addition, it must also not be called from a context that would prevent
506 * the NFC Core to call other nfc ops entry point concurrently.
459 */ 507 */
460int nfc_targets_found(struct nfc_dev *dev, 508int nfc_targets_found(struct nfc_dev *dev,
461 struct nfc_target *targets, int n_targets) 509 struct nfc_target *targets, int n_targets)
@@ -469,7 +517,7 @@ int nfc_targets_found(struct nfc_dev *dev,
469 for (i = 0; i < n_targets; i++) 517 for (i = 0; i < n_targets; i++)
470 targets[i].idx = dev->target_next_idx++; 518 targets[i].idx = dev->target_next_idx++;
471 519
472 spin_lock_bh(&dev->targets_lock); 520 device_lock(&dev->dev);
473 521
474 dev->targets_generation++; 522 dev->targets_generation++;
475 523
@@ -479,12 +527,12 @@ int nfc_targets_found(struct nfc_dev *dev,
479 527
480 if (!dev->targets) { 528 if (!dev->targets) {
481 dev->n_targets = 0; 529 dev->n_targets = 0;
482 spin_unlock_bh(&dev->targets_lock); 530 device_unlock(&dev->dev);
483 return -ENOMEM; 531 return -ENOMEM;
484 } 532 }
485 533
486 dev->n_targets = n_targets; 534 dev->n_targets = n_targets;
487 spin_unlock_bh(&dev->targets_lock); 535 device_unlock(&dev->dev);
488 536
489 nfc_genl_targets_found(dev); 537 nfc_genl_targets_found(dev);
490 538
@@ -492,6 +540,18 @@ int nfc_targets_found(struct nfc_dev *dev,
492} 540}
493EXPORT_SYMBOL(nfc_targets_found); 541EXPORT_SYMBOL(nfc_targets_found);
494 542
543/**
544 * nfc_target_lost - inform that an activated target went out of field
545 *
546 * @dev: The nfc device that had the activated target in field
547 * @target_idx: the nfc index of the target
548 *
549 * The device driver must call this function when the activated target
550 * goes out of the field.
551 * IMPORTANT: this function must not be called from an atomic context.
552 * In addition, it must also not be called from a context that would prevent
553 * the NFC Core to call other nfc ops entry point concurrently.
554 */
495int nfc_target_lost(struct nfc_dev *dev, u32 target_idx) 555int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
496{ 556{
497 struct nfc_target *tg; 557 struct nfc_target *tg;
@@ -499,7 +559,7 @@ int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
499 559
500 pr_debug("dev_name %s n_target %d\n", dev_name(&dev->dev), target_idx); 560 pr_debug("dev_name %s n_target %d\n", dev_name(&dev->dev), target_idx);
501 561
502 spin_lock_bh(&dev->targets_lock); 562 device_lock(&dev->dev);
503 563
504 for (i = 0; i < dev->n_targets; i++) { 564 for (i = 0; i < dev->n_targets; i++) {
505 tg = &dev->targets[i]; 565 tg = &dev->targets[i];
@@ -508,13 +568,13 @@ int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
508 } 568 }
509 569
510 if (i == dev->n_targets) { 570 if (i == dev->n_targets) {
511 spin_unlock_bh(&dev->targets_lock); 571 device_unlock(&dev->dev);
512 return -EINVAL; 572 return -EINVAL;
513 } 573 }
514 574
515 dev->targets_generation++; 575 dev->targets_generation++;
516 dev->n_targets--; 576 dev->n_targets--;
517 dev->activated_target_idx = NFC_TARGET_IDX_NONE; 577 dev->active_target = NULL;
518 578
519 if (dev->n_targets) { 579 if (dev->n_targets) {
520 memcpy(&dev->targets[i], &dev->targets[i + 1], 580 memcpy(&dev->targets[i], &dev->targets[i + 1],
@@ -524,7 +584,7 @@ int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
524 dev->targets = NULL; 584 dev->targets = NULL;
525 } 585 }
526 586
527 spin_unlock_bh(&dev->targets_lock); 587 device_unlock(&dev->dev);
528 588
529 nfc_genl_target_lost(dev, target_idx); 589 nfc_genl_target_lost(dev, target_idx);
530 590
@@ -556,15 +616,16 @@ static void nfc_check_pres_work(struct work_struct *work)
556 616
557 device_lock(&dev->dev); 617 device_lock(&dev->dev);
558 618
559 if (dev->activated_target_idx != NFC_TARGET_IDX_NONE && 619 if (dev->active_target && timer_pending(&dev->check_pres_timer) == 0) {
560 timer_pending(&dev->check_pres_timer) == 0) { 620 rc = dev->ops->check_presence(dev, dev->active_target);
561 rc = dev->ops->check_presence(dev, dev->activated_target_idx);
562 if (!rc) { 621 if (!rc) {
563 mod_timer(&dev->check_pres_timer, jiffies + 622 mod_timer(&dev->check_pres_timer, jiffies +
564 msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS)); 623 msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
565 } else { 624 } else {
566 nfc_target_lost(dev, dev->activated_target_idx); 625 u32 active_target_idx = dev->active_target->idx;
567 dev->activated_target_idx = NFC_TARGET_IDX_NONE; 626 device_unlock(&dev->dev);
627 nfc_target_lost(dev, active_target_idx);
628 return;
568 } 629 }
569 } 630 }
570 631
@@ -637,14 +698,12 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
637 dev->tx_headroom = tx_headroom; 698 dev->tx_headroom = tx_headroom;
638 dev->tx_tailroom = tx_tailroom; 699 dev->tx_tailroom = tx_tailroom;
639 700
640 spin_lock_init(&dev->targets_lock);
641 nfc_genl_data_init(&dev->genl_data); 701 nfc_genl_data_init(&dev->genl_data);
642 702
703
643 /* first generation must not be 0 */ 704 /* first generation must not be 0 */
644 dev->targets_generation = 1; 705 dev->targets_generation = 1;
645 706
646 dev->activated_target_idx = NFC_TARGET_IDX_NONE;
647
648 if (ops->check_presence) { 707 if (ops->check_presence) {
649 char name[32]; 708 char name[32];
650 init_timer(&dev->check_pres_timer); 709 init_timer(&dev->check_pres_timer);
@@ -662,7 +721,6 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
662 } 721 }
663 } 722 }
664 723
665
666 return dev; 724 return dev;
667} 725}
668EXPORT_SYMBOL(nfc_allocate_device); 726EXPORT_SYMBOL(nfc_allocate_device);
diff --git a/net/nfc/hci/Kconfig b/net/nfc/hci/Kconfig
index 17213a6362b4..fd67f51d18e9 100644
--- a/net/nfc/hci/Kconfig
+++ b/net/nfc/hci/Kconfig
@@ -9,6 +9,7 @@ config NFC_HCI
9 9
10config NFC_SHDLC 10config NFC_SHDLC
11 depends on NFC_HCI 11 depends on NFC_HCI
12 select CRC_CCITT
12 bool "SHDLC link layer for HCI based NFC drivers" 13 bool "SHDLC link layer for HCI based NFC drivers"
13 default n 14 default n
14 ---help--- 15 ---help---
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index 86fd00d5a099..e1a640d2b588 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -235,13 +235,6 @@ static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
235 targets->hci_reader_gate = gate; 235 targets->hci_reader_gate = gate;
236 236
237 r = nfc_targets_found(hdev->ndev, targets, 1); 237 r = nfc_targets_found(hdev->ndev, targets, 1);
238 if (r < 0)
239 goto exit;
240
241 kfree(hdev->targets);
242 hdev->targets = targets;
243 targets = NULL;
244 hdev->target_count = 1;
245 238
246exit: 239exit:
247 kfree(targets); 240 kfree(targets);
@@ -258,11 +251,6 @@ void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
258 251
259 switch (event) { 252 switch (event) {
260 case NFC_HCI_EVT_TARGET_DISCOVERED: 253 case NFC_HCI_EVT_TARGET_DISCOVERED:
261 if (hdev->poll_started == false) {
262 r = -EPROTO;
263 goto exit;
264 }
265
266 if (skb->len < 1) { /* no status data? */ 254 if (skb->len < 1) { /* no status data? */
267 r = -EPROTO; 255 r = -EPROTO;
268 goto exit; 256 goto exit;
@@ -496,74 +484,42 @@ static int hci_dev_down(struct nfc_dev *nfc_dev)
496static int hci_start_poll(struct nfc_dev *nfc_dev, u32 protocols) 484static int hci_start_poll(struct nfc_dev *nfc_dev, u32 protocols)
497{ 485{
498 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); 486 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
499 int r;
500 487
501 if (hdev->ops->start_poll) 488 if (hdev->ops->start_poll)
502 r = hdev->ops->start_poll(hdev, protocols); 489 return hdev->ops->start_poll(hdev, protocols);
503 else 490 else
504 r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, 491 return nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
505 NFC_HCI_EVT_READER_REQUESTED, NULL, 0); 492 NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
506 if (r == 0)
507 hdev->poll_started = true;
508
509 return r;
510} 493}
511 494
512static void hci_stop_poll(struct nfc_dev *nfc_dev) 495static void hci_stop_poll(struct nfc_dev *nfc_dev)
513{ 496{
514 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); 497 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
515 498
516 if (hdev->poll_started) { 499 nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
517 nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, 500 NFC_HCI_EVT_END_OPERATION, NULL, 0);
518 NFC_HCI_EVT_END_OPERATION, NULL, 0);
519 hdev->poll_started = false;
520 }
521}
522
523static struct nfc_target *hci_find_target(struct nfc_hci_dev *hdev,
524 u32 target_idx)
525{
526 int i;
527 if (hdev->poll_started == false || hdev->targets == NULL)
528 return NULL;
529
530 for (i = 0; i < hdev->target_count; i++) {
531 if (hdev->targets[i].idx == target_idx)
532 return &hdev->targets[i];
533 }
534
535 return NULL;
536} 501}
537 502
538static int hci_activate_target(struct nfc_dev *nfc_dev, u32 target_idx, 503static int hci_activate_target(struct nfc_dev *nfc_dev,
539 u32 protocol) 504 struct nfc_target *target, u32 protocol)
540{ 505{
541 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
542
543 if (hci_find_target(hdev, target_idx) == NULL)
544 return -ENOMEDIUM;
545
546 return 0; 506 return 0;
547} 507}
548 508
549static void hci_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx) 509static void hci_deactivate_target(struct nfc_dev *nfc_dev,
510 struct nfc_target *target)
550{ 511{
551} 512}
552 513
553static int hci_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx, 514static int hci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target,
554 struct sk_buff *skb, data_exchange_cb_t cb, 515 struct sk_buff *skb, data_exchange_cb_t cb,
555 void *cb_context) 516 void *cb_context)
556{ 517{
557 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); 518 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
558 int r; 519 int r;
559 struct nfc_target *target;
560 struct sk_buff *res_skb = NULL; 520 struct sk_buff *res_skb = NULL;
561 521
562 pr_debug("target_idx=%d\n", target_idx); 522 pr_debug("target_idx=%d\n", target->idx);
563
564 target = hci_find_target(hdev, target_idx);
565 if (target == NULL)
566 return -ENOMEDIUM;
567 523
568 switch (target->hci_reader_gate) { 524 switch (target->hci_reader_gate) {
569 case NFC_HCI_RF_READER_A_GATE: 525 case NFC_HCI_RF_READER_A_GATE:
@@ -605,7 +561,18 @@ static int hci_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
605 return 0; 561 return 0;
606} 562}
607 563
608struct nfc_ops hci_nfc_ops = { 564static int hci_check_presence(struct nfc_dev *nfc_dev,
565 struct nfc_target *target)
566{
567 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
568
569 if (hdev->ops->check_presence)
570 return hdev->ops->check_presence(hdev, target);
571
572 return 0;
573}
574
575static struct nfc_ops hci_nfc_ops = {
609 .dev_up = hci_dev_up, 576 .dev_up = hci_dev_up,
610 .dev_down = hci_dev_down, 577 .dev_down = hci_dev_down,
611 .start_poll = hci_start_poll, 578 .start_poll = hci_start_poll,
@@ -613,6 +580,7 @@ struct nfc_ops hci_nfc_ops = {
613 .activate_target = hci_activate_target, 580 .activate_target = hci_activate_target,
614 .deactivate_target = hci_deactivate_target, 581 .deactivate_target = hci_deactivate_target,
615 .data_exchange = hci_data_exchange, 582 .data_exchange = hci_data_exchange,
583 .check_presence = hci_check_presence,
616}; 584};
617 585
618struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops, 586struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
diff --git a/net/nfc/hci/shdlc.c b/net/nfc/hci/shdlc.c
index 923bdf7c26d6..5665dc6d893a 100644
--- a/net/nfc/hci/shdlc.c
+++ b/net/nfc/hci/shdlc.c
@@ -816,6 +816,17 @@ static int nfc_shdlc_data_exchange(struct nfc_hci_dev *hdev,
816 return -EPERM; 816 return -EPERM;
817} 817}
818 818
819static int nfc_shdlc_check_presence(struct nfc_hci_dev *hdev,
820 struct nfc_target *target)
821{
822 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
823
824 if (shdlc->ops->check_presence)
825 return shdlc->ops->check_presence(shdlc, target);
826
827 return 0;
828}
829
819static struct nfc_hci_ops shdlc_ops = { 830static struct nfc_hci_ops shdlc_ops = {
820 .open = nfc_shdlc_open, 831 .open = nfc_shdlc_open,
821 .close = nfc_shdlc_close, 832 .close = nfc_shdlc_close,
@@ -825,6 +836,7 @@ static struct nfc_hci_ops shdlc_ops = {
825 .target_from_gate = nfc_shdlc_target_from_gate, 836 .target_from_gate = nfc_shdlc_target_from_gate,
826 .complete_target_discovered = nfc_shdlc_complete_target_discovered, 837 .complete_target_discovered = nfc_shdlc_complete_target_discovered,
827 .data_exchange = nfc_shdlc_data_exchange, 838 .data_exchange = nfc_shdlc_data_exchange,
839 .check_presence = nfc_shdlc_check_presence,
828}; 840};
829 841
830struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops, 842struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops,
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index 11a3b7d98dc5..bf8ae4f0b90c 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -488,7 +488,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
488 488
489 memcpy(skb_put(pdu, frag_len), msg_ptr, frag_len); 489 memcpy(skb_put(pdu, frag_len), msg_ptr, frag_len);
490 490
491 skb_queue_head(&sock->tx_queue, pdu); 491 skb_queue_tail(&sock->tx_queue, pdu);
492 492
493 lock_sock(sk); 493 lock_sock(sk);
494 494
@@ -502,7 +502,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
502 502
503 kfree(msg_data); 503 kfree(msg_data);
504 504
505 return 0; 505 return len;
506} 506}
507 507
508int nfc_llcp_send_rr(struct nfc_llcp_sock *sock) 508int nfc_llcp_send_rr(struct nfc_llcp_sock *sock)
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 92988aa620dc..42994fac26d6 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -448,6 +448,8 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
448{ 448{
449 struct nfc_llcp_sock *sock, *llcp_sock, *n; 449 struct nfc_llcp_sock *sock, *llcp_sock, *n;
450 450
451 pr_debug("ssap dsap %d %d\n", ssap, dsap);
452
451 if (ssap == 0 && dsap == 0) 453 if (ssap == 0 && dsap == 0)
452 return NULL; 454 return NULL;
453 455
@@ -783,6 +785,7 @@ static void nfc_llcp_recv_disc(struct nfc_llcp_local *local,
783static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb) 785static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
784{ 786{
785 struct nfc_llcp_sock *llcp_sock; 787 struct nfc_llcp_sock *llcp_sock;
788 struct sock *sk;
786 u8 dsap, ssap; 789 u8 dsap, ssap;
787 790
788 dsap = nfc_llcp_dsap(skb); 791 dsap = nfc_llcp_dsap(skb);
@@ -801,10 +804,14 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
801 } 804 }
802 805
803 llcp_sock->dsap = ssap; 806 llcp_sock->dsap = ssap;
807 sk = &llcp_sock->sk;
804 808
805 nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE], 809 nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
806 skb->len - LLCP_HEADER_SIZE); 810 skb->len - LLCP_HEADER_SIZE);
807 811
812 sk->sk_state = LLCP_CONNECTED;
813 sk->sk_state_change(sk);
814
808 nfc_llcp_sock_put(llcp_sock); 815 nfc_llcp_sock_put(llcp_sock);
809} 816}
810 817
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index c13e02ebdef9..3f339b19d140 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -27,6 +27,42 @@
27#include "../nfc.h" 27#include "../nfc.h"
28#include "llcp.h" 28#include "llcp.h"
29 29
30static int sock_wait_state(struct sock *sk, int state, unsigned long timeo)
31{
32 DECLARE_WAITQUEUE(wait, current);
33 int err = 0;
34
35 pr_debug("sk %p", sk);
36
37 add_wait_queue(sk_sleep(sk), &wait);
38 set_current_state(TASK_INTERRUPTIBLE);
39
40 while (sk->sk_state != state) {
41 if (!timeo) {
42 err = -EINPROGRESS;
43 break;
44 }
45
46 if (signal_pending(current)) {
47 err = sock_intr_errno(timeo);
48 break;
49 }
50
51 release_sock(sk);
52 timeo = schedule_timeout(timeo);
53 lock_sock(sk);
54 set_current_state(TASK_INTERRUPTIBLE);
55
56 err = sock_error(sk);
57 if (err)
58 break;
59 }
60
61 __set_current_state(TASK_RUNNING);
62 remove_wait_queue(sk_sleep(sk), &wait);
63 return err;
64}
65
30static struct proto llcp_sock_proto = { 66static struct proto llcp_sock_proto = {
31 .name = "NFC_LLCP", 67 .name = "NFC_LLCP",
32 .owner = THIS_MODULE, 68 .owner = THIS_MODULE,
@@ -304,11 +340,24 @@ static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
304 mask |= POLLERR; 340 mask |= POLLERR;
305 341
306 if (!skb_queue_empty(&sk->sk_receive_queue)) 342 if (!skb_queue_empty(&sk->sk_receive_queue))
307 mask |= POLLIN; 343 mask |= POLLIN | POLLRDNORM;
308 344
309 if (sk->sk_state == LLCP_CLOSED) 345 if (sk->sk_state == LLCP_CLOSED)
310 mask |= POLLHUP; 346 mask |= POLLHUP;
311 347
348 if (sk->sk_shutdown & RCV_SHUTDOWN)
349 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
350
351 if (sk->sk_shutdown == SHUTDOWN_MASK)
352 mask |= POLLHUP;
353
354 if (sock_writeable(sk))
355 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
356 else
357 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
358
359 pr_debug("mask 0x%x\n", mask);
360
312 return mask; 361 return mask;
313} 362}
314 363
@@ -462,9 +511,13 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
462 if (ret) 511 if (ret)
463 goto put_dev; 512 goto put_dev;
464 513
465 sk->sk_state = LLCP_CONNECTED; 514 ret = sock_wait_state(sk, LLCP_CONNECTED,
515 sock_sndtimeo(sk, flags & O_NONBLOCK));
516 if (ret)
517 goto put_dev;
466 518
467 release_sock(sk); 519 release_sock(sk);
520
468 return 0; 521 return 0;
469 522
470put_dev: 523put_dev:
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 8737c2089fdd..d560e6f13072 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -436,16 +436,16 @@ static void nci_stop_poll(struct nfc_dev *nfc_dev)
436 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); 436 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
437} 437}
438 438
439static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx, 439static int nci_activate_target(struct nfc_dev *nfc_dev,
440 __u32 protocol) 440 struct nfc_target *target, __u32 protocol)
441{ 441{
442 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 442 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
443 struct nci_rf_discover_select_param param; 443 struct nci_rf_discover_select_param param;
444 struct nfc_target *target = NULL; 444 struct nfc_target *nci_target = NULL;
445 int i; 445 int i;
446 int rc = 0; 446 int rc = 0;
447 447
448 pr_debug("target_idx %d, protocol 0x%x\n", target_idx, protocol); 448 pr_debug("target_idx %d, protocol 0x%x\n", target->idx, protocol);
449 449
450 if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) && 450 if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) &&
451 (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) { 451 (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
@@ -459,25 +459,25 @@ static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
459 } 459 }
460 460
461 for (i = 0; i < ndev->n_targets; i++) { 461 for (i = 0; i < ndev->n_targets; i++) {
462 if (ndev->targets[i].idx == target_idx) { 462 if (ndev->targets[i].idx == target->idx) {
463 target = &ndev->targets[i]; 463 nci_target = &ndev->targets[i];
464 break; 464 break;
465 } 465 }
466 } 466 }
467 467
468 if (!target) { 468 if (!nci_target) {
469 pr_err("unable to find the selected target\n"); 469 pr_err("unable to find the selected target\n");
470 return -EINVAL; 470 return -EINVAL;
471 } 471 }
472 472
473 if (!(target->supported_protocols & (1 << protocol))) { 473 if (!(nci_target->supported_protocols & (1 << protocol))) {
474 pr_err("target does not support the requested protocol 0x%x\n", 474 pr_err("target does not support the requested protocol 0x%x\n",
475 protocol); 475 protocol);
476 return -EINVAL; 476 return -EINVAL;
477 } 477 }
478 478
479 if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) { 479 if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
480 param.rf_discovery_id = target->logical_idx; 480 param.rf_discovery_id = nci_target->logical_idx;
481 481
482 if (protocol == NFC_PROTO_JEWEL) 482 if (protocol == NFC_PROTO_JEWEL)
483 param.rf_protocol = NCI_RF_PROTOCOL_T1T; 483 param.rf_protocol = NCI_RF_PROTOCOL_T1T;
@@ -501,11 +501,12 @@ static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
501 return rc; 501 return rc;
502} 502}
503 503
504static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx) 504static void nci_deactivate_target(struct nfc_dev *nfc_dev,
505 struct nfc_target *target)
505{ 506{
506 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 507 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
507 508
508 pr_debug("target_idx %d\n", target_idx); 509 pr_debug("target_idx %d\n", target->idx);
509 510
510 if (!ndev->target_active_prot) { 511 if (!ndev->target_active_prot) {
511 pr_err("unable to deactivate target, no active target\n"); 512 pr_err("unable to deactivate target, no active target\n");
@@ -520,14 +521,14 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
520 } 521 }
521} 522}
522 523
523static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx, 524static int nci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target,
524 struct sk_buff *skb, 525 struct sk_buff *skb,
525 data_exchange_cb_t cb, void *cb_context) 526 data_exchange_cb_t cb, void *cb_context)
526{ 527{
527 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 528 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
528 int rc; 529 int rc;
529 530
530 pr_debug("target_idx %d, len %d\n", target_idx, skb->len); 531 pr_debug("target_idx %d, len %d\n", target->idx, skb->len);
531 532
532 if (!ndev->target_active_prot) { 533 if (!ndev->target_active_prot) {
533 pr_err("unable to exchange data, no active target\n"); 534 pr_err("unable to exchange data, no active target\n");
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
index a0bc326308a5..76c48c5324f8 100644
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -49,7 +49,7 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
49 49
50 if (cb) { 50 if (cb) {
51 ndev->data_exchange_cb = NULL; 51 ndev->data_exchange_cb = NULL;
52 ndev->data_exchange_cb_context = 0; 52 ndev->data_exchange_cb_context = NULL;
53 53
54 /* forward skb to nfc core */ 54 /* forward skb to nfc core */
55 cb(cb_context, skb, err); 55 cb(cb_context, skb, err);
@@ -200,10 +200,10 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev,
200 pr_err("error adding room for accumulated rx data\n"); 200 pr_err("error adding room for accumulated rx data\n");
201 201
202 kfree_skb(skb); 202 kfree_skb(skb);
203 skb = 0; 203 skb = NULL;
204 204
205 kfree_skb(ndev->rx_data_reassembly); 205 kfree_skb(ndev->rx_data_reassembly);
206 ndev->rx_data_reassembly = 0; 206 ndev->rx_data_reassembly = NULL;
207 207
208 err = -ENOMEM; 208 err = -ENOMEM;
209 goto exit; 209 goto exit;
@@ -216,7 +216,7 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev,
216 216
217 /* third, free old reassembly */ 217 /* third, free old reassembly */
218 kfree_skb(ndev->rx_data_reassembly); 218 kfree_skb(ndev->rx_data_reassembly);
219 ndev->rx_data_reassembly = 0; 219 ndev->rx_data_reassembly = NULL;
220 } 220 }
221 221
222 if (pbf == NCI_PBF_CONT) { 222 if (pbf == NCI_PBF_CONT) {
diff --git a/net/nfc/nci/lib.c b/net/nfc/nci/lib.c
index 6a63e5eb483d..6b7fd26c68d9 100644
--- a/net/nfc/nci/lib.c
+++ b/net/nfc/nci/lib.c
@@ -31,6 +31,7 @@
31#include <linux/errno.h> 31#include <linux/errno.h>
32 32
33#include <net/nfc/nci.h> 33#include <net/nfc/nci.h>
34#include <net/nfc/nci_core.h>
34 35
35/* NCI status codes to Unix errno mapping */ 36/* NCI status codes to Unix errno mapping */
36int nci_to_errno(__u8 code) 37int nci_to_errno(__u8 code)
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 99e1632e6aac..cb2646179e5f 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -497,7 +497,7 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
497 /* drop partial rx data packet */ 497 /* drop partial rx data packet */
498 if (ndev->rx_data_reassembly) { 498 if (ndev->rx_data_reassembly) {
499 kfree_skb(ndev->rx_data_reassembly); 499 kfree_skb(ndev->rx_data_reassembly);
500 ndev->rx_data_reassembly = 0; 500 ndev->rx_data_reassembly = NULL;
501 } 501 }
502 502
503 /* complete the data exchange transaction, if exists */ 503 /* complete the data exchange transaction, if exists */
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index f1829f6ae9c5..581d419083aa 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -33,7 +33,7 @@ static struct genl_multicast_group nfc_genl_event_mcgrp = {
33 .name = NFC_GENL_MCAST_EVENT_NAME, 33 .name = NFC_GENL_MCAST_EVENT_NAME,
34}; 34};
35 35
36struct genl_family nfc_genl_family = { 36static struct genl_family nfc_genl_family = {
37 .id = GENL_ID_GENERATE, 37 .id = GENL_ID_GENERATE,
38 .hdrsize = 0, 38 .hdrsize = 0,
39 .name = NFC_GENL_NAME, 39 .name = NFC_GENL_NAME,
@@ -128,7 +128,7 @@ static int nfc_genl_dump_targets(struct sk_buff *skb,
128 cb->args[1] = (long) dev; 128 cb->args[1] = (long) dev;
129 } 129 }
130 130
131 spin_lock_bh(&dev->targets_lock); 131 device_lock(&dev->dev);
132 132
133 cb->seq = dev->targets_generation; 133 cb->seq = dev->targets_generation;
134 134
@@ -141,7 +141,7 @@ static int nfc_genl_dump_targets(struct sk_buff *skb,
141 i++; 141 i++;
142 } 142 }
143 143
144 spin_unlock_bh(&dev->targets_lock); 144 device_unlock(&dev->dev);
145 145
146 cb->args[0] = i; 146 cb->args[0] = i;
147 147
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index 7d589a81942e..3dd4232ae664 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -84,7 +84,7 @@ static inline int nfc_llcp_set_remote_gb(struct nfc_dev *dev,
84 return 0; 84 return 0;
85} 85}
86 86
87static inline u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *gb_len) 87static inline u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *gb_len)
88{ 88{
89 *gb_len = 0; 89 *gb_len = 0;
90 return NULL; 90 return NULL;
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 2fcfe0993ca2..884801ac4dd0 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -45,7 +45,7 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
45 return chan; 45 return chan;
46} 46}
47 47
48int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy, 48bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
49 struct ieee80211_channel *chan, 49 struct ieee80211_channel *chan,
50 enum nl80211_channel_type channel_type) 50 enum nl80211_channel_type channel_type)
51{ 51{
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 39f2538a46fc..a87d43552974 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -664,7 +664,7 @@ void wiphy_unregister(struct wiphy *wiphy)
664 mutex_lock(&rdev->devlist_mtx); 664 mutex_lock(&rdev->devlist_mtx);
665 __count = rdev->opencount; 665 __count = rdev->opencount;
666 mutex_unlock(&rdev->devlist_mtx); 666 mutex_unlock(&rdev->devlist_mtx);
667 __count == 0;})); 667 __count == 0; }));
668 668
669 mutex_lock(&rdev->devlist_mtx); 669 mutex_lock(&rdev->devlist_mtx);
670 BUG_ON(!list_empty(&rdev->netdev_list)); 670 BUG_ON(!list_empty(&rdev->netdev_list));
@@ -776,7 +776,7 @@ static struct device_type wiphy_type = {
776 .name = "wlan", 776 .name = "wlan",
777}; 777};
778 778
779static int cfg80211_netdev_notifier_call(struct notifier_block * nb, 779static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
780 unsigned long state, 780 unsigned long state,
781 void *ndev) 781 void *ndev)
782{ 782{
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 3ac2dd00d714..8523f3878677 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -445,8 +445,6 @@ int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
445 struct wireless_dev *wdev, int freq, 445 struct wireless_dev *wdev, int freq,
446 enum nl80211_channel_type channel_type); 446 enum nl80211_channel_type channel_type);
447 447
448u16 cfg80211_calculate_bitrate(struct rate_info *rate);
449
450int ieee80211_get_ratemask(struct ieee80211_supported_band *sband, 448int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
451 const u8 *rates, unsigned int n_rates, 449 const u8 *rates, unsigned int n_rates,
452 u32 *mask); 450 u32 *mask);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index b67b1114e25a..206465dc0cab 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1179,6 +1179,27 @@ static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
1179 wdev->iftype == NL80211_IFTYPE_P2P_GO; 1179 wdev->iftype == NL80211_IFTYPE_P2P_GO;
1180} 1180}
1181 1181
1182static bool nl80211_valid_channel_type(struct genl_info *info,
1183 enum nl80211_channel_type *channel_type)
1184{
1185 enum nl80211_channel_type tmp;
1186
1187 if (!info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE])
1188 return false;
1189
1190 tmp = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
1191 if (tmp != NL80211_CHAN_NO_HT &&
1192 tmp != NL80211_CHAN_HT20 &&
1193 tmp != NL80211_CHAN_HT40PLUS &&
1194 tmp != NL80211_CHAN_HT40MINUS)
1195 return false;
1196
1197 if (channel_type)
1198 *channel_type = tmp;
1199
1200 return true;
1201}
1202
1182static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, 1203static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
1183 struct wireless_dev *wdev, 1204 struct wireless_dev *wdev,
1184 struct genl_info *info) 1205 struct genl_info *info)
@@ -1193,15 +1214,9 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
1193 if (!nl80211_can_set_dev_channel(wdev)) 1214 if (!nl80211_can_set_dev_channel(wdev))
1194 return -EOPNOTSUPP; 1215 return -EOPNOTSUPP;
1195 1216
1196 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { 1217 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
1197 channel_type = nla_get_u32(info->attrs[ 1218 !nl80211_valid_channel_type(info, &channel_type))
1198 NL80211_ATTR_WIPHY_CHANNEL_TYPE]); 1219 return -EINVAL;
1199 if (channel_type != NL80211_CHAN_NO_HT &&
1200 channel_type != NL80211_CHAN_HT20 &&
1201 channel_type != NL80211_CHAN_HT40PLUS &&
1202 channel_type != NL80211_CHAN_HT40MINUS)
1203 return -EINVAL;
1204 }
1205 1220
1206 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); 1221 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
1207 1222
@@ -2410,10 +2425,16 @@ static int parse_station_flags(struct genl_info *info,
2410 return -EINVAL; 2425 return -EINVAL;
2411 } 2426 }
2412 2427
2413 for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++) 2428 for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++) {
2414 if (flags[flag]) 2429 if (flags[flag]) {
2415 params->sta_flags_set |= (1<<flag); 2430 params->sta_flags_set |= (1<<flag);
2416 2431
2432 /* no longer support new API additions in old API */
2433 if (flag > NL80211_STA_FLAG_MAX_OLD_API)
2434 return -EINVAL;
2435 }
2436 }
2437
2417 return 0; 2438 return 0;
2418} 2439}
2419 2440
@@ -4912,12 +4933,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
4912 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { 4933 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
4913 enum nl80211_channel_type channel_type; 4934 enum nl80211_channel_type channel_type;
4914 4935
4915 channel_type = nla_get_u32( 4936 if (!nl80211_valid_channel_type(info, &channel_type))
4916 info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
4917 if (channel_type != NL80211_CHAN_NO_HT &&
4918 channel_type != NL80211_CHAN_HT20 &&
4919 channel_type != NL80211_CHAN_HT40MINUS &&
4920 channel_type != NL80211_CHAN_HT40PLUS)
4921 return -EINVAL; 4937 return -EINVAL;
4922 4938
4923 if (channel_type != NL80211_CHAN_NO_HT && 4939 if (channel_type != NL80211_CHAN_NO_HT &&
@@ -5485,15 +5501,9 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
5485 !(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)) 5501 !(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL))
5486 return -EOPNOTSUPP; 5502 return -EOPNOTSUPP;
5487 5503
5488 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { 5504 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
5489 channel_type = nla_get_u32( 5505 !nl80211_valid_channel_type(info, &channel_type))
5490 info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]); 5506 return -EINVAL;
5491 if (channel_type != NL80211_CHAN_NO_HT &&
5492 channel_type != NL80211_CHAN_HT20 &&
5493 channel_type != NL80211_CHAN_HT40PLUS &&
5494 channel_type != NL80211_CHAN_HT40MINUS)
5495 return -EINVAL;
5496 }
5497 5507
5498 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); 5508 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
5499 chan = rdev_freq_to_chan(rdev, freq, channel_type); 5509 chan = rdev_freq_to_chan(rdev, freq, channel_type);
@@ -5764,12 +5774,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
5764 } 5774 }
5765 5775
5766 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { 5776 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
5767 channel_type = nla_get_u32( 5777 if (!nl80211_valid_channel_type(info, &channel_type))
5768 info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
5769 if (channel_type != NL80211_CHAN_NO_HT &&
5770 channel_type != NL80211_CHAN_HT20 &&
5771 channel_type != NL80211_CHAN_HT40PLUS &&
5772 channel_type != NL80211_CHAN_HT40MINUS)
5773 return -EINVAL; 5778 return -EINVAL;
5774 channel_type_valid = true; 5779 channel_type_valid = true;
5775 } 5780 }
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 1cd255892a43..55d99466babb 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -879,7 +879,7 @@ u16 cfg80211_calculate_bitrate(struct rate_info *rate)
879 return rate->legacy; 879 return rate->legacy;
880 880
881 /* the formula below does only work for MCS values smaller than 32 */ 881 /* the formula below does only work for MCS values smaller than 32 */
882 if (rate->mcs >= 32) 882 if (WARN_ON_ONCE(rate->mcs >= 32))
883 return 0; 883 return 0;
884 884
885 modulation = rate->mcs & 7; 885 modulation = rate->mcs & 7;