-rw-r--r--  drivers/bluetooth/bluecard_cs.c | 10
-rw-r--r--  drivers/bluetooth/bpa10x.c | 2
-rw-r--r--  drivers/bluetooth/bt3c_cs.c | 4
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.c | 3
-rw-r--r--  drivers/bluetooth/btuart_cs.c | 4
-rw-r--r--  drivers/bluetooth/btusb.c | 14
-rw-r--r--  drivers/bluetooth/dtl1_cs.c | 4
-rw-r--r--  drivers/bluetooth/hci_bcsp.c | 2
-rw-r--r--  drivers/bluetooth/hci_h4.c | 2
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 2
-rw-r--r--  drivers/bluetooth/hci_ll.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 154
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 223
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 58
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 30
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 373
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 212
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 59
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_mac.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_mci.c | 330
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_mci.h | 32
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/gpio.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 11
-rw-r--r--  drivers/net/wireless/ath/ath9k/link.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 22
-rw-r--r--  drivers/net/wireless/ath/ath9k/mci.c | 162
-rw-r--r--  drivers/net/wireless/ath/ath9k/rc.c | 17
-rw-r--r--  drivers/net/wireless/ath/ath9k/reg.h | 4
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/Makefile | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd.h | 3
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c | 126
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h | 59
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c | 7
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | 169
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/ampdu.c | 5
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/Makefile | 10
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/agn.h | 17
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/dev.h | 26
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/lib.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/mac80211.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/main.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/rx.c | 24
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/testmode.c | 778
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/tx.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.c | 38
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fh.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-io.c | 20
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-io.h | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-test.c | 856
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-test.h | 161
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-testmode.h (renamed from drivers/net/wireless/iwlwifi/dvm/testmode.h) | 0
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/6000.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 71
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 95
-rw-r--r--  drivers/net/wireless/mwifiex/uap_cmd.c | 11
-rw-r--r--  drivers/net/wireless/rndis_wlan.c | 2
-rw-r--r--  include/net/bluetooth/a2mp.h | 126
-rw-r--r--  include/net/bluetooth/bluetooth.h | 39
-rw-r--r--  include/net/bluetooth/hci.h | 105
-rw-r--r--  include/net/bluetooth/hci_core.h | 29
-rw-r--r--  include/net/bluetooth/l2cap.h | 205
-rw-r--r--  include/net/mac80211.h | 6
-rw-r--r--  net/bluetooth/Makefile | 3
-rw-r--r--  net/bluetooth/a2mp.c | 568
-rw-r--r--  net/bluetooth/af_bluetooth.c | 14
-rw-r--r--  net/bluetooth/bnep/core.c | 21
-rw-r--r--  net/bluetooth/bnep/netdev.c | 16
-rw-r--r--  net/bluetooth/bnep/sock.c | 18
-rw-r--r--  net/bluetooth/hci_conn.c | 98
-rw-r--r--  net/bluetooth/hci_core.c | 214
-rw-r--r--  net/bluetooth/hci_event.c | 357
-rw-r--r--  net/bluetooth/hci_sock.c | 59
-rw-r--r--  net/bluetooth/hci_sysfs.c | 99
-rw-r--r--  net/bluetooth/hidp/core.c | 26
-rw-r--r--  net/bluetooth/hidp/sock.c | 16
-rw-r--r--  net/bluetooth/l2cap_core.c | 2132
-rw-r--r--  net/bluetooth/l2cap_sock.c | 130
-rw-r--r--  net/bluetooth/lib.c | 7
-rw-r--r--  net/bluetooth/mgmt.c | 89
-rw-r--r--  net/bluetooth/rfcomm/core.c | 32
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 21
-rw-r--r--  net/bluetooth/rfcomm/tty.c | 9
-rw-r--r--  net/bluetooth/sco.c | 43
-rw-r--r--  net/bluetooth/smp.c | 18
-rw-r--r--  net/mac80211/cfg.c | 3
-rw-r--r--  net/mac80211/mlme.c | 4
-rw-r--r--  net/mac80211/sta_info.h | 5
-rw-r--r--  net/wireless/reg.c | 2
-rw-r--r--  net/wireless/util.c | 2
97 files changed, 5566 insertions(+), 3268 deletions(-)
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 1fcd9238035..585c88e0189 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -231,12 +231,12 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
 	}
 
 	do {
-		register unsigned int iobase = info->p_dev->resource[0]->start;
-		register unsigned int offset;
-		register unsigned char command;
-		register unsigned long ready_bit;
+		unsigned int iobase = info->p_dev->resource[0]->start;
+		unsigned int offset;
+		unsigned char command;
+		unsigned long ready_bit;
 		register struct sk_buff *skb;
-		register int len;
+		int len;
 
 		clear_bit(XMIT_WAKEUP, &(info->tx_state));
 
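This hunk, like the bt3c_cs/btuart_cs/dtl1_cs and hci_* hunks below, drops the "register" storage class, which modern compilers treat purely as a hint. A minimal userspace sketch (not kernel code; values are illustrative) of why the removal is behavior-neutral:

#include <stdio.h>

int main(void)
{
	register int hinted = 42;	/* the optimizer is free to ignore the hint */
	int plain = 42;			/* compiles to the same code in practice */

	/* &hinted would be a compile error: the address of a register
	 * variable may not be taken, one more reason new code avoids
	 * the keyword. */
	printf("%d %d\n", hinted + plain, plain);
	return 0;
}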
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 609861a53c2..29caaed2d71 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -470,7 +470,7 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
 	hdev->flush = bpa10x_flush;
 	hdev->send  = bpa10x_send_frame;
 
-	set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+	set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
 
 	err = hci_register_dev(hdev);
 	if (err < 0) {
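The rename from HCI_QUIRK_NO_RESET to HCI_QUIRK_RESET_ON_CLOSE describes when the reset happens rather than that it is skipped. A standalone sketch of the set_bit/test_bit idiom these hunks rely on (the bit index and helper names are assumptions for illustration, not the kernel implementation):

#include <stdio.h>

#define HCI_QUIRK_RESET_ON_CLOSE 0	/* bit index, assumed for illustration */

static void set_bit_ul(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

static int test_bit_ul(int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1UL;
}

int main(void)
{
	unsigned long quirks = 0;

	set_bit_ul(HCI_QUIRK_RESET_ON_CLOSE, &quirks);
	if (test_bit_ul(HCI_QUIRK_RESET_ON_CLOSE, &quirks))
		printf("reset deferred to device close\n");
	return 0;
}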
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 308c8599ab5..b2b0fbbb43b 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -186,9 +186,9 @@ static void bt3c_write_wakeup(bt3c_info_t *info)
 		return;
 
 	do {
-		register unsigned int iobase = info->p_dev->resource[0]->start;
+		unsigned int iobase = info->p_dev->resource[0]->start;
 		register struct sk_buff *skb;
-		register int len;
+		int len;
 
 		if (!pcmcia_dev_present(info->p_dev))
 			break;
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index a853244e7fd..2867499f725 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -110,6 +110,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
 	/* Marvell SD8787 Bluetooth device */
 	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
 			.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
+	/* Marvell SD8787 Bluetooth AMP device */
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911B),
+			.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
 	/* Marvell SD8797 Bluetooth device */
 	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
 			.driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index c4fc2f3fc32..65b8d996840 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -140,9 +140,9 @@ static void btuart_write_wakeup(btuart_info_t *info)
 	}
 
 	do {
-		register unsigned int iobase = info->p_dev->resource[0]->start;
+		unsigned int iobase = info->p_dev->resource[0]->start;
 		register struct sk_buff *skb;
-		register int len;
+		int len;
 
 		clear_bit(XMIT_WAKEUP, &(info->tx_state));
 
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index c9463af8e56..a45e717f5f8 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -21,15 +21,7 @@
  *
  */
 
-#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/skbuff.h>
-
 #include <linux/usb.h>
 
 #include <net/bluetooth/bluetooth.h>
@@ -1026,7 +1018,7 @@ static int btusb_probe(struct usb_interface *intf,
 		data->isoc = usb_ifnum_to_if(data->udev, 1);
 
 	if (!reset)
-		set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+		set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
 
 	if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) {
 		if (!disable_scofix)
@@ -1038,7 +1030,7 @@ static int btusb_probe(struct usb_interface *intf,
 
 	if (id->driver_info & BTUSB_DIGIANSWER) {
 		data->cmdreq_type = USB_TYPE_VENDOR;
-		set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+		set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
 	}
 
 	if (id->driver_info & BTUSB_CSR) {
@@ -1046,7 +1038,7 @@ static int btusb_probe(struct usb_interface *intf,
 
 		/* Old firmware would otherwise execute USB reset */
 		if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117)
-			set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+			set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
 	}
 
 	if (id->driver_info & BTUSB_SNIFFER) {
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 6e8d9618968..b1b37ccd3cd 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -144,9 +144,9 @@ static void dtl1_write_wakeup(dtl1_info_t *info)
 	}
 
 	do {
-		register unsigned int iobase = info->p_dev->resource[0]->start;
+		unsigned int iobase = info->p_dev->resource[0]->start;
 		register struct sk_buff *skb;
-		register int len;
+		int len;
 
 		clear_bit(XMIT_WAKEUP, &(info->tx_state));
 
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 661a8dc4d2f..57e502e0608 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -552,7 +552,7 @@ static u16 bscp_get_crc(struct bcsp_struct *bcsp)
 static int bcsp_recv(struct hci_uart *hu, void *data, int count)
 {
 	struct bcsp_struct *bcsp = hu->priv;
-	register unsigned char *ptr;
+	unsigned char *ptr;
 
 	BT_DBG("hu %p count %d rx_state %d rx_count %ld",
 	       hu, count, bcsp->rx_state, bcsp->rx_count);
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 748329468d2..c60623f206d 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -126,7 +126,7 @@ static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb)
 
 static inline int h4_check_data_len(struct h4_struct *h4, int len)
 {
-	register int room = skb_tailroom(h4->rx_skb);
+	int room = skb_tailroom(h4->rx_skb);
 
 	BT_DBG("len %d room %d", len, room);
 
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index e564579a611..2f9b796e106 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -394,7 +394,7 @@ static int hci_uart_register_dev(struct hci_uart *hu)
 		set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
 
 	if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
-		set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+		set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
 
 	if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
 		hdev->dev_type = HCI_AMP;
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index b874c0efde2..ff6d589c34a 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -348,7 +348,7 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
 
 static inline int ll_check_data_len(struct ll_struct *ll, int len)
 {
-	register int room = skb_tailroom(ll->rx_skb);
+	int room = skb_tailroom(ll->rx_skb);
 
 	BT_DBG("len %d room %d", len, room);
 
@@ -374,11 +374,11 @@ static inline int ll_check_data_len(struct ll_struct *ll, int len)
 static int ll_recv(struct hci_uart *hu, void *data, int count)
 {
 	struct ll_struct *ll = hu->priv;
-	register char *ptr;
+	char *ptr;
 	struct hci_event_hdr *eh;
 	struct hci_acl_hdr *ah;
 	struct hci_sco_hdr *sh;
-	register int len, type, dlen;
+	int len, type, dlen;
 
 	BT_DBG("hu %p count %d rx_state %ld rx_count %ld", hu, count, ll->rx_state, ll->rx_count);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 7de82418497..7211cb07426 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,8 +23,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION	"1.72.50-0"
-#define DRV_MODULE_RELDATE	"2012/04/23"
+#define DRV_MODULE_VERSION	"1.72.51-0"
+#define DRV_MODULE_RELDATE	"2012/06/18"
 #define BNX2X_BC_VER		0x040200
 
 #if defined(CONFIG_DCB)
@@ -248,13 +248,12 @@ enum {
 	BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
 };
 
-#define BNX2X_CNIC_START_ETH_CID	48
-enum {
+#define BNX2X_CNIC_START_ETH_CID(bp)	(BNX2X_NUM_NON_CNIC_QUEUES(bp) *\
+					 (bp)->max_cos)
 	/* iSCSI L2 */
-	BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID,
+#define BNX2X_ISCSI_ETH_CID(bp)		(BNX2X_CNIC_START_ETH_CID(bp))
 	/* FCoE L2 */
-	BNX2X_FCOE_ETH_CID,
-};
+#define BNX2X_FCOE_ETH_CID(bp)		(BNX2X_CNIC_START_ETH_CID(bp) + 1)
 
 /** Additional rings budgeting */
 #ifdef BCM_CNIC
@@ -276,29 +275,30 @@ enum {
 #define FIRST_TX_ONLY_COS_INDEX		1
 #define FIRST_TX_COS_INDEX		0
 
-/* defines for decodeing the fastpath index and the cos index out of the
- * transmission queue index
- */
-#define MAX_TXQS_PER_COS	FP_SB_MAX_E1x
-
-#define TXQ_TO_FP(txq_index)	((txq_index) % MAX_TXQS_PER_COS)
-#define TXQ_TO_COS(txq_index)	((txq_index) / MAX_TXQS_PER_COS)
-
 /* rules for calculating the cids of tx-only connections */
-#define CID_TO_FP(cid)		((cid) % MAX_TXQS_PER_COS)
-#define CID_COS_TO_TX_ONLY_CID(cid, cos)	(cid + cos * MAX_TXQS_PER_COS)
+#define CID_TO_FP(cid, bp)	((cid) % BNX2X_NUM_NON_CNIC_QUEUES(bp))
+#define CID_COS_TO_TX_ONLY_CID(cid, cos, bp) \
+		(cid + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
 
 /* fp index inside class of service range */
-#define FP_COS_TO_TXQ(fp, cos)	((fp)->index + cos * MAX_TXQS_PER_COS)
+#define FP_COS_TO_TXQ(fp, cos, bp) \
+	((fp)->index + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
 
-/*
- * 0..15 eth cos0
- * 16..31 eth cos1 if applicable
- * 32..47 eth cos2 If applicable
- * fcoe queue follows eth queues (16, 32, 48 depending on cos)
+/* Indexes for transmission queues array:
+ * txdata for RSS i CoS j is at location i + (j * num of RSS)
+ * txdata for FCoE (if exist) is at location max cos * num of RSS
+ * txdata for FWD (if exist) is one location after FCoE
+ * txdata for OOO (if exist) is one location after FWD
  */
-#define MAX_ETH_TXQ_IDX(bp)	(MAX_TXQS_PER_COS * (bp)->max_cos)
-#define FCOE_TXQ_IDX(bp)	(MAX_ETH_TXQ_IDX(bp))
+enum {
+	FCOE_TXQ_IDX_OFFSET,
+	FWD_TXQ_IDX_OFFSET,
+	OOO_TXQ_IDX_OFFSET,
+};
+#define MAX_ETH_TXQ_IDX(bp)	(BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos)
+#ifdef BCM_CNIC
+#define FCOE_TXQ_IDX(bp)	(MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET)
+#endif
 
 /* fast path */
 /*
@@ -481,6 +481,8 @@ struct bnx2x_fp_txdata {
 	__le16			*tx_cons_sb;
 
 	int			txq_index;
+	struct bnx2x_fastpath	*parent_fp;
+	int			tx_ring_size;
 };
 
 enum bnx2x_tpa_mode_t {
@@ -507,7 +509,7 @@ struct bnx2x_fastpath {
 	enum bnx2x_tpa_mode_t	mode;
 
 	u8			max_cos; /* actual number of active tx coses */
-	struct bnx2x_fp_txdata	txdata[BNX2X_MULTI_TX_COS];
+	struct bnx2x_fp_txdata	*txdata_ptr[BNX2X_MULTI_TX_COS];
 
 	struct sw_rx_bd		*rx_buf_ring;	/* BDs mappings ring */
 	struct sw_rx_page	*rx_page_ring;	/* SGE pages mappings ring */
@@ -547,51 +549,45 @@ struct bnx2x_fastpath {
 		rx_calls;
 
 	/* TPA related */
-	struct bnx2x_agg_info	tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2];
+	struct bnx2x_agg_info	*tpa_info;
 	u8			disable_tpa;
 #ifdef BNX2X_STOP_ON_ERROR
 	u64			tpa_queue_used;
 #endif
-
-	struct tstorm_per_queue_stats old_tclient;
-	struct ustorm_per_queue_stats old_uclient;
-	struct xstorm_per_queue_stats old_xclient;
-	struct bnx2x_eth_q_stats eth_q_stats;
-	struct bnx2x_eth_q_stats_old eth_q_stats_old;
-
 	/* The size is calculated using the following:
 	     sizeof name field from netdev structure +
 	     4 ('-Xx-' string) +
 	     4 (for the digits and to make it DWORD aligned) */
 #define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
 	char			name[FP_NAME_SIZE];
-
-	/* MACs object */
-	struct bnx2x_vlan_mac_obj mac_obj;
-
-	/* Queue State object */
-	struct bnx2x_queue_sp_obj q_obj;
-
 };
 
-#define bnx2x_fp(bp, nr, var)	(bp->fp[nr].var)
+#define bnx2x_fp(bp, nr, var)	((bp)->fp[(nr)].var)
+#define bnx2x_sp_obj(bp, fp)	((bp)->sp_objs[(fp)->index])
+#define bnx2x_fp_stats(bp, fp)	(&((bp)->fp_stats[(fp)->index]))
+#define bnx2x_fp_qstats(bp, fp)	(&((bp)->fp_stats[(fp)->index].eth_q_stats))
 
 /* Use 2500 as a mini-jumbo MTU for FCoE */
 #define BNX2X_FCOE_MINI_JUMBO_MTU	2500
 
-/* FCoE L2 `fastpath' entry is right after the eth entries */
-#define FCOE_IDX		BNX2X_NUM_ETH_QUEUES(bp)
-#define bnx2x_fcoe_fp(bp)	(&bp->fp[FCOE_IDX])
-#define bnx2x_fcoe(bp, var)	(bnx2x_fcoe_fp(bp)->var)
-#define bnx2x_fcoe_tx(bp, var)	(bnx2x_fcoe_fp(bp)-> \
-					txdata[FIRST_TX_COS_INDEX].var)
+#define FCOE_IDX_OFFSET		0
+
+#define FCOE_IDX(bp)		(BNX2X_NUM_NON_CNIC_QUEUES(bp) + \
+				 FCOE_IDX_OFFSET)
+#define bnx2x_fcoe_fp(bp)	(&bp->fp[FCOE_IDX(bp)])
+#define bnx2x_fcoe(bp, var)	(bnx2x_fcoe_fp(bp)->var)
+#define bnx2x_fcoe_inner_sp_obj(bp)	(&bp->sp_objs[FCOE_IDX(bp)])
+#define bnx2x_fcoe_sp_obj(bp, var)	(bnx2x_fcoe_inner_sp_obj(bp)->var)
+#define bnx2x_fcoe_tx(bp, var)	(bnx2x_fcoe_fp(bp)-> \
+					txdata_ptr[FIRST_TX_COS_INDEX] \
+					->var)
 
 
 #define IS_ETH_FP(fp)		(fp->index < \
 				 BNX2X_NUM_ETH_QUEUES(fp->bp))
 #ifdef BCM_CNIC
-#define IS_FCOE_FP(fp)		(fp->index == FCOE_IDX)
-#define IS_FCOE_IDX(idx)	((idx) == FCOE_IDX)
+#define IS_FCOE_FP(fp)		(fp->index == FCOE_IDX(fp->bp))
+#define IS_FCOE_IDX(idx)	((idx) == FCOE_IDX(bp))
 #else
 #define IS_FCOE_FP(fp)		false
 #define IS_FCOE_IDX(idx)	false
@@ -978,8 +974,8 @@ union cdu_context {
 };
 
 /* CDU host DB constants */
-#define CDU_ILT_PAGE_SZ_HW	3
-#define CDU_ILT_PAGE_SZ		(8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */
+#define CDU_ILT_PAGE_SZ_HW	2
+#define CDU_ILT_PAGE_SZ		(8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */
 #define ILT_PAGE_CIDS		(CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
 
 #ifdef BCM_CNIC
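A quick check of the sizing math changed here: the page size is 8192 << CDU_ILT_PAGE_SZ_HW, so dropping the HW value from 3 to 2 halves the page from 64K to 32K. With the 1KB context size cited in the ILT_MAX_L2_LINES comment later in this patch (taken from that comment, not computed here), each ILT page then holds 32 CIDs:

#include <stdio.h>

enum { CDU_ILT_PAGE_SZ_HW = 2, CONTEXT_SIZE = 1024 };
#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW)

int main(void)
{
	printf("ILT page: %d bytes, %d cids/page\n",
	       CDU_ILT_PAGE_SZ, CDU_ILT_PAGE_SZ / CONTEXT_SIZE);	/* 32768, 32 */
	return 0;
}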
@@ -1182,11 +1178,31 @@ struct bnx2x_prev_path_list {
 	struct list_head list;
 };
 
+struct bnx2x_sp_objs {
+	/* MACs object */
+	struct bnx2x_vlan_mac_obj mac_obj;
+
+	/* Queue State object */
+	struct bnx2x_queue_sp_obj q_obj;
+};
+
+struct bnx2x_fp_stats {
+	struct tstorm_per_queue_stats old_tclient;
+	struct ustorm_per_queue_stats old_uclient;
+	struct xstorm_per_queue_stats old_xclient;
+	struct bnx2x_eth_q_stats eth_q_stats;
+	struct bnx2x_eth_q_stats_old eth_q_stats_old;
+};
+
 struct bnx2x {
 	/* Fields used in the tx and intr/napi performance paths
 	 * are grouped together in the beginning of the structure
 	 */
 	struct bnx2x_fastpath	*fp;
+	struct bnx2x_sp_objs	*sp_objs;
+	struct bnx2x_fp_stats	*fp_stats;
+	struct bnx2x_fp_txdata	*bnx2x_txq;
+	int			bnx2x_txq_size;
 	void __iomem		*regview;
 	void __iomem		*doorbells;
 	u16			db_size;
@@ -1302,6 +1318,7 @@ struct bnx2x {
 #define NO_FCOE_FLAG			(1 << 15)
 #define BC_SUPPORTS_PFC_STATS		(1 << 17)
 #define USING_SINGLE_MSIX_FLAG		(1 << 20)
+#define BC_SUPPORTS_DCBX_MSG_NON_PMF	(1 << 21)
 
 #define NO_ISCSI(bp)		((bp)->flags & NO_ISCSI_FLAG)
 #define NO_ISCSI_OOO(bp)	((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1377,6 +1394,7 @@ struct bnx2x {
 #define BNX2X_MAX_COS		3
 #define BNX2X_MAX_TX_COS	2
 	int			num_queues;
+	int			num_napi_queues;
 	int			disable_tpa;
 
 	u32			rx_mode;
@@ -1389,6 +1407,7 @@ struct bnx2x {
 	u8			igu_dsb_id;
 	u8			igu_base_sb;
 	u8			igu_sb_cnt;
+
 	dma_addr_t		def_status_blk_mapping;
 
 	struct bnx2x_slowpath	*slowpath;
@@ -1420,7 +1439,11 @@ struct bnx2x {
 	dma_addr_t		fw_stats_data_mapping;
 	int			fw_stats_data_sz;
 
-	struct hw_context	context;
+	/* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB
+	 * context size we need 8 ILT entries.
+	 */
+#define ILT_MAX_L2_LINES	8
+	struct hw_context	context[ILT_MAX_L2_LINES];
 
 	struct bnx2x_ilt	*ilt;
 #define BP_ILT(bp)		((bp)->ilt)
@@ -1433,13 +1456,14 @@ struct bnx2x {
 
 /*
  * Maximum CID count that might be required by the bnx2x:
- * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related)
+ * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
  */
-#define BNX2X_L2_CID_COUNT(bp)	(MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\
-					NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+#define BNX2X_L2_CID_COUNT(bp)	(BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
+				 + NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+#define BNX2X_L2_MAX_CID(bp)	(BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
+				 + NON_ETH_CONTEXT_USE + CNIC_PRESENT)
 #define L2_ILT_LINES(bp)	(DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
 					ILT_PAGE_CIDS))
-#define BNX2X_DB_SIZE(bp)	(BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT))
 
 	int			qm_cid_count;
 
@@ -1598,6 +1622,8 @@ struct bnx2x {
 extern int num_queues;
 #define BNX2X_NUM_QUEUES(bp)	(bp->num_queues)
 #define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
+#define BNX2X_NUM_NON_CNIC_QUEUES(bp)	(BNX2X_NUM_QUEUES(bp) - \
+					 NON_ETH_CONTEXT_USE)
 #define BNX2X_NUM_RX_QUEUES(bp)	BNX2X_NUM_QUEUES(bp)
 
 #define is_multi(bp)		(BNX2X_NUM_QUEUES(bp) > 1)
@@ -1656,6 +1682,9 @@ struct bnx2x_func_init_params {
 			continue;		\
 		else
 
+#define for_each_napi_rx_queue(bp, var) \
+	for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
+
 /* Skip OOO FP */
 #define for_each_tx_queue(bp, var) \
 	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
@@ -1817,6 +1846,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define LOAD_NORMAL			0
 #define LOAD_OPEN			1
 #define LOAD_DIAG			2
+#define LOAD_LOOPBACK_EXT		3
 #define UNLOAD_NORMAL			0
 #define UNLOAD_CLOSE			1
 #define UNLOAD_RECOVERY			2
@@ -1899,13 +1929,17 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define PCICFG_LINK_SPEED		0xf0000
 #define PCICFG_LINK_SPEED_SHIFT		16
 
-
-#define BNX2X_NUM_TESTS			7
+#define BNX2X_NUM_TESTS_SF		7
+#define BNX2X_NUM_TESTS_MF		3
+#define BNX2X_NUM_TESTS(bp)		(IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
+						     BNX2X_NUM_TESTS_SF)
 
 #define BNX2X_PHY_LOOPBACK		0
 #define BNX2X_MAC_LOOPBACK		1
+#define BNX2X_EXT_LOOPBACK		2
 #define BNX2X_PHY_LOOPBACK_FAILED	1
 #define BNX2X_MAC_LOOPBACK_FAILED	2
+#define BNX2X_EXT_LOOPBACK_FAILED	3
 #define BNX2X_LOOPBACK_FAILED		(BNX2X_MAC_LOOPBACK_FAILED | \
 					 BNX2X_PHY_LOOPBACK_FAILED)
 
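The "Indexes for transmission queues array" comment in the hunks above fully determines the new txdata layout; a standalone sketch of that arithmetic with assumed values (4 RSS queues, 3 CoS — illustrative only, not taken from any specific board):

#include <stdio.h>

enum { NUM_RSS = 4, MAX_COS = 3, FCOE_OFF = 0, FWD_OFF = 1, OOO_OFF = 2 };

static int txq_idx(int rss, int cos)	{ return rss + cos * NUM_RSS; }
static int max_eth_txq(void)		{ return NUM_RSS * MAX_COS; }

int main(void)
{
	printf("rss 2 cos 1 -> txdata[%d]\n", txq_idx(2, 1));		/* 6 */
	printf("fcoe -> txdata[%d]\n", max_eth_txq() + FCOE_OFF);	/* 12 */
	printf("fwd  -> txdata[%d]\n", max_eth_txq() + FWD_OFF);	/* 13 */
	return 0;
}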
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8098eea9704..00951b3aa62 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -40,12 +40,19 @@
  * Makes sure the contents of the bp->fp[to].napi is kept
  * intact. This is done by first copying the napi struct from
  * the target to the source, and then mem copying the entire
- * source onto the target
+ * source onto the target. Update txdata pointers and related
+ * content.
  */
 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
 {
 	struct bnx2x_fastpath *from_fp = &bp->fp[from];
 	struct bnx2x_fastpath *to_fp = &bp->fp[to];
+	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
+	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
+	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
+	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
+	int old_max_eth_txqs, new_max_eth_txqs;
+	int old_txdata_index = 0, new_txdata_index = 0;
 
 	/* Copy the NAPI object as it has been already initialized */
 	from_fp->napi = to_fp->napi;
@@ -53,6 +60,30 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
 	/* Move bnx2x_fastpath contents */
 	memcpy(to_fp, from_fp, sizeof(*to_fp));
 	to_fp->index = to;
+
+	/* move sp_objs contents as well, as their indices match fp ones */
+	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
+
+	/* move fp_stats contents as well, as their indices match fp ones */
+	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
+
+	/* Update txdata pointers in fp and move txdata content accordingly:
+	 * Each fp consumes 'max_cos' txdata structures, so the index should be
+	 * decremented by max_cos x delta.
+	 */
+
+	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
+	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
+				(bp)->max_cos;
+	if (from == FCOE_IDX(bp)) {
+		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
+		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
+	}
+
+	memcpy(&bp->bnx2x_txq[new_txdata_index],
+	       &bp->bnx2x_txq[old_txdata_index],
+	       sizeof(struct bnx2x_fp_txdata));
+	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
 }
 
 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
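The re-indexing above is pure arithmetic: the FCoE txdata slot sits right after the ETH slots, so when the ETH queue count shrinks, the slot index drops by max_cos x delta. A sketch with assumed numbers (6 ETH queues shrinking by 2, max_cos = 3 — illustrative only):

#include <stdio.h>

enum { MAX_COS = 3, FCOE_TXQ_IDX_OFFSET = 0 };

int main(void)
{
	int num_eth = 6, from = 6, to = 4;	/* FCoE fp moves 6 -> 4 */
	int old_idx = num_eth * MAX_COS + FCOE_TXQ_IDX_OFFSET;
	int new_idx = (num_eth - from + to) * MAX_COS + FCOE_TXQ_IDX_OFFSET;

	printf("FCoE txdata slot %d -> %d\n", old_idx, new_idx);	/* 18 -> 12 */
	return 0;
}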
@@ -479,7 +510,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		   where we are and drop the whole packet */
 		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
 		if (unlikely(err)) {
-			fp->eth_q_stats.rx_skb_alloc_failed++;
+			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
 			return err;
 		}
 
@@ -584,7 +615,7 @@ drop:
 	/* drop the packet and keep the buffer in the bin */
 	DP(NETIF_MSG_RX_STATUS,
 	   "Failed to allocate or map a new skb - dropping packet!\n");
-	fp->eth_q_stats.rx_skb_alloc_failed++;
+	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
 }
 
 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
@@ -617,8 +648,10 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
 	return 0;
 }
 
-static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
-				struct bnx2x_fastpath *fp)
+static
+void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
+			 struct bnx2x_fastpath *fp,
+			 struct bnx2x_eth_q_stats *qstats)
 {
 	/* Do nothing if no IP/L4 csum validation was done */
 
@@ -632,7 +665,7 @@ static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
 	if (cqe->fast_path_cqe.type_error_flags &
 	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
 	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
-		fp->eth_q_stats.hw_csum_err++;
+		qstats->hw_csum_err++;
 	else
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
@@ -776,7 +809,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
 			   "ERROR  flags %x  rx packet %u\n",
 			   cqe_fp_flags, sw_comp_cons);
-			fp->eth_q_stats.rx_err_discard_pkt++;
+			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
 			goto reuse_rx;
 		}
 
@@ -789,7 +822,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 				if (skb == NULL) {
 					DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
 					   "ERROR  packet dropped because of alloc failure\n");
-					fp->eth_q_stats.rx_skb_alloc_failed++;
+					bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
 					goto reuse_rx;
 				}
 				memcpy(skb->data, data + pad, len);
@@ -803,14 +836,15 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 				skb = build_skb(data, 0);
 				if (unlikely(!skb)) {
 					kfree(data);
-					fp->eth_q_stats.rx_skb_alloc_failed++;
+					bnx2x_fp_qstats(bp, fp)->
+						rx_skb_alloc_failed++;
 					goto next_rx;
 				}
 				skb_reserve(skb, pad);
 			} else {
 				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
 				   "ERROR  packet dropped because of alloc failure\n");
-				fp->eth_q_stats.rx_skb_alloc_failed++;
+				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
 reuse_rx:
 				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
 				goto next_rx;
@@ -826,8 +860,8 @@ reuse_rx:
 		skb_checksum_none_assert(skb);
 
 		if (bp->dev->features & NETIF_F_RXCSUM)
-			bnx2x_csum_validate(skb, cqe, fp);
-
+			bnx2x_csum_validate(skb, cqe, fp,
+					    bnx2x_fp_qstats(bp, fp));
 
 		skb_record_rx_queue(skb, fp->rx_queue);
 
@@ -888,7 +922,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
 	prefetch(fp->rx_cons_sb);
 
 	for_each_cos_in_tx_queue(fp, cos)
-		prefetch(fp->txdata[cos].tx_cons_sb);
+		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
 
 	prefetch(&fp->sb_running_index[SM_RX_ID]);
 	napi_schedule(&bnx2x_fp(bp, fp->index, napi));
@@ -1205,7 +1239,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
 	for_each_tx_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 		for_each_cos_in_tx_queue(fp, cos) {
-			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
 			unsigned pkts_compl = 0, bytes_compl = 0;
 
 			u16 sw_prod = txdata->tx_pkt_prod;
@@ -1217,7 +1251,8 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
 				sw_cons++;
 			}
 			netdev_tx_reset_queue(
-				netdev_get_tx_queue(bp->dev, txdata->txq_index));
+				netdev_get_tx_queue(bp->dev,
+						    txdata->txq_index));
 		}
 	}
 }
@@ -1325,7 +1360,7 @@ void bnx2x_free_irq(struct bnx2x *bp)
 		free_irq(bp->dev->irq, bp->dev);
 }
 
-int __devinit bnx2x_enable_msix(struct bnx2x *bp)
+int bnx2x_enable_msix(struct bnx2x *bp)
 {
 	int msix_vec = 0, i, rc, req_cnt;
 
@@ -1579,6 +1614,8 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
 #endif
 	/* Add special queues */
 	bp->num_queues += NON_ETH_CONTEXT_USE;
+
+	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
 }
 
 /**
@@ -1607,8 +1644,8 @@ static int bnx2x_set_real_num_queues(struct bnx2x *bp)
 {
 	int rc, tx, rx;
 
-	tx = MAX_TXQS_PER_COS * bp->max_cos;
-	rx = BNX2X_NUM_ETH_QUEUES(bp);
+	tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
+	rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE;
 
 /* account for fcoe queue */
 #ifdef BCM_CNIC
@@ -1666,14 +1703,13 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
 static int bnx2x_init_rss_pf(struct bnx2x *bp)
 {
 	int i;
-	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
 	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
 
 	/* Prepare the initial contents fo the indirection table if RSS is
 	 * enabled
 	 */
-	for (i = 0; i < sizeof(ind_table); i++)
-		ind_table[i] =
+	for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
+		bp->rss_conf_obj.ind_table[i] =
 			bp->fp->cl_id +
 			ethtool_rxfh_indir_default(i, num_eth_queues);
 
@@ -1685,12 +1721,11 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp)
 	 * For 57712 and newer on the other hand it's a per-function
 	 * configuration.
 	 */
-	return bnx2x_config_rss_eth(bp, ind_table,
-				    bp->port.pmf || !CHIP_IS_E1x(bp));
+	return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
 }
 
 int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
-			u8 *ind_table, bool config_hash)
+			bool config_hash)
 {
 	struct bnx2x_config_rss_params params = {NULL};
 	int i;
@@ -1713,11 +1748,15 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
 	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
 	__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
 	__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+	if (rss_obj->udp_rss_v4)
+		__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
+	if (rss_obj->udp_rss_v6)
+		__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
 
 	/* Hash bits */
 	params.rss_result_mask = MULTI_MASK;
 
-	memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
+	memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
 
 	if (config_hash) {
 		/* RSS keys */
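ethtool_rxfh_indir_default() fills the indirection table round-robin: entry i maps to queue i % num_queues. A standalone sketch of how the table built in bnx2x_init_rss_pf() above ends up looking (table size, queue count, and client-ID base are illustrative values, not driver constants):

#include <stdio.h>

enum { IND_TABLE_SIZE = 128, NUM_ETH_QUEUES = 4, CL_ID_BASE = 17 };

int main(void)
{
	unsigned char ind_table[IND_TABLE_SIZE];
	int i;

	for (i = 0; i < IND_TABLE_SIZE; i++)
		ind_table[i] = CL_ID_BASE + (i % NUM_ETH_QUEUES);

	printf("entry 0 -> client %d, entry 5 -> client %d\n",
	       ind_table[0], ind_table[5]);	/* 17 and 18 */
	return 0;
}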
@@ -1754,7 +1793,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
 	int rc;
 	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
-	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
 
 	/***************** Cleanup MACs' object first *************************/
 
@@ -1765,7 +1804,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
 
 	/* Clean ETH primary MAC */
 	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
-	rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
+	rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
 				 &ramrod_flags);
 	if (rc != 0)
 		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
@@ -1851,11 +1890,16 @@ bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
 {
 	struct bnx2x_fastpath *fp = &bp->fp[index];
+	struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
+
+	int cos;
 	struct napi_struct orig_napi = fp->napi;
+	struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
 	/* bzero bnx2x_fastpath contents */
-	if (bp->stats_init)
+	if (bp->stats_init) {
+		memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
 		memset(fp, 0, sizeof(*fp));
-	else {
+	} else {
 		/* Keep Queue statistics */
 		struct bnx2x_eth_q_stats *tmp_eth_q_stats;
 		struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
@@ -1863,26 +1907,27 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
 		tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
 					  GFP_KERNEL);
 		if (tmp_eth_q_stats)
-			memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
+			memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
 			       sizeof(struct bnx2x_eth_q_stats));
 
 		tmp_eth_q_stats_old =
 			kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
 				GFP_KERNEL);
 		if (tmp_eth_q_stats_old)
-			memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
+			memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
 			       sizeof(struct bnx2x_eth_q_stats_old));
 
+		memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
 		memset(fp, 0, sizeof(*fp));
 
 		if (tmp_eth_q_stats) {
-			memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
+			memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
 			       sizeof(struct bnx2x_eth_q_stats));
 			kfree(tmp_eth_q_stats);
 		}
 
 		if (tmp_eth_q_stats_old) {
-			memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
+			memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
 			       sizeof(struct bnx2x_eth_q_stats_old));
 			kfree(tmp_eth_q_stats_old);
 		}
@@ -1891,7 +1936,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
 
 	/* Restore the NAPI object as it has been already initialized */
 	fp->napi = orig_napi;
-
+	fp->tpa_info = orig_tpa_info;
 	fp->bp = bp;
 	fp->index = index;
 	if (IS_ETH_FP(fp))
@@ -1900,6 +1945,16 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
 		/* Special queues support only one CoS */
 		fp->max_cos = 1;
 
+	/* Init txdata pointers */
+#ifdef BCM_CNIC
+	if (IS_FCOE_FP(fp))
+		fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
+#endif
+	if (IS_ETH_FP(fp))
+		for_each_cos_in_tx_queue(fp, cos)
+			fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
+				BNX2X_NUM_ETH_QUEUES(bp) + index];
+
 	/*
 	 * set the tpa flag for each queue. The tpa flag determines the queue
 	 * minimal size so it must be set prior to queue memory allocation
@@ -1949,11 +2004,13 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	/*
 	 * Zero fastpath structures preserving invariants like napi, which are
 	 * allocated only once, fp index, max_cos, bp pointer.
-	 * Also set fp->disable_tpa.
+	 * Also set fp->disable_tpa and txdata_ptr.
 	 */
 	DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
 	for_each_queue(bp, i)
 		bnx2x_bz_fp(bp, i);
+	memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size *
+	       sizeof(struct bnx2x_fp_txdata));
 
 
 	/* Set the receive queues buffer size */
@@ -2176,6 +2233,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		break;
 
 	case LOAD_DIAG:
+	case LOAD_LOOPBACK_EXT:
 		bp->state = BNX2X_STATE_DIAG;
 		break;
 
@@ -2195,6 +2253,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		/* re-read iscsi info */
 		bnx2x_get_iscsi_info(bp);
 		bnx2x_setup_cnic_irq_info(bp);
+		bnx2x_setup_cnic_info(bp);
 		if (bp->state == BNX2X_STATE_OPEN)
 			bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
 #endif
@@ -2215,7 +2274,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		return -EBUSY;
 	}
 
-	bnx2x_dcbx_init(bp);
+	/* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
+	if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
+		bnx2x_dcbx_init(bp, false);
+
 	return 0;
 
 #ifndef BNX2X_STOP_ON_ERROR
@@ -2298,6 +2360,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 
 	/* Stop Tx */
 	bnx2x_tx_disable(bp);
+	netdev_reset_tc(bp->dev);
 
 #ifdef BCM_CNIC
 	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
@@ -2456,8 +2519,8 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
 #endif
 
 		for_each_cos_in_tx_queue(fp, cos)
-			if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
-				bnx2x_tx_int(bp, &fp->txdata[cos]);
+			if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
+				bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
 
 
 		if (bnx2x_has_rx_work(fp)) {
@@ -2834,7 +2897,6 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 
-	struct bnx2x_fastpath *fp;
 	struct netdev_queue *txq;
 	struct bnx2x_fp_txdata *txdata;
 	struct sw_tx_bd *tx_buf;
@@ -2844,7 +2906,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
 	u32 pbd_e2_parsing_data = 0;
 	u16 pkt_prod, bd_prod;
-	int nbd, txq_index, fp_index, txdata_index;
+	int nbd, txq_index;
 	dma_addr_t mapping;
 	u32 xmit_type = bnx2x_xmit_type(bp, skb);
 	int i;
@@ -2863,31 +2925,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
 
-	/* decode the fastpath index and the cos index from the txq */
-	fp_index = TXQ_TO_FP(txq_index);
-	txdata_index = TXQ_TO_COS(txq_index);
-
-#ifdef BCM_CNIC
-	/*
-	 * Override the above for the FCoE queue:
-	 *   - FCoE fp entry is right after the ETH entries.
-	 *   - FCoE L2 queue uses bp->txdata[0] only.
-	 */
-	if (unlikely(!NO_FCOE(bp) && (txq_index ==
-				      bnx2x_fcoe_tx(bp, txq_index)))) {
-		fp_index = FCOE_IDX;
-		txdata_index = 0;
-	}
-#endif
+	txdata = &bp->bnx2x_txq[txq_index];
 
 	/* enable this debug print to view the transmission queue being used
 	DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
 	   txq_index, fp_index, txdata_index); */
 
-	/* locate the fastpath and the txdata */
-	fp = &bp->fp[fp_index];
-	txdata = &fp->txdata[txdata_index];
-
 	/* enable this debug print to view the tranmission details
 	DP(NETIF_MSG_TX_QUEUED,
 	   "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
@@ -2895,7 +2938,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (unlikely(bnx2x_tx_avail(bp, txdata) <
 		     (skb_shinfo(skb)->nr_frags + 3))) {
-		fp->eth_q_stats.driver_xoff++;
+		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
 		netif_tx_stop_queue(txq);
 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
 		return NETDEV_TX_BUSY;
@@ -3177,7 +3220,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		 * fp->bd_tx_cons */
 		smp_mb();
 
-		fp->eth_q_stats.driver_xoff++;
+		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
 		if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
 			netif_tx_wake_queue(txq);
 	}
@@ -3243,7 +3286,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
 	/* configure traffic class to transmission queue mapping */
 	for (cos = 0; cos < bp->max_cos; cos++) {
 		count = BNX2X_NUM_ETH_QUEUES(bp);
-		offset = cos * MAX_TXQS_PER_COS;
+		offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
 		netdev_set_tc_queue(dev, cos, count, offset);
 		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
 		   "mapping tc %d to offset %d count %d\n",
@@ -3342,7 +3385,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
 	if (!skip_tx_queue(bp, fp_index)) {
 		/* fastpath tx rings: tx_buf tx_desc */
 		for_each_cos_in_tx_queue(fp, cos) {
-			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
 
 			DP(NETIF_MSG_IFDOWN,
 			   "freeing tx memory of fp %d cos %d cid %d\n",
@@ -3414,7 +3457,7 @@ static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
 			   cqe_ring_prod);
 	fp->rx_pkt = fp->rx_calls = 0;
 
-	fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
+	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
 
 	return i - failure_cnt;
 }
@@ -3499,7 +3542,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
 	if (!skip_tx_queue(bp, index)) {
 		/* fastpath tx rings: tx_buf tx_desc */
 		for_each_cos_in_tx_queue(fp, cos) {
-			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
 
 			DP(NETIF_MSG_IFUP,
 			   "allocating tx memory of fp %d cos %d\n",
@@ -3582,7 +3625,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
 #ifdef BCM_CNIC
 	if (!NO_FCOE(bp))
 		/* FCoE */
-		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
+		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
 			/* we will fail load process instead of mark
 			 * NO_FCOE_FLAG
 			 */
@@ -3607,7 +3650,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
 		 */
 
 		/* move FCoE fp even NO_FCOE_FLAG is on */
-		bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
+		bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
 #endif
 		bp->num_queues -= delta;
 		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
@@ -3619,7 +3662,11 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
 
 void bnx2x_free_mem_bp(struct bnx2x *bp)
 {
+	kfree(bp->fp->tpa_info);
 	kfree(bp->fp);
+	kfree(bp->sp_objs);
+	kfree(bp->fp_stats);
+	kfree(bp->bnx2x_txq);
 	kfree(bp->msix_table);
 	kfree(bp->ilt);
 }
@@ -3630,6 +3677,8 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
 	struct msix_entry *tbl;
 	struct bnx2x_ilt *ilt;
 	int msix_table_size = 0;
+	int fp_array_size;
+	int i;
 
 	/*
 	 * The biggest MSI-X table we might need is as a maximum number of fast
@@ -3638,12 +3687,44 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
 	msix_table_size = bp->igu_sb_cnt + 1;
 
 	/* fp array: RSS plus CNIC related L2 queues */
-	fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
-		     sizeof(*fp), GFP_KERNEL);
+	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE;
+	BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
+
+	fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
 	if (!fp)
 		goto alloc_err;
+	for (i = 0; i < fp_array_size; i++) {
+		fp[i].tpa_info =
+			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
+				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
+		if (!(fp[i].tpa_info))
+			goto alloc_err;
+	}
+
 	bp->fp = fp;
 
+	/* allocate sp objs */
+	bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
+			      GFP_KERNEL);
+	if (!bp->sp_objs)
+		goto alloc_err;
+
+	/* allocate fp_stats */
+	bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
+			       GFP_KERNEL);
+	if (!bp->fp_stats)
+		goto alloc_err;
+
+	/* Allocate memory for the transmission queues array */
+	bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS;
+#ifdef BCM_CNIC
+	bp->bnx2x_txq_size++;
+#endif
+	bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size,
+				sizeof(struct bnx2x_fp_txdata), GFP_KERNEL);
+	if (!bp->bnx2x_txq)
+		goto alloc_err;
+
 	/* msix table */
 	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
 	if (!tbl)
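bnx2x_alloc_mem_bp() funnels every allocation failure to a single alloc_err label. A minimal user-space model of the allocate-array-then-per-element pattern with one unified cleanup path (calloc/free standing in for kcalloc/kfree; names invented for illustration, not driver structures):

#include <stdlib.h>
#include <stdio.h>

struct fp { void *tpa_info; };

int alloc_all(struct fp **out, int n)
{
	struct fp *fp = calloc(n, sizeof(*fp));
	if (!fp)
		goto err;
	for (int i = 0; i < n; i++) {
		fp[i].tpa_info = calloc(8, sizeof(int));
		if (!fp[i].tpa_info)
			goto err;
	}
	*out = fp;
	return 0;
err:	/* free() accepts NULL, so a partial unwind is safe */
	if (fp)
		for (int i = 0; i < n; i++)
			free(fp[i].tpa_info);
	free(fp);
	return -1;
}

int main(void)
{
	struct fp *fp;
	if (!alloc_all(&fp, 4))
		printf("allocated 4 fastpath entries\n");
	return 0;
}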
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 7cd99b75347..daa894bd772 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -29,6 +29,7 @@
 extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
 
 extern int num_queues;
+extern int int_mode;
 
 /************************ Macros ********************************/
 #define BNX2X_PCI_FREE(x, y, size) \
@@ -94,7 +95,7 @@ void bnx2x_send_unload_done(struct bnx2x *bp);
  * @config_hash: re-configure RSS hash keys configuration
  */
 int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
-			u8 *ind_table, bool config_hash);
+			bool config_hash);
 
 /**
  * bnx2x__init_func_obj - init function object
@@ -244,6 +245,14 @@ int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
  * @bp:		driver handle
  */
 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
+
+/**
+ * bnx2x_setup_cnic_info - provides cnic with updated info
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_setup_cnic_info(struct bnx2x *bp);
+
 #endif
 
 /**
@@ -409,7 +418,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp);
  *
  * @bp:		driver handle
  */
-void bnx2x_dcbx_init(struct bnx2x *bp);
+void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem);
 
 /**
  * bnx2x_set_power_state - set power state to the requested value.
@@ -487,7 +496,7 @@ void bnx2x_netif_start(struct bnx2x *bp);
  * fills msix_table, requests vectors, updates num_queues
  * according to number of available vectors.
  */
-int __devinit bnx2x_enable_msix(struct bnx2x *bp);
+int bnx2x_enable_msix(struct bnx2x *bp);
 
 /**
  * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly
@@ -728,7 +737,7 @@ static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
 {
 	u8 cos;
 	for_each_cos_in_tx_queue(fp, cos)
-		if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
+		if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
 			return true;
 	return false;
 }
@@ -780,8 +789,10 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
 {
 	int i;
 
+	bp->num_napi_queues = bp->num_queues;
+
 	/* Add NAPI objects */
-	for_each_rx_queue(bp, i)
+	for_each_napi_rx_queue(bp, i)
 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
 }
@@ -790,10 +801,12 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_rx_queue(bp, i)
+	for_each_napi_rx_queue(bp, i)
 		netif_napi_del(&bnx2x_fp(bp, i, napi));
 }
 
+void bnx2x_set_int_mode(struct bnx2x *bp);
+
 static inline void bnx2x_disable_msi(struct bnx2x *bp)
 {
 	if (bp->flags & USING_MSIX_FLAG) {
@@ -865,11 +878,9 @@ static inline int func_by_vn(struct bnx2x *bp, int vn)
 	return 2 * vn + BP_PORT(bp);
 }
 
-static inline int bnx2x_config_rss_eth(struct bnx2x *bp, u8 *ind_table,
-					bool config_hash)
+static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
 {
-	return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, ind_table,
-				   config_hash);
+	return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash);
 }
 
 /**
@@ -975,8 +986,8 @@ static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
 	struct bnx2x *bp = fp->bp;
 
 	/* Configure classification DBs */
-	bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
-			   BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
+	bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
+			   fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
 			   bnx2x_sp_mapping(bp, mac_rdata),
 			   BNX2X_FILTER_MAC_PENDING,
 			   &bp->sp_state, obj_type,
@@ -1068,12 +1079,14 @@ static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
 }
 
 static inline void bnx2x_init_txdata(struct bnx2x *bp,
-	struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index,
-	__le16 *tx_cons_sb)
+				     struct bnx2x_fp_txdata *txdata, u32 cid,
+				     int txq_index, __le16 *tx_cons_sb,
+				     struct bnx2x_fastpath *fp)
 {
 	txdata->cid = cid;
 	txdata->txq_index = txq_index;
 	txdata->tx_cons_sb = tx_cons_sb;
+	txdata->parent_fp = fp;
 
 	DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
 	   txdata->cid, txdata->txq_index);
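txdata->parent_fp gives every tx queue a back-pointer to its owning fastpath, which is what lets the xmit hunks earlier in this patch reach per-queue stats via bnx2x_fp_qstats(bp, txdata->parent_fp) when only the txdata is in hand. A toy illustration of the pattern (invented names, not the driver's structures):

#include <stdio.h>

struct fastpath { int index; long driver_xoff; };
struct txdata { int cid; struct fastpath *parent_fp; };

static void tx_stop(struct txdata *t)
{
	/* only the txdata is at hand, but the parent is reachable */
	t->parent_fp->driver_xoff++;
}

int main(void)
{
	struct fastpath fp = { .index = 0 };
	struct txdata txd = { .cid = 17, .parent_fp = &fp };

	tx_stop(&txd);
	printf("fp %d driver_xoff=%ld\n", fp.index, fp.driver_xoff);
	return 0;
}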
@@ -1107,18 +1120,13 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
 	bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
 	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
 						     BNX2X_FCOE_ETH_CL_ID_IDX);
-	/** Current BNX2X_FCOE_ETH_CID deffinition implies not more than
-	 *  16 ETH clients per function when CNIC is enabled!
-	 *
-	 *  Fix it ASAP!!!
-	 */
-	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
+	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
 	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
 	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
 	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
-
-	bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
-			  fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);
+	bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
+			  fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
+			  fp);
 
 	DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
 
@@ -1135,8 +1143,8 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
 	/* No multi-CoS for FCoE L2 client */
 	BUG_ON(fp->max_cos != 1);
 
-	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1,
-			     BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
+			     &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
 			     bnx2x_sp_mapping(bp, q_rdata), q_type);
 
 	DP(NETIF_MSG_IFUP,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 4f9244bd753..8a73374e52a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -972,23 +972,26 @@ void bnx2x_dcbx_init_params(struct bnx2x *bp)
 	bp->dcbx_config_params.admin_default_priority = 0;
 }
 
-void bnx2x_dcbx_init(struct bnx2x *bp)
+void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem)
 {
 	u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE;
 
+	/* only PMF can send ADMIN msg to MFW in old MFW versions */
+	if ((!bp->port.pmf) && (!(bp->flags & BC_SUPPORTS_DCBX_MSG_NON_PMF)))
+		return;
+
 	if (bp->dcbx_enabled <= 0)
 		return;
 
 	/* validate:
 	 * chip of good for dcbx version,
 	 * dcb is wanted
-	 * the function is pmf
 	 * shmem2 contains DCBX support fields
 	 */
 	DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n",
 	   bp->dcb_state, bp->port.pmf);
 
-	if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
+	if (bp->dcb_state == BNX2X_DCB_STATE_ON &&
 	    SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
 		dcbx_lldp_params_offset =
 			SHMEM2_RD(bp, dcbx_lldp_params_offset);
@@ -999,12 +1002,23 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
 		bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
 
 		if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
-			bnx2x_dcbx_admin_mib_updated_params(bp,
-				dcbx_lldp_params_offset);
+			/* need HW lock to avoid scenario of two drivers
+			 * writing in parallel to shmem
+			 */
+			bnx2x_acquire_hw_lock(bp,
+					      HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
+			if (update_shmem)
+				bnx2x_dcbx_admin_mib_updated_params(bp,
+					dcbx_lldp_params_offset);
 
 			/* Let HW start negotiation */
 			bnx2x_fw_command(bp,
 					 DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
+			/* release HW lock only after MFW acks that it finished
+			 * reading values from shmem
+			 */
+			bnx2x_release_hw_lock(bp,
+					      HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
 		}
 	}
 }
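The sequence above is a publish-under-lock protocol: take the HW lock, optionally write the admin MIB to shmem, kick the MFW, and release only after the firmware command returns (i.e., after the consumer has read the values). A rough pthread analogy of the ordering — this models only the locking discipline, not the MCP interface, and every name in it is invented:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t shmem_lock = PTHREAD_MUTEX_INITIALIZER;
static int shmem_params;

static void consumer_reads(void) { printf("MFW read %d\n", shmem_params); }

static void dcbx_publish(int update, int params)
{
	pthread_mutex_lock(&shmem_lock);	/* bnx2x_acquire_hw_lock */
	if (update)
		shmem_params = params;		/* admin MIB write */
	consumer_reads();			/* fw command; blocks until ack */
	pthread_mutex_unlock(&shmem_lock);	/* release after the ack */
}

int main(void)
{
	dcbx_publish(1, 42);
	return 0;
}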
@@ -2063,10 +2077,8 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
 			   "Handling parity error recovery. Try again later\n");
 		return 1;
 	}
-	if (netif_running(bp->dev)) {
-		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
-		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
-	}
+	if (netif_running(bp->dev))
+		bnx2x_dcbx_init(bp, true);
 	DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc);
 	if (rc)
 		return 1;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index bf30e282928..70c0881ce5a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -826,7 +826,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
 		 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
 	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
 	info->n_stats = BNX2X_NUM_STATS;
-	info->testinfo_len = BNX2X_NUM_TESTS;
+	info->testinfo_len = BNX2X_NUM_TESTS(bp);
 	info->eedump_len = bp->common.flash_size;
 	info->regdump_len = bnx2x_get_regs_len(dev);
 }
@@ -1533,16 +1533,14 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
 	return 0;
 }
 
-static const struct {
-	char string[ETH_GSTRING_LEN];
-} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
-	{ "register_test (offline)" },
-	{ "memory_test (offline)" },
-	{ "loopback_test (offline)" },
-	{ "nvram_test (online)" },
-	{ "interrupt_test (online)" },
-	{ "link_test (online)" },
-	{ "idle check (online)" }
+char *bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF] = {
+	"register_test (offline)    ",
+	"memory_test (offline)      ",
+	"int_loopback_test (offline)",
+	"ext_loopback_test (offline)",
+	"nvram_test (online)        ",
+	"interrupt_test (online)    ",
+	"link_test (online)         "
 };
 
 static u32 bnx2x_eee_to_adv(u32 eee_adv)
@@ -1943,6 +1941,14 @@ static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
 
 		if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
 			DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n");
+
+		cnt = 1400;
+		while (!bp->link_vars.link_up && cnt--)
+			msleep(20);
+
+		if (cnt <= 0 && !bp->link_vars.link_up)
+			DP(BNX2X_MSG_ETHTOOL,
+			   "Timeout waiting for link init\n");
 	}
 }
 
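The added wait is a bounded poll: up to 1400 iterations of 20 ms (roughly 28 seconds) checking link_vars.link_up, with a debug message if the budget runs out. The same shape in stand-alone C (the condition and the iteration budget here are placeholders so the demo terminates quickly):

#include <stdio.h>
#include <time.h>

static int link_up;	/* would be set asynchronously in the driver */

int main(void)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 20 * 1000 * 1000 };
	int cnt = 5;	/* 1400 in the driver */

	while (!link_up && cnt--)
		nanosleep(&ts, NULL);	/* stands in for msleep(20) */

	if (cnt <= 0 && !link_up)
		printf("Timeout waiting for link init\n");
	return 0;
}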
@@ -1953,7 +1959,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
 	unsigned char *packet;
 	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
 	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
-	struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0];
+	struct bnx2x_fp_txdata *txdata = fp_tx->txdata_ptr[0];
 	u16 tx_start_idx, tx_idx;
 	u16 rx_start_idx, rx_idx;
 	u16 pkt_prod, bd_prod;
@@ -1968,13 +1974,16 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
 	u16 len;
 	int rc = -ENODEV;
 	u8 *data;
-	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
+	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev,
+						       txdata->txq_index);
 
 	/* check the loopback mode */
 	switch (loopback_mode) {
 	case BNX2X_PHY_LOOPBACK:
-		if (bp->link_params.loopback_mode != LOOPBACK_XGXS)
+		if (bp->link_params.loopback_mode != LOOPBACK_XGXS) {
+			DP(BNX2X_MSG_ETHTOOL, "PHY loopback not supported\n");
 			return -EINVAL;
+		}
 		break;
 	case BNX2X_MAC_LOOPBACK:
 		if (CHIP_IS_E3(bp)) {
@@ -1991,6 +2000,13 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
 
 		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 		break;
+	case BNX2X_EXT_LOOPBACK:
+		if (bp->link_params.loopback_mode != LOOPBACK_EXT) {
+			DP(BNX2X_MSG_ETHTOOL,
+			   "Can't configure external loopback\n");
+			return -EINVAL;
+		}
+		break;
 	default:
 		DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
 		return -EINVAL;
@@ -2162,6 +2178,38 @@ static int bnx2x_test_loopback(struct bnx2x *bp)
 	return rc;
 }
 
+static int bnx2x_test_ext_loopback(struct bnx2x *bp)
+{
+	int rc;
+	u8 is_serdes =
+		(bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
+
+	if (BP_NOMCP(bp))
+		return -ENODEV;
+
+	if (!netif_running(bp->dev))
+		return BNX2X_EXT_LOOPBACK_FAILED;
+
+	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+	rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT);
+	if (rc) {
+		DP(BNX2X_MSG_ETHTOOL,
+		   "Can't perform self-test, nic_load (for external lb) failed\n");
+		return -ENODEV;
+	}
+	bnx2x_wait_for_link(bp, 1, is_serdes);
+
+	bnx2x_netif_stop(bp, 1);
+
+	rc = bnx2x_run_loopback(bp, BNX2X_EXT_LOOPBACK);
+	if (rc)
+		DP(BNX2X_MSG_ETHTOOL, "EXT loopback failed (res %d)\n", rc);
+
+	bnx2x_netif_start(bp);
+
+	return rc;
+}
+
 #define CRC32_RESIDUAL			0xdebb20e3
 
 static int bnx2x_test_nvram(struct bnx2x *bp)
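bnx2x_test_ext_loopback() is destructive: it tears the nic down, reloads it in LOAD_LOOPBACK_EXT mode (which, per the bnx2x_initial_phy_init() hunk later in this patch, forces link_params.loopback_mode to LOOPBACK_EXT), quiesces the datapath, runs one frame through the external loop, and restarts. A skeleton of that orchestration with stubbed steps — every function below is an invented stand-in, not a driver API:

#include <stdio.h>

static int nic_unload(void) { puts("unload"); return 0; }
static int nic_load_ext_lb(void) { puts("load (external loopback)"); return 0; }
static void wait_for_link(void) { puts("wait for link"); }
static void netif_stop(void) { puts("quiesce datapath"); }
static int run_loopback(void) { puts("send/receive one frame"); return 0; }
static void netif_start(void) { puts("resume datapath"); }

int main(void)
{
	int rc;

	nic_unload();
	if (nic_load_ext_lb())
		return -1;	/* can't run the test without a loaded nic */
	wait_for_link();
	netif_stop();
	rc = run_loopback();
	netif_start();
	return rc;
}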
@@ -2244,7 +2292,7 @@ static int bnx2x_test_intr(struct bnx2x *bp)
 		return -ENODEV;
 	}
 
-	params.q_obj = &bp->fp->q_obj;
+	params.q_obj = &bp->sp_objs->q_obj;
 	params.cmd = BNX2X_Q_CMD_EMPTY;
 
 	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
@@ -2257,24 +2305,31 @@ static void bnx2x_self_test(struct net_device *dev,
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	u8 is_serdes;
+	int rc;
+
 	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
 		netdev_err(bp->dev,
 			   "Handling parity error recovery. Try again later\n");
 		etest->flags |= ETH_TEST_FL_FAILED;
 		return;
 	}
+	DP(BNX2X_MSG_ETHTOOL,
+	   "Self-test command parameters: offline = %d, external_lb = %d\n",
+	   (etest->flags & ETH_TEST_FL_OFFLINE),
+	   (etest->flags & ETH_TEST_FL_EXTERNAL_LB)>>2);
 
-	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
+	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
 
-	if (!netif_running(dev))
+	if (!netif_running(dev)) {
+		DP(BNX2X_MSG_ETHTOOL,
+		   "Can't perform self-test when interface is down\n");
 		return;
+	}
 
-	/* offline tests are not supported in MF mode */
-	if (IS_MF(bp))
-		etest->flags &= ~ETH_TEST_FL_OFFLINE;
 	is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
 
-	if (etest->flags & ETH_TEST_FL_OFFLINE) {
+	/* offline tests are not supported in MF mode */
+	if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) {
 		int port = BP_PORT(bp);
 		u32 val;
 		u8 link_up;
@@ -2287,7 +2342,14 @@ static void bnx2x_self_test(struct net_device *dev,
 		link_up = bp->link_vars.link_up;
 
 		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
-		bnx2x_nic_load(bp, LOAD_DIAG);
+		rc = bnx2x_nic_load(bp, LOAD_DIAG);
+		if (rc) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			DP(BNX2X_MSG_ETHTOOL,
+			   "Can't perform self-test, nic_load (for offline) failed\n");
+			return;
+		}
+
 		/* wait until link state is restored */
 		bnx2x_wait_for_link(bp, 1, is_serdes);
 
@@ -2300,30 +2362,51 @@ static void bnx2x_self_test(struct net_device *dev,
 			etest->flags |= ETH_TEST_FL_FAILED;
 		}
 
-		buf[2] = bnx2x_test_loopback(bp);
+		buf[2] = bnx2x_test_loopback(bp); /* internal LB */
 		if (buf[2] != 0)
 			etest->flags |= ETH_TEST_FL_FAILED;
 
+		if (etest->flags & ETH_TEST_FL_EXTERNAL_LB) {
+			buf[3] = bnx2x_test_ext_loopback(bp); /* external LB */
+			if (buf[3] != 0)
+				etest->flags |= ETH_TEST_FL_FAILED;
+			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+		}
+
 		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
 
 		/* restore input for TX port IF */
 		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
-
-		bnx2x_nic_load(bp, LOAD_NORMAL);
+		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+		if (rc) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			DP(BNX2X_MSG_ETHTOOL,
+			   "Can't perform self-test, nic_load (for online) failed\n");
+			return;
+		}
 		/* wait until link state is restored */
 		bnx2x_wait_for_link(bp, link_up, is_serdes);
 	}
 	if (bnx2x_test_nvram(bp) != 0) {
-		buf[3] = 1;
+		if (!IS_MF(bp))
+			buf[4] = 1;
+		else
+			buf[0] = 1;
 		etest->flags |= ETH_TEST_FL_FAILED;
 	}
 	if (bnx2x_test_intr(bp) != 0) {
-		buf[4] = 1;
+		if (!IS_MF(bp))
+			buf[5] = 1;
+		else
+			buf[1] = 1;
 		etest->flags |= ETH_TEST_FL_FAILED;
 	}
 
 	if (bnx2x_link_test(bp, is_serdes) != 0) {
-		buf[5] = 1;
+		if (!IS_MF(bp))
+			buf[6] = 1;
+		else
+			buf[2] = 1;
 		etest->flags |= ETH_TEST_FL_FAILED;
 	}
 
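Because the first four entries of the single-function test table are offline tests that MF mode skips, the result buffer is indexed two ways: in SF mode the online results land at buf[4..6], in MF mode they start at buf[0]. A quick model of that indexing (values illustrative):

#include <stdio.h>

int main(void)
{
	const char *online[] = { "nvram", "interrupt", "link" };

	for (int is_mf = 0; is_mf <= 1; is_mf++)
		for (int i = 0; i < 3; i++)
			printf("%s mode: %s result -> buf[%d]\n",
			       is_mf ? "MF" : "SF", online[i],
			       is_mf ? i : i + 4);
	return 0;
}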
@@ -2368,7 +2451,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
 		return num_stats;
 
 	case ETH_SS_TEST:
-		return BNX2X_NUM_TESTS;
+		return BNX2X_NUM_TESTS(bp);
 
 	default:
 		return -EINVAL;
@@ -2378,7 +2461,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 {
 	struct bnx2x *bp = netdev_priv(dev);
-	int i, j, k;
+	int i, j, k, offset, start;
 	char queue_name[MAX_QUEUE_NAME_LEN+1];
 
 	switch (stringset) {
@@ -2409,7 +2492,17 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 		break;
 
 	case ETH_SS_TEST:
-		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
+		/* First 4 tests cannot be done in MF mode */
+		if (!IS_MF(bp))
+			start = 0;
+		else
+			start = 4;
+		for (i = 0, j = start; j < (start + BNX2X_NUM_TESTS(bp));
+		     i++, j++) {
+			offset = sprintf(buf+32*i, "%s",
+					 bnx2x_tests_str_arr[j]);
+			*(buf+offset) = '\0';
+		}
 		break;
 	}
 }
@@ -2423,7 +2516,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
 
 	if (is_multi(bp)) {
 		for_each_eth_queue(bp, i) {
-			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
+			hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats;
 			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
 				if (bnx2x_q_stats_arr[j].size == 0) {
 					/* skip this counter */
@@ -2507,6 +2600,41 @@ static int bnx2x_set_phys_id(struct net_device *dev,
 	return 0;
 }
 
+static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+{
+
+	switch (info->flow_type) {
+	case TCP_V4_FLOW:
+	case TCP_V6_FLOW:
+		info->data = RXH_IP_SRC | RXH_IP_DST |
+			     RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		break;
+	case UDP_V4_FLOW:
+		if (bp->rss_conf_obj.udp_rss_v4)
+			info->data = RXH_IP_SRC | RXH_IP_DST |
+				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		else
+			info->data = RXH_IP_SRC | RXH_IP_DST;
+		break;
+	case UDP_V6_FLOW:
+		if (bp->rss_conf_obj.udp_rss_v6)
+			info->data = RXH_IP_SRC | RXH_IP_DST |
+				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		else
+			info->data = RXH_IP_SRC | RXH_IP_DST;
+		break;
+	case IPV4_FLOW:
+	case IPV6_FLOW:
+		info->data = RXH_IP_SRC | RXH_IP_DST;
+		break;
+	default:
+		info->data = 0;
+		break;
+	}
+
+	return 0;
+}
+
 static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
 			   u32 *rules __always_unused)
 {
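The RXH_* values reported above are plain bit flags: a 2-tuple hash is RXH_IP_SRC | RXH_IP_DST, and the 4-tuple variant adds the two L4 port halves. A stand-alone illustration using the same bit values the UAPI ethtool header defines:

#include <stdio.h>

#define RXH_IP_SRC	(1 << 4)
#define RXH_IP_DST	(1 << 5)
#define RXH_L4_B_0_1	(1 << 6)	/* L4 bytes 0-1 (source port) */
#define RXH_L4_B_2_3	(1 << 7)	/* L4 bytes 2-3 (destination port) */

int main(void)
{
	unsigned long two_tuple = RXH_IP_SRC | RXH_IP_DST;
	unsigned long four_tuple = two_tuple | RXH_L4_B_0_1 | RXH_L4_B_2_3;

	printf("2-tuple mask 0x%lx, 4-tuple mask 0x%lx\n",
	       two_tuple, four_tuple);
	return 0;
}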
@@ -2516,7 +2644,102 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
 	case ETHTOOL_GRXRINGS:
 		info->data = BNX2X_NUM_ETH_QUEUES(bp);
 		return 0;
+	case ETHTOOL_GRXFH:
+		return bnx2x_get_rss_flags(bp, info);
+	default:
+		DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
+		return -EOPNOTSUPP;
+	}
+}
+
+static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+{
+	int udp_rss_requested;
+
+	DP(BNX2X_MSG_ETHTOOL,
+	   "Set rss flags command parameters: flow type = %d, data = %llu\n",
+	   info->flow_type, info->data);
+
+	switch (info->flow_type) {
+	case TCP_V4_FLOW:
+	case TCP_V6_FLOW:
+		/* For TCP only 4-tupple hash is supported */
+		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
+				  RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+			DP(BNX2X_MSG_ETHTOOL,
+			   "Command parameters not supported\n");
+			return -EINVAL;
+		} else {
+			return 0;
+		}
+
+	case UDP_V4_FLOW:
+	case UDP_V6_FLOW:
+		/* For UDP either 2-tupple hash or 4-tupple hash is supported */
+		if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+				   RXH_L4_B_0_1 | RXH_L4_B_2_3))
+			udp_rss_requested = 1;
+		else if (info->data == (RXH_IP_SRC | RXH_IP_DST))
+			udp_rss_requested = 0;
+		else
+			return -EINVAL;
+		if ((info->flow_type == UDP_V4_FLOW) &&
+		    (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
+			bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
+			DP(BNX2X_MSG_ETHTOOL,
+			   "rss re-configured, UDP 4-tupple %s\n",
+			   udp_rss_requested ? "enabled" : "disabled");
+			return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+		} else if ((info->flow_type == UDP_V6_FLOW) &&
+			   (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
+			bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
+			DP(BNX2X_MSG_ETHTOOL,
+			   "rss re-configured, UDP 4-tupple %s\n",
+			   udp_rss_requested ? "enabled" : "disabled");
+			return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+		} else {
+			return 0;
+		}
+	case IPV4_FLOW:
+	case IPV6_FLOW:
+		/* For IP only 2-tupple hash is supported */
+		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
+			DP(BNX2X_MSG_ETHTOOL,
+			   "Command parameters not supported\n");
+			return -EINVAL;
+		} else {
+			return 0;
+		}
+	case SCTP_V4_FLOW:
+	case AH_ESP_V4_FLOW:
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+	case SCTP_V6_FLOW:
+	case AH_ESP_V6_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+	case IP_USER_FLOW:
+	case ETHER_FLOW:
+		/* RSS is not supported for these protocols */
+		if (info->data) {
+			DP(BNX2X_MSG_ETHTOOL,
+			   "Command parameters not supported\n");
+			return -EINVAL;
+		} else {
+			return 0;
+		}
+	default:
+		return -EINVAL;
+	}
+}
 
+static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	switch (info->cmd) {
+	case ETHTOOL_SRXFH:
+		return bnx2x_set_rss_flags(bp, info);
 	default:
 		DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
 		return -EOPNOTSUPP;
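Note the validation idiom in bnx2x_set_rss_flags(): info->data ^ mask is non-zero exactly when the requested field set differs from the single supported mask, which makes an "exactly these bits" check a one-expression test. Demonstrated stand-alone (masks are arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned long supported = 0xf0;	/* the only accepted combination */
	unsigned long requests[] = { 0xf0, 0x30, 0xf1 };

	for (int i = 0; i < 3; i++)
		printf("request 0x%lx -> %s\n", requests[i],
		       (requests[i] ^ supported) ? "-EINVAL" : "ok");
	return 0;
}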
@@ -2556,7 +2779,6 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	size_t i;
-	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
 
 	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
 		/*
@@ -2568,10 +2790,88 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
 		 * align the received table to the Client ID of the leading RSS
 		 * queue
 		 */
-		ind_table[i] = indir[i] + bp->fp->cl_id;
+		bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
 	}
 
-	return bnx2x_config_rss_eth(bp, ind_table, false);
+	return bnx2x_config_rss_eth(bp, false);
+}
+
+/**
+ * bnx2x_get_channels - gets the number of RSS queues.
+ *
+ * @dev:		net device
+ * @channels:		returns the number of max / current queues
+ */
+static void bnx2x_get_channels(struct net_device *dev,
+			       struct ethtool_channels *channels)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	channels->max_combined = BNX2X_MAX_RSS_COUNT(bp);
+	channels->combined_count = BNX2X_NUM_ETH_QUEUES(bp);
+}
+
+/**
+ * bnx2x_change_num_queues - change the number of RSS queues.
+ *
+ * @bp:			bnx2x private structure
+ *
+ * Re-configure interrupt mode to get the new number of MSI-X
+ * vectors and re-add NAPI objects.
+ */
+static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
+{
+	bnx2x_del_all_napi(bp);
+	bnx2x_disable_msi(bp);
+	BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
+	bnx2x_set_int_mode(bp);
+	bnx2x_add_all_napi(bp);
+}
+
+/**
+ * bnx2x_set_channels - sets the number of RSS queues.
+ *
+ * @dev:		net device
+ * @channels:		includes the number of queues requested
+ */
+static int bnx2x_set_channels(struct net_device *dev,
+			      struct ethtool_channels *channels)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	DP(BNX2X_MSG_ETHTOOL,
+	   "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
+	   channels->rx_count, channels->tx_count, channels->other_count,
+	   channels->combined_count);
+
+	/* We don't support separate rx / tx channels.
+	 * We don't allow setting 'other' channels.
+	 */
+	if (channels->rx_count || channels->tx_count || channels->other_count
+	    || (channels->combined_count == 0) ||
+	    (channels->combined_count > BNX2X_MAX_RSS_COUNT(bp))) {
+		DP(BNX2X_MSG_ETHTOOL, "command parameters not supported\n");
+		return -EINVAL;
+	}
+
+	/* Check if there was a change in the active parameters */
+	if (channels->combined_count == BNX2X_NUM_ETH_QUEUES(bp)) {
+		DP(BNX2X_MSG_ETHTOOL, "No change in active parameters\n");
+		return 0;
+	}
+
+	/* Set the requested number of queues in bp context.
+	 * Note that the actual number of queues created during load may be
+	 * less than requested if memory is low.
+	 */
+	if (unlikely(!netif_running(dev))) {
+		bnx2x_change_num_queues(bp, channels->combined_count);
+		return 0;
+	}
+	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+	bnx2x_change_num_queues(bp, channels->combined_count);
+	return bnx2x_nic_load(bp, LOAD_NORMAL);
 }
 
 static const struct ethtool_ops bnx2x_ethtool_ops = {
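bnx2x_set_channels() accepts only a "combined" count between 1 and the RSS maximum and rejects any rx/tx/other split; when the interface is up it bounces the nic around the queue-count change. The validation reduces to the following check (toy bounds, not driver constants):

#include <stdio.h>

static int validate(int rx, int tx, int other, int combined, int max_rss)
{
	if (rx || tx || other || combined == 0 || combined > max_rss)
		return -1;	/* -EINVAL in the driver */
	return 0;
}

int main(void)
{
	printf("combined=8: %d\n", validate(0, 0, 0, 8, 16));
	printf("rx=4:       %d\n", validate(4, 0, 0, 8, 16));
	printf("combined=0: %d\n", validate(0, 0, 0, 0, 16));
	return 0;
}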
@@ -2601,9 +2901,12 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
 	.set_phys_id		= bnx2x_set_phys_id,
 	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
 	.get_rxnfc		= bnx2x_get_rxnfc,
+	.set_rxnfc		= bnx2x_set_rxnfc,
 	.get_rxfh_indir_size	= bnx2x_get_rxfh_indir_size,
 	.get_rxfh_indir		= bnx2x_get_rxfh_indir,
 	.set_rxfh_indir		= bnx2x_set_rxfh_indir,
+	.get_channels		= bnx2x_get_channels,
+	.set_channels		= bnx2x_set_channels,
 	.get_eee		= bnx2x_get_eee,
 	.set_eee		= bnx2x_set_eee,
 };
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index c61aa37298a..6b776309e0a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1253,6 +1253,7 @@ struct drv_func_mb {
 
 	#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG         0xb0000000
 	#define DRV_MSG_CODE_DCBX_PMF_DRV_OK            0xb2000000
+	#define REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF     0x00070401
 
 	#define DRV_MSG_CODE_VF_DISABLED_DONE           0xc0000000
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index a622bb7bf21..8ddc78e0d94 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -104,7 +104,7 @@ MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
 
 #define INT_MODE_INTx			1
 #define INT_MODE_MSI			2
-static int int_mode;
+int int_mode;
 module_param(int_mode, int, 0);
 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
 				"(1 INT#x; 2 MSI)");
@@ -758,7 +758,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
 		/* Tx */
 		for_each_cos_in_tx_queue(fp, cos)
 		{
-			txdata = fp->txdata[cos];
+			txdata = *fp->txdata_ptr[cos];
 			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
 				  i, txdata.tx_pkt_prod,
 				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
@@ -876,7 +876,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
 	for_each_tx_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 		for_each_cos_in_tx_queue(fp, cos) {
-			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
 
 			start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
 			end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
@@ -1583,7 +1583,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
 	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
 	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
-	struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj;
+	struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
 
 	DP(BNX2X_MSG_SP,
 	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
@@ -1710,7 +1710,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 			/* Handle Rx or Tx according to SB id */
 			prefetch(fp->rx_cons_sb);
 			for_each_cos_in_tx_queue(fp, cos)
-				prefetch(fp->txdata[cos].tx_cons_sb);
+				prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
 			prefetch(&fp->sb_running_index[SM_RX_ID]);
 			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
 			status &= ~mask;
@@ -2124,6 +2124,11 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
 		}
 	}
 
+	if (load_mode == LOAD_LOOPBACK_EXT) {
+		struct link_params *lp = &bp->link_params;
+		lp->loopback_mode = LOOPBACK_EXT;
+	}
+
 	rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 
 	bnx2x_release_phy_lock(bp);
@@ -2916,7 +2921,7 @@ static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
 	struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
 	u8 cos)
 {
-	txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping;
+	txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
 	txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
 	txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
 	txq_init->fw_sb_id = fp->fw_sb_id;
@@ -3030,9 +3035,9 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
 	memcpy(ether_stat->version, DRV_MODULE_VERSION,
 	       ETH_STAT_INFO_VERSION_LEN - 1);
 
-	bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj,
-					DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
-					ether_stat->mac_local);
+	bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
+					DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+					ether_stat->mac_local);
 
 	ether_stat->mtu_size = bp->dev->mtu;
 
@@ -3063,11 +3068,11 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
 	/* insert FCoE stats from ramrod response */
 	if (!NO_FCOE(bp)) {
 		struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
-			&bp->fw_stats_data->queue_stats[FCOE_IDX].
+			&bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
 			tstorm_queue_statistics;
 
 		struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
-			&bp->fw_stats_data->queue_stats[FCOE_IDX].
+			&bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
 			xstorm_queue_statistics;
 
 		struct fcoe_statistics_params *fw_fcoe_stat =
@@ -4623,11 +4628,11 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
 	case BNX2X_FILTER_MAC_PENDING:
 		DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
 #ifdef BCM_CNIC
-		if (cid == BNX2X_ISCSI_ETH_CID)
+		if (cid == BNX2X_ISCSI_ETH_CID(bp))
 			vlan_mac_obj = &bp->iscsi_l2_mac_obj;
 		else
 #endif
-			vlan_mac_obj = &bp->fp[cid].mac_obj;
+			vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
 
 		break;
 	case BNX2X_FILTER_MCAST_PENDING:
@@ -4725,7 +4730,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
 	for_each_eth_queue(bp, q) {
 		/* Set the appropriate Queue object */
 		fp = &bp->fp[q];
-		queue_params.q_obj = &fp->q_obj;
+		queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
 
 		/* send the ramrod */
 		rc = bnx2x_queue_state_change(bp, &queue_params);
@@ -4736,8 +4741,8 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
 
 #ifdef BCM_CNIC
 	if (!NO_FCOE(bp)) {
-		fp = &bp->fp[FCOE_IDX];
-		queue_params.q_obj = &fp->q_obj;
+		fp = &bp->fp[FCOE_IDX(bp)];
+		queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
 
 		/* clear pending completion bit */
 		__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
@@ -4769,11 +4774,11 @@ static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
 {
 	DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
 #ifdef BCM_CNIC
-	if (cid == BNX2X_FCOE_ETH_CID)
-		return &bnx2x_fcoe(bp, q_obj);
+	if (cid == BNX2X_FCOE_ETH_CID(bp))
+		return &bnx2x_fcoe_sp_obj(bp, q_obj);
 	else
 #endif
-		return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj);
+		return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
 }
 
 static void bnx2x_eq_int(struct bnx2x *bp)
@@ -5655,15 +5660,15 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
 
 	/* init tx data */
 	for_each_cos_in_tx_queue(fp, cos) {
-		bnx2x_init_txdata(bp, &fp->txdata[cos],
-				  CID_COS_TO_TX_ONLY_CID(fp->cid, cos),
-				  FP_COS_TO_TXQ(fp, cos),
-				  BNX2X_TX_SB_INDEX_BASE + cos);
-		cids[cos] = fp->txdata[cos].cid;
+		bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
+				  CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
+				  FP_COS_TO_TXQ(fp, cos, bp),
+				  BNX2X_TX_SB_INDEX_BASE + cos, fp);
+		cids[cos] = fp->txdata_ptr[cos]->cid;
 	}
 
-	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos,
-			     BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
+			     fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
 			     bnx2x_sp_mapping(bp, q_rdata), q_type);
 
 	/**
@@ -5714,7 +5719,7 @@ static void bnx2x_init_tx_rings(struct bnx2x *bp)
 
 	for_each_tx_queue(bp, i)
 		for_each_cos_in_tx_queue(&bp->fp[i], cos)
-			bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
+			bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
 }
 
 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
@@ -7063,12 +7068,10 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
 	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
 
 	for (i = 0; i < L2_ILT_LINES(bp); i++) {
-		ilt->lines[cdu_ilt_start + i].page =
-			bp->context.vcxt + (ILT_PAGE_CIDS * i);
+		ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
 		ilt->lines[cdu_ilt_start + i].page_mapping =
-			bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
-		/* cdu ilt pages are allocated manually so there's no need to
-		set the size */
+			bp->context[i].cxt_mapping;
+		ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
 	}
 	bnx2x_ilt_init_op(bp, INITOP_SET);
 
@@ -7335,6 +7338,8 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
 
 void bnx2x_free_mem(struct bnx2x *bp)
 {
+	int i;
+
 	/* fastpath */
 	bnx2x_free_fp_mem(bp);
 	/* end of fastpath */
@@ -7348,9 +7353,9 @@ void bnx2x_free_mem(struct bnx2x *bp)
 	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
 		       sizeof(struct bnx2x_slowpath));
 
-	BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
-		       bp->context.size);
-
+	for (i = 0; i < L2_ILT_LINES(bp); i++)
+		BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
+			       bp->context[i].size);
 	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
 
 	BNX2X_FREE(bp->ilt->lines);
@@ -7436,6 +7441,8 @@ alloc_mem_err:
 
 int bnx2x_alloc_mem(struct bnx2x *bp)
 {
+	int i, allocated, context_size;
+
 #ifdef BCM_CNIC
 	if (!CHIP_IS_E1x(bp))
 		/* size = the status block + ramrod buffers */
@@ -7465,11 +7472,29 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
 	if (bnx2x_alloc_fw_stats_mem(bp))
 		goto alloc_mem_err;
 
-	bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
-
-	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
-			bp->context.size);
+	/* Allocate memory for CDU context:
+	 * This memory is allocated separately and not in the generic ILT
+	 * functions because CDU differs in few aspects:
+	 * 1. There are multiple entities allocating memory for context -
+	 * 'regular' driver, CNIC and SRIOV driver. Each separately controls
+	 * its own ILT lines.
+	 * 2. Since CDU page-size is not a single 4KB page (which is the case
+	 * for the other ILT clients), to be efficient we want to support
+	 * allocation of sub-page-size in the last entry.
+	 * 3. Context pointers are used by the driver to pass to FW / update
+	 * the context (for the other ILT clients the pointers are used just to
+	 * free the memory during unload).
+	 */
+	context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
 
+	for (i = 0, allocated = 0; allocated < context_size; i++) {
+		bp->context[i].size = min(CDU_ILT_PAGE_SZ,
+					  (context_size - allocated));
+		BNX2X_PCI_ALLOC(bp->context[i].vcxt,
+				&bp->context[i].cxt_mapping,
+				bp->context[i].size);
+		allocated += bp->context[i].size;
+	}
 	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
 
 	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
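The loop above carves context_size into CDU_ILT_PAGE_SZ chunks, letting the final chunk be smaller via min() so the last ILT line does not waste most of a page. The arithmetic in isolation (sizes made up for illustration):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int page_sz = 4096;	/* stands in for CDU_ILT_PAGE_SZ */
	int context_size = 10000;

	for (int i = 0, allocated = 0; allocated < context_size; i++) {
		int sz = MIN(page_sz, context_size - allocated);
		printf("context[%d].size = %d\n", i, sz);
		allocated += sz;
	}
	return 0;	/* prints 4096, 4096, 1808 */
}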
@@ -7571,8 +7596,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
 
 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 	/* Eth MAC is set on RSS leading client (fp[0]) */
-	return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set,
-				 BNX2X_ETH_MAC, &ramrod_flags);
+	return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj,
+				 set, BNX2X_ETH_MAC, &ramrod_flags);
 }
 
 int bnx2x_setup_leading(struct bnx2x *bp)
@@ -7587,7 +7612,7 @@ int bnx2x_setup_leading(struct bnx2x *bp)
  *
  * @bp:		driver handle
  *
 * In case of MSI-X it will also try to enable MSI-X.
  */
-static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
+void bnx2x_set_int_mode(struct bnx2x *bp)
 {
 	switch (int_mode) {
 	case INT_MODE_MSI:
@@ -7598,11 +7623,6 @@ void bnx2x_set_int_mode(struct bnx2x *bp)
 		BNX2X_DEV_INFO("set number of queues to 1\n");
 		break;
 	default:
-		/* Set number of queues for MSI-X mode */
-		bnx2x_set_num_queues(bp);
-
-		BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
-
 		/* if we can't use MSI-X we only need one fp,
 		 * so try to enable MSI-X with the requested number of fp's
 		 * and fallback to MSI or legacy INTx with one fp
@@ -7743,6 +7763,8 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
 {
 
 	u8 cos;
+	int cxt_index, cxt_offset;
+
 	/* FCoE Queue uses Default SB, thus has no HC capabilities */
 	if (!IS_FCOE_FP(fp)) {
 		__set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
@@ -7779,9 +7801,13 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
 			    fp->index, init_params->max_cos);
 
 	/* set the context pointers queue object */
-	for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++)
+	for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
+		cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
+		cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
+				ILT_PAGE_CIDS);
 		init_params->cxts[cos] =
-			&bp->context.vcxt[fp->txdata[cos].cid].eth;
+			&bp->context[cxt_index].vcxt[cxt_offset].eth;
+	}
 }
 
 int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
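With the context now split across pages, a connection id is located by a divide/remainder pair: cxt_index selects the page and cxt_offset the entry within it (the driver writes the remainder as cid - index * ILT_PAGE_CIDS). Stand-alone, with an illustrative page size:

#include <stdio.h>

int main(void)
{
	int ilt_page_cids = 128;	/* stands in for ILT_PAGE_CIDS */
	int cids[] = { 5, 128, 300 };

	for (int i = 0; i < 3; i++) {
		int cid = cids[i];
		int cxt_index = cid / ilt_page_cids;
		int cxt_offset = cid - cxt_index * ilt_page_cids;
		printf("cid %d -> page %d offset %d\n",
		       cid, cxt_index, cxt_offset);
	}
	return 0;
}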
@@ -7846,7 +7872,7 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
 		     IGU_INT_ENABLE, 0);
 
-	q_params.q_obj = &fp->q_obj;
+	q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
 	/* We want to wait for completion in this context */
 	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 
@@ -7919,7 +7945,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
 
 	DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
 
-	q_params.q_obj = &fp->q_obj;
+	q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
 	/* We want to wait for completion in this context */
 	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 
@@ -7930,7 +7956,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
 	     tx_index++){
 
 		/* ascertain this is a normal queue*/
-		txdata = &fp->txdata[tx_index];
+		txdata = fp->txdata_ptr[tx_index];
 
 		DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
 		   txdata->txq_index);
@@ -8297,7 +8323,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
 		for_each_cos_in_tx_queue(fp, cos)
-			rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]);
+			rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
 #ifdef BNX2X_STOP_ON_ERROR
 		if (rc)
 			return;
@@ -8308,12 +8334,13 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
8308 usleep_range(1000, 1000); 8334 usleep_range(1000, 1000);
8309 8335
8310 /* Clean all ETH MACs */ 8336 /* Clean all ETH MACs */
-	rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false);
+	rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
+				false);
8312 if (rc < 0) 8339 if (rc < 0)
8313 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc); 8340 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
8314 8341
8315 /* Clean up UC list */ 8342 /* Clean up UC list */
-	rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
+	rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
 				true);
8318 if (rc < 0) 8345 if (rc < 0)
8319 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n", 8346 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
@@ -9705,6 +9732,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9705 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? 9732 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
9706 BC_SUPPORTS_PFC_STATS : 0; 9733 BC_SUPPORTS_PFC_STATS : 0;
9707 9734
9735 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
9736 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
9708 boot_mode = SHMEM_RD(bp, 9737 boot_mode = SHMEM_RD(bp,
9709 dev_info.port_feature_config[BP_PORT(bp)].mba_config) & 9738 dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
9710 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; 9739 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
@@ -11018,7 +11047,7 @@ static int bnx2x_set_uc_list(struct bnx2x *bp)
11018 int rc; 11047 int rc;
11019 struct net_device *dev = bp->dev; 11048 struct net_device *dev = bp->dev;
11020 struct netdev_hw_addr *ha; 11049 struct netdev_hw_addr *ha;
-	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
11022 unsigned long ramrod_flags = 0; 11051 unsigned long ramrod_flags = 0;
11023 11052
11024 /* First schedule a cleanup up of old configuration */ 11053 /* First schedule a cleanup up of old configuration */
@@ -11693,7 +11722,7 @@ void bnx2x__init_func_obj(struct bnx2x *bp)
11693/* must be called after sriov-enable */ 11722/* must be called after sriov-enable */
11694static int bnx2x_set_qm_cid_count(struct bnx2x *bp) 11723static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
11695{ 11724{
-	int cid_count = BNX2X_L2_CID_COUNT(bp);
+	int cid_count = BNX2X_L2_MAX_CID(bp);
11697 11726
11698#ifdef BCM_CNIC 11727#ifdef BCM_CNIC
11699 cid_count += CNIC_CID_MAX; 11728 cid_count += CNIC_CID_MAX;
@@ -11738,7 +11767,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11738 struct bnx2x *bp; 11767 struct bnx2x *bp;
11739 int pcie_width, pcie_speed; 11768 int pcie_width, pcie_speed;
11740 int rc, max_non_def_sbs; 11769 int rc, max_non_def_sbs;
-	int rx_count, tx_count, rss_count;
+	int rx_count, tx_count, rss_count, doorbell_size;
11742 /* 11771 /*
11743 * An estimated maximum supported CoS number according to the chip 11772 * An estimated maximum supported CoS number according to the chip
11744 * version. 11773 * version.
@@ -11781,13 +11810,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11781 11810
11782 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev); 11811 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
11783 11812
11784 /* !!! FIXME !!!
11785 * Do not allow the maximum SB count to grow above 16
11786 * since Special CIDs starts from 16*BNX2X_MULTI_TX_COS=48.
11787 * We will use the FP_SB_MAX_E1x macro for this matter.
11788 */
11789 max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs);
11790
11791 WARN_ON(!max_non_def_sbs); 11813 WARN_ON(!max_non_def_sbs);
11792 11814
11793 /* Maximum number of RSS queues: one IGU SB goes to CNIC */ 11815 /* Maximum number of RSS queues: one IGU SB goes to CNIC */
@@ -11798,9 +11820,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11798 11820
11799 /* 11821 /*
11800 * Maximum number of netdev Tx queues: 11822 * Maximum number of netdev Tx queues:
11801 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 11823 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
11802 */ 11824 */
-	tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT;
+	tx_count = rss_count * max_cos_est + FCOE_PRESENT;
11804 11826
11805 /* dev zeroed in init_etherdev */ 11827 /* dev zeroed in init_etherdev */
11806 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count); 11828 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
@@ -11809,9 +11831,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11809 11831
11810 bp = netdev_priv(dev); 11832 bp = netdev_priv(dev);
11811 11833
11812 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
11813 tx_count, rx_count);
11814
11815 bp->igu_sb_cnt = max_non_def_sbs; 11834 bp->igu_sb_cnt = max_non_def_sbs;
11816 bp->msg_enable = debug; 11835 bp->msg_enable = debug;
11817 pci_set_drvdata(pdev, dev); 11836 pci_set_drvdata(pdev, dev);
@@ -11824,6 +11843,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11824 11843
11825 BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs); 11844 BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
11826 11845
11846 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
11847 tx_count, rx_count);
11848
11827 rc = bnx2x_init_bp(bp); 11849 rc = bnx2x_init_bp(bp);
11828 if (rc) 11850 if (rc)
11829 goto init_one_exit; 11851 goto init_one_exit;
@@ -11832,9 +11854,15 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11832 * Map doorbels here as we need the real value of bp->max_cos which 11854 * Map doorbels here as we need the real value of bp->max_cos which
11833 * is initialized in bnx2x_init_bp(). 11855 * is initialized in bnx2x_init_bp().
11834 */ 11856 */
11857 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
11858 if (doorbell_size > pci_resource_len(pdev, 2)) {
11859 dev_err(&bp->pdev->dev,
11860 "Cannot map doorbells, bar size too small, aborting\n");
11861 rc = -ENOMEM;
11862 goto init_one_exit;
11863 }
 	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
-					min_t(u64, BNX2X_DB_SIZE(bp),
-					      pci_resource_len(pdev, 2)));
+					doorbell_size);
11838 if (!bp->doorbells) { 11866 if (!bp->doorbells) {
11839 dev_err(&bp->pdev->dev, 11867 dev_err(&bp->pdev->dev,
11840 "Cannot map doorbell space, aborting\n"); 11868 "Cannot map doorbell space, aborting\n");
@@ -11852,8 +11880,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11852 11880
11853#endif 11881#endif
11854 11882
11883
11884 /* Set bp->num_queues for MSI-X mode*/
11885 bnx2x_set_num_queues(bp);
11886
 	/* Configure interrupt mode: try to enable MSI-X/MSI if
-	 * needed, set bp->num_queues appropriately.
+	 * needed.
 	 */
11858 bnx2x_set_int_mode(bp); 11890 bnx2x_set_int_mode(bp);
11859 11891
@@ -12197,6 +12229,7 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
12197static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) 12229static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12198{ 12230{
12199 struct eth_spe *spe; 12231 struct eth_spe *spe;
12232 int cxt_index, cxt_offset;
12200 12233
12201#ifdef BNX2X_STOP_ON_ERROR 12234#ifdef BNX2X_STOP_ON_ERROR
12202 if (unlikely(bp->panic)) 12235 if (unlikely(bp->panic))
@@ -12219,10 +12252,16 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12219 * ramrod 12252 * ramrod
12220 */ 12253 */
12221 if (type == ETH_CONNECTION_TYPE) { 12254 if (type == ETH_CONNECTION_TYPE) {
-		if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
-			bnx2x_set_ctx_validation(bp, &bp->context.
-					vcxt[BNX2X_ISCSI_ETH_CID].eth,
-					BNX2X_ISCSI_ETH_CID);
+		if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
+			cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
+				ILT_PAGE_CIDS;
+			cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
+				(cxt_index * ILT_PAGE_CIDS);
+			bnx2x_set_ctx_validation(bp,
+				&bp->context[cxt_index].
+				vcxt[cxt_offset].eth,
+				BNX2X_ISCSI_ETH_CID(bp));
+		}
12226 } 12265 }
12227 12266
12228 /* 12267 /*
@@ -12575,6 +12614,21 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12575 cp->num_irq = 2; 12614 cp->num_irq = 2;
12576} 12615}
12577 12616
12617void bnx2x_setup_cnic_info(struct bnx2x *bp)
12618{
12619 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12620
12621
12622 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
12623 bnx2x_cid_ilt_lines(bp);
12624 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
12625 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
12626 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
12627
12628 if (NO_ISCSI_OOO(bp))
12629 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
12630}
12631
12578static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, 12632static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12579 void *data) 12633 void *data)
12580{ 12634{
@@ -12653,10 +12707,10 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12653 cp->drv_ctl = bnx2x_drv_ctl; 12707 cp->drv_ctl = bnx2x_drv_ctl;
12654 cp->drv_register_cnic = bnx2x_register_cnic; 12708 cp->drv_register_cnic = bnx2x_register_cnic;
12655 cp->drv_unregister_cnic = bnx2x_unregister_cnic; 12709 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
-	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
+	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
12657 cp->iscsi_l2_client_id = 12711 cp->iscsi_l2_client_id =
12658 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); 12712 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
-	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
+	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
12660 12714
12661 if (NO_ISCSI_OOO(bp)) 12715 if (NO_ISCSI_OOO(bp))
12662 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; 12716 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index bfef98f666c..a78e35683b0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -5913,6 +5913,7 @@
5913#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0 5913#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
5914#define MISC_REGISTERS_SPIO_SET_POS 8 5914#define MISC_REGISTERS_SPIO_SET_POS 8
5915#define HW_LOCK_MAX_RESOURCE_VALUE 31 5915#define HW_LOCK_MAX_RESOURCE_VALUE 31
5916#define HW_LOCK_RESOURCE_DCBX_ADMIN_MIB 13
5916#define HW_LOCK_RESOURCE_DRV_FLAGS 10 5917#define HW_LOCK_RESOURCE_DRV_FLAGS 10
5917#define HW_LOCK_RESOURCE_GPIO 1 5918#define HW_LOCK_RESOURCE_GPIO 1
5918#define HW_LOCK_RESOURCE_MDIO 0 5919#define HW_LOCK_RESOURCE_MDIO 0
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 6c14b4a4e82..734fd87cd99 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4107,6 +4107,10 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
4107 data->capabilities |= 4107 data->capabilities |=
4108 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; 4108 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4109 4109
4110 if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
4111 data->capabilities |=
4112 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4113
4110 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags)) 4114 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4111 data->capabilities |= 4115 data->capabilities |=
4112 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; 4116 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
@@ -4115,6 +4119,10 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
4115 data->capabilities |= 4119 data->capabilities |=
4116 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; 4120 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4117 4121
4122 if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
4123 data->capabilities |=
4124 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4125
4118 /* Hashing mask */ 4126 /* Hashing mask */
4119 data->rss_result_mask = p->rss_result_mask; 4127 data->rss_result_mask = p->rss_result_mask;
4120 4128
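
Both hunks follow the same shape: each BNX2X_RSS_* request bit, when set, ORs a matching firmware capability bit into the ramrod data, and the patch adds the IPv4/IPv6 UDP pair. A userspace model of the mapping (bit positions illustrative, not the real ramrod layout):

    #include <stdio.h>

    enum { RSS_IPV4, RSS_IPV4_TCP, RSS_IPV4_UDP,
           RSS_IPV6, RSS_IPV6_TCP, RSS_IPV6_UDP };

    #define CAP_IPV4      (1u << 0)
    #define CAP_IPV4_TCP  (1u << 1)
    #define CAP_IPV4_UDP  (1u << 2) /* new in this patch */
    #define CAP_IPV6      (1u << 3)
    #define CAP_IPV6_TCP  (1u << 4)
    #define CAP_IPV6_UDP  (1u << 5) /* new in this patch */

    static unsigned int rss_caps(unsigned long flags)
    {
            unsigned int caps = 0;

            if (flags & (1ul << RSS_IPV4))     caps |= CAP_IPV4;
            if (flags & (1ul << RSS_IPV4_TCP)) caps |= CAP_IPV4_TCP;
            if (flags & (1ul << RSS_IPV4_UDP)) caps |= CAP_IPV4_UDP;
            if (flags & (1ul << RSS_IPV6))     caps |= CAP_IPV6;
            if (flags & (1ul << RSS_IPV6_TCP)) caps |= CAP_IPV6_TCP;
            if (flags & (1ul << RSS_IPV6_UDP)) caps |= CAP_IPV6_UDP;
            return caps;
    }

    int main(void)
    {
            printf("0x%x\n",
                   rss_caps((1ul << RSS_IPV4) | (1ul << RSS_IPV4_UDP))); /* 0x5 */
            return 0;
    }
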
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index efd80bdd0df..76818ef08f9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -694,8 +694,10 @@ enum {
694 694
695 BNX2X_RSS_IPV4, 695 BNX2X_RSS_IPV4,
696 BNX2X_RSS_IPV4_TCP, 696 BNX2X_RSS_IPV4_TCP,
697 BNX2X_RSS_IPV4_UDP,
697 BNX2X_RSS_IPV6, 698 BNX2X_RSS_IPV6,
698 BNX2X_RSS_IPV6_TCP, 699 BNX2X_RSS_IPV6_TCP,
700 BNX2X_RSS_IPV6_UDP,
699}; 701};
700 702
701struct bnx2x_config_rss_params { 703struct bnx2x_config_rss_params {
@@ -729,6 +731,10 @@ struct bnx2x_rss_config_obj {
729 /* Last configured indirection table */ 731 /* Last configured indirection table */
730 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; 732 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
731 733
734 /* flags for enabling 4-tupple hash on UDP */
735 u8 udp_rss_v4;
736 u8 udp_rss_v6;
737
732 int (*config_rss)(struct bnx2x *bp, 738 int (*config_rss)(struct bnx2x *bp,
733 struct bnx2x_config_rss_params *p); 739 struct bnx2x_config_rss_params *p);
734}; 740};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 0e8bdcb9c74..514a528f6dd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -859,17 +859,22 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
859 struct tstorm_per_queue_stats *tclient = 859 struct tstorm_per_queue_stats *tclient =
860 &bp->fw_stats_data->queue_stats[i]. 860 &bp->fw_stats_data->queue_stats[i].
861 tstorm_queue_statistics; 861 tstorm_queue_statistics;
-		struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
+		struct tstorm_per_queue_stats *old_tclient =
+			&bnx2x_fp_stats(bp, fp)->old_tclient;
863 struct ustorm_per_queue_stats *uclient = 864 struct ustorm_per_queue_stats *uclient =
864 &bp->fw_stats_data->queue_stats[i]. 865 &bp->fw_stats_data->queue_stats[i].
865 ustorm_queue_statistics; 866 ustorm_queue_statistics;
-		struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
+		struct ustorm_per_queue_stats *old_uclient =
+			&bnx2x_fp_stats(bp, fp)->old_uclient;
867 struct xstorm_per_queue_stats *xclient = 869 struct xstorm_per_queue_stats *xclient =
868 &bp->fw_stats_data->queue_stats[i]. 870 &bp->fw_stats_data->queue_stats[i].
869 xstorm_queue_statistics; 871 xstorm_queue_statistics;
-		struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
-		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
-		struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
+		struct xstorm_per_queue_stats *old_xclient =
+			&bnx2x_fp_stats(bp, fp)->old_xclient;
+		struct bnx2x_eth_q_stats *qstats =
+			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
+		struct bnx2x_eth_q_stats_old *qstats_old =
+			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
873 878
874 u32 diff; 879 u32 diff;
875 880
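
The statistics churn in this file all comes from one structural change: per-queue counters move out of struct bnx2x_fastpath into a parallel bp->fp_stats[] array sharing the queue index, with bnx2x_fp_stats(bp, fp) presumably bridging the two. A compact model of that hot/cold split:

    /* Hot data (fastpath) and cold data (stats) in parallel arrays,
     * joined by a shared index -- what bnx2x_fp_stats() is presumed
     * to do in the driver. */
    #include <stdio.h>

    struct fastpath { int index; /* hot: rings, producers... */ };
    struct fp_stats { unsigned long rx_pkts; /* cold: counters */ };

    struct adapter {
            struct fastpath fp[4];
            struct fp_stats fp_stats[4];
    };

    #define fp_stats_of(bp, fp) (&(bp)->fp_stats[(fp)->index])

    int main(void)
    {
            struct adapter bp = { .fp = { {0}, {1}, {2}, {3} } };

            fp_stats_of(&bp, &bp.fp[2])->rx_pkts++;
            printf("%lu\n", bp.fp_stats[2].rx_pkts); /* 1 */
            return 0;
    }
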
@@ -1052,8 +1057,11 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
1052 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); 1057 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
1053 1058
1054 tmp = estats->mac_discard; 1059 tmp = estats->mac_discard;
-	for_each_rx_queue(bp, i)
-		tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
+	for_each_rx_queue(bp, i) {
+		struct tstorm_per_queue_stats *old_tclient =
+			&bp->fp_stats[i].old_tclient;
+		tmp += le32_to_cpu(old_tclient->checksum_discard);
+	}
1057 nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped; 1065 nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;
1058 1066
1059 nstats->tx_dropped = 0; 1067 nstats->tx_dropped = 0;
@@ -1103,9 +1111,9 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp)
1103 int i; 1111 int i;
1104 1112
1105 for_each_queue(bp, i) { 1113 for_each_queue(bp, i) {
-		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
+		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
 		struct bnx2x_eth_q_stats_old *qstats_old =
-			&bp->fp[i].eth_q_stats_old;
+			&bp->fp_stats[i].eth_q_stats_old;
1109 1117
1110 UPDATE_ESTAT_QSTAT(driver_xoff); 1118 UPDATE_ESTAT_QSTAT(driver_xoff);
1111 UPDATE_ESTAT_QSTAT(rx_err_discard_pkt); 1119 UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
@@ -1432,7 +1440,7 @@ static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
1432 query[first_queue_query_index + i]; 1440 query[first_queue_query_index + i];
1433 1441
1434 cur_query_entry->kind = STATS_TYPE_QUEUE; 1442 cur_query_entry->kind = STATS_TYPE_QUEUE;
-	cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]);
+	cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
1436 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); 1444 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
1437 cur_query_entry->address.hi = 1445 cur_query_entry->address.hi =
1438 cpu_to_le32(U64_HI(cur_data_offset)); 1446 cpu_to_le32(U64_HI(cur_data_offset));
@@ -1483,15 +1491,19 @@ void bnx2x_stats_init(struct bnx2x *bp)
1483 1491
1484 /* function stats */ 1492 /* function stats */
1485 for_each_queue(bp, i) { 1493 for_each_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
+		struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
 
-		memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
-		memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
-		memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
+		memset(&fp_stats->old_tclient, 0,
+		       sizeof(fp_stats->old_tclient));
+		memset(&fp_stats->old_uclient, 0,
+		       sizeof(fp_stats->old_uclient));
+		memset(&fp_stats->old_xclient, 0,
+		       sizeof(fp_stats->old_xclient));
 		if (bp->stats_init) {
-			memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
-			memset(&fp->eth_q_stats_old, 0,
-			       sizeof(fp->eth_q_stats_old));
+			memset(&fp_stats->eth_q_stats, 0,
+			       sizeof(fp_stats->eth_q_stats));
+			memset(&fp_stats->eth_q_stats_old, 0,
+			       sizeof(fp_stats->eth_q_stats_old));
 		}
1496 } 1508 }
1497 1509
@@ -1533,8 +1545,10 @@ void bnx2x_save_statistics(struct bnx2x *bp)
1533 /* save queue statistics */ 1545 /* save queue statistics */
1534 for_each_eth_queue(bp, i) { 1546 for_each_eth_queue(bp, i) {
1535 struct bnx2x_fastpath *fp = &bp->fp[i]; 1547 struct bnx2x_fastpath *fp = &bp->fp[i];
-		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
-		struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
+		struct bnx2x_eth_q_stats *qstats =
+			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
+		struct bnx2x_eth_q_stats_old *qstats_old =
+			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
1538 1552
1539 UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi); 1553 UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
1540 UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo); 1554 UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
@@ -1573,7 +1587,7 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
1573 struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats; 1587 struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
1574 struct bnx2x_eth_stats *estats = &bp->eth_stats; 1588 struct bnx2x_eth_stats *estats = &bp->eth_stats;
1575 struct per_queue_stats *fcoe_q_stats = 1589 struct per_queue_stats *fcoe_q_stats =
-		&bp->fw_stats_data->queue_stats[FCOE_IDX];
+		&bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];
1577 1591
1578 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = 1592 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
1579 &fcoe_q_stats->tstorm_queue_statistics; 1593 &fcoe_q_stats->tstorm_queue_statistics;
@@ -1590,8 +1604,7 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
1590 memset(afex_stats, 0, sizeof(struct afex_stats)); 1604 memset(afex_stats, 0, sizeof(struct afex_stats));
1591 1605
1592 for_each_eth_queue(bp, i) { 1606 for_each_eth_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
1595 1608
1596 ADD_64(afex_stats->rx_unicast_bytes_hi, 1609 ADD_64(afex_stats->rx_unicast_bytes_hi,
1597 qstats->total_unicast_bytes_received_hi, 1610 qstats->total_unicast_bytes_received_hi,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index d9e0824af09..78816b8b217 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -181,11 +181,14 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
181 u32 mask2 = 0; 181 u32 mask2 = 0;
182 struct ath9k_hw_capabilities *pCap = &ah->caps; 182 struct ath9k_hw_capabilities *pCap = &ah->caps;
183 struct ath_common *common = ath9k_hw_common(ah); 183 struct ath_common *common = ath9k_hw_common(ah);
-	u32 sync_cause = 0, async_cause;
+	u32 sync_cause = 0, async_cause, async_mask = AR_INTR_MAC_IRQ;
185
186 if (ath9k_hw_mci_is_enabled(ah))
187 async_mask |= AR_INTR_ASYNC_MASK_MCI;
185 188
186 async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE); 189 async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
187 190
-	if (async_cause & (AR_INTR_MAC_IRQ | AR_INTR_ASYNC_MASK_MCI)) {
+	if (async_cause & async_mask) {
189 if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) 192 if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
190 == AR_RTC_STATUS_ON) 193 == AR_RTC_STATUS_ON)
191 isr = REG_READ(ah, AR_ISR); 194 isr = REG_READ(ah, AR_ISR);
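
The fix builds the async-cause mask at runtime instead of hardcoding the MCI bit, so non-MCI chips no longer enter the ISR path on a stray AR_INTR_ASYNC_MASK_MCI bit. A model of the gating (bit values illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define INTR_MAC_IRQ (1u << 1)
    #define INTR_MCI     (1u << 7)

    static int isr_pending(uint32_t async_cause, int mci_enabled)
    {
            uint32_t async_mask = INTR_MAC_IRQ;

            if (mci_enabled)
                    async_mask |= INTR_MCI;       /* only honour MCI bits on MCI chips */

            return (async_cause & async_mask) != 0;
    }

    int main(void)
    {
            printf("%d %d\n", isr_pending(INTR_MCI, 0),
                              isr_pending(INTR_MCI, 1)); /* 0 1 */
            return 0;
    }
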
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index b1ced2a76da..cc2853ade8f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -321,7 +321,7 @@ void ar9003_mci_set_full_sleep(struct ath_hw *ah)
321{ 321{
322 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 322 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
323 323
-	if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) &&
+	if (ar9003_mci_state(ah, MCI_STATE_ENABLE) &&
325 (mci->bt_state != MCI_BT_SLEEP) && 325 (mci->bt_state != MCI_BT_SLEEP) &&
326 !mci->halted_bt_gpm) { 326 !mci->halted_bt_gpm) {
327 ar9003_mci_send_coex_halt_bt_gpm(ah, true, true); 327 ar9003_mci_send_coex_halt_bt_gpm(ah, true, true);
@@ -484,7 +484,7 @@ static void ar9003_mci_sync_bt_state(struct ath_hw *ah)
484 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 484 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
485 u32 cur_bt_state; 485 u32 cur_bt_state;
486 486
-	cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL);
+	cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP);
488 488
489 if (mci->bt_state != cur_bt_state) 489 if (mci->bt_state != cur_bt_state)
490 mci->bt_state = cur_bt_state; 490 mci->bt_state = cur_bt_state;
@@ -593,8 +593,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
593 if (!time_out) 593 if (!time_out)
594 break; 594 break;
595 595
-		offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
-					  &more_data);
+		offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);
598 597
599 if (offset == MCI_GPM_INVALID) 598 if (offset == MCI_GPM_INVALID)
600 continue; 599 continue;
@@ -658,8 +657,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
658 time_out = 0; 657 time_out = 0;
659 658
660 while (more_data == MCI_GPM_MORE) { 659 while (more_data == MCI_GPM_MORE) {
-		offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
-					  &more_data);
+		offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);
663 if (offset == MCI_GPM_INVALID) 661 if (offset == MCI_GPM_INVALID)
664 break; 662 break;
665 663
@@ -893,13 +891,16 @@ void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
893 udelay(100); 891 udelay(100);
894 } 892 }
895 893
894 /* Check pending GPM msg before MCI Reset Rx */
895 ar9003_mci_check_gpm_offset(ah);
896
896 regval |= SM(1, AR_MCI_COMMAND2_RESET_RX); 897 regval |= SM(1, AR_MCI_COMMAND2_RESET_RX);
897 REG_WRITE(ah, AR_MCI_COMMAND2, regval); 898 REG_WRITE(ah, AR_MCI_COMMAND2, regval);
898 udelay(1); 899 udelay(1);
899 regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX); 900 regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX);
900 REG_WRITE(ah, AR_MCI_COMMAND2, regval); 901 REG_WRITE(ah, AR_MCI_COMMAND2, regval);
901 902
-	ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
+	ar9003_mci_get_next_gpm_offset(ah, true, NULL);
903 904
904 REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 905 REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
905 (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) | 906 (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
@@ -1010,38 +1011,32 @@ static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
1010 } 1011 }
1011} 1012}
1012 1013
-void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done)
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force)
 {
 	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
 
-	if (!mci->update_2g5g)
+	if (!mci->update_2g5g && !force)
 		return;
 
 	if (mci->is_2g) {
 		ar9003_mci_send_2g5g_status(ah, true);
-		ar9003_mci_send_lna_transfer(ah, true);
-		udelay(5);
 
-		REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+		REG_SET_BIT(ah, AR_MCI_TX_CTRL,
 			    AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
 		REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL,
 			    AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
 
 		if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
-			REG_SET_BIT(ah, AR_BTCOEX_CTRL,
-				    AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+			ar9003_mci_osla_setup(ah, true);
 	} else {
-		ar9003_mci_send_lna_take(ah, true);
-		udelay(5);
-
 		REG_SET_BIT(ah, AR_MCI_TX_CTRL,
 			    AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
 		REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
 			    AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
-		REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
-			    AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
 
-		ar9003_mci_send_2g5g_status(ah, true);
+		ar9003_mci_osla_setup(ah, false);
+		if (!force)
+			ar9003_mci_send_2g5g_status(ah, true);
 	}
 }
1047 1042
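
wait_done becomes force throughout this series: the cached update_2g5g flag still short-circuits the switch, but callers on recovery paths can now override it, and a forced switch suppresses the redundant 2g5g status message in the 5 GHz branch. A tiny model of the early-return logic:

    #include <stdbool.h>
    #include <stdio.h>

    static bool update_pending; /* stands in for mci->update_2g5g */

    static bool do_2g5g_switch(bool force)
    {
            if (!update_pending && !force)
                    return false;   /* nothing changed and not forced */
            update_pending = false;
            return true;            /* band status actually (re)programmed */
    }

    int main(void)
    {
            printf("%d %d\n", do_2g5g_switch(false), do_2g5g_switch(true)); /* 0 1 */
            return 0;
    }
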
@@ -1169,11 +1164,10 @@ void ar9003_mci_cleanup(struct ath_hw *ah)
1169} 1164}
1170EXPORT_SYMBOL(ar9003_mci_cleanup); 1165EXPORT_SYMBOL(ar9003_mci_cleanup);
1171 1166
-u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
 {
-	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
-	u32 value = 0, more_gpm = 0, gpm_ptr;
+	u32 value = 0;
1177 u8 query_type; 1171 u8 query_type;
1178 1172
1179 switch (state_type) { 1173 switch (state_type) {
@@ -1186,81 +1180,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
1186 } 1180 }
1187 value &= AR_BTCOEX_CTRL_MCI_MODE_EN; 1181 value &= AR_BTCOEX_CTRL_MCI_MODE_EN;
1188 break; 1182 break;
1189 case MCI_STATE_INIT_GPM_OFFSET:
1190 value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
1191 mci->gpm_idx = value;
1192 break;
1193 case MCI_STATE_NEXT_GPM_OFFSET:
1194 case MCI_STATE_LAST_GPM_OFFSET:
1195 /*
1196 * This could be useful to avoid new GPM message interrupt which
1197 * may lead to spurious interrupt after power sleep, or multiple
1198 * entry of ath_mci_intr().
1199 * Adding empty GPM check by returning HAL_MCI_GPM_INVALID can
1200 * alleviate this effect, but clearing GPM RX interrupt bit is
1201 * safe, because whether this is called from hw or driver code
1202 * there must be an interrupt bit set/triggered initially
1203 */
1204 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
1205 AR_MCI_INTERRUPT_RX_MSG_GPM);
1206
1207 gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
1208 value = gpm_ptr;
1209
1210 if (value == 0)
1211 value = mci->gpm_len - 1;
1212 else if (value >= mci->gpm_len) {
1213 if (value != 0xFFFF)
1214 value = 0;
1215 } else {
1216 value--;
1217 }
1218
1219 if (value == 0xFFFF) {
1220 value = MCI_GPM_INVALID;
1221 more_gpm = MCI_GPM_NOMORE;
1222 } else if (state_type == MCI_STATE_NEXT_GPM_OFFSET) {
1223 if (gpm_ptr == mci->gpm_idx) {
1224 value = MCI_GPM_INVALID;
1225 more_gpm = MCI_GPM_NOMORE;
1226 } else {
1227 for (;;) {
1228 u32 temp_index;
1229
1230 /* skip reserved GPM if any */
1231
1232 if (value != mci->gpm_idx)
1233 more_gpm = MCI_GPM_MORE;
1234 else
1235 more_gpm = MCI_GPM_NOMORE;
1236
1237 temp_index = mci->gpm_idx;
1238 mci->gpm_idx++;
1239
1240 if (mci->gpm_idx >=
1241 mci->gpm_len)
1242 mci->gpm_idx = 0;
1243
1244 if (ar9003_mci_is_gpm_valid(ah,
1245 temp_index)) {
1246 value = temp_index;
1247 break;
1248 }
1249
1250 if (more_gpm == MCI_GPM_NOMORE) {
1251 value = MCI_GPM_INVALID;
1252 break;
1253 }
1254 }
1255 }
1256 if (p_data)
1257 *p_data = more_gpm;
1258 }
1259
1260 if (value != MCI_GPM_INVALID)
1261 value <<= 4;
1262
1263 break;
1264 case MCI_STATE_LAST_SCHD_MSG_OFFSET: 1183 case MCI_STATE_LAST_SCHD_MSG_OFFSET:
1265 value = MS(REG_READ(ah, AR_MCI_RX_STATUS), 1184 value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
1266 AR_MCI_RX_LAST_SCHD_MSG_INDEX); 1185 AR_MCI_RX_LAST_SCHD_MSG_INDEX);
@@ -1272,21 +1191,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
1272 AR_MCI_RX_REMOTE_SLEEP) ? 1191 AR_MCI_RX_REMOTE_SLEEP) ?
1273 MCI_BT_SLEEP : MCI_BT_AWAKE; 1192 MCI_BT_SLEEP : MCI_BT_AWAKE;
1274 break; 1193 break;
1275 case MCI_STATE_CONT_RSSI_POWER:
1276 value = MS(mci->cont_status, AR_MCI_CONT_RSSI_POWER);
1277 break;
1278 case MCI_STATE_CONT_PRIORITY:
1279 value = MS(mci->cont_status, AR_MCI_CONT_RRIORITY);
1280 break;
1281 case MCI_STATE_CONT_TXRX:
1282 value = MS(mci->cont_status, AR_MCI_CONT_TXRX);
1283 break;
1284 case MCI_STATE_BT:
1285 value = mci->bt_state;
1286 break;
1287 case MCI_STATE_SET_BT_SLEEP:
1288 mci->bt_state = MCI_BT_SLEEP;
1289 break;
1290 case MCI_STATE_SET_BT_AWAKE: 1194 case MCI_STATE_SET_BT_AWAKE:
1291 mci->bt_state = MCI_BT_AWAKE; 1195 mci->bt_state = MCI_BT_AWAKE;
1292 ar9003_mci_send_coex_version_query(ah, true); 1196 ar9003_mci_send_coex_version_query(ah, true);
@@ -1295,7 +1199,7 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
1295 if (mci->unhalt_bt_gpm) 1199 if (mci->unhalt_bt_gpm)
1296 ar9003_mci_send_coex_halt_bt_gpm(ah, false, true); 1200 ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
1297 1201
-		ar9003_mci_2g5g_switch(ah, true);
+		ar9003_mci_2g5g_switch(ah, false);
1299 break; 1203 break;
1300 case MCI_STATE_SET_BT_CAL_START: 1204 case MCI_STATE_SET_BT_CAL_START:
1301 mci->bt_state = MCI_BT_CAL_START; 1205 mci->bt_state = MCI_BT_CAL_START;
@@ -1319,34 +1223,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
1319 case MCI_STATE_SEND_WLAN_COEX_VERSION: 1223 case MCI_STATE_SEND_WLAN_COEX_VERSION:
1320 ar9003_mci_send_coex_version_response(ah, true); 1224 ar9003_mci_send_coex_version_response(ah, true);
1321 break; 1225 break;
1322 case MCI_STATE_SET_BT_COEX_VERSION:
1323 if (!p_data)
1324 ath_dbg(common, MCI,
1325 "MCI Set BT Coex version with NULL data!!\n");
1326 else {
1327 mci->bt_ver_major = (*p_data >> 8) & 0xff;
1328 mci->bt_ver_minor = (*p_data) & 0xff;
1329 mci->bt_version_known = true;
1330 ath_dbg(common, MCI, "MCI BT version set: %d.%d\n",
1331 mci->bt_ver_major, mci->bt_ver_minor);
1332 }
1333 break;
1334 case MCI_STATE_SEND_WLAN_CHANNELS:
1335 if (p_data) {
1336 if (((mci->wlan_channels[1] & 0xffff0000) ==
1337 (*(p_data + 1) & 0xffff0000)) &&
1338 (mci->wlan_channels[2] == *(p_data + 2)) &&
1339 (mci->wlan_channels[3] == *(p_data + 3)))
1340 break;
1341
1342 mci->wlan_channels[0] = *p_data++;
1343 mci->wlan_channels[1] = *p_data++;
1344 mci->wlan_channels[2] = *p_data++;
1345 mci->wlan_channels[3] = *p_data++;
1346 }
1347 mci->wlan_channels_update = true;
1348 ar9003_mci_send_coex_wlan_channels(ah, true);
1349 break;
1350 case MCI_STATE_SEND_VERSION_QUERY: 1226 case MCI_STATE_SEND_VERSION_QUERY:
1351 ar9003_mci_send_coex_version_query(ah, true); 1227 ar9003_mci_send_coex_version_query(ah, true);
1352 break; 1228 break;
@@ -1354,29 +1230,12 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
1354 query_type = MCI_GPM_COEX_QUERY_BT_TOPOLOGY; 1230 query_type = MCI_GPM_COEX_QUERY_BT_TOPOLOGY;
1355 ar9003_mci_send_coex_bt_status_query(ah, true, query_type); 1231 ar9003_mci_send_coex_bt_status_query(ah, true, query_type);
1356 break; 1232 break;
1357 case MCI_STATE_NEED_FLUSH_BT_INFO:
1358 /*
1359 * btcoex_hw.mci.unhalt_bt_gpm means whether it's
1360 * needed to send UNHALT message. It's set whenever
1361 * there's a request to send HALT message.
1362 * mci_halted_bt_gpm means whether HALT message is sent
1363 * out successfully.
1364 *
1365 * Checking (mci_unhalt_bt_gpm == false) instead of
1366 * checking (ah->mci_halted_bt_gpm == false) will make
1367 * sure currently is in UNHALT-ed mode and BT can
1368 * respond to status query.
1369 */
1370 value = (!mci->unhalt_bt_gpm && mci->need_flush_btinfo) ? 1 : 0;
1371 if (p_data)
1372 mci->need_flush_btinfo = (*p_data != 0) ? true : false;
1373 break;
1374 case MCI_STATE_RECOVER_RX: 1233 case MCI_STATE_RECOVER_RX:
1375 ar9003_mci_prep_interface(ah); 1234 ar9003_mci_prep_interface(ah);
1376 mci->query_bt = true; 1235 mci->query_bt = true;
1377 mci->need_flush_btinfo = true; 1236 mci->need_flush_btinfo = true;
1378 ar9003_mci_send_coex_wlan_channels(ah, true); 1237 ar9003_mci_send_coex_wlan_channels(ah, true);
-		ar9003_mci_2g5g_switch(ah, true);
+		ar9003_mci_2g5g_switch(ah, false);
1380 break; 1239 break;
1381 case MCI_STATE_NEED_FTP_STOMP: 1240 case MCI_STATE_NEED_FTP_STOMP:
1382 value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP); 1241 value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
@@ -1404,3 +1263,154 @@ void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah)
1404 /* Force another 2g5g update at next scanning */ 1263 /* Force another 2g5g update at next scanning */
1405 mci->update_2g5g = true; 1264 mci->update_2g5g = true;
1406} 1265}
1266
1267void ar9003_mci_set_power_awake(struct ath_hw *ah)
1268{
1269 u32 btcoex_ctrl2, diag_sw;
1270 int i;
1271 u8 lna_ctrl, bt_sleep;
1272
1273 for (i = 0; i < AH_WAIT_TIMEOUT; i++) {
1274 btcoex_ctrl2 = REG_READ(ah, AR_BTCOEX_CTRL2);
1275 if (btcoex_ctrl2 != 0xdeadbeef)
1276 break;
1277 udelay(AH_TIME_QUANTUM);
1278 }
1279 REG_WRITE(ah, AR_BTCOEX_CTRL2, (btcoex_ctrl2 | BIT(23)));
1280
1281 for (i = 0; i < AH_WAIT_TIMEOUT; i++) {
1282 diag_sw = REG_READ(ah, AR_DIAG_SW);
1283 if (diag_sw != 0xdeadbeef)
1284 break;
1285 udelay(AH_TIME_QUANTUM);
1286 }
1287 REG_WRITE(ah, AR_DIAG_SW, (diag_sw | BIT(27) | BIT(19) | BIT(18)));
1288 lna_ctrl = REG_READ(ah, AR_OBS_BUS_CTRL) & 0x3;
1289 bt_sleep = REG_READ(ah, AR_MCI_RX_STATUS) & AR_MCI_RX_REMOTE_SLEEP;
1290
1291 REG_WRITE(ah, AR_BTCOEX_CTRL2, btcoex_ctrl2);
1292 REG_WRITE(ah, AR_DIAG_SW, diag_sw);
1293
1294 if (bt_sleep && (lna_ctrl == 2)) {
1295 REG_SET_BIT(ah, AR_BTCOEX_RC, 0x1);
1296 REG_CLR_BIT(ah, AR_BTCOEX_RC, 0x1);
1297 udelay(50);
1298 }
1299}
1300
1301void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
1302{
1303 struct ath_common *common = ath9k_hw_common(ah);
1304 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1305 u32 offset;
1306
1307 /*
1308 * This should only be called before "MAC Warm Reset" or "MCI Reset Rx".
1309 */
1310 offset = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
1311 if (mci->gpm_idx == offset)
1312 return;
1313 ath_dbg(common, MCI, "GPM cached write pointer mismatch %d %d\n",
1314 mci->gpm_idx, offset);
1315 mci->query_bt = true;
1316 mci->need_flush_btinfo = true;
1317 mci->gpm_idx = 0;
1318}
1319
1320u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more)
1321{
1322 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1323 u32 offset, more_gpm = 0, gpm_ptr;
1324
1325 if (first) {
1326 gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
1327 mci->gpm_idx = gpm_ptr;
1328 return gpm_ptr;
1329 }
1330
1331 /*
1332 * This could be useful to avoid new GPM message interrupt which
1333 * may lead to spurious interrupt after power sleep, or multiple
1334 * entry of ath_mci_intr().
1335 * Adding empty GPM check by returning HAL_MCI_GPM_INVALID can
1336 * alleviate this effect, but clearing GPM RX interrupt bit is
1337 * safe, because whether this is called from hw or driver code
1338 * there must be an interrupt bit set/triggered initially
1339 */
1340 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
1341 AR_MCI_INTERRUPT_RX_MSG_GPM);
1342
1343 gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
1344 offset = gpm_ptr;
1345
1346 if (!offset)
1347 offset = mci->gpm_len - 1;
1348 else if (offset >= mci->gpm_len) {
1349 if (offset != 0xFFFF)
1350 offset = 0;
1351 } else {
1352 offset--;
1353 }
1354
1355 if ((offset == 0xFFFF) || (gpm_ptr == mci->gpm_idx)) {
1356 offset = MCI_GPM_INVALID;
1357 more_gpm = MCI_GPM_NOMORE;
1358 goto out;
1359 }
1360 for (;;) {
1361 u32 temp_index;
1362
1363 /* skip reserved GPM if any */
1364
1365 if (offset != mci->gpm_idx)
1366 more_gpm = MCI_GPM_MORE;
1367 else
1368 more_gpm = MCI_GPM_NOMORE;
1369
1370 temp_index = mci->gpm_idx;
1371 mci->gpm_idx++;
1372
1373 if (mci->gpm_idx >= mci->gpm_len)
1374 mci->gpm_idx = 0;
1375
1376 if (ar9003_mci_is_gpm_valid(ah, temp_index)) {
1377 offset = temp_index;
1378 break;
1379 }
1380
1381 if (more_gpm == MCI_GPM_NOMORE) {
1382 offset = MCI_GPM_INVALID;
1383 break;
1384 }
1385 }
1386
1387 if (offset != MCI_GPM_INVALID)
1388 offset <<= 4;
1389out:
1390 if (more)
1391 *more = more_gpm;
1392
1393 return offset;
1394}
1395EXPORT_SYMBOL(ar9003_mci_get_next_gpm_offset);
1396
1397void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor)
1398{
1399 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1400
1401 mci->bt_ver_major = major;
1402 mci->bt_ver_minor = minor;
1403 mci->bt_version_known = true;
1404 ath_dbg(ath9k_hw_common(ah), MCI, "MCI BT version set: %d.%d\n",
1405 mci->bt_ver_major, mci->bt_ver_minor);
1406}
1407EXPORT_SYMBOL(ar9003_mci_set_bt_version);
1408
1409void ar9003_mci_send_wlan_channels(struct ath_hw *ah)
1410{
1411 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1412
1413 mci->wlan_channels_update = true;
1414 ar9003_mci_send_coex_wlan_channels(ah, true);
1415}
1416EXPORT_SYMBOL(ar9003_mci_send_wlan_channels);
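
The three functions added above were previously inlined in ar9003_mci_state(); the interesting one, ar9003_mci_get_next_gpm_offset(), walks a ring of GPM entries by comparing a cached software read index (mci->gpm_idx) against the hardware write pointer. A simplified userspace model of that walk (reserved-entry skipping and the <<4 byte-offset scaling are omitted):

    #include <stdio.h>

    #define GPM_LEN   16
    #define GPM_EMPTY (~0u)

    static unsigned int rd_idx; /* stands in for mci->gpm_idx */

    static unsigned int next_entry(unsigned int wr_ptr, int *more)
    {
            unsigned int idx;

            if (wr_ptr == rd_idx) {          /* ring drained */
                    *more = 0;
                    return GPM_EMPTY;
            }
            idx = rd_idx;
            rd_idx = (rd_idx + 1) % GPM_LEN; /* advance the read index */
            *more = (rd_idx != wr_ptr);      /* MCI_GPM_MORE vs NOMORE */
            return idx;
    }

    int main(void)
    {
            unsigned int e;
            int more;

            while ((e = next_entry(3, &more)) != GPM_EMPTY)
                    printf("entry %u, more=%d\n", e, more); /* 0,1 1,1 2,0 */
            return 0;
    }
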
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
index 10282e2bcdc..d33b8e12885 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
@@ -189,26 +189,15 @@ enum mci_bt_state {
189/* Type of state query */ 189/* Type of state query */
190enum mci_state_type { 190enum mci_state_type {
191 MCI_STATE_ENABLE, 191 MCI_STATE_ENABLE,
192 MCI_STATE_INIT_GPM_OFFSET,
193 MCI_STATE_NEXT_GPM_OFFSET,
194 MCI_STATE_LAST_GPM_OFFSET,
195 MCI_STATE_BT,
196 MCI_STATE_SET_BT_SLEEP,
197 MCI_STATE_SET_BT_AWAKE, 192 MCI_STATE_SET_BT_AWAKE,
198 MCI_STATE_SET_BT_CAL_START, 193 MCI_STATE_SET_BT_CAL_START,
199 MCI_STATE_SET_BT_CAL, 194 MCI_STATE_SET_BT_CAL,
200 MCI_STATE_LAST_SCHD_MSG_OFFSET, 195 MCI_STATE_LAST_SCHD_MSG_OFFSET,
201 MCI_STATE_REMOTE_SLEEP, 196 MCI_STATE_REMOTE_SLEEP,
202 MCI_STATE_CONT_RSSI_POWER,
203 MCI_STATE_CONT_PRIORITY,
204 MCI_STATE_CONT_TXRX,
205 MCI_STATE_RESET_REQ_WAKE, 197 MCI_STATE_RESET_REQ_WAKE,
206 MCI_STATE_SEND_WLAN_COEX_VERSION, 198 MCI_STATE_SEND_WLAN_COEX_VERSION,
207 MCI_STATE_SET_BT_COEX_VERSION,
208 MCI_STATE_SEND_WLAN_CHANNELS,
209 MCI_STATE_SEND_VERSION_QUERY, 199 MCI_STATE_SEND_VERSION_QUERY,
210 MCI_STATE_SEND_STATUS_QUERY, 200 MCI_STATE_SEND_STATUS_QUERY,
211 MCI_STATE_NEED_FLUSH_BT_INFO,
212 MCI_STATE_SET_CONCUR_TX_PRI, 201 MCI_STATE_SET_CONCUR_TX_PRI,
213 MCI_STATE_RECOVER_RX, 202 MCI_STATE_RECOVER_RX,
214 MCI_STATE_NEED_FTP_STOMP, 203 MCI_STATE_NEED_FTP_STOMP,
@@ -259,14 +248,15 @@ enum mci_gpm_coex_opcode {
259bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag, 248bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
260 u32 *payload, u8 len, bool wait_done, 249 u32 *payload, u8 len, bool wait_done,
261 bool check_bt); 250 bool check_bt);
-u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data);
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type);
263void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf, 252void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
264 u16 len, u32 sched_addr); 253 u16 len, u32 sched_addr);
265void ar9003_mci_cleanup(struct ath_hw *ah); 254void ar9003_mci_cleanup(struct ath_hw *ah);
266void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr, 255void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
267 u32 *rx_msg_intr); 256 u32 *rx_msg_intr);
-void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah);
-
+u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more);
+void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor);
+void ar9003_mci_send_wlan_channels(struct ath_hw *ah);
270/* 260/*
271 * These functions are used by ath9k_hw. 261 * These functions are used by ath9k_hw.
272 */ 262 */
@@ -277,7 +267,7 @@ void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep);
277void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable); 267void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable);
278void ar9003_mci_init_cal_done(struct ath_hw *ah); 268void ar9003_mci_init_cal_done(struct ath_hw *ah);
279void ar9003_mci_set_full_sleep(struct ath_hw *ah); 269void ar9003_mci_set_full_sleep(struct ath_hw *ah);
-void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force);
281void ar9003_mci_check_bt(struct ath_hw *ah); 271void ar9003_mci_check_bt(struct ath_hw *ah);
282bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan); 272bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan);
283int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan, 273int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
@@ -285,6 +275,9 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
285void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, 275void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
286 bool is_full_sleep); 276 bool is_full_sleep);
287void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked); 277void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked);
278void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah);
279void ar9003_mci_set_power_awake(struct ath_hw *ah);
280void ar9003_mci_check_gpm_offset(struct ath_hw *ah);
288 281
289#else 282#else
290 283
@@ -322,6 +315,15 @@ static inline void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
322static inline void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked) 315static inline void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
323{ 316{
324} 317}
318static inline void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah)
319{
320}
321static inline void ar9003_mci_set_power_awake(struct ath_hw *ah)
322{
323}
324static inline void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
325{
326}
325#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */ 327#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
326 328
327#endif 329#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 4a93e1534c1..8f406ff2c95 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -52,7 +52,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
52 {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020}, 52 {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
53 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8}, 53 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
54 {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e}, 54 {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
-	{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x33795d5e},
+	{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x32395d5e},
56 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 56 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
57 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 57 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
58 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 58 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 02fc1c1e5ee..a8c05008564 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -698,6 +698,7 @@ struct ath_softc {
698#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 698#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
699 struct ath_btcoex btcoex; 699 struct ath_btcoex btcoex;
700 struct ath_mci_coex mci_coex; 700 struct ath_mci_coex mci_coex;
701 struct work_struct mci_work;
701#endif 702#endif
702 703
703 struct ath_descdma txsdma; 704 struct ath_descdma txsdma;
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index af6d2735029..26032cb59b8 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -202,7 +202,7 @@ static void ath_btcoex_period_timer(unsigned long data)
202 202
203 btcoex->bt_wait_time += btcoex->btcoex_period; 203 btcoex->bt_wait_time += btcoex->btcoex_period;
204 if (btcoex->bt_wait_time > ATH_BTCOEX_RX_WAIT_TIME) { 204 if (btcoex->bt_wait_time > ATH_BTCOEX_RX_WAIT_TIME) {
-		if (ar9003_mci_state(ah, MCI_STATE_NEED_FTP_STOMP, NULL) &&
+		if (ar9003_mci_state(ah, MCI_STATE_NEED_FTP_STOMP) &&
206 (mci->num_pan || mci->num_other_acl)) 206 (mci->num_pan || mci->num_other_acl))
207 ah->btcoex_hw.mci.stomp_ftp = 207 ah->btcoex_hw.mci.stomp_ftp =
208 (sc->rx.num_pkts < ATH_BTCOEX_STOMP_FTP_THRESH); 208 (sc->rx.num_pkts < ATH_BTCOEX_STOMP_FTP_THRESH);
@@ -232,7 +232,7 @@ static void ath_btcoex_period_timer(unsigned long data)
232 } 232 }
233 233
234 ath9k_ps_restore(sc); 234 ath9k_ps_restore(sc);
-	timer_period = btcoex->btcoex_period / 1000;
+	timer_period = btcoex->btcoex_period;
236 mod_timer(&btcoex->period_timer, jiffies + msecs_to_jiffies(timer_period)); 236 mod_timer(&btcoex->period_timer, jiffies + msecs_to_jiffies(timer_period));
237} 237}
238 238
@@ -267,10 +267,10 @@ static int ath_init_btcoex_timer(struct ath_softc *sc)
267{ 267{
268 struct ath_btcoex *btcoex = &sc->btcoex; 268 struct ath_btcoex *btcoex = &sc->btcoex;
269 269
-	btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
-	btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
+	btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD;
+	btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * 1000 *
 		btcoex->btcoex_period / 100;
-	btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
+	btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) * 1000 *
 		btcoex->btcoex_period / 100;
275 275
276 setup_timer(&btcoex->period_timer, ath_btcoex_period_timer, 276 setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
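
The unit change here: btcoex_period is now kept in milliseconds (so the /1000 in the timer path above goes away), while the no-stomp windows stay in microseconds, hence the new * 1000 factor. The arithmetic, with values assumed for illustration (40 ms period, 55% duty cycle):

    #include <stdio.h>

    int main(void)
    {
            unsigned int period_ms   = 40; /* ATH_BTCOEX_DEF_BT_PERIOD (assumed) */
            unsigned int duty_cycle  = 55; /* percent given to BT stomping (assumed) */
            unsigned int no_stomp_us = (100 - duty_cycle) * 1000 * period_ms / 100;

            printf("no-stomp window: %u us\n", no_stomp_us); /* 18000 us */
            return 0;
    }
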
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 45e670087e1..784baee5db8 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1348,6 +1348,9 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1348 } 1348 }
1349 } 1349 }
1350 1350
1351 if (ath9k_hw_mci_is_enabled(ah))
1352 ar9003_mci_check_gpm_offset(ah);
1353
1351 REG_WRITE(ah, AR_RTC_RC, rst_flags); 1354 REG_WRITE(ah, AR_RTC_RC, rst_flags);
1352 1355
1353 REGWRITE_BUFFER_FLUSH(ah); 1356 REGWRITE_BUFFER_FLUSH(ah);
@@ -1708,7 +1711,7 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
1708 ath9k_hw_start_nfcal(ah, true); 1711 ath9k_hw_start_nfcal(ah, true);
1709 1712
1710 if (ath9k_hw_mci_is_enabled(ah)) 1713 if (ath9k_hw_mci_is_enabled(ah))
-		ar9003_mci_2g5g_switch(ah, true);
+		ar9003_mci_2g5g_switch(ah, false);
1712 1715
1713 if (AR_SREV_9271(ah)) 1716 if (AR_SREV_9271(ah))
1714 ar9002_hw_load_ani_reg(ah, chan); 1717 ar9002_hw_load_ani_reg(ah, chan);
@@ -1912,7 +1915,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1912 1915
1913 ath9k_hw_set_dma(ah); 1916 ath9k_hw_set_dma(ah);
1914 1917
-	REG_WRITE(ah, AR_OBS, 8);
+	if (!ath9k_hw_mci_is_enabled(ah))
+		REG_WRITE(ah, AR_OBS, 8);
1916 1920
1917 if (ah->config.rx_intr_mitigation) { 1921 if (ah->config.rx_intr_mitigation) {
1918 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500); 1922 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
@@ -2111,6 +2115,9 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
2111 AR_RTC_FORCE_WAKE_EN); 2115 AR_RTC_FORCE_WAKE_EN);
2112 udelay(50); 2116 udelay(50);
2113 2117
2118 if (ath9k_hw_mci_is_enabled(ah))
2119 ar9003_mci_set_power_awake(ah);
2120
2114 for (i = POWER_UP_TIME / 50; i > 0; i--) { 2121 for (i = POWER_UP_TIME / 50; i > 0; i--) {
2115 val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M; 2122 val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
2116 if (val == AR_RTC_STATUS_ON) 2123 if (val == AR_RTC_STATUS_ON)
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 0cc4c70f7f0..a105c942625 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -136,6 +136,14 @@ void ath_hw_pll_work(struct work_struct *work)
136 u32 pll_sqsum; 136 u32 pll_sqsum;
137 struct ath_softc *sc = container_of(work, struct ath_softc, 137 struct ath_softc *sc = container_of(work, struct ath_softc,
138 hw_pll_work.work); 138 hw_pll_work.work);
139 /*
140 * ensure that the PLL WAR is executed only
141 * after the STA is associated (or) if the
142 * beaconing had started in interfaces that
143 * uses beacons.
144 */
145 if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
146 return;
139 147
140 ath9k_ps_wakeup(sc); 148 ath9k_ps_wakeup(sc);
141 pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah); 149 pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index c0f478b0a9a..52561b341d6 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -150,6 +150,9 @@ static void __ath_cancel_work(struct ath_softc *sc)
150 cancel_work_sync(&sc->hw_check_work); 150 cancel_work_sync(&sc->hw_check_work);
151 cancel_delayed_work_sync(&sc->tx_complete_work); 151 cancel_delayed_work_sync(&sc->tx_complete_work);
152 cancel_delayed_work_sync(&sc->hw_pll_work); 152 cancel_delayed_work_sync(&sc->hw_pll_work);
153#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
154 cancel_work_sync(&sc->mci_work);
155#endif
153} 156}
154 157
155static void ath_cancel_work(struct ath_softc *sc) 158static void ath_cancel_work(struct ath_softc *sc)
@@ -1033,15 +1036,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1033 } 1036 }
1034 } 1037 }
1035 1038
1036 if ((ah->opmode == NL80211_IFTYPE_ADHOC) ||
1037 ((vif->type == NL80211_IFTYPE_ADHOC) &&
1038 sc->nvifs > 0)) {
1039 ath_err(common, "Cannot create ADHOC interface when other"
1040 " interfaces already exist.\n");
1041 ret = -EINVAL;
1042 goto out;
1043 }
1044
1045 ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type); 1039 ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
1046 1040
1047 sc->nvifs++; 1041 sc->nvifs++;
@@ -1066,15 +1060,6 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
1066 mutex_lock(&sc->mutex); 1060 mutex_lock(&sc->mutex);
1067 ath9k_ps_wakeup(sc); 1061 ath9k_ps_wakeup(sc);
1068 1062
1069 /* See if new interface type is valid. */
1070 if ((new_type == NL80211_IFTYPE_ADHOC) &&
1071 (sc->nvifs > 1)) {
1072 ath_err(common, "When using ADHOC, it must be the only"
1073 " interface.\n");
1074 ret = -EINVAL;
1075 goto out;
1076 }
1077
1078 if (ath9k_uses_beacons(new_type) && 1063 if (ath9k_uses_beacons(new_type) &&
1079 !ath9k_uses_beacons(vif->type)) { 1064 !ath9k_uses_beacons(vif->type)) {
1080 if (sc->nbcnvifs >= ATH_BCBUF) { 1065 if (sc->nbcnvifs >= ATH_BCBUF) {
@@ -1258,6 +1243,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1258 if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) { 1243 if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
1259 ath_err(common, "Unable to set channel\n"); 1244 ath_err(common, "Unable to set channel\n");
1260 mutex_unlock(&sc->mutex); 1245 mutex_unlock(&sc->mutex);
1246 ath9k_ps_restore(sc);
1261 return -EINVAL; 1247 return -EINVAL;
1262 } 1248 }
1263 1249
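
ath9k_ps_wakeup()/ath9k_ps_restore() act as a refcount pair, and the error return for a failed channel set was skipping the restore, leaving the chip pinned awake. A model of the balance the added line restores:

    #include <stdio.h>

    static int ps_refcnt;

    static void ps_wakeup(void)  { ps_refcnt++; }
    static void ps_restore(void) { ps_refcnt--; }

    static int set_channel(int ok)
    {
            ps_wakeup();
            if (!ok) {
                    ps_restore();   /* the release the patch adds on failure */
                    return -1;
            }
            ps_restore();
            return 0;
    }

    int main(void)
    {
            set_channel(0);
            printf("refcnt after failure: %d\n", ps_refcnt); /* 0, no leak */
            return 0;
    }
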
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 49137f477b0..7d34a504d61 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -20,7 +20,7 @@
20#include "ath9k.h" 20#include "ath9k.h"
21#include "mci.h" 21#include "mci.h"
22 22
-static const u8 ath_mci_duty_cycle[] = { 0, 50, 60, 70, 80, 85, 90, 95, 98 };
+static const u8 ath_mci_duty_cycle[] = { 55, 50, 60, 70, 80, 85, 90, 95, 98 };
24 24
25static struct ath_mci_profile_info* 25static struct ath_mci_profile_info*
26ath_mci_find_profile(struct ath_mci_profile *mci, 26ath_mci_find_profile(struct ath_mci_profile *mci,
@@ -28,11 +28,14 @@ ath_mci_find_profile(struct ath_mci_profile *mci,
28{ 28{
29 struct ath_mci_profile_info *entry; 29 struct ath_mci_profile_info *entry;
30 30
+	if (list_empty(&mci->info))
+		return NULL;
+
 	list_for_each_entry(entry, &mci->info, list) {
 		if (entry->conn_handle == info->conn_handle)
-			break;
+			return entry;
 	}
-	return entry;
+	return NULL;
36} 39}
37 40
38static bool ath_mci_add_profile(struct ath_common *common, 41static bool ath_mci_add_profile(struct ath_common *common,
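
The rewrite above fixes a classic list_for_each_entry() pitfall: when the loop runs to completion without a break, the cursor is not NULL but points at the container of the list head, so the old "return entry" could hand back garbage on a miss. Returning from inside the loop on a match and NULL afterwards is the safe shape, modeled here with a plain linked list:

    #include <stdio.h>
    #include <stddef.h>

    struct node { int key; struct node *next; };

    static struct node *find(struct node *head, int key)
    {
            struct node *n;

            for (n = head; n; n = n->next)
                    if (n->key == key)
                            return n;   /* found: return inside the loop */
            return NULL;                /* not found: explicit NULL */
    }

    int main(void)
    {
            struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

            printf("%p %p\n", (void *)find(&a, 2), (void *)find(&a, 9));
            return 0;
    }
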
@@ -49,31 +52,21 @@ static bool ath_mci_add_profile(struct ath_common *common,
49 (info->type != MCI_GPM_COEX_PROFILE_VOICE)) 52 (info->type != MCI_GPM_COEX_PROFILE_VOICE))
50 return false; 53 return false;
51 54
-	entry = ath_mci_find_profile(mci, info);
-
-	if (entry) {
-		memcpy(entry, info, 10);
-	} else {
-		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-		if (!entry)
-			return false;
+	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+	if (!entry)
+		return false;
 
-		memcpy(entry, info, 10);
-		INC_PROF(mci, info);
-		list_add_tail(&info->list, &mci->info);
-	}
+	memcpy(entry, info, 10);
+	INC_PROF(mci, info);
+	list_add_tail(&entry->list, &mci->info);
 
 	return true;
 }
68 65
 static void ath_mci_del_profile(struct ath_common *common,
 				struct ath_mci_profile *mci,
-				struct ath_mci_profile_info *info)
+				struct ath_mci_profile_info *entry)
 {
-	struct ath_mci_profile_info *entry;
-
-	entry = ath_mci_find_profile(mci, info);
-
 	if (!entry)
78 return; 71 return;
79 72
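
Two fixes land in this hunk: the allocation moves to GFP_ATOMIC (the profile update can presumably run in atomic context), and list_add_tail() now links the freshly allocated entry rather than the caller's info, which previously put a transient object on a long-lived list. The second fix, modeled in plain C:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct profile { int conn_handle; struct profile *next; };

    static struct profile *head;

    static int add_profile(const struct profile *info)
    {
            struct profile *entry = calloc(1, sizeof(*entry));

            if (!entry)
                    return -1;
            memcpy(entry, info, sizeof(*entry));
            entry->next = head;     /* link the allocation we own, not 'info' */
            head = entry;
            return 0;
    }

    int main(void)
    {
            struct profile tmp = { .conn_handle = 7 };

            add_profile(&tmp);      /* tmp may go out of scope; list stays valid */
            printf("%d\n", head->conn_handle);
            return 0;
    }
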
@@ -86,12 +79,16 @@ void ath_mci_flush_profile(struct ath_mci_profile *mci)
86{ 79{
87 struct ath_mci_profile_info *info, *tinfo; 80 struct ath_mci_profile_info *info, *tinfo;
88 81
+	mci->aggr_limit = 0;
+
+	if (list_empty(&mci->info))
+		return;
+
 	list_for_each_entry_safe(info, tinfo, &mci->info, list) {
 		list_del(&info->list);
 		DEC_PROF(mci, info);
 		kfree(info);
 	}
-	mci->aggr_limit = 0;
95} 92}
96 93
97static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex) 94static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex)
@@ -123,6 +120,8 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
123 if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING) 120 if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING)
124 goto skip_tuning; 121 goto skip_tuning;
125 122
123 btcoex->duty_cycle = ath_mci_duty_cycle[num_profile];
124
126 if (num_profile == 1) { 125 if (num_profile == 1) {
127 info = list_first_entry(&mci->info, 126 info = list_first_entry(&mci->info,
128 struct ath_mci_profile_info, 127 struct ath_mci_profile_info,
@@ -181,12 +180,11 @@ skip_tuning:
181 if (IS_CHAN_5GHZ(sc->sc_ah->curchan)) 180 if (IS_CHAN_5GHZ(sc->sc_ah->curchan))
182 return; 181 return;
183 182
184 btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_MAX_DUTY_CYCLE : 0); 183 btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_BDR_DUTY_CYCLE : 0);
185 if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE) 184 if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE)
186 btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE; 185 btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE;
187 186
188 btcoex->btcoex_period *= 1000; 187 btcoex->btcoex_no_stomp = btcoex->btcoex_period * 1000 *
189 btcoex->btcoex_no_stomp = btcoex->btcoex_period *
190 (100 - btcoex->duty_cycle) / 100; 188 (100 - btcoex->duty_cycle) / 100;
191 189
192 ath9k_hw_btcoex_enable(sc->sc_ah); 190 ath9k_hw_btcoex_enable(sc->sc_ah);
@@ -197,20 +195,16 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
197{ 195{
198 struct ath_hw *ah = sc->sc_ah; 196 struct ath_hw *ah = sc->sc_ah;
199 struct ath_common *common = ath9k_hw_common(ah); 197 struct ath_common *common = ath9k_hw_common(ah);
198 struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
200 u32 payload[4] = {0, 0, 0, 0}; 199 u32 payload[4] = {0, 0, 0, 0};
201 200
202 switch (opcode) { 201 switch (opcode) {
203 case MCI_GPM_BT_CAL_REQ: 202 case MCI_GPM_BT_CAL_REQ:
204 if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) { 203 if (mci_hw->bt_state == MCI_BT_AWAKE) {
205 ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START, NULL); 204 ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START);
206 ieee80211_queue_work(sc->hw, &sc->hw_reset_work); 205 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
207 } else {
208 ath_dbg(common, MCI, "MCI State mismatch: %d\n",
209 ar9003_mci_state(ah, MCI_STATE_BT, NULL));
210 } 206 }
211 break; 207 ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state);
212 case MCI_GPM_BT_CAL_DONE:
213 ar9003_mci_state(ah, MCI_STATE_BT, NULL);
214 break; 208 break;
215 case MCI_GPM_BT_CAL_GRANT: 209 case MCI_GPM_BT_CAL_GRANT:
216 MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE); 210 MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
@@ -223,32 +217,42 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
223 } 217 }
224} 218}
225 219
220static void ath9k_mci_work(struct work_struct *work)
221{
222 struct ath_softc *sc = container_of(work, struct ath_softc, mci_work);
223
224 ath_mci_update_scheme(sc);
225}
226
226static void ath_mci_process_profile(struct ath_softc *sc, 227static void ath_mci_process_profile(struct ath_softc *sc,
227 struct ath_mci_profile_info *info) 228 struct ath_mci_profile_info *info)
228{ 229{
229 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 230 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
230 struct ath_btcoex *btcoex = &sc->btcoex; 231 struct ath_btcoex *btcoex = &sc->btcoex;
231 struct ath_mci_profile *mci = &btcoex->mci; 232 struct ath_mci_profile *mci = &btcoex->mci;
233 struct ath_mci_profile_info *entry = NULL;
234
235 entry = ath_mci_find_profile(mci, info);
236 if (entry)
237 memcpy(entry, info, 10);
232 238
233 if (info->start) { 239 if (info->start) {
234 if (!ath_mci_add_profile(common, mci, info)) 240 if (!entry && !ath_mci_add_profile(common, mci, info))
235 return; 241 return;
236 } else 242 } else
237 ath_mci_del_profile(common, mci, info); 243 ath_mci_del_profile(common, mci, entry);
238 244
239 btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD; 245 btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
240 mci->aggr_limit = mci->num_sco ? 6 : 0; 246 mci->aggr_limit = mci->num_sco ? 6 : 0;
241 247
242 if (NUM_PROF(mci)) { 248 btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)];
249 if (NUM_PROF(mci))
243 btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW; 250 btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
244 btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)]; 251 else
245 } else {
246 btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL : 252 btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
247 ATH_BTCOEX_STOMP_LOW; 253 ATH_BTCOEX_STOMP_LOW;
248 btcoex->duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
249 }
250 254
251 ath_mci_update_scheme(sc); 255 ieee80211_queue_work(sc->hw, &sc->mci_work);
252} 256}
253 257
254static void ath_mci_process_status(struct ath_softc *sc, 258static void ath_mci_process_status(struct ath_softc *sc,
@@ -263,8 +267,6 @@ static void ath_mci_process_status(struct ath_softc *sc,
263 if (status->is_link) 267 if (status->is_link)
264 return; 268 return;
265 269
266 memset(&info, 0, sizeof(struct ath_mci_profile_info));
267
268 info.conn_handle = status->conn_handle; 270 info.conn_handle = status->conn_handle;
269 if (ath_mci_find_profile(mci, &info)) 271 if (ath_mci_find_profile(mci, &info))
270 return; 272 return;
@@ -284,7 +286,7 @@ static void ath_mci_process_status(struct ath_softc *sc,
284 } while (++i < ATH_MCI_MAX_PROFILE); 286 } while (++i < ATH_MCI_MAX_PROFILE);
285 287
286 if (old_num_mgmt != mci->num_mgmt) 288 if (old_num_mgmt != mci->num_mgmt)
287 ath_mci_update_scheme(sc); 289 ieee80211_queue_work(sc->hw, &sc->mci_work);
288} 290}
289 291
290static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) 292static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
@@ -293,25 +295,20 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
293 struct ath_mci_profile_info profile_info; 295 struct ath_mci_profile_info profile_info;
294 struct ath_mci_profile_status profile_status; 296 struct ath_mci_profile_status profile_status;
295 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 297 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
296 u32 version; 298 u8 major, minor;
297 u8 major;
298 u8 minor;
299 u32 seq_num; 299 u32 seq_num;
300 300
301 switch (opcode) { 301 switch (opcode) {
302 case MCI_GPM_COEX_VERSION_QUERY: 302 case MCI_GPM_COEX_VERSION_QUERY:
303 version = ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION, 303 ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION);
304 NULL);
305 break; 304 break;
306 case MCI_GPM_COEX_VERSION_RESPONSE: 305 case MCI_GPM_COEX_VERSION_RESPONSE:
307 major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION); 306 major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
308 minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION); 307 minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
309 version = (major << 8) + minor; 308 ar9003_mci_set_bt_version(ah, major, minor);
310 version = ar9003_mci_state(ah, MCI_STATE_SET_BT_COEX_VERSION,
311 &version);
312 break; 309 break;
313 case MCI_GPM_COEX_STATUS_QUERY: 310 case MCI_GPM_COEX_STATUS_QUERY:
314 ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_CHANNELS, NULL); 311 ar9003_mci_send_wlan_channels(ah);
315 break; 312 break;
316 case MCI_GPM_COEX_BT_PROFILE_INFO: 313 case MCI_GPM_COEX_BT_PROFILE_INFO:
317 memcpy(&profile_info, 314 memcpy(&profile_info,
@@ -378,6 +375,7 @@ int ath_mci_setup(struct ath_softc *sc)
378 mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4), 375 mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
379 mci->sched_buf.bf_paddr); 376 mci->sched_buf.bf_paddr);
380 377
378 INIT_WORK(&sc->mci_work, ath9k_mci_work);
381 ath_dbg(common, MCI, "MCI Initialized\n"); 379 ath_dbg(common, MCI, "MCI Initialized\n");
382 380
383 return 0; 381 return 0;
@@ -405,6 +403,7 @@ void ath_mci_intr(struct ath_softc *sc)
405 struct ath_mci_coex *mci = &sc->mci_coex; 403 struct ath_mci_coex *mci = &sc->mci_coex;
406 struct ath_hw *ah = sc->sc_ah; 404 struct ath_hw *ah = sc->sc_ah;
407 struct ath_common *common = ath9k_hw_common(ah); 405 struct ath_common *common = ath9k_hw_common(ah);
406 struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
408 u32 mci_int, mci_int_rxmsg; 407 u32 mci_int, mci_int_rxmsg;
409 u32 offset, subtype, opcode; 408 u32 offset, subtype, opcode;
410 u32 *pgpm; 409 u32 *pgpm;
@@ -413,8 +412,8 @@ void ath_mci_intr(struct ath_softc *sc)
413 412
414 ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg); 413 ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
415 414
416 if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) == 0) { 415 if (ar9003_mci_state(ah, MCI_STATE_ENABLE) == 0) {
417 ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL); 416 ar9003_mci_get_next_gpm_offset(ah, true, NULL);
418 return; 417 return;
419 } 418 }
420 419
@@ -433,46 +432,41 @@ void ath_mci_intr(struct ath_softc *sc)
433 NULL, 0, true, false); 432 NULL, 0, true, false);
434 433
435 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE; 434 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE;
436 ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE, NULL); 435 ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE);
437 436
438 /* 437 /*
439 * always do this for recovery and 2G/5G toggling and LNA_TRANS 438 * always do this for recovery and 2G/5G toggling and LNA_TRANS
440 */ 439 */
441 ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, NULL); 440 ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
442 } 441 }
443 442
444 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) { 443 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
445 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING; 444 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;
446 445
447 if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_SLEEP) { 446 if ((mci_hw->bt_state == MCI_BT_SLEEP) &&
448 if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) != 447 (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
449 MCI_BT_SLEEP) 448 MCI_BT_SLEEP))
450 ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, 449 ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
451 NULL);
452 }
453 } 450 }
454 451
455 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) { 452 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
456 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING; 453 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;
457 454
458 if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) { 455 if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
459 if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) != 456 (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
460 MCI_BT_AWAKE) 457 MCI_BT_AWAKE))
461 ar9003_mci_state(ah, MCI_STATE_SET_BT_SLEEP, 458 mci_hw->bt_state = MCI_BT_SLEEP;
462 NULL);
463 }
464 } 459 }
465 460
466 if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) || 461 if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
467 (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) { 462 (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
468 ar9003_mci_state(ah, MCI_STATE_RECOVER_RX, NULL); 463 ar9003_mci_state(ah, MCI_STATE_RECOVER_RX);
469 skip_gpm = true; 464 skip_gpm = true;
470 } 465 }
471 466
472 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) { 467 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
473 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO; 468 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
474 offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET, 469 offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET);
475 NULL);
476 } 470 }
477 471
478 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) { 472 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
@@ -481,8 +475,8 @@ void ath_mci_intr(struct ath_softc *sc)
481 while (more_data == MCI_GPM_MORE) { 475 while (more_data == MCI_GPM_MORE) {
482 476
483 pgpm = mci->gpm_buf.bf_addr; 477 pgpm = mci->gpm_buf.bf_addr;
484 offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET, 478 offset = ar9003_mci_get_next_gpm_offset(ah, false,
485 &more_data); 479 &more_data);
486 480
487 if (offset == MCI_GPM_INVALID) 481 if (offset == MCI_GPM_INVALID)
488 break; 482 break;
@@ -523,23 +517,17 @@ void ath_mci_intr(struct ath_softc *sc)
523 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO; 517 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;
524 518
525 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) { 519 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
526 int value_dbm = ar9003_mci_state(ah, 520 int value_dbm = MS(mci_hw->cont_status,
527 MCI_STATE_CONT_RSSI_POWER, NULL); 521 AR_MCI_CONT_RSSI_POWER);
528 522
529 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO; 523 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;
530 524
531 if (ar9003_mci_state(ah, MCI_STATE_CONT_TXRX, NULL)) 525 ath_dbg(common, MCI,
532 ath_dbg(common, MCI, 526 "MCI CONT_INFO: (%s) pri = %d pwr = %d dBm\n",
533 "MCI CONT_INFO: (tx) pri = %d, pwr = %d dBm\n", 527 MS(mci_hw->cont_status, AR_MCI_CONT_TXRX) ?
534 ar9003_mci_state(ah, 528 "tx" : "rx",
535 MCI_STATE_CONT_PRIORITY, NULL), 529 MS(mci_hw->cont_status, AR_MCI_CONT_PRIORITY),
536 value_dbm); 530 value_dbm);
537 else
538 ath_dbg(common, MCI,
539 "MCI CONT_INFO: (rx) pri = %d,pwr = %d dBm\n",
540 ar9003_mci_state(ah,
541 MCI_STATE_CONT_PRIORITY, NULL),
542 value_dbm);
543 } 531 }
544 532
545 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK) 533 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK)
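
The mci.c rework above stops calling ath_mci_update_scheme() directly from the GPM message handlers and instead queues sc->mci_work, so the re-tuning runs in process context rather than in the interrupt path. A minimal sketch of that deferral pattern — hypothetical names, assuming only <linux/workqueue.h>; ath9k itself queues onto mac80211's workqueue via ieee80211_queue_work() rather than schedule_work():

#include <linux/workqueue.h>

struct my_softc {
	struct work_struct update_work;
	/* ... coex state ... */
};

static void my_update_scheme(struct my_softc *sc)
{
	/* stand-in for the re-tuning logic; may sleep, touch hardware */
}

static void my_update_worker(struct work_struct *work)
{
	/* container_of() recovers the softc that embeds the work item */
	struct my_softc *sc = container_of(work, struct my_softc, update_work);

	my_update_scheme(sc);
}

static void my_setup(struct my_softc *sc)
{
	INIT_WORK(&sc->update_work, my_update_worker);
}

static void my_msg_handler(struct my_softc *sc)
{
	/* interrupt path: just schedule and return quickly */
	schedule_work(&sc->update_work);
}
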
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 92a6c0a87f8..e034add9cd5 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -770,7 +770,7 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
770 struct ieee80211_tx_rate *rates = tx_info->control.rates; 770 struct ieee80211_tx_rate *rates = tx_info->control.rates;
771 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 771 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
772 __le16 fc = hdr->frame_control; 772 __le16 fc = hdr->frame_control;
773 u8 try_per_rate, i = 0, rix, high_rix; 773 u8 try_per_rate, i = 0, rix;
774 int is_probe = 0; 774 int is_probe = 0;
775 775
776 if (rate_control_send_low(sta, priv_sta, txrc)) 776 if (rate_control_send_low(sta, priv_sta, txrc))
@@ -791,7 +791,6 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
791 rate_table = ath_rc_priv->rate_table; 791 rate_table = ath_rc_priv->rate_table;
792 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, 792 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
793 &is_probe, false); 793 &is_probe, false);
794 high_rix = rix;
795 794
796 /* 795 /*
797 * If we're in HT mode and both us and our peer supports LDPC. 796 * If we're in HT mode and both us and our peer supports LDPC.
@@ -839,16 +838,16 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
839 try_per_rate = 8; 838 try_per_rate = 8;
840 839
841 /* 840 /*
842 * Use a legacy rate as last retry to ensure that the frame 841 * If the last rate in the rate series is MCS and has
843 * is tried in both MCS and legacy rates. 842 * more than 80% of per thresh, then use a legacy rate
843 * as last retry to ensure that the frame is tried in both
844 * MCS and legacy rate.
844 */ 845 */
845 if ((rates[2].flags & IEEE80211_TX_RC_MCS) && 846 ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
846 (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU) || 847 if (WLAN_RC_PHY_HT(rate_table->info[rix].phy) &&
847 (ath_rc_priv->per[high_rix] > 45))) 848 (ath_rc_priv->per[rix] > 45))
848 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, 849 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
849 &is_probe, true); 850 &is_probe, true);
850 else
851 ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
852 851
853 /* All other rates in the series have RTS enabled */ 852 /* All other rates in the series have RTS enabled */
854 ath_rc_rate_set_series(rate_table, &rates[i], txrc, 853 ath_rc_rate_set_series(rate_table, &rates[i], txrc,
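
The rc.c change above reorders the last-retry selection: first step down one rate index, then fall back to the best legacy rate only when the stepped-down rate is still an MCS rate whose packet error rate exceeds the hard-coded 45 used here. A self-contained sketch of that decision, with hypothetical types standing in for the ath9k rate table:

#include <stdbool.h>

struct rate_info {
	bool is_ht;		/* MCS (HT) rate? */
	unsigned int per;	/* recent packet error rate, percent */
};

/*
 * Choose the rate index for the final retry: normally one step below
 * the current index, but if that rate is still MCS and failing often,
 * use the best legacy rate so the frame gets tried in both domains.
 */
static unsigned int pick_last_retry(const struct rate_info *tbl,
				    unsigned int lower_rix,
				    unsigned int best_legacy_rix)
{
	if (tbl[lower_rix].is_ht && tbl[lower_rix].per > 45)
		return best_legacy_rix;

	return lower_rix;
}
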
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 560d6effac7..75acefbd493 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -2098,8 +2098,8 @@ enum {
2098#define AR_MCI_CONT_STATUS 0x1848 2098#define AR_MCI_CONT_STATUS 0x1848
2099#define AR_MCI_CONT_RSSI_POWER 0x000000FF 2099#define AR_MCI_CONT_RSSI_POWER 0x000000FF
2100#define AR_MCI_CONT_RSSI_POWER_S 0 2100#define AR_MCI_CONT_RSSI_POWER_S 0
2101#define AR_MCI_CONT_RRIORITY 0x0000FF00 2101#define AR_MCI_CONT_PRIORITY 0x0000FF00
2102#define AR_MCI_CONT_RRIORITY_S 8 2102#define AR_MCI_CONT_PRIORITY_S 8
2103#define AR_MCI_CONT_TXRX 0x00010000 2103#define AR_MCI_CONT_TXRX 0x00010000
2104#define AR_MCI_CONT_TXRX_S 16 2104#define AR_MCI_CONT_TXRX_S 16
2105 2105
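
The reg.h rename matters because mci.c now reads these fields with ath9k's token-pasting MS() helper, which derives the shift macro name from the mask macro name — AR_MCI_CONT_PRIORITY only works if AR_MCI_CONT_PRIORITY_S exists with the same spelling. A standalone illustration of the idiom with a hypothetical field (MS() defined locally in the same style):

#include <stdio.h>

#define MY_PRIORITY	0x0000FF00	/* field mask */
#define MY_PRIORITY_S	8		/* matching shift */

/* extract a field: the _S shift name is pasted from the mask name */
#define MS(_v, _f)	(((_v) & _f) >> _f##_S)

int main(void)
{
	unsigned int cont_status = 0x00012a7f;

	printf("priority = %u\n", MS(cont_status, MY_PRIORITY)); /* 42 */
	return 0;
}
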
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index abb48032753..9d5170b6df5 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -34,3 +34,5 @@ brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
34 sdio_chip.o 34 sdio_chip.o
35brcmfmac-$(CONFIG_BRCMFMAC_USB) += \ 35brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
36 usb.o 36 usb.o
37brcmfmac-$(CONFIG_BRCMDBG) += \
38 dhd_dbg.o
\ No newline at end of file
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 9f637014486..a11fe54f595 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -613,6 +613,9 @@ struct brcmf_pub {
613 struct work_struct multicast_work; 613 struct work_struct multicast_work;
614 u8 macvalue[ETH_ALEN]; 614 u8 macvalue[ETH_ALEN];
615 atomic_t pend_8021x_cnt; 615 atomic_t pend_8021x_cnt;
616#ifdef DEBUG
617 struct dentry *dbgfs_dir;
618#endif
616}; 619};
617 620
618struct brcmf_if_event { 621struct brcmf_if_event {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
new file mode 100644
index 00000000000..7f89540b56d
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
@@ -0,0 +1,126 @@
1/*
2 * Copyright (c) 2012 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16#include <linux/debugfs.h>
17#include <linux/if_ether.h>
18#include <linux/if.h>
19#include <linux/ieee80211.h>
20#include <linux/module.h>
21
22#include <defs.h>
23#include <brcmu_wifi.h>
24#include <brcmu_utils.h>
25#include "dhd.h"
26#include "dhd_bus.h"
27#include "dhd_dbg.h"
28
29static struct dentry *root_folder;
30
31void brcmf_debugfs_init(void)
32{
33 root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL);
34 if (IS_ERR(root_folder))
35 root_folder = NULL;
36}
37
38void brcmf_debugfs_exit(void)
39{
40 if (!root_folder)
41 return;
42
43 debugfs_remove_recursive(root_folder);
44 root_folder = NULL;
45}
46
47int brcmf_debugfs_attach(struct brcmf_pub *drvr)
48{
49 if (!root_folder)
50 return -ENODEV;
51
52 drvr->dbgfs_dir = debugfs_create_dir(dev_name(drvr->dev), root_folder);
53 return PTR_RET(drvr->dbgfs_dir);
54}
55
56void brcmf_debugfs_detach(struct brcmf_pub *drvr)
57{
58 if (!IS_ERR_OR_NULL(drvr->dbgfs_dir))
59 debugfs_remove_recursive(drvr->dbgfs_dir);
60}
61
62struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr)
63{
64 return drvr->dbgfs_dir;
65}
66
67static
68ssize_t brcmf_debugfs_sdio_counter_read(struct file *f, char __user *data,
69 size_t count, loff_t *ppos)
70{
71 struct brcmf_sdio_count *sdcnt = f->private_data;
72 char buf[750];
73 int res;
74
75 /* only allow read from start */
76 if (*ppos > 0)
77 return 0;
78
79 res = scnprintf(buf, sizeof(buf),
80 "intrcount: %u\nlastintrs: %u\n"
81 "pollcnt: %u\nregfails: %u\n"
82 "tx_sderrs: %u\nfcqueued: %u\n"
83 "rxrtx: %u\nrx_toolong: %u\n"
84 "rxc_errors: %u\nrx_hdrfail: %u\n"
85 "rx_badhdr: %u\nrx_badseq: %u\n"
86 "fc_rcvd: %u\nfc_xoff: %u\n"
87 "fc_xon: %u\nrxglomfail: %u\n"
88 "rxglomframes: %u\nrxglompkts: %u\n"
89 "f2rxhdrs: %u\nf2rxdata: %u\n"
90 "f2txdata: %u\nf1regdata: %u\n"
91 "tickcnt: %u\ntx_ctlerrs: %lu\n"
92 "tx_ctlpkts: %lu\nrx_ctlerrs: %lu\n"
93 "rx_ctlpkts: %lu\nrx_readahead: %lu\n",
94 sdcnt->intrcount, sdcnt->lastintrs,
95 sdcnt->pollcnt, sdcnt->regfails,
96 sdcnt->tx_sderrs, sdcnt->fcqueued,
97 sdcnt->rxrtx, sdcnt->rx_toolong,
98 sdcnt->rxc_errors, sdcnt->rx_hdrfail,
99 sdcnt->rx_badhdr, sdcnt->rx_badseq,
100 sdcnt->fc_rcvd, sdcnt->fc_xoff,
101 sdcnt->fc_xon, sdcnt->rxglomfail,
102 sdcnt->rxglomframes, sdcnt->rxglompkts,
103 sdcnt->f2rxhdrs, sdcnt->f2rxdata,
104 sdcnt->f2txdata, sdcnt->f1regdata,
105 sdcnt->tickcnt, sdcnt->tx_ctlerrs,
106 sdcnt->tx_ctlpkts, sdcnt->rx_ctlerrs,
107 sdcnt->rx_ctlpkts, sdcnt->rx_readahead_cnt);
108
109 return simple_read_from_buffer(data, count, ppos, buf, res);
110}
111
112static const struct file_operations brcmf_debugfs_sdio_counter_ops = {
113 .owner = THIS_MODULE,
114 .open = simple_open,
115 .read = brcmf_debugfs_sdio_counter_read
116};
117
118void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr,
119 struct brcmf_sdio_count *sdcnt)
120{
121 struct dentry *dentry = drvr->dbgfs_dir;
122
123 if (!IS_ERR_OR_NULL(dentry))
124 debugfs_create_file("counters", S_IRUGO, dentry,
125 sdcnt, &brcmf_debugfs_sdio_counter_ops);
126}
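
brcmf_debugfs_sdio_counter_read() above follows the common debugfs read pattern: format everything with scnprintf() into a bounded buffer, then let simple_read_from_buffer() handle offsets and partial reads. A minimal sketch of the same pattern, assuming a hypothetical single counter:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>

static unsigned int my_counter;

static ssize_t my_counter_read(struct file *f, char __user *data,
			       size_t count, loff_t *ppos)
{
	char buf[32];
	int res;

	/* scnprintf() never overruns buf and returns the bytes written */
	res = scnprintf(buf, sizeof(buf), "counter: %u\n", my_counter);

	/* copes with *ppos, short reads, and EOF bookkeeping for us */
	return simple_read_from_buffer(data, count, ppos, buf, res);
}

static const struct file_operations my_counter_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,	/* file->private_data = inode->i_private */
	.read = my_counter_read,
};

/* registration, e.g. from probe:
 * debugfs_create_file("counter", S_IRUGO, parent_dir, NULL,
 *                     &my_counter_ops);
 */
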
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index a2c4576cf9f..b784920532d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -76,4 +76,63 @@ do { \
76 76
77extern int brcmf_msg_level; 77extern int brcmf_msg_level;
78 78
79/*
80 * Counter variables used in the brcmfmac sdio driver.
81 */
82struct brcmf_sdio_count {
83 uint intrcount; /* Count of device interrupt callbacks */
84 uint lastintrs; /* Count as of last watchdog timer */
85 uint pollcnt; /* Count of active polls */
86 uint regfails; /* Count of R_REG failures */
87 uint tx_sderrs; /* Count of tx attempts with sd errors */
88 uint fcqueued; /* Tx packets that got queued */
89 uint rxrtx; /* Count of rtx requests (NAK to dongle) */
90 uint rx_toolong; /* Receive frames too long to receive */
91 uint rxc_errors; /* SDIO errors when reading control frames */
92 uint rx_hdrfail; /* SDIO errors on header reads */
93 uint rx_badhdr; /* Bad received headers (roosync?) */
94 uint rx_badseq; /* Mismatched rx sequence number */
95 uint fc_rcvd; /* Number of flow-control events received */
96 uint fc_xoff; /* Number which turned on flow-control */
97 uint fc_xon; /* Number which turned off flow-control */
98 uint rxglomfail; /* Failed deglom attempts */
99 uint rxglomframes; /* Number of glom frames (superframes) */
100 uint rxglompkts; /* Number of packets from glom frames */
101 uint f2rxhdrs; /* Number of header reads */
102 uint f2rxdata; /* Number of frame data reads */
103 uint f2txdata; /* Number of f2 frame writes */
104 uint f1regdata; /* Number of f1 register accesses */
105 uint tickcnt; /* Number of times the watchdog has been scheduled */
106 ulong tx_ctlerrs; /* Err of sending ctrl frames */
107 ulong tx_ctlpkts; /* Ctrl frames sent to dongle */
108 ulong rx_ctlerrs; /* Err of processing rx ctrl frames */
109 ulong rx_ctlpkts; /* Ctrl frames processed from dongle */
110 ulong rx_readahead_cnt; /* packets where header read-ahead was used */
111};
112
113struct brcmf_pub;
114#ifdef DEBUG
115void brcmf_debugfs_init(void);
116void brcmf_debugfs_exit(void);
117int brcmf_debugfs_attach(struct brcmf_pub *drvr);
118void brcmf_debugfs_detach(struct brcmf_pub *drvr);
119struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr);
120void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr,
121 struct brcmf_sdio_count *sdcnt);
122#else
123static inline void brcmf_debugfs_init(void)
124{
125}
126static inline void brcmf_debugfs_exit(void)
127{
128}
129static inline int brcmf_debugfs_attach(struct brcmf_pub *drvr)
130{
131 return 0;
132}
133static inline void brcmf_debugfs_detach(struct brcmf_pub *drvr)
134{
135}
136#endif
137
79#endif /* _BRCMF_DBG_H_ */ 138#endif /* _BRCMF_DBG_H_ */
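
The header keeps callers free of conditionals: with DEBUG set the real prototypes are declared, otherwise static inline no-op stubs compile away entirely. The shape of the idiom, reduced to one function with hypothetical names:

/* my_dbg.h */
struct my_pub;		/* forward declaration is enough here */

#ifdef DEBUG
int my_debugfs_attach(struct my_pub *drvr);
#else
static inline int my_debugfs_attach(struct my_pub *drvr)
{
	return 0;	/* debug build off: succeed silently, emit no code */
}
#endif
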
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 8933f9b31a9..01cf6c03390 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -1007,6 +1007,9 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
1007 drvr->bus_if->drvr = drvr; 1007 drvr->bus_if->drvr = drvr;
1008 drvr->dev = dev; 1008 drvr->dev = dev;
1009 1009
1010 /* create device debugfs folder */
1011 brcmf_debugfs_attach(drvr);
1012
1010 /* Attach and link in the protocol */ 1013 /* Attach and link in the protocol */
1011 ret = brcmf_proto_attach(drvr); 1014 ret = brcmf_proto_attach(drvr);
1012 if (ret != 0) { 1015 if (ret != 0) {
@@ -1123,6 +1126,7 @@ void brcmf_detach(struct device *dev)
1123 brcmf_proto_detach(drvr); 1126 brcmf_proto_detach(drvr);
1124 } 1127 }
1125 1128
1129 brcmf_debugfs_detach(drvr);
1126 bus_if->drvr = NULL; 1130 bus_if->drvr = NULL;
1127 kfree(drvr); 1131 kfree(drvr);
1128} 1132}
@@ -1192,6 +1196,8 @@ exit:
1192 1196
1193static void brcmf_driver_init(struct work_struct *work) 1197static void brcmf_driver_init(struct work_struct *work)
1194{ 1198{
1199 brcmf_debugfs_init();
1200
1195#ifdef CONFIG_BRCMFMAC_SDIO 1201#ifdef CONFIG_BRCMFMAC_SDIO
1196 brcmf_sdio_init(); 1202 brcmf_sdio_init();
1197#endif 1203#endif
@@ -1219,6 +1225,7 @@ static void __exit brcmfmac_module_exit(void)
1219#ifdef CONFIG_BRCMFMAC_USB 1225#ifdef CONFIG_BRCMFMAC_USB
1220 brcmf_usb_exit(); 1226 brcmf_usb_exit();
1221#endif 1227#endif
1228 brcmf_debugfs_exit();
1222} 1229}
1223 1230
1224module_init(brcmfmac_module_init); 1231module_init(brcmfmac_module_init);
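
The hooks added to dhd_linux.c above give the debugfs tree a clean lifecycle: a driver-wide root directory at module init, a per-device subdirectory at attach, and recursive removal on detach and exit. A condensed sketch of that lifecycle, hypothetical names, assuming <linux/debugfs.h>:

#include <linux/debugfs.h>
#include <linux/err.h>

static struct dentry *my_root;

void my_debugfs_init(void)
{
	my_root = debugfs_create_dir("mydrv", NULL);
	if (IS_ERR(my_root))
		my_root = NULL;	/* debugfs unavailable: degrade quietly */
}

void my_debugfs_exit(void)
{
	if (!my_root)
		return;

	debugfs_remove_recursive(my_root);
	my_root = NULL;
}
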
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 4deae28fc21..b023766954a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -502,12 +502,9 @@ struct brcmf_sdio {
502 bool intr; /* Use interrupts */ 502 bool intr; /* Use interrupts */
503 bool poll; /* Use polling */ 503 bool poll; /* Use polling */
504 bool ipend; /* Device interrupt is pending */ 504 bool ipend; /* Device interrupt is pending */
505 uint intrcount; /* Count of device interrupt callbacks */
506 uint lastintrs; /* Count as of last watchdog timer */
507 uint spurious; /* Count of spurious interrupts */ 505 uint spurious; /* Count of spurious interrupts */
508 uint pollrate; /* Ticks between device polls */ 506 uint pollrate; /* Ticks between device polls */
509 uint polltick; /* Tick counter */ 507 uint polltick; /* Tick counter */
510 uint pollcnt; /* Count of active polls */
511 508
512#ifdef DEBUG 509#ifdef DEBUG
513 uint console_interval; 510 uint console_interval;
@@ -515,8 +512,6 @@ struct brcmf_sdio {
515 uint console_addr; /* Console address from shared struct */ 512 uint console_addr; /* Console address from shared struct */
516#endif /* DEBUG */ 513#endif /* DEBUG */
517 514
518 uint regfails; /* Count of R_REG failures */
519
520 uint clkstate; /* State of sd and backplane clock(s) */ 515 uint clkstate; /* State of sd and backplane clock(s) */
521 bool activity; /* Activity flag for clock down */ 516 bool activity; /* Activity flag for clock down */
522 s32 idletime; /* Control for activity timeout */ 517 s32 idletime; /* Control for activity timeout */
@@ -531,33 +526,6 @@ struct brcmf_sdio {
531/* Field to decide if rx of control frames happen in rxbuf or lb-pool */ 526/* Field to decide if rx of control frames happen in rxbuf or lb-pool */
532 bool usebufpool; 527 bool usebufpool;
533 528
534 /* Some additional counters */
535 uint tx_sderrs; /* Count of tx attempts with sd errors */
536 uint fcqueued; /* Tx packets that got queued */
537 uint rxrtx; /* Count of rtx requests (NAK to dongle) */
538 uint rx_toolong; /* Receive frames too long to receive */
539 uint rxc_errors; /* SDIO errors when reading control frames */
540 uint rx_hdrfail; /* SDIO errors on header reads */
541 uint rx_badhdr; /* Bad received headers (roosync?) */
542 uint rx_badseq; /* Mismatched rx sequence number */
543 uint fc_rcvd; /* Number of flow-control events received */
544 uint fc_xoff; /* Number which turned on flow-control */
545 uint fc_xon; /* Number which turned off flow-control */
546 uint rxglomfail; /* Failed deglom attempts */
547 uint rxglomframes; /* Number of glom frames (superframes) */
548 uint rxglompkts; /* Number of packets from glom frames */
549 uint f2rxhdrs; /* Number of header reads */
550 uint f2rxdata; /* Number of frame data reads */
551 uint f2txdata; /* Number of f2 frame writes */
552 uint f1regdata; /* Number of f1 register accesses */
553 uint tickcnt; /* Number of watchdog been schedule */
554 unsigned long tx_ctlerrs; /* Err of sending ctrl frames */
555 unsigned long tx_ctlpkts; /* Ctrl frames sent to dongle */
556 unsigned long rx_ctlerrs; /* Err of processing rx ctrl frames */
557 unsigned long rx_ctlpkts; /* Ctrl frames processed from dongle */
558 unsigned long rx_readahead_cnt; /* Number of packets where header
559 * read-ahead was used. */
560
561 u8 *ctrl_frame_buf; 529 u8 *ctrl_frame_buf;
562 u32 ctrl_frame_len; 530 u32 ctrl_frame_len;
563 bool ctrl_frame_stat; 531 bool ctrl_frame_stat;
@@ -583,6 +551,7 @@ struct brcmf_sdio {
583 u32 fw_ptr; 551 u32 fw_ptr;
584 552
585 bool txoff; /* Transmit flow-controlled */ 553 bool txoff; /* Transmit flow-controlled */
554 struct brcmf_sdio_count sdcnt;
586}; 555};
587 556
588/* clkstate */ 557/* clkstate */
@@ -945,7 +914,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
945 if (ret == 0) 914 if (ret == 0)
946 w_sdreg32(bus, SMB_INT_ACK, 915 w_sdreg32(bus, SMB_INT_ACK,
947 offsetof(struct sdpcmd_regs, tosbmailbox)); 916 offsetof(struct sdpcmd_regs, tosbmailbox));
948 bus->f1regdata += 2; 917 bus->sdcnt.f1regdata += 2;
949 918
950 /* Dongle recomposed rx frames, accept them again */ 919 /* Dongle recomposed rx frames, accept them again */
951 if (hmb_data & HMB_DATA_NAKHANDLED) { 920 if (hmb_data & HMB_DATA_NAKHANDLED) {
@@ -984,12 +953,12 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
984 HMB_DATA_FCDATA_SHIFT; 953 HMB_DATA_FCDATA_SHIFT;
985 954
986 if (fcbits & ~bus->flowcontrol) 955 if (fcbits & ~bus->flowcontrol)
987 bus->fc_xoff++; 956 bus->sdcnt.fc_xoff++;
988 957
989 if (bus->flowcontrol & ~fcbits) 958 if (bus->flowcontrol & ~fcbits)
990 bus->fc_xon++; 959 bus->sdcnt.fc_xon++;
991 960
992 bus->fc_rcvd++; 961 bus->sdcnt.fc_rcvd++;
993 bus->flowcontrol = fcbits; 962 bus->flowcontrol = fcbits;
994 } 963 }
995 964
@@ -1021,7 +990,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
1021 990
1022 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, 991 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
1023 SFC_RF_TERM, &err); 992 SFC_RF_TERM, &err);
1024 bus->f1regdata++; 993 bus->sdcnt.f1regdata++;
1025 994
1026 /* Wait until the packet has been flushed (device/FIFO stable) */ 995 /* Wait until the packet has been flushed (device/FIFO stable) */
1027 for (lastrbc = retries = 0xffff; retries > 0; retries--) { 996 for (lastrbc = retries = 0xffff; retries > 0; retries--) {
@@ -1029,7 +998,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
1029 SBSDIO_FUNC1_RFRAMEBCHI, &err); 998 SBSDIO_FUNC1_RFRAMEBCHI, &err);
1030 lo = brcmf_sdio_regrb(bus->sdiodev, 999 lo = brcmf_sdio_regrb(bus->sdiodev,
1031 SBSDIO_FUNC1_RFRAMEBCLO, &err); 1000 SBSDIO_FUNC1_RFRAMEBCLO, &err);
1032 bus->f1regdata += 2; 1001 bus->sdcnt.f1regdata += 2;
1033 1002
1034 if ((hi == 0) && (lo == 0)) 1003 if ((hi == 0) && (lo == 0))
1035 break; 1004 break;
@@ -1047,11 +1016,11 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
1047 brcmf_dbg(INFO, "flush took %d iterations\n", 0xffff - retries); 1016 brcmf_dbg(INFO, "flush took %d iterations\n", 0xffff - retries);
1048 1017
1049 if (rtx) { 1018 if (rtx) {
1050 bus->rxrtx++; 1019 bus->sdcnt.rxrtx++;
1051 err = w_sdreg32(bus, SMB_NAK, 1020 err = w_sdreg32(bus, SMB_NAK,
1052 offsetof(struct sdpcmd_regs, tosbmailbox)); 1021 offsetof(struct sdpcmd_regs, tosbmailbox));
1053 1022
1054 bus->f1regdata++; 1023 bus->sdcnt.f1regdata++;
1055 if (err == 0) 1024 if (err == 0)
1056 bus->rxskip = true; 1025 bus->rxskip = true;
1057 } 1026 }
@@ -1243,7 +1212,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1243 dlen); 1212 dlen);
1244 errcode = -1; 1213 errcode = -1;
1245 } 1214 }
1246 bus->f2rxdata++; 1215 bus->sdcnt.f2rxdata++;
1247 1216
1248 /* On failure, kill the superframe, allow a couple retries */ 1217 /* On failure, kill the superframe, allow a couple retries */
1249 if (errcode < 0) { 1218 if (errcode < 0) {
@@ -1256,7 +1225,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1256 } else { 1225 } else {
1257 bus->glomerr = 0; 1226 bus->glomerr = 0;
1258 brcmf_sdbrcm_rxfail(bus, true, false); 1227 brcmf_sdbrcm_rxfail(bus, true, false);
1259 bus->rxglomfail++; 1228 bus->sdcnt.rxglomfail++;
1260 brcmf_sdbrcm_free_glom(bus); 1229 brcmf_sdbrcm_free_glom(bus);
1261 } 1230 }
1262 return 0; 1231 return 0;
@@ -1312,7 +1281,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1312 if (rxseq != seq) { 1281 if (rxseq != seq) {
1313 brcmf_dbg(INFO, "(superframe) rx_seq %d, expected %d\n", 1282 brcmf_dbg(INFO, "(superframe) rx_seq %d, expected %d\n",
1314 seq, rxseq); 1283 seq, rxseq);
1315 bus->rx_badseq++; 1284 bus->sdcnt.rx_badseq++;
1316 rxseq = seq; 1285 rxseq = seq;
1317 } 1286 }
1318 1287
@@ -1376,7 +1345,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1376 } else { 1345 } else {
1377 bus->glomerr = 0; 1346 bus->glomerr = 0;
1378 brcmf_sdbrcm_rxfail(bus, true, false); 1347 brcmf_sdbrcm_rxfail(bus, true, false);
1379 bus->rxglomfail++; 1348 bus->sdcnt.rxglomfail++;
1380 brcmf_sdbrcm_free_glom(bus); 1349 brcmf_sdbrcm_free_glom(bus);
1381 } 1350 }
1382 bus->nextlen = 0; 1351 bus->nextlen = 0;
@@ -1402,7 +1371,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1402 if (rxseq != seq) { 1371 if (rxseq != seq) {
1403 brcmf_dbg(GLOM, "rx_seq %d, expected %d\n", 1372 brcmf_dbg(GLOM, "rx_seq %d, expected %d\n",
1404 seq, rxseq); 1373 seq, rxseq);
1405 bus->rx_badseq++; 1374 bus->sdcnt.rx_badseq++;
1406 rxseq = seq; 1375 rxseq = seq;
1407 } 1376 }
1408 rxseq++; 1377 rxseq++;
@@ -1441,8 +1410,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1441 down(&bus->sdsem); 1410 down(&bus->sdsem);
1442 } 1411 }
1443 1412
1444 bus->rxglomframes++; 1413 bus->sdcnt.rxglomframes++;
1445 bus->rxglompkts += bus->glom.qlen; 1414 bus->sdcnt.rxglompkts += bus->glom.qlen;
1446 } 1415 }
1447 return num; 1416 return num;
1448} 1417}
@@ -1526,7 +1495,7 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
1526 brcmf_dbg(ERROR, "%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n", 1495 brcmf_dbg(ERROR, "%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
1527 len, len - doff, bus->sdiodev->bus_if->maxctl); 1496 len, len - doff, bus->sdiodev->bus_if->maxctl);
1528 bus->sdiodev->bus_if->dstats.rx_errors++; 1497 bus->sdiodev->bus_if->dstats.rx_errors++;
1529 bus->rx_toolong++; 1498 bus->sdcnt.rx_toolong++;
1530 brcmf_sdbrcm_rxfail(bus, false, false); 1499 brcmf_sdbrcm_rxfail(bus, false, false);
1531 goto done; 1500 goto done;
1532 } 1501 }
@@ -1536,13 +1505,13 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
1536 bus->sdiodev->sbwad, 1505 bus->sdiodev->sbwad,
1537 SDIO_FUNC_2, 1506 SDIO_FUNC_2,
1538 F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen); 1507 F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen);
1539 bus->f2rxdata++; 1508 bus->sdcnt.f2rxdata++;
1540 1509
1541 /* Control frame failures need retransmission */ 1510 /* Control frame failures need retransmission */
1542 if (sdret < 0) { 1511 if (sdret < 0) {
1543 brcmf_dbg(ERROR, "read %d control bytes failed: %d\n", 1512 brcmf_dbg(ERROR, "read %d control bytes failed: %d\n",
1544 rdlen, sdret); 1513 rdlen, sdret);
1545 bus->rxc_errors++; 1514 bus->sdcnt.rxc_errors++;
1546 brcmf_sdbrcm_rxfail(bus, true, true); 1515 brcmf_sdbrcm_rxfail(bus, true, true);
1547 goto done; 1516 goto done;
1548 } 1517 }
@@ -1589,7 +1558,7 @@ brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen,
1589 /* Read the entire frame */ 1558 /* Read the entire frame */
1590 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad, 1559 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1591 SDIO_FUNC_2, F2SYNC, *pkt); 1560 SDIO_FUNC_2, F2SYNC, *pkt);
1592 bus->f2rxdata++; 1561 bus->sdcnt.f2rxdata++;
1593 1562
1594 if (sdret < 0) { 1563 if (sdret < 0) {
1595 brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n", 1564 brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n",
@@ -1630,7 +1599,7 @@ brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf,
1630 if ((u16)~(*len ^ check)) { 1599 if ((u16)~(*len ^ check)) {
1631 brcmf_dbg(ERROR, "(nextlen): HW hdr error: nextlen/len/check 0x%04x/0x%04x/0x%04x\n", 1600 brcmf_dbg(ERROR, "(nextlen): HW hdr error: nextlen/len/check 0x%04x/0x%04x/0x%04x\n",
1632 nextlen, *len, check); 1601 nextlen, *len, check);
1633 bus->rx_badhdr++; 1602 bus->sdcnt.rx_badhdr++;
1634 brcmf_sdbrcm_rxfail(bus, false, false); 1603 brcmf_sdbrcm_rxfail(bus, false, false);
1635 goto fail; 1604 goto fail;
1636 } 1605 }
@@ -1746,7 +1715,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1746 bus->nextlen = 0; 1715 bus->nextlen = 0;
1747 } 1716 }
1748 1717
1749 bus->rx_readahead_cnt++; 1718 bus->sdcnt.rx_readahead_cnt++;
1750 1719
1751 /* Handle Flow Control */ 1720 /* Handle Flow Control */
1752 fcbits = SDPCM_FCMASK_VALUE( 1721 fcbits = SDPCM_FCMASK_VALUE(
@@ -1754,12 +1723,12 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1754 1723
1755 if (bus->flowcontrol != fcbits) { 1724 if (bus->flowcontrol != fcbits) {
1756 if (~bus->flowcontrol & fcbits) 1725 if (~bus->flowcontrol & fcbits)
1757 bus->fc_xoff++; 1726 bus->sdcnt.fc_xoff++;
1758 1727
1759 if (bus->flowcontrol & ~fcbits) 1728 if (bus->flowcontrol & ~fcbits)
1760 bus->fc_xon++; 1729 bus->sdcnt.fc_xon++;
1761 1730
1762 bus->fc_rcvd++; 1731 bus->sdcnt.fc_rcvd++;
1763 bus->flowcontrol = fcbits; 1732 bus->flowcontrol = fcbits;
1764 } 1733 }
1765 1734
@@ -1767,7 +1736,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1767 if (rxseq != seq) { 1736 if (rxseq != seq) {
1768 brcmf_dbg(INFO, "(nextlen): rx_seq %d, expected %d\n", 1737 brcmf_dbg(INFO, "(nextlen): rx_seq %d, expected %d\n",
1769 seq, rxseq); 1738 seq, rxseq);
1770 bus->rx_badseq++; 1739 bus->sdcnt.rx_badseq++;
1771 rxseq = seq; 1740 rxseq = seq;
1772 } 1741 }
1773 1742
@@ -1814,11 +1783,11 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1814 sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad, 1783 sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
1815 SDIO_FUNC_2, F2SYNC, bus->rxhdr, 1784 SDIO_FUNC_2, F2SYNC, bus->rxhdr,
1816 BRCMF_FIRSTREAD); 1785 BRCMF_FIRSTREAD);
1817 bus->f2rxhdrs++; 1786 bus->sdcnt.f2rxhdrs++;
1818 1787
1819 if (sdret < 0) { 1788 if (sdret < 0) {
1820 brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n", sdret); 1789 brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n", sdret);
1821 bus->rx_hdrfail++; 1790 bus->sdcnt.rx_hdrfail++;
1822 brcmf_sdbrcm_rxfail(bus, true, true); 1791 brcmf_sdbrcm_rxfail(bus, true, true);
1823 continue; 1792 continue;
1824 } 1793 }
@@ -1840,7 +1809,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1840 if ((u16) ~(len ^ check)) { 1809 if ((u16) ~(len ^ check)) {
1841 brcmf_dbg(ERROR, "HW hdr err: len/check 0x%04x/0x%04x\n", 1810 brcmf_dbg(ERROR, "HW hdr err: len/check 0x%04x/0x%04x\n",
1842 len, check); 1811 len, check);
1843 bus->rx_badhdr++; 1812 bus->sdcnt.rx_badhdr++;
1844 brcmf_sdbrcm_rxfail(bus, false, false); 1813 brcmf_sdbrcm_rxfail(bus, false, false);
1845 continue; 1814 continue;
1846 } 1815 }
@@ -1861,7 +1830,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1861 if ((doff < SDPCM_HDRLEN) || (doff > len)) { 1830 if ((doff < SDPCM_HDRLEN) || (doff > len)) {
1862 brcmf_dbg(ERROR, "Bad data offset %d: HW len %d, min %d seq %d\n", 1831 brcmf_dbg(ERROR, "Bad data offset %d: HW len %d, min %d seq %d\n",
1863 doff, len, SDPCM_HDRLEN, seq); 1832 doff, len, SDPCM_HDRLEN, seq);
1864 bus->rx_badhdr++; 1833 bus->sdcnt.rx_badhdr++;
1865 brcmf_sdbrcm_rxfail(bus, false, false); 1834 brcmf_sdbrcm_rxfail(bus, false, false);
1866 continue; 1835 continue;
1867 } 1836 }
@@ -1880,19 +1849,19 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1880 1849
1881 if (bus->flowcontrol != fcbits) { 1850 if (bus->flowcontrol != fcbits) {
1882 if (~bus->flowcontrol & fcbits) 1851 if (~bus->flowcontrol & fcbits)
1883 bus->fc_xoff++; 1852 bus->sdcnt.fc_xoff++;
1884 1853
1885 if (bus->flowcontrol & ~fcbits) 1854 if (bus->flowcontrol & ~fcbits)
1886 bus->fc_xon++; 1855 bus->sdcnt.fc_xon++;
1887 1856
1888 bus->fc_rcvd++; 1857 bus->sdcnt.fc_rcvd++;
1889 bus->flowcontrol = fcbits; 1858 bus->flowcontrol = fcbits;
1890 } 1859 }
1891 1860
1892 /* Check and update sequence number */ 1861 /* Check and update sequence number */
1893 if (rxseq != seq) { 1862 if (rxseq != seq) {
1894 brcmf_dbg(INFO, "rx_seq %d, expected %d\n", seq, rxseq); 1863 brcmf_dbg(INFO, "rx_seq %d, expected %d\n", seq, rxseq);
1895 bus->rx_badseq++; 1864 bus->sdcnt.rx_badseq++;
1896 rxseq = seq; 1865 rxseq = seq;
1897 } 1866 }
1898 1867
@@ -1937,7 +1906,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1937 brcmf_dbg(ERROR, "too long: len %d rdlen %d\n", 1906 brcmf_dbg(ERROR, "too long: len %d rdlen %d\n",
1938 len, rdlen); 1907 len, rdlen);
1939 bus->sdiodev->bus_if->dstats.rx_errors++; 1908 bus->sdiodev->bus_if->dstats.rx_errors++;
1940 bus->rx_toolong++; 1909 bus->sdcnt.rx_toolong++;
1941 brcmf_sdbrcm_rxfail(bus, false, false); 1910 brcmf_sdbrcm_rxfail(bus, false, false);
1942 continue; 1911 continue;
1943 } 1912 }
@@ -1960,7 +1929,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1960 /* Read the remaining frame data */ 1929 /* Read the remaining frame data */
1961 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad, 1930 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1962 SDIO_FUNC_2, F2SYNC, pkt); 1931 SDIO_FUNC_2, F2SYNC, pkt);
1963 bus->f2rxdata++; 1932 bus->sdcnt.f2rxdata++;
1964 1933
1965 if (sdret < 0) { 1934 if (sdret < 0) {
1966 brcmf_dbg(ERROR, "read %d %s bytes failed: %d\n", rdlen, 1935 brcmf_dbg(ERROR, "read %d %s bytes failed: %d\n", rdlen,
@@ -2147,18 +2116,18 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
2147 2116
2148 ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad, 2117 ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
2149 SDIO_FUNC_2, F2SYNC, pkt); 2118 SDIO_FUNC_2, F2SYNC, pkt);
2150 bus->f2txdata++; 2119 bus->sdcnt.f2txdata++;
2151 2120
2152 if (ret < 0) { 2121 if (ret < 0) {
2153 /* On failure, abort the command and terminate the frame */ 2122 /* On failure, abort the command and terminate the frame */
2154 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n", 2123 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2155 ret); 2124 ret);
2156 bus->tx_sderrs++; 2125 bus->sdcnt.tx_sderrs++;
2157 2126
2158 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2); 2127 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
2159 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, 2128 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2160 SFC_WF_TERM, NULL); 2129 SFC_WF_TERM, NULL);
2161 bus->f1regdata++; 2130 bus->sdcnt.f1regdata++;
2162 2131
2163 for (i = 0; i < 3; i++) { 2132 for (i = 0; i < 3; i++) {
2164 u8 hi, lo; 2133 u8 hi, lo;
@@ -2166,7 +2135,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
2166 SBSDIO_FUNC1_WFRAMEBCHI, NULL); 2135 SBSDIO_FUNC1_WFRAMEBCHI, NULL);
2167 lo = brcmf_sdio_regrb(bus->sdiodev, 2136 lo = brcmf_sdio_regrb(bus->sdiodev,
2168 SBSDIO_FUNC1_WFRAMEBCLO, NULL); 2137 SBSDIO_FUNC1_WFRAMEBCLO, NULL);
2169 bus->f1regdata += 2; 2138 bus->sdcnt.f1regdata += 2;
2170 if ((hi == 0) && (lo == 0)) 2139 if ((hi == 0) && (lo == 0))
2171 break; 2140 break;
2172 } 2141 }
@@ -2224,7 +2193,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
2224 ret = r_sdreg32(bus, &intstatus, 2193 ret = r_sdreg32(bus, &intstatus,
2225 offsetof(struct sdpcmd_regs, 2194 offsetof(struct sdpcmd_regs,
2226 intstatus)); 2195 intstatus));
2227 bus->f2txdata++; 2196 bus->sdcnt.f2txdata++;
2228 if (ret != 0) 2197 if (ret != 0)
2229 break; 2198 break;
2230 if (intstatus & bus->hostintmask) 2199 if (intstatus & bus->hostintmask)
@@ -2417,7 +2386,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2417 bus->ipend = false; 2386 bus->ipend = false;
2418 err = r_sdreg32(bus, &newstatus, 2387 err = r_sdreg32(bus, &newstatus,
2419 offsetof(struct sdpcmd_regs, intstatus)); 2388 offsetof(struct sdpcmd_regs, intstatus));
2420 bus->f1regdata++; 2389 bus->sdcnt.f1regdata++;
2421 if (err != 0) 2390 if (err != 0)
2422 newstatus = 0; 2391 newstatus = 0;
2423 newstatus &= bus->hostintmask; 2392 newstatus &= bus->hostintmask;
@@ -2426,7 +2395,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2426 err = w_sdreg32(bus, newstatus, 2395 err = w_sdreg32(bus, newstatus,
2427 offsetof(struct sdpcmd_regs, 2396 offsetof(struct sdpcmd_regs,
2428 intstatus)); 2397 intstatus));
2429 bus->f1regdata++; 2398 bus->sdcnt.f1regdata++;
2430 } 2399 }
2431 } 2400 }
2432 2401
@@ -2445,7 +2414,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2445 2414
2446 err = r_sdreg32(bus, &newstatus, 2415 err = r_sdreg32(bus, &newstatus,
2447 offsetof(struct sdpcmd_regs, intstatus)); 2416 offsetof(struct sdpcmd_regs, intstatus));
2448 bus->f1regdata += 2; 2417 bus->sdcnt.f1regdata += 2;
2449 bus->fcstate = 2418 bus->fcstate =
2450 !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)); 2419 !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
2451 intstatus |= (newstatus & bus->hostintmask); 2420 intstatus |= (newstatus & bus->hostintmask);
@@ -2510,13 +2479,13 @@ clkwait:
2510 terminate the frame */ 2479 terminate the frame */
2511 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n", 2480 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2512 ret); 2481 ret);
2513 bus->tx_sderrs++; 2482 bus->sdcnt.tx_sderrs++;
2514 2483
2515 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2); 2484 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
2516 2485
2517 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, 2486 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2518 SFC_WF_TERM, &err); 2487 SFC_WF_TERM, &err);
2519 bus->f1regdata++; 2488 bus->sdcnt.f1regdata++;
2520 2489
2521 for (i = 0; i < 3; i++) { 2490 for (i = 0; i < 3; i++) {
2522 u8 hi, lo; 2491 u8 hi, lo;
@@ -2526,7 +2495,7 @@ clkwait:
2526 lo = brcmf_sdio_regrb(bus->sdiodev, 2495 lo = brcmf_sdio_regrb(bus->sdiodev,
2527 SBSDIO_FUNC1_WFRAMEBCLO, 2496 SBSDIO_FUNC1_WFRAMEBCLO,
2528 &err); 2497 &err);
2529 bus->f1regdata += 2; 2498 bus->sdcnt.f1regdata += 2;
2530 if ((hi == 0) && (lo == 0)) 2499 if ((hi == 0) && (lo == 0))
2531 break; 2500 break;
2532 } 2501 }
@@ -2657,7 +2626,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2657 /* Check for existing queue, current flow-control, 2626 /* Check for existing queue, current flow-control,
2658 pending event, or pending clock */ 2627 pending event, or pending clock */
2659 brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq)); 2628 brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
2660 bus->fcqueued++; 2629 bus->sdcnt.fcqueued++;
2661 2630
2662 /* Priority based enq */ 2631 /* Priority based enq */
2663 spin_lock_bh(&bus->txqlock); 2632 spin_lock_bh(&bus->txqlock);
@@ -2845,13 +2814,13 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
2845 /* On failure, abort the command and terminate the frame */ 2814 /* On failure, abort the command and terminate the frame */
2846 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n", 2815 brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
2847 ret); 2816 ret);
2848 bus->tx_sderrs++; 2817 bus->sdcnt.tx_sderrs++;
2849 2818
2850 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2); 2819 brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
2851 2820
2852 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, 2821 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
2853 SFC_WF_TERM, NULL); 2822 SFC_WF_TERM, NULL);
2854 bus->f1regdata++; 2823 bus->sdcnt.f1regdata++;
2855 2824
2856 for (i = 0; i < 3; i++) { 2825 for (i = 0; i < 3; i++) {
2857 u8 hi, lo; 2826 u8 hi, lo;
@@ -2859,7 +2828,7 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
2859 SBSDIO_FUNC1_WFRAMEBCHI, NULL); 2828 SBSDIO_FUNC1_WFRAMEBCHI, NULL);
2860 lo = brcmf_sdio_regrb(bus->sdiodev, 2829 lo = brcmf_sdio_regrb(bus->sdiodev,
2861 SBSDIO_FUNC1_WFRAMEBCLO, NULL); 2830 SBSDIO_FUNC1_WFRAMEBCLO, NULL);
2862 bus->f1regdata += 2; 2831 bus->sdcnt.f1regdata += 2;
2863 if (hi == 0 && lo == 0) 2832 if (hi == 0 && lo == 0)
2864 break; 2833 break;
2865 } 2834 }
@@ -2976,13 +2945,26 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2976 up(&bus->sdsem); 2945 up(&bus->sdsem);
2977 2946
2978 if (ret) 2947 if (ret)
2979 bus->tx_ctlerrs++; 2948 bus->sdcnt.tx_ctlerrs++;
2980 else 2949 else
2981 bus->tx_ctlpkts++; 2950 bus->sdcnt.tx_ctlpkts++;
2982 2951
2983 return ret ? -EIO : 0; 2952 return ret ? -EIO : 0;
2984} 2953}
2985 2954
2955#ifdef DEBUG
2956static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
2957{
2958 struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr;
2959
2960 brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
2961}
2962#else
2963static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
2964{
2965}
2966#endif /* DEBUG */
2967
2986static int 2968static int
2987brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen) 2969brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
2988{ 2970{
@@ -3017,9 +2999,9 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
3017 } 2999 }
3018 3000
3019 if (rxlen) 3001 if (rxlen)
3020 bus->rx_ctlpkts++; 3002 bus->sdcnt.rx_ctlpkts++;
3021 else 3003 else
3022 bus->rx_ctlerrs++; 3004 bus->sdcnt.rx_ctlerrs++;
3023 3005
3024 return rxlen ? (int)rxlen : -ETIMEDOUT; 3006 return rxlen ? (int)rxlen : -ETIMEDOUT;
3025} 3007}
@@ -3419,7 +3401,7 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
3419 return 0; 3401 return 0;
3420 3402
3421 /* Start the watchdog timer */ 3403 /* Start the watchdog timer */
3422 bus->tickcnt = 0; 3404 bus->sdcnt.tickcnt = 0;
3423 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS); 3405 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
3424 3406
3425 down(&bus->sdsem); 3407 down(&bus->sdsem);
@@ -3512,7 +3494,7 @@ void brcmf_sdbrcm_isr(void *arg)
3512 return; 3494 return;
3513 } 3495 }
3514 /* Count the interrupt call */ 3496 /* Count the interrupt call */
3515 bus->intrcount++; 3497 bus->sdcnt.intrcount++;
3516 bus->ipend = true; 3498 bus->ipend = true;
3517 3499
3518 /* Shouldn't get this interrupt if we're sleeping? */ 3500 /* Shouldn't get this interrupt if we're sleeping? */
@@ -3554,7 +3536,8 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3554 bus->polltick = 0; 3536 bus->polltick = 0;
3555 3537
3556 /* Check device if no interrupts */ 3538 /* Check device if no interrupts */
3557 if (!bus->intr || (bus->intrcount == bus->lastintrs)) { 3539 if (!bus->intr ||
3540 (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
3558 3541
3559 if (!bus->dpc_sched) { 3542 if (!bus->dpc_sched) {
3560 u8 devpend; 3543 u8 devpend;
@@ -3569,7 +3552,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3569 /* If there is something, make like the ISR and 3552 /* If there is something, make like the ISR and
3570 schedule the DPC */ 3553 schedule the DPC */
3571 if (intstatus) { 3554 if (intstatus) {
3572 bus->pollcnt++; 3555 bus->sdcnt.pollcnt++;
3573 bus->ipend = true; 3556 bus->ipend = true;
3574 3557
3575 bus->dpc_sched = true; 3558 bus->dpc_sched = true;
@@ -3581,7 +3564,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3581 } 3564 }
3582 3565
3583 /* Update interrupt tracking */ 3566 /* Update interrupt tracking */
3584 bus->lastintrs = bus->intrcount; 3567 bus->sdcnt.lastintrs = bus->sdcnt.intrcount;
3585 } 3568 }
3586#ifdef DEBUG 3569#ifdef DEBUG
3587 /* Poll for console output periodically */ 3570 /* Poll for console output periodically */
@@ -3793,7 +3776,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
3793 if (!wait_for_completion_interruptible(&bus->watchdog_wait)) { 3776 if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
3794 brcmf_sdbrcm_bus_watchdog(bus); 3777 brcmf_sdbrcm_bus_watchdog(bus);
3795 /* Count the tick for reference */ 3778 /* Count the tick for reference */
3796 bus->tickcnt++; 3779 bus->sdcnt.tickcnt++;
3797 } else 3780 } else
3798 break; 3781 break;
3799 } 3782 }
@@ -3834,7 +3817,6 @@ static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
3834static void brcmf_sdbrcm_release(struct brcmf_sdio *bus) 3817static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
3835{ 3818{
3836 brcmf_dbg(TRACE, "Enter\n"); 3819 brcmf_dbg(TRACE, "Enter\n");
3837
3838 if (bus) { 3820 if (bus) {
3839 /* De-register interrupt handler */ 3821 /* De-register interrupt handler */
3840 brcmf_sdio_intr_unregister(bus->sdiodev); 3822 brcmf_sdio_intr_unregister(bus->sdiodev);
@@ -3938,6 +3920,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3938 goto fail; 3920 goto fail;
3939 } 3921 }
3940 3922
3923 brcmf_sdio_debugfs_create(bus);
3941 brcmf_dbg(INFO, "completed!!\n"); 3924 brcmf_dbg(INFO, "completed!!\n");
3942 3925
3943 /* if firmware path present try to download and bring up bus */ 3926 /* if firmware path present try to download and bring up bus */
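
The bulk of the dhd_sdio.c diff is mechanical: the two dozen loose counters become one struct brcmf_sdio_count embedded in the bus struct, so a single pointer (&bus->sdcnt) can be handed to debugfs_create_file() as the file's private data. The shape of that refactor, hypothetical names:

/* before: uint intrcount, pollcnt, ... scattered through the bus struct */

struct my_counters {
	unsigned int intrcount;	/* device interrupt callbacks */
	unsigned int pollcnt;	/* active polls */
};

struct my_bus {
	/* ... bus state ... */
	struct my_counters cnt;	/* all stats live behind one pointer */
};

static void my_count_irq(struct my_bus *bus)
{
	bus->cnt.intrcount++;	/* was: bus->intrcount++ */
}

/* export: debugfs_create_file("counters", S_IRUGO, dir, &bus->cnt, &ops),
 * with ops->read formatting every field of struct my_counters */
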
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index 95b5902bc4b..01b190a25d9 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -735,10 +735,8 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
735 * a candidate for aggregation 735 * a candidate for aggregation
736 */ 736 */
737 p = pktq_ppeek(&qi->q, prec); 737 p = pktq_ppeek(&qi->q, prec);
738 /* tx_info must be checked with current p */
739 tx_info = IEEE80211_SKB_CB(p);
740
741 if (p) { 738 if (p) {
739 tx_info = IEEE80211_SKB_CB(p);
742 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && 740 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
743 ((u8) (p->priority) == tid)) { 741 ((u8) (p->priority) == tid)) {
744 plen = p->len + AMPDU_MAX_MPDU_OVERHEAD; 742 plen = p->len + AMPDU_MAX_MPDU_OVERHEAD;
@@ -759,6 +757,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
759 p = NULL; 757 p = NULL;
760 continue; 758 continue;
761 } 759 }
760 /* next packet fit for aggregation so dequeue */
762 p = brcmu_pktq_pdeq(&qi->q, prec); 761 p = brcmu_pktq_pdeq(&qi->q, prec);
763 } else { 762 } else {
764 p = NULL; 763 p = NULL;
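
The ampdu.c fix above closes a NULL dereference: pktq_ppeek() can return NULL on an empty precedence queue, so IEEE80211_SKB_CB(p) must only run inside the if (p) branch. The corrected peek/check/dequeue order, reduced to a sketch with hypothetical queue helpers:

#include <stddef.h>

struct pkt {
	int priority;
};

/* hypothetical helpers standing in for pktq_ppeek()/brcmu_pktq_pdeq() */
struct pktq;
struct pkt *my_pktq_peek(struct pktq *q);	/* NULL when queue empty */
struct pkt *my_pktq_deq(struct pktq *q);

static struct pkt *my_next_for_tid(struct pktq *q, int tid)
{
	struct pkt *p = my_pktq_peek(q);

	if (!p)			/* check before any dereference */
		return NULL;

	if (p->priority != tid)	/* not ours: leave it queued */
		return NULL;

	return my_pktq_deq(q);	/* commit: actually dequeue it */
}
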
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 50f92a0b7c4..341e06a0d6e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -721,14 +721,6 @@ static const struct ieee80211_ops brcms_ops = {
721 .flush = brcms_ops_flush, 721 .flush = brcms_ops_flush,
722}; 722};
723 723
724/*
725 * is called in brcms_bcma_probe() context, therefore no locking required.
726 */
727static int brcms_set_hint(struct brcms_info *wl, char *abbrev)
728{
729 return regulatory_hint(wl->pub->ieee_hw->wiphy, abbrev);
730}
731
732void brcms_dpc(unsigned long data) 724void brcms_dpc(unsigned long data)
733{ 725{
734 struct brcms_info *wl; 726 struct brcms_info *wl;
@@ -1068,9 +1060,9 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
1068 wiphy_err(wl->wiphy, "%s: ieee80211_register_hw failed, status" 1060 wiphy_err(wl->wiphy, "%s: ieee80211_register_hw failed, status"
1069 "%d\n", __func__, err); 1061 "%d\n", __func__, err);
1070 1062
1071 if (wl->pub->srom_ccode[0] && brcms_set_hint(wl, wl->pub->srom_ccode)) 1063 if (wl->pub->srom_ccode[0] &&
1072 wiphy_err(wl->wiphy, "%s: regulatory_hint failed, status %d\n", 1064 regulatory_hint(wl->wiphy, wl->pub->srom_ccode))
1073 __func__, err); 1065 wiphy_err(wl->wiphy, "%s: regulatory hint failed\n", __func__);
1074 1066
1075 n_adapters_found++; 1067 n_adapters_found++;
1076 return wl; 1068 return wl;
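The mac80211_if.c hunks drop brcms_set_hint(), a single-use wrapper around regulatory_hint(), and with it a misleading log line: the old message printed the err value left over from the earlier ieee80211_register_hw() call rather than the hint's own return code. A small sketch of the simplified call shape, with regulatory_hint_stub standing in for the cfg80211 API.

#include <stdio.h>

/* Hypothetical stand-in for cfg80211's regulatory_hint(). */
static int regulatory_hint_stub(const char *alpha2)
{
        return alpha2[0] ? 0 : -1;    /* pretend empty codes fail */
}

int main(void)
{
        const char *srom_ccode = "US";

        /* After the change: call the API directly, report only the failure. */
        if (srom_ccode[0] && regulatory_hint_stub(srom_ccode))
                fprintf(stderr, "regulatory hint failed\n");
        else
                printf("hint accepted for %s\n", srom_ccode);
        return 0;
}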
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 98c8f644964..170ec330d2a 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,7 +1,3 @@
1obj-$(CONFIG_IWLDVM) += dvm/
2
3CFLAGS_iwl-devtrace.o := -I$(src)
4
5# common 1# common
6obj-$(CONFIG_IWLWIFI) += iwlwifi.o 2obj-$(CONFIG_IWLWIFI) += iwlwifi.o
7iwlwifi-objs += iwl-io.o 3iwlwifi-objs += iwl-io.o
@@ -13,5 +9,11 @@ iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
13iwlwifi-objs += pcie/1000.o pcie/2000.o pcie/5000.o pcie/6000.o 9iwlwifi-objs += pcie/1000.o pcie/2000.o pcie/5000.o pcie/6000.o
14 10
15iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o 11iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
12iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o
16 13
17ccflags-y += -D__CHECK_ENDIAN__ -I$(src) 14ccflags-y += -D__CHECK_ENDIAN__ -I$(src)
15
16
17obj-$(CONFIG_IWLDVM) += dvm/
18
19CFLAGS_iwl-devtrace.o := -I$(src)
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 2ae3608472a..6d102413dd9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -395,8 +395,10 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
395} 395}
396 396
397extern int iwl_alive_start(struct iwl_priv *priv); 397extern int iwl_alive_start(struct iwl_priv *priv);
398/* svtool */ 398
399/* testmode support */
399#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE 400#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
401
400extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, 402extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data,
401 int len); 403 int len);
402extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, 404extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
@@ -404,13 +406,16 @@ extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
404 struct netlink_callback *cb, 406 struct netlink_callback *cb,
405 void *data, int len); 407 void *data, int len);
406extern void iwl_testmode_init(struct iwl_priv *priv); 408extern void iwl_testmode_init(struct iwl_priv *priv);
407extern void iwl_testmode_cleanup(struct iwl_priv *priv); 409extern void iwl_testmode_free(struct iwl_priv *priv);
410
408#else 411#else
412
409static inline 413static inline
410int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) 414int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
411{ 415{
412 return -ENOSYS; 416 return -ENOSYS;
413} 417}
418
414static inline 419static inline
415int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb, 420int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
416 struct netlink_callback *cb, 421 struct netlink_callback *cb,
@@ -418,12 +423,12 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
418{ 423{
419 return -ENOSYS; 424 return -ENOSYS;
420} 425}
421static inline 426
422void iwl_testmode_init(struct iwl_priv *priv) 427static inline void iwl_testmode_init(struct iwl_priv *priv)
423{ 428{
424} 429}
425static inline 430
426void iwl_testmode_cleanup(struct iwl_priv *priv) 431static inline void iwl_testmode_free(struct iwl_priv *priv)
427{ 432{
428} 433}
429#endif 434#endif
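The agn.h hunk renames iwl_testmode_cleanup() to iwl_testmode_free() and tidies the stub section: when CONFIG_IWLWIFI_DEVICE_TESTMODE is not set, empty static inlines keep every call site free of #ifdefs. A self-contained sketch of that pattern, using CONFIG_FEATURE as a stand-in for the real Kconfig symbol; in the driver the real implementations live in a separate translation unit.

#include <stdio.h>

struct priv { int dummy; };

#ifdef CONFIG_FEATURE
/* Real implementations; in the driver these are out-of-line. */
static void feature_init(struct priv *p) { (void)p; printf("real init\n"); }
static void feature_free(struct priv *p) { (void)p; printf("real free\n"); }
#else
/* Feature compiled out: no-op stubs with identical signatures. */
static inline void feature_init(struct priv *p) { (void)p; }
static inline void feature_free(struct priv *p) { (void)p; }
#endif

int main(void)
{
        struct priv p;

        feature_init(&p);    /* compiles either way; no #ifdef here */
        feature_free(&p);
        return 0;
}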
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 89f2e1040e7..4620b657948 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -52,6 +52,8 @@
52#include "rs.h" 52#include "rs.h"
53#include "tt.h" 53#include "tt.h"
54 54
55#include "iwl-test.h"
56
55/* CT-KILL constants */ 57/* CT-KILL constants */
56#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ 58#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
57#define CT_KILL_THRESHOLD 114 /* in Celsius */ 59#define CT_KILL_THRESHOLD 114 /* in Celsius */
@@ -596,24 +598,6 @@ struct iwl_lib_ops {
596 void (*temperature)(struct iwl_priv *priv); 598 void (*temperature)(struct iwl_priv *priv);
597}; 599};
598 600
599#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
600struct iwl_testmode_trace {
601 u32 buff_size;
602 u32 total_size;
603 u32 num_chunks;
604 u8 *cpu_addr;
605 u8 *trace_addr;
606 dma_addr_t dma_addr;
607 bool trace_enabled;
608};
609struct iwl_testmode_mem {
610 u32 buff_size;
611 u32 num_chunks;
612 u8 *buff_addr;
613 bool read_in_progress;
614};
615#endif
616
617struct iwl_wipan_noa_data { 601struct iwl_wipan_noa_data {
618 struct rcu_head rcu_head; 602 struct rcu_head rcu_head;
619 u32 length; 603 u32 length;
@@ -670,8 +654,6 @@ struct iwl_priv {
670 enum ieee80211_band band; 654 enum ieee80211_band band;
671 u8 valid_contexts; 655 u8 valid_contexts;
672 656
673 void (*pre_rx_handler)(struct iwl_priv *priv,
674 struct iwl_rx_cmd_buffer *rxb);
675 int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, 657 int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
676 struct iwl_rx_cmd_buffer *rxb, 658 struct iwl_rx_cmd_buffer *rxb,
677 struct iwl_device_cmd *cmd); 659 struct iwl_device_cmd *cmd);
@@ -895,9 +877,9 @@ struct iwl_priv {
895 struct led_classdev led; 877 struct led_classdev led;
896 unsigned long blink_on, blink_off; 878 unsigned long blink_on, blink_off;
897 bool led_registered; 879 bool led_registered;
880
898#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE 881#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
899 struct iwl_testmode_trace testmode_trace; 882 struct iwl_test tst;
900 struct iwl_testmode_mem testmode_mem;
901 u32 tm_fixed_rate; 883 u32 tm_fixed_rate;
902#endif 884#endif
903 885
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index cb1ca7a25dd..76f259283c3 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -1265,7 +1265,7 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1265 * the mutex, this ensures we don't try to send two 1265 * the mutex, this ensures we don't try to send two
1266 * (or more) synchronous commands at a time. 1266 * (or more) synchronous commands at a time.
1267 */ 1267 */
1268 if (cmd->flags & CMD_SYNC) 1268 if (!(cmd->flags & CMD_ASYNC))
1269 lockdep_assert_held(&priv->mutex); 1269 lockdep_assert_held(&priv->mutex);
1270 1270
1271 if (priv->ucode_owner == IWL_OWNERSHIP_TM && 1271 if (priv->ucode_owner == IWL_OWNERSHIP_TM &&
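The lib.c change fixes a predicate that could never fire: in this era of the driver CMD_SYNC is the zero value of the command-flags enum, so cmd->flags & CMD_SYNC was always false and the lockdep assertion was dead code. Testing for the absence of CMD_ASYNC is the correct way to detect a synchronous command. A compilable sketch of the difference:

#include <stdio.h>

enum cmd_flags {
        CMD_SYNC  = 0,        /* "flag" with no bit set */
        CMD_ASYNC = 1 << 0,
};

static void assert_locking(unsigned flags)
{
        /* Old test: (flags & CMD_SYNC) is always 0 -- dead code. */
        /* New test: */
        if (!(flags & CMD_ASYNC))
                printf("sync command: caller must hold the mutex\n");
        else
                printf("async command: no mutex requirement\n");
}

int main(void)
{
        assert_locking(CMD_SYNC);
        assert_locking(CMD_ASYNC);
        return 0;
}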
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 599e8b41f5a..9d237486231 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -476,7 +476,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
476 } 476 }
477 477
478 if (priv->wowlan_sram) 478 if (priv->wowlan_sram)
479 _iwl_read_targ_mem_words( 479 _iwl_read_targ_mem_dwords(
480 priv->trans, 0x800000, 480 priv->trans, 0x800000,
481 priv->wowlan_sram, 481 priv->wowlan_sram,
482 img->sec[IWL_UCODE_SECTION_DATA].len / 4); 482 img->sec[IWL_UCODE_SECTION_DATA].len / 4);
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 1c2d0233a40..e620af3d592 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -406,7 +406,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
406 406
407 base = priv->device_pointers.log_event_table; 407 base = priv->device_pointers.log_event_table;
408 if (iwlagn_hw_valid_rtc_data_addr(base)) { 408 if (iwlagn_hw_valid_rtc_data_addr(base)) {
409 iwl_read_targ_mem_words(priv->trans, base, &read, sizeof(read)); 409 iwl_read_targ_mem_bytes(priv->trans, base, &read, sizeof(read));
410 capacity = read.capacity; 410 capacity = read.capacity;
411 mode = read.mode; 411 mode = read.mode;
412 num_wraps = read.wrap_counter; 412 num_wraps = read.wrap_counter;
@@ -1548,7 +1548,7 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
1548 1548
1549 iwl_dbgfs_unregister(priv); 1549 iwl_dbgfs_unregister(priv);
1550 1550
1551 iwl_testmode_cleanup(priv); 1551 iwl_testmode_free(priv);
1552 iwlagn_mac_unregister(priv); 1552 iwlagn_mac_unregister(priv);
1553 1553
1554 iwl_tt_exit(priv); 1554 iwl_tt_exit(priv);
@@ -1671,7 +1671,7 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv)
1671 } 1671 }
1672 1672
1673 /*TODO: Update dbgfs with ISR error stats obtained below */ 1673 /*TODO: Update dbgfs with ISR error stats obtained below */
1674 iwl_read_targ_mem_words(trans, base, &table, sizeof(table)); 1674 iwl_read_targ_mem_bytes(trans, base, &table, sizeof(table));
1675 1675
1676 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { 1676 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
1677 IWL_ERR(trans, "Start IWL Error Log Dump:\n"); 1677 IWL_ERR(trans, "Start IWL Error Log Dump:\n");
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index 0ed90bb8b56..c1f7a18e08d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -1124,8 +1124,6 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
1124{ 1124{
1125 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1125 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1126 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 1126 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1127 void (*pre_rx_handler)(struct iwl_priv *,
1128 struct iwl_rx_cmd_buffer *);
1129 int err = 0; 1127 int err = 0;
1130 1128
1131 /* 1129 /*
@@ -1135,19 +1133,19 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
1135 */ 1133 */
1136 iwl_notification_wait_notify(&priv->notif_wait, pkt); 1134 iwl_notification_wait_notify(&priv->notif_wait, pkt);
1137 1135
1138 /* RX data may be forwarded to userspace (using pre_rx_handler) in one 1136#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
1139 * of two cases: the first, that the user owns the uCode through 1137 /*
1140 * testmode - in such case the pre_rx_handler is set and no further 1138 * RX data may be forwarded to userspace in one
1141 * processing takes place. The other case is when the user want to 1139 * of two cases: the user owns the fw through testmode or when
1142 * monitor the rx w/o affecting the regular flow - the pre_rx_handler 1140 * the user requested to monitor the rx w/o affecting the regular flow.
1143 * will be set but the ownership flag != IWL_OWNERSHIP_TM and the flow 1141 * In these cases the iwl_test object will handle forwarding the rx
1142 * data to user space.
1143 * Note that if the ownership flag != IWL_OWNERSHIP_TM the flow
1144 * continues. 1144 * continues.
1145 * We need to use ACCESS_ONCE to prevent a case where the handler
1146 * changes between the check and the call.
1147 */ 1145 */
1148 pre_rx_handler = ACCESS_ONCE(priv->pre_rx_handler); 1146 iwl_test_rx(&priv->tst, rxb);
1149 if (pre_rx_handler) 1147#endif
1150 pre_rx_handler(priv, rxb); 1148
1151 if (priv->ucode_owner != IWL_OWNERSHIP_TM) { 1149 if (priv->ucode_owner != IWL_OWNERSHIP_TM) {
1152 /* Based on type of command response or notification, 1150 /* Based on type of command response or notification,
1153 * handle those that need handling via function in 1151 * handle those that need handling via function in
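The rx.c hunk replaces a runtime pre_rx_handler function pointer, which had to be read with ACCESS_ONCE to avoid racing with concurrent writers, by a direct call to iwl_test_rx() that is compiled in only under CONFIG_IWLWIFI_DEVICE_TESTMODE. A sketch of the resulting dispatch shape; CONFIG_TESTMODE and the types here are illustrative.

#include <stdio.h>

struct rx_buf { int len; };

#ifdef CONFIG_TESTMODE
/* Present only when the feature is configured. */
static void test_rx(struct rx_buf *rxb)
{
        printf("forwarding %d bytes to userspace\n", rxb->len);
}
#endif

static void rx_dispatch(struct rx_buf *rxb)
{
#ifdef CONFIG_TESTMODE
        test_rx(rxb);        /* direct call: no pointer left to race on */
#endif
        printf("regular rx path, len=%d\n", rxb->len);
}

int main(void)
{
        struct rx_buf b = { .len = 42 };

        rx_dispatch(&b);
        return 0;
}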
diff --git a/drivers/net/wireless/iwlwifi/dvm/testmode.c b/drivers/net/wireless/iwlwifi/dvm/testmode.c
index e08b1a38359..57b918ce3b5 100644
--- a/drivers/net/wireless/iwlwifi/dvm/testmode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/testmode.c
@@ -60,6 +60,7 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63
63#include <linux/init.h> 64#include <linux/init.h>
64#include <linux/kernel.h> 65#include <linux/kernel.h>
65#include <linux/module.h> 66#include <linux/module.h>
@@ -69,354 +70,83 @@
69#include <net/cfg80211.h> 70#include <net/cfg80211.h>
70#include <net/mac80211.h> 71#include <net/mac80211.h>
71#include <net/netlink.h> 72#include <net/netlink.h>
73
72#include "iwl-debug.h" 74#include "iwl-debug.h"
73#include "iwl-io.h"
74#include "iwl-trans.h" 75#include "iwl-trans.h"
75#include "iwl-fh.h"
76#include "iwl-prph.h"
77#include "dev.h" 76#include "dev.h"
78#include "agn.h" 77#include "agn.h"
79#include "testmode.h" 78#include "iwl-test.h"
80 79#include "iwl-testmode.h"
81
82/* Periphery registers absolute lower bound. This is used in order to
 83 * differentiate register access through HBUS_TARG_PRPH_* and
84 * HBUS_TARG_MEM_* accesses.
85 */
86#define IWL_TM_ABS_PRPH_START (0xA00000)
87
88/* The TLVs used in the gnl message policy between the kernel module and
89 * user space application. iwl_testmode_gnl_msg_policy is to be carried
90 * through the NL80211_CMD_TESTMODE channel regulated by nl80211.
91 * See testmode.h
92 */
93static
94struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
95 [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },
96
97 [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
98 [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },
99
100 [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
101 [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
102 [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },
103
104 [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
105 [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
106 80
107 [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, }, 81static int iwl_testmode_send_cmd(struct iwl_op_mode *op_mode,
108 82 struct iwl_host_cmd *cmd)
109 [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
110 [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
111 [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },
112
113 [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
114
115 [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
116
117 [IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
118 [IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
119 [IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },
120
121 [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
122 [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
123 [IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
124 [IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
125 [IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },
126
127 [IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
128};
129
130/*
131 * See the struct iwl_rx_packet in commands.h for the format of the
132 * received events from the device
133 */
134static inline int get_event_length(struct iwl_rx_cmd_buffer *rxb)
135{ 83{
136 struct iwl_rx_packet *pkt = rxb_addr(rxb); 84 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
137 if (pkt) 85 return iwl_dvm_send_cmd(priv, cmd);
138 return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
139 else
140 return 0;
141} 86}
142 87
143 88static bool iwl_testmode_valid_hw_addr(u32 addr)
144/*
145 * This function multicasts the spontaneous messages from the device to the
 146 * user space. It is invoked whenever there is a received message
 147 * from the device. This function is called within the ISR of the rx handlers
 148 * in the iwlagn driver.
149 *
 150 * The parsing of the message content is left to the user space application.
 151 * The message content is treated as untouched raw data and is encapsulated
 152 * in IWL_TM_ATTR_UCODE_RX_PKT when multicast to the user space.
153 *
154 * @priv: the instance of iwlwifi device
155 * @rxb: pointer to rx data content received by the ISR
156 *
157 * See the message policies and TLVs in iwl_testmode_gnl_msg_policy[].
158 * For the messages multicasting to the user application, the mandatory
159 * TLV fields are :
160 * IWL_TM_ATTR_COMMAND must be IWL_TM_CMD_DEV2APP_UCODE_RX_PKT
161 * IWL_TM_ATTR_UCODE_RX_PKT for carrying the message content
162 */
163
164static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv,
165 struct iwl_rx_cmd_buffer *rxb)
166{ 89{
167 struct ieee80211_hw *hw = priv->hw; 90 if (iwlagn_hw_valid_rtc_data_addr(addr))
168 struct sk_buff *skb; 91 return true;
169 void *data;
170 int length;
171 92
172 data = rxb_addr(rxb); 93 if (IWLAGN_RTC_INST_LOWER_BOUND <= addr &&
173 length = get_event_length(rxb); 94 addr < IWLAGN_RTC_INST_UPPER_BOUND)
95 return true;
174 96
175 if (!data || length == 0) 97 return false;
176 return;
177
178 skb = cfg80211_testmode_alloc_event_skb(hw->wiphy, 20 + length,
179 GFP_ATOMIC);
180 if (skb == NULL) {
181 IWL_ERR(priv,
182 "Run out of memory for messages to user space ?\n");
183 return;
184 }
185 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
186 /* the length doesn't include len_n_flags field, so add it manually */
187 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data))
188 goto nla_put_failure;
189 cfg80211_testmode_event(skb, GFP_ATOMIC);
190 return;
191
192nla_put_failure:
193 kfree_skb(skb);
194 IWL_ERR(priv, "Ouch, overran buffer, check allocation!\n");
195} 98}
196 99
197void iwl_testmode_init(struct iwl_priv *priv) 100static u32 iwl_testmode_get_fw_ver(struct iwl_op_mode *op_mode)
198{ 101{
199 priv->pre_rx_handler = NULL; 102 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
200 priv->testmode_trace.trace_enabled = false; 103 return priv->fw->ucode_ver;
201 priv->testmode_mem.read_in_progress = false;
202} 104}
203 105
204static void iwl_mem_cleanup(struct iwl_priv *priv) 106static struct sk_buff*
107iwl_testmode_alloc_reply(struct iwl_op_mode *op_mode, int len)
205{ 108{
206 if (priv->testmode_mem.read_in_progress) { 109 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
207 kfree(priv->testmode_mem.buff_addr); 110 return cfg80211_testmode_alloc_reply_skb(priv->hw->wiphy, len);
208 priv->testmode_mem.buff_addr = NULL;
209 priv->testmode_mem.buff_size = 0;
210 priv->testmode_mem.num_chunks = 0;
211 priv->testmode_mem.read_in_progress = false;
212 }
213} 111}
214 112
215static void iwl_trace_cleanup(struct iwl_priv *priv) 113static int iwl_testmode_reply(struct iwl_op_mode *op_mode, struct sk_buff *skb)
216{ 114{
217 if (priv->testmode_trace.trace_enabled) { 115 return cfg80211_testmode_reply(skb);
218 if (priv->testmode_trace.cpu_addr &&
219 priv->testmode_trace.dma_addr)
220 dma_free_coherent(priv->trans->dev,
221 priv->testmode_trace.total_size,
222 priv->testmode_trace.cpu_addr,
223 priv->testmode_trace.dma_addr);
224 priv->testmode_trace.trace_enabled = false;
225 priv->testmode_trace.cpu_addr = NULL;
226 priv->testmode_trace.trace_addr = NULL;
227 priv->testmode_trace.dma_addr = 0;
228 priv->testmode_trace.buff_size = 0;
229 priv->testmode_trace.total_size = 0;
230 }
231} 116}
232 117
233 118static struct sk_buff *iwl_testmode_alloc_event(struct iwl_op_mode *op_mode,
234void iwl_testmode_cleanup(struct iwl_priv *priv) 119 int len)
235{ 120{
236 iwl_trace_cleanup(priv); 121 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
237 iwl_mem_cleanup(priv); 122 return cfg80211_testmode_alloc_event_skb(priv->hw->wiphy, len,
123 GFP_ATOMIC);
238} 124}
239 125
240 126static void iwl_testmode_event(struct iwl_op_mode *op_mode, struct sk_buff *skb)
241/*
242 * This function handles the user application commands to the ucode.
243 *
244 * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_CMD_ID and
 245 * IWL_TM_ATTR_UCODE_CMD_DATA and calls the handler to send the
246 * host command to the ucode.
247 *
248 * If any mandatory field is missing, -ENOMSG is replied to the user space
 249 * application; otherwise, it waits for the host command to be sent and checks
 250 * the return code. In case of error, it is returned; otherwise a reply is
251 * allocated and the reply RX packet
252 * is returned.
253 *
254 * @hw: ieee80211_hw object that represents the device
255 * @tb: gnl message fields from the user space
256 */
257static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
258{ 127{
259 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 128 return cfg80211_testmode_event(skb, GFP_ATOMIC);
260 struct iwl_host_cmd cmd;
261 struct iwl_rx_packet *pkt;
262 struct sk_buff *skb;
263 void *reply_buf;
264 u32 reply_len;
265 int ret;
266 bool cmd_want_skb;
267
268 memset(&cmd, 0, sizeof(struct iwl_host_cmd));
269
270 if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
271 !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
272 IWL_ERR(priv, "Missing ucode command mandatory fields\n");
273 return -ENOMSG;
274 }
275
276 cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
277 cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
278 if (cmd_want_skb)
279 cmd.flags |= CMD_WANT_SKB;
280
281 cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
282 cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
283 cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
284 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
285 IWL_DEBUG_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
286 " len %d\n", cmd.id, cmd.flags, cmd.len[0]);
287
288 ret = iwl_dvm_send_cmd(priv, &cmd);
289 if (ret) {
290 IWL_ERR(priv, "Failed to send hcmd\n");
291 return ret;
292 }
293 if (!cmd_want_skb)
294 return ret;
295
296 /* Handling return of SKB to the user */
297 pkt = cmd.resp_pkt;
298 if (!pkt) {
299 IWL_ERR(priv, "HCMD received a null response packet\n");
300 return ret;
301 }
302
303 reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
304 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, reply_len + 20);
305 reply_buf = kmalloc(reply_len, GFP_KERNEL);
306 if (!skb || !reply_buf) {
307 kfree_skb(skb);
308 kfree(reply_buf);
309 return -ENOMEM;
310 }
311
312 /* The reply is in a page, that we cannot send to user space. */
313 memcpy(reply_buf, &(pkt->hdr), reply_len);
314 iwl_free_resp(&cmd);
315
316 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
317 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
318 goto nla_put_failure;
319 return cfg80211_testmode_reply(skb);
320
321nla_put_failure:
322 IWL_DEBUG_INFO(priv, "Failed creating NL attributes\n");
323 return -ENOMSG;
324} 129}
325 130
131static struct iwl_test_ops tst_ops = {
132 .send_cmd = iwl_testmode_send_cmd,
133 .valid_hw_addr = iwl_testmode_valid_hw_addr,
134 .get_fw_ver = iwl_testmode_get_fw_ver,
135 .alloc_reply = iwl_testmode_alloc_reply,
136 .reply = iwl_testmode_reply,
137 .alloc_event = iwl_testmode_alloc_event,
138 .event = iwl_testmode_event,
139};
326 140
327/* 141void iwl_testmode_init(struct iwl_priv *priv)
328 * This function handles the user application commands for register access.
329 *
 330 * It retrieves the command ID carried in IWL_TM_ATTR_COMMAND and dispatches
 331 * to the corresponding handler.
 332 *
 333 * If the command ID is unknown, -ENOSYS is returned; -ENOMSG if the
 334 * mandatory fields (IWL_TM_ATTR_REG_OFFSET, IWL_TM_ATTR_REG_VALUE32,
 335 * IWL_TM_ATTR_REG_VALUE8) are missing; otherwise 0 is replied, indicating
336 * the success of the command execution.
337 *
338 * If IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_READ32, the register read
339 * value is returned with IWL_TM_ATTR_REG_VALUE32.
340 *
341 * @hw: ieee80211_hw object that represents the device
342 * @tb: gnl message fields from the user space
343 */
344static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
345{ 142{
346 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 143 iwl_test_init(&priv->tst, priv->trans, &tst_ops);
347 u32 ofs, val32, cmd;
348 u8 val8;
349 struct sk_buff *skb;
350 int status = 0;
351
352 if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
353 IWL_ERR(priv, "Missing register offset\n");
354 return -ENOMSG;
355 }
356 ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
357 IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);
358
359 /* Allow access only to FH/CSR/HBUS in direct mode.
360 Since we don't have the upper bounds for the CSR and HBUS segments,
361 we will use only the upper bound of FH for sanity check. */
362 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
363 if ((cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 ||
364 cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 ||
365 cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8) &&
366 (ofs >= FH_MEM_UPPER_BOUND)) {
367 IWL_ERR(priv, "offset out of segment (0x0 - 0x%x)\n",
368 FH_MEM_UPPER_BOUND);
369 return -EINVAL;
370 }
371
372 switch (cmd) {
373 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
374 val32 = iwl_read_direct32(priv->trans, ofs);
375 IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
376
377 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
378 if (!skb) {
379 IWL_ERR(priv, "Memory allocation fail\n");
380 return -ENOMEM;
381 }
382 if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
383 goto nla_put_failure;
384 status = cfg80211_testmode_reply(skb);
385 if (status < 0)
386 IWL_ERR(priv, "Error sending msg : %d\n", status);
387 break;
388 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
389 if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
390 IWL_ERR(priv, "Missing value to write\n");
391 return -ENOMSG;
392 } else {
393 val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
394 IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
395 iwl_write_direct32(priv->trans, ofs, val32);
396 }
397 break;
398 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
399 if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
400 IWL_ERR(priv, "Missing value to write\n");
401 return -ENOMSG;
402 } else {
403 val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
404 IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
405 iwl_write8(priv->trans, ofs, val8);
406 }
407 break;
408 default:
409 IWL_ERR(priv, "Unknown testmode register command ID\n");
410 return -ENOSYS;
411 }
412
413 return status;
414
415nla_put_failure:
416 kfree_skb(skb);
417 return -EMSGSIZE;
418} 144}
419 145
146void iwl_testmode_free(struct iwl_priv *priv)
147{
148 iwl_test_free(&priv->tst);
149}
420 150
421static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv) 151static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
422{ 152{
@@ -469,7 +199,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
469 struct sk_buff *skb; 199 struct sk_buff *skb;
470 unsigned char *rsp_data_ptr = NULL; 200 unsigned char *rsp_data_ptr = NULL;
471 int status = 0, rsp_data_len = 0; 201 int status = 0, rsp_data_len = 0;
472 u32 devid, inst_size = 0, data_size = 0; 202 u32 inst_size = 0, data_size = 0;
473 const struct fw_img *img; 203 const struct fw_img *img;
474 204
475 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { 205 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
@@ -563,39 +293,6 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
563 priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]); 293 priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
564 break; 294 break;
565 295
566 case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
567 IWL_INFO(priv, "uCode version raw: 0x%x\n",
568 priv->fw->ucode_ver);
569
570 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
571 if (!skb) {
572 IWL_ERR(priv, "Memory allocation fail\n");
573 return -ENOMEM;
574 }
575 if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION,
576 priv->fw->ucode_ver))
577 goto nla_put_failure;
578 status = cfg80211_testmode_reply(skb);
579 if (status < 0)
580 IWL_ERR(priv, "Error sending msg : %d\n", status);
581 break;
582
583 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
584 devid = priv->trans->hw_id;
585 IWL_INFO(priv, "hw version: 0x%x\n", devid);
586
587 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
588 if (!skb) {
589 IWL_ERR(priv, "Memory allocation fail\n");
590 return -ENOMEM;
591 }
592 if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
593 goto nla_put_failure;
594 status = cfg80211_testmode_reply(skb);
595 if (status < 0)
596 IWL_ERR(priv, "Error sending msg : %d\n", status);
597 break;
598
599 case IWL_TM_CMD_APP2DEV_GET_FW_INFO: 296 case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
600 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20 + 8); 297 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20 + 8);
601 if (!skb) { 298 if (!skb) {
@@ -630,125 +327,6 @@ nla_put_failure:
630 return -EMSGSIZE; 327 return -EMSGSIZE;
631} 328}
632 329
633
634/*
635 * This function handles the user application commands for uCode trace
636 *
 637 * It retrieves the command ID carried in IWL_TM_ATTR_COMMAND and dispatches
 638 * to the corresponding handler.
 639 *
 640 * If the command ID is unknown, -ENOSYS is replied; otherwise, the return
641 * value of the actual command execution is replied to the user application.
642 *
643 * @hw: ieee80211_hw object that represents the device
644 * @tb: gnl message fields from the user space
645 */
646static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
647{
648 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
649 struct sk_buff *skb;
650 int status = 0;
651 struct device *dev = priv->trans->dev;
652
653 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
654 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
655 if (priv->testmode_trace.trace_enabled)
656 return -EBUSY;
657
658 if (!tb[IWL_TM_ATTR_TRACE_SIZE])
659 priv->testmode_trace.buff_size = TRACE_BUFF_SIZE_DEF;
660 else
661 priv->testmode_trace.buff_size =
662 nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);
663 if (!priv->testmode_trace.buff_size)
664 return -EINVAL;
665 if (priv->testmode_trace.buff_size < TRACE_BUFF_SIZE_MIN ||
666 priv->testmode_trace.buff_size > TRACE_BUFF_SIZE_MAX)
667 return -EINVAL;
668
669 priv->testmode_trace.total_size =
670 priv->testmode_trace.buff_size + TRACE_BUFF_PADD;
671 priv->testmode_trace.cpu_addr =
672 dma_alloc_coherent(dev,
673 priv->testmode_trace.total_size,
674 &priv->testmode_trace.dma_addr,
675 GFP_KERNEL);
676 if (!priv->testmode_trace.cpu_addr)
677 return -ENOMEM;
678 priv->testmode_trace.trace_enabled = true;
679 priv->testmode_trace.trace_addr = (u8 *)PTR_ALIGN(
680 priv->testmode_trace.cpu_addr, 0x100);
681 memset(priv->testmode_trace.trace_addr, 0x03B,
682 priv->testmode_trace.buff_size);
683 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
684 sizeof(priv->testmode_trace.dma_addr) + 20);
685 if (!skb) {
686 IWL_ERR(priv, "Memory allocation fail\n");
687 iwl_trace_cleanup(priv);
688 return -ENOMEM;
689 }
690 if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
691 sizeof(priv->testmode_trace.dma_addr),
692 (u64 *)&priv->testmode_trace.dma_addr))
693 goto nla_put_failure;
694 status = cfg80211_testmode_reply(skb);
695 if (status < 0) {
696 IWL_ERR(priv, "Error sending msg : %d\n", status);
697 }
698 priv->testmode_trace.num_chunks =
699 DIV_ROUND_UP(priv->testmode_trace.buff_size,
700 DUMP_CHUNK_SIZE);
701 break;
702
703 case IWL_TM_CMD_APP2DEV_END_TRACE:
704 iwl_trace_cleanup(priv);
705 break;
706 default:
707 IWL_ERR(priv, "Unknown testmode mem command ID\n");
708 return -ENOSYS;
709 }
710 return status;
711
712nla_put_failure:
713 kfree_skb(skb);
714 if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
715 IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
716 iwl_trace_cleanup(priv);
717 return -EMSGSIZE;
718}
719
720static int iwl_testmode_trace_dump(struct ieee80211_hw *hw,
721 struct sk_buff *skb,
722 struct netlink_callback *cb)
723{
724 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
725 int idx, length;
726
727 if (priv->testmode_trace.trace_enabled &&
728 priv->testmode_trace.trace_addr) {
729 idx = cb->args[4];
730 if (idx >= priv->testmode_trace.num_chunks)
731 return -ENOENT;
732 length = DUMP_CHUNK_SIZE;
733 if (((idx + 1) == priv->testmode_trace.num_chunks) &&
734 (priv->testmode_trace.buff_size % DUMP_CHUNK_SIZE))
735 length = priv->testmode_trace.buff_size %
736 DUMP_CHUNK_SIZE;
737
738 if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
739 priv->testmode_trace.trace_addr +
740 (DUMP_CHUNK_SIZE * idx)))
741 goto nla_put_failure;
742 idx++;
743 cb->args[4] = idx;
744 return 0;
745 } else
746 return -EFAULT;
747
748 nla_put_failure:
749 return -ENOBUFS;
750}
751
752/* 330/*
 753 * This function handles the user application's switch of ucode ownership. 331 * This function handles the user application's switch of ucode ownership.
754 * 332 *
@@ -777,10 +355,10 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
777 owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]); 355 owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
778 if (owner == IWL_OWNERSHIP_DRIVER) { 356 if (owner == IWL_OWNERSHIP_DRIVER) {
779 priv->ucode_owner = owner; 357 priv->ucode_owner = owner;
780 priv->pre_rx_handler = NULL; 358 iwl_test_enable_notifications(&priv->tst, false);
781 } else if (owner == IWL_OWNERSHIP_TM) { 359 } else if (owner == IWL_OWNERSHIP_TM) {
782 priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
783 priv->ucode_owner = owner; 360 priv->ucode_owner = owner;
361 iwl_test_enable_notifications(&priv->tst, true);
784 } else { 362 } else {
785 IWL_ERR(priv, "Invalid owner\n"); 363 IWL_ERR(priv, "Invalid owner\n");
786 return -EINVAL; 364 return -EINVAL;
@@ -788,180 +366,6 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
788 return 0; 366 return 0;
789} 367}
790 368
791static int iwl_testmode_indirect_read(struct iwl_priv *priv, u32 addr, u32 size)
792{
793 struct iwl_trans *trans = priv->trans;
794 unsigned long flags;
795 int i;
796
797 if (size & 0x3)
798 return -EINVAL;
799 priv->testmode_mem.buff_size = size;
800 priv->testmode_mem.buff_addr =
801 kmalloc(priv->testmode_mem.buff_size, GFP_KERNEL);
802 if (priv->testmode_mem.buff_addr == NULL)
803 return -ENOMEM;
804
805 /* Hard-coded periphery absolute address */
806 if (IWL_TM_ABS_PRPH_START <= addr &&
807 addr < IWL_TM_ABS_PRPH_START + PRPH_END) {
808 spin_lock_irqsave(&trans->reg_lock, flags);
809 iwl_grab_nic_access(trans);
810 iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
811 addr | (3 << 24));
812 for (i = 0; i < size; i += 4)
813 *(u32 *)(priv->testmode_mem.buff_addr + i) =
814 iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
815 iwl_release_nic_access(trans);
816 spin_unlock_irqrestore(&trans->reg_lock, flags);
817 } else { /* target memory (SRAM) */
818 _iwl_read_targ_mem_words(trans, addr,
819 priv->testmode_mem.buff_addr,
820 priv->testmode_mem.buff_size / 4);
821 }
822
823 priv->testmode_mem.num_chunks =
824 DIV_ROUND_UP(priv->testmode_mem.buff_size, DUMP_CHUNK_SIZE);
825 priv->testmode_mem.read_in_progress = true;
826 return 0;
827
828}
829
830static int iwl_testmode_indirect_write(struct iwl_priv *priv, u32 addr,
831 u32 size, unsigned char *buf)
832{
833 struct iwl_trans *trans = priv->trans;
834 u32 val, i;
835 unsigned long flags;
836
837 if (IWL_TM_ABS_PRPH_START <= addr &&
838 addr < IWL_TM_ABS_PRPH_START + PRPH_END) {
839 /* Periphery writes can be 1-3 bytes long, or DWORDs */
840 if (size < 4) {
841 memcpy(&val, buf, size);
842 spin_lock_irqsave(&trans->reg_lock, flags);
843 iwl_grab_nic_access(trans);
844 iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
845 (addr & 0x0000FFFF) |
846 ((size - 1) << 24));
847 iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
848 iwl_release_nic_access(trans);
849 /* needed after consecutive writes w/o read */
850 mmiowb();
851 spin_unlock_irqrestore(&trans->reg_lock, flags);
852 } else {
853 if (size % 4)
854 return -EINVAL;
855 for (i = 0; i < size; i += 4)
856 iwl_write_prph(trans, addr+i,
857 *(u32 *)(buf+i));
858 }
859 } else if (iwlagn_hw_valid_rtc_data_addr(addr) ||
860 (IWLAGN_RTC_INST_LOWER_BOUND <= addr &&
861 addr < IWLAGN_RTC_INST_UPPER_BOUND)) {
862 _iwl_write_targ_mem_words(trans, addr, buf, size/4);
863 } else
864 return -EINVAL;
865 return 0;
866}
867
868/*
869 * This function handles the user application commands for SRAM data dump
870 *
871 * It retrieves the mandatory fields IWL_TM_ATTR_SRAM_ADDR and
872 * IWL_TM_ATTR_SRAM_SIZE to decide the memory area for SRAM data reading
873 *
 874 * Several errors may be returned: -EBUSY if the SRAM data retrieved by a
 875 * previous command has not been delivered to userspace, or -ENOMSG if
 876 * the mandatory fields (IWL_TM_ATTR_SRAM_ADDR, IWL_TM_ATTR_SRAM_SIZE)
877 * are missing, or -ENOMEM if the buffer allocation fails.
878 *
879 * Otherwise 0 is replied indicating the success of the SRAM reading.
880 *
881 * @hw: ieee80211_hw object that represents the device
882 * @tb: gnl message fields from the user space
883 */
884static int iwl_testmode_indirect_mem(struct ieee80211_hw *hw,
885 struct nlattr **tb)
886{
887 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
888 u32 addr, size, cmd;
889 unsigned char *buf;
890
891 /* Both read and write should be blocked, for atomicity */
892 if (priv->testmode_mem.read_in_progress)
893 return -EBUSY;
894
895 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
896 if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
897 IWL_ERR(priv, "Error finding memory offset address\n");
898 return -ENOMSG;
899 }
900 addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
901 if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
902 IWL_ERR(priv, "Error finding size for memory reading\n");
903 return -ENOMSG;
904 }
905 size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);
906
907 if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ)
908 return iwl_testmode_indirect_read(priv, addr, size);
909 else {
910 if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
911 return -EINVAL;
912 buf = (unsigned char *) nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
913 return iwl_testmode_indirect_write(priv, addr, size, buf);
914 }
915}
916
917static int iwl_testmode_buffer_dump(struct ieee80211_hw *hw,
918 struct sk_buff *skb,
919 struct netlink_callback *cb)
920{
921 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
922 int idx, length;
923
924 if (priv->testmode_mem.read_in_progress) {
925 idx = cb->args[4];
926 if (idx >= priv->testmode_mem.num_chunks) {
927 iwl_mem_cleanup(priv);
928 return -ENOENT;
929 }
930 length = DUMP_CHUNK_SIZE;
931 if (((idx + 1) == priv->testmode_mem.num_chunks) &&
932 (priv->testmode_mem.buff_size % DUMP_CHUNK_SIZE))
933 length = priv->testmode_mem.buff_size %
934 DUMP_CHUNK_SIZE;
935
936 if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
937 priv->testmode_mem.buff_addr +
938 (DUMP_CHUNK_SIZE * idx)))
939 goto nla_put_failure;
940 idx++;
941 cb->args[4] = idx;
942 return 0;
943 } else
944 return -EFAULT;
945
946 nla_put_failure:
947 return -ENOBUFS;
948}
949
950static int iwl_testmode_notifications(struct ieee80211_hw *hw,
951 struct nlattr **tb)
952{
953 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
954 bool enable;
955
956 enable = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
957 if (enable)
958 priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
959 else
960 priv->pre_rx_handler = NULL;
961 return 0;
962}
963
964
965/* The testmode gnl message handler that takes the gnl message from the 369/* The testmode gnl message handler that takes the gnl message from the
966 * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then 370 * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
 967 * invokes the corresponding handlers. 371 * invokes the corresponding handlers.
@@ -987,32 +391,27 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
987 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 391 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
988 int result; 392 int result;
989 393
990 result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len, 394 result = iwl_test_parse(&priv->tst, tb, data, len);
991 iwl_testmode_gnl_msg_policy); 395 if (result)
992 if (result != 0) {
993 IWL_ERR(priv, "Error parsing the gnl message : %d\n", result);
994 return result; 396 return result;
995 }
996 397
997 /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
998 if (!tb[IWL_TM_ATTR_COMMAND]) {
999 IWL_ERR(priv, "Missing testmode command type\n");
1000 return -ENOMSG;
1001 }
1002 /* in case multiple accesses to the device happen */ 398 /* in case multiple accesses to the device happen */
1003 mutex_lock(&priv->mutex); 399 mutex_lock(&priv->mutex);
1004
1005 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { 400 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
1006 case IWL_TM_CMD_APP2DEV_UCODE: 401 case IWL_TM_CMD_APP2DEV_UCODE:
1007 IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n");
1008 result = iwl_testmode_ucode(hw, tb);
1009 break;
1010 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32: 402 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
1011 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32: 403 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
1012 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8: 404 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
1013 IWL_DEBUG_INFO(priv, "testmode cmd to register\n"); 405 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
1014 result = iwl_testmode_reg(hw, tb); 406 case IWL_TM_CMD_APP2DEV_END_TRACE:
407 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
408 case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
409 case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
410 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
411 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
412 result = iwl_test_handle_cmd(&priv->tst, tb);
1015 break; 413 break;
414
1016 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME: 415 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
1017 case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW: 416 case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
1018 case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB: 417 case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
@@ -1020,45 +419,25 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
1020 case IWL_TM_CMD_APP2DEV_GET_EEPROM: 419 case IWL_TM_CMD_APP2DEV_GET_EEPROM:
1021 case IWL_TM_CMD_APP2DEV_FIXRATE_REQ: 420 case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
1022 case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: 421 case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
1023 case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
1024 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
1025 case IWL_TM_CMD_APP2DEV_GET_FW_INFO: 422 case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
1026 IWL_DEBUG_INFO(priv, "testmode cmd to driver\n"); 423 IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
1027 result = iwl_testmode_driver(hw, tb); 424 result = iwl_testmode_driver(hw, tb);
1028 break; 425 break;
1029 426
1030 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
1031 case IWL_TM_CMD_APP2DEV_END_TRACE:
1032 case IWL_TM_CMD_APP2DEV_READ_TRACE:
1033 IWL_DEBUG_INFO(priv, "testmode uCode trace cmd to driver\n");
1034 result = iwl_testmode_trace(hw, tb);
1035 break;
1036
1037 case IWL_TM_CMD_APP2DEV_OWNERSHIP: 427 case IWL_TM_CMD_APP2DEV_OWNERSHIP:
1038 IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n"); 428 IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n");
1039 result = iwl_testmode_ownership(hw, tb); 429 result = iwl_testmode_ownership(hw, tb);
1040 break; 430 break;
1041 431
1042 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
1043 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
1044 IWL_DEBUG_INFO(priv, "testmode indirect memory cmd "
1045 "to driver\n");
1046 result = iwl_testmode_indirect_mem(hw, tb);
1047 break;
1048
1049 case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
1050 IWL_DEBUG_INFO(priv, "testmode notifications cmd "
1051 "to driver\n");
1052 result = iwl_testmode_notifications(hw, tb);
1053 break;
1054
1055 default: 432 default:
1056 IWL_ERR(priv, "Unknown testmode command\n"); 433 IWL_ERR(priv, "Unknown testmode command\n");
1057 result = -ENOSYS; 434 result = -ENOSYS;
1058 break; 435 break;
1059 } 436 }
1060
1061 mutex_unlock(&priv->mutex); 437 mutex_unlock(&priv->mutex);
438
439 if (result)
440 IWL_ERR(priv, "Test cmd failed result=%d\n", result);
1062 return result; 441 return result;
1063} 442}
1064 443
@@ -1066,7 +445,6 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
1066 struct netlink_callback *cb, 445 struct netlink_callback *cb,
1067 void *data, int len) 446 void *data, int len)
1068{ 447{
1069 struct nlattr *tb[IWL_TM_ATTR_MAX];
1070 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 448 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1071 int result; 449 int result;
1072 u32 cmd; 450 u32 cmd;
@@ -1075,39 +453,19 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
1075 /* offset by 1 since commands start at 0 */ 453 /* offset by 1 since commands start at 0 */
1076 cmd = cb->args[3] - 1; 454 cmd = cb->args[3] - 1;
1077 } else { 455 } else {
1078 result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len, 456 struct nlattr *tb[IWL_TM_ATTR_MAX];
1079 iwl_testmode_gnl_msg_policy); 457
1080 if (result) { 458 result = iwl_test_parse(&priv->tst, tb, data, len);
1081 IWL_ERR(priv, 459 if (result)
1082 "Error parsing the gnl message : %d\n", result);
1083 return result; 460 return result;
1084 }
1085 461
1086 /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
1087 if (!tb[IWL_TM_ATTR_COMMAND]) {
1088 IWL_ERR(priv, "Missing testmode command type\n");
1089 return -ENOMSG;
1090 }
1091 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]); 462 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
1092 cb->args[3] = cmd + 1; 463 cb->args[3] = cmd + 1;
1093 } 464 }
1094 465
1095 /* in case multiple accesses to the device happen */ 466 /* in case multiple accesses to the device happen */
1096 mutex_lock(&priv->mutex); 467 mutex_lock(&priv->mutex);
1097 switch (cmd) { 468 result = iwl_test_dump(&priv->tst, cmd, skb, cb);
1098 case IWL_TM_CMD_APP2DEV_READ_TRACE:
1099 IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
1100 result = iwl_testmode_trace_dump(hw, skb, cb);
1101 break;
1102 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
1103 IWL_DEBUG_INFO(priv, "testmode sram dump cmd to driver\n");
1104 result = iwl_testmode_buffer_dump(hw, skb, cb);
1105 break;
1106 default:
1107 result = -EINVAL;
1108 break;
1109 }
1110
1111 mutex_unlock(&priv->mutex); 469 mutex_unlock(&priv->mutex);
1112 return result; 470 return result;
1113} 471}
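Taken together, the testmode.c hunks move the device-agnostic test logic (register access, tracing, indirect memory dumps, notifications) into the new iwl-test.c behind a struct iwl_test_ops vtable; the dvm op_mode now supplies only thin adapters such as iwl_testmode_send_cmd(). A userspace sketch of that ops-table split; the names parallel the driver's but are simplified.

#include <stdio.h>

/* Shared test-harness side: everything device-specific goes through ops. */
struct test_ops {
        int      (*send_cmd)(void *ctx, int id);
        unsigned (*get_fw_ver)(void *ctx);
};

struct test {
        void *ctx;
        const struct test_ops *ops;
};

static int test_handle_cmd(struct test *tst, int id)
{
        printf("fw version 0x%x\n", tst->ops->get_fw_ver(tst->ctx));
        return tst->ops->send_cmd(tst->ctx, id);
}

/* One op_mode's thin adapters, as testmode.c now provides for dvm. */
struct dvm_priv { unsigned ucode_ver; };

static int dvm_send_cmd(void *ctx, int id)
{
        (void)ctx;
        printf("dvm sends host command %d\n", id);
        return 0;
}

static unsigned dvm_get_fw_ver(void *ctx)
{
        return ((struct dvm_priv *)ctx)->ucode_ver;
}

static const struct test_ops dvm_test_ops = {
        .send_cmd   = dvm_send_cmd,
        .get_fw_ver = dvm_get_fw_ver,
};

int main(void)
{
        struct dvm_priv priv = { .ucode_ver = 0x11223344 };
        struct test tst = { .ctx = &priv, .ops = &dvm_test_ops };

        return test_handle_cmd(&tst, 7);
}

The same iwl_test core can then serve a future op_mode (such as mvm) by plugging in a different ops table, which is the point of splitting it out of dvm.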
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 0dfaf649b25..5971a23aa47 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -403,6 +403,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
403 403
404 info->driver_data[0] = ctx; 404 info->driver_data[0] = ctx;
405 info->driver_data[1] = dev_cmd; 405 info->driver_data[1] = dev_cmd;
406 /* From now on, we cannot access info->control */
406 407
407 spin_lock(&priv->sta_lock); 408 spin_lock(&priv->sta_lock);
408 409
@@ -1182,7 +1183,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1182 } 1183 }
1183 1184
1184 /*we can free until ssn % q.n_bd not inclusive */ 1185 /*we can free until ssn % q.n_bd not inclusive */
1185 WARN_ON(iwl_reclaim(priv, sta_id, tid, txq_id, ssn, &skbs)); 1186 WARN_ON_ONCE(iwl_reclaim(priv, sta_id, tid,
1187 txq_id, ssn, &skbs));
1186 iwlagn_check_ratid_empty(priv, sta_id, tid); 1188 iwlagn_check_ratid_empty(priv, sta_id, tid);
1187 freed = 0; 1189 freed = 0;
1188 1190
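The second tx.c hunk downgrades a WARN_ON on the reclaim path to WARN_ON_ONCE: iwlagn_rx_reply_tx() runs per frame, so a persistent reclaim failure would otherwise emit a backtrace for every packet. A sketch of the once-only latch, modeled on the kernel macro with a GNU C statement expression:

#include <stdio.h>

/* Once-only warning latch; a GNU C statement expression, as in the
 * kernel's own WARN_ON_ONCE(). */
#define WARN_ON_ONCE_SKETCH(cond)                                \
({                                                               \
        static int __warned;                                     \
        int __c = !!(cond);                                      \
        if (__c && !__warned) {                                  \
                __warned = 1;                                    \
                fprintf(stderr, "warning (reported once)\n");    \
        }                                                        \
        __c;                                                     \
})

int main(void)
{
        int i;

        for (i = 0; i < 1000; i++)
                WARN_ON_ONCE_SKETCH(1);  /* prints exactly one line */
        return 0;
}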
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 49df0e9d5c5..a175997e782 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -131,6 +131,8 @@ struct iwl_drv {
131#define DVM_OP_MODE 0 131#define DVM_OP_MODE 0
132#define MVM_OP_MODE 1 132#define MVM_OP_MODE 1
133 133
134/* Protects the table contents, i.e. the ops pointer & drv list */
135static struct mutex iwlwifi_opmode_table_mtx;
134static struct iwlwifi_opmode_table { 136static struct iwlwifi_opmode_table {
135 const char *name; /* name: iwldvm, iwlmvm, etc */ 137 const char *name; /* name: iwldvm, iwlmvm, etc */
136 const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */ 138 const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */
@@ -776,6 +778,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
776 const unsigned int api_min = drv->cfg->ucode_api_min; 778 const unsigned int api_min = drv->cfg->ucode_api_min;
777 u32 api_ver; 779 u32 api_ver;
778 int i; 780 int i;
781 bool load_module = false;
779 782
780 fw->ucode_capa.max_probe_length = 200; 783 fw->ucode_capa.max_probe_length = 200;
781 fw->ucode_capa.standard_phy_calibration_size = 784 fw->ucode_capa.standard_phy_calibration_size =
@@ -898,6 +901,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
898 /* We have our copies now, allow OS release its copies */ 901 /* We have our copies now, allow OS release its copies */
899 release_firmware(ucode_raw); 902 release_firmware(ucode_raw);
900 903
904 mutex_lock(&iwlwifi_opmode_table_mtx);
901 op = &iwlwifi_opmode_table[DVM_OP_MODE]; 905 op = &iwlwifi_opmode_table[DVM_OP_MODE];
902 906
903 /* add this device to the list of devices using this op_mode */ 907 /* add this device to the list of devices using this op_mode */
@@ -907,11 +911,14 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
907 const struct iwl_op_mode_ops *ops = op->ops; 911 const struct iwl_op_mode_ops *ops = op->ops;
908 drv->op_mode = ops->start(drv->trans, drv->cfg, &drv->fw); 912 drv->op_mode = ops->start(drv->trans, drv->cfg, &drv->fw);
909 913
910 if (!drv->op_mode) 914 if (!drv->op_mode) {
915 mutex_unlock(&iwlwifi_opmode_table_mtx);
911 goto out_unbind; 916 goto out_unbind;
917 }
912 } else { 918 } else {
913 request_module_nowait("%s", op->name); 919 load_module = true;
914 } 920 }
921 mutex_unlock(&iwlwifi_opmode_table_mtx);
915 922
916 /* 923 /*
917 * Complete the firmware request last so that 924 * Complete the firmware request last so that
@@ -919,6 +926,14 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
919 * are doing the start() above. 926 * are doing the start() above.
920 */ 927 */
921 complete(&drv->request_firmware_complete); 928 complete(&drv->request_firmware_complete);
929
930 /*
931 * Load the module last so we don't block anything
932 * else from proceeding if the module fails to load
933 * or hangs loading.
934 */
935 if (load_module)
936 request_module("%s", op->name);
922 return; 937 return;
923 938
924 try_again: 939 try_again:
@@ -952,6 +967,7 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
952 drv->cfg = cfg; 967 drv->cfg = cfg;
953 968
954 init_completion(&drv->request_firmware_complete); 969 init_completion(&drv->request_firmware_complete);
970 INIT_LIST_HEAD(&drv->list);
955 971
956 ret = iwl_request_firmware(drv, true); 972 ret = iwl_request_firmware(drv, true);
957 973
@@ -974,6 +990,16 @@ void iwl_drv_stop(struct iwl_drv *drv)
974 990
975 iwl_dealloc_ucode(drv); 991 iwl_dealloc_ucode(drv);
976 992
993 mutex_lock(&iwlwifi_opmode_table_mtx);
994 /*
995 * List is empty (this item wasn't added)
996 * when firmware loading failed -- in that
997 * case we can't remove it from any list.
998 */
999 if (!list_empty(&drv->list))
1000 list_del(&drv->list);
1001 mutex_unlock(&iwlwifi_opmode_table_mtx);
1002
977 kfree(drv); 1003 kfree(drv);
978} 1004}
979 1005
@@ -996,6 +1022,7 @@ int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
996 int i; 1022 int i;
997 struct iwl_drv *drv; 1023 struct iwl_drv *drv;
998 1024
1025 mutex_lock(&iwlwifi_opmode_table_mtx);
999 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) { 1026 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
1000 if (strcmp(iwlwifi_opmode_table[i].name, name)) 1027 if (strcmp(iwlwifi_opmode_table[i].name, name))
1001 continue; 1028 continue;
@@ -1003,8 +1030,10 @@ int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
1003 list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) 1030 list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
1004 drv->op_mode = ops->start(drv->trans, drv->cfg, 1031 drv->op_mode = ops->start(drv->trans, drv->cfg,
1005 &drv->fw); 1032 &drv->fw);
1033 mutex_unlock(&iwlwifi_opmode_table_mtx);
1006 return 0; 1034 return 0;
1007 } 1035 }
1036 mutex_unlock(&iwlwifi_opmode_table_mtx);
1008 return -EIO; 1037 return -EIO;
1009} 1038}
1010EXPORT_SYMBOL_GPL(iwl_opmode_register); 1039EXPORT_SYMBOL_GPL(iwl_opmode_register);
@@ -1014,6 +1043,7 @@ void iwl_opmode_deregister(const char *name)
1014 int i; 1043 int i;
1015 struct iwl_drv *drv; 1044 struct iwl_drv *drv;
1016 1045
1046 mutex_lock(&iwlwifi_opmode_table_mtx);
1017 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) { 1047 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
1018 if (strcmp(iwlwifi_opmode_table[i].name, name)) 1048 if (strcmp(iwlwifi_opmode_table[i].name, name))
1019 continue; 1049 continue;
@@ -1026,8 +1056,10 @@ void iwl_opmode_deregister(const char *name)
1026 drv->op_mode = NULL; 1056 drv->op_mode = NULL;
1027 } 1057 }
1028 } 1058 }
1059 mutex_unlock(&iwlwifi_opmode_table_mtx);
1029 return; 1060 return;
1030 } 1061 }
1062 mutex_unlock(&iwlwifi_opmode_table_mtx);
1031} 1063}
1032EXPORT_SYMBOL_GPL(iwl_opmode_deregister); 1064EXPORT_SYMBOL_GPL(iwl_opmode_deregister);
1033 1065
@@ -1035,6 +1067,8 @@ static int __init iwl_drv_init(void)
1035{ 1067{
1036 int i; 1068 int i;
1037 1069
1070 mutex_init(&iwlwifi_opmode_table_mtx);
1071
1038 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) 1072 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++)
1039 INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv); 1073 INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv);
1040 1074
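The iwl-drv.c hunks close a race on the opmode table: the ops pointer and each driver's list membership are now only touched under iwlwifi_opmode_table_mtx, and the potentially slow module load happens after the firmware-request completion, outside the lock. A pthreads sketch of the decide-under-lock, act-outside-lock shape; the names are simplified stand-ins for the table and op_mode machinery.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_mtx = PTHREAD_MUTEX_INITIALIZER;
static const char *registered_op;        /* stands in for op->ops */

static void opmode_register(const char *name)
{
        pthread_mutex_lock(&table_mtx);
        registered_op = name;            /* table only mutated under lock */
        pthread_mutex_unlock(&table_mtx);
}

static void ucode_callback(void)
{
        int load_module = 0;

        pthread_mutex_lock(&table_mtx);
        if (!registered_op)
                load_module = 1;         /* decide under the lock... */
        pthread_mutex_unlock(&table_mtx);

        if (load_module)                 /* ...do the slow work outside it */
                printf("request_module(\"iwldvm\")\n");
        else
                printf("op_mode already present, starting it\n");
}

int main(void)
{
        ucode_callback();                /* no op_mode yet: load the module */
        opmode_register("iwldvm");
        ucode_callback();                /* second device: start directly */
        return 0;
}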
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 74bce97a860..80604664174 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -421,6 +421,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
421 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4) 421 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
422 422
423#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98) 423#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
424#define FH_TX_TRB_REG(_chan) (FH_MEM_LOWER_BOUND + 0x958 + (_chan) * 4)
425
424/* Instruct FH to increment the retry count of a packet when 426/* Instruct FH to increment the retry count of a packet when
425 * it is brought from the memory to TX-FIFO 427 * it is brought from the memory to TX-FIFO
426 */ 428 */
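FH_TX_TRB_REG(_chan), added above, follows the usual per-channel register idiom: a fixed offset inside the FH window plus a 4-byte stride per TX DMA channel. A sketch of how such a macro expands; the 0x1000 base is an assumption matching the customary FH_MEM_LOWER_BOUND, not quoted from this diff.

#include <stdio.h>

#define FH_MEM_LOWER_BOUND 0x1000   /* assumed base of the FH window */
#define FH_TX_TRB_REG(_chan) (FH_MEM_LOWER_BOUND + 0x958 + (_chan) * 4)

int main(void)
{
        int chan;

        for (chan = 0; chan < 4; chan++)
                printf("TRB register, chan %d: 0x%x\n",
                       chan, FH_TX_TRB_REG(chan));
        return 0;
}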
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 5f2df70b73c..66c873399ab 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -298,8 +298,8 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
298} 298}
299EXPORT_SYMBOL_GPL(iwl_clear_bits_prph); 299EXPORT_SYMBOL_GPL(iwl_clear_bits_prph);
300 300
301void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr, 301void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
302 void *buf, int words) 302 void *buf, int dwords)
303{ 303{
304 unsigned long flags; 304 unsigned long flags;
305 int offs; 305 int offs;
@@ -308,26 +308,26 @@ void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr,
308 spin_lock_irqsave(&trans->reg_lock, flags); 308 spin_lock_irqsave(&trans->reg_lock, flags);
309 if (likely(iwl_grab_nic_access(trans))) { 309 if (likely(iwl_grab_nic_access(trans))) {
310 iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr); 310 iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
311 for (offs = 0; offs < words; offs++) 311 for (offs = 0; offs < dwords; offs++)
312 vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT); 312 vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
313 iwl_release_nic_access(trans); 313 iwl_release_nic_access(trans);
314 } 314 }
315 spin_unlock_irqrestore(&trans->reg_lock, flags); 315 spin_unlock_irqrestore(&trans->reg_lock, flags);
316} 316}
317EXPORT_SYMBOL_GPL(_iwl_read_targ_mem_words); 317EXPORT_SYMBOL_GPL(_iwl_read_targ_mem_dwords);
318 318
319u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr) 319u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr)
320{ 320{
321 u32 value; 321 u32 value;
322 322
323 _iwl_read_targ_mem_words(trans, addr, &value, 1); 323 _iwl_read_targ_mem_dwords(trans, addr, &value, 1);
324 324
325 return value; 325 return value;
326} 326}
327EXPORT_SYMBOL_GPL(iwl_read_targ_mem); 327EXPORT_SYMBOL_GPL(iwl_read_targ_mem);
328 328
329int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr, 329int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
330 void *buf, int words) 330 void *buf, int dwords)
331{ 331{
332 unsigned long flags; 332 unsigned long flags;
333 int offs, result = 0; 333 int offs, result = 0;
@@ -336,7 +336,7 @@ int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
336 spin_lock_irqsave(&trans->reg_lock, flags); 336 spin_lock_irqsave(&trans->reg_lock, flags);
337 if (likely(iwl_grab_nic_access(trans))) { 337 if (likely(iwl_grab_nic_access(trans))) {
338 iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); 338 iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
339 for (offs = 0; offs < words; offs++) 339 for (offs = 0; offs < dwords; offs++)
340 iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]); 340 iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]);
341 iwl_release_nic_access(trans); 341 iwl_release_nic_access(trans);
342 } else 342 } else
@@ -345,10 +345,10 @@ int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
345 345
346 return result; 346 return result;
347} 347}
348EXPORT_SYMBOL_GPL(_iwl_write_targ_mem_words); 348EXPORT_SYMBOL_GPL(_iwl_write_targ_mem_dwords);
349 349
350int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val) 350int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val)
351{ 351{
352 return _iwl_write_targ_mem_words(trans, addr, &val, 1); 352 return _iwl_write_targ_mem_dwords(trans, addr, &val, 1);
353} 353}
354EXPORT_SYMBOL_GPL(iwl_write_targ_mem); 354EXPORT_SYMBOL_GPL(iwl_write_targ_mem);
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 4a9a45f771e..50d3819739d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -76,18 +76,18 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
76 u32 bits, u32 mask); 76 u32 bits, u32 mask);
77void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask); 77void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask);
78 78
79void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr, 79void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
80 void *buf, int words); 80 void *buf, int dwords);
81 81
82#define iwl_read_targ_mem_words(trans, addr, buf, bufsize) \ 82#define iwl_read_targ_mem_bytes(trans, addr, buf, bufsize) \
83 do { \ 83 do { \
84 BUILD_BUG_ON((bufsize) % sizeof(u32)); \ 84 BUILD_BUG_ON((bufsize) % sizeof(u32)); \
85 _iwl_read_targ_mem_words(trans, addr, buf, \ 85 _iwl_read_targ_mem_dwords(trans, addr, buf, \
86 (bufsize) / sizeof(u32));\ 86 (bufsize) / sizeof(u32));\
87 } while (0) 87 } while (0)
88 88
89int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr, 89int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
90 void *buf, int words); 90 void *buf, int dwords);
91 91
92u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr); 92u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr);
93int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val); 93int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val);
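The rename makes the unit explicit: the low-level _iwl_{read,write}_targ_mem_dwords() helpers count in 32-bit dwords, while the new iwl_read_targ_mem_bytes() wrapper takes a byte count and rejects sizes that are not a dword multiple at compile time. A minimal usage sketch, assuming a valid trans handle; sram_addr is a placeholder, not from this patch:

	u8 buf[16];	/* sizeof(buf) must be a multiple of sizeof(u32),
			 * otherwise the BUILD_BUG_ON() breaks the build */

	iwl_read_targ_mem_bytes(trans, sram_addr, buf, sizeof(buf));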
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.c b/drivers/net/wireless/iwlwifi/iwl-test.c
new file mode 100644
index 00000000000..81e8c7126d7
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-test.c
@@ -0,0 +1,856 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <linux/export.h>
65#include <net/netlink.h>
66
67#include "iwl-io.h"
68#include "iwl-fh.h"
69#include "iwl-prph.h"
70#include "iwl-trans.h"
71#include "iwl-test.h"
72#include "iwl-csr.h"
73#include "iwl-testmode.h"
74
75/*
76 * Periphery registers absolute lower bound. This is used in order to
 77 * differentiate register access through HBUS_TARG_PRPH_* and
78 * HBUS_TARG_MEM_* accesses.
79 */
80#define IWL_ABS_PRPH_START (0xA00000)
81
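Put differently, addresses at or above IWL_ABS_PRPH_START are treated as periphery registers and everything below as target memory (SRAM). A hedged helper sketch, equivalent to the range check the indirect read/write handlers below perform inline (the helper name is invented for illustration):

	static inline bool iwl_test_addr_is_prph(u32 addr)
	{
		/* same bounds as iwl_test_indirect_read()/_write() below */
		return addr >= IWL_ABS_PRPH_START &&
		       addr < IWL_ABS_PRPH_START + PRPH_END;
	}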
82/*
83 * The TLVs used in the gnl message policy between the kernel module and
84 * user space application. iwl_testmode_gnl_msg_policy is to be carried
85 * through the NL80211_CMD_TESTMODE channel regulated by nl80211.
86 * See iwl-testmode.h
87 */
88static
89struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
90 [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },
91
92 [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
93 [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },
94
95 [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
96 [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
97 [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },
98
99 [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
100 [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
101
102 [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
103
104 [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
105 [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
106 [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },
107
108 [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
109
110 [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
111
112 [IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
113 [IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
114 [IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },
115
116 [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
117 [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
118 [IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
119 [IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
120 [IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },
121
122 [IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
123};
124
125static inline void iwl_test_trace_clear(struct iwl_test *tst)
126{
127 memset(&tst->trace, 0, sizeof(struct iwl_test_trace));
128}
129
130static void iwl_test_trace_stop(struct iwl_test *tst)
131{
132 if (!tst->trace.enabled)
133 return;
134
135 if (tst->trace.cpu_addr && tst->trace.dma_addr)
136 dma_free_coherent(tst->trans->dev,
137 tst->trace.tsize,
138 tst->trace.cpu_addr,
139 tst->trace.dma_addr);
140
141 iwl_test_trace_clear(tst);
142}
143
144static inline void iwl_test_mem_clear(struct iwl_test *tst)
145{
146 memset(&tst->mem, 0, sizeof(struct iwl_test_mem));
147}
148
149static inline void iwl_test_mem_stop(struct iwl_test *tst)
150{
151 if (!tst->mem.in_read)
152 return;
153
154 iwl_test_mem_clear(tst);
155}
156
157/*
158 * Initializes the test object
159 * During the lifetime of the test object it is assumed that the transport is
160 * started. The test object should be stopped before the transport is stopped.
161 */
162void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
163 struct iwl_test_ops *ops)
164{
165 tst->trans = trans;
166 tst->ops = ops;
167
168 iwl_test_trace_clear(tst);
169 iwl_test_mem_clear(tst);
170}
171EXPORT_SYMBOL_GPL(iwl_test_init);
172
173/*
174 * Stop the test object
175 */
176void iwl_test_free(struct iwl_test *tst)
177{
178 iwl_test_mem_stop(tst);
179 iwl_test_trace_stop(tst);
180}
181EXPORT_SYMBOL_GPL(iwl_test_free);
182
183static inline int iwl_test_send_cmd(struct iwl_test *tst,
184 struct iwl_host_cmd *cmd)
185{
186 return tst->ops->send_cmd(tst->trans->op_mode, cmd);
187}
188
189static inline bool iwl_test_valid_hw_addr(struct iwl_test *tst, u32 addr)
190{
191 return tst->ops->valid_hw_addr(addr);
192}
193
194static inline u32 iwl_test_fw_ver(struct iwl_test *tst)
195{
196 return tst->ops->get_fw_ver(tst->trans->op_mode);
197}
198
199static inline struct sk_buff*
200iwl_test_alloc_reply(struct iwl_test *tst, int len)
201{
202 return tst->ops->alloc_reply(tst->trans->op_mode, len);
203}
204
205static inline int iwl_test_reply(struct iwl_test *tst, struct sk_buff *skb)
206{
207 return tst->ops->reply(tst->trans->op_mode, skb);
208}
209
210static inline struct sk_buff*
211iwl_test_alloc_event(struct iwl_test *tst, int len)
212{
213 return tst->ops->alloc_event(tst->trans->op_mode, len);
214}
215
216static inline void
217iwl_test_event(struct iwl_test *tst, struct sk_buff *skb)
218{
219 return tst->ops->event(tst->trans->op_mode, skb);
220}
221
222/*
223 * This function handles the user application commands to the fw. The fw
 224 * commands are sent in a synchronous manner. In case the user requested
 225 * to get the command's response, it is sent to the user.
226 */
227static int iwl_test_fw_cmd(struct iwl_test *tst, struct nlattr **tb)
228{
229 struct iwl_host_cmd cmd;
230 struct iwl_rx_packet *pkt;
231 struct sk_buff *skb;
232 void *reply_buf;
233 u32 reply_len;
234 int ret;
235 bool cmd_want_skb;
236
237 memset(&cmd, 0, sizeof(struct iwl_host_cmd));
238
239 if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
240 !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
241 IWL_ERR(tst->trans, "Missing fw command mandatory fields\n");
242 return -ENOMSG;
243 }
244
245 cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
246 cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
247 if (cmd_want_skb)
248 cmd.flags |= CMD_WANT_SKB;
249
250 cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
251 cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
252 cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
253 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
254 IWL_DEBUG_INFO(tst->trans, "test fw cmd=0x%x, flags 0x%x, len %d\n",
255 cmd.id, cmd.flags, cmd.len[0]);
256
257 ret = iwl_test_send_cmd(tst, &cmd);
258 if (ret) {
259 IWL_ERR(tst->trans, "Failed to send hcmd\n");
260 return ret;
261 }
262 if (!cmd_want_skb)
263 return ret;
264
265 /* Handling return of SKB to the user */
266 pkt = cmd.resp_pkt;
267 if (!pkt) {
268 IWL_ERR(tst->trans, "HCMD received a null response packet\n");
269 return ret;
270 }
271
272 reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
273 skb = iwl_test_alloc_reply(tst, reply_len + 20);
274 reply_buf = kmalloc(reply_len, GFP_KERNEL);
275 if (!skb || !reply_buf) {
276 kfree_skb(skb);
277 kfree(reply_buf);
278 return -ENOMEM;
279 }
280
 281	/* The reply is in a page, which we cannot send to user space. */
282 memcpy(reply_buf, &(pkt->hdr), reply_len);
283 iwl_free_resp(&cmd);
284
285 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
286 IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
287 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
288 goto nla_put_failure;
289 return iwl_test_reply(tst, skb);
290
291nla_put_failure:
292 IWL_DEBUG_INFO(tst->trans, "Failed creating NL attributes\n");
293 kfree(reply_buf);
294 kfree_skb(skb);
295 return -ENOMSG;
296}
297
298/*
299 * Handles the user application commands for register access.
300 */
301static int iwl_test_reg(struct iwl_test *tst, struct nlattr **tb)
302{
303 u32 ofs, val32, cmd;
304 u8 val8;
305 struct sk_buff *skb;
306 int status = 0;
307 struct iwl_trans *trans = tst->trans;
308
309 if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
310 IWL_ERR(trans, "Missing reg offset\n");
311 return -ENOMSG;
312 }
313
314 ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
315 IWL_DEBUG_INFO(trans, "test reg access cmd offset=0x%x\n", ofs);
316
317 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
318
319 /*
320 * Allow access only to FH/CSR/HBUS in direct mode.
321 * Since we don't have the upper bounds for the CSR and HBUS segments,
322 * we will use only the upper bound of FH for sanity check.
323 */
324 if (ofs >= FH_MEM_UPPER_BOUND) {
325 IWL_ERR(trans, "offset out of segment (0x0 - 0x%x)\n",
326 FH_MEM_UPPER_BOUND);
327 return -EINVAL;
328 }
329
330 switch (cmd) {
331 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
332 val32 = iwl_read_direct32(tst->trans, ofs);
333 IWL_DEBUG_INFO(trans, "32 value to read 0x%x\n", val32);
334
335 skb = iwl_test_alloc_reply(tst, 20);
336 if (!skb) {
337 IWL_ERR(trans, "Memory allocation fail\n");
338 return -ENOMEM;
339 }
340 if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
341 goto nla_put_failure;
342 status = iwl_test_reply(tst, skb);
343 if (status < 0)
344 IWL_ERR(trans, "Error sending msg : %d\n", status);
345 break;
346
347 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
348 if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
349 IWL_ERR(trans, "Missing value to write\n");
350 return -ENOMSG;
351 } else {
352 val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
353 IWL_DEBUG_INFO(trans, "32b write val=0x%x\n", val32);
354 iwl_write_direct32(tst->trans, ofs, val32);
355 }
356 break;
357
358 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
359 if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
360 IWL_ERR(trans, "Missing value to write\n");
361 return -ENOMSG;
362 } else {
363 val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
364 IWL_DEBUG_INFO(trans, "8b write val=0x%x\n", val8);
365 iwl_write8(tst->trans, ofs, val8);
366 }
367 break;
368
369 default:
370 IWL_ERR(trans, "Unknown test register cmd ID\n");
371 return -ENOMSG;
372 }
373
374 return status;
375
376nla_put_failure:
377 kfree_skb(skb);
378 return -EMSGSIZE;
379}
380
381/*
 382 * Handles the request to start FW tracing. Allocates the trace buffer
383 * and sends a reply to user space with the address of the allocated buffer.
384 */
385static int iwl_test_trace_begin(struct iwl_test *tst, struct nlattr **tb)
386{
387 struct sk_buff *skb;
388 int status = 0;
389
390 if (tst->trace.enabled)
391 return -EBUSY;
392
393 if (!tb[IWL_TM_ATTR_TRACE_SIZE])
394 tst->trace.size = TRACE_BUFF_SIZE_DEF;
395 else
396 tst->trace.size =
397 nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);
398
399 if (!tst->trace.size)
400 return -EINVAL;
401
402 if (tst->trace.size < TRACE_BUFF_SIZE_MIN ||
403 tst->trace.size > TRACE_BUFF_SIZE_MAX)
404 return -EINVAL;
405
406 tst->trace.tsize = tst->trace.size + TRACE_BUFF_PADD;
407 tst->trace.cpu_addr = dma_alloc_coherent(tst->trans->dev,
408 tst->trace.tsize,
409 &tst->trace.dma_addr,
410 GFP_KERNEL);
411 if (!tst->trace.cpu_addr)
412 return -ENOMEM;
413
414 tst->trace.enabled = true;
415 tst->trace.trace_addr = (u8 *)PTR_ALIGN(tst->trace.cpu_addr, 0x100);
416
417 memset(tst->trace.trace_addr, 0x03B, tst->trace.size);
418
419 skb = iwl_test_alloc_reply(tst, sizeof(tst->trace.dma_addr) + 20);
420 if (!skb) {
421 IWL_ERR(tst->trans, "Memory allocation fail\n");
422 iwl_test_trace_stop(tst);
423 return -ENOMEM;
424 }
425
426 if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
427 sizeof(tst->trace.dma_addr),
428 (u64 *)&tst->trace.dma_addr))
429 goto nla_put_failure;
430
431 status = iwl_test_reply(tst, skb);
432 if (status < 0)
433 IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
434
435 tst->trace.nchunks = DIV_ROUND_UP(tst->trace.size,
436 DUMP_CHUNK_SIZE);
437
438 return status;
439
440nla_put_failure:
441 kfree_skb(skb);
442 if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
443 IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
444 iwl_test_trace_stop(tst);
445 return -EMSGSIZE;
446}
447
448/*
449 * Handles indirect read from the periphery or the SRAM. The read is performed
 450 * into a temporary buffer. The user space application should later issue a dump command.
451 */
452static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
453{
454 struct iwl_trans *trans = tst->trans;
455 unsigned long flags;
456 int i;
457
458 if (size & 0x3)
459 return -EINVAL;
460
461 tst->mem.size = size;
462 tst->mem.addr = kmalloc(tst->mem.size, GFP_KERNEL);
463 if (tst->mem.addr == NULL)
464 return -ENOMEM;
465
466 /* Hard-coded periphery absolute address */
467 if (IWL_ABS_PRPH_START <= addr &&
468 addr < IWL_ABS_PRPH_START + PRPH_END) {
469 spin_lock_irqsave(&trans->reg_lock, flags);
470 iwl_grab_nic_access(trans);
471 iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
472 addr | (3 << 24));
473 for (i = 0; i < size; i += 4)
474 *(u32 *)(tst->mem.addr + i) =
475 iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
476 iwl_release_nic_access(trans);
477 spin_unlock_irqrestore(&trans->reg_lock, flags);
478 } else { /* target memory (SRAM) */
479 _iwl_read_targ_mem_dwords(trans, addr,
480 tst->mem.addr,
481 tst->mem.size / 4);
482 }
483
484 tst->mem.nchunks =
485 DIV_ROUND_UP(tst->mem.size, DUMP_CHUNK_SIZE);
486 tst->mem.in_read = true;
487 return 0;
488
489}
490
491/*
 492 * Handles indirect write to the periphery or SRAM. The write is performed
 493 * from a user-supplied buffer.
494 */
495static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
496 u32 size, unsigned char *buf)
497{
498 struct iwl_trans *trans = tst->trans;
499 u32 val, i;
500 unsigned long flags;
501
502 if (IWL_ABS_PRPH_START <= addr &&
503 addr < IWL_ABS_PRPH_START + PRPH_END) {
504 /* Periphery writes can be 1-3 bytes long, or DWORDs */
505 if (size < 4) {
506 memcpy(&val, buf, size);
507 spin_lock_irqsave(&trans->reg_lock, flags);
508 iwl_grab_nic_access(trans);
509 iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
510 (addr & 0x0000FFFF) |
511 ((size - 1) << 24));
512 iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
513 iwl_release_nic_access(trans);
514 /* needed after consecutive writes w/o read */
515 mmiowb();
516 spin_unlock_irqrestore(&trans->reg_lock, flags);
517 } else {
518 if (size % 4)
519 return -EINVAL;
520 for (i = 0; i < size; i += 4)
521 iwl_write_prph(trans, addr+i,
522 *(u32 *)(buf+i));
523 }
524 } else if (iwl_test_valid_hw_addr(tst, addr)) {
525 _iwl_write_targ_mem_dwords(trans, addr, buf, size / 4);
526 } else {
527 return -EINVAL;
528 }
529 return 0;
530}
531
532/*
533 * Handles the user application commands for indirect read/write
534 * to/from the periphery or the SRAM.
535 */
536static int iwl_test_indirect_mem(struct iwl_test *tst, struct nlattr **tb)
537{
538 u32 addr, size, cmd;
539 unsigned char *buf;
540
541 /* Both read and write should be blocked, for atomicity */
542 if (tst->mem.in_read)
543 return -EBUSY;
544
545 cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
546 if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
547 IWL_ERR(tst->trans, "Error finding memory offset address\n");
548 return -ENOMSG;
549 }
550 addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
551 if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
552 IWL_ERR(tst->trans, "Error finding size for memory reading\n");
553 return -ENOMSG;
554 }
555 size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);
556
557 if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ) {
558 return iwl_test_indirect_read(tst, addr, size);
559 } else {
560 if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
561 return -EINVAL;
562 buf = (unsigned char *)nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
563 return iwl_test_indirect_write(tst, addr, size, buf);
564 }
565}
566
567/*
568 * Enable notifications to user space
569 */
570static int iwl_test_notifications(struct iwl_test *tst,
571 struct nlattr **tb)
572{
573 tst->notify = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
574 return 0;
575}
576
577/*
578 * Handles the request to get the device id
579 */
580static int iwl_test_get_dev_id(struct iwl_test *tst, struct nlattr **tb)
581{
582 u32 devid = tst->trans->hw_id;
583 struct sk_buff *skb;
584 int status;
585
586 IWL_DEBUG_INFO(tst->trans, "hw version: 0x%x\n", devid);
587
588 skb = iwl_test_alloc_reply(tst, 20);
589 if (!skb) {
590 IWL_ERR(tst->trans, "Memory allocation fail\n");
591 return -ENOMEM;
592 }
593
594 if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
595 goto nla_put_failure;
596 status = iwl_test_reply(tst, skb);
597 if (status < 0)
598 IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
599
600 return 0;
601
602nla_put_failure:
603 kfree_skb(skb);
604 return -EMSGSIZE;
605}
606
607/*
608 * Handles the request to get the FW version
609 */
610static int iwl_test_get_fw_ver(struct iwl_test *tst, struct nlattr **tb)
611{
612 struct sk_buff *skb;
613 int status;
614 u32 ver = iwl_test_fw_ver(tst);
615
616 IWL_DEBUG_INFO(tst->trans, "uCode version raw: 0x%x\n", ver);
617
618 skb = iwl_test_alloc_reply(tst, 20);
619 if (!skb) {
620 IWL_ERR(tst->trans, "Memory allocation fail\n");
621 return -ENOMEM;
622 }
623
624 if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, ver))
625 goto nla_put_failure;
626
627 status = iwl_test_reply(tst, skb);
628 if (status < 0)
629 IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
630
631 return 0;
632
633nla_put_failure:
634 kfree_skb(skb);
635 return -EMSGSIZE;
636}
637
638/*
 639 * Parse the netlink message and validate that IWL_TM_ATTR_COMMAND exists
640 */
641int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
642 void *data, int len)
643{
644 int result;
645
646 result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
647 iwl_testmode_gnl_msg_policy);
648 if (result) {
649 IWL_ERR(tst->trans, "Fail parse gnl msg: %d\n", result);
650 return result;
651 }
652
653 /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
654 if (!tb[IWL_TM_ATTR_COMMAND]) {
655 IWL_ERR(tst->trans, "Missing testmode command type\n");
656 return -ENOMSG;
657 }
658 return 0;
659}
660EXPORT_SYMBOL_GPL(iwl_test_parse);
661
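The expected call flow is parse first, then dispatch; a sketch of how an op_mode's nl80211 testmode hook might chain the two (the wrapper name and error handling are assumptions, not part of this patch):

	static int iwl_tm_cmd_execute(struct iwl_test *tst, void *data, int len)
	{
		struct nlattr *tb[IWL_TM_ATTR_MAX];
		int err;

		err = iwl_test_parse(tst, tb, data, len);
		if (err)
			return err;

		/* returns 1 when the command must be handled by the caller */
		return iwl_test_handle_cmd(tst, tb);
	}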
662/*
663 * Handle test commands.
664 * Returns 1 for unknown commands (not handled by the test object); negative
665 * value in case of error.
666 */
667int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb)
668{
669 int result;
670
671 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
672 case IWL_TM_CMD_APP2DEV_UCODE:
673 IWL_DEBUG_INFO(tst->trans, "test cmd to uCode\n");
674 result = iwl_test_fw_cmd(tst, tb);
675 break;
676
677 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
678 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
679 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
680 IWL_DEBUG_INFO(tst->trans, "test cmd to register\n");
681 result = iwl_test_reg(tst, tb);
682 break;
683
684 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
685 IWL_DEBUG_INFO(tst->trans, "test uCode trace cmd to driver\n");
686 result = iwl_test_trace_begin(tst, tb);
687 break;
688
689 case IWL_TM_CMD_APP2DEV_END_TRACE:
690 iwl_test_trace_stop(tst);
691 result = 0;
692 break;
693
694 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
695 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
696 IWL_DEBUG_INFO(tst->trans, "test indirect memory cmd\n");
697 result = iwl_test_indirect_mem(tst, tb);
698 break;
699
700 case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
701 IWL_DEBUG_INFO(tst->trans, "test notifications cmd\n");
702 result = iwl_test_notifications(tst, tb);
703 break;
704
705 case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
706 IWL_DEBUG_INFO(tst->trans, "test get FW ver cmd\n");
707 result = iwl_test_get_fw_ver(tst, tb);
708 break;
709
710 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
711 IWL_DEBUG_INFO(tst->trans, "test Get device ID cmd\n");
712 result = iwl_test_get_dev_id(tst, tb);
713 break;
714
715 default:
716 IWL_DEBUG_INFO(tst->trans, "Unknown test command\n");
717 result = 1;
718 break;
719 }
720 return result;
721}
722EXPORT_SYMBOL_GPL(iwl_test_handle_cmd);
723
724static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb,
725 struct netlink_callback *cb)
726{
727 int idx, length;
728
729 if (!tst->trace.enabled || !tst->trace.trace_addr)
730 return -EFAULT;
731
732 idx = cb->args[4];
733 if (idx >= tst->trace.nchunks)
734 return -ENOENT;
735
736 length = DUMP_CHUNK_SIZE;
737 if (((idx + 1) == tst->trace.nchunks) &&
738 (tst->trace.size % DUMP_CHUNK_SIZE))
739 length = tst->trace.size %
740 DUMP_CHUNK_SIZE;
741
742 if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
743 tst->trace.trace_addr + (DUMP_CHUNK_SIZE * idx)))
744 goto nla_put_failure;
745
746 cb->args[4] = ++idx;
747 return 0;
748
749 nla_put_failure:
750 return -ENOBUFS;
751}
752
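The chunk arithmetic above is easier to see with concrete numbers. Assuming, purely for illustration, DUMP_CHUNK_SIZE were 4096 and the trace size 10000 bytes:

	/* nchunks = DIV_ROUND_UP(10000, 4096) = 3
	 * idx 0 and 1:  length = 4096
	 * idx 2 (last): length = 10000 % 4096 = 1808
	 * total sent:   4096 + 4096 + 1808 = 10000 bytes, no padding */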
753static int iwl_test_buffer_dump(struct iwl_test *tst, struct sk_buff *skb,
754 struct netlink_callback *cb)
755{
756 int idx, length;
757
758 if (!tst->mem.in_read)
759 return -EFAULT;
760
761 idx = cb->args[4];
762 if (idx >= tst->mem.nchunks) {
763 iwl_test_mem_stop(tst);
764 return -ENOENT;
765 }
766
767 length = DUMP_CHUNK_SIZE;
768 if (((idx + 1) == tst->mem.nchunks) &&
769 (tst->mem.size % DUMP_CHUNK_SIZE))
770 length = tst->mem.size % DUMP_CHUNK_SIZE;
771
772 if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
773 tst->mem.addr + (DUMP_CHUNK_SIZE * idx)))
774 goto nla_put_failure;
775
776 cb->args[4] = ++idx;
777 return 0;
778
779 nla_put_failure:
780 return -ENOBUFS;
781}
782
783/*
784 * Handle dump commands.
785 * Returns 1 for unknown commands (not handled by the test object); negative
786 * value in case of error.
787 */
788int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
789 struct netlink_callback *cb)
790{
791 int result;
792
793 switch (cmd) {
794 case IWL_TM_CMD_APP2DEV_READ_TRACE:
795 IWL_DEBUG_INFO(tst->trans, "uCode trace cmd\n");
796 result = iwl_test_trace_dump(tst, skb, cb);
797 break;
798
799 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
800 IWL_DEBUG_INFO(tst->trans, "testmode sram dump cmd\n");
801 result = iwl_test_buffer_dump(tst, skb, cb);
802 break;
803
804 default:
805 result = 1;
806 break;
807 }
808 return result;
809}
810EXPORT_SYMBOL_GPL(iwl_test_dump);
811
812/*
 813 * Multicasts spontaneous messages from the device to user space.
814 */
815static void iwl_test_send_rx(struct iwl_test *tst,
816 struct iwl_rx_cmd_buffer *rxb)
817{
818 struct sk_buff *skb;
819 struct iwl_rx_packet *data;
820 int length;
821
822 data = rxb_addr(rxb);
823 length = le32_to_cpu(data->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
824
825 /* the length doesn't include len_n_flags field, so add it manually */
826 length += sizeof(__le32);
827
828 skb = iwl_test_alloc_event(tst, length + 20);
829 if (skb == NULL) {
830 IWL_ERR(tst->trans, "Out of memory for message to user\n");
831 return;
832 }
833
834 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
835 IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
836 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data))
837 goto nla_put_failure;
838
839 iwl_test_event(tst, skb);
840 return;
841
842nla_put_failure:
843 kfree_skb(skb);
844 IWL_ERR(tst->trans, "Ouch, overran buffer, check allocation!\n");
845}
846
847/*
 848 * Called whenever an Rx frame is received from the device. If notifications to
 849 * user space are requested, the frame is sent to the user.
850 */
851void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb)
852{
853 if (tst->notify)
854 iwl_test_send_rx(tst, rxb);
855}
856EXPORT_SYMBOL_GPL(iwl_test_rx);
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.h b/drivers/net/wireless/iwlwifi/iwl-test.h
new file mode 100644
index 00000000000..e13ffa8acc0
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-test.h
@@ -0,0 +1,161 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#ifndef __IWL_TEST_H__
65#define __IWL_TEST_H__
66
67#include <linux/types.h>
68#include "iwl-trans.h"
69
70struct iwl_test_trace {
71 u32 size;
72 u32 tsize;
73 u32 nchunks;
74 u8 *cpu_addr;
75 u8 *trace_addr;
76 dma_addr_t dma_addr;
77 bool enabled;
78};
79
80struct iwl_test_mem {
81 u32 size;
82 u32 nchunks;
83 u8 *addr;
84 bool in_read;
85};
86
87/*
88 * struct iwl_test_ops: callback to the op mode
89 *
 90 * The structure defines the callbacks that the op_mode should implement
 91 * in order to handle logic that is out of the scope of iwl_test. The
 92 * op_mode must set all the callbacks.
 93 *
94 * @send_cmd: handler that is used by the test object to request the
95 * op_mode to send a command to the fw.
96 *
97 * @valid_hw_addr: handler that is used by the test object to request the
98 * op_mode to check if the given address is a valid address.
99 *
100 * @get_fw_ver: handler used to get the FW version.
101 *
102 * @alloc_reply: handler used by the test object to request the op_mode
103 * to allocate an skb for sending a reply to the user, and initialize
104 * the skb. It is assumed that the test object only fills the required
105 * attributes.
106 *
107 * @reply: handler used by the test object to request the op_mode to reply
 108 *	to a request. The skb is an skb previously allocated by the
109 * alloc_reply callback.
 110 *
111 * @alloc_event: handler used by the test object to request the op_mode
112 * to allocate an skb for sending an event, and initialize
113 * the skb. It is assumed that the test object only fills the required
114 * attributes.
115 *
 116 * @event: handler used by the test object to request the op_mode to send
 117 *	an event. The skb is an skb previously allocated by the
118 * alloc_event callback.
119 */
120struct iwl_test_ops {
 121	int (*send_cmd)(struct iwl_op_mode *op_mode,
122 struct iwl_host_cmd *cmd);
123 bool (*valid_hw_addr)(u32 addr);
124 u32 (*get_fw_ver)(struct iwl_op_mode *op_mode);
125
126 struct sk_buff *(*alloc_reply)(struct iwl_op_mode *op_mode, int len);
127 int (*reply)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
 128	struct sk_buff *(*alloc_event)(struct iwl_op_mode *op_mode, int len);
129 void (*event)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
130};
131
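A hedged sketch of how an op_mode might wire up these callbacks before handing the test object to iwl_test_init(); every iwl_dvm_tm_* name, and the priv container, is hypothetical:

	static struct iwl_test_ops tst_ops = {
		.send_cmd = iwl_dvm_tm_send_cmd,
		.valid_hw_addr = iwl_dvm_tm_valid_hw_addr,
		.get_fw_ver = iwl_dvm_tm_get_fw_ver,
		.alloc_reply = iwl_dvm_tm_alloc_reply,
		.reply = iwl_dvm_tm_reply,
		.alloc_event = iwl_dvm_tm_alloc_event,
		.event = iwl_dvm_tm_event,
	};

	/* ... in the op_mode start path: */
	iwl_test_init(&priv->tst, trans, &tst_ops);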
132struct iwl_test {
133 struct iwl_trans *trans;
134 struct iwl_test_ops *ops;
135 struct iwl_test_trace trace;
136 struct iwl_test_mem mem;
137 bool notify;
138};
139
140void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
141 struct iwl_test_ops *ops);
142
143void iwl_test_free(struct iwl_test *tst);
144
145int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
146 void *data, int len);
147
148int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb);
149
150int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
151 struct netlink_callback *cb);
152
153void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb);
154
155static inline void iwl_test_enable_notifications(struct iwl_test *tst,
156 bool enable)
157{
158 tst->notify = enable;
159}
160
161#endif
diff --git a/drivers/net/wireless/iwlwifi/dvm/testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h
index 6ba211b0942..6ba211b0942 100644
--- a/drivers/net/wireless/iwlwifi/dvm/testmode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h
diff --git a/drivers/net/wireless/iwlwifi/pcie/6000.c b/drivers/net/wireless/iwlwifi/pcie/6000.c
index cb08ba03aae..4a57624afc4 100644
--- a/drivers/net/wireless/iwlwifi/pcie/6000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/6000.c
@@ -258,6 +258,7 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
258 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ 258 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
259 .base_params = &iwl6000_g2_base_params, \ 259 .base_params = &iwl6000_g2_base_params, \
260 .bt_params = &iwl6000_bt_params, \ 260 .bt_params = &iwl6000_bt_params, \
261 .eeprom_params = &iwl6000_eeprom_params, \
261 .need_temp_offset_calib = true, \ 262 .need_temp_offset_calib = true, \
262 .led_mode = IWL_LED_RF_STATE, \ 263 .led_mode = IWL_LED_RF_STATE, \
263 .adv_pm = true 264 .adv_pm = true
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 94201c4d622..5024fb662bf 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -339,16 +339,9 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans,
339void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, 339void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
340 struct iwl_tx_queue *txq, 340 struct iwl_tx_queue *txq,
341 u16 byte_cnt); 341 u16 byte_cnt);
342void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
343void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
344void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
345 struct iwl_tx_queue *txq,
346 int tx_fifo_id, bool active);
347void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id,
348 int fifo, int sta_id, int tid,
349 int frame_limit, u16 ssn);
350void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, 342void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
351 int sta_id, int tid, int frame_limit, u16 ssn); 343 int sta_id, int tid, int frame_limit, u16 ssn);
344void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
352void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, 345void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
353 enum dma_data_direction dma_dir); 346 enum dma_data_direction dma_dir);
354int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, 347int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 7461a6a1433..32ab8ea5613 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -298,6 +298,10 @@ static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
298 struct iwl_tx_queue *txq = (void *)data; 298 struct iwl_tx_queue *txq = (void *)data;
299 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; 299 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
300 struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); 300 struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
301 u32 scd_sram_addr = trans_pcie->scd_base_addr +
302 SCD_TX_STTS_MEM_LOWER_BOUND + (16 * txq->q.id);
303 u8 buf[16];
304 int i;
301 305
302 spin_lock(&txq->lock); 306 spin_lock(&txq->lock);
303 /* check if triggered erroneously */ 307 /* check if triggered erroneously */
@@ -307,15 +311,40 @@ static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
307 } 311 }
308 spin_unlock(&txq->lock); 312 spin_unlock(&txq->lock);
309 313
310
311 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id, 314 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
312 jiffies_to_msecs(trans_pcie->wd_timeout)); 315 jiffies_to_msecs(trans_pcie->wd_timeout));
313 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", 316 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
314 txq->q.read_ptr, txq->q.write_ptr); 317 txq->q.read_ptr, txq->q.write_ptr);
315 IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n", 318
316 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq->q.id)) 319 iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
317 & (TFD_QUEUE_SIZE_MAX - 1), 320
318 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq->q.id))); 321 iwl_print_hex_error(trans, buf, sizeof(buf));
322
323 for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
324 IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
325 iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
326
327 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
328 u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
329 u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
330 bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
331 u32 tbl_dw =
332 iwl_read_targ_mem(trans,
333 trans_pcie->scd_base_addr +
334 SCD_TRANS_TBL_OFFSET_QUEUE(i));
335
336 if (i & 0x1)
337 tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
338 else
339 tbl_dw = tbl_dw & 0x0000FFFF;
340
341 IWL_ERR(trans,
342 "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
343 i, active ? "" : "in", fifo, tbl_dw,
344 iwl_read_prph(trans,
345 SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
346 iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
347 }
319 348
320 iwl_op_mode_nic_error(trans->op_mode); 349 iwl_op_mode_nic_error(trans->op_mode);
321} 350}
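The masking of tbl_dw above assumes the SCD translation table packs two 16-bit ra_tid entries per 32-bit word, with odd-numbered queues in the upper half. A hypothetical extraction helper, only to make the layout explicit:

	static inline u16 scd_trans_tbl_entry(u32 tbl_dw, int txq_id)
	{
		/* odd queues: bits 31:16, even queues: bits 15:0 */
		return (txq_id & 0x1) ? (tbl_dw >> 16) : (tbl_dw & 0xFFFF);
	}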
@@ -1054,23 +1083,21 @@ static void iwl_tx_start(struct iwl_trans *trans)
1054 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, 1083 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
1055 trans_pcie->scd_bc_tbls.dma >> 10); 1084 trans_pcie->scd_bc_tbls.dma >> 10);
1056 1085
1086 /* The chain extension of the SCD doesn't work well. This feature is
1087 * enabled by default by the HW, so we need to disable it manually.
1088 */
1089 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
1090
1057 for (i = 0; i < trans_pcie->n_q_to_fifo; i++) { 1091 for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
1058 int fifo = trans_pcie->setup_q_to_fifo[i]; 1092 int fifo = trans_pcie->setup_q_to_fifo[i];
1059 1093
1060 __iwl_trans_pcie_txq_enable(trans, i, fifo, IWL_INVALID_STATION, 1094 iwl_trans_pcie_txq_enable(trans, i, fifo, IWL_INVALID_STATION,
1061 IWL_TID_NON_QOS, 1095 IWL_TID_NON_QOS, SCD_FRAME_LIMIT, 0);
1062 SCD_FRAME_LIMIT, 0);
1063 } 1096 }
1064 1097
1065 /* Activate all Tx DMA/FIFO channels */ 1098 /* Activate all Tx DMA/FIFO channels */
1066 iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7)); 1099 iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
1067 1100
1068 /* The chain extension of the SCD doesn't work well. This feature is
1069 * enabled by default by the HW, so we need to disable it manually.
1070 */
1071 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
1072
1073
1074 /* Enable DMA channel */ 1101 /* Enable DMA channel */
1075 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) 1102 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
1076 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), 1103 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
@@ -1239,6 +1266,19 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1239 1266
1240 spin_lock(&txq->lock); 1267 spin_lock(&txq->lock);
1241 1268
1269 /* In AGG mode, the index in the ring must correspond to the WiFi
 1270	 * sequence number. This is a HW requirement to help the SCD parse
1271 * the BA.
1272 * Check here that the packets are in the right place on the ring.
1273 */
1274#ifdef CONFIG_IWLWIFI_DEBUG
1275 wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
1276 WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
1277 ((wifi_seq & 0xff) != q->write_ptr),
1278 "Q: %d WiFi Seq %d tfdNum %d",
1279 txq_id, wifi_seq, q->write_ptr);
1280#endif
1281
1242 /* Set up driver data for this TFD */ 1282 /* Set up driver data for this TFD */
1243 txq->entries[q->write_ptr].skb = skb; 1283 txq->entries[q->write_ptr].skb = skb;
1244 txq->entries[q->write_ptr].cmd = dev_cmd; 1284 txq->entries[q->write_ptr].cmd = dev_cmd;
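The check added above relies on the Tx ring having 256 entries, so the low byte of the 802.11 sequence number must land on the current write pointer. With illustrative values:

	/* seq_ctrl = 0x1230  =>  wifi_seq = SEQ_TO_SN(0x1230) = 0x123
	 * expected slot:  wifi_seq & 0xff = 0x23
	 * the WARN_ONCE fires only if the queue is aggregation-enabled
	 * (set in SCD_AGGR_SEL) and q->write_ptr != 0x23 */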
@@ -1332,7 +1372,8 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1332 skb->data + hdr_len, secondlen); 1372 skb->data + hdr_len, secondlen);
1333 1373
1334 /* start timer if queue currently empty */ 1374 /* start timer if queue currently empty */
1335 if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout) 1375 if (txq->need_update && q->read_ptr == q->write_ptr &&
1376 trans_pcie->wd_timeout)
1336 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); 1377 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
1337 1378
1338 /* Tell device the write index *just past* this latest filled TFD */ 1379 /* Tell device the write index *just past* this latest filled TFD */
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 35e82161ca4..6baf8deef51 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -380,8 +380,8 @@ static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
380 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; 380 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
381} 381}
382 382
383static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid, 383static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
384 u16 txq_id) 384 u16 txq_id)
385{ 385{
386 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 386 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
387 u32 tbl_dw_addr; 387 u32 tbl_dw_addr;
@@ -405,7 +405,7 @@ static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
405 return 0; 405 return 0;
406} 406}
407 407
408static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id) 408static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id)
409{ 409{
410 /* Simply stop the queue, but don't change any configuration; 410 /* Simply stop the queue, but don't change any configuration;
411 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ 411 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
@@ -415,46 +415,16 @@ static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
415 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); 415 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
416} 416}
417 417
418void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index) 418void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
419{ 419 int sta_id, int tid, int frame_limit, u16 ssn)
420 IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d\n", txq_id, index & 0xff);
421 iwl_write_direct32(trans, HBUS_TARG_WRPTR,
422 (index & 0xff) | (txq_id << 8));
423 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
424}
425
426void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
427 struct iwl_tx_queue *txq,
428 int tx_fifo_id, bool active)
429{
430 int txq_id = txq->q.id;
431
432 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
433 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
434 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
435 (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
436 SCD_QUEUE_STTS_REG_MSK);
437
438 if (active)
439 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d\n",
440 txq_id, tx_fifo_id);
441 else
442 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
443}
444
445void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id,
446 int fifo, int sta_id, int tid,
447 int frame_limit, u16 ssn)
448{ 420{
449 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 421 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
450 422
451 lockdep_assert_held(&trans_pcie->irq_lock);
452
453 if (test_and_set_bit(txq_id, trans_pcie->queue_used)) 423 if (test_and_set_bit(txq_id, trans_pcie->queue_used))
454 WARN_ONCE(1, "queue %d already used - expect issues", txq_id); 424 WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
455 425
456 /* Stop this Tx queue before configuring it */ 426 /* Stop this Tx queue before configuring it */
457 iwlagn_tx_queue_stop_scheduler(trans, txq_id); 427 iwl_txq_set_inactive(trans, txq_id);
458 428
459 /* Set this queue as a chain-building queue unless it is CMD queue */ 429 /* Set this queue as a chain-building queue unless it is CMD queue */
460 if (txq_id != trans_pcie->cmd_queue) 430 if (txq_id != trans_pcie->cmd_queue)
@@ -465,17 +435,27 @@ void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id,
465 u16 ra_tid = BUILD_RAxTID(sta_id, tid); 435 u16 ra_tid = BUILD_RAxTID(sta_id, tid);
466 436
467 /* Map receiver-address / traffic-ID to this queue */ 437 /* Map receiver-address / traffic-ID to this queue */
468 iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id); 438 iwl_txq_set_ratid_map(trans, ra_tid, txq_id);
469 439
470 /* enable aggregations for the queue */ 440 /* enable aggregations for the queue */
471 iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); 441 iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
442 } else {
443 /*
444 * disable aggregations for the queue, this will also make the
445 * ra_tid mapping configuration irrelevant since it is now a
446 * non-AGG queue.
447 */
448 iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
472 } 449 }
473 450
474 /* Place first TFD at index corresponding to start sequence number. 451 /* Place first TFD at index corresponding to start sequence number.
475 * Assumes that ssn_idx is valid (!= 0xFFF) */ 452 * Assumes that ssn_idx is valid (!= 0xFFF) */
476 trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff); 453 trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
477 trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff); 454 trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
478 iwl_trans_set_wr_ptrs(trans, txq_id, ssn); 455
456 iwl_write_direct32(trans, HBUS_TARG_WRPTR,
457 (ssn & 0xff) | (txq_id << 8));
458 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
479 459
480 /* Set up Tx window size and frame limit for this queue */ 460 /* Set up Tx window size and frame limit for this queue */
481 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr + 461 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
@@ -488,43 +468,34 @@ void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id,
488 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); 468 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
489 469
490 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ 470 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
491 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 471 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
492 fifo, true); 472 (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
493} 473 (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
494 474 (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
495void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, 475 SCD_QUEUE_STTS_REG_MSK);
496 int sta_id, int tid, int frame_limit, u16 ssn) 476 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
497{ 477 txq_id, fifo, ssn & 0xff);
498 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
499 unsigned long flags;
500
501 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
502
503 __iwl_trans_pcie_txq_enable(trans, txq_id, fifo, sta_id,
504 tid, frame_limit, ssn);
505
506 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
507} 478}
508 479
509void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) 480void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
510{ 481{
511 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 482 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
483 u16 rd_ptr, wr_ptr;
484 int n_bd = trans_pcie->txq[txq_id].q.n_bd;
512 485
513 if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { 486 if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
514 WARN_ONCE(1, "queue %d not used", txq_id); 487 WARN_ONCE(1, "queue %d not used", txq_id);
515 return; 488 return;
516 } 489 }
517 490
518 iwlagn_tx_queue_stop_scheduler(trans, txq_id); 491 rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1);
519 492 wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id));
520 iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
521 493
522 trans_pcie->txq[txq_id].q.read_ptr = 0; 494 WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]",
523 trans_pcie->txq[txq_id].q.write_ptr = 0; 495 txq_id, rd_ptr, wr_ptr);
524 iwl_trans_set_wr_ptrs(trans, txq_id, 0);
525 496
526 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 497 iwl_txq_set_inactive(trans, txq_id);
527 0, false); 498 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
528} 499}
529 500
530/*************** HOST COMMAND QUEUE FUNCTIONS *****/ 501/*************** HOST COMMAND QUEUE FUNCTIONS *****/
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 8173ab66066..89f9a2a45de 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -27,6 +27,17 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
27 struct cfg80211_ap_settings *params) { 27 struct cfg80211_ap_settings *params) {
28 int i; 28 int i;
29 29
30 if (!params->privacy) {
31 bss_config->protocol = PROTOCOL_NO_SECURITY;
32 bss_config->key_mgmt = KEY_MGMT_NONE;
33 bss_config->wpa_cfg.length = 0;
34 priv->sec_info.wep_enabled = 0;
35 priv->sec_info.wpa_enabled = 0;
36 priv->sec_info.wpa2_enabled = 0;
37
38 return 0;
39 }
40
30 switch (params->auth_type) { 41 switch (params->auth_type) {
31 case NL80211_AUTHTYPE_OPEN_SYSTEM: 42 case NL80211_AUTHTYPE_OPEN_SYSTEM:
32 bss_config->auth_mode = WLAN_AUTH_OPEN; 43 bss_config->auth_mode = WLAN_AUTH_OPEN;
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 2e9e6af2136..dfcd02ab6ca 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2110,7 +2110,7 @@ resize_buf:
2110 while (check_bssid_list_item(bssid, bssid_len, buf, len)) { 2110 while (check_bssid_list_item(bssid, bssid_len, buf, len)) {
2111 if (rndis_bss_info_update(usbdev, bssid) && match_bssid && 2111 if (rndis_bss_info_update(usbdev, bssid) && match_bssid &&
2112 matched) { 2112 matched) {
2113 if (!ether_addr_equal(bssid->mac, match_bssid)) 2113 if (ether_addr_equal(bssid->mac, match_bssid))
2114 *matched = true; 2114 *matched = true;
2115 } 2115 }
2116 2116
diff --git a/include/net/bluetooth/a2mp.h b/include/net/bluetooth/a2mp.h
new file mode 100644
index 00000000000..6a76e0a0705
--- /dev/null
+++ b/include/net/bluetooth/a2mp.h
@@ -0,0 +1,126 @@
1/*
2 Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
3 Copyright (c) 2011,2012 Intel Corp.
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License version 2 and
7 only version 2 as published by the Free Software Foundation.
8
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
13*/
14
15#ifndef __A2MP_H
16#define __A2MP_H
17
18#include <net/bluetooth/l2cap.h>
19
20#define A2MP_FEAT_EXT 0x8000
21
22struct amp_mgr {
23 struct l2cap_conn *l2cap_conn;
24 struct l2cap_chan *a2mp_chan;
25 struct kref kref;
26 __u8 ident;
27 __u8 handle;
28 unsigned long flags;
29};
30
31struct a2mp_cmd {
32 __u8 code;
33 __u8 ident;
34 __le16 len;
35 __u8 data[0];
36} __packed;
37
38/* A2MP command codes */
39#define A2MP_COMMAND_REJ 0x01
40struct a2mp_cmd_rej {
41 __le16 reason;
42 __u8 data[0];
43} __packed;
44
45#define A2MP_DISCOVER_REQ 0x02
46struct a2mp_discov_req {
47 __le16 mtu;
48 __le16 ext_feat;
49} __packed;
50
51struct a2mp_cl {
52 __u8 id;
53 __u8 type;
54 __u8 status;
55} __packed;
56
57#define A2MP_DISCOVER_RSP 0x03
58struct a2mp_discov_rsp {
59 __le16 mtu;
60 __le16 ext_feat;
61 struct a2mp_cl cl[0];
62} __packed;
63
64#define A2MP_CHANGE_NOTIFY 0x04
65#define A2MP_CHANGE_RSP 0x05
66
67#define A2MP_GETINFO_REQ 0x06
68struct a2mp_info_req {
69 __u8 id;
70} __packed;
71
72#define A2MP_GETINFO_RSP 0x07
73struct a2mp_info_rsp {
74 __u8 id;
75 __u8 status;
76 __le32 total_bw;
77 __le32 max_bw;
78 __le32 min_latency;
79 __le16 pal_cap;
80 __le16 assoc_size;
81} __packed;
82
83#define A2MP_GETAMPASSOC_REQ 0x08
84struct a2mp_amp_assoc_req {
85 __u8 id;
86} __packed;
87
88#define A2MP_GETAMPASSOC_RSP 0x09
89struct a2mp_amp_assoc_rsp {
90 __u8 id;
91 __u8 status;
92 __u8 amp_assoc[0];
93} __packed;
94
95#define A2MP_CREATEPHYSLINK_REQ 0x0A
96#define A2MP_DISCONNPHYSLINK_REQ 0x0C
97struct a2mp_physlink_req {
98 __u8 local_id;
99 __u8 remote_id;
100 __u8 amp_assoc[0];
101} __packed;
102
103#define A2MP_CREATEPHYSLINK_RSP 0x0B
104#define A2MP_DISCONNPHYSLINK_RSP 0x0D
105struct a2mp_physlink_rsp {
106 __u8 local_id;
107 __u8 remote_id;
108 __u8 status;
109} __packed;
110
111/* A2MP response status */
112#define A2MP_STATUS_SUCCESS 0x00
113#define A2MP_STATUS_INVALID_CTRL_ID 0x01
114#define A2MP_STATUS_UNABLE_START_LINK_CREATION 0x02
115#define A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS 0x02
116#define A2MP_STATUS_COLLISION_OCCURED 0x03
117#define A2MP_STATUS_DISCONN_REQ_RECVD 0x04
118#define A2MP_STATUS_PHYS_LINK_EXISTS 0x05
119#define A2MP_STATUS_SECURITY_VIOLATION 0x06
120
121void amp_mgr_get(struct amp_mgr *mgr);
122int amp_mgr_put(struct amp_mgr *mgr);
123struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
124 struct sk_buff *skb);
125
126#endif /* __A2MP_H */
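As a usage sketch (not part of this header), a discover request carries the sender's MTU and an extended-features mask; the 670-byte MTU below is an assumed illustrative value, not mandated by this patch:

	static void a2mp_fill_discov_req(struct a2mp_discov_req *req)
	{
		req->mtu = cpu_to_le16(670);	/* assumed example MTU */
		req->ext_feat = cpu_to_le16(0);	/* no extended feature pages */
	}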
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 961669b648f..565d4bee1e4 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -1,4 +1,4 @@
1/* 1/*
2 BlueZ - Bluetooth protocol stack for Linux 2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated 3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 4
@@ -12,22 +12,19 @@
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED. 22 SOFTWARE IS DISCLAIMED.
23*/ 23*/
24 24
25#ifndef __BLUETOOTH_H 25#ifndef __BLUETOOTH_H
26#define __BLUETOOTH_H 26#define __BLUETOOTH_H
27 27
28#include <asm/types.h>
29#include <asm/byteorder.h>
30#include <linux/list.h>
31#include <linux/poll.h> 28#include <linux/poll.h>
32#include <net/sock.h> 29#include <net/sock.h>
33 30
@@ -168,8 +165,8 @@ typedef struct {
168#define BDADDR_LE_PUBLIC 0x01 165#define BDADDR_LE_PUBLIC 0x01
169#define BDADDR_LE_RANDOM 0x02 166#define BDADDR_LE_RANDOM 0x02
170 167
171#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}}) 168#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0} })
172#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff}}) 169#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff} })
173 170
174/* Copy, swap, convert BD Address */ 171/* Copy, swap, convert BD Address */
175static inline int bacmp(bdaddr_t *ba1, bdaddr_t *ba2) 172static inline int bacmp(bdaddr_t *ba1, bdaddr_t *ba2)
@@ -215,7 +212,7 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
215 struct msghdr *msg, size_t len, int flags); 212 struct msghdr *msg, size_t len, int flags);
216int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock, 213int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
217 struct msghdr *msg, size_t len, int flags); 214 struct msghdr *msg, size_t len, int flags);
218uint bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait); 215uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
219int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); 216int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
220int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo); 217int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
221 218
@@ -225,12 +222,12 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
225 222
226/* Skb helpers */ 223/* Skb helpers */
227struct l2cap_ctrl { 224struct l2cap_ctrl {
228 unsigned int sframe : 1, 225 unsigned int sframe:1,
229 poll : 1, 226 poll:1,
230 final : 1, 227 final:1,
231 fcs : 1, 228 fcs:1,
232 sar : 2, 229 sar:2,
233 super : 2; 230 super:2;
234 __u16 reqseq; 231 __u16 reqseq;
235 __u16 txseq; 232 __u16 txseq;
236 __u8 retries; 233 __u8 retries;
@@ -249,7 +246,8 @@ static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how)
249{ 246{
250 struct sk_buff *skb; 247 struct sk_buff *skb;
251 248
252 if ((skb = alloc_skb(len + BT_SKB_RESERVE, how))) { 249 skb = alloc_skb(len + BT_SKB_RESERVE, how);
250 if (skb) {
253 skb_reserve(skb, BT_SKB_RESERVE); 251 skb_reserve(skb, BT_SKB_RESERVE);
254 bt_cb(skb)->incoming = 0; 252 bt_cb(skb)->incoming = 0;
255 } 253 }
@@ -261,7 +259,8 @@ static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk,
261{ 259{
262 struct sk_buff *skb; 260 struct sk_buff *skb;
263 261
264 if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) { 262 skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err);
263 if (skb) {
265 skb_reserve(skb, BT_SKB_RESERVE); 264 skb_reserve(skb, BT_SKB_RESERVE);
266 bt_cb(skb)->incoming = 0; 265 bt_cb(skb)->incoming = 0;
267 } 266 }
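
The two allocation helpers above were reworked purely to keep the assignment out of the if condition, which is the preferred kernel style. A minimal sketch of the same idiom for a caller (example_alloc is hypothetical):

#include <net/bluetooth/bluetooth.h>

static struct sk_buff *example_alloc(unsigned int len, gfp_t how)
{
	struct sk_buff *skb;

	/* old, flagged by checkpatch:
	 *	if ((skb = bt_skb_alloc(len, how))) ...
	 * new, as in the hunks above: */
	skb = bt_skb_alloc(len, how);
	if (!skb)
		return NULL;

	return skb;
}
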
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 66a7b579e31..2a6b0b8b712 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -30,6 +30,9 @@
30#define HCI_MAX_EVENT_SIZE 260 30#define HCI_MAX_EVENT_SIZE 260
31#define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4) 31#define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4)
32 32
33#define HCI_LINK_KEY_SIZE 16
34#define HCI_AMP_LINK_KEY_SIZE (2 * HCI_LINK_KEY_SIZE)
35
33/* HCI dev events */ 36/* HCI dev events */
34#define HCI_DEV_REG 1 37#define HCI_DEV_REG 1
35#define HCI_DEV_UNREG 2 38#define HCI_DEV_UNREG 2
@@ -56,9 +59,12 @@
56#define HCI_BREDR 0x00 59#define HCI_BREDR 0x00
57#define HCI_AMP 0x01 60#define HCI_AMP 0x01
58 61
62/* First BR/EDR Controller shall have ID = 0 */
63#define HCI_BREDR_ID 0
64
59/* HCI device quirks */ 65/* HCI device quirks */
60enum { 66enum {
61 HCI_QUIRK_NO_RESET, 67 HCI_QUIRK_RESET_ON_CLOSE,
62 HCI_QUIRK_RAW_DEVICE, 68 HCI_QUIRK_RAW_DEVICE,
63 HCI_QUIRK_FIXUP_BUFFER_SIZE 69 HCI_QUIRK_FIXUP_BUFFER_SIZE
64}; 70};
@@ -133,10 +139,8 @@ enum {
133#define HCIINQUIRY _IOR('H', 240, int) 139#define HCIINQUIRY _IOR('H', 240, int)
134 140
135/* HCI timeouts */ 141/* HCI timeouts */
136#define HCI_CONNECT_TIMEOUT (40000) /* 40 seconds */
137#define HCI_DISCONN_TIMEOUT (2000) /* 2 seconds */ 142#define HCI_DISCONN_TIMEOUT (2000) /* 2 seconds */
138#define HCI_PAIRING_TIMEOUT (60000) /* 60 seconds */ 143#define HCI_PAIRING_TIMEOUT (60000) /* 60 seconds */
139#define HCI_IDLE_TIMEOUT (6000) /* 6 seconds */
140#define HCI_INIT_TIMEOUT (10000) /* 10 seconds */ 144#define HCI_INIT_TIMEOUT (10000) /* 10 seconds */
141#define HCI_CMD_TIMEOUT (1000) /* 1 seconds */ 145#define HCI_CMD_TIMEOUT (1000) /* 1 seconds */
142#define HCI_ACL_TX_TIMEOUT (45000) /* 45 seconds */ 146#define HCI_ACL_TX_TIMEOUT (45000) /* 45 seconds */
@@ -371,7 +375,7 @@ struct hci_cp_reject_conn_req {
371#define HCI_OP_LINK_KEY_REPLY 0x040b 375#define HCI_OP_LINK_KEY_REPLY 0x040b
372struct hci_cp_link_key_reply { 376struct hci_cp_link_key_reply {
373 bdaddr_t bdaddr; 377 bdaddr_t bdaddr;
374 __u8 link_key[16]; 378 __u8 link_key[HCI_LINK_KEY_SIZE];
375} __packed; 379} __packed;
376 380
377#define HCI_OP_LINK_KEY_NEG_REPLY 0x040c 381#define HCI_OP_LINK_KEY_NEG_REPLY 0x040c
@@ -523,6 +527,28 @@ struct hci_cp_io_capability_neg_reply {
523 __u8 reason; 527 __u8 reason;
524} __packed; 528} __packed;
525 529
530#define HCI_OP_CREATE_PHY_LINK 0x0435
531struct hci_cp_create_phy_link {
532 __u8 phy_handle;
533 __u8 key_len;
534 __u8 key_type;
535 __u8 key[HCI_AMP_LINK_KEY_SIZE];
536} __packed;
537
538#define HCI_OP_ACCEPT_PHY_LINK 0x0436
539struct hci_cp_accept_phy_link {
540 __u8 phy_handle;
541 __u8 key_len;
542 __u8 key_type;
543 __u8 key[HCI_AMP_LINK_KEY_SIZE];
544} __packed;
545
546#define HCI_OP_DISCONN_PHY_LINK 0x0437
547struct hci_cp_disconn_phy_link {
548 __u8 phy_handle;
549 __u8 reason;
550} __packed;
551
526#define HCI_OP_SNIFF_MODE 0x0803 552#define HCI_OP_SNIFF_MODE 0x0803
527struct hci_cp_sniff_mode { 553struct hci_cp_sniff_mode {
528 __le16 handle; 554 __le16 handle;
@@ -818,6 +844,31 @@ struct hci_rp_read_local_amp_info {
818 __le32 be_flush_to; 844 __le32 be_flush_to;
819} __packed; 845} __packed;
820 846
847#define HCI_OP_READ_LOCAL_AMP_ASSOC 0x140a
848struct hci_cp_read_local_amp_assoc {
849 __u8 phy_handle;
850 __le16 len_so_far;
851 __le16 max_len;
852} __packed;
853struct hci_rp_read_local_amp_assoc {
854 __u8 status;
855 __u8 phy_handle;
856 __le16 rem_len;
857 __u8 frag[0];
858} __packed;
859
860#define HCI_OP_WRITE_REMOTE_AMP_ASSOC 0x140b
861struct hci_cp_write_remote_amp_assoc {
862 __u8 phy_handle;
863 __le16 len_so_far;
864 __le16 rem_len;
865 __u8 frag[0];
866} __packed;
867struct hci_rp_write_remote_amp_assoc {
868 __u8 status;
869 __u8 phy_handle;
870} __packed;
871
821#define HCI_OP_LE_SET_EVENT_MASK 0x2001 872#define HCI_OP_LE_SET_EVENT_MASK 0x2001
822struct hci_cp_le_set_event_mask { 873struct hci_cp_le_set_event_mask {
823 __u8 mask[8]; 874 __u8 mask[8];
@@ -1048,7 +1099,7 @@ struct hci_ev_link_key_req {
1048#define HCI_EV_LINK_KEY_NOTIFY 0x18 1099#define HCI_EV_LINK_KEY_NOTIFY 0x18
1049struct hci_ev_link_key_notify { 1100struct hci_ev_link_key_notify {
1050 bdaddr_t bdaddr; 1101 bdaddr_t bdaddr;
1051 __u8 link_key[16]; 1102 __u8 link_key[HCI_LINK_KEY_SIZE];
1052 __u8 key_type; 1103 __u8 key_type;
1053} __packed; 1104} __packed;
1054 1105
@@ -1144,6 +1195,12 @@ struct extended_inquiry_info {
1144 __u8 data[240]; 1195 __u8 data[240];
1145} __packed; 1196} __packed;
1146 1197
1198#define HCI_EV_KEY_REFRESH_COMPLETE 0x30
1199struct hci_ev_key_refresh_complete {
1200 __u8 status;
1201 __le16 handle;
1202} __packed;
1203
1147#define HCI_EV_IO_CAPA_REQUEST 0x31 1204#define HCI_EV_IO_CAPA_REQUEST 0x31
1148struct hci_ev_io_capa_request { 1205struct hci_ev_io_capa_request {
1149 bdaddr_t bdaddr; 1206 bdaddr_t bdaddr;
@@ -1190,6 +1247,39 @@ struct hci_ev_le_meta {
1190 __u8 subevent; 1247 __u8 subevent;
1191} __packed; 1248} __packed;
1192 1249
1250#define HCI_EV_PHY_LINK_COMPLETE 0x40
1251struct hci_ev_phy_link_complete {
1252 __u8 status;
1253 __u8 phy_handle;
1254} __packed;
1255
1256#define HCI_EV_CHANNEL_SELECTED 0x41
1257struct hci_ev_channel_selected {
1258 __u8 phy_handle;
1259} __packed;
1260
1261#define HCI_EV_DISCONN_PHY_LINK_COMPLETE 0x42
1262struct hci_ev_disconn_phy_link_complete {
1263 __u8 status;
1264 __u8 phy_handle;
1265 __u8 reason;
1266} __packed;
1267
1268#define HCI_EV_LOGICAL_LINK_COMPLETE 0x45
1269struct hci_ev_logical_link_complete {
1270 __u8 status;
1271 __le16 handle;
1272 __u8 phy_handle;
1273 __u8 flow_spec_id;
1274} __packed;
1275
1276#define HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE 0x46
1277struct hci_ev_disconn_logical_link_complete {
1278 __u8 status;
1279 __le16 handle;
1280 __u8 reason;
1281} __packed;
1282
1193#define HCI_EV_NUM_COMP_BLOCKS 0x48 1283#define HCI_EV_NUM_COMP_BLOCKS 0x48
1194struct hci_comp_blocks_info { 1284struct hci_comp_blocks_info {
1195 __le16 handle; 1285 __le16 handle;
@@ -1290,7 +1380,6 @@ struct hci_sco_hdr {
1290 __u8 dlen; 1380 __u8 dlen;
1291} __packed; 1381} __packed;
1292 1382
1293#include <linux/skbuff.h>
1294static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb) 1383static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb)
1295{ 1384{
1296 return (struct hci_event_hdr *) skb->data; 1385 return (struct hci_event_hdr *) skb->data;
@@ -1307,12 +1396,12 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
1307} 1396}
1308 1397
1309/* Command opcode pack/unpack */ 1398/* Command opcode pack/unpack */
1310#define hci_opcode_pack(ogf, ocf) (__u16) ((ocf & 0x03ff)|(ogf << 10)) 1399#define hci_opcode_pack(ogf, ocf) ((__u16) ((ocf & 0x03ff)|(ogf << 10)))
1311#define hci_opcode_ogf(op) (op >> 10) 1400#define hci_opcode_ogf(op) (op >> 10)
1312#define hci_opcode_ocf(op) (op & 0x03ff) 1401#define hci_opcode_ocf(op) (op & 0x03ff)
1313 1402
1314/* ACL handle and flags pack/unpack */ 1403/* ACL handle and flags pack/unpack */
1315#define hci_handle_pack(h, f) (__u16) ((h & 0x0fff)|(f << 12)) 1404#define hci_handle_pack(h, f) ((__u16) ((h & 0x0fff)|(f << 12)))
1316#define hci_handle(h) (h & 0x0fff) 1405#define hci_handle(h) (h & 0x0fff)
1317#define hci_flags(h) (h >> 12) 1406#define hci_flags(h) (h >> 12)
1318 1407
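
The opcode pack/unpack macros gained outer parentheses so each macro expands to a single parenthesized expression wherever it is used. A worked example with the new AMP opcode, using only definitions from this header: 0x0435 splits into OGF 0x01 and OCF 0x035 and packs back to the same value.

#include <linux/bug.h>
#include <net/bluetooth/hci.h>

static void opcode_demo(void)
{
	__u16 op  = HCI_OP_CREATE_PHY_LINK;	/* 0x0435 */
	__u16 ogf = hci_opcode_ogf(op);		/* 0x0435 >> 10   = 0x01  */
	__u16 ocf = hci_opcode_ocf(op);		/* 0x0435 & 0x3ff = 0x035 */

	/* The outer parentheses make the cast bind to the whole
	 * expression, avoiding precedence surprises at the use site. */
	BUG_ON(hci_opcode_pack(ogf, ocf) != op);
}
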
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 9fc7728f94e..20fd57367dd 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -25,7 +25,6 @@
25#ifndef __HCI_CORE_H 25#ifndef __HCI_CORE_H
26#define __HCI_CORE_H 26#define __HCI_CORE_H
27 27
28#include <linux/interrupt.h>
29#include <net/bluetooth/hci.h> 28#include <net/bluetooth/hci.h>
30 29
31/* HCI priority */ 30/* HCI priority */
@@ -65,7 +64,7 @@ struct discovery_state {
65 DISCOVERY_RESOLVING, 64 DISCOVERY_RESOLVING,
66 DISCOVERY_STOPPING, 65 DISCOVERY_STOPPING,
67 } state; 66 } state;
68 struct list_head all; /* All devices found during inquiry */ 67 struct list_head all; /* All devices found during inquiry */
69 struct list_head unknown; /* Name state not known */ 68 struct list_head unknown; /* Name state not known */
70 struct list_head resolve; /* Name needs to be resolved */ 69 struct list_head resolve; /* Name needs to be resolved */
71 __u32 timestamp; 70 __u32 timestamp;
@@ -105,7 +104,7 @@ struct link_key {
105 struct list_head list; 104 struct list_head list;
106 bdaddr_t bdaddr; 105 bdaddr_t bdaddr;
107 u8 type; 106 u8 type;
108 u8 val[16]; 107 u8 val[HCI_LINK_KEY_SIZE];
109 u8 pin_len; 108 u8 pin_len;
110}; 109};
111 110
@@ -333,6 +332,7 @@ struct hci_conn {
333 void *l2cap_data; 332 void *l2cap_data;
334 void *sco_data; 333 void *sco_data;
335 void *smp_conn; 334 void *smp_conn;
335 struct amp_mgr *amp_mgr;
336 336
337 struct hci_conn *link; 337 struct hci_conn *link;
338 338
@@ -360,7 +360,8 @@ extern int l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
360extern int l2cap_disconn_ind(struct hci_conn *hcon); 360extern int l2cap_disconn_ind(struct hci_conn *hcon);
361extern int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason); 361extern int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
362extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt); 362extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
363extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags); 363extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb,
364 u16 flags);
364 365
365extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr); 366extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
366extern int sco_connect_cfm(struct hci_conn *hcon, __u8 status); 367extern int sco_connect_cfm(struct hci_conn *hcon, __u8 status);
@@ -429,8 +430,8 @@ enum {
429static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) 430static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
430{ 431{
431 struct hci_dev *hdev = conn->hdev; 432 struct hci_dev *hdev = conn->hdev;
432 return (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) && 433 return test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
433 test_bit(HCI_CONN_SSP_ENABLED, &conn->flags)); 434 test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
434} 435}
435 436
436static inline void hci_conn_hash_init(struct hci_dev *hdev) 437static inline void hci_conn_hash_init(struct hci_dev *hdev)
@@ -640,6 +641,19 @@ static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
640 dev_set_drvdata(&hdev->dev, data); 641 dev_set_drvdata(&hdev->dev, data);
641} 642}
642 643
644/* hci_dev_list shall be locked */
645static inline uint8_t __hci_num_ctrl(void)
646{
647 uint8_t count = 0;
648 struct list_head *p;
649
650 list_for_each(p, &hci_dev_list) {
651 count++;
652 }
653
654 return count;
655}
656
643struct hci_dev *hci_dev_get(int index); 657struct hci_dev *hci_dev_get(int index);
644struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst); 658struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
645 659
@@ -661,7 +675,8 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
661int hci_get_auth_info(struct hci_dev *hdev, void __user *arg); 675int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
662int hci_inquiry(void __user *arg); 676int hci_inquiry(void __user *arg);
663 677
664struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr); 678struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
679 bdaddr_t *bdaddr);
665int hci_blacklist_clear(struct hci_dev *hdev); 680int hci_blacklist_clear(struct hci_dev *hdev);
666int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); 681int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
667int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); 682int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
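
__hci_num_ctrl() iterates hci_dev_list without taking a lock itself, which is why the comment above it says the list shall be locked by the caller. A sketch of the intended call pattern, mirroring how the A2MP discover handler later in this patch uses it:

#include <net/bluetooth/hci_core.h>

static u8 count_controllers(void)
{
	u8 num;

	read_lock(&hci_dev_list_lock);
	num = __hci_num_ctrl();		/* list held stable by the read lock */
	read_unlock(&hci_dev_list_lock);

	return num;
}
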
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 1c7d1cd5e67..d80e3f0691b 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -40,11 +40,11 @@
40#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */ 40#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */
41#define L2CAP_DEFAULT_MAX_PDU_SIZE 1009 /* Sized for 3-DH5 packet */ 41#define L2CAP_DEFAULT_MAX_PDU_SIZE 1009 /* Sized for 3-DH5 packet */
42#define L2CAP_DEFAULT_ACK_TO 200 42#define L2CAP_DEFAULT_ACK_TO 200
43#define L2CAP_LE_DEFAULT_MTU 23
44#define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF 43#define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF
45#define L2CAP_DEFAULT_SDU_ITIME 0xFFFFFFFF 44#define L2CAP_DEFAULT_SDU_ITIME 0xFFFFFFFF
46#define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF 45#define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF
47#define L2CAP_BREDR_MAX_PAYLOAD 1019 /* 3-DH5 packet */ 46#define L2CAP_BREDR_MAX_PAYLOAD 1019 /* 3-DH5 packet */
47#define L2CAP_LE_MIN_MTU 23
48 48
49#define L2CAP_DISC_TIMEOUT msecs_to_jiffies(100) 49#define L2CAP_DISC_TIMEOUT msecs_to_jiffies(100)
50#define L2CAP_DISC_REJ_TIMEOUT msecs_to_jiffies(5000) 50#define L2CAP_DISC_REJ_TIMEOUT msecs_to_jiffies(5000)
@@ -52,6 +52,8 @@
52#define L2CAP_CONN_TIMEOUT msecs_to_jiffies(40000) 52#define L2CAP_CONN_TIMEOUT msecs_to_jiffies(40000)
53#define L2CAP_INFO_TIMEOUT msecs_to_jiffies(4000) 53#define L2CAP_INFO_TIMEOUT msecs_to_jiffies(4000)
54 54
55#define L2CAP_A2MP_DEFAULT_MTU 670
56
55/* L2CAP socket address */ 57/* L2CAP socket address */
56struct sockaddr_l2 { 58struct sockaddr_l2 {
57 sa_family_t l2_family; 59 sa_family_t l2_family;
@@ -229,9 +231,14 @@ struct l2cap_conn_rsp {
229 __le16 status; 231 __le16 status;
230} __packed; 232} __packed;
231 233
234/* protocol/service multiplexer (PSM) */
235#define L2CAP_PSM_SDP 0x0001
236#define L2CAP_PSM_RFCOMM 0x0003
237
232/* channel identifier */ 238/* channel identifier */
233#define L2CAP_CID_SIGNALING 0x0001 239#define L2CAP_CID_SIGNALING 0x0001
234#define L2CAP_CID_CONN_LESS 0x0002 240#define L2CAP_CID_CONN_LESS 0x0002
241#define L2CAP_CID_A2MP 0x0003
235#define L2CAP_CID_LE_DATA 0x0004 242#define L2CAP_CID_LE_DATA 0x0004
236#define L2CAP_CID_LE_SIGNALING 0x0005 243#define L2CAP_CID_LE_SIGNALING 0x0005
237#define L2CAP_CID_SMP 0x0006 244#define L2CAP_CID_SMP 0x0006
@@ -271,6 +278,9 @@ struct l2cap_conf_rsp {
271#define L2CAP_CONF_PENDING 0x0004 278#define L2CAP_CONF_PENDING 0x0004
272#define L2CAP_CONF_EFS_REJECT 0x0005 279#define L2CAP_CONF_EFS_REJECT 0x0005
273 280
281/* configuration req/rsp continuation flag */
282#define L2CAP_CONF_FLAG_CONTINUATION 0x0001
283
274struct l2cap_conf_opt { 284struct l2cap_conf_opt {
275 __u8 type; 285 __u8 type;
276 __u8 len; 286 __u8 len;
@@ -419,11 +429,6 @@ struct l2cap_seq_list {
419#define L2CAP_SEQ_LIST_CLEAR 0xFFFF 429#define L2CAP_SEQ_LIST_CLEAR 0xFFFF
420#define L2CAP_SEQ_LIST_TAIL 0x8000 430#define L2CAP_SEQ_LIST_TAIL 0x8000
421 431
422struct srej_list {
423 __u16 tx_seq;
424 struct list_head list;
425};
426
427struct l2cap_chan { 432struct l2cap_chan {
428 struct sock *sk; 433 struct sock *sk;
429 434
@@ -475,14 +480,12 @@ struct l2cap_chan {
475 __u16 expected_ack_seq; 480 __u16 expected_ack_seq;
476 __u16 expected_tx_seq; 481 __u16 expected_tx_seq;
477 __u16 buffer_seq; 482 __u16 buffer_seq;
478 __u16 buffer_seq_srej;
479 __u16 srej_save_reqseq; 483 __u16 srej_save_reqseq;
480 __u16 last_acked_seq; 484 __u16 last_acked_seq;
481 __u16 frames_sent; 485 __u16 frames_sent;
482 __u16 unacked_frames; 486 __u16 unacked_frames;
483 __u8 retry_count; 487 __u8 retry_count;
484 __u16 srej_queue_next; 488 __u16 srej_queue_next;
485 __u8 num_acked;
486 __u16 sdu_len; 489 __u16 sdu_len;
487 struct sk_buff *sdu; 490 struct sk_buff *sdu;
488 struct sk_buff *sdu_last_frag; 491 struct sk_buff *sdu_last_frag;
@@ -515,7 +518,6 @@ struct l2cap_chan {
515 struct sk_buff_head srej_q; 518 struct sk_buff_head srej_q;
516 struct l2cap_seq_list srej_list; 519 struct l2cap_seq_list srej_list;
517 struct l2cap_seq_list retrans_list; 520 struct l2cap_seq_list retrans_list;
518 struct list_head srej_l;
519 521
520 struct list_head list; 522 struct list_head list;
521 struct list_head global_l; 523 struct list_head global_l;
@@ -528,10 +530,14 @@ struct l2cap_chan {
528struct l2cap_ops { 530struct l2cap_ops {
529 char *name; 531 char *name;
530 532
531 struct l2cap_chan *(*new_connection) (void *data); 533 struct l2cap_chan *(*new_connection) (struct l2cap_chan *chan);
532 int (*recv) (void *data, struct sk_buff *skb); 534 int (*recv) (struct l2cap_chan * chan,
533 void (*close) (void *data); 535 struct sk_buff *skb);
534 void (*state_change) (void *data, int state); 536 void (*teardown) (struct l2cap_chan *chan, int err);
537 void (*close) (struct l2cap_chan *chan);
538 void (*state_change) (struct l2cap_chan *chan,
539 int state);
540 void (*ready) (struct l2cap_chan *chan);
535 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan, 541 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
536 unsigned long len, int nb); 542 unsigned long len, int nb);
537}; 543};
@@ -575,6 +581,7 @@ struct l2cap_conn {
575#define L2CAP_CHAN_RAW 1 581#define L2CAP_CHAN_RAW 1
576#define L2CAP_CHAN_CONN_LESS 2 582#define L2CAP_CHAN_CONN_LESS 2
577#define L2CAP_CHAN_CONN_ORIENTED 3 583#define L2CAP_CHAN_CONN_ORIENTED 3
584#define L2CAP_CHAN_CONN_FIX_A2MP 4
578 585
579/* ----- L2CAP socket info ----- */ 586/* ----- L2CAP socket info ----- */
580#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk) 587#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
@@ -597,6 +604,7 @@ enum {
597 CONF_EWS_RECV, 604 CONF_EWS_RECV,
598 CONF_LOC_CONF_PEND, 605 CONF_LOC_CONF_PEND,
599 CONF_REM_CONF_PEND, 606 CONF_REM_CONF_PEND,
607 CONF_NOT_COMPLETE,
600}; 608};
601 609
602#define L2CAP_CONF_MAX_CONF_REQ 2 610#define L2CAP_CONF_MAX_CONF_REQ 2
@@ -713,11 +721,7 @@ static inline bool l2cap_clear_timer(struct l2cap_chan *chan,
713 721
714#define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t)) 722#define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t))
715#define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer) 723#define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer)
716#define __set_retrans_timer(c) l2cap_set_timer(c, &c->retrans_timer, \
717 msecs_to_jiffies(L2CAP_DEFAULT_RETRANS_TO));
718#define __clear_retrans_timer(c) l2cap_clear_timer(c, &c->retrans_timer) 724#define __clear_retrans_timer(c) l2cap_clear_timer(c, &c->retrans_timer)
719#define __set_monitor_timer(c) l2cap_set_timer(c, &c->monitor_timer, \
720 msecs_to_jiffies(L2CAP_DEFAULT_MONITOR_TO));
721#define __clear_monitor_timer(c) l2cap_clear_timer(c, &c->monitor_timer) 725#define __clear_monitor_timer(c) l2cap_clear_timer(c, &c->monitor_timer)
722#define __set_ack_timer(c) l2cap_set_timer(c, &chan->ack_timer, \ 726#define __set_ack_timer(c) l2cap_set_timer(c, &chan->ack_timer, \
723 msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO)); 727 msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO));
@@ -736,173 +740,17 @@ static inline __u16 __next_seq(struct l2cap_chan *chan, __u16 seq)
736 return (seq + 1) % (chan->tx_win_max + 1); 740 return (seq + 1) % (chan->tx_win_max + 1);
737} 741}
738 742
739static inline int l2cap_tx_window_full(struct l2cap_chan *ch) 743static inline struct l2cap_chan *l2cap_chan_no_new_connection(struct l2cap_chan *chan)
740{
741 int sub;
742
743 sub = (ch->next_tx_seq - ch->expected_ack_seq) % 64;
744
745 if (sub < 0)
746 sub += 64;
747
748 return sub == ch->remote_tx_win;
749}
750
751static inline __u16 __get_reqseq(struct l2cap_chan *chan, __u32 ctrl)
752{
753 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
754 return (ctrl & L2CAP_EXT_CTRL_REQSEQ) >>
755 L2CAP_EXT_CTRL_REQSEQ_SHIFT;
756 else
757 return (ctrl & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
758}
759
760static inline __u32 __set_reqseq(struct l2cap_chan *chan, __u32 reqseq)
761{ 744{
762 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 745 return NULL;
763 return (reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) &
764 L2CAP_EXT_CTRL_REQSEQ;
765 else
766 return (reqseq << L2CAP_CTRL_REQSEQ_SHIFT) & L2CAP_CTRL_REQSEQ;
767} 746}
768 747
769static inline __u16 __get_txseq(struct l2cap_chan *chan, __u32 ctrl) 748static inline void l2cap_chan_no_teardown(struct l2cap_chan *chan, int err)
770{ 749{
771 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
772 return (ctrl & L2CAP_EXT_CTRL_TXSEQ) >>
773 L2CAP_EXT_CTRL_TXSEQ_SHIFT;
774 else
775 return (ctrl & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
776} 750}
777 751
778static inline __u32 __set_txseq(struct l2cap_chan *chan, __u32 txseq) 752static inline void l2cap_chan_no_ready(struct l2cap_chan *chan)
779{ 753{
780 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
781 return (txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) &
782 L2CAP_EXT_CTRL_TXSEQ;
783 else
784 return (txseq << L2CAP_CTRL_TXSEQ_SHIFT) & L2CAP_CTRL_TXSEQ;
785}
786
787static inline bool __is_sframe(struct l2cap_chan *chan, __u32 ctrl)
788{
789 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
790 return ctrl & L2CAP_EXT_CTRL_FRAME_TYPE;
791 else
792 return ctrl & L2CAP_CTRL_FRAME_TYPE;
793}
794
795static inline __u32 __set_sframe(struct l2cap_chan *chan)
796{
797 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
798 return L2CAP_EXT_CTRL_FRAME_TYPE;
799 else
800 return L2CAP_CTRL_FRAME_TYPE;
801}
802
803static inline __u8 __get_ctrl_sar(struct l2cap_chan *chan, __u32 ctrl)
804{
805 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
806 return (ctrl & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
807 else
808 return (ctrl & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
809}
810
811static inline __u32 __set_ctrl_sar(struct l2cap_chan *chan, __u32 sar)
812{
813 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
814 return (sar << L2CAP_EXT_CTRL_SAR_SHIFT) & L2CAP_EXT_CTRL_SAR;
815 else
816 return (sar << L2CAP_CTRL_SAR_SHIFT) & L2CAP_CTRL_SAR;
817}
818
819static inline bool __is_sar_start(struct l2cap_chan *chan, __u32 ctrl)
820{
821 return __get_ctrl_sar(chan, ctrl) == L2CAP_SAR_START;
822}
823
824static inline __u32 __get_sar_mask(struct l2cap_chan *chan)
825{
826 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
827 return L2CAP_EXT_CTRL_SAR;
828 else
829 return L2CAP_CTRL_SAR;
830}
831
832static inline __u8 __get_ctrl_super(struct l2cap_chan *chan, __u32 ctrl)
833{
834 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
835 return (ctrl & L2CAP_EXT_CTRL_SUPERVISE) >>
836 L2CAP_EXT_CTRL_SUPER_SHIFT;
837 else
838 return (ctrl & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
839}
840
841static inline __u32 __set_ctrl_super(struct l2cap_chan *chan, __u32 super)
842{
843 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
844 return (super << L2CAP_EXT_CTRL_SUPER_SHIFT) &
845 L2CAP_EXT_CTRL_SUPERVISE;
846 else
847 return (super << L2CAP_CTRL_SUPER_SHIFT) &
848 L2CAP_CTRL_SUPERVISE;
849}
850
851static inline __u32 __set_ctrl_final(struct l2cap_chan *chan)
852{
853 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
854 return L2CAP_EXT_CTRL_FINAL;
855 else
856 return L2CAP_CTRL_FINAL;
857}
858
859static inline bool __is_ctrl_final(struct l2cap_chan *chan, __u32 ctrl)
860{
861 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
862 return ctrl & L2CAP_EXT_CTRL_FINAL;
863 else
864 return ctrl & L2CAP_CTRL_FINAL;
865}
866
867static inline __u32 __set_ctrl_poll(struct l2cap_chan *chan)
868{
869 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
870 return L2CAP_EXT_CTRL_POLL;
871 else
872 return L2CAP_CTRL_POLL;
873}
874
875static inline bool __is_ctrl_poll(struct l2cap_chan *chan, __u32 ctrl)
876{
877 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
878 return ctrl & L2CAP_EXT_CTRL_POLL;
879 else
880 return ctrl & L2CAP_CTRL_POLL;
881}
882
883static inline __u32 __get_control(struct l2cap_chan *chan, void *p)
884{
885 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
886 return get_unaligned_le32(p);
887 else
888 return get_unaligned_le16(p);
889}
890
891static inline void __put_control(struct l2cap_chan *chan, __u32 control,
892 void *p)
893{
894 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
895 return put_unaligned_le32(control, p);
896 else
897 return put_unaligned_le16(control, p);
898}
899
900static inline __u8 __ctrl_size(struct l2cap_chan *chan)
901{
902 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
903 return L2CAP_EXT_HDR_SIZE - L2CAP_HDR_SIZE;
904 else
905 return L2CAP_ENH_HDR_SIZE - L2CAP_HDR_SIZE;
906} 754}
907 755
908extern bool disable_ertm; 756extern bool disable_ertm;
@@ -926,5 +774,8 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
926void l2cap_chan_busy(struct l2cap_chan *chan, int busy); 774void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
927int l2cap_chan_check_security(struct l2cap_chan *chan); 775int l2cap_chan_check_security(struct l2cap_chan *chan);
928void l2cap_chan_set_defaults(struct l2cap_chan *chan); 776void l2cap_chan_set_defaults(struct l2cap_chan *chan);
777int l2cap_ertm_init(struct l2cap_chan *chan);
778void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
779void l2cap_chan_del(struct l2cap_chan *chan, int err);
929 780
930#endif /* __L2CAP_H */ 781#endif /* __L2CAP_H */
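
With this change the l2cap_ops callbacks take the struct l2cap_chan directly instead of an opaque void *data, and code that does not need a callback can point it at the new l2cap_chan_no_* stubs instead of leaving it NULL. A minimal sketch of an ops table under the new signatures (my_recv and the table itself are hypothetical; the A2MP channel added below fills in the remaining hooks the same way):

#include <net/bluetooth/l2cap.h>

/* Hypothetical fixed-channel user; chan->data still carries private state */
static int my_recv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static struct l2cap_ops my_ops = {
	.name		= "example fixed channel",
	.recv		= my_recv,
	.new_connection	= l2cap_chan_no_new_connection,
	.teardown	= l2cap_chan_no_teardown,
	.ready		= l2cap_chan_no_ready,
};
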
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index d152f54064f..6914f9978ae 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1945,6 +1945,11 @@ enum ieee80211_rate_control_changed {
1945 * to also unregister the device. If it returns 1, then mac80211 1945 * to also unregister the device. If it returns 1, then mac80211
1946 * will also go through the regular complete restart on resume. 1946 * will also go through the regular complete restart on resume.
1947 * 1947 *
1948 * @set_wakeup: Enable or disable wakeup when WoWLAN configuration is
1949 * modified. The reason is that device_set_wakeup_enable() is
1950 * supposed to be called when the configuration changes, not only
1951 * in suspend().
1952 *
1948 * @add_interface: Called when a netdevice attached to the hardware is 1953 * @add_interface: Called when a netdevice attached to the hardware is
1949 * enabled. Because it is not called for monitor mode devices, @start 1954 * enabled. Because it is not called for monitor mode devices, @start
1950 * and @stop must be implemented. 1955 * and @stop must be implemented.
@@ -2974,6 +2979,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
2974 * ieee80211_generic_frame_duration - Calculate the duration field for a frame 2979 * ieee80211_generic_frame_duration - Calculate the duration field for a frame
2975 * @hw: pointer obtained from ieee80211_alloc_hw(). 2980 * @hw: pointer obtained from ieee80211_alloc_hw().
2976 * @vif: &struct ieee80211_vif pointer from the add_interface callback. 2981 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
2982 * @band: the band to calculate the frame duration on
2977 * @frame_len: the length of the frame. 2983 * @frame_len: the length of the frame.
2978 * @rate: the rate at which the frame is going to be transmitted. 2984 * @rate: the rate at which the frame is going to be transmitted.
2979 * 2985 *
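
The new @set_wakeup documentation spells out that drivers should forward the flag to device_set_wakeup_enable() whenever the WoWLAN configuration changes, not only from their suspend() path. A hedged sketch of such a callback (struct my_priv and its dev member are hypothetical):

#include <linux/pm_wakeup.h>
#include <net/mac80211.h>

struct my_priv {
	struct device *dev;		/* hypothetical backing device */
};

static void my_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	struct my_priv *priv = hw->priv;

	device_set_wakeup_enable(priv->dev, enabled);
}
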
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 2dc5a5700f5..fa6d94a4602 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_BT_CMTP) += cmtp/
9obj-$(CONFIG_BT_HIDP) += hidp/ 9obj-$(CONFIG_BT_HIDP) += hidp/
10 10
11bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ 11bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
12 hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o 12 hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
13 a2mp.o
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
new file mode 100644
index 00000000000..fb93250b393
--- /dev/null
+++ b/net/bluetooth/a2mp.c
@@ -0,0 +1,568 @@
1/*
2 Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
3 Copyright (c) 2011,2012 Intel Corp.
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License version 2 and
7 only version 2 as published by the Free Software Foundation.
8
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
13*/
14
15#include <net/bluetooth/bluetooth.h>
16#include <net/bluetooth/hci_core.h>
17#include <net/bluetooth/l2cap.h>
18#include <net/bluetooth/a2mp.h>
19
20/* A2MP build & send command helper functions */
21static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
22{
23 struct a2mp_cmd *cmd;
24 int plen;
25
26 plen = sizeof(*cmd) + len;
27 cmd = kzalloc(plen, GFP_KERNEL);
28 if (!cmd)
29 return NULL;
30
31 cmd->code = code;
32 cmd->ident = ident;
33 cmd->len = cpu_to_le16(len);
34
35 memcpy(cmd->data, data, len);
36
37 return cmd;
38}
39
40static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len,
41 void *data)
42{
43 struct l2cap_chan *chan = mgr->a2mp_chan;
44 struct a2mp_cmd *cmd;
45 u16 total_len = len + sizeof(*cmd);
46 struct kvec iv;
47 struct msghdr msg;
48
49 cmd = __a2mp_build(code, ident, len, data);
50 if (!cmd)
51 return;
52
53 iv.iov_base = cmd;
54 iv.iov_len = total_len;
55
56 memset(&msg, 0, sizeof(msg));
57
58 msg.msg_iov = (struct iovec *) &iv;
59 msg.msg_iovlen = 1;
60
61 l2cap_chan_send(chan, &msg, total_len, 0);
62
63 kfree(cmd);
64}
65
66static inline void __a2mp_cl_bredr(struct a2mp_cl *cl)
67{
68 cl->id = 0;
69 cl->type = 0;
70 cl->status = 1;
71}
72
73/* hci_dev_list shall be locked */
74static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl, u8 num_ctrl)
75{
76 int i = 0;
77 struct hci_dev *hdev;
78
79 __a2mp_cl_bredr(cl);
80
81 list_for_each_entry(hdev, &hci_dev_list, list) {
82 /* Iterate through AMP controllers */
83 if (hdev->id == HCI_BREDR_ID)
84 continue;
85
86 /* Starting from second entry */
87 if (++i >= num_ctrl)
88 return;
89
90 cl[i].id = hdev->id;
91 cl[i].type = hdev->amp_type;
92 cl[i].status = hdev->amp_status;
93 }
94}
95
96/* Processing A2MP messages */
97static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb,
98 struct a2mp_cmd *hdr)
99{
100 struct a2mp_cmd_rej *rej = (void *) skb->data;
101
102 if (le16_to_cpu(hdr->len) < sizeof(*rej))
103 return -EINVAL;
104
105 BT_DBG("ident %d reason %d", hdr->ident, le16_to_cpu(rej->reason));
106
107 skb_pull(skb, sizeof(*rej));
108
109 return 0;
110}
111
112static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
113 struct a2mp_cmd *hdr)
114{
115 struct a2mp_discov_req *req = (void *) skb->data;
116 u16 len = le16_to_cpu(hdr->len);
117 struct a2mp_discov_rsp *rsp;
118 u16 ext_feat;
119 u8 num_ctrl;
120
121 if (len < sizeof(*req))
122 return -EINVAL;
123
124 skb_pull(skb, sizeof(*req));
125
126 ext_feat = le16_to_cpu(req->ext_feat);
127
128 BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat);
129
130	/* Walk any extended feature masks, checking the packet is not truncated */
131 while (ext_feat & A2MP_FEAT_EXT) {
132 if (len < sizeof(ext_feat))
133 return -EINVAL;
134
135 ext_feat = get_unaligned_le16(skb->data);
136 BT_DBG("efm 0x%4.4x", ext_feat);
137 len -= sizeof(ext_feat);
138 skb_pull(skb, sizeof(ext_feat));
139 }
140
141 read_lock(&hci_dev_list_lock);
142
143 num_ctrl = __hci_num_ctrl();
144 len = num_ctrl * sizeof(struct a2mp_cl) + sizeof(*rsp);
145 rsp = kmalloc(len, GFP_ATOMIC);
146 if (!rsp) {
147 read_unlock(&hci_dev_list_lock);
148 return -ENOMEM;
149 }
150
151 rsp->mtu = __constant_cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
152 rsp->ext_feat = 0;
153
154 __a2mp_add_cl(mgr, rsp->cl, num_ctrl);
155
156 read_unlock(&hci_dev_list_lock);
157
158 a2mp_send(mgr, A2MP_DISCOVER_RSP, hdr->ident, len, rsp);
159
160 kfree(rsp);
161 return 0;
162}
163
164static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
165 struct a2mp_cmd *hdr)
166{
167 struct a2mp_cl *cl = (void *) skb->data;
168
169 while (skb->len >= sizeof(*cl)) {
170 BT_DBG("Controller id %d type %d status %d", cl->id, cl->type,
171 cl->status);
172 cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
173 }
174
175 /* TODO send A2MP_CHANGE_RSP */
176
177 return 0;
178}
179
180static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
181 struct a2mp_cmd *hdr)
182{
183 struct a2mp_info_req *req = (void *) skb->data;
184 struct a2mp_info_rsp rsp;
185 struct hci_dev *hdev;
186
187 if (le16_to_cpu(hdr->len) < sizeof(*req))
188 return -EINVAL;
189
190 BT_DBG("id %d", req->id);
191
192 rsp.id = req->id;
193 rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
194
195 hdev = hci_dev_get(req->id);
196 if (hdev && hdev->amp_type != HCI_BREDR) {
197 rsp.status = 0;
198 rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
199 rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
200 rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
201 rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
202 rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
203 }
204
205 if (hdev)
206 hci_dev_put(hdev);
207
208 a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp), &rsp);
209
210 skb_pull(skb, sizeof(*req));
211 return 0;
212}
213
214static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
215 struct a2mp_cmd *hdr)
216{
217 struct a2mp_amp_assoc_req *req = (void *) skb->data;
218 struct hci_dev *hdev;
219
220 if (le16_to_cpu(hdr->len) < sizeof(*req))
221 return -EINVAL;
222
223 BT_DBG("id %d", req->id);
224
225 hdev = hci_dev_get(req->id);
226 if (!hdev || hdev->amp_type == HCI_BREDR) {
227 struct a2mp_amp_assoc_rsp rsp;
228 rsp.id = req->id;
229 rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
230
231 a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp),
232 &rsp);
233 goto clean;
234 }
235
236 /* Placeholder for HCI Read AMP Assoc */
237
238clean:
239 if (hdev)
240 hci_dev_put(hdev);
241
242 skb_pull(skb, sizeof(*req));
243 return 0;
244}
245
246static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
247 struct a2mp_cmd *hdr)
248{
249 struct a2mp_physlink_req *req = (void *) skb->data;
250
251 struct a2mp_physlink_rsp rsp;
252 struct hci_dev *hdev;
253
254 if (le16_to_cpu(hdr->len) < sizeof(*req))
255 return -EINVAL;
256
257 BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
258
259 rsp.local_id = req->remote_id;
260 rsp.remote_id = req->local_id;
261
262 hdev = hci_dev_get(req->remote_id);
263 if (!hdev || hdev->amp_type != HCI_AMP) {
264 rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
265 goto send_rsp;
266 }
267
268 /* TODO process physlink create */
269
270 rsp.status = A2MP_STATUS_SUCCESS;
271
272send_rsp:
273 if (hdev)
274 hci_dev_put(hdev);
275
276 a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident, sizeof(rsp),
277 &rsp);
278
279 skb_pull(skb, le16_to_cpu(hdr->len));
280 return 0;
281}
282
283static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
284 struct a2mp_cmd *hdr)
285{
286 struct a2mp_physlink_req *req = (void *) skb->data;
287 struct a2mp_physlink_rsp rsp;
288 struct hci_dev *hdev;
289
290 if (le16_to_cpu(hdr->len) < sizeof(*req))
291 return -EINVAL;
292
293 BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id);
294
295 rsp.local_id = req->remote_id;
296 rsp.remote_id = req->local_id;
297 rsp.status = A2MP_STATUS_SUCCESS;
298
299 hdev = hci_dev_get(req->local_id);
300 if (!hdev) {
301 rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
302 goto send_rsp;
303 }
304
305 /* TODO Disconnect Phys Link here */
306
307 hci_dev_put(hdev);
308
309send_rsp:
310 a2mp_send(mgr, A2MP_DISCONNPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp);
311
312 skb_pull(skb, sizeof(*req));
313 return 0;
314}
315
316static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
317 struct a2mp_cmd *hdr)
318{
319 BT_DBG("ident %d code %d", hdr->ident, hdr->code);
320
321 skb_pull(skb, le16_to_cpu(hdr->len));
322 return 0;
323}
324
325/* Handle A2MP signalling */
326static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
327{
328 struct a2mp_cmd *hdr = (void *) skb->data;
329 struct amp_mgr *mgr = chan->data;
330 int err = 0;
331
332 amp_mgr_get(mgr);
333
334 while (skb->len >= sizeof(*hdr)) {
335 struct a2mp_cmd *hdr = (void *) skb->data;
336 u16 len = le16_to_cpu(hdr->len);
337
338 BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, len);
339
340 skb_pull(skb, sizeof(*hdr));
341
342 if (len > skb->len || !hdr->ident) {
343 err = -EINVAL;
344 break;
345 }
346
347 mgr->ident = hdr->ident;
348
349 switch (hdr->code) {
350 case A2MP_COMMAND_REJ:
351 a2mp_command_rej(mgr, skb, hdr);
352 break;
353
354 case A2MP_DISCOVER_REQ:
355 err = a2mp_discover_req(mgr, skb, hdr);
356 break;
357
358 case A2MP_CHANGE_NOTIFY:
359 err = a2mp_change_notify(mgr, skb, hdr);
360 break;
361
362 case A2MP_GETINFO_REQ:
363 err = a2mp_getinfo_req(mgr, skb, hdr);
364 break;
365
366 case A2MP_GETAMPASSOC_REQ:
367 err = a2mp_getampassoc_req(mgr, skb, hdr);
368 break;
369
370 case A2MP_CREATEPHYSLINK_REQ:
371 err = a2mp_createphyslink_req(mgr, skb, hdr);
372 break;
373
374 case A2MP_DISCONNPHYSLINK_REQ:
375 err = a2mp_discphyslink_req(mgr, skb, hdr);
376 break;
377
378 case A2MP_CHANGE_RSP:
379 case A2MP_DISCOVER_RSP:
380 case A2MP_GETINFO_RSP:
381 case A2MP_GETAMPASSOC_RSP:
382 case A2MP_CREATEPHYSLINK_RSP:
383 case A2MP_DISCONNPHYSLINK_RSP:
384 err = a2mp_cmd_rsp(mgr, skb, hdr);
385 break;
386
387 default:
388 BT_ERR("Unknown A2MP sig cmd 0x%2.2x", hdr->code);
389 err = -EINVAL;
390 break;
391 }
392 }
393
394 if (err) {
395 struct a2mp_cmd_rej rej;
396 rej.reason = __constant_cpu_to_le16(0);
397
398 BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err);
399
400 a2mp_send(mgr, A2MP_COMMAND_REJ, hdr->ident, sizeof(rej),
401 &rej);
402 }
403
404	/* Always free the skb and return success so the L2CAP core
405	   never sends an L2CAP Disconnect over the A2MP channel */
406 kfree_skb(skb);
407
408 amp_mgr_put(mgr);
409
410 return 0;
411}
412
413static void a2mp_chan_close_cb(struct l2cap_chan *chan)
414{
415 l2cap_chan_destroy(chan);
416}
417
418static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state)
419{
420 struct amp_mgr *mgr = chan->data;
421
422 if (!mgr)
423 return;
424
425 BT_DBG("chan %p state %s", chan, state_to_string(state));
426
427 chan->state = state;
428
429 switch (state) {
430 case BT_CLOSED:
431 if (mgr)
432 amp_mgr_put(mgr);
433 break;
434 }
435}
436
437static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan,
438 unsigned long len, int nb)
439{
440 return bt_skb_alloc(len, GFP_KERNEL);
441}
442
443static struct l2cap_ops a2mp_chan_ops = {
444 .name = "L2CAP A2MP channel",
445 .recv = a2mp_chan_recv_cb,
446 .close = a2mp_chan_close_cb,
447 .state_change = a2mp_chan_state_change_cb,
448 .alloc_skb = a2mp_chan_alloc_skb_cb,
449
450 /* Not implemented for A2MP */
451 .new_connection = l2cap_chan_no_new_connection,
452 .teardown = l2cap_chan_no_teardown,
453 .ready = l2cap_chan_no_ready,
454};
455
456static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn)
457{
458 struct l2cap_chan *chan;
459 int err;
460
461 chan = l2cap_chan_create();
462 if (!chan)
463 return NULL;
464
465 BT_DBG("chan %p", chan);
466
467 chan->chan_type = L2CAP_CHAN_CONN_FIX_A2MP;
468 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
469
470 chan->ops = &a2mp_chan_ops;
471
472 l2cap_chan_set_defaults(chan);
473 chan->remote_max_tx = chan->max_tx;
474 chan->remote_tx_win = chan->tx_win;
475
476 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
477 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
478
479 skb_queue_head_init(&chan->tx_q);
480
481 chan->mode = L2CAP_MODE_ERTM;
482
483 err = l2cap_ertm_init(chan);
484 if (err < 0) {
485 l2cap_chan_del(chan, 0);
486 return NULL;
487 }
488
489 chan->conf_state = 0;
490
491 l2cap_chan_add(conn, chan);
492
493 chan->remote_mps = chan->omtu;
494 chan->mps = chan->omtu;
495
496 chan->state = BT_CONNECTED;
497
498 return chan;
499}
500
501/* AMP Manager functions */
502void amp_mgr_get(struct amp_mgr *mgr)
503{
504 BT_DBG("mgr %p", mgr);
505
506 kref_get(&mgr->kref);
507}
508
509static void amp_mgr_destroy(struct kref *kref)
510{
511 struct amp_mgr *mgr = container_of(kref, struct amp_mgr, kref);
512
513 BT_DBG("mgr %p", mgr);
514
515 kfree(mgr);
516}
517
518int amp_mgr_put(struct amp_mgr *mgr)
519{
520 BT_DBG("mgr %p", mgr);
521
522 return kref_put(&mgr->kref, &amp_mgr_destroy);
523}
524
525static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn)
526{
527 struct amp_mgr *mgr;
528 struct l2cap_chan *chan;
529
530 mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
531 if (!mgr)
532 return NULL;
533
534 BT_DBG("conn %p mgr %p", conn, mgr);
535
536 mgr->l2cap_conn = conn;
537
538 chan = a2mp_chan_open(conn);
539 if (!chan) {
540 kfree(mgr);
541 return NULL;
542 }
543
544 mgr->a2mp_chan = chan;
545 chan->data = mgr;
546
547 conn->hcon->amp_mgr = mgr;
548
549 kref_init(&mgr->kref);
550
551 return mgr;
552}
553
554struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
555 struct sk_buff *skb)
556{
557 struct amp_mgr *mgr;
558
559 mgr = amp_mgr_create(conn);
560 if (!mgr) {
561 BT_ERR("Could not create AMP manager");
562 return NULL;
563 }
564
565	BT_DBG("mgr %p chan %p", mgr, mgr->a2mp_chan);
566
567 return mgr->a2mp_chan;
568}
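
a2mp_channel_create() is the entry point the L2CAP core is expected to call when a frame arrives on the A2MP fixed channel (L2CAP_CID_A2MP) and no manager exists yet for the connection. A sketch of what the dispatch site might look like (the caller is an assumption; this patch only adds the A2MP side):

/* Assumed caller, e.g. in l2cap_core.c frame demultiplexing */
static void dispatch_a2mp(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = a2mp_channel_create(conn, skb);
	if (!chan) {
		kfree_skb(skb);
		return;
	}

	/* recv consumes the skb and always reports success, so a
	 * malformed PDU never triggers an L2CAP Disconnect */
	chan->ops->recv(chan, skb);
}
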
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 3e18af4dadc..f7db5792ec6 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -25,18 +25,7 @@
25/* Bluetooth address family and sockets. */ 25/* Bluetooth address family and sockets. */
26 26
27#include <linux/module.h> 27#include <linux/module.h>
28
29#include <linux/types.h>
30#include <linux/list.h>
31#include <linux/errno.h>
32#include <linux/kernel.h>
33#include <linux/sched.h>
34#include <linux/skbuff.h>
35#include <linux/init.h>
36#include <linux/poll.h>
37#include <net/sock.h>
38#include <asm/ioctls.h> 28#include <asm/ioctls.h>
39#include <linux/kmod.h>
40 29
41#include <net/bluetooth/bluetooth.h> 30#include <net/bluetooth/bluetooth.h>
42 31
@@ -418,7 +407,8 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
418 return 0; 407 return 0;
419} 408}
420 409
421unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait) 410unsigned int bt_sock_poll(struct file *file, struct socket *sock,
411 poll_table *wait)
422{ 412{
423 struct sock *sk = sock->sk; 413 struct sock *sk = sock->sk;
424 unsigned int mask = 0; 414 unsigned int mask = 0;
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 031d7d65675..4a6620bc157 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -26,26 +26,9 @@
26*/ 26*/
27 27
28#include <linux/module.h> 28#include <linux/module.h>
29
30#include <linux/kernel.h>
31#include <linux/sched.h>
32#include <linux/signal.h>
33#include <linux/init.h>
34#include <linux/wait.h>
35#include <linux/freezer.h>
36#include <linux/errno.h>
37#include <linux/net.h>
38#include <linux/slab.h>
39#include <linux/kthread.h> 29#include <linux/kthread.h>
40#include <net/sock.h>
41
42#include <linux/socket.h>
43#include <linux/file.h> 30#include <linux/file.h>
44
45#include <linux/netdevice.h>
46#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
47#include <linux/skbuff.h>
48
49#include <asm/unaligned.h> 32#include <asm/unaligned.h>
50 33
51#include <net/bluetooth/bluetooth.h> 34#include <net/bluetooth/bluetooth.h>
@@ -306,7 +289,7 @@ static u8 __bnep_rx_hlen[] = {
306 ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */ 289 ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */
307}; 290};
308 291
309static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) 292static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
310{ 293{
311 struct net_device *dev = s->dev; 294 struct net_device *dev = s->dev;
312 struct sk_buff *nskb; 295 struct sk_buff *nskb;
@@ -404,7 +387,7 @@ static u8 __bnep_tx_types[] = {
404 BNEP_COMPRESSED 387 BNEP_COMPRESSED
405}; 388};
406 389
407static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb) 390static int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
408{ 391{
409 struct ethhdr *eh = (void *) skb->data; 392 struct ethhdr *eh = (void *) skb->data;
410 struct socket *sock = s->sock; 393 struct socket *sock = s->sock;
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index bc4086480d9..98f86f91d47 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -25,16 +25,8 @@
25 SOFTWARE IS DISCLAIMED. 25 SOFTWARE IS DISCLAIMED.
26*/ 26*/
27 27
28#include <linux/module.h> 28#include <linux/export.h>
29#include <linux/slab.h>
30
31#include <linux/socket.h>
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
34#include <linux/skbuff.h>
35#include <linux/wait.h>
36
37#include <asm/unaligned.h>
38 30
39#include <net/bluetooth/bluetooth.h> 31#include <net/bluetooth/bluetooth.h>
40#include <net/bluetooth/hci_core.h> 32#include <net/bluetooth/hci_core.h>
@@ -128,7 +120,7 @@ static void bnep_net_timeout(struct net_device *dev)
128} 120}
129 121
130#ifdef CONFIG_BT_BNEP_MC_FILTER 122#ifdef CONFIG_BT_BNEP_MC_FILTER
131static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) 123static int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
132{ 124{
133 struct ethhdr *eh = (void *) skb->data; 125 struct ethhdr *eh = (void *) skb->data;
134 126
@@ -140,7 +132,7 @@ static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s
140 132
141#ifdef CONFIG_BT_BNEP_PROTO_FILTER 133#ifdef CONFIG_BT_BNEP_PROTO_FILTER
142/* Determine ether protocol. Based on eth_type_trans. */ 134/* Determine ether protocol. Based on eth_type_trans. */
143static inline u16 bnep_net_eth_proto(struct sk_buff *skb) 135static u16 bnep_net_eth_proto(struct sk_buff *skb)
144{ 136{
145 struct ethhdr *eh = (void *) skb->data; 137 struct ethhdr *eh = (void *) skb->data;
146 u16 proto = ntohs(eh->h_proto); 138 u16 proto = ntohs(eh->h_proto);
@@ -154,7 +146,7 @@ static inline u16 bnep_net_eth_proto(struct sk_buff *skb)
154 return ETH_P_802_2; 146 return ETH_P_802_2;
155} 147}
156 148
157static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s) 149static int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
158{ 150{
159 u16 proto = bnep_net_eth_proto(skb); 151 u16 proto = bnep_net_eth_proto(skb);
160 struct bnep_proto_filter *f = s->proto_filter; 152 struct bnep_proto_filter *f = s->proto_filter;
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 180bfc45810..5e5f5b410e0 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -24,24 +24,8 @@
24 SOFTWARE IS DISCLAIMED. 24 SOFTWARE IS DISCLAIMED.
25*/ 25*/
26 26
27#include <linux/module.h> 27#include <linux/export.h>
28
29#include <linux/types.h>
30#include <linux/capability.h>
31#include <linux/errno.h>
32#include <linux/kernel.h>
33#include <linux/poll.h>
34#include <linux/fcntl.h>
35#include <linux/skbuff.h>
36#include <linux/socket.h>
37#include <linux/ioctl.h>
38#include <linux/file.h> 28#include <linux/file.h>
39#include <linux/init.h>
40#include <linux/compat.h>
41#include <linux/gfp.h>
42#include <linux/uaccess.h>
43#include <net/sock.h>
44
45 29
46#include "bnep.h" 30#include "bnep.h"
47 31
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 3f18a6ed973..2fcced377e5 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -24,24 +24,11 @@
24 24
25/* Bluetooth HCI connection handling. */ 25/* Bluetooth HCI connection handling. */
26 26
27#include <linux/module.h> 27#include <linux/export.h>
28
29#include <linux/types.h>
30#include <linux/errno.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/poll.h>
34#include <linux/fcntl.h>
35#include <linux/init.h>
36#include <linux/skbuff.h>
37#include <linux/interrupt.h>
38#include <net/sock.h>
39
40#include <linux/uaccess.h>
41#include <asm/unaligned.h>
42 28
43#include <net/bluetooth/bluetooth.h> 29#include <net/bluetooth/bluetooth.h>
44#include <net/bluetooth/hci_core.h> 30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/a2mp.h>
45 32
46static void hci_le_connect(struct hci_conn *conn) 33static void hci_le_connect(struct hci_conn *conn)
47{ 34{
@@ -54,15 +41,15 @@ static void hci_le_connect(struct hci_conn *conn)
54 conn->sec_level = BT_SECURITY_LOW; 41 conn->sec_level = BT_SECURITY_LOW;
55 42
56 memset(&cp, 0, sizeof(cp)); 43 memset(&cp, 0, sizeof(cp));
57 cp.scan_interval = cpu_to_le16(0x0060); 44 cp.scan_interval = __constant_cpu_to_le16(0x0060);
58 cp.scan_window = cpu_to_le16(0x0030); 45 cp.scan_window = __constant_cpu_to_le16(0x0030);
59 bacpy(&cp.peer_addr, &conn->dst); 46 bacpy(&cp.peer_addr, &conn->dst);
60 cp.peer_addr_type = conn->dst_type; 47 cp.peer_addr_type = conn->dst_type;
61 cp.conn_interval_min = cpu_to_le16(0x0028); 48 cp.conn_interval_min = __constant_cpu_to_le16(0x0028);
62 cp.conn_interval_max = cpu_to_le16(0x0038); 49 cp.conn_interval_max = __constant_cpu_to_le16(0x0038);
63 cp.supervision_timeout = cpu_to_le16(0x002a); 50 cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
64 cp.min_ce_len = cpu_to_le16(0x0000); 51 cp.min_ce_len = __constant_cpu_to_le16(0x0000);
65 cp.max_ce_len = cpu_to_le16(0x0000); 52 cp.max_ce_len = __constant_cpu_to_le16(0x0000);
66 53
67 hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); 54 hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
68} 55}
@@ -99,7 +86,7 @@ void hci_acl_connect(struct hci_conn *conn)
99 cp.pscan_rep_mode = ie->data.pscan_rep_mode; 86 cp.pscan_rep_mode = ie->data.pscan_rep_mode;
100 cp.pscan_mode = ie->data.pscan_mode; 87 cp.pscan_mode = ie->data.pscan_mode;
101 cp.clock_offset = ie->data.clock_offset | 88 cp.clock_offset = ie->data.clock_offset |
102 cpu_to_le16(0x8000); 89 __constant_cpu_to_le16(0x8000);
103 } 90 }
104 91
105 memcpy(conn->dev_class, ie->data.dev_class, 3); 92 memcpy(conn->dev_class, ie->data.dev_class, 3);
@@ -175,9 +162,9 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
175 cp.handle = cpu_to_le16(handle); 162 cp.handle = cpu_to_le16(handle);
176 cp.pkt_type = cpu_to_le16(conn->pkt_type); 163 cp.pkt_type = cpu_to_le16(conn->pkt_type);
177 164
178 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 165 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
179 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 166 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
180 cp.max_latency = cpu_to_le16(0xffff); 167 cp.max_latency = __constant_cpu_to_le16(0xffff);
181 cp.voice_setting = cpu_to_le16(hdev->voice_setting); 168 cp.voice_setting = cpu_to_le16(hdev->voice_setting);
182 cp.retrans_effort = 0xff; 169 cp.retrans_effort = 0xff;
183 170
@@ -185,7 +172,7 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
185} 172}
186 173
187void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, 174void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
188 u16 latency, u16 to_multiplier) 175 u16 latency, u16 to_multiplier)
189{ 176{
190 struct hci_cp_le_conn_update cp; 177 struct hci_cp_le_conn_update cp;
191 struct hci_dev *hdev = conn->hdev; 178 struct hci_dev *hdev = conn->hdev;
@@ -197,15 +184,14 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
197 cp.conn_interval_max = cpu_to_le16(max); 184 cp.conn_interval_max = cpu_to_le16(max);
198 cp.conn_latency = cpu_to_le16(latency); 185 cp.conn_latency = cpu_to_le16(latency);
199 cp.supervision_timeout = cpu_to_le16(to_multiplier); 186 cp.supervision_timeout = cpu_to_le16(to_multiplier);
200 cp.min_ce_len = cpu_to_le16(0x0001); 187 cp.min_ce_len = __constant_cpu_to_le16(0x0001);
201 cp.max_ce_len = cpu_to_le16(0x0001); 188 cp.max_ce_len = __constant_cpu_to_le16(0x0001);
202 189
203 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp); 190 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
204} 191}
205EXPORT_SYMBOL(hci_le_conn_update);
206 192
207void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], 193void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
208 __u8 ltk[16]) 194 __u8 ltk[16])
209{ 195{
210 struct hci_dev *hdev = conn->hdev; 196 struct hci_dev *hdev = conn->hdev;
211 struct hci_cp_le_start_enc cp; 197 struct hci_cp_le_start_enc cp;
@@ -221,7 +207,6 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
221 207
222 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp); 208 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
223} 209}
224EXPORT_SYMBOL(hci_le_start_enc);
225 210
226/* Device _must_ be locked */ 211/* Device _must_ be locked */
227void hci_sco_setup(struct hci_conn *conn, __u8 status) 212void hci_sco_setup(struct hci_conn *conn, __u8 status)
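
Alongside the reflow and constant churn, this patch un-exports a batch of helpers: EXPORT_SYMBOL() disappears from hci_le_conn_update() and hci_le_start_enc() above, and from hci_connect(), hci_conn_check_link_mode(), hci_conn_change_link_key(), hci_send_acl() and others below. That is safe once every caller lives inside the bluetooth module itself; the functions stay callable internally but drop out of the module symbol table. For reference, a sketch of what the marker buys, using an invented helper:

#include <linux/module.h>

int example_helper(int x)
{
        return x + 1;
}
EXPORT_SYMBOL(example_helper);  /* resolvable by other modules at load time */

/* Without the EXPORT_SYMBOL() line, example_helper() still links fine
 * within this module, but an external module that calls it fails to
 * load with "Unknown symbol example_helper". */
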
@@ -247,7 +232,7 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status)
247static void hci_conn_timeout(struct work_struct *work) 232static void hci_conn_timeout(struct work_struct *work)
248{ 233{
249 struct hci_conn *conn = container_of(work, struct hci_conn, 234 struct hci_conn *conn = container_of(work, struct hci_conn,
250 disc_work.work); 235 disc_work.work);
251 __u8 reason; 236 __u8 reason;
252 237
253 BT_DBG("conn %p state %s", conn, state_to_string(conn->state)); 238 BT_DBG("conn %p state %s", conn, state_to_string(conn->state));
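
hci_conn_timeout() is only reindented here, but the idiom deserves a note: the delayed work item is embedded inside struct hci_conn, and container_of() walks from the work pointer the workqueue passes in back to the enclosing connection. A self-contained sketch of the same embed-and-recover pattern, with invented names:

#include <linux/workqueue.h>
#include <linux/printk.h>

struct example_conn {
        struct delayed_work disc_work;  /* embedded work item */
        int state;
};

static void example_timeout(struct work_struct *work)
{
        /* 'work' is &conn->disc_work.work for some example_conn;
         * container_of() recovers the enclosing structure. */
        struct example_conn *conn = container_of(work, struct example_conn,
                                                 disc_work.work);

        pr_debug("conn %p state %d\n", conn, conn->state);
}

INIT_DELAYED_WORK(&conn->disc_work, example_timeout) wires the handler up, exactly as hci_conn_add() does further down with hci_conn_timeout().
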
@@ -295,9 +280,9 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
295 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) { 280 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
296 struct hci_cp_sniff_subrate cp; 281 struct hci_cp_sniff_subrate cp;
297 cp.handle = cpu_to_le16(conn->handle); 282 cp.handle = cpu_to_le16(conn->handle);
298 cp.max_latency = cpu_to_le16(0); 283 cp.max_latency = __constant_cpu_to_le16(0);
299 cp.min_remote_timeout = cpu_to_le16(0); 284 cp.min_remote_timeout = __constant_cpu_to_le16(0);
300 cp.min_local_timeout = cpu_to_le16(0); 285 cp.min_local_timeout = __constant_cpu_to_le16(0);
301 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp); 286 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
302 } 287 }
303 288
@@ -306,8 +291,8 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
306 cp.handle = cpu_to_le16(conn->handle); 291 cp.handle = cpu_to_le16(conn->handle);
307 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval); 292 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
308 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); 293 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
309 cp.attempt = cpu_to_le16(4); 294 cp.attempt = __constant_cpu_to_le16(4);
310 cp.timeout = cpu_to_le16(1); 295 cp.timeout = __constant_cpu_to_le16(1);
311 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp); 296 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
312 } 297 }
313} 298}
@@ -327,7 +312,7 @@ static void hci_conn_auto_accept(unsigned long arg)
327 struct hci_dev *hdev = conn->hdev; 312 struct hci_dev *hdev = conn->hdev;
328 313
329 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst), 314 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
330 &conn->dst); 315 &conn->dst);
331} 316}
332 317
333struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) 318struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
@@ -376,7 +361,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
376 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout); 361 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
377 setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn); 362 setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
378 setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept, 363 setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
379 (unsigned long) conn); 364 (unsigned long) conn);
380 365
381 atomic_set(&conn->refcnt, 0); 366 atomic_set(&conn->refcnt, 0);
382 367
@@ -425,9 +410,11 @@ int hci_conn_del(struct hci_conn *conn)
425 } 410 }
426 } 411 }
427 412
428
429 hci_chan_list_flush(conn); 413 hci_chan_list_flush(conn);
430 414
415 if (conn->amp_mgr)
416 amp_mgr_put(conn->amp_mgr);
417
431 hci_conn_hash_del(hdev, conn); 418 hci_conn_hash_del(hdev, conn);
432 if (hdev->notify) 419 if (hdev->notify)
433 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); 420 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
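
The one functional change in this hunk: before the connection is unhashed, hci_conn_del() now drops the reference it holds on its AMP manager, if one is attached. The manager's definition is not part of this diff; the sketch below assumes the usual kref layout and an invented release function, purely to show the pairing that amp_mgr_put() completes.

#include <linux/kref.h>
#include <linux/slab.h>

struct amp_mgr {                        /* field names assumed */
        struct kref kref;
        /* ... */
};

static void amp_mgr_destroy(struct kref *kref)
{
        struct amp_mgr *mgr = container_of(kref, struct amp_mgr, kref);

        kfree(mgr);
}

void amp_mgr_put(struct amp_mgr *mgr)
{
        /* frees the manager when the last holder lets go */
        kref_put(&mgr->kref, amp_mgr_destroy);
}
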
@@ -454,7 +441,8 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
454 read_lock(&hci_dev_list_lock); 441 read_lock(&hci_dev_list_lock);
455 442
456 list_for_each_entry(d, &hci_dev_list, list) { 443 list_for_each_entry(d, &hci_dev_list, list) {
457 if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags)) 444 if (!test_bit(HCI_UP, &d->flags) ||
445 test_bit(HCI_RAW, &d->flags))
458 continue; 446 continue;
459 447
460 /* Simple routing: 448 /* Simple routing:
@@ -495,6 +483,11 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
495 if (type == LE_LINK) { 483 if (type == LE_LINK) {
496 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); 484 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
497 if (!le) { 485 if (!le) {
486 le = hci_conn_hash_lookup_state(hdev, LE_LINK,
487 BT_CONNECT);
488 if (le)
489 return ERR_PTR(-EBUSY);
490
498 le = hci_conn_add(hdev, LE_LINK, dst); 491 le = hci_conn_add(hdev, LE_LINK, dst);
499 if (!le) 492 if (!le)
500 return ERR_PTR(-ENOMEM); 493 return ERR_PTR(-ENOMEM);
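
The added lookup makes hci_connect() refuse to start a second LE connection while another is still in BT_CONNECT: the controller tracks only one outstanding LE Create Connection, so a second request would fail there anyway. Callers now get ERR_PTR(-EBUSY) and are expected to retry once the pending attempt resolves. Caller-side shape (a fragment; the trailing hci_connect() arguments are elided, as in the hunk header above):

struct hci_conn *conn;

conn = hci_connect(hdev, LE_LINK, dst, /* ... */);
if (IS_ERR(conn)) {
        /* -EBUSY now means an LE create-connection is already pending */
        return PTR_ERR(conn);
}
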
@@ -545,7 +538,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
545 hci_conn_hold(sco); 538 hci_conn_hold(sco);
546 539
547 if (acl->state == BT_CONNECTED && 540 if (acl->state == BT_CONNECTED &&
548 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { 541 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
549 set_bit(HCI_CONN_POWER_SAVE, &acl->flags); 542 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
550 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON); 543 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
551 544
@@ -560,7 +553,6 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
560 553
561 return sco; 554 return sco;
562} 555}
563EXPORT_SYMBOL(hci_connect);
564 556
565/* Check link security requirement */ 557/* Check link security requirement */
566int hci_conn_check_link_mode(struct hci_conn *conn) 558int hci_conn_check_link_mode(struct hci_conn *conn)
@@ -572,7 +564,6 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
572 564
573 return 1; 565 return 1;
574} 566}
575EXPORT_SYMBOL(hci_conn_check_link_mode);
576 567
577/* Authenticate remote device */ 568/* Authenticate remote device */
578static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) 569static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
@@ -600,7 +591,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
600 591
601 cp.handle = cpu_to_le16(conn->handle); 592 cp.handle = cpu_to_le16(conn->handle);
602 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, 593 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
603 sizeof(cp), &cp); 594 sizeof(cp), &cp);
604 if (conn->key_type != 0xff) 595 if (conn->key_type != 0xff)
605 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags); 596 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
606 } 597 }
@@ -618,7 +609,7 @@ static void hci_conn_encrypt(struct hci_conn *conn)
618 cp.handle = cpu_to_le16(conn->handle); 609 cp.handle = cpu_to_le16(conn->handle);
619 cp.encrypt = 0x01; 610 cp.encrypt = 0x01;
620 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 611 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
621 &cp); 612 &cp);
622 } 613 }
623} 614}
624 615
@@ -648,8 +639,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
648 /* An unauthenticated combination key has sufficient security for 639 /* An unauthenticated combination key has sufficient security for
649 security level 1 and 2. */ 640 security level 1 and 2. */
650 if (conn->key_type == HCI_LK_UNAUTH_COMBINATION && 641 if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
651 (sec_level == BT_SECURITY_MEDIUM || 642 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
652 sec_level == BT_SECURITY_LOW))
653 goto encrypt; 643 goto encrypt;
654 644
655 /* A combination key has always sufficient security for the security 645 /* A combination key has always sufficient security for the security
@@ -657,8 +647,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
657 is generated using maximum PIN code length (16). 647 is generated using maximum PIN code length (16).
658 For pre 2.1 units. */ 648 For pre 2.1 units. */
659 if (conn->key_type == HCI_LK_COMBINATION && 649 if (conn->key_type == HCI_LK_COMBINATION &&
660 (sec_level != BT_SECURITY_HIGH || 650 (sec_level != BT_SECURITY_HIGH || conn->pin_length == 16))
661 conn->pin_length == 16))
662 goto encrypt; 651 goto encrypt;
663 652
664auth: 653auth:
@@ -701,12 +690,11 @@ int hci_conn_change_link_key(struct hci_conn *conn)
701 struct hci_cp_change_conn_link_key cp; 690 struct hci_cp_change_conn_link_key cp;
702 cp.handle = cpu_to_le16(conn->handle); 691 cp.handle = cpu_to_le16(conn->handle);
703 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY, 692 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
704 sizeof(cp), &cp); 693 sizeof(cp), &cp);
705 } 694 }
706 695
707 return 0; 696 return 0;
708} 697}
709EXPORT_SYMBOL(hci_conn_change_link_key);
710 698
711/* Switch role */ 699/* Switch role */
712int hci_conn_switch_role(struct hci_conn *conn, __u8 role) 700int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
@@ -752,7 +740,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
752timer: 740timer:
753 if (hdev->idle_timeout > 0) 741 if (hdev->idle_timeout > 0)
754 mod_timer(&conn->idle_timer, 742 mod_timer(&conn->idle_timer,
755 jiffies + msecs_to_jiffies(hdev->idle_timeout)); 743 jiffies + msecs_to_jiffies(hdev->idle_timeout));
756} 744}
757 745
758/* Drop all connection on the device */ 746/* Drop all connection on the device */
@@ -802,7 +790,7 @@ EXPORT_SYMBOL(hci_conn_put_device);
802 790
803int hci_get_conn_list(void __user *arg) 791int hci_get_conn_list(void __user *arg)
804{ 792{
805 register struct hci_conn *c; 793 struct hci_conn *c;
806 struct hci_conn_list_req req, *cl; 794 struct hci_conn_list_req req, *cl;
807 struct hci_conn_info *ci; 795 struct hci_conn_info *ci;
808 struct hci_dev *hdev; 796 struct hci_dev *hdev;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 411ace8e647..08994ecc3b6 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -25,28 +25,10 @@
25 25
26/* Bluetooth HCI core. */ 26/* Bluetooth HCI core. */
27 27
28#include <linux/jiffies.h> 28#include <linux/export.h>
29#include <linux/module.h> 29#include <linux/idr.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
41#include <linux/workqueue.h>
42#include <linux/interrupt.h>
43#include <linux/rfkill.h>
44#include <linux/timer.h>
45#include <linux/crypto.h>
46#include <net/sock.h>
47 30
48#include <linux/uaccess.h> 31#include <linux/rfkill.h>
49#include <asm/unaligned.h>
50 32
51#include <net/bluetooth/bluetooth.h> 33#include <net/bluetooth/bluetooth.h>
52#include <net/bluetooth/hci_core.h> 34#include <net/bluetooth/hci_core.h>
@@ -65,6 +47,9 @@ DEFINE_RWLOCK(hci_dev_list_lock);
65LIST_HEAD(hci_cb_list); 47LIST_HEAD(hci_cb_list);
66DEFINE_RWLOCK(hci_cb_list_lock); 48DEFINE_RWLOCK(hci_cb_list_lock);
67 49
50/* HCI ID Numbering */
51static DEFINE_IDA(hci_index_ida);
52
68/* ---- HCI notifications ---- */ 53/* ---- HCI notifications ---- */
69 54
70static void hci_notify(struct hci_dev *hdev, int event) 55static void hci_notify(struct hci_dev *hdev, int event)
@@ -124,8 +109,9 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
124} 109}
125 110
126/* Execute request and wait for completion. */ 111/* Execute request and wait for completion. */
127static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 112static int __hci_request(struct hci_dev *hdev,
128 unsigned long opt, __u32 timeout) 113 void (*req)(struct hci_dev *hdev, unsigned long opt),
114 unsigned long opt, __u32 timeout)
129{ 115{
130 DECLARE_WAITQUEUE(wait, current); 116 DECLARE_WAITQUEUE(wait, current);
131 int err = 0; 117 int err = 0;
@@ -166,8 +152,9 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
166 return err; 152 return err;
167} 153}
168 154
169static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 155static int hci_request(struct hci_dev *hdev,
170 unsigned long opt, __u32 timeout) 156 void (*req)(struct hci_dev *hdev, unsigned long opt),
157 unsigned long opt, __u32 timeout)
171{ 158{
172 int ret; 159 int ret;
173 160
@@ -202,7 +189,7 @@ static void bredr_init(struct hci_dev *hdev)
202 /* Mandatory initialization */ 189 /* Mandatory initialization */
203 190
204 /* Reset */ 191 /* Reset */
205 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) { 192 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
206 set_bit(HCI_RESET, &hdev->flags); 193 set_bit(HCI_RESET, &hdev->flags);
207 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); 194 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
208 } 195 }
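
Note the quirk rename here and in hci_dev_do_close() further down: HCI_QUIRK_NO_RESET becomes HCI_QUIRK_RESET_ON_CLOSE with no change in logic. The new name says what the bit actually does: a driver that sets it skips HCI_Reset during init (this hunk) and has the core issue the reset at close time instead (the hci_dev_do_close() hunk below). Driver-side sketch, with an invented setup function:

static int example_setup(struct hci_dev *hdev)
{
        /* controller cannot take HCI_Reset while initializing;
         * ask the core to reset it on close instead */
        set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);

        return 0;
}
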
@@ -235,7 +222,7 @@ static void bredr_init(struct hci_dev *hdev)
235 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type); 222 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
236 223
237 /* Connection accept timeout ~20 secs */ 224 /* Connection accept timeout ~20 secs */
238 param = cpu_to_le16(0x7d00); 225 param = __constant_cpu_to_le16(0x7d00);
239 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param); 226 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
240 227
241 bacpy(&cp.bdaddr, BDADDR_ANY); 228 bacpy(&cp.bdaddr, BDADDR_ANY);
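
The "~20 secs" comment follows from the unit: the Connection Accept Timeout is counted in baseband slots of 0.625 ms, so 0x7d00 = 32000 slots = 32000 * 0.625 ms = 20 s. A throwaway check:

#include <stdio.h>

int main(void)
{
        unsigned int slots = 0x7d00;            /* 32000 */

        printf("%.0f ms\n", slots * 0.625);     /* prints: 20000 ms */
        return 0;
}
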
@@ -417,7 +404,8 @@ static void inquiry_cache_flush(struct hci_dev *hdev)
417 INIT_LIST_HEAD(&cache->resolve); 404 INIT_LIST_HEAD(&cache->resolve);
418} 405}
419 406
420struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) 407struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
408 bdaddr_t *bdaddr)
421{ 409{
422 struct discovery_state *cache = &hdev->discovery; 410 struct discovery_state *cache = &hdev->discovery;
423 struct inquiry_entry *e; 411 struct inquiry_entry *e;
@@ -478,7 +466,7 @@ void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
478 466
479 list_for_each_entry(p, &cache->resolve, list) { 467 list_for_each_entry(p, &cache->resolve, list) {
480 if (p->name_state != NAME_PENDING && 468 if (p->name_state != NAME_PENDING &&
481 abs(p->data.rssi) >= abs(ie->data.rssi)) 469 abs(p->data.rssi) >= abs(ie->data.rssi))
482 break; 470 break;
483 pos = &p->list; 471 pos = &p->list;
484 } 472 }
@@ -503,7 +491,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
503 *ssp = true; 491 *ssp = true;
504 492
505 if (ie->name_state == NAME_NEEDED && 493 if (ie->name_state == NAME_NEEDED &&
506 data->rssi != ie->data.rssi) { 494 data->rssi != ie->data.rssi) {
507 ie->data.rssi = data->rssi; 495 ie->data.rssi = data->rssi;
508 hci_inquiry_cache_update_resolve(hdev, ie); 496 hci_inquiry_cache_update_resolve(hdev, ie);
509 } 497 }
@@ -527,7 +515,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
527 515
528update: 516update:
529 if (name_known && ie->name_state != NAME_KNOWN && 517 if (name_known && ie->name_state != NAME_KNOWN &&
530 ie->name_state != NAME_PENDING) { 518 ie->name_state != NAME_PENDING) {
531 ie->name_state = NAME_KNOWN; 519 ie->name_state = NAME_KNOWN;
532 list_del(&ie->list); 520 list_del(&ie->list);
533 } 521 }
@@ -605,8 +593,7 @@ int hci_inquiry(void __user *arg)
605 593
606 hci_dev_lock(hdev); 594 hci_dev_lock(hdev);
607 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || 595 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
608 inquiry_cache_empty(hdev) || 596 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
609 ir.flags & IREQ_CACHE_FLUSH) {
610 inquiry_cache_flush(hdev); 597 inquiry_cache_flush(hdev);
611 do_inquiry = 1; 598 do_inquiry = 1;
612 } 599 }
@@ -620,7 +607,9 @@ int hci_inquiry(void __user *arg)
620 goto done; 607 goto done;
621 } 608 }
622 609
623 /* for unlimited number of responses we will use buffer with 255 entries */ 610 /* for unlimited number of responses we will use buffer with
611 * 255 entries
612 */
624 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp; 613 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
625 614
626 /* cache_dump can't sleep. Therefore we allocate temp buffer and then 615 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
@@ -641,7 +630,7 @@ int hci_inquiry(void __user *arg)
641 if (!copy_to_user(ptr, &ir, sizeof(ir))) { 630 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
642 ptr += sizeof(ir); 631 ptr += sizeof(ir);
643 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) * 632 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
644 ir.num_rsp)) 633 ir.num_rsp))
645 err = -EFAULT; 634 err = -EFAULT;
646 } else 635 } else
647 err = -EFAULT; 636 err = -EFAULT;
@@ -702,11 +691,11 @@ int hci_dev_open(__u16 dev)
702 hdev->init_last_cmd = 0; 691 hdev->init_last_cmd = 0;
703 692
704 ret = __hci_request(hdev, hci_init_req, 0, 693 ret = __hci_request(hdev, hci_init_req, 0,
705 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 694 msecs_to_jiffies(HCI_INIT_TIMEOUT));
706 695
707 if (lmp_host_le_capable(hdev)) 696 if (lmp_host_le_capable(hdev))
708 ret = __hci_request(hdev, hci_le_init_req, 0, 697 ret = __hci_request(hdev, hci_le_init_req, 0,
709 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 698 msecs_to_jiffies(HCI_INIT_TIMEOUT));
710 699
711 clear_bit(HCI_INIT, &hdev->flags); 700 clear_bit(HCI_INIT, &hdev->flags);
712 } 701 }
@@ -791,10 +780,10 @@ static int hci_dev_do_close(struct hci_dev *hdev)
791 skb_queue_purge(&hdev->cmd_q); 780 skb_queue_purge(&hdev->cmd_q);
792 atomic_set(&hdev->cmd_cnt, 1); 781 atomic_set(&hdev->cmd_cnt, 1);
793 if (!test_bit(HCI_RAW, &hdev->flags) && 782 if (!test_bit(HCI_RAW, &hdev->flags) &&
794 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) { 783 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
795 set_bit(HCI_INIT, &hdev->flags); 784 set_bit(HCI_INIT, &hdev->flags);
796 __hci_request(hdev, hci_reset_req, 0, 785 __hci_request(hdev, hci_reset_req, 0,
797 msecs_to_jiffies(250)); 786 msecs_to_jiffies(250));
798 clear_bit(HCI_INIT, &hdev->flags); 787 clear_bit(HCI_INIT, &hdev->flags);
799 } 788 }
800 789
@@ -884,7 +873,7 @@ int hci_dev_reset(__u16 dev)
884 873
885 if (!test_bit(HCI_RAW, &hdev->flags)) 874 if (!test_bit(HCI_RAW, &hdev->flags))
886 ret = __hci_request(hdev, hci_reset_req, 0, 875 ret = __hci_request(hdev, hci_reset_req, 0,
887 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 876 msecs_to_jiffies(HCI_INIT_TIMEOUT));
888 877
889done: 878done:
890 hci_req_unlock(hdev); 879 hci_req_unlock(hdev);
@@ -924,7 +913,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
924 switch (cmd) { 913 switch (cmd) {
925 case HCISETAUTH: 914 case HCISETAUTH:
926 err = hci_request(hdev, hci_auth_req, dr.dev_opt, 915 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
927 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 916 msecs_to_jiffies(HCI_INIT_TIMEOUT));
928 break; 917 break;
929 918
930 case HCISETENCRYPT: 919 case HCISETENCRYPT:
@@ -936,23 +925,23 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
936 if (!test_bit(HCI_AUTH, &hdev->flags)) { 925 if (!test_bit(HCI_AUTH, &hdev->flags)) {
937 /* Auth must be enabled first */ 926 /* Auth must be enabled first */
938 err = hci_request(hdev, hci_auth_req, dr.dev_opt, 927 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
939 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 928 msecs_to_jiffies(HCI_INIT_TIMEOUT));
940 if (err) 929 if (err)
941 break; 930 break;
942 } 931 }
943 932
944 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt, 933 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
945 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 934 msecs_to_jiffies(HCI_INIT_TIMEOUT));
946 break; 935 break;
947 936
948 case HCISETSCAN: 937 case HCISETSCAN:
949 err = hci_request(hdev, hci_scan_req, dr.dev_opt, 938 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
950 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 939 msecs_to_jiffies(HCI_INIT_TIMEOUT));
951 break; 940 break;
952 941
953 case HCISETLINKPOL: 942 case HCISETLINKPOL:
954 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt, 943 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
955 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 944 msecs_to_jiffies(HCI_INIT_TIMEOUT));
956 break; 945 break;
957 946
958 case HCISETLINKMODE: 947 case HCISETLINKMODE:
@@ -1103,7 +1092,7 @@ static void hci_power_on(struct work_struct *work)
1103 1092
1104 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 1093 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1105 schedule_delayed_work(&hdev->power_off, 1094 schedule_delayed_work(&hdev->power_off,
1106 msecs_to_jiffies(AUTO_OFF_TIMEOUT)); 1095 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1107 1096
1108 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) 1097 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1109 mgmt_index_added(hdev); 1098 mgmt_index_added(hdev);
@@ -1112,7 +1101,7 @@ static void hci_power_on(struct work_struct *work)
1112static void hci_power_off(struct work_struct *work) 1101static void hci_power_off(struct work_struct *work)
1113{ 1102{
1114 struct hci_dev *hdev = container_of(work, struct hci_dev, 1103 struct hci_dev *hdev = container_of(work, struct hci_dev,
1115 power_off.work); 1104 power_off.work);
1116 1105
1117 BT_DBG("%s", hdev->name); 1106 BT_DBG("%s", hdev->name);
1118 1107
@@ -1193,7 +1182,7 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1193} 1182}
1194 1183
1195static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, 1184static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1196 u8 key_type, u8 old_key_type) 1185 u8 key_type, u8 old_key_type)
1197{ 1186{
1198 /* Legacy key */ 1187 /* Legacy key */
1199 if (key_type < 0x03) 1188 if (key_type < 0x03)
@@ -1234,7 +1223,7 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1234 1223
1235 list_for_each_entry(k, &hdev->long_term_keys, list) { 1224 list_for_each_entry(k, &hdev->long_term_keys, list) {
1236 if (k->ediv != ediv || 1225 if (k->ediv != ediv ||
1237 memcmp(rand, k->rand, sizeof(k->rand))) 1226 memcmp(rand, k->rand, sizeof(k->rand)))
1238 continue; 1227 continue;
1239 1228
1240 return k; 1229 return k;
@@ -1242,7 +1231,6 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1242 1231
1243 return NULL; 1232 return NULL;
1244} 1233}
1245EXPORT_SYMBOL(hci_find_ltk);
1246 1234
1247struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, 1235struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1248 u8 addr_type) 1236 u8 addr_type)
@@ -1251,12 +1239,11 @@ struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1251 1239
1252 list_for_each_entry(k, &hdev->long_term_keys, list) 1240 list_for_each_entry(k, &hdev->long_term_keys, list)
1253 if (addr_type == k->bdaddr_type && 1241 if (addr_type == k->bdaddr_type &&
1254 bacmp(bdaddr, &k->bdaddr) == 0) 1242 bacmp(bdaddr, &k->bdaddr) == 0)
1255 return k; 1243 return k;
1256 1244
1257 return NULL; 1245 return NULL;
1258} 1246}
1259EXPORT_SYMBOL(hci_find_ltk_by_addr);
1260 1247
1261int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, 1248int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1262 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len) 1249 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
@@ -1283,15 +1270,14 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1283 * combination key for legacy pairing even when there's no 1270 * combination key for legacy pairing even when there's no
1284 * previous key */ 1271 * previous key */
1285 if (type == HCI_LK_CHANGED_COMBINATION && 1272 if (type == HCI_LK_CHANGED_COMBINATION &&
1286 (!conn || conn->remote_auth == 0xff) && 1273 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1287 old_key_type == 0xff) {
1288 type = HCI_LK_COMBINATION; 1274 type = HCI_LK_COMBINATION;
1289 if (conn) 1275 if (conn)
1290 conn->key_type = type; 1276 conn->key_type = type;
1291 } 1277 }
1292 1278
1293 bacpy(&key->bdaddr, bdaddr); 1279 bacpy(&key->bdaddr, bdaddr);
1294 memcpy(key->val, val, 16); 1280 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1295 key->pin_len = pin_len; 1281 key->pin_len = pin_len;
1296 1282
1297 if (type == HCI_LK_CHANGED_COMBINATION) 1283 if (type == HCI_LK_CHANGED_COMBINATION)
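
A small readability fix inside this hunk: the bare 16 in the memcpy() becomes HCI_LINK_KEY_SIZE. BR/EDR link keys are 128 bits, and the named constant (defined as 16 in the HCI headers) ties the copy length to the declared size of key->val instead of a magic number:

u8 val[HCI_LINK_KEY_SIZE];              /* 16 bytes, one 128-bit link key */

memcpy(key->val, val, HCI_LINK_KEY_SIZE);
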
@@ -1540,6 +1526,7 @@ static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1540 1526
1541 memset(&cp, 0, sizeof(cp)); 1527 memset(&cp, 0, sizeof(cp));
1542 cp.enable = 1; 1528 cp.enable = 1;
1529 cp.filter_dup = 1;
1543 1530
1544 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); 1531 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1545} 1532}
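
The functional change in this hunk: le_scan_enable_req() now also sets filter_dup, asking the controller to suppress duplicate advertising reports, so the host sees one report per device per scan rather than one per advertising interval. The command's parameter block, as defined in the HCI headers of this era:

struct hci_cp_le_set_scan_enable {
        __u8    enable;         /* 0x01: scanning enabled */
        __u8    filter_dup;     /* 0x01: controller filters duplicates */
} __packed;
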
@@ -1707,41 +1694,39 @@ EXPORT_SYMBOL(hci_free_dev);
1707/* Register HCI device */ 1694/* Register HCI device */
1708int hci_register_dev(struct hci_dev *hdev) 1695int hci_register_dev(struct hci_dev *hdev)
1709{ 1696{
1710 struct list_head *head, *p;
1711 int id, error; 1697 int id, error;
1712 1698
1713 if (!hdev->open || !hdev->close) 1699 if (!hdev->open || !hdev->close)
1714 return -EINVAL; 1700 return -EINVAL;
1715 1701
1716 write_lock(&hci_dev_list_lock);
1717
1718 /* Do not allow HCI_AMP devices to register at index 0, 1702 /* Do not allow HCI_AMP devices to register at index 0,
1719 * so the index can be used as the AMP controller ID. 1703 * so the index can be used as the AMP controller ID.
1720 */ 1704 */
1721 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1; 1705 switch (hdev->dev_type) {
1722 head = &hci_dev_list; 1706 case HCI_BREDR:
1723 1707 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1724 /* Find first available device id */ 1708 break;
1725 list_for_each(p, &hci_dev_list) { 1709 case HCI_AMP:
1726 int nid = list_entry(p, struct hci_dev, list)->id; 1710 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1727 if (nid > id) 1711 break;
1728 break; 1712 default:
1729 if (nid == id) 1713 return -EINVAL;
1730 id++;
1731 head = p;
1732 } 1714 }
1733 1715
1716 if (id < 0)
1717 return id;
1718
1734 sprintf(hdev->name, "hci%d", id); 1719 sprintf(hdev->name, "hci%d", id);
1735 hdev->id = id; 1720 hdev->id = id;
1736 1721
1737 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 1722 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1738 1723
1739 list_add(&hdev->list, head); 1724 write_lock(&hci_dev_list_lock);
1740 1725 list_add(&hdev->list, &hci_dev_list);
1741 write_unlock(&hci_dev_list_lock); 1726 write_unlock(&hci_dev_list_lock);
1742 1727
1743 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND | 1728 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1744 WQ_MEM_RECLAIM, 1); 1729 WQ_MEM_RECLAIM, 1);
1745 if (!hdev->workqueue) { 1730 if (!hdev->workqueue) {
1746 error = -ENOMEM; 1731 error = -ENOMEM;
1747 goto err; 1732 goto err;
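
The main rework in this file: hci_register_dev() stops walking hci_dev_list under the write lock to find the first free index and draws IDs from an IDA instead. The start bound encodes the old rule directly: BR/EDR controllers may take index 0, while AMP controllers start at 1 so a nonzero index can double as the AMP controller ID. Minimal usage of the same API, as a sketch:

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_get_id(bool is_amp)
{
        /* start at 1 for AMP so index 0 stays reserved;
         * end == 0 means "no upper bound" */
        return ida_simple_get(&example_ida, is_amp ? 1 : 0, 0, GFP_KERNEL);
}

static void example_put_id(int id)
{
        ida_simple_remove(&example_ida, id);
}

A negative return (-ENOMEM or -ENOSPC) propagates straight out of hci_register_dev(), which is why the error path below gains the matching ida_simple_remove().
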
@@ -1752,7 +1737,8 @@ int hci_register_dev(struct hci_dev *hdev)
1752 goto err_wqueue; 1737 goto err_wqueue;
1753 1738
1754 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, 1739 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1755 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev); 1740 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1741 hdev);
1756 if (hdev->rfkill) { 1742 if (hdev->rfkill) {
1757 if (rfkill_register(hdev->rfkill) < 0) { 1743 if (rfkill_register(hdev->rfkill) < 0) {
1758 rfkill_destroy(hdev->rfkill); 1744 rfkill_destroy(hdev->rfkill);
@@ -1772,6 +1758,7 @@ int hci_register_dev(struct hci_dev *hdev)
1772err_wqueue: 1758err_wqueue:
1773 destroy_workqueue(hdev->workqueue); 1759 destroy_workqueue(hdev->workqueue);
1774err: 1760err:
1761 ida_simple_remove(&hci_index_ida, hdev->id);
1775 write_lock(&hci_dev_list_lock); 1762 write_lock(&hci_dev_list_lock);
1776 list_del(&hdev->list); 1763 list_del(&hdev->list);
1777 write_unlock(&hci_dev_list_lock); 1764 write_unlock(&hci_dev_list_lock);
@@ -1783,12 +1770,14 @@ EXPORT_SYMBOL(hci_register_dev);
1783/* Unregister HCI device */ 1770/* Unregister HCI device */
1784void hci_unregister_dev(struct hci_dev *hdev) 1771void hci_unregister_dev(struct hci_dev *hdev)
1785{ 1772{
1786 int i; 1773 int i, id;
1787 1774
1788 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 1775 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1789 1776
1790 set_bit(HCI_UNREGISTER, &hdev->dev_flags); 1777 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1791 1778
1779 id = hdev->id;
1780
1792 write_lock(&hci_dev_list_lock); 1781 write_lock(&hci_dev_list_lock);
1793 list_del(&hdev->list); 1782 list_del(&hdev->list);
1794 write_unlock(&hci_dev_list_lock); 1783 write_unlock(&hci_dev_list_lock);
@@ -1799,7 +1788,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
1799 kfree_skb(hdev->reassembly[i]); 1788 kfree_skb(hdev->reassembly[i]);
1800 1789
1801 if (!test_bit(HCI_INIT, &hdev->flags) && 1790 if (!test_bit(HCI_INIT, &hdev->flags) &&
1802 !test_bit(HCI_SETUP, &hdev->dev_flags)) { 1791 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1803 hci_dev_lock(hdev); 1792 hci_dev_lock(hdev);
1804 mgmt_index_removed(hdev); 1793 mgmt_index_removed(hdev);
1805 hci_dev_unlock(hdev); 1794 hci_dev_unlock(hdev);
@@ -1829,6 +1818,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
1829 hci_dev_unlock(hdev); 1818 hci_dev_unlock(hdev);
1830 1819
1831 hci_dev_put(hdev); 1820 hci_dev_put(hdev);
1821
1822 ida_simple_remove(&hci_index_ida, id);
1832} 1823}
1833EXPORT_SYMBOL(hci_unregister_dev); 1824EXPORT_SYMBOL(hci_unregister_dev);
1834 1825
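
The ordering in hci_unregister_dev() is deliberate: hdev->id is copied into a local before teardown starts, because the hci_dev_put() at the end may drop the last reference and free hdev, after which hdev->id must not be touched. ida_simple_remove() then operates on the cached copy. Condensed from the hunks above:

id = hdev->id;                          /* cache while hdev is alive */
/* ... unregister, flush queues, destroy workqueue ... */
hci_dev_put(hdev);                      /* may free hdev here */
ida_simple_remove(&hci_index_ida, id);  /* safe: uses the cached id */
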
@@ -1853,7 +1844,7 @@ int hci_recv_frame(struct sk_buff *skb)
1853{ 1844{
1854 struct hci_dev *hdev = (struct hci_dev *) skb->dev; 1845 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1855 if (!hdev || (!test_bit(HCI_UP, &hdev->flags) 1846 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1856 && !test_bit(HCI_INIT, &hdev->flags))) { 1847 && !test_bit(HCI_INIT, &hdev->flags))) {
1857 kfree_skb(skb); 1848 kfree_skb(skb);
1858 return -ENXIO; 1849 return -ENXIO;
1859 } 1850 }
@@ -1872,7 +1863,7 @@ int hci_recv_frame(struct sk_buff *skb)
1872EXPORT_SYMBOL(hci_recv_frame); 1863EXPORT_SYMBOL(hci_recv_frame);
1873 1864
1874static int hci_reassembly(struct hci_dev *hdev, int type, void *data, 1865static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1875 int count, __u8 index) 1866 int count, __u8 index)
1876{ 1867{
1877 int len = 0; 1868 int len = 0;
1878 int hlen = 0; 1869 int hlen = 0;
@@ -1881,7 +1872,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1881 struct bt_skb_cb *scb; 1872 struct bt_skb_cb *scb;
1882 1873
1883 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) || 1874 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1884 index >= NUM_REASSEMBLY) 1875 index >= NUM_REASSEMBLY)
1885 return -EILSEQ; 1876 return -EILSEQ;
1886 1877
1887 skb = hdev->reassembly[index]; 1878 skb = hdev->reassembly[index];
@@ -2023,7 +2014,7 @@ int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2023 type = bt_cb(skb)->pkt_type; 2014 type = bt_cb(skb)->pkt_type;
2024 2015
2025 rem = hci_reassembly(hdev, type, data, count, 2016 rem = hci_reassembly(hdev, type, data, count,
2026 STREAM_REASSEMBLY); 2017 STREAM_REASSEMBLY);
2027 if (rem < 0) 2018 if (rem < 0)
2028 return rem; 2019 return rem;
2029 2020
@@ -2157,7 +2148,7 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2157} 2148}
2158 2149
2159static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue, 2150static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2160 struct sk_buff *skb, __u16 flags) 2151 struct sk_buff *skb, __u16 flags)
2161{ 2152{
2162 struct hci_dev *hdev = conn->hdev; 2153 struct hci_dev *hdev = conn->hdev;
2163 struct sk_buff *list; 2154 struct sk_buff *list;
@@ -2216,7 +2207,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2216 2207
2217 queue_work(hdev->workqueue, &hdev->tx_work); 2208 queue_work(hdev->workqueue, &hdev->tx_work);
2218} 2209}
2219EXPORT_SYMBOL(hci_send_acl);
2220 2210
2221/* Send SCO data */ 2211/* Send SCO data */
2222void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) 2212void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
@@ -2239,12 +2229,12 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2239 skb_queue_tail(&conn->data_q, skb); 2229 skb_queue_tail(&conn->data_q, skb);
2240 queue_work(hdev->workqueue, &hdev->tx_work); 2230 queue_work(hdev->workqueue, &hdev->tx_work);
2241} 2231}
2242EXPORT_SYMBOL(hci_send_sco);
2243 2232
2244/* ---- HCI TX task (outgoing data) ---- */ 2233/* ---- HCI TX task (outgoing data) ---- */
2245 2234
2246/* HCI Connection scheduler */ 2235/* HCI Connection scheduler */
2247static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) 2236static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2237 int *quote)
2248{ 2238{
2249 struct hci_conn_hash *h = &hdev->conn_hash; 2239 struct hci_conn_hash *h = &hdev->conn_hash;
2250 struct hci_conn *conn = NULL, *c; 2240 struct hci_conn *conn = NULL, *c;
@@ -2303,7 +2293,7 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
2303 return conn; 2293 return conn;
2304} 2294}
2305 2295
2306static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type) 2296static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2307{ 2297{
2308 struct hci_conn_hash *h = &hdev->conn_hash; 2298 struct hci_conn_hash *h = &hdev->conn_hash;
2309 struct hci_conn *c; 2299 struct hci_conn *c;
@@ -2316,16 +2306,16 @@ static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2316 list_for_each_entry_rcu(c, &h->list, list) { 2306 list_for_each_entry_rcu(c, &h->list, list) {
2317 if (c->type == type && c->sent) { 2307 if (c->type == type && c->sent) {
2318 BT_ERR("%s killing stalled connection %s", 2308 BT_ERR("%s killing stalled connection %s",
2319 hdev->name, batostr(&c->dst)); 2309 hdev->name, batostr(&c->dst));
2320 hci_acl_disconn(c, 0x13); 2310 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
2321 } 2311 }
2322 } 2312 }
2323 2313
2324 rcu_read_unlock(); 2314 rcu_read_unlock();
2325} 2315}
2326 2316
2327static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, 2317static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2328 int *quote) 2318 int *quote)
2329{ 2319{
2330 struct hci_conn_hash *h = &hdev->conn_hash; 2320 struct hci_conn_hash *h = &hdev->conn_hash;
2331 struct hci_chan *chan = NULL; 2321 struct hci_chan *chan = NULL;
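
Another magic-number cleanup in the hunk above: the stalled-connection kill passes HCI_ERROR_REMOTE_USER_TERM instead of a bare 0x13. The constant comes from the HCI error code table and the value is unchanged:

#define HCI_ERROR_REMOTE_USER_TERM      0x13    /* Remote User Terminated Connection */

hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM); /* was: hci_acl_disconn(c, 0x13) */
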
@@ -2442,7 +2432,7 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2442 skb->priority = HCI_PRIO_MAX - 1; 2432 skb->priority = HCI_PRIO_MAX - 1;
2443 2433
2444 BT_DBG("chan %p skb %p promoted to %d", chan, skb, 2434 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2445 skb->priority); 2435 skb->priority);
2446 } 2436 }
2447 2437
2448 if (hci_conn_num(hdev, type) == num) 2438 if (hci_conn_num(hdev, type) == num)
@@ -2459,18 +2449,18 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2459 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len); 2449 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2460} 2450}
2461 2451
2462static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt) 2452static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2463{ 2453{
2464 if (!test_bit(HCI_RAW, &hdev->flags)) { 2454 if (!test_bit(HCI_RAW, &hdev->flags)) {
2465 /* ACL tx timeout must be longer than maximum 2455 /* ACL tx timeout must be longer than maximum
2466 * link supervision timeout (40.9 seconds) */ 2456 * link supervision timeout (40.9 seconds) */
2467 if (!cnt && time_after(jiffies, hdev->acl_last_tx + 2457 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2468 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT))) 2458 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2469 hci_link_tx_to(hdev, ACL_LINK); 2459 hci_link_tx_to(hdev, ACL_LINK);
2470 } 2460 }
2471} 2461}
2472 2462
2473static inline void hci_sched_acl_pkt(struct hci_dev *hdev) 2463static void hci_sched_acl_pkt(struct hci_dev *hdev)
2474{ 2464{
2475 unsigned int cnt = hdev->acl_cnt; 2465 unsigned int cnt = hdev->acl_cnt;
2476 struct hci_chan *chan; 2466 struct hci_chan *chan;
@@ -2480,11 +2470,11 @@ static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2480 __check_timeout(hdev, cnt); 2470 __check_timeout(hdev, cnt);
2481 2471
2482 while (hdev->acl_cnt && 2472 while (hdev->acl_cnt &&
2483 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) { 2473 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2484 u32 priority = (skb_peek(&chan->data_q))->priority; 2474 u32 priority = (skb_peek(&chan->data_q))->priority;
2485 while (quote-- && (skb = skb_peek(&chan->data_q))) { 2475 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2486 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 2476 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2487 skb->len, skb->priority); 2477 skb->len, skb->priority);
2488 2478
2489 /* Stop if priority has changed */ 2479 /* Stop if priority has changed */
2490 if (skb->priority < priority) 2480 if (skb->priority < priority)
@@ -2508,7 +2498,7 @@ static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2508 hci_prio_recalculate(hdev, ACL_LINK); 2498 hci_prio_recalculate(hdev, ACL_LINK);
2509} 2499}
2510 2500
2511static inline void hci_sched_acl_blk(struct hci_dev *hdev) 2501static void hci_sched_acl_blk(struct hci_dev *hdev)
2512{ 2502{
2513 unsigned int cnt = hdev->block_cnt; 2503 unsigned int cnt = hdev->block_cnt;
2514 struct hci_chan *chan; 2504 struct hci_chan *chan;
@@ -2518,13 +2508,13 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2518 __check_timeout(hdev, cnt); 2508 __check_timeout(hdev, cnt);
2519 2509
2520 while (hdev->block_cnt > 0 && 2510 while (hdev->block_cnt > 0 &&
2521 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) { 2511 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2522 u32 priority = (skb_peek(&chan->data_q))->priority; 2512 u32 priority = (skb_peek(&chan->data_q))->priority;
2523 while (quote > 0 && (skb = skb_peek(&chan->data_q))) { 2513 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2524 int blocks; 2514 int blocks;
2525 2515
2526 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 2516 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2527 skb->len, skb->priority); 2517 skb->len, skb->priority);
2528 2518
2529 /* Stop if priority has changed */ 2519 /* Stop if priority has changed */
2530 if (skb->priority < priority) 2520 if (skb->priority < priority)
@@ -2537,7 +2527,7 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2537 return; 2527 return;
2538 2528
2539 hci_conn_enter_active_mode(chan->conn, 2529 hci_conn_enter_active_mode(chan->conn,
2540 bt_cb(skb)->force_active); 2530 bt_cb(skb)->force_active);
2541 2531
2542 hci_send_frame(skb); 2532 hci_send_frame(skb);
2543 hdev->acl_last_tx = jiffies; 2533 hdev->acl_last_tx = jiffies;
@@ -2554,7 +2544,7 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2554 hci_prio_recalculate(hdev, ACL_LINK); 2544 hci_prio_recalculate(hdev, ACL_LINK);
2555} 2545}
2556 2546
2557static inline void hci_sched_acl(struct hci_dev *hdev) 2547static void hci_sched_acl(struct hci_dev *hdev)
2558{ 2548{
2559 BT_DBG("%s", hdev->name); 2549 BT_DBG("%s", hdev->name);
2560 2550
@@ -2573,7 +2563,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
2573} 2563}
2574 2564
2575/* Schedule SCO */ 2565/* Schedule SCO */
2576static inline void hci_sched_sco(struct hci_dev *hdev) 2566static void hci_sched_sco(struct hci_dev *hdev)
2577{ 2567{
2578 struct hci_conn *conn; 2568 struct hci_conn *conn;
2579 struct sk_buff *skb; 2569 struct sk_buff *skb;
@@ -2596,7 +2586,7 @@ static inline void hci_sched_sco(struct hci_dev *hdev)
2596 } 2586 }
2597} 2587}
2598 2588
2599static inline void hci_sched_esco(struct hci_dev *hdev) 2589static void hci_sched_esco(struct hci_dev *hdev)
2600{ 2590{
2601 struct hci_conn *conn; 2591 struct hci_conn *conn;
2602 struct sk_buff *skb; 2592 struct sk_buff *skb;
@@ -2607,7 +2597,8 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
2607 if (!hci_conn_num(hdev, ESCO_LINK)) 2597 if (!hci_conn_num(hdev, ESCO_LINK))
2608 return; 2598 return;
2609 2599
2610 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) { 2600 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2601 &quote))) {
2611 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 2602 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2612 BT_DBG("skb %p len %d", skb, skb->len); 2603 BT_DBG("skb %p len %d", skb, skb->len);
2613 hci_send_frame(skb); 2604 hci_send_frame(skb);
@@ -2619,7 +2610,7 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
2619 } 2610 }
2620} 2611}
2621 2612
2622static inline void hci_sched_le(struct hci_dev *hdev) 2613static void hci_sched_le(struct hci_dev *hdev)
2623{ 2614{
2624 struct hci_chan *chan; 2615 struct hci_chan *chan;
2625 struct sk_buff *skb; 2616 struct sk_buff *skb;
@@ -2634,7 +2625,7 @@ static inline void hci_sched_le(struct hci_dev *hdev)
2634 /* LE tx timeout must be longer than maximum 2625 /* LE tx timeout must be longer than maximum
2635 * link supervision timeout (40.9 seconds) */ 2626 * link supervision timeout (40.9 seconds) */
2636 if (!hdev->le_cnt && hdev->le_pkts && 2627 if (!hdev->le_cnt && hdev->le_pkts &&
2637 time_after(jiffies, hdev->le_last_tx + HZ * 45)) 2628 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2638 hci_link_tx_to(hdev, LE_LINK); 2629 hci_link_tx_to(hdev, LE_LINK);
2639 } 2630 }
2640 2631
@@ -2644,7 +2635,7 @@ static inline void hci_sched_le(struct hci_dev *hdev)
2644 u32 priority = (skb_peek(&chan->data_q))->priority; 2635 u32 priority = (skb_peek(&chan->data_q))->priority;
2645 while (quote-- && (skb = skb_peek(&chan->data_q))) { 2636 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2646 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 2637 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2647 skb->len, skb->priority); 2638 skb->len, skb->priority);
2648 2639
2649 /* Stop if priority has changed */ 2640 /* Stop if priority has changed */
2650 if (skb->priority < priority) 2641 if (skb->priority < priority)
@@ -2676,7 +2667,7 @@ static void hci_tx_work(struct work_struct *work)
2676 struct sk_buff *skb; 2667 struct sk_buff *skb;
2677 2668
2678 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt, 2669 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2679 hdev->sco_cnt, hdev->le_cnt); 2670 hdev->sco_cnt, hdev->le_cnt);
2680 2671
2681 /* Schedule queues and send stuff to HCI driver */ 2672 /* Schedule queues and send stuff to HCI driver */
2682 2673
@@ -2696,7 +2687,7 @@ static void hci_tx_work(struct work_struct *work)
2696/* ----- HCI RX task (incoming data processing) ----- */ 2687/* ----- HCI RX task (incoming data processing) ----- */
2697 2688
2698/* ACL data packet */ 2689/* ACL data packet */
2699static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) 2690static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2700{ 2691{
2701 struct hci_acl_hdr *hdr = (void *) skb->data; 2692 struct hci_acl_hdr *hdr = (void *) skb->data;
2702 struct hci_conn *conn; 2693 struct hci_conn *conn;
@@ -2708,7 +2699,8 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2708 flags = hci_flags(handle); 2699 flags = hci_flags(handle);
2709 handle = hci_handle(handle); 2700 handle = hci_handle(handle);
2710 2701
2711 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags); 2702 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
2703 handle, flags);
2712 2704
2713 hdev->stat.acl_rx++; 2705 hdev->stat.acl_rx++;
2714 2706
@@ -2732,14 +2724,14 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2732 return; 2724 return;
2733 } else { 2725 } else {
2734 BT_ERR("%s ACL packet for unknown connection handle %d", 2726 BT_ERR("%s ACL packet for unknown connection handle %d",
2735 hdev->name, handle); 2727 hdev->name, handle);
2736 } 2728 }
2737 2729
2738 kfree_skb(skb); 2730 kfree_skb(skb);
2739} 2731}
2740 2732
2741/* SCO data packet */ 2733/* SCO data packet */
2742static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) 2734static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2743{ 2735{
2744 struct hci_sco_hdr *hdr = (void *) skb->data; 2736 struct hci_sco_hdr *hdr = (void *) skb->data;
2745 struct hci_conn *conn; 2737 struct hci_conn *conn;
@@ -2763,7 +2755,7 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2763 return; 2755 return;
2764 } else { 2756 } else {
2765 BT_ERR("%s SCO packet for unknown connection handle %d", 2757 BT_ERR("%s SCO packet for unknown connection handle %d",
2766 hdev->name, handle); 2758 hdev->name, handle);
2767 } 2759 }
2768 2760
2769 kfree_skb(skb); 2761 kfree_skb(skb);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 4eefb7f65cf..1ba929c05d0 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -24,20 +24,7 @@
24 24
25/* Bluetooth HCI event handling. */ 25/* Bluetooth HCI event handling. */
26 26
27#include <linux/module.h> 27#include <linux/export.h>
28
29#include <linux/types.h>
30#include <linux/errno.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/poll.h>
34#include <linux/fcntl.h>
35#include <linux/init.h>
36#include <linux/skbuff.h>
37#include <linux/interrupt.h>
38#include <net/sock.h>
39
40#include <linux/uaccess.h>
41#include <asm/unaligned.h> 28#include <asm/unaligned.h>
42 29
43#include <net/bluetooth/bluetooth.h> 30#include <net/bluetooth/bluetooth.h>
@@ -95,7 +82,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
95 hci_conn_check_pending(hdev); 82 hci_conn_check_pending(hdev);
96} 83}
97 84
98static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb) 85static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
86 struct sk_buff *skb)
99{ 87{
100 BT_DBG("%s", hdev->name); 88 BT_DBG("%s", hdev->name);
101} 89}
@@ -166,7 +154,8 @@ static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
166 hci_dev_unlock(hdev); 154 hci_dev_unlock(hdev);
167} 155}
168 156
169static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) 157static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
158 struct sk_buff *skb)
170{ 159{
171 struct hci_rp_read_def_link_policy *rp = (void *) skb->data; 160 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
172 161
@@ -178,7 +167,8 @@ static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *sk
178 hdev->link_policy = __le16_to_cpu(rp->policy); 167 hdev->link_policy = __le16_to_cpu(rp->policy);
179} 168}
180 169
181static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) 170static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
171 struct sk_buff *skb)
182{ 172{
183 __u8 status = *((__u8 *) skb->data); 173 __u8 status = *((__u8 *) skb->data);
184 void *sent; 174 void *sent;
@@ -329,7 +319,7 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
329 if (hdev->discov_timeout > 0) { 319 if (hdev->discov_timeout > 0) {
330 int to = msecs_to_jiffies(hdev->discov_timeout * 1000); 320 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
331 queue_delayed_work(hdev->workqueue, &hdev->discov_off, 321 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
332 to); 322 to);
333 } 323 }
334 } else if (old_iscan) 324 } else if (old_iscan)
335 mgmt_discoverable(hdev, 0); 325 mgmt_discoverable(hdev, 0);
@@ -358,7 +348,7 @@ static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
358 memcpy(hdev->dev_class, rp->dev_class, 3); 348 memcpy(hdev->dev_class, rp->dev_class, 3);
359 349
360 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name, 350 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
361 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); 351 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
362} 352}
363 353
364static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) 354static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
@@ -406,7 +396,8 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
406 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 396 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
407} 397}
408 398
409static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) 399static void hci_cc_write_voice_setting(struct hci_dev *hdev,
400 struct sk_buff *skb)
410{ 401{
411 __u8 status = *((__u8 *) skb->data); 402 __u8 status = *((__u8 *) skb->data);
412 __u16 setting; 403 __u16 setting;
@@ -473,7 +464,7 @@ static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
473 return 1; 464 return 1;
474 465
475 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 && 466 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
476 hdev->lmp_subver == 0x0757) 467 hdev->lmp_subver == 0x0757)
477 return 1; 468 return 1;
478 469
479 if (hdev->manufacturer == 15) { 470 if (hdev->manufacturer == 15) {
@@ -486,7 +477,7 @@ static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
486 } 477 }
487 478
488 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 && 479 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
489 hdev->lmp_subver == 0x1805) 480 hdev->lmp_subver == 0x1805)
490 return 1; 481 return 1;
491 482
492 return 0; 483 return 0;
@@ -566,7 +557,7 @@ static void hci_setup(struct hci_dev *hdev)
566 if (hdev->hci_ver > BLUETOOTH_VER_1_1) 557 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
567 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); 558 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
568 559
569 if (hdev->features[6] & LMP_SIMPLE_PAIR) { 560 if (lmp_ssp_capable(hdev)) {
570 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { 561 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
571 u8 mode = 0x01; 562 u8 mode = 0x01;
572 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 563 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
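
Same readability theme in hci_event.c: the open-coded feature test hdev->features[6] & LMP_SIMPLE_PAIR gives way to the lmp_ssp_capable() helper. Judging by the test it replaces, the helper is a thin macro over the same bit, roughly:

/* approximate definition, sketched from the open-coded test removed here */
#define lmp_ssp_capable(dev)    ((dev)->features[6] & LMP_SIMPLE_PAIR)
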
@@ -618,8 +609,7 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
618 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); 609 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
619 610
620 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name, 611 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
621 hdev->manufacturer, 612 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
622 hdev->hci_ver, hdev->hci_rev);
623 613
624 if (test_bit(HCI_INIT, &hdev->flags)) 614 if (test_bit(HCI_INIT, &hdev->flags))
625 hci_setup(hdev); 615 hci_setup(hdev);
@@ -646,7 +636,8 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
646 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp); 636 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
647} 637}
648 638
649static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb) 639static void hci_cc_read_local_commands(struct hci_dev *hdev,
640 struct sk_buff *skb)
650{ 641{
651 struct hci_rp_read_local_commands *rp = (void *) skb->data; 642 struct hci_rp_read_local_commands *rp = (void *) skb->data;
652 643
@@ -664,7 +655,8 @@ done:
664 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status); 655 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
665} 656}
666 657
667static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb) 658static void hci_cc_read_local_features(struct hci_dev *hdev,
659 struct sk_buff *skb)
668{ 660{
669 struct hci_rp_read_local_features *rp = (void *) skb->data; 661 struct hci_rp_read_local_features *rp = (void *) skb->data;
670 662
@@ -713,10 +705,10 @@ static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb
713 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); 705 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
714 706
715 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name, 707 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
716 hdev->features[0], hdev->features[1], 708 hdev->features[0], hdev->features[1],
717 hdev->features[2], hdev->features[3], 709 hdev->features[2], hdev->features[3],
718 hdev->features[4], hdev->features[5], 710 hdev->features[4], hdev->features[5],
719 hdev->features[6], hdev->features[7]); 711 hdev->features[6], hdev->features[7]);
720} 712}
721 713
722static void hci_set_le_support(struct hci_dev *hdev) 714static void hci_set_le_support(struct hci_dev *hdev)
@@ -736,7 +728,7 @@ static void hci_set_le_support(struct hci_dev *hdev)
736} 728}
737 729
738static void hci_cc_read_local_ext_features(struct hci_dev *hdev, 730static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
739 struct sk_buff *skb) 731 struct sk_buff *skb)
740{ 732{
741 struct hci_rp_read_local_ext_features *rp = (void *) skb->data; 733 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
742 734
@@ -762,7 +754,7 @@ done:
762} 754}
763 755
764static void hci_cc_read_flow_control_mode(struct hci_dev *hdev, 756static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
765 struct sk_buff *skb) 757 struct sk_buff *skb)
766{ 758{
767 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data; 759 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
768 760
@@ -798,9 +790,8 @@ static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
798 hdev->acl_cnt = hdev->acl_pkts; 790 hdev->acl_cnt = hdev->acl_pkts;
799 hdev->sco_cnt = hdev->sco_pkts; 791 hdev->sco_cnt = hdev->sco_pkts;
800 792
801 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, 793 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
802 hdev->acl_mtu, hdev->acl_pkts, 794 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
803 hdev->sco_mtu, hdev->sco_pkts);
804} 795}
805 796
806static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) 797static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
@@ -816,7 +807,7 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
816} 807}
817 808
818static void hci_cc_read_data_block_size(struct hci_dev *hdev, 809static void hci_cc_read_data_block_size(struct hci_dev *hdev,
819 struct sk_buff *skb) 810 struct sk_buff *skb)
820{ 811{
821 struct hci_rp_read_data_block_size *rp = (void *) skb->data; 812 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
822 813
@@ -832,7 +823,7 @@ static void hci_cc_read_data_block_size(struct hci_dev *hdev,
832 hdev->block_cnt = hdev->num_blocks; 823 hdev->block_cnt = hdev->num_blocks;
833 824
834 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, 825 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
835 hdev->block_cnt, hdev->block_len); 826 hdev->block_cnt, hdev->block_len);
836 827
837 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status); 828 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
838} 829}
@@ -847,7 +838,7 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
847} 838}
848 839
849static void hci_cc_read_local_amp_info(struct hci_dev *hdev, 840static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
850 struct sk_buff *skb) 841 struct sk_buff *skb)
851{ 842{
852 struct hci_rp_read_local_amp_info *rp = (void *) skb->data; 843 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
853 844
@@ -871,7 +862,7 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
871} 862}
872 863
873static void hci_cc_delete_stored_link_key(struct hci_dev *hdev, 864static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
874 struct sk_buff *skb) 865 struct sk_buff *skb)
875{ 866{
876 __u8 status = *((__u8 *) skb->data); 867 __u8 status = *((__u8 *) skb->data);
877 868
@@ -890,7 +881,7 @@ static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
890} 881}
891 882
892static void hci_cc_write_inquiry_mode(struct hci_dev *hdev, 883static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
893 struct sk_buff *skb) 884 struct sk_buff *skb)
894{ 885{
895 __u8 status = *((__u8 *) skb->data); 886 __u8 status = *((__u8 *) skb->data);
896 887
@@ -900,7 +891,7 @@ static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
 }
 
 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
-						struct sk_buff *skb)
+					 struct sk_buff *skb)
 {
 	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
 
@@ -959,7 +950,7 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
 
 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
-							rp->status);
+						 rp->status);
 
 	hci_dev_unlock(hdev);
 }
@@ -1000,7 +991,7 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
 }
 
 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
-						struct sk_buff *skb)
+					  struct sk_buff *skb)
 {
 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
 
@@ -1031,7 +1022,7 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
 }
 
 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
-						struct sk_buff *skb)
+					  struct sk_buff *skb)
 {
 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
 
@@ -1047,7 +1038,7 @@ static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
 }
 
 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
-						struct sk_buff *skb)
+					     struct sk_buff *skb)
 {
 	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
 
@@ -1076,7 +1067,7 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
 }
 
 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
-						struct sk_buff *skb)
+				      struct sk_buff *skb)
 {
 	struct hci_cp_le_set_scan_enable *cp;
 	__u8 status = *((__u8 *) skb->data);
@@ -1156,8 +1147,8 @@ static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
 	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
 }
 
-static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
-						  struct sk_buff *skb)
+static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
+					   struct sk_buff *skb)
 {
 	struct hci_cp_write_le_host_supported *sent;
 	__u8 status = *((__u8 *) skb->data);
@@ -1176,13 +1167,13 @@ static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
 	}
 
 	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
-			!test_bit(HCI_INIT, &hdev->flags))
+	    !test_bit(HCI_INIT, &hdev->flags))
 		mgmt_le_enable_complete(hdev, sent->le, status);
 
 	hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
 }
 
-static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
+static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
 {
 	BT_DBG("%s status 0x%x", hdev->name, status);
 
@@ -1203,7 +1194,7 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
+static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
 {
 	struct hci_cp_create_conn *cp;
 	struct hci_conn *conn;
@@ -1333,7 +1324,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
 }
 
 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
-						struct hci_conn *conn)
+				    struct hci_conn *conn)
 {
 	if (conn->state != BT_CONFIG || !conn->out)
 		return 0;
@@ -1343,15 +1334,14 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
 
 	/* Only request authentication for SSP connections or non-SSP
 	 * devices with sec_level HIGH or if MITM protection is requested */
-	if (!hci_conn_ssp_enabled(conn) &&
-	    conn->pending_sec_level != BT_SECURITY_HIGH &&
-	    !(conn->auth_type & 0x01))
+	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
+	    conn->pending_sec_level != BT_SECURITY_HIGH)
 		return 0;
 
 	return 1;
 }
 
-static inline int hci_resolve_name(struct hci_dev *hdev,
-				   struct inquiry_entry *e)
+static int hci_resolve_name(struct hci_dev *hdev,
+			    struct inquiry_entry *e)
 {
 	struct hci_cp_remote_name_req cp;
@@ -1638,7 +1628,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
 
 	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
-		conn);
+	       conn);
 
 	if (status) {
 		if (conn && conn->state == BT_CONNECT) {
@@ -1668,7 +1658,7 @@ static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
 	BT_DBG("%s status 0x%x", hdev->name, status);
 }
 
-static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	__u8 status = *((__u8 *) skb->data);
 	struct discovery_state *discov = &hdev->discovery;
@@ -1708,7 +1698,7 @@ unlock:
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct inquiry_data data;
 	struct inquiry_info *info = (void *) (skb->data + 1);
@@ -1745,7 +1735,7 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_conn_complete *ev = (void *) skb->data;
 	struct hci_conn *conn;
@@ -1823,18 +1813,18 @@ unlock:
 	hci_conn_check_pending(hdev);
 }
 
-static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_conn_request *ev = (void *) skb->data;
 	int mask = hdev->link_mode;
 
-	BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
-	       batostr(&ev->bdaddr), ev->link_type);
+	BT_DBG("%s bdaddr %s type 0x%x", hdev->name, batostr(&ev->bdaddr),
+	       ev->link_type);
 
 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
 
 	if ((mask & HCI_LM_ACCEPT) &&
-			!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
+	    !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
 		/* Connection accepted */
 		struct inquiry_entry *ie;
 		struct hci_conn *conn;
@@ -1845,7 +1835,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
 		if (ie)
 			memcpy(ie->data.dev_class, ev->dev_class, 3);
 
-		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
+		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
+					       &ev->bdaddr);
 		if (!conn) {
 			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
 			if (!conn) {
@@ -1878,9 +1869,9 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
 			bacpy(&cp.bdaddr, &ev->bdaddr);
 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
 
-			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
-			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
-			cp.max_latency    = cpu_to_le16(0xffff);
+			cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
+			cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
+			cp.max_latency    = __constant_cpu_to_le16(0xffff);
 			cp.content_format = cpu_to_le16(hdev->voice_setting);
 			cp.retrans_effort = 0xff;
 
@@ -1897,7 +1888,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
 	}
 }
 
-static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_disconn_complete *ev = (void *) skb->data;
 	struct hci_conn *conn;
@@ -1914,10 +1905,10 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
 	conn->state = BT_CLOSED;
 
 	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
-			(conn->type == ACL_LINK || conn->type == LE_LINK)) {
+	    (conn->type == ACL_LINK || conn->type == LE_LINK)) {
 		if (ev->status != 0)
 			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
-						conn->dst_type, ev->status);
+					       conn->dst_type, ev->status);
 		else
 			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
-							conn->dst_type);
+						 conn->dst_type);
@@ -1934,7 +1925,7 @@ unlock:
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_auth_complete *ev = (void *) skb->data;
 	struct hci_conn *conn;
@@ -1949,7 +1940,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 
 	if (!ev->status) {
 		if (!hci_conn_ssp_enabled(conn) &&
-				test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
+		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
 			BT_INFO("re-auth of legacy device is not possible.");
 		} else {
 			conn->link_mode |= HCI_LM_AUTH;
@@ -1969,7 +1960,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 			cp.handle  = ev->handle;
 			cp.encrypt = 0x01;
 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
-									&cp);
+				     &cp);
 		} else {
 			conn->state = BT_CONNECTED;
 			hci_proto_connect_cfm(conn, ev->status);
@@ -1989,7 +1980,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 			cp.handle  = ev->handle;
 			cp.encrypt = 0x01;
 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
-									&cp);
+				     &cp);
 		} else {
 			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
 			hci_encrypt_cfm(conn, ev->status, 0x00);
@@ -2000,7 +1991,7 @@ unlock:
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_remote_name *ev = (void *) skb->data;
 	struct hci_conn *conn;
@@ -2039,7 +2030,7 @@ unlock:
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_encrypt_change *ev = (void *) skb->data;
 	struct hci_conn *conn;
@@ -2082,7 +2073,8 @@ unlock:
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
+					     struct sk_buff *skb)
 {
 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
 	struct hci_conn *conn;
@@ -2104,7 +2096,8 @@ static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_features_evt(struct hci_dev *hdev,
+				    struct sk_buff *skb)
 {
 	struct hci_ev_remote_features *ev = (void *) skb->data;
 	struct hci_conn *conn;
@@ -2128,7 +2121,7 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
 		cp.handle = ev->handle;
 		cp.page = 0x01;
 		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
-							sizeof(cp), &cp);
+			     sizeof(cp), &cp);
 		goto unlock;
 	}
 
@@ -2153,17 +2146,18 @@ unlock:
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	BT_DBG("%s", hdev->name);
 }
 
-static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_qos_setup_complete_evt(struct hci_dev *hdev,
+				       struct sk_buff *skb)
 {
 	BT_DBG("%s", hdev->name);
 }
 
-static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_cmd_complete *ev = (void *) skb->data;
 	__u16 opcode;
@@ -2384,7 +2378,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
 	}
 }
 
-static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_cmd_status *ev = (void *) skb->data;
 	__u16 opcode;
@@ -2465,7 +2459,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
 	}
 }
 
-static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_role_change *ev = (void *) skb->data;
 	struct hci_conn *conn;
@@ -2491,7 +2485,7 @@ static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
 	int i;
@@ -2502,7 +2496,7 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
 	}
 
 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
-			ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
+	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
 		BT_DBG("%s bad parameters", hdev->name);
 		return;
 	}
@@ -2557,8 +2551,7 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
 	queue_work(hdev->workqueue, &hdev->tx_work);
 }
 
-static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
-					   struct sk_buff *skb)
+static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
 	int i;
@@ -2569,13 +2562,13 @@ static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
 	}
 
 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
-			ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
+	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
 		BT_DBG("%s bad parameters", hdev->name);
 		return;
 	}
 
 	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
-		ev->num_hndl);
+	       ev->num_hndl);
 
 	for (i = 0; i < ev->num_hndl; i++) {
 		struct hci_comp_blocks_info *info = &ev->handles[i];
@@ -2607,7 +2600,7 @@ static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
 	queue_work(hdev->workqueue, &hdev->tx_work);
 }
 
-static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_mode_change *ev = (void *) skb->data;
 	struct hci_conn *conn;
@@ -2621,7 +2614,8 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb
 	conn->mode = ev->mode;
 	conn->interval = __le16_to_cpu(ev->interval);
 
-	if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
+	if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
+				&conn->flags)) {
 		if (conn->mode == HCI_CM_ACTIVE)
 			set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
 		else
@@ -2635,7 +2629,7 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_pin_code_req *ev = (void *) skb->data;
 	struct hci_conn *conn;
@@ -2656,7 +2650,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
 
 	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
-					sizeof(ev->bdaddr), &ev->bdaddr);
+			     sizeof(ev->bdaddr), &ev->bdaddr);
 	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
 		u8 secure;
 
@@ -2672,7 +2666,7 @@ unlock:
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_link_key_req *ev = (void *) skb->data;
 	struct hci_cp_link_key_reply cp;
@@ -2689,15 +2683,15 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
 	key = hci_find_link_key(hdev, &ev->bdaddr);
 	if (!key) {
 		BT_DBG("%s link key not found for %s", hdev->name,
-							batostr(&ev->bdaddr));
+		       batostr(&ev->bdaddr));
 		goto not_found;
 	}
 
 	BT_DBG("%s found key type %u for %s", hdev->name, key->type,
-							batostr(&ev->bdaddr));
+	       batostr(&ev->bdaddr));
 
 	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
-				key->type == HCI_LK_DEBUG_COMBINATION) {
+	    key->type == HCI_LK_DEBUG_COMBINATION) {
 		BT_DBG("%s ignoring debug key", hdev->name);
 		goto not_found;
 	}
@@ -2705,16 +2699,15 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
 	if (conn) {
 		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
-				conn->auth_type != 0xff &&
-				(conn->auth_type & 0x01)) {
+		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
 			BT_DBG("%s ignoring unauthenticated key", hdev->name);
 			goto not_found;
 		}
 
 		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
-				conn->pending_sec_level == BT_SECURITY_HIGH) {
-			BT_DBG("%s ignoring key unauthenticated for high \
-					security", hdev->name);
+		    conn->pending_sec_level == BT_SECURITY_HIGH) {
+			BT_DBG("%s ignoring key unauthenticated for high security",
+			       hdev->name);
 			goto not_found;
 		}
 
@@ -2723,7 +2716,7 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
 	}
 
 	bacpy(&cp.bdaddr, &ev->bdaddr);
-	memcpy(cp.link_key, key->val, 16);
+	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
 
 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
 
@@ -2736,7 +2729,7 @@ not_found:
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_link_key_notify *ev = (void *) skb->data;
 	struct hci_conn *conn;
@@ -2760,12 +2753,12 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
 
 	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
 		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
-					ev->key_type, pin_len);
+				 ev->key_type, pin_len);
 
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_clock_offset *ev = (void *) skb->data;
 	struct hci_conn *conn;
@@ -2788,7 +2781,7 @@ static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *sk
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
 	struct hci_conn *conn;
@@ -2804,7 +2797,7 @@ static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
 	struct inquiry_entry *ie;
@@ -2822,7 +2815,8 @@ static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
+					     struct sk_buff *skb)
 {
 	struct inquiry_data data;
 	int num_rsp = *((__u8 *) skb->data);
@@ -2881,7 +2875,8 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_ext_features_evt(struct hci_dev *hdev,
+					struct sk_buff *skb)
 {
 	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
 	struct hci_conn *conn;
@@ -2929,7 +2924,8 @@ unlock:
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
+				       struct sk_buff *skb)
 {
 	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
 	struct hci_conn *conn;
@@ -2984,19 +2980,20 @@ unlock:
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	BT_DBG("%s", hdev->name);
 }
 
-static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_sniff_subrate *ev = (void *) skb->data;
 
 	BT_DBG("%s status %d", hdev->name, ev->status);
 }
 
-static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
+					    struct sk_buff *skb)
 {
 	struct inquiry_data data;
 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
@@ -3043,7 +3040,51 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
 	hci_dev_unlock(hdev);
 }
 
-static inline u8 hci_get_auth_req(struct hci_conn *conn)
+static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
+					 struct sk_buff *skb)
+{
+	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
+	struct hci_conn *conn;
+
+	BT_DBG("%s status %u handle %u", hdev->name, ev->status,
+	       __le16_to_cpu(ev->handle));
+
+	hci_dev_lock(hdev);
+
+	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
+	if (!conn)
+		goto unlock;
+
+	if (!ev->status)
+		conn->sec_level = conn->pending_sec_level;
+
+	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
+
+	if (ev->status && conn->state == BT_CONNECTED) {
+		hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
+		hci_conn_put(conn);
+		goto unlock;
+	}
+
+	if (conn->state == BT_CONFIG) {
+		if (!ev->status)
+			conn->state = BT_CONNECTED;
+
+		hci_proto_connect_cfm(conn, ev->status);
+		hci_conn_put(conn);
+	} else {
+		hci_auth_cfm(conn, ev->status);
+
+		hci_conn_hold(conn);
+		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+		hci_conn_put(conn);
+	}
+
+unlock:
+	hci_dev_unlock(hdev);
+}
+
+static u8 hci_get_auth_req(struct hci_conn *conn)
 {
 	/* If remote requests dedicated bonding follow that lead */
 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
@@ -3062,7 +3103,7 @@ static inline u8 hci_get_auth_req(struct hci_conn *conn)
 	return conn->auth_type;
 }
 
-static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_io_capa_request *ev = (void *) skb->data;
 	struct hci_conn *conn;
@@ -3081,7 +3122,7 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
 		goto unlock;
 
 	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
-			(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
+	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
 		struct hci_cp_io_capability_reply cp;
 
 		bacpy(&cp.bdaddr, &ev->bdaddr);
@@ -3092,14 +3133,14 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
 		conn->auth_type = hci_get_auth_req(conn);
 		cp.authentication = conn->auth_type;
 
-		if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
-				hci_find_remote_oob_data(hdev, &conn->dst))
+		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
+		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
 			cp.oob_data = 0x01;
 		else
 			cp.oob_data = 0x00;
 
 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
-							sizeof(cp), &cp);
+			     sizeof(cp), &cp);
 	} else {
 		struct hci_cp_io_capability_neg_reply cp;
 
@@ -3107,14 +3148,14 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
 		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
 
 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
-							sizeof(cp), &cp);
+			     sizeof(cp), &cp);
 	}
 
 unlock:
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
 	struct hci_conn *conn;
@@ -3136,8 +3177,8 @@ unlock:
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
-						struct sk_buff *skb)
+static void hci_user_confirm_request_evt(struct hci_dev *hdev,
+					 struct sk_buff *skb)
 {
 	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
 	int loc_mitm, rem_mitm, confirm_hint = 0;
@@ -3165,13 +3206,13 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
 	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
 		BT_DBG("Rejecting request: remote device can't provide MITM");
 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
-					sizeof(ev->bdaddr), &ev->bdaddr);
+			     sizeof(ev->bdaddr), &ev->bdaddr);
 		goto unlock;
 	}
 
 	/* If no side requires MITM protection; auto-accept */
 	if ((!loc_mitm || conn->remote_cap == 0x03) &&
-			(!rem_mitm || conn->io_capability == 0x03)) {
+	    (!rem_mitm || conn->io_capability == 0x03)) {
 
 		/* If we're not the initiators request authorization to
 		 * proceed from user space (mgmt_user_confirm with
@@ -3183,7 +3224,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
 		}
 
 		BT_DBG("Auto-accept of user confirmation with %ums delay",
-						hdev->auto_accept_delay);
+		       hdev->auto_accept_delay);
 
 		if (hdev->auto_accept_delay > 0) {
 			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
@@ -3192,7 +3233,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
 		}
 
 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
-					sizeof(ev->bdaddr), &ev->bdaddr);
+			     sizeof(ev->bdaddr), &ev->bdaddr);
 		goto unlock;
 	}
 
@@ -3204,8 +3245,8 @@ unlock:
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
-						struct sk_buff *skb)
+static void hci_user_passkey_request_evt(struct hci_dev *hdev,
+					 struct sk_buff *skb)
 {
 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
 
@@ -3219,7 +3260,8 @@ static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
+					 struct sk_buff *skb)
 {
 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
 	struct hci_conn *conn;
@@ -3247,7 +3289,8 @@ unlock:
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_host_features_evt(struct hci_dev *hdev,
+					 struct sk_buff *skb)
 {
 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
 	struct inquiry_entry *ie;
@@ -3263,8 +3306,8 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
-						struct sk_buff *skb)
+static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
+					    struct sk_buff *skb)
 {
 	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
 	struct oob_data *data;
@@ -3285,20 +3328,20 @@ static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
 		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
 
 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
-									&cp);
+			     &cp);
 	} else {
 		struct hci_cp_remote_oob_data_neg_reply cp;
 
 		bacpy(&cp.bdaddr, &ev->bdaddr);
 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
-									&cp);
+			     &cp);
 	}
 
 unlock:
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
 	struct hci_conn *conn;
@@ -3307,6 +3350,19 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
 
 	hci_dev_lock(hdev);
 
+	if (ev->status) {
+		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+		if (!conn)
+			goto unlock;
+
+		mgmt_connect_failed(hdev, &conn->dst, conn->type,
+				    conn->dst_type, ev->status);
+		hci_proto_connect_cfm(conn, ev->status);
+		conn->state = BT_CLOSED;
+		hci_conn_del(conn);
+		goto unlock;
+	}
+
 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
 	if (!conn) {
 		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
@@ -3319,15 +3375,6 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
 		conn->dst_type = ev->bdaddr_type;
 	}
 
-	if (ev->status) {
-		mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
-				    conn->dst_type, ev->status);
-		hci_proto_connect_cfm(conn, ev->status);
-		conn->state = BT_CLOSED;
-		hci_conn_del(conn);
-		goto unlock;
-	}
-
 	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
 		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
 				      conn->dst_type, 0, NULL, 0, NULL);
@@ -3345,8 +3392,7 @@ unlock:
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
-					 struct sk_buff *skb)
+static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	u8 num_reports = skb->data[0];
 	void *ptr = &skb->data[1];
@@ -3367,8 +3413,7 @@ static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
-					  struct sk_buff *skb)
+static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
 	struct hci_cp_le_ltk_reply cp;
@@ -3411,7 +3456,7 @@ not_found:
 	hci_dev_unlock(hdev);
 }
 
-static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
 
@@ -3559,6 +3604,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
 		hci_extended_inquiry_result_evt(hdev, skb);
 		break;
 
+	case HCI_EV_KEY_REFRESH_COMPLETE:
+		hci_key_refresh_complete_evt(hdev, skb);
+		break;
+
 	case HCI_EV_IO_CAPA_REQUEST:
 		hci_io_capa_request_evt(hdev, skb);
 		break;
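
Two details in the hci_event.c hunks above are worth spelling out. The SCO accept path now uses the __constant_cpu_to_le16()/__constant_cpu_to_le32() helpers for its literal arguments; these are the compile-time forms of the endian converters, so the byte order is fixed when the kernel is built rather than computed at run time. A minimal sketch of the distinction follows; the struct and function names in it are hypothetical, invented only for illustration, while the helpers themselves are the real kernel byteorder API of this era.

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical command structure, for illustration only. */
struct example_cp {
	__le32 tx_bandwidth;
	__le16 max_latency;
	__le16 content_format;
};

static void example_fill(struct example_cp *cp, __u16 voice_setting)
{
	/* Literal arguments: the __constant_* helpers fold to a
	 * little-endian constant at compile time. */
	cp->tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
	cp->max_latency  = __constant_cpu_to_le16(0xffff);

	/* Runtime argument: the plain helper byte-swaps (when the CPU
	 * is big-endian) at run time. */
	cp->content_format = cpu_to_le16(voice_setting);
}

Second, the new hci_key_refresh_complete_evt() handler is wired into the hci_event_packet() switch via HCI_EV_KEY_REFRESH_COMPLETE, so an encryption key refresh now updates conn->sec_level, clears the pending-encrypt flag, and completes any waiting connect or auth callbacks instead of being silently dropped.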
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 5914623f426..a7f04de03d7 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -24,25 +24,7 @@
 
 /* Bluetooth HCI sockets. */
 
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include <linux/compat.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
-#include <net/sock.h>
-
-#include <linux/uaccess.h>
+#include <linux/export.h>
 #include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
@@ -113,11 +95,12 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 		flt = &hci_pi(sk)->filter;
 
 		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
-			      0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask))
+			      0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
+			      &flt->type_mask))
 			continue;
 
 		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
-			register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
+			int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
 
 			if (!hci_test_bit(evt, &flt->event_mask))
 				continue;
@@ -240,7 +223,8 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 		struct hci_mon_hdr *hdr;
 
 		/* Create a private copy with headroom */
-		skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC);
+		skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
+				       GFP_ATOMIC);
 		if (!skb_copy)
 			continue;
 
@@ -495,7 +479,8 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
 }
 
 /* Ioctls that require bound socket */
-static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
+static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
+				unsigned long arg)
 {
 	struct hci_dev *hdev = hci_pi(sk)->hdev;
 
@@ -540,7 +525,8 @@ static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsign
 	}
 }
 
-static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
+			  unsigned long arg)
 {
 	struct sock *sk = sock->sk;
 	void __user *argp = (void __user *) arg;
@@ -601,7 +587,8 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long a
 	}
 }
 
-static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
+			 int addr_len)
 {
 	struct sockaddr_hci haddr;
 	struct sock *sk = sock->sk;
@@ -690,7 +677,8 @@ done:
 	return err;
 }
 
-static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
+static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
+			    int *addr_len, int peer)
 {
 	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
 	struct sock *sk = sock->sk;
@@ -711,13 +699,15 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *add
 	return 0;
 }
 
-static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
+static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
+			  struct sk_buff *skb)
 {
 	__u32 mask = hci_pi(sk)->cmsg_mask;
 
 	if (mask & HCI_CMSG_DIR) {
 		int incoming = bt_cb(skb)->incoming;
-		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);
+		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
+			 &incoming);
 	}
 
 	if (mask & HCI_CMSG_TSTAMP) {
@@ -747,7 +737,7 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
 }
 
 static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
-				struct msghdr *msg, size_t len, int flags)
+			    struct msghdr *msg, size_t len, int flags)
 {
 	int noblock = flags & MSG_DONTWAIT;
 	struct sock *sk = sock->sk;
@@ -857,8 +847,9 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 		u16 ocf = hci_opcode_ocf(opcode);
 
 		if (((ogf > HCI_SFLT_MAX_OGF) ||
-		      !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) &&
-		    !capable(CAP_NET_RAW)) {
+		      !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
+				    &hci_sec_filter.ocf_mask[ogf])) &&
+		    !capable(CAP_NET_RAW)) {
 			err = -EPERM;
 			goto drop;
 		}
@@ -891,7 +882,8 @@ drop:
 	goto done;
 }
 
-static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
+static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
+			       char __user *optval, unsigned int len)
 {
 	struct hci_ufilter uf = { .opcode = 0 };
 	struct sock *sk = sock->sk;
@@ -973,7 +965,8 @@ done:
 	return err;
 }
 
-static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
+static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
+			       char __user *optval, int __user *optlen)
 {
 	struct hci_ufilter uf;
 	struct sock *sk = sock->sk;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 937f3187eaf..a20e61c3653 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -1,10 +1,6 @@
 /* Bluetooth HCI driver model support. */
 
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/debugfs.h>
-#include <linux/seq_file.h>
 #include <linux/module.h>
 
 #include <net/bluetooth/bluetooth.h>
@@ -31,27 +27,30 @@ static inline char *link_typetostr(int type)
 	}
 }
 
-static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_link_type(struct device *dev,
+			      struct device_attribute *attr, char *buf)
 {
 	struct hci_conn *conn = to_hci_conn(dev);
 	return sprintf(buf, "%s\n", link_typetostr(conn->type));
 }
 
-static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_link_address(struct device *dev,
+				 struct device_attribute *attr, char *buf)
 {
 	struct hci_conn *conn = to_hci_conn(dev);
 	return sprintf(buf, "%s\n", batostr(&conn->dst));
 }
 
-static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_link_features(struct device *dev,
+				  struct device_attribute *attr, char *buf)
 {
 	struct hci_conn *conn = to_hci_conn(dev);
 
 	return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-			conn->features[0], conn->features[1],
-			conn->features[2], conn->features[3],
-			conn->features[4], conn->features[5],
-			conn->features[6], conn->features[7]);
+		       conn->features[0], conn->features[1],
+		       conn->features[2], conn->features[3],
+		       conn->features[4], conn->features[5],
+		       conn->features[6], conn->features[7]);
 }
 
 #define LINK_ATTR(_name, _mode, _show, _store) \
@@ -185,19 +184,22 @@ static inline char *host_typetostr(int type)
 	}
 }
 
-static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_bus(struct device *dev,
+			struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
 }
 
-static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_type(struct device *dev,
+			 struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type));
 }
 
-static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_name(struct device *dev,
+			 struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	char name[HCI_MAX_NAME_LENGTH + 1];
@@ -210,55 +212,64 @@ static ssize_t show_name(struct device *dev, struct device_attribute *attr, char
 	return sprintf(buf, "%s\n", name);
 }
 
-static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_class(struct device *dev,
+			  struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
-	return sprintf(buf, "0x%.2x%.2x%.2x\n",
-		       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
+	return sprintf(buf, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
+		       hdev->dev_class[1], hdev->dev_class[0]);
 }
 
-static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_address(struct device *dev,
+			    struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%s\n", batostr(&hdev->bdaddr));
 }
 
-static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_features(struct device *dev,
+			     struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 
 	return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-			hdev->features[0], hdev->features[1],
-			hdev->features[2], hdev->features[3],
-			hdev->features[4], hdev->features[5],
-			hdev->features[6], hdev->features[7]);
+		       hdev->features[0], hdev->features[1],
+		       hdev->features[2], hdev->features[3],
+		       hdev->features[4], hdev->features[5],
+		       hdev->features[6], hdev->features[7]);
 }
 
-static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_manufacturer(struct device *dev,
+				 struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%d\n", hdev->manufacturer);
 }
 
-static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_hci_version(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%d\n", hdev->hci_ver);
 }
 
-static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_hci_revision(struct device *dev,
+				 struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%d\n", hdev->hci_rev);
 }
 
-static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_idle_timeout(struct device *dev,
+				 struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%d\n", hdev->idle_timeout);
 }
 
-static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t store_idle_timeout(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	unsigned int val;
@@ -276,13 +287,16 @@ static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *a
276 return count; 287 return count;
277} 288}
278 289
279static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf) 290static ssize_t show_sniff_max_interval(struct device *dev,
291 struct device_attribute *attr, char *buf)
280{ 292{
281 struct hci_dev *hdev = to_hci_dev(dev); 293 struct hci_dev *hdev = to_hci_dev(dev);
282 return sprintf(buf, "%d\n", hdev->sniff_max_interval); 294 return sprintf(buf, "%d\n", hdev->sniff_max_interval);
283} 295}
284 296
285static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 297static ssize_t store_sniff_max_interval(struct device *dev,
298 struct device_attribute *attr,
299 const char *buf, size_t count)
286{ 300{
287 struct hci_dev *hdev = to_hci_dev(dev); 301 struct hci_dev *hdev = to_hci_dev(dev);
288 u16 val; 302 u16 val;
@@ -300,13 +314,16 @@ static ssize_t store_sniff_max_interval(struct device *dev, struct device_attrib
300 return count; 314 return count;
301} 315}
302 316
303static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf) 317static ssize_t show_sniff_min_interval(struct device *dev,
318 struct device_attribute *attr, char *buf)
304{ 319{
305 struct hci_dev *hdev = to_hci_dev(dev); 320 struct hci_dev *hdev = to_hci_dev(dev);
306 return sprintf(buf, "%d\n", hdev->sniff_min_interval); 321 return sprintf(buf, "%d\n", hdev->sniff_min_interval);
307} 322}
308 323
309static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 324static ssize_t store_sniff_min_interval(struct device *dev,
325 struct device_attribute *attr,
326 const char *buf, size_t count)
310{ 327{
311 struct hci_dev *hdev = to_hci_dev(dev); 328 struct hci_dev *hdev = to_hci_dev(dev);
312 u16 val; 329 u16 val;
@@ -335,11 +352,11 @@ static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
335static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL); 352static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
336 353
337static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR, 354static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
338 show_idle_timeout, store_idle_timeout); 355 show_idle_timeout, store_idle_timeout);
339static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR, 356static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR,
340 show_sniff_max_interval, store_sniff_max_interval); 357 show_sniff_max_interval, store_sniff_max_interval);
341static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR, 358static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
342 show_sniff_min_interval, store_sniff_min_interval); 359 show_sniff_min_interval, store_sniff_min_interval);
343 360
344static struct attribute *bt_host_attrs[] = { 361static struct attribute *bt_host_attrs[] = {
345 &dev_attr_bus.attr, 362 &dev_attr_bus.attr,
@@ -455,8 +472,8 @@ static void print_bt_uuid(struct seq_file *f, u8 *uuid)
455 memcpy(&data5, &uuid[14], 2); 472 memcpy(&data5, &uuid[14], 2);
456 473
457 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n", 474 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
458 ntohl(data0), ntohs(data1), ntohs(data2), 475 ntohl(data0), ntohs(data1), ntohs(data2), ntohs(data3),
459 ntohs(data3), ntohl(data4), ntohs(data5)); 476 ntohl(data4), ntohs(data5));
460} 477}
461 478
462static int uuids_show(struct seq_file *f, void *p) 479static int uuids_show(struct seq_file *f, void *p)
@@ -513,7 +530,7 @@ static int auto_accept_delay_get(void *data, u64 *val)
513} 530}
514 531
515DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, 532DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
516 auto_accept_delay_set, "%llu\n"); 533 auto_accept_delay_set, "%llu\n");
517 534
518void hci_init_sysfs(struct hci_dev *hdev) 535void hci_init_sysfs(struct hci_dev *hdev)
519{ 536{
@@ -547,15 +564,15 @@ int hci_add_sysfs(struct hci_dev *hdev)
547 return 0; 564 return 0;
548 565
549 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs, 566 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
550 hdev, &inquiry_cache_fops); 567 hdev, &inquiry_cache_fops);
551 568
552 debugfs_create_file("blacklist", 0444, hdev->debugfs, 569 debugfs_create_file("blacklist", 0444, hdev->debugfs,
553 hdev, &blacklist_fops); 570 hdev, &blacklist_fops);
554 571
555 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops); 572 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
556 573
557 debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev, 574 debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev,
558 &auto_accept_delay_fops); 575 &auto_accept_delay_fops);
559 return 0; 576 return 0;
560} 577}
561 578
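These attributes follow the standard sysfs show/store pattern: the read handler formats a single value into the page-sized buffer, and the write handler parses the user's input and returns how many bytes it consumed. A minimal sketch of such a handler pair (a hypothetical demo_val attribute, not part of this driver; meant to be wired onto an existing struct device):

    #include <linux/device.h>
    #include <linux/kernel.h>

    /* Hypothetical attribute backing store */
    static unsigned int demo_val;

    static ssize_t show_demo_val(struct device *dev,
    			     struct device_attribute *attr, char *buf)
    {
    	return sprintf(buf, "%u\n", demo_val);
    }

    static ssize_t store_demo_val(struct device *dev,
    			      struct device_attribute *attr,
    			      const char *buf, size_t count)
    {
    	unsigned int val;

    	if (kstrtouint(buf, 0, &val))
    		return -EINVAL;

    	demo_val = val;
    	return count;	/* report how much input was consumed */
    }

    static DEVICE_ATTR(demo_val, S_IRUGO | S_IWUSR,
    		   show_demo_val, store_demo_val);

The attribute would then be exposed with device_create_file(dev, &dev_attr_demo_val), just as the hci_dev attributes above are grouped into bt_host_attrs.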
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 2c20d765b39..ccd985da651 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -21,27 +21,8 @@
 */
 
 #include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/freezer.h>
-#include <linux/fcntl.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
 #include <linux/file.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
 #include <linux/kthread.h>
-#include <net/sock.h>
-
-#include <linux/input.h>
-#include <linux/hid.h>
 #include <linux/hidraw.h>
 
 #include <net/bluetooth/bluetooth.h>
@@ -244,7 +225,8 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
 }
 
 static int __hidp_send_ctrl_message(struct hidp_session *session,
-				    unsigned char hdr, unsigned char *data, int size)
+				    unsigned char hdr, unsigned char *data,
+				    int size)
 {
 	struct sk_buff *skb;
 
@@ -268,7 +250,7 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
 	return 0;
 }
 
-static inline int hidp_send_ctrl_message(struct hidp_session *session,
+static int hidp_send_ctrl_message(struct hidp_session *session,
 				  unsigned char hdr, unsigned char *data, int size)
 {
 	int err;
@@ -471,7 +453,7 @@ static void hidp_set_timer(struct hidp_session *session)
 	mod_timer(&session->timer, jiffies + HZ * session->idle_to);
 }
 
-static inline void hidp_del_timer(struct hidp_session *session)
+static void hidp_del_timer(struct hidp_session *session)
 {
 	if (session->idle_to > 0)
 		del_timer(&session->timer);
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 73a32d705c1..18b3f6892a3 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -20,22 +20,8 @@
    SOFTWARE IS DISCLAIMED.
 */
 
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
+#include <linux/export.h>
 #include <linux/file.h>
-#include <linux/init.h>
-#include <linux/compat.h>
-#include <linux/gfp.h>
-#include <net/sock.h>
 
 #include "hidp.h"
 
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 24f144b72a9..4ca88247b7c 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -30,32 +30,14 @@
 
 #include <linux/module.h>
 
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/socket.h>
-#include <linux/skbuff.h>
-#include <linux/list.h>
-#include <linux/device.h>
 #include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
 #include <linux/crc16.h>
-#include <net/sock.h>
-
-#include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
 #include <net/bluetooth/smp.h>
+#include <net/bluetooth/a2mp.h>
 
 bool disable_ertm;
 
@@ -73,6 +55,9 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
 				   struct l2cap_chan *chan, int err);
 
+static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+		     struct sk_buff_head *skbs, u8 event);
+
 /* ---- L2CAP channels ---- */
 
 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
@@ -196,7 +181,7 @@ static void __l2cap_state_change(struct l2cap_chan *chan, int state)
 	       state_to_string(state));
 
 	chan->state = state;
-	chan->ops->state_change(chan->data, state);
+	chan->ops->state_change(chan, state);
 }
 
 static void l2cap_state_change(struct l2cap_chan *chan, int state)
@@ -224,6 +209,37 @@ static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
 	release_sock(sk);
 }
 
+static void __set_retrans_timer(struct l2cap_chan *chan)
+{
+	if (!delayed_work_pending(&chan->monitor_timer) &&
+	    chan->retrans_timeout) {
+		l2cap_set_timer(chan, &chan->retrans_timer,
+				msecs_to_jiffies(chan->retrans_timeout));
+	}
+}
+
+static void __set_monitor_timer(struct l2cap_chan *chan)
+{
+	__clear_retrans_timer(chan);
+	if (chan->monitor_timeout) {
+		l2cap_set_timer(chan, &chan->monitor_timer,
+				msecs_to_jiffies(chan->monitor_timeout));
+	}
+}
+
+static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
+					       u16 seq)
+{
+	struct sk_buff *skb;
+
+	skb_queue_walk(head, skb) {
+		if (bt_cb(skb)->control.txseq == seq)
+			return skb;
+	}
+
+	return NULL;
+}
+
 /* ---- L2CAP sequence number lists ---- */
 
 /* For ERTM, ordered lists of sequence numbers must be tracked for
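ERTM sequence numbers live on a ring: they wrap at the width of the control field, so lookups like the new l2cap_ertm_seq_in_queue() and helpers such as __next_seq() always compare within that modulus. As a rough illustration of the arithmetic, a minimal user-space sketch (the 0x3f/0x3fff masks are assumptions mirroring the 6-bit enhanced and 14-bit extended TxSeq fields, not the kernel implementation):

    #include <stdint.h>
    #include <stdio.h>

    /* Advance one step around the sequence ring */
    static uint16_t next_seq(uint16_t seq, uint16_t mask)
    {
    	return (seq + 1) & mask;
    }

    /* Distance from b forward to a on the ring, __seq_offset()-style */
    static uint16_t seq_offset(uint16_t a, uint16_t b, uint16_t mask)
    {
    	return (a - b) & mask;
    }

    int main(void)
    {
    	printf("%u\n", next_seq(63, 0x3f));      /* wraps to 0 */
    	printf("%u\n", seq_offset(2, 60, 0x3f)); /* 6 frames outstanding */
    	return 0;
    }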
@@ -366,7 +382,7 @@ static void l2cap_chan_timeout(struct work_struct *work)
 
 	l2cap_chan_unlock(chan);
 
-	chan->ops->close(chan->data);
+	chan->ops->close(chan);
 	mutex_unlock(&conn->chan_lock);
 
 	l2cap_chan_put(chan);
@@ -392,6 +408,9 @@ struct l2cap_chan *l2cap_chan_create(void)
 
 	atomic_set(&chan->refcnt, 1);
 
+	/* This flag is cleared in l2cap_chan_ready() */
+	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
+
 	BT_DBG("chan %p", chan);
 
 	return chan;
@@ -430,7 +449,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 	case L2CAP_CHAN_CONN_ORIENTED:
 		if (conn->hcon->type == LE_LINK) {
 			/* LE connection */
-			chan->omtu = L2CAP_LE_DEFAULT_MTU;
+			chan->omtu = L2CAP_DEFAULT_MTU;
 			chan->scid = L2CAP_CID_LE_DATA;
 			chan->dcid = L2CAP_CID_LE_DATA;
 		} else {
@@ -447,6 +466,13 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 		chan->omtu = L2CAP_DEFAULT_MTU;
 		break;
 
+	case L2CAP_CHAN_CONN_FIX_A2MP:
+		chan->scid = L2CAP_CID_A2MP;
+		chan->dcid = L2CAP_CID_A2MP;
+		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
+		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
+		break;
+
 	default:
 		/* Raw socket can send/recv signalling messages only */
 		chan->scid = L2CAP_CID_SIGNALING;
@@ -466,18 +492,16 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 	list_add(&chan->list, &conn->chan_l);
 }
 
-static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 {
 	mutex_lock(&conn->chan_lock);
 	__l2cap_chan_add(conn, chan);
 	mutex_unlock(&conn->chan_lock);
 }
 
-static void l2cap_chan_del(struct l2cap_chan *chan, int err)
+void l2cap_chan_del(struct l2cap_chan *chan, int err)
 {
-	struct sock *sk = chan->sk;
 	struct l2cap_conn *conn = chan->conn;
-	struct sock *parent = bt_sk(sk)->parent;
 
 	__clear_chan_timer(chan);
 
@@ -490,34 +514,22 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
 		l2cap_chan_put(chan);
 
 		chan->conn = NULL;
-		hci_conn_put(conn->hcon);
-	}
-
-	lock_sock(sk);
-
-	__l2cap_state_change(chan, BT_CLOSED);
-	sock_set_flag(sk, SOCK_ZAPPED);
 
-	if (err)
-		__l2cap_chan_set_err(chan, err);
+		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
+			hci_conn_put(conn->hcon);
+	}
 
-	if (parent) {
-		bt_accept_unlink(sk);
-		parent->sk_data_ready(parent, 0);
-	} else
-		sk->sk_state_change(sk);
+	if (chan->ops->teardown)
+		chan->ops->teardown(chan, err);
 
-	release_sock(sk);
-
-	if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
-	      test_bit(CONF_INPUT_DONE, &chan->conf_state)))
+	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
 		return;
 
-	skb_queue_purge(&chan->tx_q);
-
-	if (chan->mode == L2CAP_MODE_ERTM) {
-		struct srej_list *l, *tmp;
+	switch(chan->mode) {
+	case L2CAP_MODE_BASIC:
+		break;
 
+	case L2CAP_MODE_ERTM:
 		__clear_retrans_timer(chan);
 		__clear_monitor_timer(chan);
 		__clear_ack_timer(chan);
@@ -526,30 +538,15 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
 
 		l2cap_seq_list_free(&chan->srej_list);
 		l2cap_seq_list_free(&chan->retrans_list);
-		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
-			list_del(&l->list);
-			kfree(l);
-		}
-	}
-}
-
-static void l2cap_chan_cleanup_listen(struct sock *parent)
-{
-	struct sock *sk;
-
-	BT_DBG("parent %p", parent);
 
-	/* Close not yet accepted channels */
-	while ((sk = bt_accept_dequeue(parent, NULL))) {
-		struct l2cap_chan *chan = l2cap_pi(sk)->chan;
-
-		l2cap_chan_lock(chan);
-		__clear_chan_timer(chan);
-		l2cap_chan_close(chan, ECONNRESET);
-		l2cap_chan_unlock(chan);
+		/* fall through */
 
-		chan->ops->close(chan->data);
+	case L2CAP_MODE_STREAMING:
+		skb_queue_purge(&chan->tx_q);
+		break;
 	}
+
+	return;
 }
 
 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
@@ -562,12 +559,8 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
 
 	switch (chan->state) {
 	case BT_LISTEN:
-		lock_sock(sk);
-		l2cap_chan_cleanup_listen(sk);
-
-		__l2cap_state_change(chan, BT_CLOSED);
-		sock_set_flag(sk, SOCK_ZAPPED);
-		release_sock(sk);
+		if (chan->ops->teardown)
+			chan->ops->teardown(chan, 0);
 		break;
 
 	case BT_CONNECTED:
@@ -595,7 +588,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
 			rsp.scid = cpu_to_le16(chan->dcid);
 			rsp.dcid = cpu_to_le16(chan->scid);
 			rsp.result = cpu_to_le16(result);
-			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
 				       sizeof(rsp), &rsp);
 		}
@@ -609,9 +602,8 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
 		break;
 
 	default:
-		lock_sock(sk);
-		sock_set_flag(sk, SOCK_ZAPPED);
-		release_sock(sk);
+		if (chan->ops->teardown)
+			chan->ops->teardown(chan, 0);
 		break;
 	}
 }
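The rewrite above replaces direct socket manipulation with an optional teardown callback on the channel's ops table, so the core no longer needs to know whether a socket (or anything else) backs the channel. A minimal sketch of that idiom, using hypothetical types rather than the kernel structures:

    #include <stdio.h>

    struct chan;

    /* Hypothetical ops table; callbacks the owner may leave NULL */
    struct chan_ops {
    	void (*close)(struct chan *chan);
    	void (*teardown)(struct chan *chan, int err);	/* optional */
    };

    struct chan {
    	const struct chan_ops *ops;
    };

    static void chan_del(struct chan *chan, int err)
    {
    	/* Invoke the hook only if the owner installed one */
    	if (chan->ops->teardown)
    		chan->ops->teardown(chan, err);
    }

    static void sock_teardown(struct chan *chan, int err)
    {
    	printf("teardown, err=%d\n", err);
    }

    int main(void)
    {
    	static const struct chan_ops ops = { .teardown = sock_teardown };
    	struct chan c = { .ops = &ops };

    	chan_del(&c, 0);
    	return 0;
    }

NULL-checking the hook keeps the callback truly optional: owners that need no cleanup simply omit it from their ops table.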
@@ -627,7 +619,7 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
 		default:
 			return HCI_AT_NO_BONDING;
 		}
-	} else if (chan->psm == cpu_to_le16(0x0001)) {
+	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
 		if (chan->sec_level == BT_SECURITY_LOW)
 			chan->sec_level = BT_SECURITY_SDP;
 
@@ -773,9 +765,11 @@ static inline void __unpack_control(struct l2cap_chan *chan,
 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
 		__unpack_extended_control(get_unaligned_le32(skb->data),
 					  &bt_cb(skb)->control);
+		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
 	} else {
 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
 					  &bt_cb(skb)->control);
+		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
 	}
 }
 
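get_unaligned_le16()/get_unaligned_le32() read the little-endian control field from a buffer that need not be aligned, and the added skb_pull() calls then consume those header bytes so later parsing starts at the payload. A rough user-space equivalent of that read-then-advance step (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* Endianness- and alignment-safe little-endian reads */
    static uint16_t get_le16(const uint8_t *p)
    {
    	return (uint16_t)(p[0] | (p[1] << 8));
    }

    static uint32_t get_le32(const uint8_t *p)
    {
    	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
    	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    int main(void)
    {
    	uint8_t pdu[] = { 0x34, 0x12, 0xaa };
    	const uint8_t *data = pdu;

    	uint16_t control = get_le16(data);
    	data += 2;			/* mirrors skb_pull(skb, 2) */

    	printf("control 0x%04x, next byte 0x%02x\n", control, *data);
    	return 0;
    }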
@@ -830,66 +824,102 @@ static inline void __pack_control(struct l2cap_chan *chan,
 	}
 }
 
-static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
-{
-	struct sk_buff *skb;
-	struct l2cap_hdr *lh;
-	struct l2cap_conn *conn = chan->conn;
-	int count, hlen;
-
-	if (chan->state != BT_CONNECTED)
-		return;
-
-	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
-		hlen = L2CAP_EXT_HDR_SIZE;
-	else
-		hlen = L2CAP_ENH_HDR_SIZE;
-
-	if (chan->fcs == L2CAP_FCS_CRC16)
-		hlen += L2CAP_FCS_SIZE;
-
-	BT_DBG("chan %p, control 0x%8.8x", chan, control);
-
-	count = min_t(unsigned int, conn->mtu, hlen);
-
-	control |= __set_sframe(chan);
-
-	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
-		control |= __set_ctrl_final(chan);
-
-	if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
-		control |= __set_ctrl_poll(chan);
-
-	skb = bt_skb_alloc(count, GFP_ATOMIC);
-	if (!skb)
-		return;
-
-	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
-	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
-	lh->cid = cpu_to_le16(chan->dcid);
-
-	__put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
-
-	if (chan->fcs == L2CAP_FCS_CRC16) {
-		u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
-		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
-	}
-
-	skb->priority = HCI_PRIO_MAX;
-	l2cap_do_send(chan, skb);
-}
-
-static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
-{
-	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
-		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
-		set_bit(CONN_RNR_SENT, &chan->conn_state);
-	} else
-		control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
-
-	control |= __set_reqseq(chan, chan->buffer_seq);
-
-	l2cap_send_sframe(chan, control);
+static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
+{
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		return L2CAP_EXT_HDR_SIZE;
+	else
+		return L2CAP_ENH_HDR_SIZE;
+}
+
+static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
+					       u32 control)
+{
+	struct sk_buff *skb;
+	struct l2cap_hdr *lh;
+	int hlen = __ertm_hdr_size(chan);
+
+	if (chan->fcs == L2CAP_FCS_CRC16)
+		hlen += L2CAP_FCS_SIZE;
+
+	skb = bt_skb_alloc(hlen, GFP_KERNEL);
+
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
+	lh->cid = cpu_to_le16(chan->dcid);
+
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
+	else
+		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
+
+	if (chan->fcs == L2CAP_FCS_CRC16) {
+		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
+		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
+	}
+
+	skb->priority = HCI_PRIO_MAX;
+	return skb;
+}
+
+static void l2cap_send_sframe(struct l2cap_chan *chan,
+			      struct l2cap_ctrl *control)
+{
+	struct sk_buff *skb;
+	u32 control_field;
+
+	BT_DBG("chan %p, control %p", chan, control);
+
+	if (!control->sframe)
+		return;
+
+	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
+	    !control->poll)
+		control->final = 1;
+
+	if (control->super == L2CAP_SUPER_RR)
+		clear_bit(CONN_RNR_SENT, &chan->conn_state);
+	else if (control->super == L2CAP_SUPER_RNR)
+		set_bit(CONN_RNR_SENT, &chan->conn_state);
+
+	if (control->super != L2CAP_SUPER_SREJ) {
+		chan->last_acked_seq = control->reqseq;
+		__clear_ack_timer(chan);
+	}
+
+	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
+	       control->final, control->poll, control->super);
+
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		control_field = __pack_extended_control(control);
+	else
+		control_field = __pack_enhanced_control(control);
+
+	skb = l2cap_create_sframe_pdu(chan, control_field);
+	if (!IS_ERR(skb))
+		l2cap_do_send(chan, skb);
+}
+
+static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
+{
+	struct l2cap_ctrl control;
+
+	BT_DBG("chan %p, poll %d", chan, poll);
+
+	memset(&control, 0, sizeof(control));
+	control.sframe = 1;
+	control.poll = poll;
+
+	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
+		control.super = L2CAP_SUPER_RNR;
+	else
+		control.super = L2CAP_SUPER_RR;
+
+	control.reqseq = chan->buffer_seq;
+	l2cap_send_sframe(chan, &control);
 }
 
 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
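l2cap_create_sframe_pdu() lays the PDU out sequentially: the basic L2CAP header (length plus channel ID), then the 16- or 32-bit control field, then the optional 2-byte FCS computed over everything written so far. A standalone sketch of that byte-level assembly, with simplified sizes and a stand-in checksum (the kernel uses crc16() and the L2CAP_*_SIZE constants):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    static void put_le16(uint8_t *p, uint16_t v)
    {
    	p[0] = v & 0xff;
    	p[1] = v >> 8;
    }

    static uint16_t fake_fcs(const uint8_t *p, size_t len)
    {
    	uint16_t sum = 0;
    	while (len--)
    		sum += *p++;
    	return sum;	/* stand-in for the real CRC-16 */
    }

    /* Assemble: | len (2) | cid (2) | control (2) | fcs (2) | */
    static size_t build_sframe(uint8_t *buf, uint16_t cid, uint16_t control)
    {
    	size_t off = 0;

    	put_le16(buf + off, 2 + 2);	/* bytes after header: ctrl + fcs */
    	off += 2;
    	put_le16(buf + off, cid);
    	off += 2;
    	put_le16(buf + off, control);
    	off += 2;
    	put_le16(buf + off, fake_fcs(buf, off)); /* FCS covers prior bytes */
    	off += 2;

    	return off;
    }

    int main(void)
    {
    	uint8_t pdu[8];

    	printf("built %zu-byte S-frame\n", build_sframe(pdu, 0x0040, 0x0101));
    	return 0;
    }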
@@ -914,25 +944,13 @@ static void l2cap_send_conn_req(struct l2cap_chan *chan)
 
 static void l2cap_chan_ready(struct l2cap_chan *chan)
 {
-	struct sock *sk = chan->sk;
-	struct sock *parent;
-
-	lock_sock(sk);
-
-	parent = bt_sk(sk)->parent;
-
-	BT_DBG("sk %p, parent %p", sk, parent);
-
+	/* This clears all conf flags, including CONF_NOT_COMPLETE */
 	chan->conf_state = 0;
 	__clear_chan_timer(chan);
 
-	__l2cap_state_change(chan, BT_CONNECTED);
-	sk->sk_state_change(sk);
-
-	if (parent)
-		parent->sk_data_ready(parent, 0);
+	chan->state = BT_CONNECTED;
 
-	release_sock(sk);
+	chan->ops->ready(chan);
 }
 
938static void l2cap_do_start(struct l2cap_chan *chan) 956static void l2cap_do_start(struct l2cap_chan *chan)
@@ -953,7 +971,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
953 l2cap_send_conn_req(chan); 971 l2cap_send_conn_req(chan);
954 } else { 972 } else {
955 struct l2cap_info_req req; 973 struct l2cap_info_req req;
956 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 974 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
957 975
958 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; 976 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
959 conn->info_ident = l2cap_get_ident(conn); 977 conn->info_ident = l2cap_get_ident(conn);
@@ -995,6 +1013,11 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c
 		__clear_ack_timer(chan);
 	}
 
+	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
+		__l2cap_state_change(chan, BT_DISCONN);
+		return;
+	}
+
 	req.dcid = cpu_to_le16(chan->dcid);
 	req.scid = cpu_to_le16(chan->scid);
 	l2cap_send_cmd(conn, l2cap_get_ident(conn),
@@ -1053,20 +1076,20 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
 			if (test_bit(BT_SK_DEFER_SETUP,
 				     &bt_sk(sk)->flags)) {
 				struct sock *parent = bt_sk(sk)->parent;
-				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
-				rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
+				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
+				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
 				if (parent)
 					parent->sk_data_ready(parent, 0);
 
 			} else {
 				__l2cap_state_change(chan, BT_CONFIG);
-				rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
-				rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+				rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
+				rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
 			}
 			release_sock(sk);
 		} else {
-			rsp.result = cpu_to_le16(L2CAP_CR_PEND);
-			rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
+			rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
+			rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
 		}
 
 		l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
@@ -1150,13 +1173,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
 
 	lock_sock(parent);
 
-	/* Check for backlog size */
-	if (sk_acceptq_is_full(parent)) {
-		BT_DBG("backlog full %d", parent->sk_ack_backlog);
-		goto clean;
-	}
-
-	chan = pchan->ops->new_connection(pchan->data);
+	chan = pchan->ops->new_connection(pchan);
 	if (!chan)
 		goto clean;
 
@@ -1171,10 +1188,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
 
 	l2cap_chan_add(conn, chan);
 
-	__set_chan_timer(chan, sk->sk_sndtimeo);
-
-	__l2cap_state_change(chan, BT_CONNECTED);
-	parent->sk_data_ready(parent, 0);
+	l2cap_chan_ready(chan);
 
 clean:
 	release_sock(parent);
@@ -1198,6 +1212,11 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
 
 		l2cap_chan_lock(chan);
 
+		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
+			l2cap_chan_unlock(chan);
+			continue;
+		}
+
 		if (conn->hcon->type == LE_LINK) {
 			if (smp_conn_security(conn, chan->sec_level))
 				l2cap_chan_ready(chan);
@@ -1270,7 +1289,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
 
 		l2cap_chan_unlock(chan);
 
-		chan->ops->close(chan->data);
+		chan->ops->close(chan);
 		l2cap_chan_put(chan);
 	}
 
@@ -1295,7 +1314,12 @@ static void security_timeout(struct work_struct *work)
 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
 					       security_timer.work);
 
-	l2cap_conn_del(conn->hcon, ETIMEDOUT);
+	BT_DBG("conn %p", conn);
+
+	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
+		smp_chan_destroy(conn);
+		l2cap_conn_del(conn->hcon, ETIMEDOUT);
+	}
 }
 
 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
@@ -1439,21 +1463,17 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
 		goto done;
 	}
 
-	lock_sock(sk);
-
-	switch (sk->sk_state) {
+	switch (chan->state) {
 	case BT_CONNECT:
 	case BT_CONNECT2:
 	case BT_CONFIG:
 		/* Already connecting */
 		err = 0;
-		release_sock(sk);
 		goto done;
 
 	case BT_CONNECTED:
 		/* Already connected */
 		err = -EISCONN;
-		release_sock(sk);
 		goto done;
 
 	case BT_OPEN:
@@ -1463,13 +1483,12 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
 
 	default:
 		err = -EBADFD;
-		release_sock(sk);
 		goto done;
 	}
 
 	/* Set destination address and psm */
+	lock_sock(sk);
 	bacpy(&bt_sk(sk)->dst, dst);
-
 	release_sock(sk);
 
 	chan->psm = psm;
@@ -1571,23 +1590,20 @@ int __l2cap_wait_ack(struct sock *sk)
 static void l2cap_monitor_timeout(struct work_struct *work)
 {
 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
 					       monitor_timer.work);
 
 	BT_DBG("chan %p", chan);
 
 	l2cap_chan_lock(chan);
 
-	if (chan->retry_count >= chan->remote_max_tx) {
-		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
+	if (!chan->conn) {
 		l2cap_chan_unlock(chan);
 		l2cap_chan_put(chan);
 		return;
 	}
 
-	chan->retry_count++;
-	__set_monitor_timer(chan);
+	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
 
-	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
 	l2cap_chan_unlock(chan);
 	l2cap_chan_put(chan);
 }
@@ -1595,234 +1611,293 @@ static void l2cap_monitor_timeout(struct work_struct *work)
 static void l2cap_retrans_timeout(struct work_struct *work)
 {
 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
 					       retrans_timer.work);
 
 	BT_DBG("chan %p", chan);
 
 	l2cap_chan_lock(chan);
 
-	chan->retry_count = 1;
-	__set_monitor_timer(chan);
-
-	set_bit(CONN_WAIT_F, &chan->conn_state);
-
-	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
+	if (!chan->conn) {
+		l2cap_chan_unlock(chan);
+		l2cap_chan_put(chan);
+		return;
+	}
 
+	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
 	l2cap_chan_unlock(chan);
 	l2cap_chan_put(chan);
 }
 
-static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
-{
-	struct sk_buff *skb;
-
-	while ((skb = skb_peek(&chan->tx_q)) &&
-	       chan->unacked_frames) {
-		if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
-			break;
-
-		skb = skb_dequeue(&chan->tx_q);
-		kfree_skb(skb);
-
-		chan->unacked_frames--;
-	}
-
-	if (!chan->unacked_frames)
-		__clear_retrans_timer(chan);
-}
-
-static void l2cap_streaming_send(struct l2cap_chan *chan)
-{
-	struct sk_buff *skb;
-	u32 control;
-	u16 fcs;
-
-	while ((skb = skb_dequeue(&chan->tx_q))) {
-		control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
-		control |= __set_txseq(chan, chan->next_tx_seq);
-		control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
-		__put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
-
-		if (chan->fcs == L2CAP_FCS_CRC16) {
-			fcs = crc16(0, (u8 *)skb->data,
-				    skb->len - L2CAP_FCS_SIZE);
-			put_unaligned_le16(fcs,
-					   skb->data + skb->len - L2CAP_FCS_SIZE);
-		}
-
-		l2cap_do_send(chan, skb);
-
-		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
-	}
-}
+static void l2cap_streaming_send(struct l2cap_chan *chan,
+				 struct sk_buff_head *skbs)
+{
+	struct sk_buff *skb;
+	struct l2cap_ctrl *control;
+
+	BT_DBG("chan %p, skbs %p", chan, skbs);
+
+	skb_queue_splice_tail_init(skbs, &chan->tx_q);
+
+	while (!skb_queue_empty(&chan->tx_q)) {
+
+		skb = skb_dequeue(&chan->tx_q);
+
+		bt_cb(skb)->control.retries = 1;
+		control = &bt_cb(skb)->control;
+
+		control->reqseq = 0;
+		control->txseq = chan->next_tx_seq;
+
+		__pack_control(chan, control, skb);
+
+		if (chan->fcs == L2CAP_FCS_CRC16) {
+			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
+			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
+		}
+
+		l2cap_do_send(chan, skb);
+
+		BT_DBG("Sent txseq %d", (int)control->txseq);
+
+		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
+		chan->frames_sent++;
+	}
+}
 
-static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
-{
-	struct sk_buff *skb, *tx_skb;
-	u16 fcs;
-	u32 control;
-
-	skb = skb_peek(&chan->tx_q);
-	if (!skb)
-		return;
-
-	while (bt_cb(skb)->control.txseq != tx_seq) {
-		if (skb_queue_is_last(&chan->tx_q, skb))
-			return;
-
-		skb = skb_queue_next(&chan->tx_q, skb);
-	}
-
-	if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
-	    chan->remote_max_tx) {
-		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
-		return;
-	}
-
-	tx_skb = skb_clone(skb, GFP_ATOMIC);
-	bt_cb(skb)->control.retries++;
-
-	control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
-	control &= __get_sar_mask(chan);
-
-	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
-		control |= __set_ctrl_final(chan);
-
-	control |= __set_reqseq(chan, chan->buffer_seq);
-	control |= __set_txseq(chan, tx_seq);
-
-	__put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
-
-	if (chan->fcs == L2CAP_FCS_CRC16) {
-		fcs = crc16(0, (u8 *)tx_skb->data,
-			    tx_skb->len - L2CAP_FCS_SIZE);
-		put_unaligned_le16(fcs,
-				   tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
-	}
-
-	l2cap_do_send(chan, tx_skb);
-}
+static int l2cap_ertm_send(struct l2cap_chan *chan)
+{
+	struct sk_buff *skb, *tx_skb;
+	struct l2cap_ctrl *control;
+	int sent = 0;
+
+	BT_DBG("chan %p", chan);
+
+	if (chan->state != BT_CONNECTED)
+		return -ENOTCONN;
+
+	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
+		return 0;
+
+	while (chan->tx_send_head &&
+	       chan->unacked_frames < chan->remote_tx_win &&
+	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
+
+		skb = chan->tx_send_head;
+
+		bt_cb(skb)->control.retries = 1;
+		control = &bt_cb(skb)->control;
+
+		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
+			control->final = 1;
+
+		control->reqseq = chan->buffer_seq;
+		chan->last_acked_seq = chan->buffer_seq;
+		control->txseq = chan->next_tx_seq;
+
+		__pack_control(chan, control, skb);
+
+		if (chan->fcs == L2CAP_FCS_CRC16) {
+			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
+			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
+		}
+
+		/* Clone after data has been modified. Data is assumed to be
+		   read-only (for locking purposes) on cloned sk_buffs.
+		 */
+		tx_skb = skb_clone(skb, GFP_KERNEL);
+
+		if (!tx_skb)
+			break;
+
+		__set_retrans_timer(chan);
+
+		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
+		chan->unacked_frames++;
+		chan->frames_sent++;
+		sent++;
+
+		if (skb_queue_is_last(&chan->tx_q, skb))
+			chan->tx_send_head = NULL;
+		else
+			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
+
+		l2cap_do_send(chan, tx_skb);
+		BT_DBG("Sent txseq %d", (int)control->txseq);
+	}
+
+	BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
+	       (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));
+
+	return sent;
+}
 
-static int l2cap_ertm_send(struct l2cap_chan *chan)
-{
-	struct sk_buff *skb, *tx_skb;
-	u16 fcs;
-	u32 control;
-	int nsent = 0;
-
-	if (chan->state != BT_CONNECTED)
-		return -ENOTCONN;
-
-	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
-		return 0;
-
-	while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
-
-		if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
-		    chan->remote_max_tx) {
-			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
-			break;
-		}
-
-		tx_skb = skb_clone(skb, GFP_ATOMIC);
-
-		bt_cb(skb)->control.retries++;
-
-		control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
-		control &= __get_sar_mask(chan);
-
-		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
-			control |= __set_ctrl_final(chan);
-
-		control |= __set_reqseq(chan, chan->buffer_seq);
-		control |= __set_txseq(chan, chan->next_tx_seq);
-		control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
-
-		__put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
-
-		if (chan->fcs == L2CAP_FCS_CRC16) {
-			fcs = crc16(0, (u8 *)skb->data,
-				    tx_skb->len - L2CAP_FCS_SIZE);
-			put_unaligned_le16(fcs, skb->data +
-					   tx_skb->len - L2CAP_FCS_SIZE);
-		}
-
-		l2cap_do_send(chan, tx_skb);
-
-		__set_retrans_timer(chan);
-
-		bt_cb(skb)->control.txseq = chan->next_tx_seq;
-
-		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
-
-		if (bt_cb(skb)->control.retries == 1) {
-			chan->unacked_frames++;
-
-			if (!nsent++)
-				__clear_ack_timer(chan);
-		}
-
-		chan->frames_sent++;
-
-		if (skb_queue_is_last(&chan->tx_q, skb))
-			chan->tx_send_head = NULL;
-		else
-			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
-	}
-
-	return nsent;
-}
+static void l2cap_ertm_resend(struct l2cap_chan *chan)
+{
+	struct l2cap_ctrl control;
+	struct sk_buff *skb;
+	struct sk_buff *tx_skb;
+	u16 seq;
+
+	BT_DBG("chan %p", chan);
+
+	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
+		return;
+
+	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
+		seq = l2cap_seq_list_pop(&chan->retrans_list);
+
+		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
+		if (!skb) {
+			BT_DBG("Error: Can't retransmit seq %d, frame missing",
+			       seq);
+			continue;
+		}
+
+		bt_cb(skb)->control.retries++;
+		control = bt_cb(skb)->control;
+
+		if (chan->max_tx != 0 &&
+		    bt_cb(skb)->control.retries > chan->max_tx) {
+			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
+			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+			l2cap_seq_list_clear(&chan->retrans_list);
+			break;
+		}
+
+		control.reqseq = chan->buffer_seq;
+		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
+			control.final = 1;
+		else
+			control.final = 0;
+
+		if (skb_cloned(skb)) {
+			/* Cloned sk_buffs are read-only, so we need a
+			 * writeable copy
+			 */
+			tx_skb = skb_copy(skb, GFP_ATOMIC);
+		} else {
+			tx_skb = skb_clone(skb, GFP_ATOMIC);
+		}
+
+		if (!tx_skb) {
+			l2cap_seq_list_clear(&chan->retrans_list);
+			break;
+		}
+
+		/* Update skb contents */
+		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
+			put_unaligned_le32(__pack_extended_control(&control),
+					   tx_skb->data + L2CAP_HDR_SIZE);
+		} else {
+			put_unaligned_le16(__pack_enhanced_control(&control),
+					   tx_skb->data + L2CAP_HDR_SIZE);
+		}
+
+		if (chan->fcs == L2CAP_FCS_CRC16) {
+			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
+			put_unaligned_le16(fcs, skb_put(tx_skb,
+							L2CAP_FCS_SIZE));
+		}
+
+		l2cap_do_send(chan, tx_skb);
+
+		BT_DBG("Resent txseq %d", control.txseq);
+
+		chan->last_acked_seq = chan->buffer_seq;
+	}
+}
 
-static int l2cap_retransmit_frames(struct l2cap_chan *chan)
-{
-	int ret;
-
-	if (!skb_queue_empty(&chan->tx_q))
-		chan->tx_send_head = chan->tx_q.next;
-
-	chan->next_tx_seq = chan->expected_ack_seq;
-	ret = l2cap_ertm_send(chan);
-	return ret;
-}
+static void l2cap_retransmit(struct l2cap_chan *chan,
+			     struct l2cap_ctrl *control)
+{
+	BT_DBG("chan %p, control %p", chan, control);
+
+	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
+	l2cap_ertm_resend(chan);
+}
 
-static void __l2cap_send_ack(struct l2cap_chan *chan)
-{
-	u32 control = 0;
-
-	control |= __set_reqseq(chan, chan->buffer_seq);
-
-	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
-		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
-		set_bit(CONN_RNR_SENT, &chan->conn_state);
-		l2cap_send_sframe(chan, control);
-		return;
-	}
-
-	if (l2cap_ertm_send(chan) > 0)
-		return;
-
-	control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
-	l2cap_send_sframe(chan, control);
-}
+static void l2cap_retransmit_all(struct l2cap_chan *chan,
+				 struct l2cap_ctrl *control)
+{
+	struct sk_buff *skb;
+
+	BT_DBG("chan %p, control %p", chan, control);
+
+	if (control->poll)
+		set_bit(CONN_SEND_FBIT, &chan->conn_state);
+
+	l2cap_seq_list_clear(&chan->retrans_list);
+
+	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
+		return;
+
+	if (chan->unacked_frames) {
+		skb_queue_walk(&chan->tx_q, skb) {
+			if (bt_cb(skb)->control.txseq == control->reqseq ||
+			    skb == chan->tx_send_head)
+				break;
+		}
+
+		skb_queue_walk_from(&chan->tx_q, skb) {
+			if (skb == chan->tx_send_head)
+				break;
+
+			l2cap_seq_list_append(&chan->retrans_list,
+					      bt_cb(skb)->control.txseq);
+		}
+
+		l2cap_ertm_resend(chan);
+	}
+}
 
-static void l2cap_send_ack(struct l2cap_chan *chan)
-{
-	__clear_ack_timer(chan);
-	__l2cap_send_ack(chan);
-}
-
-static void l2cap_send_srejtail(struct l2cap_chan *chan)
-{
-	struct srej_list *tail;
-	u32 control;
-
-	control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
-	control |= __set_ctrl_final(chan);
-
-	tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
-	control |= __set_reqseq(chan, tail->tx_seq);
-
-	l2cap_send_sframe(chan, control);
-}
+static void l2cap_send_ack(struct l2cap_chan *chan)
+{
+	struct l2cap_ctrl control;
+	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
+					 chan->last_acked_seq);
+	int threshold;
+
+	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
+	       chan, chan->last_acked_seq, chan->buffer_seq);
+
+	memset(&control, 0, sizeof(control));
+	control.sframe = 1;
+
+	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
+	    chan->rx_state == L2CAP_RX_STATE_RECV) {
+		__clear_ack_timer(chan);
+		control.super = L2CAP_SUPER_RNR;
+		control.reqseq = chan->buffer_seq;
+		l2cap_send_sframe(chan, &control);
+	} else {
+		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
+			l2cap_ertm_send(chan);
+			/* If any i-frames were sent, they included an ack */
+			if (chan->buffer_seq == chan->last_acked_seq)
+				frames_to_ack = 0;
+		}
+
+		/* Ack now if the tx window is 3/4ths full.
+		 * Calculate without mul or div
+		 */
+		threshold = chan->tx_win;
+		threshold += threshold << 1;
+		threshold >>= 2;
+
+		BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
+		       threshold);
+
+		if (frames_to_ack >= threshold) {
+			__clear_ack_timer(chan);
+			control.super = L2CAP_SUPER_RR;
+			control.reqseq = chan->buffer_seq;
+			l2cap_send_sframe(chan, &control);
+			frames_to_ack = 0;
+		}
+
+		if (frames_to_ack)
+			__set_ack_timer(chan);
+	}
+}
 
 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
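The acknowledgement heuristic in the new l2cap_send_ack() fires once the receiver has fallen three quarters of a window behind, and the comment's "without mul or div" trick is `threshold += threshold << 1; threshold >>= 2;`, i.e. (w + 2w) / 4 = 3w/4 using only a shift and an add. A quick check of the arithmetic in isolation:

    #include <stdio.h>

    /* 3/4 of w without multiply or divide: (w + (w << 1)) >> 2 == 3w/4 */
    static int three_quarters(int w)
    {
    	int t = w;

    	t += t << 1;	/* t = 3w */
    	t >>= 2;	/* t = 3w / 4, truncated */
    	return t;
    }

    int main(void)
    {
    	for (int w = 1; w <= 64; w <<= 1)
    		printf("tx_win %2d -> threshold %2d\n", w, three_quarters(w));
    	return 0;
    }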
@@ -1951,10 +2026,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
 	if (!conn)
 		return ERR_PTR(-ENOTCONN);
 
-	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
-		hlen = L2CAP_EXT_HDR_SIZE;
-	else
-		hlen = L2CAP_ENH_HDR_SIZE;
+	hlen = __ertm_hdr_size(chan);
 
 	if (sdulen)
 		hlen += L2CAP_SDULEN_SIZE;
@@ -1974,7 +2046,11 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
 	lh->cid = cpu_to_le16(chan->dcid);
 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
 
-	__put_control(chan, 0, skb_put(skb, __ctrl_size(chan)));
+	/* Control header is populated later */
+	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
+	else
+		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
 
 	if (sdulen)
 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
@@ -1985,9 +2061,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
 		return ERR_PTR(err);
 	}
 
-	if (chan->fcs == L2CAP_FCS_CRC16)
-		put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
-
+	bt_cb(skb)->control.fcs = chan->fcs;
 	bt_cb(skb)->control.retries = 0;
 	return skb;
 }
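With this change the FCS is no longer reserved at PDU-build time; the transmit paths append it by running crc16() over the bytes already in the skb. lib/crc16 implements the reflected CRC-16 (polynomial 0x8005, processed bit-reversed as 0xA001), seeded here with 0. A compact bitwise equivalent, assuming that variant:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Reflected CRC-16, poly 0xA001, matching lib/crc16 (assumed) */
    static uint16_t crc16(uint16_t crc, const uint8_t *buf, size_t len)
    {
    	while (len--) {
    		crc ^= *buf++;
    		for (int i = 0; i < 8; i++)
    			crc = (crc >> 1) ^ ((crc & 1) ? 0xA001 : 0);
    	}
    	return crc;
    }

    int main(void)
    {
    	const uint8_t frame[] = { 0x04, 0x00, 0x40, 0x00, 0x01, 0x01 };
    	uint16_t fcs = crc16(0, frame, sizeof(frame));

    	/* The FCS trailer is stored little-endian after the payload */
    	printf("fcs = 0x%04x (bytes %02x %02x)\n",
    	       fcs, fcs & 0xff, fcs >> 8);
    	return 0;
    }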
@@ -1999,7 +2073,6 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
 	struct sk_buff *skb;
 	u16 sdu_len;
 	size_t pdu_len;
-	int err = 0;
 	u8 sar;
 
 	BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
@@ -2015,7 +2088,10 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
 	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
 
 	/* Adjust for largest possible L2CAP overhead. */
-	pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
+	if (chan->fcs)
+		pdu_len -= L2CAP_FCS_SIZE;
+
+	pdu_len -= __ertm_hdr_size(chan);
 
 	/* Remote device may have requested smaller PDUs */
 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
@@ -2055,7 +2131,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
 		}
 	}
 
-	return err;
+	return 0;
 }
 
 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
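Segmentation now charges only the overhead the channel will actually use: the FCS bytes only when FCS is enabled, and the exact (enhanced or extended) ERTM header size rather than always assuming the largest. A toy calculation of the resulting per-PDU payload budget (the constants are illustrative; the kernel takes them from the L2CAP_*_SIZE macros):

    #include <stdio.h>
    #include <stddef.h>

    #define ENH_HDR_SIZE	6	/* basic hdr (4) + 16-bit control (2) */
    #define EXT_HDR_SIZE	8	/* basic hdr (4) + 32-bit control (4) */
    #define FCS_SIZE	2

    static size_t pdu_payload(size_t mps, int ext_ctrl, int fcs_on)
    {
    	size_t pdu_len = mps;

    	if (fcs_on)
    		pdu_len -= FCS_SIZE;
    	pdu_len -= ext_ctrl ? EXT_HDR_SIZE : ENH_HDR_SIZE;
    	return pdu_len;
    }

    int main(void)
    {
    	printf("%zu\n", pdu_payload(1010, 0, 1));	/* 1002 */
    	printf("%zu\n", pdu_payload(1010, 1, 0));	/* 1002 */
    	return 0;
    }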
@@ -2117,17 +2193,12 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
 		if (err)
 			break;
 
-		if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL)
-			chan->tx_send_head = seg_queue.next;
-		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
-
 		if (chan->mode == L2CAP_MODE_ERTM)
-			err = l2cap_ertm_send(chan);
+			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
 		else
-			l2cap_streaming_send(chan);
+			l2cap_streaming_send(chan, &seg_queue);
 
-		if (err >= 0)
-			err = len;
+		err = len;
 
 		/* If the skbs were not queued for sending, they'll still be in
 		 * seg_queue and need to be purged.
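Outbound data is now routed through l2cap_tx() as an L2CAP_EV_DATA_REQUEST event, and the tx_state_xmit/tx_state_wait_f handlers added in the next hunk dispatch on the channel's current tx_state, so a data request transmits immediately in XMIT but only queues in WAIT_F. A skeletal version of that event-driven dispatch (the state and event names are simplified stand-ins mirroring the kernel's):

    #include <stdio.h>

    enum tx_state { TX_STATE_XMIT, TX_STATE_WAIT_F };
    enum tx_event { EV_DATA_REQUEST, EV_RETRANS_TO, EV_RECV_FBIT };

    struct chan {
    	enum tx_state tx_state;
    };

    static void tx_state_xmit(struct chan *c, enum tx_event ev)
    {
    	switch (ev) {
    	case EV_DATA_REQUEST:
    		printf("queue and send new I-frames\n");
    		break;
    	case EV_RETRANS_TO:
    		printf("poll peer, enter WAIT_F\n");
    		c->tx_state = TX_STATE_WAIT_F;
    		break;
    	default:
    		break;
    	}
    }

    static void tx_state_wait_f(struct chan *c, enum tx_event ev)
    {
    	switch (ev) {
    	case EV_DATA_REQUEST:
    		printf("queue only; sending resumes on the F-bit\n");
    		break;
    	case EV_RECV_FBIT:
    		printf("F-bit received, back to XMIT\n");
    		c->tx_state = TX_STATE_XMIT;
    		break;
    	default:
    		break;
    	}
    }

    /* Single entry point; the current state picks the handler */
    static void tx(struct chan *c, enum tx_event ev)
    {
    	if (c->tx_state == TX_STATE_XMIT)
    		tx_state_xmit(c, ev);
    	else
    		tx_state_wait_f(c, ev);
    }

    int main(void)
    {
    	struct chan c = { TX_STATE_XMIT };

    	tx(&c, EV_DATA_REQUEST);
    	tx(&c, EV_RETRANS_TO);
    	tx(&c, EV_DATA_REQUEST);
    	tx(&c, EV_RECV_FBIT);
    	return 0;
    }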
@@ -2143,6 +2214,296 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2143 return err; 2214 return err;
2144} 2215}
2145 2216
2217static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2218{
2219 struct l2cap_ctrl control;
2220 u16 seq;
2221
2222 BT_DBG("chan %p, txseq %d", chan, txseq);
2223
2224 memset(&control, 0, sizeof(control));
2225 control.sframe = 1;
2226 control.super = L2CAP_SUPER_SREJ;
2227
2228 for (seq = chan->expected_tx_seq; seq != txseq;
2229 seq = __next_seq(chan, seq)) {
2230 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2231 control.reqseq = seq;
2232 l2cap_send_sframe(chan, &control);
2233 l2cap_seq_list_append(&chan->srej_list, seq);
2234 }
2235 }
2236
2237 chan->expected_tx_seq = __next_seq(chan, txseq);
2238}
2239
2240static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2241{
2242 struct l2cap_ctrl control;
2243
2244 BT_DBG("chan %p", chan);
2245
2246 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2247 return;
2248
2249 memset(&control, 0, sizeof(control));
2250 control.sframe = 1;
2251 control.super = L2CAP_SUPER_SREJ;
2252 control.reqseq = chan->srej_list.tail;
2253 l2cap_send_sframe(chan, &control);
2254}
2255
2256static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2257{
2258 struct l2cap_ctrl control;
2259 u16 initial_head;
2260 u16 seq;
2261
2262 BT_DBG("chan %p, txseq %d", chan, txseq);
2263
2264 memset(&control, 0, sizeof(control));
2265 control.sframe = 1;
2266 control.super = L2CAP_SUPER_SREJ;
2267
2268 /* Capture initial list head to allow only one pass through the list. */
2269 initial_head = chan->srej_list.head;
2270
2271 do {
2272 seq = l2cap_seq_list_pop(&chan->srej_list);
2273 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2274 break;
2275
2276 control.reqseq = seq;
2277 l2cap_send_sframe(chan, &control);
2278 l2cap_seq_list_append(&chan->srej_list, seq);
2279 } while (chan->srej_list.head != initial_head);
2280}
2281
2282static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2283{
2284 struct sk_buff *acked_skb;
2285 u16 ackseq;
2286
2287 BT_DBG("chan %p, reqseq %d", chan, reqseq);
2288
2289 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2290 return;
2291
2292 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2293 chan->expected_ack_seq, chan->unacked_frames);
2294
2295 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2296 ackseq = __next_seq(chan, ackseq)) {
2297
2298 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2299 if (acked_skb) {
2300 skb_unlink(acked_skb, &chan->tx_q);
2301 kfree_skb(acked_skb);
2302 chan->unacked_frames--;
2303 }
2304 }
2305
2306 chan->expected_ack_seq = reqseq;
2307
2308 if (chan->unacked_frames == 0)
2309 __clear_retrans_timer(chan);
2310
2311 BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
2312}
2313
2314static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2315{
2316 BT_DBG("chan %p", chan);
2317
2318 chan->expected_tx_seq = chan->buffer_seq;
2319 l2cap_seq_list_clear(&chan->srej_list);
2320 skb_queue_purge(&chan->srej_q);
2321 chan->rx_state = L2CAP_RX_STATE_RECV;
2322}
2323
2324static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2325 struct l2cap_ctrl *control,
2326 struct sk_buff_head *skbs, u8 event)
2327{
2328 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2329 event);
2330
2331 switch (event) {
2332 case L2CAP_EV_DATA_REQUEST:
2333 if (chan->tx_send_head == NULL)
2334 chan->tx_send_head = skb_peek(skbs);
2335
2336 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2337 l2cap_ertm_send(chan);
2338 break;
2339 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2340 BT_DBG("Enter LOCAL_BUSY");
2341 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2342
2343 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2344 /* The SREJ_SENT state must be aborted if we are to
2345 * enter the LOCAL_BUSY state.
2346 */
2347 l2cap_abort_rx_srej_sent(chan);
2348 }
2349
2350 l2cap_send_ack(chan);
2351
2352 break;
2353 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2354 BT_DBG("Exit LOCAL_BUSY");
2355 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2356
2357 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2358 struct l2cap_ctrl local_control;
2359
2360 memset(&local_control, 0, sizeof(local_control));
2361 local_control.sframe = 1;
2362 local_control.super = L2CAP_SUPER_RR;
2363 local_control.poll = 1;
2364 local_control.reqseq = chan->buffer_seq;
2365 l2cap_send_sframe(chan, &local_control);
2366
2367 chan->retry_count = 1;
2368 __set_monitor_timer(chan);
2369 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2370 }
2371 break;
2372 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2373 l2cap_process_reqseq(chan, control->reqseq);
2374 break;
2375 case L2CAP_EV_EXPLICIT_POLL:
2376 l2cap_send_rr_or_rnr(chan, 1);
2377 chan->retry_count = 1;
2378 __set_monitor_timer(chan);
2379 __clear_ack_timer(chan);
2380 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2381 break;
2382 case L2CAP_EV_RETRANS_TO:
2383 l2cap_send_rr_or_rnr(chan, 1);
2384 chan->retry_count = 1;
2385 __set_monitor_timer(chan);
2386 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2387 break;
2388 case L2CAP_EV_RECV_FBIT:
2389 /* Nothing to process */
2390 break;
2391 default:
2392 break;
2393 }
2394}
2395
2396static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2397 struct l2cap_ctrl *control,
2398 struct sk_buff_head *skbs, u8 event)
2399{
2400 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2401 event);
2402
2403 switch (event) {
2404 case L2CAP_EV_DATA_REQUEST:
2405 if (chan->tx_send_head == NULL)
2406 chan->tx_send_head = skb_peek(skbs);
2407 /* Queue data, but don't send. */
2408 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2409 break;
2410 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2411 BT_DBG("Enter LOCAL_BUSY");
2412 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2413
2414 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2415 /* The SREJ_SENT state must be aborted if we are to
2416 * enter the LOCAL_BUSY state.
2417 */
2418 l2cap_abort_rx_srej_sent(chan);
2419 }
2420
2421 l2cap_send_ack(chan);
2422
2423 break;
2424 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2425 BT_DBG("Exit LOCAL_BUSY");
2426 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2427
2428 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2429 struct l2cap_ctrl local_control;
2430 memset(&local_control, 0, sizeof(local_control));
2431 local_control.sframe = 1;
2432 local_control.super = L2CAP_SUPER_RR;
2433 local_control.poll = 1;
2434 local_control.reqseq = chan->buffer_seq;
2435 l2cap_send_sframe(chan, &local_control);
2436
2437 chan->retry_count = 1;
2438 __set_monitor_timer(chan);
2439 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2440 }
2441 break;
2442 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2443 l2cap_process_reqseq(chan, control->reqseq);
2444
2445 /* Fall through */
2446
2447 case L2CAP_EV_RECV_FBIT:
2448 if (control && control->final) {
2449 __clear_monitor_timer(chan);
2450 if (chan->unacked_frames > 0)
2451 __set_retrans_timer(chan);
2452 chan->retry_count = 0;
2453 chan->tx_state = L2CAP_TX_STATE_XMIT;
2454 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2455 }
2456 break;
2457 case L2CAP_EV_EXPLICIT_POLL:
2458 /* Ignore */
2459 break;
2460 case L2CAP_EV_MONITOR_TO:
2461 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2462 l2cap_send_rr_or_rnr(chan, 1);
2463 __set_monitor_timer(chan);
2464 chan->retry_count++;
2465 } else {
2466 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2467 }
2468 break;
2469 default:
2470 break;
2471 }
2472}
2473
2474static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2475 struct sk_buff_head *skbs, u8 event)
2476{
2477 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2478 chan, control, skbs, event, chan->tx_state);
2479
2480 switch (chan->tx_state) {
2481 case L2CAP_TX_STATE_XMIT:
2482 l2cap_tx_state_xmit(chan, control, skbs, event);
2483 break;
2484 case L2CAP_TX_STATE_WAIT_F:
2485 l2cap_tx_state_wait_f(chan, control, skbs, event);
2486 break;
2487 default:
2488 /* Ignore event */
2489 break;
2490 }
2491}
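
l2cap_tx() is the single entry point of a two-state transmit machine: XMIT sends freely, while WAIT_F holds new data until an F-bit answers an outstanding poll. A compact sketch of the same dispatch shape, reduced to just the two transitions that move between the states (types, names, and the trimmed-down handlers are illustrative):

#include <stdio.h>

enum tx_state { TX_STATE_XMIT, TX_STATE_WAIT_F };
enum tx_event { EV_DATA_REQUEST, EV_EXPLICIT_POLL, EV_RECV_FBIT, EV_MONITOR_TO };

struct chan { enum tx_state tx_state; };

static void state_xmit(struct chan *c, enum tx_event ev)
{
	if (ev == EV_EXPLICIT_POLL)
		c->tx_state = TX_STATE_WAIT_F; /* poll sent, await F-bit */
}

static void state_wait_f(struct chan *c, enum tx_event ev)
{
	if (ev == EV_RECV_FBIT)
		c->tx_state = TX_STATE_XMIT; /* F-bit answers the poll */
}

static void tx(struct chan *c, enum tx_event ev)
{
	switch (c->tx_state) {
	case TX_STATE_XMIT:   state_xmit(c, ev);   break;
	case TX_STATE_WAIT_F: state_wait_f(c, ev); break;
	}
}

int main(void)
{
	struct chan c = { TX_STATE_XMIT };

	tx(&c, EV_EXPLICIT_POLL);
	tx(&c, EV_RECV_FBIT);
	printf("back in state %d\n", c.tx_state); /* 0 == XMIT */
	return 0;
}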
2492
2493static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2494 struct l2cap_ctrl *control)
2495{
2496 BT_DBG("chan %p, control %p", chan, control);
2497 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2498}
2499
2500static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2501 struct l2cap_ctrl *control)
2502{
2503 BT_DBG("chan %p, control %p", chan, control);
2504 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2505}
2506
2146 2507 /* Copy frame to all raw sockets on that connection */
2147 2508 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2148 2509 {
@@ -2165,7 +2526,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2165 2526 		if (!nskb)
2166 2527 			continue;
2167 2528 
2168 		if (chan->ops->recv(chan->data, nskb))
2529 		if (chan->ops->recv(chan, nskb))
2169 2530 			kfree_skb(nskb);
2170 2531 	}
2171 2532 
@@ -2195,9 +2556,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2195 2556 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2196 2557 
2197 2558 	if (conn->hcon->type == LE_LINK)
2198 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2559 		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2199 2560 	else
2200 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2561 		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2201 2562 
2202 2563 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2203 2564 	cmd->code = code;
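
A note on the cpu_to_le16() -> __constant_cpu_to_le16() conversions running through this patch: the __constant_ variants are intended for compile-time constants, where the byte swap can be folded at build time (and the result used where a constant expression is required); runtime values keep the ordinary helper. The split in this very hunk, repeated for clarity:

	/* Constant operand: swap folded at compile time. */
	lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	/* Runtime operand: ordinary helper. */
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);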
@@ -2309,8 +2670,8 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2309 2670 		efs.stype = chan->local_stype;
2310 2671 		efs.msdu = cpu_to_le16(chan->local_msdu);
2311 2672 		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2312 		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2673 		efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2313 		efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2674 		efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2314 2675 		break;
2315 2676 
2316 2677 	case L2CAP_MODE_STREAMING:
@@ -2333,20 +2694,24 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2333 2694 static void l2cap_ack_timeout(struct work_struct *work)
2334 2695 {
2335 2696 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2336 2697 					       ack_timer.work);
2698 	u16 frames_to_ack;
2337 2699 
2338 2700 	BT_DBG("chan %p", chan);
2339 2701 
2340 2702 	l2cap_chan_lock(chan);
2341 2703 
2342 	__l2cap_send_ack(chan);
2704 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2705 				     chan->last_acked_seq);
2343 2706 
2344 	l2cap_chan_unlock(chan);
2707 	if (frames_to_ack)
2708 		l2cap_send_rr_or_rnr(chan, 0);
2345 2709 
2710 	l2cap_chan_unlock(chan);
2346 2711 	l2cap_chan_put(chan);
2347 2712 }
2348 2713 
2349 static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2714 int l2cap_ertm_init(struct l2cap_chan *chan)
2350 2715 {
2351 2716 	int err;
2352 2717 
@@ -2355,7 +2720,6 @@ static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2355 2720 	chan->expected_ack_seq = 0;
2356 2721 	chan->unacked_frames = 0;
2357 2722 	chan->buffer_seq = 0;
2358 	chan->num_acked = 0;
2359 2723 	chan->frames_sent = 0;
2360 2724 	chan->last_acked_seq = 0;
2361 2725 	chan->sdu = NULL;
@@ -2376,12 +2740,15 @@ static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2376 2740 
2377 2741 	skb_queue_head_init(&chan->srej_q);
2378 2742 
2379 	INIT_LIST_HEAD(&chan->srej_l);
2380 2743 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2381 2744 	if (err < 0)
2382 2745 		return err;
2383 2746 
2384 	return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2747 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2748 	if (err < 0)
2749 		l2cap_seq_list_free(&chan->srej_list);
2750 
2751 	return err;
2385 2752 }
2386 2753 
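
The reworked tail of l2cap_ertm_init() (new lines 2747-2751 above) follows the usual acquire-in-order, release-in-reverse pattern: when the second seq-list allocation fails, the first is freed so no half-initialized channel is left behind. The same shape in a self-contained sketch; alloc_res()/free_res() are illustrative stand-ins for the two l2cap_seq_list_init()/_free() calls:

#include <stdlib.h>

struct res { void *p; };

static int alloc_res(struct res *r, size_t n)
{
	r->p = malloc(n);
	return r->p ? 0 : -1;
}

static void free_res(struct res *r)
{
	free(r->p);
	r->p = NULL;
}

static int init_both(struct res *a, struct res *b)
{
	int err = alloc_res(a, 64);

	if (err < 0)
		return err;

	err = alloc_res(b, 64);
	if (err < 0)
		free_res(a); /* unwind the first when the second fails */

	return err;
}

int main(void)
{
	struct res a = { 0 }, b = { 0 };

	if (init_both(&a, &b) == 0) {
		free_res(&b);
		free_res(&a);
	}
	return 0;
}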
2387static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) 2754static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -2507,6 +2874,7 @@ done:
2507 break; 2874 break;
2508 2875
2509 case L2CAP_MODE_STREAMING: 2876 case L2CAP_MODE_STREAMING:
2877 l2cap_txwin_setup(chan);
2510 rfc.mode = L2CAP_MODE_STREAMING; 2878 rfc.mode = L2CAP_MODE_STREAMING;
2511 rfc.txwin_size = 0; 2879 rfc.txwin_size = 0;
2512 rfc.max_transmit = 0; 2880 rfc.max_transmit = 0;
@@ -2537,7 +2905,7 @@ done:
2537 } 2905 }
2538 2906
2539 req->dcid = cpu_to_le16(chan->dcid); 2907 req->dcid = cpu_to_le16(chan->dcid);
2540 	req->flags = cpu_to_le16(0);
2908 	req->flags = __constant_cpu_to_le16(0);
2541 2909
2542 return ptr - data; 2910 return ptr - data;
2543} 2911}
@@ -2757,7 +3125,7 @@ done:
2757 } 3125 }
2758 rsp->scid = cpu_to_le16(chan->dcid); 3126 rsp->scid = cpu_to_le16(chan->dcid);
2759 rsp->result = cpu_to_le16(result); 3127 rsp->result = cpu_to_le16(result);
2760 	rsp->flags = cpu_to_le16(0x0000);
3128 	rsp->flags = __constant_cpu_to_le16(0);
2761 3129
2762 return ptr - data; 3130 return ptr - data;
2763} 3131}
@@ -2856,7 +3224,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
2856 } 3224 }
2857 3225
2858 req->dcid = cpu_to_le16(chan->dcid); 3226 req->dcid = cpu_to_le16(chan->dcid);
2859 	req->flags = cpu_to_le16(0x0000);
3227 	req->flags = __constant_cpu_to_le16(0);
2860 3228
2861 return ptr - data; 3229 return ptr - data;
2862} 3230}
@@ -2883,8 +3251,8 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2883 3251
2884 rsp.scid = cpu_to_le16(chan->dcid); 3252 rsp.scid = cpu_to_le16(chan->dcid);
2885 rsp.dcid = cpu_to_le16(chan->scid); 3253 rsp.dcid = cpu_to_le16(chan->scid);
2886 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2887 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3254 	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3255 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
2888 l2cap_send_cmd(conn, chan->ident, 3256 l2cap_send_cmd(conn, chan->ident,
2889 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 3257 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2890 3258
@@ -2922,8 +3290,8 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2922 * did not send an RFC option. 3290 * did not send an RFC option.
2923 */ 3291 */
2924 rfc.mode = chan->mode; 3292 rfc.mode = chan->mode;
2925 	rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2926 	rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3293 	rfc.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3294 	rfc.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2927 rfc.max_pdu_size = cpu_to_le16(chan->imtu); 3295 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2928 3296
2929 BT_ERR("Expected RFC option was not found, using defaults"); 3297 BT_ERR("Expected RFC option was not found, using defaults");
@@ -2986,7 +3354,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2986 lock_sock(parent); 3354 lock_sock(parent);
2987 3355
2988 /* Check if the ACL is secure enough (if not SDP) */ 3356 /* Check if the ACL is secure enough (if not SDP) */
2989 	if (psm != cpu_to_le16(0x0001) &&
3357 	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
2990 !hci_conn_check_link_mode(conn->hcon)) { 3358 !hci_conn_check_link_mode(conn->hcon)) {
2991 conn->disc_reason = HCI_ERROR_AUTH_FAILURE; 3359 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2992 result = L2CAP_CR_SEC_BLOCK; 3360 result = L2CAP_CR_SEC_BLOCK;
@@ -2995,25 +3363,16 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2995 3363
2996 result = L2CAP_CR_NO_MEM; 3364 result = L2CAP_CR_NO_MEM;
2997 3365
2998 	/* Check for backlog size */
2999 	if (sk_acceptq_is_full(parent)) {
3000 		BT_DBG("backlog full %d", parent->sk_ack_backlog);
3001 		goto response;
3002 	}
3366 	/* Check if we already have channel with that dcid */
3367 	if (__l2cap_get_chan_by_dcid(conn, scid))
3368 		goto response;
3003 3369 
3004 	chan = pchan->ops->new_connection(pchan->data);
3370 	chan = pchan->ops->new_connection(pchan);
3005 if (!chan) 3371 if (!chan)
3006 goto response; 3372 goto response;
3007 3373
3008 sk = chan->sk; 3374 sk = chan->sk;
3009 3375
3010 /* Check if we already have channel with that dcid */
3011 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3012 sock_set_flag(sk, SOCK_ZAPPED);
3013 chan->ops->close(chan->data);
3014 goto response;
3015 }
3016
3017 hci_conn_hold(conn->hcon); 3376 hci_conn_hold(conn->hcon);
3018 3377
3019 bacpy(&bt_sk(sk)->src, conn->src); 3378 bacpy(&bt_sk(sk)->src, conn->src);
@@ -3067,7 +3426,7 @@ sendresp:
3067 3426
3068 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { 3427 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3069 struct l2cap_info_req info; 3428 struct l2cap_info_req info;
3070 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3429 		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3071 3430
3072 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; 3431 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3073 conn->info_ident = l2cap_get_ident(conn); 3432 conn->info_ident = l2cap_get_ident(conn);
@@ -3189,7 +3548,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3189 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { 3548 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3190 struct l2cap_cmd_rej_cid rej; 3549 struct l2cap_cmd_rej_cid rej;
3191 3550
3192 		rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3551 		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3193 rej.scid = cpu_to_le16(chan->scid); 3552 rej.scid = cpu_to_le16(chan->scid);
3194 rej.dcid = cpu_to_le16(chan->dcid); 3553 rej.dcid = cpu_to_le16(chan->dcid);
3195 3554
@@ -3211,11 +3570,11 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3211 memcpy(chan->conf_req + chan->conf_len, req->data, len); 3570 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3212 chan->conf_len += len; 3571 chan->conf_len += len;
3213 3572
3214 	if (flags & 0x0001) {
3573 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3215 3574 		/* Incomplete config. Send empty response. */
3216 3575 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3217 3576 			       l2cap_build_conf_rsp(chan, rsp,
3218 						    L2CAP_CONF_SUCCESS, 0x0001), rsp);
3577 						    L2CAP_CONF_SUCCESS, flags), rsp);
3219 goto unlock; 3578 goto unlock;
3220 } 3579 }
3221 3580
@@ -3238,8 +3597,6 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3238 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) { 3597 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3239 set_default_fcs(chan); 3598 set_default_fcs(chan);
3240 3599
3241 l2cap_state_change(chan, BT_CONNECTED);
3242
3243 if (chan->mode == L2CAP_MODE_ERTM || 3600 if (chan->mode == L2CAP_MODE_ERTM ||
3244 chan->mode == L2CAP_MODE_STREAMING) 3601 chan->mode == L2CAP_MODE_STREAMING)
3245 err = l2cap_ertm_init(chan); 3602 err = l2cap_ertm_init(chan);
@@ -3271,7 +3628,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3271 3628
3272 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 3629 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3273 l2cap_build_conf_rsp(chan, rsp, 3630 l2cap_build_conf_rsp(chan, rsp,
3274 					    L2CAP_CONF_SUCCESS, 0x0000), rsp);
3631 					    L2CAP_CONF_SUCCESS, flags), rsp);
3275 } 3632 }
3276 3633
3277unlock: 3634unlock:
@@ -3362,7 +3719,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3362 goto done; 3719 goto done;
3363 } 3720 }
3364 3721
3365 	if (flags & 0x01)
3722 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3366 goto done; 3723 goto done;
3367 3724
3368 set_bit(CONF_INPUT_DONE, &chan->conf_state); 3725 set_bit(CONF_INPUT_DONE, &chan->conf_state);
@@ -3370,7 +3727,6 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3370 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) { 3727 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3371 set_default_fcs(chan); 3728 set_default_fcs(chan);
3372 3729
3373 l2cap_state_change(chan, BT_CONNECTED);
3374 if (chan->mode == L2CAP_MODE_ERTM || 3730 if (chan->mode == L2CAP_MODE_ERTM ||
3375 chan->mode == L2CAP_MODE_STREAMING) 3731 chan->mode == L2CAP_MODE_STREAMING)
3376 err = l2cap_ertm_init(chan); 3732 err = l2cap_ertm_init(chan);
@@ -3424,7 +3780,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
3424 3780
3425 l2cap_chan_unlock(chan); 3781 l2cap_chan_unlock(chan);
3426 3782
3427 	chan->ops->close(chan->data);
3783 	chan->ops->close(chan);
3428 l2cap_chan_put(chan); 3784 l2cap_chan_put(chan);
3429 3785
3430 mutex_unlock(&conn->chan_lock); 3786 mutex_unlock(&conn->chan_lock);
@@ -3458,7 +3814,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
3458 3814
3459 l2cap_chan_unlock(chan); 3815 l2cap_chan_unlock(chan);
3460 3816
3461 	chan->ops->close(chan->data);
3817 	chan->ops->close(chan);
3462 l2cap_chan_put(chan); 3818 l2cap_chan_put(chan);
3463 3819
3464 mutex_unlock(&conn->chan_lock); 3820 mutex_unlock(&conn->chan_lock);
@@ -3479,8 +3835,8 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
3479 u8 buf[8]; 3835 u8 buf[8];
3480 u32 feat_mask = l2cap_feat_mask; 3836 u32 feat_mask = l2cap_feat_mask;
3481 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; 3837 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3482 		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3483 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3838 		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3839 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3484 if (!disable_ertm) 3840 if (!disable_ertm)
3485 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING 3841 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3486 | L2CAP_FEAT_FCS; 3842 | L2CAP_FEAT_FCS;
@@ -3500,15 +3856,15 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
3500 else 3856 else
3501 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP; 3857 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3502 3858
3503 		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3504 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3859 		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3860 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3505 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan)); 3861 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3506 l2cap_send_cmd(conn, cmd->ident, 3862 l2cap_send_cmd(conn, cmd->ident,
3507 L2CAP_INFO_RSP, sizeof(buf), buf); 3863 L2CAP_INFO_RSP, sizeof(buf), buf);
3508 } else { 3864 } else {
3509 struct l2cap_info_rsp rsp; 3865 struct l2cap_info_rsp rsp;
3510 rsp.type = cpu_to_le16(type); 3866 rsp.type = cpu_to_le16(type);
3511 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3867 		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3512 l2cap_send_cmd(conn, cmd->ident, 3868 l2cap_send_cmd(conn, cmd->ident,
3513 L2CAP_INFO_RSP, sizeof(rsp), &rsp); 3869 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3514 } 3870 }
@@ -3548,7 +3904,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
3548 3904
3549 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) { 3905 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3550 struct l2cap_info_req req; 3906 struct l2cap_info_req req;
3551 		req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3907 		req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3552 3908
3553 conn->info_ident = l2cap_get_ident(conn); 3909 conn->info_ident = l2cap_get_ident(conn);
3554 3910
@@ -3783,9 +4139,9 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3783 4139
3784 err = l2cap_check_conn_param(min, max, latency, to_multiplier); 4140 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3785 if (err) 4141 if (err)
3786 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4142 		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3787 4143 	else
3788 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4144 		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3789 4145
3790 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP, 4146 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3791 sizeof(rsp), &rsp); 4147 sizeof(rsp), &rsp);
@@ -3933,7 +4289,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3933 BT_ERR("Wrong link type (%d)", err); 4289 BT_ERR("Wrong link type (%d)", err);
3934 4290
3935 /* FIXME: Map err to a valid reason */ 4291 /* FIXME: Map err to a valid reason */
3936 	rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4292 	rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3937 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); 4293 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3938 } 4294 }
3939 4295
@@ -3965,65 +4321,38 @@ static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3965 return 0; 4321 return 0;
3966} 4322}
3967 4323
3968 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4324 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3969{ 4325{
3970 u32 control = 0; 4326 struct l2cap_ctrl control;
3971 4327
3972 chan->frames_sent = 0; 4328 BT_DBG("chan %p", chan);
3973 4329
3974 control |= __set_reqseq(chan, chan->buffer_seq); 4330 memset(&control, 0, sizeof(control));
4331 control.sframe = 1;
4332 control.final = 1;
4333 control.reqseq = chan->buffer_seq;
4334 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3975 4335
3976 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 4336 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3977 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); 4337 control.super = L2CAP_SUPER_RNR;
3978 l2cap_send_sframe(chan, control); 4338 l2cap_send_sframe(chan, &control);
3979 set_bit(CONN_RNR_SENT, &chan->conn_state);
3980 } 4339 }
3981 4340
3982 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) 4341 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3983 l2cap_retransmit_frames(chan); 4342 chan->unacked_frames > 0)
4343 __set_retrans_timer(chan);
3984 4344
4345 /* Send pending iframes */
3985 l2cap_ertm_send(chan); 4346 l2cap_ertm_send(chan);
3986 4347
3987 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && 4348 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3988 chan->frames_sent == 0) { 4349 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
3989 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); 4350 /* F-bit wasn't sent in an s-frame or i-frame yet, so
3990 l2cap_send_sframe(chan, control); 4351 * send it now.
3991 } 4352 */
3992} 4353 control.super = L2CAP_SUPER_RR;
3993 4354 l2cap_send_sframe(chan, &control);
3994static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3995{
3996 struct sk_buff *next_skb;
3997 int tx_seq_offset, next_tx_seq_offset;
3998
3999 bt_cb(skb)->control.txseq = tx_seq;
4000 bt_cb(skb)->control.sar = sar;
4001
4002 next_skb = skb_peek(&chan->srej_q);
4003
4004 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4005
4006 while (next_skb) {
4007 if (bt_cb(next_skb)->control.txseq == tx_seq)
4008 return -EINVAL;
4009
4010 next_tx_seq_offset = __seq_offset(chan,
4011 bt_cb(next_skb)->control.txseq, chan->buffer_seq);
4012
4013 if (next_tx_seq_offset > tx_seq_offset) {
4014 __skb_queue_before(&chan->srej_q, next_skb, skb);
4015 return 0;
4016 }
4017
4018 if (skb_queue_is_last(&chan->srej_q, next_skb))
4019 next_skb = NULL;
4020 else
4021 next_skb = skb_queue_next(&chan->srej_q, next_skb);
4022 } 4355 }
4023
4024 __skb_queue_tail(&chan->srej_q, skb);
4025
4026 return 0;
4027} 4356}
4028 4357
4029static void append_skb_frag(struct sk_buff *skb, 4358static void append_skb_frag(struct sk_buff *skb,
@@ -4045,16 +4374,17 @@ static void append_skb_frag(struct sk_buff *skb,
4045 skb->truesize += new_frag->truesize; 4374 skb->truesize += new_frag->truesize;
4046} 4375}
4047 4376
4048static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control) 4377static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4378 struct l2cap_ctrl *control)
4049{ 4379{
4050 int err = -EINVAL; 4380 int err = -EINVAL;
4051 4381
4052 switch (__get_ctrl_sar(chan, control)) { 4382 switch (control->sar) {
4053 case L2CAP_SAR_UNSEGMENTED: 4383 case L2CAP_SAR_UNSEGMENTED:
4054 if (chan->sdu) 4384 if (chan->sdu)
4055 break; 4385 break;
4056 4386
4057 err = chan->ops->recv(chan->data, skb); 4387 err = chan->ops->recv(chan, skb);
4058 break; 4388 break;
4059 4389
4060 case L2CAP_SAR_START: 4390 case L2CAP_SAR_START:
@@ -4104,7 +4434,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u3
4104 if (chan->sdu->len != chan->sdu_len) 4434 if (chan->sdu->len != chan->sdu_len)
4105 break; 4435 break;
4106 4436
4107 err = chan->ops->recv(chan->data, chan->sdu); 4437 err = chan->ops->recv(chan, chan->sdu);
4108 4438
4109 if (!err) { 4439 if (!err) {
4110 /* Reassembly complete */ 4440 /* Reassembly complete */
@@ -4126,448 +4456,609 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u3
4126 return err; 4456 return err;
4127} 4457}
4128 4458
4129static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) 4459void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4130{ 4460{
4131 BT_DBG("chan %p, Enter local busy", chan); 4461 u8 event;
4132 4462
4133 set_bit(CONN_LOCAL_BUSY, &chan->conn_state); 4463 if (chan->mode != L2CAP_MODE_ERTM)
4134 l2cap_seq_list_clear(&chan->srej_list); 4464 return;
4135 4465
4136 __set_ack_timer(chan); 4466 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4467 l2cap_tx(chan, NULL, NULL, event);
4137} 4468}
4138 4469
4139static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan) 4470static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4140{ 4471{
4141 u32 control; 4472 int err = 0;
4142 4473 /* Pass sequential frames to l2cap_reassemble_sdu()
4143 if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) 4474 * until a gap is encountered.
4144 goto done; 4475 */
4145 4476
4146 control = __set_reqseq(chan, chan->buffer_seq); 4477 BT_DBG("chan %p", chan);
4147 control |= __set_ctrl_poll(chan);
4148 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
4149 l2cap_send_sframe(chan, control);
4150 chan->retry_count = 1;
4151 4478
4152 __clear_retrans_timer(chan); 4479 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4153 __set_monitor_timer(chan); 4480 struct sk_buff *skb;
4481 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4482 chan->buffer_seq, skb_queue_len(&chan->srej_q));
4154 4483
4155 set_bit(CONN_WAIT_F, &chan->conn_state); 4484 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
4156 4485
4157done: 4486 if (!skb)
4158 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); 4487 break;
4159 clear_bit(CONN_RNR_SENT, &chan->conn_state);
4160 4488
4161 BT_DBG("chan %p, Exit local busy", chan); 4489 skb_unlink(skb, &chan->srej_q);
4162} 4490 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4491 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
4492 if (err)
4493 break;
4494 }
4163 4495
4164void l2cap_chan_busy(struct l2cap_chan *chan, int busy) 4496 if (skb_queue_empty(&chan->srej_q)) {
4165{ 4497 chan->rx_state = L2CAP_RX_STATE_RECV;
4166 if (chan->mode == L2CAP_MODE_ERTM) { 4498 l2cap_send_ack(chan);
4167 if (busy)
4168 l2cap_ertm_enter_local_busy(chan);
4169 else
4170 l2cap_ertm_exit_local_busy(chan);
4171 } 4499 }
4500
4501 return err;
4172} 4502}
4173 4503
4174static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq) 4504static void l2cap_handle_srej(struct l2cap_chan *chan,
4505 struct l2cap_ctrl *control)
4175{ 4506{
4176 struct sk_buff *skb; 4507 struct sk_buff *skb;
4177 u32 control;
4178 4508
4179 while ((skb = skb_peek(&chan->srej_q)) && 4509 BT_DBG("chan %p, control %p", chan, control);
4180 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4181 int err;
4182 4510
4183 if (bt_cb(skb)->control.txseq != tx_seq) 4511 if (control->reqseq == chan->next_tx_seq) {
4184 break; 4512 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4513 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4514 return;
4515 }
4185 4516
4186 skb = skb_dequeue(&chan->srej_q); 4517 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4187 control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
4188 err = l2cap_reassemble_sdu(chan, skb, control);
4189 4518
4190 if (err < 0) { 4519 if (skb == NULL) {
4191 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4520 BT_DBG("Seq %d not available for retransmission",
4192 break; 4521 control->reqseq);
4193 } 4522 return;
4523 }
4194 4524
4195 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej); 4525 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4196 tx_seq = __next_seq(chan, tx_seq); 4526 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4527 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4528 return;
4197 } 4529 }
4198}
4199 4530
4200static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq) 4531 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4201{
4202 struct srej_list *l, *tmp;
4203 u32 control;
4204 4532
4205 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { 4533 if (control->poll) {
4206 if (l->tx_seq == tx_seq) { 4534 l2cap_pass_to_tx(chan, control);
4207 list_del(&l->list); 4535
4208 kfree(l); 4536 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4209 return; 4537 l2cap_retransmit(chan, control);
4538 l2cap_ertm_send(chan);
4539
4540 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4541 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4542 chan->srej_save_reqseq = control->reqseq;
4543 }
4544 } else {
4545 l2cap_pass_to_tx_fbit(chan, control);
4546
4547 if (control->final) {
4548 if (chan->srej_save_reqseq != control->reqseq ||
4549 !test_and_clear_bit(CONN_SREJ_ACT,
4550 &chan->conn_state))
4551 l2cap_retransmit(chan, control);
4552 } else {
4553 l2cap_retransmit(chan, control);
4554 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4555 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4556 chan->srej_save_reqseq = control->reqseq;
4557 }
4210 } 4558 }
4211 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4212 control |= __set_reqseq(chan, l->tx_seq);
4213 l2cap_send_sframe(chan, control);
4214 list_del(&l->list);
4215 list_add_tail(&l->list, &chan->srej_l);
4216 } 4559 }
4217} 4560}
4218 4561
4219static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq) 4562static void l2cap_handle_rej(struct l2cap_chan *chan,
4563 struct l2cap_ctrl *control)
4220{ 4564{
4221 struct srej_list *new; 4565 struct sk_buff *skb;
4222 u32 control;
4223
4224 while (tx_seq != chan->expected_tx_seq) {
4225 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4226 control |= __set_reqseq(chan, chan->expected_tx_seq);
4227 l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
4228 l2cap_send_sframe(chan, control);
4229 4566
4230 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); 4567 BT_DBG("chan %p, control %p", chan, control);
4231 if (!new)
4232 return -ENOMEM;
4233 4568
4234 new->tx_seq = chan->expected_tx_seq; 4569 if (control->reqseq == chan->next_tx_seq) {
4570 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4571 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4572 return;
4573 }
4235 4574
4236 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); 4575 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4237 4576
4238 list_add_tail(&new->list, &chan->srej_l); 4577 if (chan->max_tx && skb &&
4578 bt_cb(skb)->control.retries >= chan->max_tx) {
4579 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4580 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4581 return;
4239 } 4582 }
4240 4583
4241 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); 4584 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4242 4585
4243 return 0; 4586 l2cap_pass_to_tx(chan, control);
4587
4588 if (control->final) {
4589 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4590 l2cap_retransmit_all(chan, control);
4591 } else {
4592 l2cap_retransmit_all(chan, control);
4593 l2cap_ertm_send(chan);
4594 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4595 set_bit(CONN_REJ_ACT, &chan->conn_state);
4596 }
4244} 4597}
4245 4598
4246static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) 4599static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4247{ 4600{
4248 u16 tx_seq = __get_txseq(chan, rx_control); 4601 BT_DBG("chan %p, txseq %d", chan, txseq);
4249 u16 req_seq = __get_reqseq(chan, rx_control);
4250 u8 sar = __get_ctrl_sar(chan, rx_control);
4251 int tx_seq_offset, expected_tx_seq_offset;
4252 int num_to_ack = (chan->tx_win/6) + 1;
4253 int err = 0;
4254 4602
4255 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len, 4603 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4256 tx_seq, rx_control); 4604 chan->expected_tx_seq);
4257 4605
4258 if (__is_ctrl_final(chan, rx_control) && 4606 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4259 test_bit(CONN_WAIT_F, &chan->conn_state)) { 4607 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4260 __clear_monitor_timer(chan); 4608 chan->tx_win) {
4261 if (chan->unacked_frames > 0) 4609 /* See notes below regarding "double poll" and
4262 __set_retrans_timer(chan); 4610 * invalid packets.
4263 clear_bit(CONN_WAIT_F, &chan->conn_state); 4611 */
4264 } 4612 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4613 BT_DBG("Invalid/Ignore - after SREJ");
4614 return L2CAP_TXSEQ_INVALID_IGNORE;
4615 } else {
4616 BT_DBG("Invalid - in window after SREJ sent");
4617 return L2CAP_TXSEQ_INVALID;
4618 }
4619 }
4265 4620
4266 chan->expected_ack_seq = req_seq; 4621 if (chan->srej_list.head == txseq) {
4267 l2cap_drop_acked_frames(chan); 4622 BT_DBG("Expected SREJ");
4623 return L2CAP_TXSEQ_EXPECTED_SREJ;
4624 }
4268 4625
4269 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); 4626 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4627 BT_DBG("Duplicate SREJ - txseq already stored");
4628 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4629 }
4270 4630
4271 /* invalid tx_seq */ 4631 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4272 if (tx_seq_offset >= chan->tx_win) { 4632 BT_DBG("Unexpected SREJ - not requested");
4273 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4633 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4274 goto drop; 4634 }
4275 } 4635 }
4276 4636
4277 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 4637 if (chan->expected_tx_seq == txseq) {
4278 if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) 4638 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4279 l2cap_send_ack(chan); 4639 chan->tx_win) {
4280 goto drop; 4640 BT_DBG("Invalid - txseq outside tx window");
4641 return L2CAP_TXSEQ_INVALID;
4642 } else {
4643 BT_DBG("Expected");
4644 return L2CAP_TXSEQ_EXPECTED;
4645 }
4281 } 4646 }
4282 4647
4283 if (tx_seq == chan->expected_tx_seq) 4648 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4284 goto expected; 4649 __seq_offset(chan, chan->expected_tx_seq,
4650 		     chan->last_acked_seq)) {
4651 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4652 return L2CAP_TXSEQ_DUPLICATE;
4653 }
4654
4655 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4656 /* A source of invalid packets is a "double poll" condition,
4657 * where delays cause us to send multiple poll packets. If
4658 * the remote stack receives and processes both polls,
4659 * sequence numbers can wrap around in such a way that a
4660 * resent frame has a sequence number that looks like new data
4661 * with a sequence gap. This would trigger an erroneous SREJ
4662 * request.
4663 *
4664 * Fortunately, this is impossible with a tx window that's
4665 * less than half of the maximum sequence number, which allows
4666 * invalid frames to be safely ignored.
4667 *
4668 * With tx window sizes greater than half of the tx window
4669 * maximum, the frame is invalid and cannot be ignored. This
4670 * causes a disconnect.
4671 */
4672
4673 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4674 BT_DBG("Invalid/Ignore - txseq outside tx window");
4675 return L2CAP_TXSEQ_INVALID_IGNORE;
4676 } else {
4677 BT_DBG("Invalid - txseq outside tx window");
4678 return L2CAP_TXSEQ_INVALID;
4679 }
4680 } else {
4681 BT_DBG("Unexpected - txseq indicates missing frames");
4682 return L2CAP_TXSEQ_UNEXPECTED;
4683 }
4684}
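
The "double poll" comment above is easier to follow with concrete numbers. Assuming the standard 6-bit sequence space (tx_win_max = 63, so offsets wrap at 64) and tx_win = 10: a stale retransmission that wrapped past zero lands well outside the window, and because 10 <= (63 + 1) / 2 it can be safely ignored instead of forcing a disconnect. A small check of that arithmetic (all constants illustrative):

#include <assert.h>
#include <stdint.h>

#define SEQ_MOD 64 /* tx_win_max + 1, assuming 6-bit sequence numbers */

static uint16_t seq_offset(uint16_t seq1, uint16_t seq2)
{
	return (uint16_t)((seq1 + SEQ_MOD - seq2) % SEQ_MOD);
}

int main(void)
{
	uint16_t tx_win = 10, tx_win_max = SEQ_MOD - 1;
	uint16_t last_acked_seq = 60;
	uint16_t txseq = 8; /* stale retransmission that wrapped past 0 */
	uint16_t off = seq_offset(txseq, last_acked_seq); /* 12 */

	assert(off >= tx_win);                     /* outside the tx window */
	assert(tx_win <= ((tx_win_max + 1) >> 1)); /* small window: ignore */
	return 0;
}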
4285 4685
4286 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 4686static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4287 struct srej_list *first; 4687 struct l2cap_ctrl *control,
4688 struct sk_buff *skb, u8 event)
4689{
4690 int err = 0;
4691 	bool skb_in_use = false;
4288 4692
4289 first = list_first_entry(&chan->srej_l, 4693 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4290 struct srej_list, list); 4694 event);
4291 if (tx_seq == first->tx_seq) {
4292 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4293 l2cap_check_srej_gap(chan, tx_seq);
4294 4695
4295 list_del(&first->list); 4696 switch (event) {
4296 kfree(first); 4697 case L2CAP_EV_RECV_IFRAME:
4698 switch (l2cap_classify_txseq(chan, control->txseq)) {
4699 case L2CAP_TXSEQ_EXPECTED:
4700 l2cap_pass_to_tx(chan, control);
4297 4701
4298 if (list_empty(&chan->srej_l)) { 4702 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4299 chan->buffer_seq = chan->buffer_seq_srej; 4703 BT_DBG("Busy, discarding expected seq %d",
4300 clear_bit(CONN_SREJ_SENT, &chan->conn_state); 4704 control->txseq);
4301 l2cap_send_ack(chan); 4705 break;
4302 BT_DBG("chan %p, Exit SREJ_SENT", chan);
4303 } 4706 }
4304 } else {
4305 struct srej_list *l;
4306 4707
4307 /* duplicated tx_seq */ 4708 chan->expected_tx_seq = __next_seq(chan,
4308 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0) 4709 control->txseq);
4309 goto drop; 4710
4711 chan->buffer_seq = chan->expected_tx_seq;
4712 skb_in_use = 1;
4310 4713
4311 list_for_each_entry(l, &chan->srej_l, list) { 4714 err = l2cap_reassemble_sdu(chan, skb, control);
4312 if (l->tx_seq == tx_seq) { 4715 if (err)
4313 l2cap_resend_srejframe(chan, tx_seq); 4716 break;
4314 return 0; 4717
4718 if (control->final) {
4719 if (!test_and_clear_bit(CONN_REJ_ACT,
4720 &chan->conn_state)) {
4721 control->final = 0;
4722 l2cap_retransmit_all(chan, control);
4723 l2cap_ertm_send(chan);
4315 } 4724 }
4316 } 4725 }
4317 4726
4318 err = l2cap_send_srejframe(chan, tx_seq); 4727 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4319 if (err < 0) { 4728 l2cap_send_ack(chan);
4320 l2cap_send_disconn_req(chan->conn, chan, -err); 4729 break;
4321 return err; 4730 case L2CAP_TXSEQ_UNEXPECTED:
4731 l2cap_pass_to_tx(chan, control);
4732
4733 /* Can't issue SREJ frames in the local busy state.
4734 * Drop this frame, it will be seen as missing
4735 * when local busy is exited.
4736 */
4737 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4738 BT_DBG("Busy, discarding unexpected seq %d",
4739 control->txseq);
4740 break;
4322 } 4741 }
4323 }
4324 } else {
4325 expected_tx_seq_offset = __seq_offset(chan,
4326 chan->expected_tx_seq, chan->buffer_seq);
4327 4742
4328 /* duplicated tx_seq */ 4743 /* There was a gap in the sequence, so an SREJ
4329 if (tx_seq_offset < expected_tx_seq_offset) 4744 * must be sent for each missing frame. The
4330 goto drop; 4745 * current frame is stored for later use.
4331 4746 */
4332 set_bit(CONN_SREJ_SENT, &chan->conn_state); 4747 skb_queue_tail(&chan->srej_q, skb);
4748 skb_in_use = 1;
4749 BT_DBG("Queued %p (queue len %d)", skb,
4750 skb_queue_len(&chan->srej_q));
4333 4751
4334 BT_DBG("chan %p, Enter SREJ", chan); 4752 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4753 l2cap_seq_list_clear(&chan->srej_list);
4754 l2cap_send_srej(chan, control->txseq);
4335 4755
4336 INIT_LIST_HEAD(&chan->srej_l); 4756 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4337 chan->buffer_seq_srej = chan->buffer_seq; 4757 break;
4758 case L2CAP_TXSEQ_DUPLICATE:
4759 l2cap_pass_to_tx(chan, control);
4760 break;
4761 case L2CAP_TXSEQ_INVALID_IGNORE:
4762 break;
4763 case L2CAP_TXSEQ_INVALID:
4764 default:
4765 l2cap_send_disconn_req(chan->conn, chan,
4766 ECONNRESET);
4767 break;
4768 }
4769 break;
4770 case L2CAP_EV_RECV_RR:
4771 l2cap_pass_to_tx(chan, control);
4772 if (control->final) {
4773 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4338 4774
4339 __skb_queue_head_init(&chan->srej_q); 4775 if (!test_and_clear_bit(CONN_REJ_ACT,
4340 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); 4776 &chan->conn_state)) {
4777 control->final = 0;
4778 l2cap_retransmit_all(chan, control);
4779 }
4341 4780
4342 /* Set P-bit only if there are some I-frames to ack. */ 4781 l2cap_ertm_send(chan);
4343 if (__clear_ack_timer(chan)) 4782 } else if (control->poll) {
4344 set_bit(CONN_SEND_PBIT, &chan->conn_state); 4783 l2cap_send_i_or_rr_or_rnr(chan);
4784 } else {
4785 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4786 &chan->conn_state) &&
4787 chan->unacked_frames)
4788 __set_retrans_timer(chan);
4345 4789
4346 err = l2cap_send_srejframe(chan, tx_seq); 4790 l2cap_ertm_send(chan);
4347 if (err < 0) {
4348 l2cap_send_disconn_req(chan->conn, chan, -err);
4349 return err;
4350 } 4791 }
4792 break;
4793 case L2CAP_EV_RECV_RNR:
4794 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4795 l2cap_pass_to_tx(chan, control);
4796 if (control && control->poll) {
4797 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4798 l2cap_send_rr_or_rnr(chan, 0);
4799 }
4800 __clear_retrans_timer(chan);
4801 l2cap_seq_list_clear(&chan->retrans_list);
4802 break;
4803 case L2CAP_EV_RECV_REJ:
4804 l2cap_handle_rej(chan, control);
4805 break;
4806 case L2CAP_EV_RECV_SREJ:
4807 l2cap_handle_srej(chan, control);
4808 break;
4809 default:
4810 break;
4351 } 4811 }
4352 return 0;
4353
4354expected:
4355 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4356
4357 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4358 bt_cb(skb)->control.txseq = tx_seq;
4359 bt_cb(skb)->control.sar = sar;
4360 __skb_queue_tail(&chan->srej_q, skb);
4361 return 0;
4362 }
4363
4364 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4365 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4366 4812
4367 if (err < 0) { 4813 if (skb && !skb_in_use) {
4368 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4814 BT_DBG("Freeing %p", skb);
4369 return err; 4815 kfree_skb(skb);
4370 } 4816 }
4371 4817
4372 if (__is_ctrl_final(chan, rx_control)) { 4818 return err;
4373 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) 4819}
4374 l2cap_retransmit_frames(chan);
4375 }
4376 4820
4821static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4822 struct l2cap_ctrl *control,
4823 struct sk_buff *skb, u8 event)
4824{
4825 int err = 0;
4826 u16 txseq = control->txseq;
4827 	bool skb_in_use = false;
4828
4829 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4830 event);
4831
4832 switch (event) {
4833 case L2CAP_EV_RECV_IFRAME:
4834 switch (l2cap_classify_txseq(chan, txseq)) {
4835 case L2CAP_TXSEQ_EXPECTED:
4836 /* Keep frame for reassembly later */
4837 l2cap_pass_to_tx(chan, control);
4838 skb_queue_tail(&chan->srej_q, skb);
4839 skb_in_use = 1;
4840 BT_DBG("Queued %p (queue len %d)", skb,
4841 skb_queue_len(&chan->srej_q));
4842
4843 chan->expected_tx_seq = __next_seq(chan, txseq);
4844 break;
4845 case L2CAP_TXSEQ_EXPECTED_SREJ:
4846 l2cap_seq_list_pop(&chan->srej_list);
4377 4847
4378 chan->num_acked = (chan->num_acked + 1) % num_to_ack; 4848 l2cap_pass_to_tx(chan, control);
4379 if (chan->num_acked == num_to_ack - 1) 4849 skb_queue_tail(&chan->srej_q, skb);
4380 l2cap_send_ack(chan); 4850 skb_in_use = 1;
4381 else 4851 BT_DBG("Queued %p (queue len %d)", skb,
4382 __set_ack_timer(chan); 4852 skb_queue_len(&chan->srej_q));
4383 4853
4384 return 0; 4854 err = l2cap_rx_queued_iframes(chan);
4855 if (err)
4856 break;
4385 4857
4386drop: 4858 break;
4387 kfree_skb(skb); 4859 case L2CAP_TXSEQ_UNEXPECTED:
4388 return 0; 4860 /* Got a frame that can't be reassembled yet.
4389} 4861 * Save it for later, and send SREJs to cover
4862 * the missing frames.
4863 */
4864 skb_queue_tail(&chan->srej_q, skb);
4865 skb_in_use = 1;
4866 BT_DBG("Queued %p (queue len %d)", skb,
4867 skb_queue_len(&chan->srej_q));
4868
4869 l2cap_pass_to_tx(chan, control);
4870 l2cap_send_srej(chan, control->txseq);
4871 break;
4872 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4873 /* This frame was requested with an SREJ, but
4874 * some expected retransmitted frames are
4875 * missing. Request retransmission of missing
4876 * SREJ'd frames.
4877 */
4878 skb_queue_tail(&chan->srej_q, skb);
4879 skb_in_use = 1;
4880 BT_DBG("Queued %p (queue len %d)", skb,
4881 skb_queue_len(&chan->srej_q));
4882
4883 l2cap_pass_to_tx(chan, control);
4884 l2cap_send_srej_list(chan, control->txseq);
4885 break;
4886 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4887 /* We've already queued this frame. Drop this copy. */
4888 l2cap_pass_to_tx(chan, control);
4889 break;
4890 case L2CAP_TXSEQ_DUPLICATE:
4891 /* Expecting a later sequence number, so this frame
4892 * was already received. Ignore it completely.
4893 */
4894 break;
4895 case L2CAP_TXSEQ_INVALID_IGNORE:
4896 break;
4897 case L2CAP_TXSEQ_INVALID:
4898 default:
4899 l2cap_send_disconn_req(chan->conn, chan,
4900 ECONNRESET);
4901 break;
4902 }
4903 break;
4904 case L2CAP_EV_RECV_RR:
4905 l2cap_pass_to_tx(chan, control);
4906 if (control->final) {
4907 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4390 4908
4391static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control) 4909 if (!test_and_clear_bit(CONN_REJ_ACT,
4392{ 4910 &chan->conn_state)) {
4393 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, 4911 control->final = 0;
4394 __get_reqseq(chan, rx_control), rx_control); 4912 l2cap_retransmit_all(chan, control);
4913 }
4395 4914
4396 chan->expected_ack_seq = __get_reqseq(chan, rx_control); 4915 l2cap_ertm_send(chan);
4397 l2cap_drop_acked_frames(chan); 4916 } else if (control->poll) {
4917 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4918 &chan->conn_state) &&
4919 chan->unacked_frames) {
4920 __set_retrans_timer(chan);
4921 }
4398 4922
4399 if (__is_ctrl_poll(chan, rx_control)) { 4923 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4400 set_bit(CONN_SEND_FBIT, &chan->conn_state); 4924 l2cap_send_srej_tail(chan);
4401 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 4925 } else {
4402 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && 4926 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4403 (chan->unacked_frames > 0)) 4927 &chan->conn_state) &&
4928 chan->unacked_frames)
4404 __set_retrans_timer(chan); 4929 __set_retrans_timer(chan);
4405 4930
4406 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4931 l2cap_send_ack(chan);
4407 l2cap_send_srejtail(chan); 4932 }
4933 break;
4934 case L2CAP_EV_RECV_RNR:
4935 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4936 l2cap_pass_to_tx(chan, control);
4937 if (control->poll) {
4938 l2cap_send_srej_tail(chan);
4408 } else { 4939 } else {
4409 l2cap_send_i_or_rr_or_rnr(chan); 4940 struct l2cap_ctrl rr_control;
4941 memset(&rr_control, 0, sizeof(rr_control));
4942 rr_control.sframe = 1;
4943 rr_control.super = L2CAP_SUPER_RR;
4944 rr_control.reqseq = chan->buffer_seq;
4945 l2cap_send_sframe(chan, &rr_control);
4410 } 4946 }
4411 4947
4412 } else if (__is_ctrl_final(chan, rx_control)) { 4948 break;
4413 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4949 case L2CAP_EV_RECV_REJ:
4414 4950 l2cap_handle_rej(chan, control);
4415 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) 4951 break;
4416 l2cap_retransmit_frames(chan); 4952 case L2CAP_EV_RECV_SREJ:
4417 4953 l2cap_handle_srej(chan, control);
4418 } else { 4954 break;
4419 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && 4955 }
4420 (chan->unacked_frames > 0))
4421 __set_retrans_timer(chan);
4422 4956
4423 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4957 if (skb && !skb_in_use) {
4424 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) 4958 BT_DBG("Freeing %p", skb);
4425 l2cap_send_ack(chan); 4959 kfree_skb(skb);
4426 else
4427 l2cap_ertm_send(chan);
4428 } 4960 }
4961
4962 return err;
4429} 4963}
4430 4964
4431static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control) 4965static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4432{ 4966{
4433 u16 tx_seq = __get_reqseq(chan, rx_control); 4967 /* Make sure reqseq is for a packet that has been sent but not acked */
4434 4968 u16 unacked;
4435 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4436
4437 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4438
4439 chan->expected_ack_seq = tx_seq;
4440 l2cap_drop_acked_frames(chan);
4441
4442 if (__is_ctrl_final(chan, rx_control)) {
4443 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4444 l2cap_retransmit_frames(chan);
4445 } else {
4446 l2cap_retransmit_frames(chan);
4447 4969
4448 if (test_bit(CONN_WAIT_F, &chan->conn_state)) 4970 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4449 set_bit(CONN_REJ_ACT, &chan->conn_state); 4971 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
4450 }
4451} 4972}
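
__valid_reqseq() accepts a reqseq only if it points into the in-flight range. With the same 6-bit wraparound: next_tx_seq = 5 and expected_ack_seq = 62 give unacked = 7, so reqseq values 62 through 5 are acceptable and anything else is treated as a protocol error. The check restated with the illustrative helper from the earlier sketches:

#include <assert.h>
#include <stdint.h>

#define SEQ_MOD 64 /* tx_win_max + 1, assuming 6-bit sequence numbers */

static uint16_t seq_offset(uint16_t seq1, uint16_t seq2)
{
	return (uint16_t)((seq1 + SEQ_MOD - seq2) % SEQ_MOD);
}

static int valid_reqseq(uint16_t next_tx_seq, uint16_t expected_ack_seq,
			uint16_t reqseq)
{
	uint16_t unacked = seq_offset(next_tx_seq, expected_ack_seq);

	return seq_offset(next_tx_seq, reqseq) <= unacked;
}

int main(void)
{
	assert(valid_reqseq(5, 62, 0));   /* inside the in-flight range */
	assert(!valid_reqseq(5, 62, 10)); /* never sent: reject */
	return 0;
}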
4452static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4453{
4454 u16 tx_seq = __get_reqseq(chan, rx_control);
4455
4456 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4457
4458 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4459 4973
4460 if (__is_ctrl_poll(chan, rx_control)) { 4974static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4461 chan->expected_ack_seq = tx_seq; 4975 struct sk_buff *skb, u8 event)
4462 l2cap_drop_acked_frames(chan); 4976{
4463 4977 int err = 0;
4464 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4465 l2cap_retransmit_one_frame(chan, tx_seq);
4466 4978
4467 l2cap_ertm_send(chan); 4979 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
4980 control, skb, event, chan->rx_state);
4468 4981
4469 if (test_bit(CONN_WAIT_F, &chan->conn_state)) { 4982 if (__valid_reqseq(chan, control->reqseq)) {
4470 chan->srej_save_reqseq = tx_seq; 4983 switch (chan->rx_state) {
4471 set_bit(CONN_SREJ_ACT, &chan->conn_state); 4984 case L2CAP_RX_STATE_RECV:
4985 err = l2cap_rx_state_recv(chan, control, skb, event);
4986 break;
4987 case L2CAP_RX_STATE_SREJ_SENT:
4988 err = l2cap_rx_state_srej_sent(chan, control, skb,
4989 event);
4990 break;
4991 default:
4992 /* shut it down */
4993 break;
4472 } 4994 }
4473 } else if (__is_ctrl_final(chan, rx_control)) {
4474 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4475 chan->srej_save_reqseq == tx_seq)
4476 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4477 else
4478 l2cap_retransmit_one_frame(chan, tx_seq);
4479 	} else {
4480 		l2cap_retransmit_one_frame(chan, tx_seq);
4481 		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4482 			chan->srej_save_reqseq = tx_seq;
4483 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
4484 		}
4485 	}
4995 	} else {
4996 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
4997 		       control->reqseq, chan->next_tx_seq,
4998 		       chan->expected_ack_seq);
4999 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5000 	}
5001 
5002 	return err;
4486 5003 }
4487 5004
4488static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control) 5005static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5006 struct sk_buff *skb)
4489{ 5007{
4490 u16 tx_seq = __get_reqseq(chan, rx_control); 5008 int err = 0;
4491 5009
4492 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); 5010 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5011 chan->rx_state);
4493 5012
4494 set_bit(CONN_REMOTE_BUSY, &chan->conn_state); 5013 if (l2cap_classify_txseq(chan, control->txseq) ==
4495 chan->expected_ack_seq = tx_seq; 5014 L2CAP_TXSEQ_EXPECTED) {
4496 l2cap_drop_acked_frames(chan); 5015 l2cap_pass_to_tx(chan, control);
4497 5016
4498 if (__is_ctrl_poll(chan, rx_control)) 5017 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
4499 set_bit(CONN_SEND_FBIT, &chan->conn_state); 5018 __next_seq(chan, chan->buffer_seq));
4500 5019
4501 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 5020 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4502 __clear_retrans_timer(chan);
4503 if (__is_ctrl_poll(chan, rx_control))
4504 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4505 return;
4506 }
4507 5021
4508 if (__is_ctrl_poll(chan, rx_control)) { 5022 l2cap_reassemble_sdu(chan, skb, control);
4509 l2cap_send_srejtail(chan);
4510 } else { 5023 } else {
4511 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR); 5024 if (chan->sdu) {
4512 l2cap_send_sframe(chan, rx_control); 5025 kfree_skb(chan->sdu);
4513 } 5026 chan->sdu = NULL;
4514} 5027 }
4515 5028 chan->sdu_last_frag = NULL;
4516static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) 5029 chan->sdu_len = 0;
4517{
4518 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4519 5030
4520 if (__is_ctrl_final(chan, rx_control) && 5031 if (skb) {
4521 test_bit(CONN_WAIT_F, &chan->conn_state)) { 5032 BT_DBG("Freeing %p", skb);
4522 __clear_monitor_timer(chan); 5033 kfree_skb(skb);
4523 if (chan->unacked_frames > 0) 5034 }
4524 __set_retrans_timer(chan);
4525 clear_bit(CONN_WAIT_F, &chan->conn_state);
4526 } 5035 }
4527 5036
4528 switch (__get_ctrl_super(chan, rx_control)) { 5037 chan->last_acked_seq = control->txseq;
4529 case L2CAP_SUPER_RR: 5038 chan->expected_tx_seq = __next_seq(chan, control->txseq);
4530 l2cap_data_channel_rrframe(chan, rx_control);
4531 break;
4532 5039
4533 case L2CAP_SUPER_REJ: 5040 return err;
4534 l2cap_data_channel_rejframe(chan, rx_control);
4535 break;
4536
4537 case L2CAP_SUPER_SREJ:
4538 l2cap_data_channel_srejframe(chan, rx_control);
4539 break;
4540
4541 case L2CAP_SUPER_RNR:
4542 l2cap_data_channel_rnrframe(chan, rx_control);
4543 break;
4544 }
4545
4546 kfree_skb(skb);
4547 return 0;
4548} 5041}
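
As l2cap_stream_rx() reads, streaming mode never stalls on a gap: only the exact expected txseq is reassembled, an out-of-sequence frame (and any partial SDU in progress) is discarded, and expected_tx_seq is resynchronized from the frame that actually arrived. A toy model of that policy (illustrative, not the kernel path):

#include <stdio.h>
#include <stdint.h>

#define SEQ_MOD 64 /* assuming 6-bit sequence numbers */

static uint16_t stream_rx(uint16_t expected, uint16_t txseq)
{
	if (txseq == expected)
		printf("deliver frame %u\n", txseq);
	else
		printf("gap: drop frame %u and any partial SDU\n", txseq);

	/* Either way, resync to just past the frame we actually saw. */
	return (uint16_t)((txseq + 1) % SEQ_MOD);
}

int main(void)
{
	uint16_t expected = 3;

	expected = stream_rx(expected, 3); /* delivered, expected -> 4 */
	expected = stream_rx(expected, 6); /* 4 and 5 lost: 6 dropped, resync */
	expected = stream_rx(expected, 7); /* back in sequence: delivered */
	return 0;
}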
4549 5042
4550static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) 5043static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4551{ 5044{
4552 u32 control; 5045 struct l2cap_ctrl *control = &bt_cb(skb)->control;
4553 u16 req_seq; 5046 u16 len;
4554 int len, next_tx_seq_offset, req_seq_offset; 5047 u8 event;
4555 5048
4556 __unpack_control(chan, skb); 5049 __unpack_control(chan, skb);
4557 5050
4558 control = __get_control(chan, skb->data);
4559 skb_pull(skb, __ctrl_size(chan));
4560 len = skb->len; 5051 len = skb->len;
4561 5052
4562 /* 5053 /*
4563 * We can just drop the corrupted I-frame here. 5054 * We can just drop the corrupted I-frame here.
4564 * Receiver will miss it and start proper recovery 5055 * Receiver will miss it and start proper recovery
4565 * procedures and ask retransmission. 5056 * procedures and ask for retransmission.
4566 */ 5057 */
4567 if (l2cap_check_fcs(chan, skb)) 5058 if (l2cap_check_fcs(chan, skb))
4568 goto drop; 5059 goto drop;
4569 5060
4570 if (__is_sar_start(chan, control) && !__is_sframe(chan, control)) 5061 if (!control->sframe && control->sar == L2CAP_SAR_START)
4571 len -= L2CAP_SDULEN_SIZE; 5062 len -= L2CAP_SDULEN_SIZE;
4572 5063
4573 if (chan->fcs == L2CAP_FCS_CRC16) 5064 if (chan->fcs == L2CAP_FCS_CRC16)
@@ -4578,34 +5069,57 @@ static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
 		goto drop;
 	}
 
-	req_seq = __get_reqseq(chan, control);
-
-	req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
-
-	next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
-					  chan->expected_ack_seq);
+	if (!control->sframe) {
+		int err;
 
-	/* check for invalid req-seq */
-	if (req_seq_offset > next_tx_seq_offset) {
-		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
-		goto drop;
-	}
+		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
+		       control->sar, control->reqseq, control->final,
+		       control->txseq);
 
-	if (!__is_sframe(chan, control)) {
-		if (len < 0) {
-			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+		/* Validate F-bit - F=0 always valid, F=1 only
+		 * valid in TX WAIT_F
+		 */
+		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
 			goto drop;
+
+		if (chan->mode != L2CAP_MODE_STREAMING) {
+			event = L2CAP_EV_RECV_IFRAME;
+			err = l2cap_rx(chan, control, skb, event);
+		} else {
+			err = l2cap_stream_rx(chan, control, skb);
 		}
 
-		l2cap_data_channel_iframe(chan, control, skb);
+		if (err)
+			l2cap_send_disconn_req(chan->conn, chan,
+					       ECONNRESET);
 	} else {
+		const u8 rx_func_to_event[4] = {
+			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
+			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
+		};
+
+		/* Only I-frames are expected in streaming mode */
+		if (chan->mode == L2CAP_MODE_STREAMING)
+			goto drop;
+
+		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
+		       control->reqseq, control->final, control->poll,
+		       control->super);
+
 		if (len != 0) {
 			BT_ERR("%d", len);
 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
 			goto drop;
 		}
 
-		l2cap_data_channel_sframe(chan, control, skb);
+		/* Validate F and P bits */
+		if (control->final && (control->poll ||
+				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
+			goto drop;
+
+		event = rx_func_to_event[control->super];
+		if (l2cap_rx(chan, control, skb, event))
+			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
 	}
 
 	return 0;
@@ -4615,19 +5129,27 @@ drop:
 	return 0;
 }
 
-static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
+static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
+			       struct sk_buff *skb)
 {
 	struct l2cap_chan *chan;
-	u32 control;
-	u16 tx_seq;
-	int len;
 
 	chan = l2cap_get_chan_by_scid(conn, cid);
 	if (!chan) {
-		BT_DBG("unknown cid 0x%4.4x", cid);
-		/* Drop packet and return */
-		kfree_skb(skb);
-		return 0;
+		if (cid == L2CAP_CID_A2MP) {
+			chan = a2mp_channel_create(conn, skb);
+			if (!chan) {
+				kfree_skb(skb);
+				return;
+			}
+
+			l2cap_chan_lock(chan);
+		} else {
+			BT_DBG("unknown cid 0x%4.4x", cid);
+			/* Drop packet and return */
+			kfree_skb(skb);
+			return;
+		}
 	}
 
 	BT_DBG("chan %p, len %d", chan, skb->len);
@@ -4645,49 +5167,13 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
 		if (chan->imtu < skb->len)
 			goto drop;
 
-		if (!chan->ops->recv(chan->data, skb))
+		if (!chan->ops->recv(chan, skb))
 			goto done;
 		break;
 
 	case L2CAP_MODE_ERTM:
-		l2cap_ertm_data_rcv(chan, skb);
-
-		goto done;
-
 	case L2CAP_MODE_STREAMING:
-		control = __get_control(chan, skb->data);
-		skb_pull(skb, __ctrl_size(chan));
-		len = skb->len;
-
-		if (l2cap_check_fcs(chan, skb))
-			goto drop;
-
-		if (__is_sar_start(chan, control))
-			len -= L2CAP_SDULEN_SIZE;
-
-		if (chan->fcs == L2CAP_FCS_CRC16)
-			len -= L2CAP_FCS_SIZE;
-
-		if (len > chan->mps || len < 0 || __is_sframe(chan, control))
-			goto drop;
-
-		tx_seq = __get_txseq(chan, control);
-
-		if (chan->expected_tx_seq != tx_seq) {
-			/* Frame(s) missing - must discard partial SDU */
-			kfree_skb(chan->sdu);
-			chan->sdu = NULL;
-			chan->sdu_last_frag = NULL;
-			chan->sdu_len = 0;
-
-			/* TODO: Notify userland of missing data */
-		}
-
-		chan->expected_tx_seq = __next_seq(chan, tx_seq);
-
-		if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
-			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
-
+		l2cap_data_rcv(chan, skb);
 		goto done;
 
 	default:
@@ -4700,11 +5186,10 @@ drop:
 
 done:
 	l2cap_chan_unlock(chan);
-
-	return 0;
 }
 
-static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
+static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
+				  struct sk_buff *skb)
 {
 	struct l2cap_chan *chan;
 
@@ -4720,17 +5205,15 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
 	if (chan->imtu < skb->len)
 		goto drop;
 
-	if (!chan->ops->recv(chan->data, skb))
-		return 0;
+	if (!chan->ops->recv(chan, skb))
+		return;
 
 drop:
 	kfree_skb(skb);
-
-	return 0;
 }
 
-static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
-				    struct sk_buff *skb)
+static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
+			      struct sk_buff *skb)
 {
 	struct l2cap_chan *chan;
 
@@ -4746,13 +5229,11 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
 	if (chan->imtu < skb->len)
 		goto drop;
 
-	if (!chan->ops->recv(chan->data, skb))
-		return 0;
+	if (!chan->ops->recv(chan, skb))
+		return;
 
 drop:
 	kfree_skb(skb);
-
-	return 0;
 }
 
 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
@@ -4780,7 +5261,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
 
 	case L2CAP_CID_CONN_LESS:
 		psm = get_unaligned((__le16 *) skb->data);
-		skb_pull(skb, 2);
+		skb_pull(skb, L2CAP_PSMLEN_SIZE);
 		l2cap_conless_channel(conn, psm, skb);
 		break;
 
@@ -4974,6 +5455,17 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
 			rsp.status = cpu_to_le16(stat);
 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
 				       sizeof(rsp), &rsp);
+
+			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
+			    res == L2CAP_CR_SUCCESS) {
+				char buf[128];
+				set_bit(CONF_REQ_SENT, &chan->conf_state);
+				l2cap_send_cmd(conn, l2cap_get_ident(conn),
+					       L2CAP_CONF_REQ,
+					       l2cap_build_conf_req(chan, buf),
+					       buf);
+				chan->num_conf_req++;
+			}
 		}
 
 		l2cap_chan_unlock(chan);
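
The receive-path rewrite above collapses the four per-S-frame handlers into a single state machine entry point, keyed by the 2-bit super field of the control word. A minimal userspace sketch of that table-driven dispatch, using illustrative enum values rather than the kernel's L2CAP_* constants:

#include <stdio.h>

/* Stand-ins for the kernel's L2CAP_SUPER_* field values; the 2-bit
 * "super" field of an S-frame selects RR/REJ/RNR/SREJ. */
enum { SUPER_RR = 0, SUPER_REJ = 1, SUPER_RNR = 2, SUPER_SREJ = 3 };
/* Stand-ins for the L2CAP_EV_RECV_* state machine events. */
enum { EV_RECV_RR, EV_RECV_REJ, EV_RECV_RNR, EV_RECV_SREJ };

static int sframe_event(unsigned int super)
{
	/* Mirrors rx_func_to_event[] above: index by the super field
	 * instead of switching over it. */
	static const int rx_func_to_event[4] = {
		EV_RECV_RR, EV_RECV_REJ, EV_RECV_RNR, EV_RECV_SREJ
	};
	return rx_func_to_event[super & 0x3];
}

int main(void)
{
	for (unsigned int s = 0; s < 4; s++)
		printf("super %u -> event %d\n", s, sframe_event(s));
	return 0;
}

Indexing rx_func_to_event[] by control->super replaces the old switch over __get_ctrl_super(), so every S-frame funnels into the same l2cap_rx() transition function.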
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 3bb1611b9d4..a4bb27e8427 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -27,7 +27,6 @@
 
 /* Bluetooth L2CAP sockets. */
 
-#include <linux/security.h>
 #include <linux/export.h>
 
 #include <net/bluetooth/bluetooth.h>
@@ -89,8 +88,8 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
 	if (err < 0)
 		goto done;
 
-	if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
-	    __le16_to_cpu(la.l2_psm) == 0x0003)
+	if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP ||
+	    __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM)
 		chan->sec_level = BT_SECURITY_SDP;
 
 	bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
@@ -446,6 +445,22 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
 	return err;
 }
 
+static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu)
+{
+	switch (chan->scid) {
+	case L2CAP_CID_LE_DATA:
+		if (mtu < L2CAP_LE_MIN_MTU)
+			return false;
+		break;
+
+	default:
+		if (mtu < L2CAP_DEFAULT_MIN_MTU)
+			return false;
+	}
+
+	return true;
+}
+
 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
 {
 	struct sock *sk = sock->sk;
@@ -484,6 +499,11 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
 			break;
 		}
 
+		if (!l2cap_valid_mtu(chan, opts.imtu)) {
+			err = -EINVAL;
+			break;
+		}
+
 		chan->mode = opts.mode;
 		switch (chan->mode) {
 		case L2CAP_MODE_BASIC:
@@ -873,9 +893,34 @@ static int l2cap_sock_release(struct socket *sock)
 	return err;
 }
 
-static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
+static void l2cap_sock_cleanup_listen(struct sock *parent)
 {
-	struct sock *sk, *parent = data;
+	struct sock *sk;
+
+	BT_DBG("parent %p", parent);
+
+	/* Close not yet accepted channels */
+	while ((sk = bt_accept_dequeue(parent, NULL))) {
+		struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+
+		l2cap_chan_lock(chan);
+		__clear_chan_timer(chan);
+		l2cap_chan_close(chan, ECONNRESET);
+		l2cap_chan_unlock(chan);
+
+		l2cap_sock_kill(sk);
+	}
+}
+
+static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
+{
+	struct sock *sk, *parent = chan->data;
+
+	/* Check for backlog size */
+	if (sk_acceptq_is_full(parent)) {
+		BT_DBG("backlog full %d", parent->sk_ack_backlog);
+		return NULL;
+	}
 
 	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
 			      GFP_ATOMIC);
@@ -889,10 +934,10 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
 	return l2cap_pi(sk)->chan;
 }
 
-static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb)
+static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
 {
 	int err;
-	struct sock *sk = data;
+	struct sock *sk = chan->data;
 	struct l2cap_pinfo *pi = l2cap_pi(sk);
 
 	lock_sock(sk);
@@ -925,16 +970,57 @@ done:
 	return err;
 }
 
-static void l2cap_sock_close_cb(void *data)
+static void l2cap_sock_close_cb(struct l2cap_chan *chan)
 {
-	struct sock *sk = data;
+	struct sock *sk = chan->data;
 
 	l2cap_sock_kill(sk);
 }
 
-static void l2cap_sock_state_change_cb(void *data, int state)
+static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
 {
-	struct sock *sk = data;
+	struct sock *sk = chan->data;
+	struct sock *parent;
+
+	lock_sock(sk);
+
+	parent = bt_sk(sk)->parent;
+
+	sock_set_flag(sk, SOCK_ZAPPED);
+
+	switch (chan->state) {
+	case BT_OPEN:
+	case BT_BOUND:
+	case BT_CLOSED:
+		break;
+	case BT_LISTEN:
+		l2cap_sock_cleanup_listen(sk);
+		sk->sk_state = BT_CLOSED;
+		chan->state = BT_CLOSED;
+
+		break;
+	default:
+		sk->sk_state = BT_CLOSED;
+		chan->state = BT_CLOSED;
+
+		sk->sk_err = err;
+
+		if (parent) {
+			bt_accept_unlink(sk);
+			parent->sk_data_ready(parent, 0);
+		} else {
+			sk->sk_state_change(sk);
+		}
+
+		break;
+	}
+
+	release_sock(sk);
+}
+
+static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state)
+{
+	struct sock *sk = chan->data;
 
 	sk->sk_state = state;
 }
@@ -955,12 +1041,34 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
 	return skb;
 }
 
+static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
+{
+	struct sock *sk = chan->data;
+	struct sock *parent;
+
+	lock_sock(sk);
+
+	parent = bt_sk(sk)->parent;
+
+	BT_DBG("sk %p, parent %p", sk, parent);
+
+	sk->sk_state = BT_CONNECTED;
+	sk->sk_state_change(sk);
+
+	if (parent)
+		parent->sk_data_ready(parent, 0);
+
+	release_sock(sk);
+}
+
 static struct l2cap_ops l2cap_chan_ops = {
 	.name = "L2CAP Socket Interface",
 	.new_connection = l2cap_sock_new_connection_cb,
 	.recv = l2cap_sock_recv_cb,
 	.close = l2cap_sock_close_cb,
+	.teardown = l2cap_sock_teardown_cb,
 	.state_change = l2cap_sock_state_change_cb,
+	.ready = l2cap_sock_ready_cb,
 	.alloc_skb = l2cap_sock_alloc_skb_cb,
 };
 
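The new l2cap_valid_mtu() helper above gives setsockopt a per-channel-type floor for opts.imtu before the mode is accepted. A self-contained sketch of the same check; the numeric floors (23 for LE, 48 for BR/EDR) are the spec minimums and are assumed here rather than copied from the kernel header:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CID_LE_DATA     0x0004  /* assumed LE data channel CID */
#define LE_MIN_MTU      23      /* assumed L2CAP_LE_MIN_MTU */
#define DEFAULT_MIN_MTU 48      /* assumed L2CAP_DEFAULT_MIN_MTU */

static bool valid_mtu(uint16_t scid, uint16_t mtu)
{
	/* LE fixed channels allow a smaller floor than BR/EDR ones. */
	if (scid == CID_LE_DATA)
		return mtu >= LE_MIN_MTU;
	return mtu >= DEFAULT_MIN_MTU;
}

int main(void)
{
	printf("%d\n", valid_mtu(CID_LE_DATA, 23)); /* 1: ok for LE */
	printf("%d\n", valid_mtu(0x0040, 23));      /* 0: below BR/EDR floor */
	return 0;
}

Rejecting an undersized MTU with -EINVAL at setsockopt time avoids configuring a channel that could never pass spec-compliant traffic.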
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index 506628876f3..e1c97527e16 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -26,12 +26,7 @@
 
 #define pr_fmt(fmt) "Bluetooth: " fmt
 
-#include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/stddef.h>
-#include <linux/string.h>
-#include <asm/errno.h>
+#include <linux/export.h>
 
 #include <net/bluetooth/bluetooth.h>
 
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 25d22077607..c72307cc25f 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -24,8 +24,6 @@
 
 /* Bluetooth HCI Management interface */
 
-#include <linux/kernel.h>
-#include <linux/uaccess.h>
 #include <linux/module.h>
 #include <asm/unaligned.h>
 
@@ -714,7 +712,8 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
 }
 
 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
-				 void (*cb)(struct pending_cmd *cmd, void *data),
+				 void (*cb)(struct pending_cmd *cmd,
+					    void *data),
 				 void *data)
 {
 	struct list_head *p, *n;
@@ -871,7 +870,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
 	}
 
 	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
-			mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
 				 MGMT_STATUS_BUSY);
 		goto failed;
@@ -978,7 +977,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
 	}
 
 	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
-			mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
 				 MGMT_STATUS_BUSY);
 		goto failed;
@@ -1001,7 +1000,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
 		scan = 0;
 
 		if (test_bit(HCI_ISCAN, &hdev->flags) &&
-				hdev->discov_timeout > 0)
+		    hdev->discov_timeout > 0)
 			cancel_delayed_work(&hdev->discov_off);
 	}
 
@@ -1056,7 +1055,7 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
 		bool changed = false;
 
 		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
-						&hdev->dev_flags)) {
+					  &hdev->dev_flags)) {
 			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
 			changed = true;
 		}
@@ -1317,7 +1316,7 @@ static bool enable_service_cache(struct hci_dev *hdev)
 }
 
 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
-								u16 len)
+		       u16 len)
 {
 	struct mgmt_cp_remove_uuid *cp = data;
 	struct pending_cmd *cmd;
@@ -1442,7 +1441,7 @@ unlock:
 }
 
 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
-								u16 len)
+			  u16 len)
 {
 	struct mgmt_cp_load_link_keys *cp = data;
 	u16 key_count, expected_len;
@@ -1454,13 +1453,13 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
 					sizeof(struct mgmt_link_key_info);
 	if (expected_len != len) {
 		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
-							len, expected_len);
+		       len, expected_len);
 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
-						MGMT_STATUS_INVALID_PARAMS);
+				  MGMT_STATUS_INVALID_PARAMS);
 	}
 
 	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
-								key_count);
+	       key_count);
 
 	hci_dev_lock(hdev);
 
@@ -1535,10 +1534,10 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
 	if (cp->disconnect) {
 		if (cp->addr.type == BDADDR_BREDR)
 			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
-							&cp->addr.bdaddr);
+						       &cp->addr.bdaddr);
 		else
 			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
-							&cp->addr.bdaddr);
+						       &cp->addr.bdaddr);
 	} else {
 		conn = NULL;
 	}
@@ -1594,7 +1593,8 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
 	}
 
 	if (cp->addr.type == BDADDR_BREDR)
-		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
+		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+					       &cp->addr.bdaddr);
 	else
 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
 
@@ -1813,7 +1813,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
 	hdev->io_capability = cp->io_capability;
 
 	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
-							hdev->io_capability);
+	       hdev->io_capability);
 
 	hci_dev_unlock(hdev);
 
@@ -1821,7 +1821,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
 			    0);
 }
 
-static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
+static struct pending_cmd *find_pairing(struct hci_conn *conn)
 {
 	struct hci_dev *hdev = conn->hdev;
 	struct pending_cmd *cmd;
@@ -1873,6 +1873,22 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status)
 	pairing_complete(cmd, mgmt_status(status));
 }
 
+static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
+{
+	struct pending_cmd *cmd;
+
+	BT_DBG("status %u", status);
+
+	if (!status)
+		return;
+
+	cmd = find_pairing(conn);
+	if (!cmd)
+		BT_DBG("Unable to find a pending command");
+	else
+		pairing_complete(cmd, mgmt_status(status));
+}
+
 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
 		       u16 len)
 {
@@ -1911,8 +1927,15 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
 	rp.addr.type = cp->addr.type;
 
 	if (IS_ERR(conn)) {
+		int status;
+
+		if (PTR_ERR(conn) == -EBUSY)
+			status = MGMT_STATUS_BUSY;
+		else
+			status = MGMT_STATUS_CONNECT_FAILED;
+
 		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
-				   MGMT_STATUS_CONNECT_FAILED, &rp,
+				   status, &rp,
 				   sizeof(rp));
 		goto unlock;
 	}
@@ -1934,6 +1957,8 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
 	/* For LE, just connecting isn't a proof that the pairing finished */
 	if (cp->addr.type == BDADDR_BREDR)
 		conn->connect_cfm_cb = pairing_complete_cb;
+	else
+		conn->connect_cfm_cb = le_connect_complete_cb;
 
 	conn->security_cfm_cb = pairing_complete_cb;
 	conn->disconn_cfm_cb = pairing_complete_cb;
@@ -1941,7 +1966,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
 	cmd->user_data = conn;
 
 	if (conn->state == BT_CONNECTED &&
-			hci_conn_security(conn, sec_level, auth_type))
+	    hci_conn_security(conn, sec_level, auth_type))
 		pairing_complete(cmd, 0);
 
 	err = 0;
@@ -2238,7 +2263,7 @@ unlock:
 }
 
 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
-						void *data, u16 len)
+				  void *data, u16 len)
 {
 	struct mgmt_cp_remove_remote_oob_data *cp = data;
 	u8 status;
@@ -2407,7 +2432,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
 
 	case DISCOVERY_RESOLVING:
 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
-							NAME_PENDING);
+						     NAME_PENDING);
 		if (!e) {
 			mgmt_pending_remove(cmd);
 			err = cmd_complete(sk, hdev->id,
@@ -2629,7 +2654,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
 					sizeof(struct mgmt_ltk_info);
 	if (expected_len != len) {
 		BT_ERR("load_keys: expected %u bytes, got %u bytes",
-							len, expected_len);
+		       len, expected_len);
 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
 				  EINVAL);
 	}
@@ -2754,7 +2779,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
 	}
 
 	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
-			mgmt_handlers[opcode].func == NULL) {
+	    mgmt_handlers[opcode].func == NULL) {
 		BT_DBG("Unknown op %u", opcode);
 		err = cmd_status(sk, index, opcode,
 				 MGMT_STATUS_UNKNOWN_COMMAND);
@@ -2762,7 +2787,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
 	}
 
 	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
-			(!hdev && opcode >= MGMT_OP_READ_INFO)) {
+	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
 		err = cmd_status(sk, index, opcode,
 				 MGMT_STATUS_INVALID_INDEX);
 		goto done;
@@ -2771,7 +2796,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
 	handler = &mgmt_handlers[opcode];
 
 	if ((handler->var_len && len < handler->data_len) ||
-			(!handler->var_len && len != handler->data_len)) {
+	    (!handler->var_len && len != handler->data_len)) {
 		err = cmd_status(sk, index, opcode,
 				 MGMT_STATUS_INVALID_PARAMS);
 		goto done;
@@ -2955,7 +2980,7 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
 	ev.key.addr.type = BDADDR_BREDR;
 	ev.key.type = key->type;
-	memcpy(ev.key.val, key->val, 16);
+	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
 	ev.key.pin_len = key->pin_len;
 
 	return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
@@ -3090,7 +3115,7 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
 	mgmt_pending_remove(cmd);
 
 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
-									hdev);
+			     hdev);
 	return err;
 }
 
@@ -3180,7 +3205,7 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
 }
 
 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
-						u8 link_type, u8 addr_type)
+			      u8 link_type, u8 addr_type)
 {
 	struct mgmt_ev_user_passkey_request ev;
 
@@ -3194,8 +3219,8 @@ int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
 }
 
 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
-					u8 link_type, u8 addr_type, u8 status,
-					u8 opcode)
+				      u8 link_type, u8 addr_type, u8 status,
+				      u8 opcode)
 {
 	struct pending_cmd *cmd;
 	struct mgmt_rp_user_confirm_reply rp;
@@ -3226,7 +3251,8 @@ int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
 					 u8 link_type, u8 addr_type, u8 status)
 {
 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
-					status, MGMT_OP_USER_CONFIRM_NEG_REPLY);
+					  status,
+					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
 }
 
 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
@@ -3240,7 +3266,8 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
 					 u8 link_type, u8 addr_type, u8 status)
 {
 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
-					status, MGMT_OP_USER_PASSKEY_NEG_REPLY);
+					  status,
+					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
 }
 
 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
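
pair_device() above now distinguishes a connection attempt that is already in flight from a genuine failure when the connect call hands back an error pointer. A hedged userspace sketch of that ERR_PTR-to-status mapping, with stand-in status values (the real codes live in net/bluetooth/mgmt.h):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the MGMT status codes. */
enum { STATUS_BUSY = 1, STATUS_CONNECT_FAILED = 2 };

/* Userspace model of the kernel's PTR_ERR(): an error pointer encodes
 * a small negative errno in the top of the address range. */
static long ptr_err(const void *p)
{
	return (long)(intptr_t)p;
}

/* Mirrors the branch added to pair_device(): -EBUSY means a connect
 * attempt is already pending, anything else is a connect failure. */
static int conn_status(const void *conn)
{
	if (ptr_err(conn) == -EBUSY)
		return STATUS_BUSY;
	return STATUS_CONNECT_FAILED;
}

int main(void)
{
	printf("-EBUSY -> %d\n", conn_status((void *)(intptr_t)-EBUSY));
	printf("-EIO   -> %d\n", conn_status((void *)(intptr_t)-EIO));
	return 0;
}

Reporting MGMT_STATUS_BUSY instead of a blanket CONNECT_FAILED lets userspace retry a pairing request rather than treat it as a hard error.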
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 8a602388f1e..c75107ef892 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -26,22 +26,8 @@
  */
 
 #include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/device.h>
 #include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/net.h>
-#include <linux/mutex.h>
 #include <linux/kthread.h>
-#include <linux/slab.h>
-
-#include <net/sock.h>
-#include <linux/uaccess.h>
 #include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
@@ -115,14 +101,14 @@ static void rfcomm_session_del(struct rfcomm_session *s);
 #define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1)
 #define __get_rpn_parity(line)    (((line) >> 3) & 0x7)
 
-static inline void rfcomm_schedule(void)
+static void rfcomm_schedule(void)
 {
 	if (!rfcomm_thread)
 		return;
 	wake_up_process(rfcomm_thread);
 }
 
-static inline void rfcomm_session_put(struct rfcomm_session *s)
+static void rfcomm_session_put(struct rfcomm_session *s)
 {
 	if (atomic_dec_and_test(&s->refcnt))
 		rfcomm_session_del(s);
@@ -227,7 +213,7 @@ static int rfcomm_l2sock_create(struct socket **sock)
 	return err;
 }
 
-static inline int rfcomm_check_security(struct rfcomm_dlc *d)
+static int rfcomm_check_security(struct rfcomm_dlc *d)
 {
 	struct sock *sk = d->session->sock->sk;
 	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
@@ -1750,7 +1736,7 @@ static void rfcomm_process_connect(struct rfcomm_session *s)
 /* Send data queued for the DLC.
  * Return number of frames left in the queue.
  */
-static inline int rfcomm_process_tx(struct rfcomm_dlc *d)
+static int rfcomm_process_tx(struct rfcomm_dlc *d)
 {
 	struct sk_buff *skb;
 	int err;
@@ -1798,7 +1784,7 @@ static inline int rfcomm_process_tx(struct rfcomm_dlc *d)
 	return skb_queue_len(&d->tx_queue);
 }
 
-static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
+static void rfcomm_process_dlcs(struct rfcomm_session *s)
 {
 	struct rfcomm_dlc *d;
 	struct list_head *p, *n;
@@ -1858,7 +1844,7 @@ static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
 	}
 }
 
-static inline void rfcomm_process_rx(struct rfcomm_session *s)
+static void rfcomm_process_rx(struct rfcomm_session *s)
 {
 	struct socket *sock = s->sock;
 	struct sock *sk = sock->sk;
@@ -1883,7 +1869,7 @@ static inline void rfcomm_process_rx(struct rfcomm_session *s)
 	}
 }
 
-static inline void rfcomm_accept_connection(struct rfcomm_session *s)
+static void rfcomm_accept_connection(struct rfcomm_session *s)
 {
 	struct socket *sock = s->sock, *nsock;
 	int err;
@@ -1917,7 +1903,7 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s)
 		sock_release(nsock);
 }
 
-static inline void rfcomm_check_connection(struct rfcomm_session *s)
+static void rfcomm_check_connection(struct rfcomm_session *s)
 {
 	struct sock *sk = s->sock->sk;
 
@@ -1941,7 +1927,7 @@ static inline void rfcomm_check_connection(struct rfcomm_session *s)
 	}
 }
 
-static inline void rfcomm_process_sessions(void)
+static void rfcomm_process_sessions(void)
 {
 	struct list_head *p, *n;
 
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index e8707debb86..7e1e59645c0 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -25,27 +25,8 @@
  * RFCOMM sockets.
  */
 
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/socket.h>
-#include <linux/skbuff.h>
-#include <linux/list.h>
-#include <linux/device.h>
+#include <linux/export.h>
 #include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/security.h>
-#include <net/sock.h>
-
-#include <linux/uaccess.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index d1820ff14ae..cb960773c00 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -31,11 +31,6 @@
 #include <linux/tty_driver.h>
 #include <linux/tty_flip.h>
 
-#include <linux/capability.h>
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/workqueue.h>
-
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/rfcomm.h>
@@ -132,7 +127,7 @@ static struct rfcomm_dev *__rfcomm_dev_get(int id)
 	return NULL;
 }
 
-static inline struct rfcomm_dev *rfcomm_dev_get(int id)
+static struct rfcomm_dev *rfcomm_dev_get(int id)
 {
 	struct rfcomm_dev *dev;
 
@@ -345,7 +340,7 @@ static void rfcomm_wfree(struct sk_buff *skb)
 	tty_port_put(&dev->port);
 }
 
-static inline void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
+static void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
 {
 	tty_port_get(&dev->port);
 	atomic_add(skb->truesize, &dev->wmem_alloc);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index cbdd313659a..40bbe25dcff 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -25,26 +25,8 @@
 /* Bluetooth SCO sockets. */
 
 #include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/socket.h>
-#include <linux/skbuff.h>
-#include <linux/device.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
-#include <linux/list.h>
-#include <linux/security.h>
-#include <net/sock.h>
-
-#include <linux/uaccess.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -123,7 +105,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
 	return conn;
 }
 
-static inline struct sock *sco_chan_get(struct sco_conn *conn)
+static struct sock *sco_chan_get(struct sco_conn *conn)
 {
 	struct sock *sk = NULL;
 	sco_conn_lock(conn);
@@ -157,7 +139,8 @@ static int sco_conn_del(struct hci_conn *hcon, int err)
 	return 0;
 }
 
-static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
+static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
+			struct sock *parent)
 {
 	int err = 0;
 
@@ -228,7 +211,7 @@ done:
 	return err;
 }
 
-static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
+static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
 {
 	struct sco_conn *conn = sco_pi(sk)->conn;
 	struct sk_buff *skb;
@@ -254,7 +237,7 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
 	return len;
 }
 
-static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
+static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
 {
 	struct sock *sk = sco_chan_get(conn);
 
@@ -523,7 +506,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
 		goto done;
 
 	err = bt_sock_wait_state(sk, BT_CONNECTED,
-			sock_sndtimeo(sk, flags & O_NONBLOCK));
+				 sock_sndtimeo(sk, flags & O_NONBLOCK));
 
 done:
 	release_sock(sk);
@@ -788,7 +771,7 @@ static int sco_sock_shutdown(struct socket *sock, int how)
 
 		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
 			err = bt_sock_wait_state(sk, BT_CLOSED,
-							sk->sk_lingertime);
+						 sk->sk_lingertime);
 	}
 	release_sock(sk);
 	return err;
@@ -878,7 +861,7 @@ static void sco_conn_ready(struct sco_conn *conn)
 	bh_lock_sock(parent);
 
 	sk = sco_sock_alloc(sock_net(parent), NULL,
-			BTPROTO_SCO, GFP_ATOMIC);
+			    BTPROTO_SCO, GFP_ATOMIC);
 	if (!sk) {
 		bh_unlock_sock(parent);
 		goto done;
@@ -907,7 +890,7 @@ done:
 /* ----- SCO interface with lower layer (HCI) ----- */
 int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
-	register struct sock *sk;
+	struct sock *sk;
 	struct hlist_node *node;
 	int lm = 0;
 
@@ -920,7 +903,7 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
 			continue;
 
 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
-				!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
+		    !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
 			lm |= HCI_LM_ACCEPT;
 			break;
 		}
@@ -981,7 +964,7 @@ static int sco_debugfs_show(struct seq_file *f, void *p)
 
 	sk_for_each(sk, node, &sco_sk_list.head) {
 		seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
-				batostr(&bt_sk(sk)->dst), sk->sk_state);
+			   batostr(&bt_sk(sk)->dst), sk->sk_state);
 	}
 
 	read_unlock(&sco_sk_list.lock);
@@ -1044,8 +1027,8 @@ int __init sco_init(void)
 	}
 
 	if (bt_debugfs) {
-		sco_debugfs = debugfs_create_file("sco", 0444,
-					bt_debugfs, NULL, &sco_debugfs_fops);
+		sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
+						  NULL, &sco_debugfs_fops);
 		if (!sco_debugfs)
 			BT_ERR("Failed to create SCO debug file");
 	}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 6fc7c4708f3..16ef0dc85a0 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -20,14 +20,15 @@
    SOFTWARE IS DISCLAIMED.
 */
 
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <crypto/b128ops.h>
+
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
 #include <net/bluetooth/mgmt.h>
 #include <net/bluetooth/smp.h>
-#include <linux/crypto.h>
-#include <linux/scatterlist.h>
-#include <crypto/b128ops.h>
 
 #define SMP_TIMEOUT msecs_to_jiffies(30000)
 
@@ -648,7 +649,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
 
 	auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM;
 
-	ret = tk_request(conn, 0, auth, rsp->io_capability, req->io_capability);
+	ret = tk_request(conn, 0, auth, req->io_capability, rsp->io_capability);
 	if (ret)
 		return SMP_UNSPECIFIED;
 
@@ -703,7 +704,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
 	return 0;
 }
 
-static u8 smp_ltk_encrypt(struct l2cap_conn *conn)
+static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
 {
 	struct smp_ltk *key;
 	struct hci_conn *hcon = conn->hcon;
@@ -712,6 +713,9 @@ static u8 smp_ltk_encrypt(struct l2cap_conn *conn)
 	if (!key)
 		return 0;
 
+	if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated)
+		return 0;
+
 	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
 		return 1;
 
@@ -732,7 +736,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
 
 	hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req);
 
-	if (smp_ltk_encrypt(conn))
+	if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
 		return 0;
 
 	if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
@@ -771,7 +775,7 @@ int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
 		return 1;
 
 	if (hcon->link_mode & HCI_LM_MASTER)
-		if (smp_ltk_encrypt(conn))
+		if (smp_ltk_encrypt(conn, sec_level))
 			goto done;
 
 	if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
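
The sec_level argument threaded through smp_ltk_encrypt() above lets the new check refuse an unauthenticated LTK when more than medium security was requested. A small sketch of the gating rule, under assumed names and security-level values:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative security levels; the kernel uses the BT_SECURITY_*
 * constants from include/net/bluetooth/bluetooth.h. */
enum sec_level { SEC_LOW, SEC_MEDIUM, SEC_HIGH };

struct ltk {
	bool present;
	bool authenticated;	/* key was created with MITM protection */
};

/* Mirrors the check added to smp_ltk_encrypt(): an unauthenticated
 * (Just Works) LTK must not satisfy a request above medium security. */
static bool ltk_usable(const struct ltk *key, enum sec_level req)
{
	if (!key->present)
		return false;
	if (req > SEC_MEDIUM && !key->authenticated)
		return false;
	return true;
}

int main(void)
{
	struct ltk unauth = { .present = true, .authenticated = false };

	printf("medium: %d\n", ltk_usable(&unauth, SEC_MEDIUM)); /* 1 */
	printf("high:   %d\n", ltk_usable(&unauth, SEC_HIGH));   /* 0 */
	return 0;
}

Without this gate, re-encrypting with a stored Just Works key could silently downgrade a connection that asked for MITM-protected (high) security.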
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 498c94e3442..85ac364f463 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2097,6 +2097,9 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	int i, ret;
 
+	if (!ieee80211_sdata_running(sdata))
+		return -ENETDOWN;
+
 	if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
 		ret = drv_set_bitrate_mask(local, sdata, mask);
 		if (ret)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index d7134c17033..079038d26a1 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1337,6 +1337,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 	if (WARN_ON(!ifmgd->associated))
 		return;
 
+	ieee80211_stop_poll(sdata);
+
 	memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
 
 	ifmgd->associated = NULL;
@@ -2592,8 +2594,6 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	u8 frame_buf[DEAUTH_DISASSOC_LEN];
 
-	ieee80211_stop_poll(sdata);
-
 	ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
 			       false, frame_buf);
 	mutex_unlock(&ifmgd->mtx);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 3bb24a121c9..a470e1123a5 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -271,6 +271,9 @@ struct sta_ampdu_mlme {
  * @plink_timer: peer link watch timer
  * @plink_timer_was_running: used by suspend/resume to restore timers
  * @t_offset: timing offset relative to this host
+ * @t_offset_setpoint: reference timing offset of this sta to be used when
+ *	calculating clockdrift
+ * @ch_type: peer's channel type
  * @debugfs: debug filesystem info
  * @dead: set to true when sta is unlinked
  * @uploaded: set to true when sta is uploaded to the driver
@@ -278,6 +281,8 @@ struct sta_ampdu_mlme {
  * @sta: station information we share with the driver
  * @sta_state: duplicates information about station state (for debug)
  * @beacon_loss_count: number of times beacon loss has triggered
+ * @supports_40mhz: tracks whether the station advertised 40 MHz support
+ *	as we overwrite its HT parameters with the currently used value
  */
 struct sta_info {
 	/* General information, mostly static */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 15f347477a9..baf5704740e 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1389,7 +1389,7 @@ static void reg_set_request_processed(void)
 	spin_unlock(&reg_requests_lock);
 
 	if (last_request->initiator == NL80211_REGDOM_SET_BY_USER)
-		cancel_delayed_work_sync(&reg_timeout);
+		cancel_delayed_work(&reg_timeout);
 
 	if (need_more_processing)
 		schedule_work(&reg_work);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 8f2d68fc3a4..316cfd00914 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -804,7 +804,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
 		    ntype == NL80211_IFTYPE_P2P_CLIENT))
 		return -EBUSY;
 
-	if (ntype != otype) {
+	if (ntype != otype && netif_running(dev)) {
 		err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
 						    ntype);
 		if (err)