210 files changed, 27577 insertions, 2842 deletions
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
index ebe89694cf81..49267ea97568 100644
--- a/Documentation/DocBook/80211.tmpl
+++ b/Documentation/DocBook/80211.tmpl
@@ -127,12 +127,11 @@
127 | !Finclude/net/cfg80211.h cfg80211_ibss_params | 127 | !Finclude/net/cfg80211.h cfg80211_ibss_params |
128 | !Finclude/net/cfg80211.h cfg80211_connect_params | 128 | !Finclude/net/cfg80211.h cfg80211_connect_params |
129 | !Finclude/net/cfg80211.h cfg80211_pmksa | 129 | !Finclude/net/cfg80211.h cfg80211_pmksa |
130 | !Finclude/net/cfg80211.h cfg80211_send_rx_auth | 130 | !Finclude/net/cfg80211.h cfg80211_rx_mlme_mgmt |
131 | !Finclude/net/cfg80211.h cfg80211_send_auth_timeout | 131 | !Finclude/net/cfg80211.h cfg80211_auth_timeout |
132 | !Finclude/net/cfg80211.h cfg80211_send_rx_assoc | 132 | !Finclude/net/cfg80211.h cfg80211_rx_assoc_resp |
133 | !Finclude/net/cfg80211.h cfg80211_send_assoc_timeout | 133 | !Finclude/net/cfg80211.h cfg80211_assoc_timeout |
134 | !Finclude/net/cfg80211.h cfg80211_send_deauth | 134 | !Finclude/net/cfg80211.h cfg80211_tx_mlme_mgmt |
135 | !Finclude/net/cfg80211.h cfg80211_send_disassoc | ||
136 | !Finclude/net/cfg80211.h cfg80211_ibss_joined | 135 | !Finclude/net/cfg80211.h cfg80211_ibss_joined |
137 | !Finclude/net/cfg80211.h cfg80211_connect_result | 136 | !Finclude/net/cfg80211.h cfg80211_connect_result |
138 | !Finclude/net/cfg80211.h cfg80211_roamed | 137 | !Finclude/net/cfg80211.h cfg80211_roamed |
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 8b4221cfd118..380a2003231e 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -26,6 +26,7 @@ config BCMA_HOST_PCI_POSSIBLE
26 | config BCMA_HOST_PCI | 26 | config BCMA_HOST_PCI |
27 | bool "Support for BCMA on PCI-host bus" | 27 | bool "Support for BCMA on PCI-host bus" |
28 | depends on BCMA_HOST_PCI_POSSIBLE | 28 | depends on BCMA_HOST_PCI_POSSIBLE |
29 | default y | ||
29 | 30 | ||
30 | config BCMA_DRIVER_PCI_HOSTMODE | 31 | config BCMA_DRIVER_PCI_HOSTMODE |
31 | bool "Driver for PCI core working in hostmode" | 32 | bool "Driver for PCI core working in hostmode" |
diff --git a/drivers/bcma/core.c b/drivers/bcma/core.c
index 17b26ce7e051..37a5ffe673d5 100644
--- a/drivers/bcma/core.c
+++ b/drivers/bcma/core.c
@@ -9,6 +9,25 @@
9 | #include <linux/export.h> | 9 | #include <linux/export.h> |
10 | #include <linux/bcma/bcma.h> | 10 | #include <linux/bcma/bcma.h> |
11 | 11 | ||
12 | static bool bcma_core_wait_value(struct bcma_device *core, u16 reg, u32 mask, | ||
13 | u32 value, int timeout) | ||
14 | { | ||
15 | unsigned long deadline = jiffies + timeout; | ||
16 | u32 val; | ||
17 | |||
18 | do { | ||
19 | val = bcma_aread32(core, reg); | ||
20 | if ((val & mask) == value) | ||
21 | return true; | ||
22 | cpu_relax(); | ||
23 | udelay(10); | ||
24 | } while (!time_after_eq(jiffies, deadline)); | ||
25 | |||
26 | bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg); | ||
27 | |||
28 | return false; | ||
29 | } | ||
30 | |||
12 | bool bcma_core_is_enabled(struct bcma_device *core) | 31 | bool bcma_core_is_enabled(struct bcma_device *core) |
13 | { | 32 | { |
14 | if ((bcma_aread32(core, BCMA_IOCTL) & (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC)) | 33 | if ((bcma_aread32(core, BCMA_IOCTL) & (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC)) |
@@ -25,13 +44,15 @@ void bcma_core_disable(struct bcma_device *core, u32 flags)
25 | if (bcma_aread32(core, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET) | 44 | if (bcma_aread32(core, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET) |
26 | return; | 45 | return; |
27 | 46 | ||
28 | bcma_awrite32(core, BCMA_IOCTL, flags); | 47 | bcma_core_wait_value(core, BCMA_RESET_ST, ~0, 0, 300); |
29 | bcma_aread32(core, BCMA_IOCTL); | ||
30 | udelay(10); | ||
31 | 48 | ||
32 | bcma_awrite32(core, BCMA_RESET_CTL, BCMA_RESET_CTL_RESET); | 49 | bcma_awrite32(core, BCMA_RESET_CTL, BCMA_RESET_CTL_RESET); |
33 | bcma_aread32(core, BCMA_RESET_CTL); | 50 | bcma_aread32(core, BCMA_RESET_CTL); |
34 | udelay(1); | 51 | udelay(1); |
52 | |||
53 | bcma_awrite32(core, BCMA_IOCTL, flags); | ||
54 | bcma_aread32(core, BCMA_IOCTL); | ||
55 | udelay(10); | ||
35 | } | 56 | } |
36 | EXPORT_SYMBOL_GPL(bcma_core_disable); | 57 | EXPORT_SYMBOL_GPL(bcma_core_disable); |
37 | 58 | ||
@@ -43,6 +64,7 @@ int bcma_core_enable(struct bcma_device *core, u32 flags)
43 | bcma_aread32(core, BCMA_IOCTL); | 64 | bcma_aread32(core, BCMA_IOCTL); |
44 | 65 | ||
45 | bcma_awrite32(core, BCMA_RESET_CTL, 0); | 66 | bcma_awrite32(core, BCMA_RESET_CTL, 0); |
67 | bcma_aread32(core, BCMA_RESET_CTL); | ||
46 | udelay(1); | 68 | udelay(1); |
47 | 69 | ||
48 | bcma_awrite32(core, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags)); | 70 | bcma_awrite32(core, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags)); |
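
For reference, the new bcma_core_wait_value() helper is a standard jiffies-deadline poll. Below is a minimal standalone sketch of the same idiom, with BCMA_RESET_ST and the 300-jiffy budget taken from the hunk above; it is illustrative only, not an exported bcma API.

/* Sketch of the poll-until-value-or-timeout idiom used by bcma_core_wait_value(). */
static bool example_wait_reset_idle(struct bcma_device *core)
{
	unsigned long deadline = jiffies + 300;	/* same budget as above */

	do {
		if (bcma_aread32(core, BCMA_RESET_ST) == 0)
			return true;		/* reset state machine idle */
		cpu_relax();
		udelay(10);
	} while (!time_after_eq(jiffies, deadline));

	return false;				/* caller may warn and carry on */
}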
diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c
index e6ed4fe5dced..4d07cce9c5d9 100644
--- a/drivers/bcma/driver_chipcommon_sflash.c
+++ b/drivers/bcma/driver_chipcommon_sflash.c
@@ -30,7 +30,7 @@ struct bcma_sflash_tbl_e {
30 | u16 numblocks; | 30 | u16 numblocks; |
31 | }; | 31 | }; |
32 | 32 | ||
33 | static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = { | 33 | static const struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = { |
34 | { "M25P20", 0x11, 0x10000, 4, }, | 34 | { "M25P20", 0x11, 0x10000, 4, }, |
35 | { "M25P40", 0x12, 0x10000, 8, }, | 35 | { "M25P40", 0x12, 0x10000, 8, }, |
36 | 36 | ||
@@ -41,7 +41,7 @@ static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
41 | { 0 }, | 41 | { 0 }, |
42 | }; | 42 | }; |
43 | 43 | ||
44 | static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = { | 44 | static const struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = { |
45 | { "SST25WF512", 1, 0x1000, 16, }, | 45 | { "SST25WF512", 1, 0x1000, 16, }, |
46 | { "SST25VF512", 0x48, 0x1000, 16, }, | 46 | { "SST25VF512", 0x48, 0x1000, 16, }, |
47 | { "SST25WF010", 2, 0x1000, 32, }, | 47 | { "SST25WF010", 2, 0x1000, 32, }, |
@@ -59,7 +59,7 @@ static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
59 | { 0 }, | 59 | { 0 }, |
60 | }; | 60 | }; |
61 | 61 | ||
62 | static struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = { | 62 | static const struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = { |
63 | { "AT45DB011", 0xc, 256, 512, }, | 63 | { "AT45DB011", 0xc, 256, 512, }, |
64 | { "AT45DB021", 0x14, 256, 1024, }, | 64 | { "AT45DB021", 0x14, 256, 1024, }, |
65 | { "AT45DB041", 0x1c, 256, 2048, }, | 65 | { "AT45DB041", 0x1c, 256, 2048, }, |
@@ -89,7 +89,7 @@ int bcma_sflash_init(struct bcma_drv_cc *cc)
89 | { | 89 | { |
90 | struct bcma_bus *bus = cc->core->bus; | 90 | struct bcma_bus *bus = cc->core->bus; |
91 | struct bcma_sflash *sflash = &cc->sflash; | 91 | struct bcma_sflash *sflash = &cc->sflash; |
92 | struct bcma_sflash_tbl_e *e; | 92 | const struct bcma_sflash_tbl_e *e; |
93 | u32 id, id2; | 93 | u32 id, id2; |
94 | 94 | ||
95 | switch (cc->capabilities & BCMA_CC_CAP_FLASHT) { | 95 | switch (cc->capabilities & BCMA_CC_CAP_FLASHT) { |
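
The only change in this file is const-qualifying the chip tables. As a hedged sketch, a { 0 }-terminated table of this kind is typically scanned as below; the field names follow the initializers above, and the lookup itself is illustrative, not copied from bcma_sflash_init().

/* Sketch: scan a { 0 }-terminated const chip table for a matching id.
 * Assumes the entries carry a name and an id, as the initializers suggest. */
static const struct bcma_sflash_tbl_e *
example_sflash_lookup(const struct bcma_sflash_tbl_e *tbl, u32 id)
{
	const struct bcma_sflash_tbl_e *e;

	for (e = tbl; e->name; e++)
		if (e->id == id)
			return e;

	return NULL;	/* unknown flash chip */
}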
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 13693b7a0d5c..75c262694632 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -554,6 +554,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
554 | skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_ATOMIC); | 554 | skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_ATOMIC); |
555 | if (skb == NULL) { | 555 | if (skb == NULL) { |
556 | BT_ERR("No free skb"); | 556 | BT_ERR("No free skb"); |
557 | ret = -ENOMEM; | ||
557 | goto exit; | 558 | goto exit; |
558 | } | 559 | } |
559 | 560 | ||
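
The one-line fix above makes the allocation failure propagate a real error code. Sketched in isolation, the goto-unwind idiom it relies on looks like this; the surrounding logic is a placeholder, not btmrvl code.

/* Sketch of the error-path idiom: every failure branch must set ret before
 * jumping to the common exit label, or a stale value gets returned. */
static int example_rx_one(unsigned int len)
{
	struct sk_buff *skb;
	int ret = 0;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		ret = -ENOMEM;		/* the line this patch adds */
		goto exit;
	}

	/* ... fill skb and hand it to the Bluetooth core ... */
	kfree_skb(skb);			/* placeholder for the real consumer */
exit:
	return ret;
}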
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 7a7e5f8ecadc..81f12757a842 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -57,6 +57,9 @@ static struct usb_device_id btusb_table[] = {
57 | /* Apple-specific (Broadcom) devices */ | 57 | /* Apple-specific (Broadcom) devices */ |
58 | { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) }, | 58 | { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) }, |
59 | 59 | ||
60 | /* MediaTek MT76x0E */ | ||
61 | { USB_DEVICE(0x0e8d, 0x763f) }, | ||
62 | |||
60 | /* Broadcom SoftSailing reporting vendor specific */ | 63 | /* Broadcom SoftSailing reporting vendor specific */ |
61 | { USB_DEVICE(0x0a5c, 0x21e1) }, | 64 | { USB_DEVICE(0x0a5c, 0x21e1) }, |
62 | 65 | ||
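
The new MediaTek entry is a plain VID/PID match. For context, a minimal sketch of how such a table is declared and registered in a USB driver; this is generic boilerplate, not copied from btusb.c.

/* Sketch: a minimal USB id table carrying the new MediaTek MT76x0E entry. */
static const struct usb_device_id example_table[] = {
	{ USB_DEVICE(0x0e8d, 0x763f) },	/* MediaTek MT76x0E */
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_table);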
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 2c02b4e84094..1abf1d421173 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -31,5 +31,6 @@ source "drivers/net/wireless/ath/carl9170/Kconfig"
31 | source "drivers/net/wireless/ath/ath6kl/Kconfig" | 31 | source "drivers/net/wireless/ath/ath6kl/Kconfig" |
32 | source "drivers/net/wireless/ath/ar5523/Kconfig" | 32 | source "drivers/net/wireless/ath/ar5523/Kconfig" |
33 | source "drivers/net/wireless/ath/wil6210/Kconfig" | 33 | source "drivers/net/wireless/ath/wil6210/Kconfig" |
34 | source "drivers/net/wireless/ath/ath10k/Kconfig" | ||
34 | 35 | ||
35 | endif | 36 | endif |
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 97b964ded2be..fb05cfd19361 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_CARL9170) += carl9170/
4 | obj-$(CONFIG_ATH6KL) += ath6kl/ | 4 | obj-$(CONFIG_ATH6KL) += ath6kl/ |
5 | obj-$(CONFIG_AR5523) += ar5523/ | 5 | obj-$(CONFIG_AR5523) += ar5523/ |
6 | obj-$(CONFIG_WIL6210) += wil6210/ | 6 | obj-$(CONFIG_WIL6210) += wil6210/ |
7 | obj-$(CONFIG_ATH10K) += ath10k/ | ||
7 | 8 | ||
8 | obj-$(CONFIG_ATH_COMMON) += ath.o | 9 | obj-$(CONFIG_ATH_COMMON) += ath.o |
9 | 10 | ||
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 4521342c62cc..daeafeff186b 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -239,13 +239,12 @@ enum ATH_DEBUG {
239 | ATH_DBG_CONFIG = 0x00000200, | 239 | ATH_DBG_CONFIG = 0x00000200, |
240 | ATH_DBG_FATAL = 0x00000400, | 240 | ATH_DBG_FATAL = 0x00000400, |
241 | ATH_DBG_PS = 0x00000800, | 241 | ATH_DBG_PS = 0x00000800, |
242 | ATH_DBG_HWTIMER = 0x00001000, | 242 | ATH_DBG_BTCOEX = 0x00001000, |
243 | ATH_DBG_BTCOEX = 0x00002000, | 243 | ATH_DBG_WMI = 0x00002000, |
244 | ATH_DBG_WMI = 0x00004000, | 244 | ATH_DBG_BSTUCK = 0x00004000, |
245 | ATH_DBG_BSTUCK = 0x00008000, | 245 | ATH_DBG_MCI = 0x00008000, |
246 | ATH_DBG_MCI = 0x00010000, | 246 | ATH_DBG_DFS = 0x00010000, |
247 | ATH_DBG_DFS = 0x00020000, | 247 | ATH_DBG_WOW = 0x00020000, |
248 | ATH_DBG_WOW = 0x00040000, | ||
249 | ATH_DBG_ANY = 0xffffffff | 248 | ATH_DBG_ANY = 0xffffffff |
250 | }; | 249 | }; |
251 | 250 | ||
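
ATH_DBG_HWTIMER is gone and every later bit shifts down one position. Callers test these bits against the shared debug mask, roughly as in the hedged sketch below; ath_common::debug_mask is assumed from the wider ath code, and the dump helper is hypothetical.

/* Sketch: debug output gated on the renumbered mask bits.
 * example_dump_btcoex_state() is a hypothetical helper. */
static void example_btcoex_trace(struct ath_common *common)
{
	if (common->debug_mask & ATH_DBG_BTCOEX)	/* 0x00001000 after this patch */
		example_dump_btcoex_state(common);
}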
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
new file mode 100644
index 000000000000..cde58fe96254
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -0,0 +1,39 @@
1 | config ATH10K | ||
2 | tristate "Atheros 802.11ac wireless cards support" | ||
3 | depends on MAC80211 | ||
4 | select ATH_COMMON | ||
5 | ---help--- | ||
6 | This module adds support for wireless adapters based on | ||
7 | Atheros IEEE 802.11ac family of chipsets. | ||
8 | |||
9 | If you choose to build a module, it'll be called ath10k. | ||
10 | |||
11 | config ATH10K_PCI | ||
12 | tristate "Atheros ath10k PCI support" | ||
13 | depends on ATH10K && PCI | ||
14 | ---help--- | ||
15 | This module adds support for the PCIe bus | ||
16 | |||
17 | config ATH10K_DEBUG | ||
18 | bool "Atheros ath10k debugging" | ||
19 | depends on ATH10K | ||
20 | ---help--- | ||
21 | Enables debug support | ||
22 | |||
23 | If unsure, say Y to make it easier to debug problems. | ||
24 | |||
25 | config ATH10K_DEBUGFS | ||
26 | bool "Atheros ath10k debugfs support" | ||
27 | depends on ATH10K | ||
28 | ---help--- | ||
29 | Enables debugfs support | ||
30 | |||
31 | If unsure, say Y to make it easier to debug problems. | ||
32 | |||
33 | config ATH10K_TRACING | ||
34 | bool "Atheros ath10k tracing support" | ||
35 | depends on ATH10K | ||
36 | depends on EVENT_TRACING | ||
37 | ---help--- | ||
38 | Select this to let ath10k use the tracing infrastructure. | ||
39 | |||
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
new file mode 100644
index 000000000000..a4179f49ee1f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -0,0 +1,20 @@
1 | obj-$(CONFIG_ATH10K) += ath10k_core.o | ||
2 | ath10k_core-y += mac.o \ | ||
3 | debug.o \ | ||
4 | core.o \ | ||
5 | htc.o \ | ||
6 | htt.o \ | ||
7 | htt_rx.o \ | ||
8 | htt_tx.o \ | ||
9 | txrx.o \ | ||
10 | wmi.o \ | ||
11 | bmi.o | ||
12 | |||
13 | ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o | ||
14 | |||
15 | obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o | ||
16 | ath10k_pci-y += pci.o \ | ||
17 | ce.o | ||
18 | |||
19 | # for tracing framework to find trace.h | ||
20 | CFLAGS_trace.o := -I$(src) | ||
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
new file mode 100644
index 000000000000..1a2ef51b69d9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -0,0 +1,295 @@
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #include "bmi.h" | ||
19 | #include "hif.h" | ||
20 | #include "debug.h" | ||
21 | #include "htc.h" | ||
22 | |||
23 | int ath10k_bmi_done(struct ath10k *ar) | ||
24 | { | ||
25 | struct bmi_cmd cmd; | ||
26 | u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done); | ||
27 | int ret; | ||
28 | |||
29 | if (ar->bmi.done_sent) { | ||
30 | ath10k_dbg(ATH10K_DBG_CORE, "%s skipped\n", __func__); | ||
31 | return 0; | ||
32 | } | ||
33 | |||
34 | ar->bmi.done_sent = true; | ||
35 | cmd.id = __cpu_to_le32(BMI_DONE); | ||
36 | |||
37 | ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL); | ||
38 | if (ret) { | ||
39 | ath10k_warn("unable to write to the device: %d\n", ret); | ||
40 | return ret; | ||
41 | } | ||
42 | |||
43 | ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n"); | ||
44 | return 0; | ||
45 | } | ||
46 | |||
47 | int ath10k_bmi_get_target_info(struct ath10k *ar, | ||
48 | struct bmi_target_info *target_info) | ||
49 | { | ||
50 | struct bmi_cmd cmd; | ||
51 | union bmi_resp resp; | ||
52 | u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info); | ||
53 | u32 resplen = sizeof(resp.get_target_info); | ||
54 | int ret; | ||
55 | |||
56 | if (ar->bmi.done_sent) { | ||
57 | ath10k_warn("BMI Get Target Info Command disallowed\n"); | ||
58 | return -EBUSY; | ||
59 | } | ||
60 | |||
61 | cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO); | ||
62 | |||
63 | ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen); | ||
64 | if (ret) { | ||
65 | ath10k_warn("unable to get target info from device\n"); | ||
66 | return ret; | ||
67 | } | ||
68 | |||
69 | if (resplen < sizeof(resp.get_target_info)) { | ||
70 | ath10k_warn("invalid get_target_info response length (%d)\n", | ||
71 | resplen); | ||
72 | return -EIO; | ||
73 | } | ||
74 | |||
75 | target_info->version = __le32_to_cpu(resp.get_target_info.version); | ||
76 | target_info->type = __le32_to_cpu(resp.get_target_info.type); | ||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | int ath10k_bmi_read_memory(struct ath10k *ar, | ||
81 | u32 address, void *buffer, u32 length) | ||
82 | { | ||
83 | struct bmi_cmd cmd; | ||
84 | union bmi_resp resp; | ||
85 | u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem); | ||
86 | u32 rxlen; | ||
87 | int ret; | ||
88 | |||
89 | if (ar->bmi.done_sent) { | ||
90 | ath10k_warn("command disallowed\n"); | ||
91 | return -EBUSY; | ||
92 | } | ||
93 | |||
94 | ath10k_dbg(ATH10K_DBG_CORE, | ||
95 | "%s: (device: 0x%p, address: 0x%x, length: %d)\n", | ||
96 | __func__, ar, address, length); | ||
97 | |||
98 | while (length) { | ||
99 | rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE); | ||
100 | |||
101 | cmd.id = __cpu_to_le32(BMI_READ_MEMORY); | ||
102 | cmd.read_mem.addr = __cpu_to_le32(address); | ||
103 | cmd.read_mem.len = __cpu_to_le32(rxlen); | ||
104 | |||
105 | ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, | ||
106 | &resp, &rxlen); | ||
107 | if (ret) { | ||
108 | ath10k_warn("unable to read from the device\n"); | ||
109 | return ret; | ||
110 | } | ||
111 | |||
112 | memcpy(buffer, resp.read_mem.payload, rxlen); | ||
113 | address += rxlen; | ||
114 | buffer += rxlen; | ||
115 | length -= rxlen; | ||
116 | } | ||
117 | |||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | int ath10k_bmi_write_memory(struct ath10k *ar, | ||
122 | u32 address, const void *buffer, u32 length) | ||
123 | { | ||
124 | struct bmi_cmd cmd; | ||
125 | u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem); | ||
126 | u32 txlen; | ||
127 | int ret; | ||
128 | |||
129 | if (ar->bmi.done_sent) { | ||
130 | ath10k_warn("command disallowed\n"); | ||
131 | return -EBUSY; | ||
132 | } | ||
133 | |||
134 | ath10k_dbg(ATH10K_DBG_CORE, | ||
135 | "%s: (device: 0x%p, address: 0x%x, length: %d)\n", | ||
136 | __func__, ar, address, length); | ||
137 | |||
138 | while (length) { | ||
139 | txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen); | ||
140 | |||
141 | /* copy before roundup to avoid reading beyond buffer*/ | ||
142 | memcpy(cmd.write_mem.payload, buffer, txlen); | ||
143 | txlen = roundup(txlen, 4); | ||
144 | |||
145 | cmd.id = __cpu_to_le32(BMI_WRITE_MEMORY); | ||
146 | cmd.write_mem.addr = __cpu_to_le32(address); | ||
147 | cmd.write_mem.len = __cpu_to_le32(txlen); | ||
148 | |||
149 | ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen, | ||
150 | NULL, NULL); | ||
151 | if (ret) { | ||
152 | ath10k_warn("unable to write to the device\n"); | ||
153 | return ret; | ||
154 | } | ||
155 | |||
156 | /* fixup roundup() so `length` zeroes out for last chunk */ | ||
157 | txlen = min(txlen, length); | ||
158 | |||
159 | address += txlen; | ||
160 | buffer += txlen; | ||
161 | length -= txlen; | ||
162 | } | ||
163 | |||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param) | ||
168 | { | ||
169 | struct bmi_cmd cmd; | ||
170 | union bmi_resp resp; | ||
171 | u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute); | ||
172 | u32 resplen = sizeof(resp.execute); | ||
173 | int ret; | ||
174 | |||
175 | if (ar->bmi.done_sent) { | ||
176 | ath10k_warn("command disallowed\n"); | ||
177 | return -EBUSY; | ||
178 | } | ||
179 | |||
180 | ath10k_dbg(ATH10K_DBG_CORE, | ||
181 | "%s: (device: 0x%p, address: 0x%x, param: %d)\n", | ||
182 | __func__, ar, address, *param); | ||
183 | |||
184 | cmd.id = __cpu_to_le32(BMI_EXECUTE); | ||
185 | cmd.execute.addr = __cpu_to_le32(address); | ||
186 | cmd.execute.param = __cpu_to_le32(*param); | ||
187 | |||
188 | ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen); | ||
189 | if (ret) { | ||
190 | ath10k_warn("unable to read from the device\n"); | ||
191 | return ret; | ||
192 | } | ||
193 | |||
194 | if (resplen < sizeof(resp.execute)) { | ||
195 | ath10k_warn("invalid execute response length (%d)\n", | ||
196 | resplen); | ||
197 | return ret; | ||
198 | } | ||
199 | |||
200 | *param = __le32_to_cpu(resp.execute.result); | ||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length) | ||
205 | { | ||
206 | struct bmi_cmd cmd; | ||
207 | u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data); | ||
208 | u32 txlen; | ||
209 | int ret; | ||
210 | |||
211 | if (ar->bmi.done_sent) { | ||
212 | ath10k_warn("command disallowed\n"); | ||
213 | return -EBUSY; | ||
214 | } | ||
215 | |||
216 | while (length) { | ||
217 | txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen); | ||
218 | |||
219 | WARN_ON_ONCE(txlen & 3); | ||
220 | |||
221 | cmd.id = __cpu_to_le32(BMI_LZ_DATA); | ||
222 | cmd.lz_data.len = __cpu_to_le32(txlen); | ||
223 | memcpy(cmd.lz_data.payload, buffer, txlen); | ||
224 | |||
225 | ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen, | ||
226 | NULL, NULL); | ||
227 | if (ret) { | ||
228 | ath10k_warn("unable to write to the device\n"); | ||
229 | return ret; | ||
230 | } | ||
231 | |||
232 | buffer += txlen; | ||
233 | length -= txlen; | ||
234 | } | ||
235 | |||
236 | return 0; | ||
237 | } | ||
238 | |||
239 | int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address) | ||
240 | { | ||
241 | struct bmi_cmd cmd; | ||
242 | u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start); | ||
243 | int ret; | ||
244 | |||
245 | if (ar->bmi.done_sent) { | ||
246 | ath10k_warn("command disallowed\n"); | ||
247 | return -EBUSY; | ||
248 | } | ||
249 | |||
250 | cmd.id = __cpu_to_le32(BMI_LZ_STREAM_START); | ||
251 | cmd.lz_start.addr = __cpu_to_le32(address); | ||
252 | |||
253 | ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL); | ||
254 | if (ret) { | ||
255 | ath10k_warn("unable to Start LZ Stream to the device\n"); | ||
256 | return ret; | ||
257 | } | ||
258 | |||
259 | return 0; | ||
260 | } | ||
261 | |||
262 | int ath10k_bmi_fast_download(struct ath10k *ar, | ||
263 | u32 address, const void *buffer, u32 length) | ||
264 | { | ||
265 | u8 trailer[4] = {}; | ||
266 | u32 head_len = rounddown(length, 4); | ||
267 | u32 trailer_len = length - head_len; | ||
268 | int ret; | ||
269 | |||
270 | ret = ath10k_bmi_lz_stream_start(ar, address); | ||
271 | if (ret) | ||
272 | return ret; | ||
273 | |||
274 | /* copy the last word into a zero padded buffer */ | ||
275 | if (trailer_len > 0) | ||
276 | memcpy(trailer, buffer + head_len, trailer_len); | ||
277 | |||
278 | ret = ath10k_bmi_lz_data(ar, buffer, head_len); | ||
279 | if (ret) | ||
280 | return ret; | ||
281 | |||
282 | if (trailer_len > 0) | ||
283 | ret = ath10k_bmi_lz_data(ar, trailer, 4); | ||
284 | |||
285 | if (ret != 0) | ||
286 | return ret; | ||
287 | |||
288 | /* | ||
289 | * Close compressed stream and open a new (fake) one. | ||
290 | * This serves mainly to flush Target caches. | ||
291 | */ | ||
292 | ret = ath10k_bmi_lz_stream_start(ar, 0x00); | ||
293 | |||
294 | return ret; | ||
295 | } | ||
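
Taken together, the calls above form the whole BMI bring-up: query the target, push an image, jump into it, and close the interface. A hedged sketch of such a sequence, built only from the functions in this file; the load address, entry parameter and firmware buffer are placeholders.

/* Sketch: a possible BMI bring-up sequence built from the calls above.
 * fw_data/fw_len/load_addr are placeholders, not real values. */
static int example_bmi_boot(struct ath10k *ar, const void *fw_data,
			    u32 fw_len, u32 load_addr)
{
	struct bmi_target_info ti;
	u32 param = 0;
	int ret;

	ret = ath10k_bmi_get_target_info(ar, &ti);
	if (ret)
		return ret;

	ret = ath10k_bmi_fast_download(ar, load_addr, fw_data, fw_len);
	if (ret)
		return ret;

	ret = ath10k_bmi_execute(ar, load_addr, &param);
	if (ret)
		return ret;

	/* no further BMI commands are accepted after this */
	return ath10k_bmi_done(ar);
}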
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
new file mode 100644
index 000000000000..32c56aa33a5e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -0,0 +1,224 @@
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #ifndef _BMI_H_ | ||
19 | #define _BMI_H_ | ||
20 | |||
21 | #include "core.h" | ||
22 | |||
23 | /* | ||
24 | * Bootloader Messaging Interface (BMI) | ||
25 | * | ||
26 | * BMI is a very simple messaging interface used during initialization | ||
27 | * to read memory, write memory, execute code, and to define an | ||
28 | * application entry PC. | ||
29 | * | ||
30 | * It is used to download an application to QCA988x, to provide | ||
31 | * patches to code that is already resident on QCA988x, and generally | ||
32 | * to examine and modify state. The Host has an opportunity to use | ||
33 | * BMI only once during bootup. Once the Host issues a BMI_DONE | ||
34 | * command, this opportunity ends. | ||
35 | * | ||
36 | * The Host writes BMI requests to mailbox0, and reads BMI responses | ||
37 | * from mailbox0. BMI requests all begin with a command | ||
38 | * (see below for specific commands), and are followed by | ||
39 | * command-specific data. | ||
40 | * | ||
41 | * Flow control: | ||
42 | * The Host can only issue a command once the Target gives it a | ||
43 | * "BMI Command Credit", using AR8K Counter #4. As soon as the | ||
44 | * Target has completed a command, it issues another BMI Command | ||
45 | * Credit (so the Host can issue the next command). | ||
46 | * | ||
47 | * BMI handles all required Target-side cache flushing. | ||
48 | */ | ||
49 | |||
50 | /* Maximum data size used for BMI transfers */ | ||
51 | #define BMI_MAX_DATA_SIZE 256 | ||
52 | |||
53 | /* len = cmd + addr + length */ | ||
54 | #define BMI_MAX_CMDBUF_SIZE (BMI_MAX_DATA_SIZE + \ | ||
55 | sizeof(u32) + \ | ||
56 | sizeof(u32) + \ | ||
57 | sizeof(u32)) | ||
58 | |||
59 | /* BMI Commands */ | ||
60 | |||
61 | enum bmi_cmd_id { | ||
62 | BMI_NO_COMMAND = 0, | ||
63 | BMI_DONE = 1, | ||
64 | BMI_READ_MEMORY = 2, | ||
65 | BMI_WRITE_MEMORY = 3, | ||
66 | BMI_EXECUTE = 4, | ||
67 | BMI_SET_APP_START = 5, | ||
68 | BMI_READ_SOC_REGISTER = 6, | ||
69 | BMI_READ_SOC_WORD = 6, | ||
70 | BMI_WRITE_SOC_REGISTER = 7, | ||
71 | BMI_WRITE_SOC_WORD = 7, | ||
72 | BMI_GET_TARGET_ID = 8, | ||
73 | BMI_GET_TARGET_INFO = 8, | ||
74 | BMI_ROMPATCH_INSTALL = 9, | ||
75 | BMI_ROMPATCH_UNINSTALL = 10, | ||
76 | BMI_ROMPATCH_ACTIVATE = 11, | ||
77 | BMI_ROMPATCH_DEACTIVATE = 12, | ||
78 | BMI_LZ_STREAM_START = 13, /* should be followed by LZ_DATA */ | ||
79 | BMI_LZ_DATA = 14, | ||
80 | BMI_NVRAM_PROCESS = 15, | ||
81 | }; | ||
82 | |||
83 | #define BMI_NVRAM_SEG_NAME_SZ 16 | ||
84 | |||
85 | struct bmi_cmd { | ||
86 | __le32 id; /* enum bmi_cmd_id */ | ||
87 | union { | ||
88 | struct { | ||
89 | } done; | ||
90 | struct { | ||
91 | __le32 addr; | ||
92 | __le32 len; | ||
93 | } read_mem; | ||
94 | struct { | ||
95 | __le32 addr; | ||
96 | __le32 len; | ||
97 | u8 payload[0]; | ||
98 | } write_mem; | ||
99 | struct { | ||
100 | __le32 addr; | ||
101 | __le32 param; | ||
102 | } execute; | ||
103 | struct { | ||
104 | __le32 addr; | ||
105 | } set_app_start; | ||
106 | struct { | ||
107 | __le32 addr; | ||
108 | } read_soc_reg; | ||
109 | struct { | ||
110 | __le32 addr; | ||
111 | __le32 value; | ||
112 | } write_soc_reg; | ||
113 | struct { | ||
114 | } get_target_info; | ||
115 | struct { | ||
116 | __le32 rom_addr; | ||
117 | __le32 ram_addr; /* or value */ | ||
118 | __le32 size; | ||
119 | __le32 activate; /* 0=install, but don't activate */ | ||
120 | } rompatch_install; | ||
121 | struct { | ||
122 | __le32 patch_id; | ||
123 | } rompatch_uninstall; | ||
124 | struct { | ||
125 | __le32 count; | ||
126 | __le32 patch_ids[0]; /* length of @count */ | ||
127 | } rompatch_activate; | ||
128 | struct { | ||
129 | __le32 count; | ||
130 | __le32 patch_ids[0]; /* length of @count */ | ||
131 | } rompatch_deactivate; | ||
132 | struct { | ||
133 | __le32 addr; | ||
134 | } lz_start; | ||
135 | struct { | ||
136 | __le32 len; /* max BMI_MAX_DATA_SIZE */ | ||
137 | u8 payload[0]; /* length of @len */ | ||
138 | } lz_data; | ||
139 | struct { | ||
140 | u8 name[BMI_NVRAM_SEG_NAME_SZ]; | ||
141 | } nvram_process; | ||
142 | u8 payload[BMI_MAX_CMDBUF_SIZE]; | ||
143 | }; | ||
144 | } __packed; | ||
145 | |||
146 | union bmi_resp { | ||
147 | struct { | ||
148 | u8 payload[0]; | ||
149 | } read_mem; | ||
150 | struct { | ||
151 | __le32 result; | ||
152 | } execute; | ||
153 | struct { | ||
154 | __le32 value; | ||
155 | } read_soc_reg; | ||
156 | struct { | ||
157 | __le32 len; | ||
158 | __le32 version; | ||
159 | __le32 type; | ||
160 | } get_target_info; | ||
161 | struct { | ||
162 | __le32 patch_id; | ||
163 | } rompatch_install; | ||
164 | struct { | ||
165 | __le32 patch_id; | ||
166 | } rompatch_uninstall; | ||
167 | struct { | ||
168 | /* 0 = nothing executed | ||
169 | * otherwise = NVRAM segment return value */ | ||
170 | __le32 result; | ||
171 | } nvram_process; | ||
172 | u8 payload[BMI_MAX_CMDBUF_SIZE]; | ||
173 | } __packed; | ||
174 | |||
175 | struct bmi_target_info { | ||
176 | u32 version; | ||
177 | u32 type; | ||
178 | }; | ||
179 | |||
180 | |||
181 | /* in msec */ | ||
182 | #define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ) | ||
183 | |||
184 | #define BMI_CE_NUM_TO_TARG 0 | ||
185 | #define BMI_CE_NUM_TO_HOST 1 | ||
186 | |||
187 | int ath10k_bmi_done(struct ath10k *ar); | ||
188 | int ath10k_bmi_get_target_info(struct ath10k *ar, | ||
189 | struct bmi_target_info *target_info); | ||
190 | int ath10k_bmi_read_memory(struct ath10k *ar, u32 address, | ||
191 | void *buffer, u32 length); | ||
192 | int ath10k_bmi_write_memory(struct ath10k *ar, u32 address, | ||
193 | const void *buffer, u32 length); | ||
194 | |||
195 | #define ath10k_bmi_read32(ar, item, val) \ | ||
196 | ({ \ | ||
197 | int ret; \ | ||
198 | u32 addr; \ | ||
199 | __le32 tmp; \ | ||
200 | \ | ||
201 | addr = host_interest_item_address(HI_ITEM(item)); \ | ||
202 | ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \ | ||
203 | *val = __le32_to_cpu(tmp); \ | ||
204 | ret; \ | ||
205 | }) | ||
206 | |||
207 | #define ath10k_bmi_write32(ar, item, val) \ | ||
208 | ({ \ | ||
209 | int ret; \ | ||
210 | u32 address; \ | ||
211 | __le32 v = __cpu_to_le32(val); \ | ||
212 | \ | ||
213 | address = host_interest_item_address(HI_ITEM(item)); \ | ||
214 | ret = ath10k_bmi_write_memory(ar, address, \ | ||
215 | (u8 *)&v, sizeof(v)); \ | ||
216 | ret; \ | ||
217 | }) | ||
218 | |||
219 | int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param); | ||
220 | int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address); | ||
221 | int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length); | ||
222 | int ath10k_bmi_fast_download(struct ath10k *ar, u32 address, | ||
223 | const void *buffer, u32 length); | ||
224 | #endif /* _BMI_H_ */ | ||
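
The read32/write32 wrappers above turn a host-interest item into a single BMI memory transaction. A hedged usage sketch follows; HI_ITEM() and host_interest_item_address() live in other ath10k headers, and hi_board_data is an assumed item name used purely for illustration.

/* Sketch: read one 32-bit host-interest word, log it, then overwrite it.
 * "hi_board_data" is an assumed item name for illustration only. */
static int example_patch_hi_word(struct ath10k *ar, u32 new_val)
{
	u32 old = 0;
	int ret;

	ret = ath10k_bmi_read32(ar, hi_board_data, &old);
	if (ret)
		return ret;

	ath10k_dbg(ATH10K_DBG_CORE, "host interest word was 0x%08x\n", old);

	return ath10k_bmi_write32(ar, hi_board_data, new_val);
}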
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
new file mode 100644
index 000000000000..61a8ac70d3ca
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -0,0 +1,1189 @@
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #include "hif.h" | ||
19 | #include "pci.h" | ||
20 | #include "ce.h" | ||
21 | #include "debug.h" | ||
22 | |||
23 | /* | ||
24 | * Support for Copy Engine hardware, which is mainly used for | ||
25 | * communication between Host and Target over a PCIe interconnect. | ||
26 | */ | ||
27 | |||
28 | /* | ||
29 | * A single CopyEngine (CE) comprises two "rings": | ||
30 | * a source ring | ||
31 | * a destination ring | ||
32 | * | ||
33 | * Each ring consists of a number of descriptors which specify | ||
34 | * an address, length, and meta-data. | ||
35 | * | ||
36 | * Typically, one side of the PCIe interconnect (Host or Target) | ||
37 | * controls one ring and the other side controls the other ring. | ||
38 | * The source side chooses when to initiate a transfer and it | ||
39 | * chooses what to send (buffer address, length). The destination | ||
40 | * side keeps a supply of "anonymous receive buffers" available and | ||
41 | * it handles incoming data as it arrives (when the destination | ||
42 | * receives an interrupt). | ||
43 | * | ||
44 | * The sender may send a simple buffer (address/length) or it may | ||
45 | * send a small list of buffers. When a small list is sent, hardware | ||
46 | * "gathers" these and they end up in a single destination buffer | ||
47 | * with a single interrupt. | ||
48 | * | ||
49 | * There are several "contexts" managed by this layer -- more, it | ||
50 | * may seem -- than should be needed. These are provided mainly for | ||
51 | * maximum flexibility and especially to facilitate a simpler HIF | ||
52 | * implementation. There are per-CopyEngine recv, send, and watermark | ||
53 | * contexts. These are supplied by the caller when a recv, send, | ||
54 | * or watermark handler is established and they are echoed back to | ||
55 | * the caller when the respective callbacks are invoked. There is | ||
56 | * also a per-transfer context supplied by the caller when a buffer | ||
57 | * (or sendlist) is sent and when a buffer is enqueued for recv. | ||
58 | * These per-transfer contexts are echoed back to the caller when | ||
59 | * the buffer is sent/received. | ||
60 | */ | ||
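
All of the send and receive paths below juggle per-ring indices (sw_index, write_index and a cached hw_index) through CE_RING_IDX_INCR() and CE_RING_DELTA(). Assuming, as the code suggests, power-of-two rings with nentries_mask == nentries - 1, the arithmetic reduces to masked adds; the sketch below is illustrative, not the ce.h macros themselves.

/* Sketch of the ring-index arithmetic used throughout this file, assuming
 * power-of-two rings with nentries_mask == nentries - 1. */
static inline unsigned int example_ring_idx_incr(unsigned int mask,
						 unsigned int idx)
{
	return (idx + 1) & mask;		/* wrap at the ring size */
}

/* Slot count from 'from' up to (but not including) 'to', modulo ring size. */
static inline unsigned int example_ring_delta(unsigned int mask,
					      unsigned int from,
					      unsigned int to)
{
	return (to - from) & mask;
}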
61 | |||
62 | static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar, | ||
63 | u32 ce_ctrl_addr, | ||
64 | unsigned int n) | ||
65 | { | ||
66 | ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n); | ||
67 | } | ||
68 | |||
69 | static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar, | ||
70 | u32 ce_ctrl_addr) | ||
71 | { | ||
72 | return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS); | ||
73 | } | ||
74 | |||
75 | static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar, | ||
76 | u32 ce_ctrl_addr, | ||
77 | unsigned int n) | ||
78 | { | ||
79 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
80 | void __iomem *indicator_addr; | ||
81 | |||
82 | if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) { | ||
83 | ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n); | ||
84 | return; | ||
85 | } | ||
86 | |||
87 | /* workaround for QCA988x_1.0 HW CE */ | ||
88 | indicator_addr = ar_pci->mem + ce_ctrl_addr + DST_WATERMARK_ADDRESS; | ||
89 | |||
90 | if (ce_ctrl_addr == ath10k_ce_base_address(CDC_WAR_DATA_CE)) { | ||
91 | iowrite32((CDC_WAR_MAGIC_STR | n), indicator_addr); | ||
92 | } else { | ||
93 | unsigned long irq_flags; | ||
94 | local_irq_save(irq_flags); | ||
95 | iowrite32(1, indicator_addr); | ||
96 | |||
97 | /* | ||
98 | * PCIE write waits for ACK in IPQ8K, there is no | ||
99 | * need to read back value. | ||
100 | */ | ||
101 | (void)ioread32(indicator_addr); | ||
102 | (void)ioread32(indicator_addr); /* conservative */ | ||
103 | |||
104 | ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n); | ||
105 | |||
106 | iowrite32(0, indicator_addr); | ||
107 | local_irq_restore(irq_flags); | ||
108 | } | ||
109 | } | ||
110 | |||
111 | static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar, | ||
112 | u32 ce_ctrl_addr) | ||
113 | { | ||
114 | return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS); | ||
115 | } | ||
116 | |||
117 | static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar, | ||
118 | u32 ce_ctrl_addr) | ||
119 | { | ||
120 | return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS); | ||
121 | } | ||
122 | |||
123 | static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar, | ||
124 | u32 ce_ctrl_addr, | ||
125 | unsigned int addr) | ||
126 | { | ||
127 | ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr); | ||
128 | } | ||
129 | |||
130 | static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar, | ||
131 | u32 ce_ctrl_addr, | ||
132 | unsigned int n) | ||
133 | { | ||
134 | ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n); | ||
135 | } | ||
136 | |||
137 | static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar, | ||
138 | u32 ce_ctrl_addr, | ||
139 | unsigned int n) | ||
140 | { | ||
141 | u32 ctrl1_addr = ath10k_pci_read32((ar), | ||
142 | (ce_ctrl_addr) + CE_CTRL1_ADDRESS); | ||
143 | |||
144 | ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS, | ||
145 | (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) | | ||
146 | CE_CTRL1_DMAX_LENGTH_SET(n)); | ||
147 | } | ||
148 | |||
149 | static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar, | ||
150 | u32 ce_ctrl_addr, | ||
151 | unsigned int n) | ||
152 | { | ||
153 | u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS); | ||
154 | |||
155 | ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS, | ||
156 | (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) | | ||
157 | CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n)); | ||
158 | } | ||
159 | |||
160 | static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar, | ||
161 | u32 ce_ctrl_addr, | ||
162 | unsigned int n) | ||
163 | { | ||
164 | u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS); | ||
165 | |||
166 | ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS, | ||
167 | (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) | | ||
168 | CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n)); | ||
169 | } | ||
170 | |||
171 | static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar, | ||
172 | u32 ce_ctrl_addr) | ||
173 | { | ||
174 | return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS); | ||
175 | } | ||
176 | |||
177 | static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar, | ||
178 | u32 ce_ctrl_addr, | ||
179 | u32 addr) | ||
180 | { | ||
181 | ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr); | ||
182 | } | ||
183 | |||
184 | static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar, | ||
185 | u32 ce_ctrl_addr, | ||
186 | unsigned int n) | ||
187 | { | ||
188 | ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n); | ||
189 | } | ||
190 | |||
191 | static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar, | ||
192 | u32 ce_ctrl_addr, | ||
193 | unsigned int n) | ||
194 | { | ||
195 | u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS); | ||
196 | |||
197 | ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS, | ||
198 | (addr & ~SRC_WATERMARK_HIGH_MASK) | | ||
199 | SRC_WATERMARK_HIGH_SET(n)); | ||
200 | } | ||
201 | |||
202 | static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar, | ||
203 | u32 ce_ctrl_addr, | ||
204 | unsigned int n) | ||
205 | { | ||
206 | u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS); | ||
207 | |||
208 | ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS, | ||
209 | (addr & ~SRC_WATERMARK_LOW_MASK) | | ||
210 | SRC_WATERMARK_LOW_SET(n)); | ||
211 | } | ||
212 | |||
213 | static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar, | ||
214 | u32 ce_ctrl_addr, | ||
215 | unsigned int n) | ||
216 | { | ||
217 | u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS); | ||
218 | |||
219 | ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS, | ||
220 | (addr & ~DST_WATERMARK_HIGH_MASK) | | ||
221 | DST_WATERMARK_HIGH_SET(n)); | ||
222 | } | ||
223 | |||
224 | static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar, | ||
225 | u32 ce_ctrl_addr, | ||
226 | unsigned int n) | ||
227 | { | ||
228 | u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS); | ||
229 | |||
230 | ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS, | ||
231 | (addr & ~DST_WATERMARK_LOW_MASK) | | ||
232 | DST_WATERMARK_LOW_SET(n)); | ||
233 | } | ||
234 | |||
235 | static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar, | ||
236 | u32 ce_ctrl_addr) | ||
237 | { | ||
238 | u32 host_ie_addr = ath10k_pci_read32(ar, | ||
239 | ce_ctrl_addr + HOST_IE_ADDRESS); | ||
240 | |||
241 | ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS, | ||
242 | host_ie_addr | HOST_IE_COPY_COMPLETE_MASK); | ||
243 | } | ||
244 | |||
245 | static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar, | ||
246 | u32 ce_ctrl_addr) | ||
247 | { | ||
248 | u32 host_ie_addr = ath10k_pci_read32(ar, | ||
249 | ce_ctrl_addr + HOST_IE_ADDRESS); | ||
250 | |||
251 | ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS, | ||
252 | host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK); | ||
253 | } | ||
254 | |||
255 | static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar, | ||
256 | u32 ce_ctrl_addr) | ||
257 | { | ||
258 | u32 host_ie_addr = ath10k_pci_read32(ar, | ||
259 | ce_ctrl_addr + HOST_IE_ADDRESS); | ||
260 | |||
261 | ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS, | ||
262 | host_ie_addr & ~CE_WATERMARK_MASK); | ||
263 | } | ||
264 | |||
265 | static inline void ath10k_ce_error_intr_enable(struct ath10k *ar, | ||
266 | u32 ce_ctrl_addr) | ||
267 | { | ||
268 | u32 misc_ie_addr = ath10k_pci_read32(ar, | ||
269 | ce_ctrl_addr + MISC_IE_ADDRESS); | ||
270 | |||
271 | ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS, | ||
272 | misc_ie_addr | CE_ERROR_MASK); | ||
273 | } | ||
274 | |||
275 | static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar, | ||
276 | u32 ce_ctrl_addr, | ||
277 | unsigned int mask) | ||
278 | { | ||
279 | ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask); | ||
280 | } | ||
281 | |||
282 | |||
283 | /* | ||
284 | * Guts of ath10k_ce_send, used by both ath10k_ce_send and | ||
285 | * ath10k_ce_sendlist_send. | ||
286 | * The caller takes responsibility for any needed locking. | ||
287 | */ | ||
288 | static int ath10k_ce_send_nolock(struct ce_state *ce_state, | ||
289 | void *per_transfer_context, | ||
290 | u32 buffer, | ||
291 | unsigned int nbytes, | ||
292 | unsigned int transfer_id, | ||
293 | unsigned int flags) | ||
294 | { | ||
295 | struct ath10k *ar = ce_state->ar; | ||
296 | struct ce_ring_state *src_ring = ce_state->src_ring; | ||
297 | struct ce_desc *desc, *sdesc; | ||
298 | unsigned int nentries_mask = src_ring->nentries_mask; | ||
299 | unsigned int sw_index = src_ring->sw_index; | ||
300 | unsigned int write_index = src_ring->write_index; | ||
301 | u32 ctrl_addr = ce_state->ctrl_addr; | ||
302 | u32 desc_flags = 0; | ||
303 | int ret = 0; | ||
304 | |||
305 | if (nbytes > ce_state->src_sz_max) | ||
306 | ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n", | ||
307 | __func__, nbytes, ce_state->src_sz_max); | ||
308 | |||
309 | ath10k_pci_wake(ar); | ||
310 | |||
311 | if (unlikely(CE_RING_DELTA(nentries_mask, | ||
312 | write_index, sw_index - 1) <= 0)) { | ||
313 | ret = -EIO; | ||
314 | goto exit; | ||
315 | } | ||
316 | |||
317 | desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space, | ||
318 | write_index); | ||
319 | sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index); | ||
320 | |||
321 | desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA); | ||
322 | |||
323 | if (flags & CE_SEND_FLAG_GATHER) | ||
324 | desc_flags |= CE_DESC_FLAGS_GATHER; | ||
325 | if (flags & CE_SEND_FLAG_BYTE_SWAP) | ||
326 | desc_flags |= CE_DESC_FLAGS_BYTE_SWAP; | ||
327 | |||
328 | sdesc->addr = __cpu_to_le32(buffer); | ||
329 | sdesc->nbytes = __cpu_to_le16(nbytes); | ||
330 | sdesc->flags = __cpu_to_le16(desc_flags); | ||
331 | |||
332 | *desc = *sdesc; | ||
333 | |||
334 | src_ring->per_transfer_context[write_index] = per_transfer_context; | ||
335 | |||
336 | /* Update Source Ring Write Index */ | ||
337 | write_index = CE_RING_IDX_INCR(nentries_mask, write_index); | ||
338 | |||
339 | /* WORKAROUND */ | ||
340 | if (!(flags & CE_SEND_FLAG_GATHER)) | ||
341 | ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index); | ||
342 | |||
343 | src_ring->write_index = write_index; | ||
344 | exit: | ||
345 | ath10k_pci_sleep(ar); | ||
346 | return ret; | ||
347 | } | ||
348 | |||
349 | int ath10k_ce_send(struct ce_state *ce_state, | ||
350 | void *per_transfer_context, | ||
351 | u32 buffer, | ||
352 | unsigned int nbytes, | ||
353 | unsigned int transfer_id, | ||
354 | unsigned int flags) | ||
355 | { | ||
356 | struct ath10k *ar = ce_state->ar; | ||
357 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
358 | int ret; | ||
359 | |||
360 | spin_lock_bh(&ar_pci->ce_lock); | ||
361 | ret = ath10k_ce_send_nolock(ce_state, per_transfer_context, | ||
362 | buffer, nbytes, transfer_id, flags); | ||
363 | spin_unlock_bh(&ar_pci->ce_lock); | ||
364 | |||
365 | return ret; | ||
366 | } | ||
367 | |||
368 | void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer, | ||
369 | unsigned int nbytes, u32 flags) | ||
370 | { | ||
371 | unsigned int num_items = sendlist->num_items; | ||
372 | struct ce_sendlist_item *item; | ||
373 | |||
374 | item = &sendlist->item[num_items]; | ||
375 | item->data = buffer; | ||
376 | item->u.nbytes = nbytes; | ||
377 | item->flags = flags; | ||
378 | sendlist->num_items++; | ||
379 | } | ||
380 | |||
381 | int ath10k_ce_sendlist_send(struct ce_state *ce_state, | ||
382 | void *per_transfer_context, | ||
383 | struct ce_sendlist *sendlist, | ||
384 | unsigned int transfer_id) | ||
385 | { | ||
386 | struct ce_ring_state *src_ring = ce_state->src_ring; | ||
387 | struct ce_sendlist_item *item; | ||
388 | struct ath10k *ar = ce_state->ar; | ||
389 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
390 | unsigned int nentries_mask = src_ring->nentries_mask; | ||
391 | unsigned int num_items = sendlist->num_items; | ||
392 | unsigned int sw_index; | ||
393 | unsigned int write_index; | ||
394 | int i, delta, ret = -ENOMEM; | ||
395 | |||
396 | spin_lock_bh(&ar_pci->ce_lock); | ||
397 | |||
398 | sw_index = src_ring->sw_index; | ||
399 | write_index = src_ring->write_index; | ||
400 | |||
401 | delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1); | ||
402 | |||
403 | if (delta >= num_items) { | ||
404 | /* | ||
405 | * Handle all but the last item uniformly. | ||
406 | */ | ||
407 | for (i = 0; i < num_items - 1; i++) { | ||
408 | item = &sendlist->item[i]; | ||
409 | ret = ath10k_ce_send_nolock(ce_state, | ||
410 | CE_SENDLIST_ITEM_CTXT, | ||
411 | (u32) item->data, | ||
412 | item->u.nbytes, transfer_id, | ||
413 | item->flags | | ||
414 | CE_SEND_FLAG_GATHER); | ||
415 | if (ret) | ||
416 | ath10k_warn("CE send failed for item: %d\n", i); | ||
417 | } | ||
418 | /* | ||
419 | * Provide valid context pointer for final item. | ||
420 | */ | ||
421 | item = &sendlist->item[i]; | ||
422 | ret = ath10k_ce_send_nolock(ce_state, per_transfer_context, | ||
423 | (u32) item->data, item->u.nbytes, | ||
424 | transfer_id, item->flags); | ||
425 | if (ret) | ||
426 | ath10k_warn("CE send failed for last item: %d\n", i); | ||
427 | } | ||
428 | |||
429 | spin_unlock_bh(&ar_pci->ce_lock); | ||
430 | |||
431 | return ret; | ||
432 | } | ||
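
The sendlist pair above lets a caller gather several fragments into a single completion. A hedged usage sketch; the DMA addresses, lengths and per-transfer context are placeholders.

/* Sketch: queue a two-fragment gather send on a copy engine.
 * hdr_paddr/payload_paddr are placeholder DMA addresses. */
static int example_gather_send(struct ce_state *ce_state, void *ctx,
			       u32 hdr_paddr, unsigned int hdr_len,
			       u32 payload_paddr, unsigned int payload_len,
			       unsigned int transfer_id)
{
	struct ce_sendlist sendlist = {};

	ath10k_ce_sendlist_buf_add(&sendlist, hdr_paddr, hdr_len, 0);
	ath10k_ce_sendlist_buf_add(&sendlist, payload_paddr, payload_len, 0);

	/* Both fragments complete as one transfer with 'ctx' echoed back. */
	return ath10k_ce_sendlist_send(ce_state, ctx, &sendlist, transfer_id);
}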
433 | |||
434 | int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state, | ||
435 | void *per_recv_context, | ||
436 | u32 buffer) | ||
437 | { | ||
438 | struct ce_ring_state *dest_ring = ce_state->dest_ring; | ||
439 | u32 ctrl_addr = ce_state->ctrl_addr; | ||
440 | struct ath10k *ar = ce_state->ar; | ||
441 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
442 | unsigned int nentries_mask = dest_ring->nentries_mask; | ||
443 | unsigned int write_index; | ||
444 | unsigned int sw_index; | ||
445 | int ret; | ||
446 | |||
447 | spin_lock_bh(&ar_pci->ce_lock); | ||
448 | write_index = dest_ring->write_index; | ||
449 | sw_index = dest_ring->sw_index; | ||
450 | |||
451 | ath10k_pci_wake(ar); | ||
452 | |||
453 | if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) { | ||
454 | struct ce_desc *base = dest_ring->base_addr_owner_space; | ||
455 | struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index); | ||
456 | |||
457 | /* Update destination descriptor */ | ||
458 | desc->addr = __cpu_to_le32(buffer); | ||
459 | desc->nbytes = 0; | ||
460 | |||
461 | dest_ring->per_transfer_context[write_index] = | ||
462 | per_recv_context; | ||
463 | |||
464 | /* Update Destination Ring Write Index */ | ||
465 | write_index = CE_RING_IDX_INCR(nentries_mask, write_index); | ||
466 | ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index); | ||
467 | dest_ring->write_index = write_index; | ||
468 | ret = 0; | ||
469 | } else { | ||
470 | ret = -EIO; | ||
471 | } | ||
472 | ath10k_pci_sleep(ar); | ||
473 | spin_unlock_bh(&ar_pci->ce_lock); | ||
474 | |||
475 | return ret; | ||
476 | } | ||
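
ath10k_ce_recv_buf_enqueue() only records a DMA address; mapping the buffer is the caller's job. A hedged sketch of posting one receive skb; the device pointer and skb sizing are placeholders, and the real driver keeps its own bookkeeping around this.

/* Sketch: map an skb for DMA and post it to a copy engine destination ring.
 * Error handling is minimal; "dev" and the skb sizing are placeholders. */
static int example_post_rx_buf(struct ce_state *ce_state, struct device *dev,
			       struct sk_buff *skb)
{
	dma_addr_t paddr;
	int ret;

	paddr = dma_map_single(dev, skb->data, skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, paddr))
		return -EIO;

	ret = ath10k_ce_recv_buf_enqueue(ce_state, skb, paddr);
	if (ret)	/* ring full: unmap and let the caller retry later */
		dma_unmap_single(dev, paddr, skb_tailroom(skb), DMA_FROM_DEVICE);

	return ret;
}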
477 | |||
478 | /* | ||
479 | * Guts of ath10k_ce_completed_recv_next. | ||
480 | * The caller takes responsibility for any necessary locking. | ||
481 | */ | ||
482 | static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state, | ||
483 | void **per_transfer_contextp, | ||
484 | u32 *bufferp, | ||
485 | unsigned int *nbytesp, | ||
486 | unsigned int *transfer_idp, | ||
487 | unsigned int *flagsp) | ||
488 | { | ||
489 | struct ce_ring_state *dest_ring = ce_state->dest_ring; | ||
490 | unsigned int nentries_mask = dest_ring->nentries_mask; | ||
491 | unsigned int sw_index = dest_ring->sw_index; | ||
492 | |||
493 | struct ce_desc *base = dest_ring->base_addr_owner_space; | ||
494 | struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index); | ||
495 | struct ce_desc sdesc; | ||
496 | u16 nbytes; | ||
497 | |||
498 | /* Copy in one go for performance reasons */ | ||
499 | sdesc = *desc; | ||
500 | |||
501 | nbytes = __le16_to_cpu(sdesc.nbytes); | ||
502 | if (nbytes == 0) { | ||
503 | /* | ||
504 | * This closes a relatively unusual race where the Host | ||
505 | * sees the updated DRRI before the update to the | ||
506 | * corresponding descriptor has completed. We treat this | ||
507 | * as a descriptor that is not yet done. | ||
508 | */ | ||
509 | return -EIO; | ||
510 | } | ||
511 | |||
512 | desc->nbytes = 0; | ||
513 | |||
514 | /* Return data from completed destination descriptor */ | ||
515 | *bufferp = __le32_to_cpu(sdesc.addr); | ||
516 | *nbytesp = nbytes; | ||
517 | *transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA); | ||
518 | |||
519 | if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP) | ||
520 | *flagsp = CE_RECV_FLAG_SWAPPED; | ||
521 | else | ||
522 | *flagsp = 0; | ||
523 | |||
524 | if (per_transfer_contextp) | ||
525 | *per_transfer_contextp = | ||
526 | dest_ring->per_transfer_context[sw_index]; | ||
527 | |||
528 | /* sanity */ | ||
529 | dest_ring->per_transfer_context[sw_index] = NULL; | ||
530 | |||
531 | /* Update sw_index */ | ||
532 | sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); | ||
533 | dest_ring->sw_index = sw_index; | ||
534 | |||
535 | return 0; | ||
536 | } | ||
537 | |||
538 | int ath10k_ce_completed_recv_next(struct ce_state *ce_state, | ||
539 | void **per_transfer_contextp, | ||
540 | u32 *bufferp, | ||
541 | unsigned int *nbytesp, | ||
542 | unsigned int *transfer_idp, | ||
543 | unsigned int *flagsp) | ||
544 | { | ||
545 | struct ath10k *ar = ce_state->ar; | ||
546 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
547 | int ret; | ||
548 | |||
549 | spin_lock_bh(&ar_pci->ce_lock); | ||
550 | ret = ath10k_ce_completed_recv_next_nolock(ce_state, | ||
551 | per_transfer_contextp, | ||
552 | bufferp, nbytesp, | ||
553 | transfer_idp, flagsp); | ||
554 | spin_unlock_bh(&ar_pci->ce_lock); | ||
555 | |||
556 | return ret; | ||
557 | } | ||
558 | |||
559 | int ath10k_ce_revoke_recv_next(struct ce_state *ce_state, | ||
560 | void **per_transfer_contextp, | ||
561 | u32 *bufferp) | ||
562 | { | ||
563 | struct ce_ring_state *dest_ring; | ||
564 | unsigned int nentries_mask; | ||
565 | unsigned int sw_index; | ||
566 | unsigned int write_index; | ||
567 | int ret; | ||
568 | struct ath10k *ar; | ||
569 | struct ath10k_pci *ar_pci; | ||
570 | |||
571 | dest_ring = ce_state->dest_ring; | ||
572 | |||
573 | if (!dest_ring) | ||
574 | return -EIO; | ||
575 | |||
576 | ar = ce_state->ar; | ||
577 | ar_pci = ath10k_pci_priv(ar); | ||
578 | |||
579 | spin_lock_bh(&ar_pci->ce_lock); | ||
580 | |||
581 | nentries_mask = dest_ring->nentries_mask; | ||
582 | sw_index = dest_ring->sw_index; | ||
583 | write_index = dest_ring->write_index; | ||
584 | if (write_index != sw_index) { | ||
585 | struct ce_desc *base = dest_ring->base_addr_owner_space; | ||
586 | struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index); | ||
587 | |||
588 | /* Return data from completed destination descriptor */ | ||
589 | *bufferp = __le32_to_cpu(desc->addr); | ||
590 | |||
591 | if (per_transfer_contextp) | ||
592 | *per_transfer_contextp = | ||
593 | dest_ring->per_transfer_context[sw_index]; | ||
594 | |||
595 | /* sanity */ | ||
596 | dest_ring->per_transfer_context[sw_index] = NULL; | ||
597 | |||
598 | /* Update sw_index */ | ||
599 | sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); | ||
600 | dest_ring->sw_index = sw_index; | ||
601 | ret = 0; | ||
602 | } else { | ||
603 | ret = -EIO; | ||
604 | } | ||
605 | |||
606 | spin_unlock_bh(&ar_pci->ce_lock); | ||
607 | |||
608 | return ret; | ||
609 | } | ||
610 | |||
611 | /* | ||
612 | * Guts of ath10k_ce_completed_send_next. | ||
613 | * The caller takes responsibility for any necessary locking. | ||
614 | */ | ||
615 | static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state, | ||
616 | void **per_transfer_contextp, | ||
617 | u32 *bufferp, | ||
618 | unsigned int *nbytesp, | ||
619 | unsigned int *transfer_idp) | ||
620 | { | ||
621 | struct ce_ring_state *src_ring = ce_state->src_ring; | ||
622 | u32 ctrl_addr = ce_state->ctrl_addr; | ||
623 | struct ath10k *ar = ce_state->ar; | ||
624 | unsigned int nentries_mask = src_ring->nentries_mask; | ||
625 | unsigned int sw_index = src_ring->sw_index; | ||
626 | unsigned int read_index; | ||
627 | int ret = -EIO; | ||
628 | |||
629 | if (src_ring->hw_index == sw_index) { | ||
630 | /* | ||
631 | * The SW completion index has caught up with the cached | ||
632 | * version of the HW completion index. | ||
633 | * Update the cached HW completion index to see whether | ||
634 | * the SW has really caught up to the HW, or if the cached | ||
635 | * value of the HW index has become stale. | ||
636 | */ | ||
637 | ath10k_pci_wake(ar); | ||
638 | src_ring->hw_index = | ||
639 | ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); | ||
640 | ath10k_pci_sleep(ar); | ||
641 | } | ||
642 | read_index = src_ring->hw_index; | ||
643 | |||
644 | if ((read_index != sw_index) && (read_index != 0xffffffff)) { | ||
645 | struct ce_desc *sbase = src_ring->shadow_base; | ||
646 | struct ce_desc *sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index); | ||
647 | |||
648 | /* Return data from completed source descriptor */ | ||
649 | *bufferp = __le32_to_cpu(sdesc->addr); | ||
650 | *nbytesp = __le16_to_cpu(sdesc->nbytes); | ||
651 | *transfer_idp = MS(__le16_to_cpu(sdesc->flags), | ||
652 | CE_DESC_FLAGS_META_DATA); | ||
653 | |||
654 | if (per_transfer_contextp) | ||
655 | *per_transfer_contextp = | ||
656 | src_ring->per_transfer_context[sw_index]; | ||
657 | |||
658 | /* sanity */ | ||
659 | src_ring->per_transfer_context[sw_index] = NULL; | ||
660 | |||
661 | /* Update sw_index */ | ||
662 | sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); | ||
663 | src_ring->sw_index = sw_index; | ||
664 | ret = 0; | ||
665 | } | ||
666 | |||
667 | return ret; | ||
668 | } | ||
669 | |||
670 | /* NB: Modeled after ath10k_ce_completed_send_next */ | ||
671 | int ath10k_ce_cancel_send_next(struct ce_state *ce_state, | ||
672 | void **per_transfer_contextp, | ||
673 | u32 *bufferp, | ||
674 | unsigned int *nbytesp, | ||
675 | unsigned int *transfer_idp) | ||
676 | { | ||
677 | struct ce_ring_state *src_ring; | ||
678 | unsigned int nentries_mask; | ||
679 | unsigned int sw_index; | ||
680 | unsigned int write_index; | ||
681 | int ret; | ||
682 | struct ath10k *ar; | ||
683 | struct ath10k_pci *ar_pci; | ||
684 | |||
685 | src_ring = ce_state->src_ring; | ||
686 | |||
687 | if (!src_ring) | ||
688 | return -EIO; | ||
689 | |||
690 | ar = ce_state->ar; | ||
691 | ar_pci = ath10k_pci_priv(ar); | ||
692 | |||
693 | spin_lock_bh(&ar_pci->ce_lock); | ||
694 | |||
695 | nentries_mask = src_ring->nentries_mask; | ||
696 | sw_index = src_ring->sw_index; | ||
697 | write_index = src_ring->write_index; | ||
698 | |||
699 | if (write_index != sw_index) { | ||
700 | struct ce_desc *base = src_ring->base_addr_owner_space; | ||
701 | struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index); | ||
702 | |||
703 | /* Return data from completed source descriptor */ | ||
704 | *bufferp = __le32_to_cpu(desc->addr); | ||
705 | *nbytesp = __le16_to_cpu(desc->nbytes); | ||
706 | *transfer_idp = MS(__le16_to_cpu(desc->flags), | ||
707 | CE_DESC_FLAGS_META_DATA); | ||
708 | |||
709 | if (per_transfer_contextp) | ||
710 | *per_transfer_contextp = | ||
711 | src_ring->per_transfer_context[sw_index]; | ||
712 | |||
713 | /* sanity */ | ||
714 | src_ring->per_transfer_context[sw_index] = NULL; | ||
715 | |||
716 | /* Update sw_index */ | ||
717 | sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); | ||
718 | src_ring->sw_index = sw_index; | ||
719 | ret = 0; | ||
720 | } else { | ||
721 | ret = -EIO; | ||
722 | } | ||
723 | |||
724 | spin_unlock_bh(&ar_pci->ce_lock); | ||
725 | |||
726 | return ret; | ||
727 | } | ||
728 | |||
729 | int ath10k_ce_completed_send_next(struct ce_state *ce_state, | ||
730 | void **per_transfer_contextp, | ||
731 | u32 *bufferp, | ||
732 | unsigned int *nbytesp, | ||
733 | unsigned int *transfer_idp) | ||
734 | { | ||
735 | struct ath10k *ar = ce_state->ar; | ||
736 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
737 | int ret; | ||
738 | |||
739 | spin_lock_bh(&ar_pci->ce_lock); | ||
740 | ret = ath10k_ce_completed_send_next_nolock(ce_state, | ||
741 | per_transfer_contextp, | ||
742 | bufferp, nbytesp, | ||
743 | transfer_idp); | ||
744 | spin_unlock_bh(&ar_pci->ce_lock); | ||
745 | |||
746 | return ret; | ||
747 | } | ||
748 | |||
749 | /* | ||
750 | * Guts of interrupt handler for per-engine interrupts on a particular CE. | ||
751 | * | ||
752 | * Invokes registered callbacks for recv_complete, | ||
753 | * send_complete, and watermarks. | ||
754 | */ | ||
755 | void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) | ||
756 | { | ||
757 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
758 | struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id]; | ||
759 | u32 ctrl_addr = ce_state->ctrl_addr; | ||
760 | void *transfer_context; | ||
761 | u32 buf; | ||
762 | unsigned int nbytes; | ||
763 | unsigned int id; | ||
764 | unsigned int flags; | ||
765 | |||
766 | ath10k_pci_wake(ar); | ||
767 | spin_lock_bh(&ar_pci->ce_lock); | ||
768 | |||
769 | /* Clear the copy-complete interrupts that will be handled here. */ | ||
770 | ath10k_ce_engine_int_status_clear(ar, ctrl_addr, | ||
771 | HOST_IS_COPY_COMPLETE_MASK); | ||
772 | |||
773 | if (ce_state->recv_cb) { | ||
774 | /* | ||
775 | * Pop completed recv buffers and call the registered | ||
776 | * recv callback for each | ||
777 | */ | ||
778 | while (ath10k_ce_completed_recv_next_nolock(ce_state, | ||
779 | &transfer_context, | ||
780 | &buf, &nbytes, | ||
781 | &id, &flags) == 0) { | ||
782 | spin_unlock_bh(&ar_pci->ce_lock); | ||
783 | ce_state->recv_cb(ce_state, transfer_context, buf, | ||
784 | nbytes, id, flags); | ||
785 | spin_lock_bh(&ar_pci->ce_lock); | ||
786 | } | ||
787 | } | ||
788 | |||
789 | if (ce_state->send_cb) { | ||
790 | /* | ||
791 | * Pop completed send buffers and call the registered | ||
792 | * send callback for each | ||
793 | */ | ||
794 | while (ath10k_ce_completed_send_next_nolock(ce_state, | ||
795 | &transfer_context, | ||
796 | &buf, | ||
797 | &nbytes, | ||
798 | &id) == 0) { | ||
799 | spin_unlock_bh(&ar_pci->ce_lock); | ||
800 | ce_state->send_cb(ce_state, transfer_context, | ||
801 | buf, nbytes, id); | ||
802 | spin_lock_bh(&ar_pci->ce_lock); | ||
803 | } | ||
804 | } | ||
805 | |||
806 | /* | ||
807 | * Misc CE interrupts are not being handled, but still need | ||
808 | * to be cleared. | ||
809 | */ | ||
810 | ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK); | ||
811 | |||
812 | spin_unlock_bh(&ar_pci->ce_lock); | ||
813 | ath10k_pci_sleep(ar); | ||
814 | } | ||
815 | |||
816 | /* | ||
817 | * Handler for per-engine interrupts on ALL active CEs. | ||
818 | * This is used in cases where the system is sharing a | ||
819 | * single interrupt for all CEs | ||
820 | */ | ||
821 | |||
822 | void ath10k_ce_per_engine_service_any(struct ath10k *ar) | ||
823 | { | ||
824 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
825 | int ce_id; | ||
826 | u32 intr_summary; | ||
827 | |||
828 | ath10k_pci_wake(ar); | ||
829 | intr_summary = CE_INTERRUPT_SUMMARY(ar); | ||
830 | |||
831 | for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) { | ||
832 | if (intr_summary & (1 << ce_id)) | ||
833 | intr_summary &= ~(1 << ce_id); | ||
834 | else | ||
835 | /* no intr pending on this CE */ | ||
836 | continue; | ||
837 | |||
838 | ath10k_ce_per_engine_service(ar, ce_id); | ||
839 | } | ||
840 | |||
841 | ath10k_pci_sleep(ar); | ||
842 | } | ||
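
When every CE shares one interrupt line, the copy-complete work would typically be deferred to a bottom half that simply calls ath10k_ce_per_engine_service_any() and lets it walk the interrupt summary. A minimal sketch follows; it is illustrative only, the handler name is hypothetical, and the actual tasklet/IRQ wiring lives in the PCI glue outside this hunk.

/*
 * Illustrative sketch (not part of the patch): a deferred handler that
 * fans out to every CE with work pending. Assumes "ce.h" is in scope.
 */
static void example_ath10k_ce_bottom_half(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;

	/* services every CE flagged in the interrupt summary register */
	ath10k_ce_per_engine_service_any(ar);
}
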
843 | |||
844 | /* | ||
845 | * Adjust interrupts for the copy complete handler. | ||
846 | * If it's needed for either send or recv, then unmask | ||
847 | * this interrupt; otherwise, mask it. | ||
848 | * | ||
849 | * Called with ce_lock held. | ||
850 | */ | ||
851 | static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state, | ||
852 | int disable_copy_compl_intr) | ||
853 | { | ||
854 | u32 ctrl_addr = ce_state->ctrl_addr; | ||
855 | struct ath10k *ar = ce_state->ar; | ||
856 | |||
857 | ath10k_pci_wake(ar); | ||
858 | |||
859 | if ((!disable_copy_compl_intr) && | ||
860 | (ce_state->send_cb || ce_state->recv_cb)) | ||
861 | ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr); | ||
862 | else | ||
863 | ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); | ||
864 | |||
865 | ath10k_ce_watermark_intr_disable(ar, ctrl_addr); | ||
866 | |||
867 | ath10k_pci_sleep(ar); | ||
868 | } | ||
869 | |||
870 | void ath10k_ce_disable_interrupts(struct ath10k *ar) | ||
871 | { | ||
872 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
873 | int ce_id; | ||
874 | |||
875 | ath10k_pci_wake(ar); | ||
876 | for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) { | ||
877 | struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id]; | ||
878 | u32 ctrl_addr = ce_state->ctrl_addr; | ||
879 | |||
880 | ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); | ||
881 | } | ||
882 | ath10k_pci_sleep(ar); | ||
883 | } | ||
884 | |||
885 | void ath10k_ce_send_cb_register(struct ce_state *ce_state, | ||
886 | void (*send_cb) (struct ce_state *ce_state, | ||
887 | void *transfer_context, | ||
888 | u32 buffer, | ||
889 | unsigned int nbytes, | ||
890 | unsigned int transfer_id), | ||
891 | int disable_interrupts) | ||
892 | { | ||
893 | struct ath10k *ar = ce_state->ar; | ||
894 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
895 | |||
896 | spin_lock_bh(&ar_pci->ce_lock); | ||
897 | ce_state->send_cb = send_cb; | ||
898 | ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts); | ||
899 | spin_unlock_bh(&ar_pci->ce_lock); | ||
900 | } | ||
901 | |||
902 | void ath10k_ce_recv_cb_register(struct ce_state *ce_state, | ||
903 | void (*recv_cb) (struct ce_state *ce_state, | ||
904 | void *transfer_context, | ||
905 | u32 buffer, | ||
906 | unsigned int nbytes, | ||
907 | unsigned int transfer_id, | ||
908 | unsigned int flags)) | ||
909 | { | ||
910 | struct ath10k *ar = ce_state->ar; | ||
911 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
912 | |||
913 | spin_lock_bh(&ar_pci->ce_lock); | ||
914 | ce_state->recv_cb = recv_cb; | ||
915 | ath10k_ce_per_engine_handler_adjust(ce_state, 0); | ||
916 | spin_unlock_bh(&ar_pci->ce_lock); | ||
917 | } | ||
918 | |||
919 | static int ath10k_ce_init_src_ring(struct ath10k *ar, | ||
920 | unsigned int ce_id, | ||
921 | struct ce_state *ce_state, | ||
922 | const struct ce_attr *attr) | ||
923 | { | ||
924 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
925 | struct ce_ring_state *src_ring; | ||
926 | unsigned int nentries = attr->src_nentries; | ||
927 | unsigned int ce_nbytes; | ||
928 | u32 ctrl_addr = ath10k_ce_base_address(ce_id); | ||
929 | dma_addr_t base_addr; | ||
930 | char *ptr; | ||
931 | |||
932 | nentries = roundup_pow_of_two(nentries); | ||
933 | |||
934 | if (ce_state->src_ring) { | ||
935 | WARN_ON(ce_state->src_ring->nentries != nentries); | ||
936 | return 0; | ||
937 | } | ||
938 | |||
939 | ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *)); | ||
940 | ptr = kzalloc(ce_nbytes, GFP_KERNEL); | ||
941 | if (ptr == NULL) | ||
942 | return -ENOMEM; | ||
943 | |||
944 | ce_state->src_ring = (struct ce_ring_state *)ptr; | ||
945 | src_ring = ce_state->src_ring; | ||
946 | |||
947 | ptr += sizeof(struct ce_ring_state); | ||
948 | src_ring->nentries = nentries; | ||
949 | src_ring->nentries_mask = nentries - 1; | ||
950 | |||
951 | ath10k_pci_wake(ar); | ||
952 | src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); | ||
953 | src_ring->hw_index = src_ring->sw_index; | ||
954 | |||
955 | src_ring->write_index = | ||
956 | ath10k_ce_src_ring_write_index_get(ar, ctrl_addr); | ||
957 | ath10k_pci_sleep(ar); | ||
958 | |||
959 | src_ring->per_transfer_context = (void **)ptr; | ||
960 | |||
961 | /* | ||
962 | * Legacy platforms that do not support cache | ||
963 | * coherent DMA are unsupported | ||
964 | */ | ||
965 | src_ring->base_addr_owner_space_unaligned = | ||
966 | pci_alloc_consistent(ar_pci->pdev, | ||
967 | (nentries * sizeof(struct ce_desc) + | ||
968 | CE_DESC_RING_ALIGN), | ||
969 | &base_addr); | ||
970 | src_ring->base_addr_ce_space_unaligned = base_addr; | ||
971 | |||
972 | src_ring->base_addr_owner_space = PTR_ALIGN( | ||
973 | src_ring->base_addr_owner_space_unaligned, | ||
974 | CE_DESC_RING_ALIGN); | ||
975 | src_ring->base_addr_ce_space = ALIGN( | ||
976 | src_ring->base_addr_ce_space_unaligned, | ||
977 | CE_DESC_RING_ALIGN); | ||
978 | |||
979 | /* | ||
980 | * Also allocate a shadow src ring in regular | ||
981 | * memory to use for faster access. | ||
982 | */ | ||
983 | src_ring->shadow_base_unaligned = | ||
984 | kmalloc((nentries * sizeof(struct ce_desc) + | ||
985 | CE_DESC_RING_ALIGN), GFP_KERNEL); | ||
986 | |||
987 | src_ring->shadow_base = PTR_ALIGN( | ||
988 | src_ring->shadow_base_unaligned, | ||
989 | CE_DESC_RING_ALIGN); | ||
990 | |||
991 | ath10k_pci_wake(ar); | ||
992 | ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, | ||
993 | src_ring->base_addr_ce_space); | ||
994 | ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries); | ||
995 | ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max); | ||
996 | ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0); | ||
997 | ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0); | ||
998 | ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries); | ||
999 | ath10k_pci_sleep(ar); | ||
1000 | |||
1001 | return 0; | ||
1002 | } | ||
1003 | |||
1004 | static int ath10k_ce_init_dest_ring(struct ath10k *ar, | ||
1005 | unsigned int ce_id, | ||
1006 | struct ce_state *ce_state, | ||
1007 | const struct ce_attr *attr) | ||
1008 | { | ||
1009 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1010 | struct ce_ring_state *dest_ring; | ||
1011 | unsigned int nentries = attr->dest_nentries; | ||
1012 | unsigned int ce_nbytes; | ||
1013 | u32 ctrl_addr = ath10k_ce_base_address(ce_id); | ||
1014 | dma_addr_t base_addr; | ||
1015 | char *ptr; | ||
1016 | |||
1017 | nentries = roundup_pow_of_two(nentries); | ||
1018 | |||
1019 | if (ce_state->dest_ring) { | ||
1020 | WARN_ON(ce_state->dest_ring->nentries != nentries); | ||
1021 | return 0; | ||
1022 | } | ||
1023 | |||
1024 | ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *)); | ||
1025 | ptr = kzalloc(ce_nbytes, GFP_KERNEL); | ||
1026 | if (ptr == NULL) | ||
1027 | return -ENOMEM; | ||
1028 | |||
1029 | ce_state->dest_ring = (struct ce_ring_state *)ptr; | ||
1030 | dest_ring = ce_state->dest_ring; | ||
1031 | |||
1032 | ptr += sizeof(struct ce_ring_state); | ||
1033 | dest_ring->nentries = nentries; | ||
1034 | dest_ring->nentries_mask = nentries - 1; | ||
1035 | |||
1036 | ath10k_pci_wake(ar); | ||
1037 | dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr); | ||
1038 | dest_ring->write_index = | ||
1039 | ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr); | ||
1040 | ath10k_pci_sleep(ar); | ||
1041 | |||
1042 | dest_ring->per_transfer_context = (void **)ptr; | ||
1043 | |||
1044 | /* | ||
1045 | * Legacy platforms that do not support cache | ||
1046 | * coherent DMA are unsupported | ||
1047 | */ | ||
1048 | dest_ring->base_addr_owner_space_unaligned = | ||
1049 | pci_alloc_consistent(ar_pci->pdev, | ||
1050 | (nentries * sizeof(struct ce_desc) + | ||
1051 | CE_DESC_RING_ALIGN), | ||
1052 | &base_addr); | ||
1053 | dest_ring->base_addr_ce_space_unaligned = base_addr; | ||
1054 | |||
1055 | /* | ||
1056 | * Initialize this memory to zero to prevent garbage data | ||
1057 | * from crashing the system during firmware download | ||
1058 | */ | ||
1059 | memset(dest_ring->base_addr_owner_space_unaligned, 0, | ||
1060 | nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN); | ||
1061 | |||
1062 | dest_ring->base_addr_owner_space = PTR_ALIGN( | ||
1063 | dest_ring->base_addr_owner_space_unaligned, | ||
1064 | CE_DESC_RING_ALIGN); | ||
1065 | dest_ring->base_addr_ce_space = ALIGN( | ||
1066 | dest_ring->base_addr_ce_space_unaligned, | ||
1067 | CE_DESC_RING_ALIGN); | ||
1068 | |||
1069 | ath10k_pci_wake(ar); | ||
1070 | ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, | ||
1071 | dest_ring->base_addr_ce_space); | ||
1072 | ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries); | ||
1073 | ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0); | ||
1074 | ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0); | ||
1075 | ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries); | ||
1076 | ath10k_pci_sleep(ar); | ||
1077 | |||
1078 | return 0; | ||
1079 | } | ||
1080 | |||
1081 | static struct ce_state *ath10k_ce_init_state(struct ath10k *ar, | ||
1082 | unsigned int ce_id, | ||
1083 | const struct ce_attr *attr) | ||
1084 | { | ||
1085 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1086 | struct ce_state *ce_state = NULL; | ||
1087 | u32 ctrl_addr = ath10k_ce_base_address(ce_id); | ||
1088 | |||
1089 | spin_lock_bh(&ar_pci->ce_lock); | ||
1090 | |||
1091 | if (!ar_pci->ce_id_to_state[ce_id]) { | ||
1092 | ce_state = kzalloc(sizeof(*ce_state), GFP_ATOMIC); | ||
1093 | if (ce_state == NULL) { | ||
1094 | spin_unlock_bh(&ar_pci->ce_lock); | ||
1095 | return NULL; | ||
1096 | } | ||
1097 | |||
1098 | ar_pci->ce_id_to_state[ce_id] = ce_state; | ||
1099 | ce_state->ar = ar; | ||
1100 | ce_state->id = ce_id; | ||
1101 | ce_state->ctrl_addr = ctrl_addr; | ||
1102 | ce_state->state = CE_RUNNING; | ||
1103 | /* Save attribute flags */ | ||
1104 | ce_state->attr_flags = attr->flags; | ||
1105 | ce_state->src_sz_max = attr->src_sz_max; | ||
1106 | } | ||
1107 | |||
1108 | spin_unlock_bh(&ar_pci->ce_lock); | ||
1109 | |||
1110 | return ce_state; | ||
1111 | } | ||
1112 | |||
1113 | /* | ||
1114 | * Initialize a Copy Engine based on caller-supplied attributes. | ||
1115 | * This may be called once to initialize both source and destination | ||
1116 | * rings or it may be called twice for separate source and destination | ||
1117 | * initialization. It may be that only one side or the other is | ||
1118 | * initialized by software/firmware. | ||
1119 | */ | ||
1120 | struct ce_state *ath10k_ce_init(struct ath10k *ar, | ||
1121 | unsigned int ce_id, | ||
1122 | const struct ce_attr *attr) | ||
1123 | { | ||
1124 | struct ce_state *ce_state; | ||
1125 | u32 ctrl_addr = ath10k_ce_base_address(ce_id); | ||
1126 | |||
1127 | ce_state = ath10k_ce_init_state(ar, ce_id, attr); | ||
1128 | if (!ce_state) { | ||
1129 | ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id); | ||
1130 | return NULL; | ||
1131 | } | ||
1132 | |||
1133 | if (attr->src_nentries) { | ||
1134 | if (ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr)) { | ||
1135 | ath10k_err("Failed to initialize CE src ring for ID: %d\n", | ||
1136 | ce_id); | ||
1137 | ath10k_ce_deinit(ce_state); | ||
1138 | return NULL; | ||
1139 | } | ||
1140 | } | ||
1141 | |||
1142 | if (attr->dest_nentries) { | ||
1143 | if (ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr)) { | ||
1144 | ath10k_err("Failed to initialize CE dest ring for ID: %d\n", | ||
1145 | ce_id); | ||
1146 | ath10k_ce_deinit(ce_state); | ||
1147 | return NULL; | ||
1148 | } | ||
1149 | } | ||
1150 | |||
1151 | /* Enable CE error interrupts */ | ||
1152 | ath10k_pci_wake(ar); | ||
1153 | ath10k_ce_error_intr_enable(ar, ctrl_addr); | ||
1154 | ath10k_pci_sleep(ar); | ||
1155 | |||
1156 | return ce_state; | ||
1157 | } | ||
1158 | |||
1159 | void ath10k_ce_deinit(struct ce_state *ce_state) | ||
1160 | { | ||
1161 | unsigned int ce_id = ce_state->id; | ||
1162 | struct ath10k *ar = ce_state->ar; | ||
1163 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1164 | |||
1165 | ce_state->state = CE_UNUSED; | ||
1166 | ar_pci->ce_id_to_state[ce_id] = NULL; | ||
1167 | |||
1168 | if (ce_state->src_ring) { | ||
1169 | kfree(ce_state->src_ring->shadow_base_unaligned); | ||
1170 | pci_free_consistent(ar_pci->pdev, | ||
1171 | (ce_state->src_ring->nentries * | ||
1172 | sizeof(struct ce_desc) + | ||
1173 | CE_DESC_RING_ALIGN), | ||
1174 | ce_state->src_ring->base_addr_owner_space, | ||
1175 | ce_state->src_ring->base_addr_ce_space); | ||
1176 | kfree(ce_state->src_ring); | ||
1177 | } | ||
1178 | |||
1179 | if (ce_state->dest_ring) { | ||
1180 | pci_free_consistent(ar_pci->pdev, | ||
1181 | (ce_state->dest_ring->nentries * | ||
1182 | sizeof(struct ce_desc) + | ||
1183 | CE_DESC_RING_ALIGN), | ||
1184 | ce_state->dest_ring->base_addr_owner_space, | ||
1185 | ce_state->dest_ring->base_addr_ce_space); | ||
1186 | kfree(ce_state->dest_ring); | ||
1187 | } | ||
1188 | kfree(ce_state); | ||
1189 | } | ||
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h new file mode 100644 index 000000000000..c17f07c026f4 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/ce.h | |||
@@ -0,0 +1,516 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #ifndef _CE_H_ | ||
19 | #define _CE_H_ | ||
20 | |||
21 | #include "hif.h" | ||
22 | |||
23 | |||
24 | /* Maximum number of Copy Engines supported */ | ||
25 | #define CE_COUNT_MAX 8 | ||
26 | #define CE_HTT_H2T_MSG_SRC_NENTRIES 2048 | ||
27 | |||
28 | /* Descriptor rings must be aligned to this boundary */ | ||
29 | #define CE_DESC_RING_ALIGN 8 | ||
30 | #define CE_SENDLIST_ITEMS_MAX 12 | ||
31 | #define CE_SEND_FLAG_GATHER 0x00010000 | ||
32 | |||
33 | /* | ||
34 | * Copy Engine support: low-level Target-side Copy Engine API. | ||
35 | * This is a hardware access layer used by code that understands | ||
36 | * how to use copy engines. | ||
37 | */ | ||
38 | |||
39 | struct ce_state; | ||
40 | |||
41 | |||
42 | /* Copy Engine operational state */ | ||
43 | enum ce_op_state { | ||
44 | CE_UNUSED, | ||
45 | CE_PAUSED, | ||
46 | CE_RUNNING, | ||
47 | }; | ||
48 | |||
49 | #define CE_DESC_FLAGS_GATHER (1 << 0) | ||
50 | #define CE_DESC_FLAGS_BYTE_SWAP (1 << 1) | ||
51 | #define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC | ||
52 | #define CE_DESC_FLAGS_META_DATA_LSB 3 | ||
53 | |||
54 | struct ce_desc { | ||
55 | __le32 addr; | ||
56 | __le16 nbytes; | ||
57 | __le16 flags; /* %CE_DESC_FLAGS_ */ | ||
58 | }; | ||
59 | |||
60 | /* Copy Engine Ring internal state */ | ||
61 | struct ce_ring_state { | ||
62 | /* Number of entries in this ring; must be power of 2 */ | ||
63 | unsigned int nentries; | ||
64 | unsigned int nentries_mask; | ||
65 | |||
66 | /* | ||
67 | * For dest ring, this is the next index to be processed | ||
68 | * by software after a buffer has been received into it. | ||
69 | * | ||
70 | * For src ring, this is the last descriptor that was sent | ||
71 | * and completion processed by software. | ||
72 | * | ||
73 | * Regardless of src or dest ring, this is an invariant | ||
74 | * (modulo ring size): | ||
75 | * write index >= read index >= sw_index | ||
76 | */ | ||
77 | unsigned int sw_index; | ||
78 | /* cached copy */ | ||
79 | unsigned int write_index; | ||
80 | /* | ||
81 | * For src ring, this is the next index not yet processed by HW. | ||
82 | * This is a cached copy of the real HW index (read index), used | ||
83 | * for avoiding reading the HW index register more often than | ||
84 | * necessary. | ||
85 | * This extends the invariant: | ||
86 | * write index >= read index >= hw_index >= sw_index | ||
87 | * | ||
88 | * For dest ring, this is currently unused. | ||
89 | */ | ||
90 | /* cached copy */ | ||
91 | unsigned int hw_index; | ||
92 | |||
93 | /* Start of DMA-coherent area reserved for descriptors */ | ||
94 | /* Host address space */ | ||
95 | void *base_addr_owner_space_unaligned; | ||
96 | /* CE address space */ | ||
97 | u32 base_addr_ce_space_unaligned; | ||
98 | |||
99 | /* | ||
100 | * Actual start of descriptors. | ||
101 | * Aligned to descriptor-size boundary. | ||
102 | * Points into reserved DMA-coherent area, above. | ||
103 | */ | ||
104 | /* Host address space */ | ||
105 | void *base_addr_owner_space; | ||
106 | |||
107 | /* CE address space */ | ||
108 | u32 base_addr_ce_space; | ||
109 | /* | ||
110 | * Start of shadow copy of descriptors, within regular memory. | ||
111 | * Aligned to descriptor-size boundary. | ||
112 | */ | ||
113 | void *shadow_base_unaligned; | ||
114 | struct ce_desc *shadow_base; | ||
115 | |||
116 | void **per_transfer_context; | ||
117 | }; | ||
118 | |||
119 | /* Copy Engine internal state */ | ||
120 | struct ce_state { | ||
121 | struct ath10k *ar; | ||
122 | unsigned int id; | ||
123 | |||
124 | unsigned int attr_flags; | ||
125 | |||
126 | u32 ctrl_addr; | ||
127 | enum ce_op_state state; | ||
128 | |||
129 | void (*send_cb) (struct ce_state *ce_state, | ||
130 | void *per_transfer_send_context, | ||
131 | u32 buffer, | ||
132 | unsigned int nbytes, | ||
133 | unsigned int transfer_id); | ||
134 | void (*recv_cb) (struct ce_state *ce_state, | ||
135 | void *per_transfer_recv_context, | ||
136 | u32 buffer, | ||
137 | unsigned int nbytes, | ||
138 | unsigned int transfer_id, | ||
139 | unsigned int flags); | ||
140 | |||
141 | unsigned int src_sz_max; | ||
142 | struct ce_ring_state *src_ring; | ||
143 | struct ce_ring_state *dest_ring; | ||
144 | }; | ||
145 | |||
146 | struct ce_sendlist_item { | ||
147 | /* e.g. buffer or desc list */ | ||
148 | dma_addr_t data; | ||
149 | union { | ||
150 | /* simple buffer */ | ||
151 | unsigned int nbytes; | ||
152 | /* Rx descriptor list */ | ||
153 | unsigned int ndesc; | ||
154 | } u; | ||
155 | /* externally-specified flags; OR-ed with internal flags */ | ||
156 | u32 flags; | ||
157 | }; | ||
158 | |||
159 | struct ce_sendlist { | ||
160 | unsigned int num_items; | ||
161 | struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX]; | ||
162 | }; | ||
163 | |||
164 | /* Copy Engine settable attributes */ | ||
165 | struct ce_attr; | ||
166 | |||
167 | /*==================Send====================*/ | ||
168 | |||
169 | /* ath10k_ce_send flags */ | ||
170 | #define CE_SEND_FLAG_BYTE_SWAP 1 | ||
171 | |||
172 | /* | ||
173 | * Queue a source buffer to be sent to an anonymous destination buffer. | ||
174 | * ce - which copy engine to use | ||
175 | * buffer - address of buffer | ||
176 | * nbytes - number of bytes to send | ||
177 | * transfer_id - arbitrary ID; reflected to destination | ||
178 | * flags - CE_SEND_FLAG_* values | ||
179 | * Returns 0 on success; otherwise an error status. | ||
180 | * | ||
181 | * Note: If no flags are specified, use CE's default data swap mode. | ||
182 | * | ||
183 | * Implementation note: pushes 1 buffer to Source ring | ||
184 | */ | ||
185 | int ath10k_ce_send(struct ce_state *ce_state, | ||
186 | void *per_transfer_send_context, | ||
187 | u32 buffer, | ||
188 | unsigned int nbytes, | ||
189 | /* 14 bits */ | ||
190 | unsigned int transfer_id, | ||
191 | unsigned int flags); | ||
192 | |||
193 | void ath10k_ce_send_cb_register(struct ce_state *ce_state, | ||
194 | void (*send_cb) (struct ce_state *ce_state, | ||
195 | void *transfer_context, | ||
196 | u32 buffer, | ||
197 | unsigned int nbytes, | ||
198 | unsigned int transfer_id), | ||
199 | int disable_interrupts); | ||
200 | |||
201 | /* Append a simple buffer (address/length) to a sendlist. */ | ||
202 | void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, | ||
203 | u32 buffer, | ||
204 | unsigned int nbytes, | ||
205 | /* OR-ed with internal flags */ | ||
206 | u32 flags); | ||
207 | |||
208 | /* | ||
209 | * Queue a "sendlist" of buffers to be sent using gather to a single | ||
210 | * anonymous destination buffer | ||
211 | * ce - which copy engine to use | ||
212 | * sendlist - list of simple buffers to send using gather | ||
213 | * transfer_id - arbitrary ID; reflected to destination | ||
214 | * Returns 0 on success; otherwise an error status. | ||
215 | * | ||
216 | * Implementation note: Pushes multiple buffers with Gather to Source ring. | ||
217 | */ | ||
218 | int ath10k_ce_sendlist_send(struct ce_state *ce_state, | ||
219 | void *per_transfer_send_context, | ||
220 | struct ce_sendlist *sendlist, | ||
221 | /* 14 bits */ | ||
222 | unsigned int transfer_id); | ||
223 | |||
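
The sendlist declarations above let several buffer fragments be pushed as a single gather transfer. A hedged usage sketch, assuming the fragments are already DMA-mapped; the addresses, lengths, transfer id and helper name are placeholders, not values from the patch.

/* Illustrative sketch only: gather two fragments into one CE transfer. */
static int example_ce_send_fragments(struct ce_state *ce_state, void *ctx,
				     u32 frag0_paddr, unsigned int frag0_len,
				     u32 frag1_paddr, unsigned int frag1_len)
{
	struct ce_sendlist sendlist = { 0 };

	ath10k_ce_sendlist_buf_add(&sendlist, frag0_paddr, frag0_len, 0);
	ath10k_ce_sendlist_buf_add(&sendlist, frag1_paddr, frag1_len, 0);

	/* 0x5 is an arbitrary example transfer id (14 bits available) */
	return ath10k_ce_sendlist_send(ce_state, ctx, &sendlist, 0x5);
}
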
224 | /*==================Recv=======================*/ | ||
225 | |||
226 | /* | ||
227 | * Make a buffer available to receive. The buffer must be at least the | ||
228 | * minimal size appropriate for this copy engine (src_sz_max attribute). | ||
229 | * ce - which copy engine to use | ||
230 | * per_transfer_recv_context - context passed back to caller's recv_cb | ||
231 | * buffer - address of buffer in CE space | ||
232 | * Returns 0 on success; otherwise an error status. | ||
233 | * | ||
234 | * Implementation note: Pushes a buffer to Dest ring. | ||
235 | */ | ||
236 | int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state, | ||
237 | void *per_transfer_recv_context, | ||
238 | u32 buffer); | ||
239 | |||
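
ath10k_ce_recv_buf_enqueue() is how the host keeps the destination ring stocked with DMA-able buffers of at least src_sz_max bytes. A minimal sketch, assuming the caller has already mapped the buffer; the context pointer and helper name are placeholders.

/* Illustrative sketch only: post one receive buffer to the dest ring. */
static int example_ce_post_rx_buf(struct ce_state *ce_state,
				  void *ctx, u32 paddr)
{
	/* paddr must point at a CE-visible buffer of >= src_sz_max bytes */
	return ath10k_ce_recv_buf_enqueue(ce_state, ctx, paddr);
}
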
240 | void ath10k_ce_recv_cb_register(struct ce_state *ce_state, | ||
241 | void (*recv_cb) (struct ce_state *ce_state, | ||
242 | void *transfer_context, | ||
243 | u32 buffer, | ||
244 | unsigned int nbytes, | ||
245 | unsigned int transfer_id, | ||
246 | unsigned int flags)); | ||
247 | |||
248 | /* recv flags */ | ||
249 | /* Data is byte-swapped */ | ||
250 | #define CE_RECV_FLAG_SWAPPED 1 | ||
251 | |||
252 | /* | ||
253 | * Supply data for the next completed unprocessed receive descriptor. | ||
254 | * Pops buffer from Dest ring. | ||
255 | */ | ||
256 | int ath10k_ce_completed_recv_next(struct ce_state *ce_state, | ||
257 | void **per_transfer_contextp, | ||
258 | u32 *bufferp, | ||
259 | unsigned int *nbytesp, | ||
260 | unsigned int *transfer_idp, | ||
261 | unsigned int *flagsp); | ||
262 | /* | ||
263 | * Supply data for the next completed unprocessed send descriptor. | ||
264 | * Pops 1 completed send buffer from Source ring. | ||
265 | */ | ||
266 | int ath10k_ce_completed_send_next(struct ce_state *ce_state, | ||
267 | void **per_transfer_contextp, | ||
268 | u32 *bufferp, | ||
269 | unsigned int *nbytesp, | ||
270 | unsigned int *transfer_idp); | ||
271 | |||
272 | /*==================CE Engine Initialization=======================*/ | ||
273 | |||
274 | /* Initialize an instance of a CE */ | ||
275 | struct ce_state *ath10k_ce_init(struct ath10k *ar, | ||
276 | unsigned int ce_id, | ||
277 | const struct ce_attr *attr); | ||
278 | |||
279 | /*==================CE Engine Shutdown=======================*/ | ||
280 | /* | ||
281 | * Support clean shutdown by allowing the caller to revoke | ||
282 | * receive buffers. Target DMA must be stopped before using | ||
283 | * this API. | ||
284 | */ | ||
285 | int ath10k_ce_revoke_recv_next(struct ce_state *ce_state, | ||
286 | void **per_transfer_contextp, | ||
287 | u32 *bufferp); | ||
288 | |||
289 | /* | ||
290 | * Support clean shutdown by allowing the caller to cancel | ||
291 | * pending sends. Target DMA must be stopped before using | ||
292 | * this API. | ||
293 | */ | ||
294 | int ath10k_ce_cancel_send_next(struct ce_state *ce_state, | ||
295 | void **per_transfer_contextp, | ||
296 | u32 *bufferp, | ||
297 | unsigned int *nbytesp, | ||
298 | unsigned int *transfer_idp); | ||
299 | |||
300 | void ath10k_ce_deinit(struct ce_state *ce_state); | ||
301 | |||
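
Taken together, the revoke/cancel helpers and ath10k_ce_deinit() suggest a shutdown order: stop target DMA first, drain both rings, then free the engine. A hedged sketch of that sequence (buffer unmapping elided, function name hypothetical):

/* Illustrative sketch only: flush and free one CE at shutdown. */
static void example_ce_flush_and_free(struct ce_state *ce_state)
{
	void *ctx;
	u32 paddr;
	unsigned int nbytes, id;

	/* reclaim receive buffers that were posted but never filled */
	while (ath10k_ce_revoke_recv_next(ce_state, &ctx, &paddr) == 0) {
		/* unmap and free the receive buffer referenced by ctx */
	}

	/* reclaim source buffers that were queued but never completed */
	while (ath10k_ce_cancel_send_next(ce_state, &ctx, &paddr,
					  &nbytes, &id) == 0) {
		/* unmap and free the send buffer referenced by ctx */
	}

	ath10k_ce_deinit(ce_state);
}
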
302 | /*==================CE Interrupt Handlers====================*/ | ||
303 | void ath10k_ce_per_engine_service_any(struct ath10k *ar); | ||
304 | void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id); | ||
305 | void ath10k_ce_disable_interrupts(struct ath10k *ar); | ||
306 | |||
307 | /* ce_attr.flags values */ | ||
308 | /* Use NonSnooping PCIe accesses? */ | ||
309 | #define CE_ATTR_NO_SNOOP 1 | ||
310 | |||
311 | /* Byte swap data words */ | ||
312 | #define CE_ATTR_BYTE_SWAP_DATA 2 | ||
313 | |||
314 | /* Swizzle descriptors? */ | ||
315 | #define CE_ATTR_SWIZZLE_DESCRIPTORS 4 | ||
316 | |||
317 | /* no interrupt on copy completion */ | ||
318 | #define CE_ATTR_DIS_INTR 8 | ||
319 | |||
320 | /* Attributes of an instance of a Copy Engine */ | ||
321 | struct ce_attr { | ||
322 | /* CE_ATTR_* values */ | ||
323 | unsigned int flags; | ||
324 | |||
325 | /* currently not in use */ | ||
326 | unsigned int priority; | ||
327 | |||
328 | /* #entries in source ring - Must be a power of 2 */ | ||
329 | unsigned int src_nentries; | ||
330 | |||
331 | /* | ||
332 | * Max source send size for this CE. | ||
333 | * This is also the minimum size of a destination buffer. | ||
334 | */ | ||
335 | unsigned int src_sz_max; | ||
336 | |||
337 | /* #entries in destination ring - Must be a power of 2 */ | ||
338 | unsigned int dest_nentries; | ||
339 | |||
340 | /* Future use */ | ||
341 | void *reserved; | ||
342 | }; | ||
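
A copy engine is described entirely by a ce_attr structure like the one defined above and handed to ath10k_ce_init(). The sketch below shows a plausible attribute set for a host-to-target engine; the counts, sizes and names are illustrative, not values taken from this patch.

/* Illustrative sketch only: attributes for a hypothetical send-only CE. */
static const struct ce_attr example_htc_tx_attr = {
	.flags = 0,
	.src_nentries = 16,		/* must be a power of 2 */
	.src_sz_max = 2048,
	.dest_nentries = 0,		/* host->target only: no dest ring */
};

static struct ce_state *example_ce_alloc(struct ath10k *ar, unsigned int ce_id)
{
	return ath10k_ce_init(ar, ce_id, &example_htc_tx_attr);
}
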
343 | |||
344 | /* | ||
345 | * When using sendlist_send to transfer multiple buffer fragments, the | ||
346 | * transfer context of each fragment, except the last one, is filled | ||
347 | * with CE_SENDLIST_ITEM_CTXT. ce_completed_send returns success for | ||
348 | * each fragment that has completed sending, and its transfer context | ||
349 | * will be CE_SENDLIST_ITEM_CTXT. The upper layer can use this to | ||
350 | * identify the status of a send completion. | ||
351 | */ | ||
352 | #define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef) | ||
353 | |||
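
A send-completion callback can therefore use CE_SENDLIST_ITEM_CTXT to tell intermediate gather fragments apart from the final fragment that carries the caller's real context. A minimal sketch, with a hypothetical callback name:

/* Illustrative sketch only: skip intermediate sendlist fragments. */
static void example_send_done(struct ce_state *ce_state, void *ctx,
			      u32 buffer, unsigned int nbytes,
			      unsigned int transfer_id)
{
	if (ctx == CE_SENDLIST_ITEM_CTXT)
		return;	/* intermediate gather fragment, nothing to free */

	/* real per-transfer context: unmap and free the buffer here */
}
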
354 | #define SR_BA_ADDRESS 0x0000 | ||
355 | #define SR_SIZE_ADDRESS 0x0004 | ||
356 | #define DR_BA_ADDRESS 0x0008 | ||
357 | #define DR_SIZE_ADDRESS 0x000c | ||
358 | #define CE_CMD_ADDRESS 0x0018 | ||
359 | |||
360 | #define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB 17 | ||
361 | #define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17 | ||
362 | #define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000 | ||
363 | #define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \ | ||
364 | (((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \ | ||
365 | CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) | ||
366 | |||
367 | #define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB 16 | ||
368 | #define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16 | ||
369 | #define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000 | ||
370 | #define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \ | ||
371 | (((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \ | ||
372 | CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) | ||
373 | #define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \ | ||
374 | (((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \ | ||
375 | CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) | ||
376 | |||
377 | #define CE_CTRL1_DMAX_LENGTH_MSB 15 | ||
378 | #define CE_CTRL1_DMAX_LENGTH_LSB 0 | ||
379 | #define CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff | ||
380 | #define CE_CTRL1_DMAX_LENGTH_GET(x) \ | ||
381 | (((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB) | ||
382 | #define CE_CTRL1_DMAX_LENGTH_SET(x) \ | ||
383 | (((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK) | ||
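
All of the CE register-field helpers follow the same MASK/LSB pattern: SET() shifts a value into position and masks it, GET() reverses the operation. A short illustrative-only sketch composing a CTRL1 value from the macros above (the function name is hypothetical):

/* Illustrative sketch only: build a CTRL1 register value. */
static u32 example_ce_ctrl1_value(unsigned int dmax_len)
{
	u32 ctrl1 = 0;

	ctrl1 |= CE_CTRL1_DMAX_LENGTH_SET(dmax_len);
	ctrl1 |= CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(0);
	ctrl1 |= CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(0);

	/* CE_CTRL1_DMAX_LENGTH_GET(ctrl1) == (dmax_len & 0xffff) */
	return ctrl1;
}
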
384 | |||
385 | #define CE_CTRL1_ADDRESS 0x0010 | ||
386 | #define CE_CTRL1_HW_MASK 0x0007ffff | ||
387 | #define CE_CTRL1_SW_MASK 0x0007ffff | ||
388 | #define CE_CTRL1_HW_WRITE_MASK 0x00000000 | ||
389 | #define CE_CTRL1_SW_WRITE_MASK 0x0007ffff | ||
390 | #define CE_CTRL1_RSTMASK 0xffffffff | ||
391 | #define CE_CTRL1_RESET 0x00000080 | ||
392 | |||
393 | #define CE_CMD_HALT_STATUS_MSB 3 | ||
394 | #define CE_CMD_HALT_STATUS_LSB 3 | ||
395 | #define CE_CMD_HALT_STATUS_MASK 0x00000008 | ||
396 | #define CE_CMD_HALT_STATUS_GET(x) \ | ||
397 | (((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB) | ||
398 | #define CE_CMD_HALT_STATUS_SET(x) \ | ||
399 | (((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK) | ||
400 | #define CE_CMD_HALT_STATUS_RESET 0 | ||
401 | #define CE_CMD_HALT_MSB 0 | ||
402 | #define CE_CMD_HALT_MASK 0x00000001 | ||
403 | |||
404 | #define HOST_IE_COPY_COMPLETE_MSB 0 | ||
405 | #define HOST_IE_COPY_COMPLETE_LSB 0 | ||
406 | #define HOST_IE_COPY_COMPLETE_MASK 0x00000001 | ||
407 | #define HOST_IE_COPY_COMPLETE_GET(x) \ | ||
408 | (((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB) | ||
409 | #define HOST_IE_COPY_COMPLETE_SET(x) \ | ||
410 | (((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK) | ||
411 | #define HOST_IE_COPY_COMPLETE_RESET 0 | ||
412 | #define HOST_IE_ADDRESS 0x002c | ||
413 | |||
414 | #define HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010 | ||
415 | #define HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008 | ||
416 | #define HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004 | ||
417 | #define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002 | ||
418 | #define HOST_IS_COPY_COMPLETE_MASK 0x00000001 | ||
419 | #define HOST_IS_ADDRESS 0x0030 | ||
420 | |||
421 | #define MISC_IE_ADDRESS 0x0034 | ||
422 | |||
423 | #define MISC_IS_AXI_ERR_MASK 0x00000400 | ||
424 | |||
425 | #define MISC_IS_DST_ADDR_ERR_MASK 0x00000200 | ||
426 | #define MISC_IS_SRC_LEN_ERR_MASK 0x00000100 | ||
427 | #define MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080 | ||
428 | #define MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040 | ||
429 | #define MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020 | ||
430 | |||
431 | #define MISC_IS_ADDRESS 0x0038 | ||
432 | |||
433 | #define SR_WR_INDEX_ADDRESS 0x003c | ||
434 | |||
435 | #define DST_WR_INDEX_ADDRESS 0x0040 | ||
436 | |||
437 | #define CURRENT_SRRI_ADDRESS 0x0044 | ||
438 | |||
439 | #define CURRENT_DRRI_ADDRESS 0x0048 | ||
440 | |||
441 | #define SRC_WATERMARK_LOW_MSB 31 | ||
442 | #define SRC_WATERMARK_LOW_LSB 16 | ||
443 | #define SRC_WATERMARK_LOW_MASK 0xffff0000 | ||
444 | #define SRC_WATERMARK_LOW_GET(x) \ | ||
445 | (((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB) | ||
446 | #define SRC_WATERMARK_LOW_SET(x) \ | ||
447 | (((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK) | ||
448 | #define SRC_WATERMARK_LOW_RESET 0 | ||
449 | #define SRC_WATERMARK_HIGH_MSB 15 | ||
450 | #define SRC_WATERMARK_HIGH_LSB 0 | ||
451 | #define SRC_WATERMARK_HIGH_MASK 0x0000ffff | ||
452 | #define SRC_WATERMARK_HIGH_GET(x) \ | ||
453 | (((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB) | ||
454 | #define SRC_WATERMARK_HIGH_SET(x) \ | ||
455 | (((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK) | ||
456 | #define SRC_WATERMARK_HIGH_RESET 0 | ||
457 | #define SRC_WATERMARK_ADDRESS 0x004c | ||
458 | |||
459 | #define DST_WATERMARK_LOW_LSB 16 | ||
460 | #define DST_WATERMARK_LOW_MASK 0xffff0000 | ||
461 | #define DST_WATERMARK_LOW_SET(x) \ | ||
462 | (((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK) | ||
463 | #define DST_WATERMARK_LOW_RESET 0 | ||
464 | #define DST_WATERMARK_HIGH_MSB 15 | ||
465 | #define DST_WATERMARK_HIGH_LSB 0 | ||
466 | #define DST_WATERMARK_HIGH_MASK 0x0000ffff | ||
467 | #define DST_WATERMARK_HIGH_GET(x) \ | ||
468 | (((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB) | ||
469 | #define DST_WATERMARK_HIGH_SET(x) \ | ||
470 | (((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK) | ||
471 | #define DST_WATERMARK_HIGH_RESET 0 | ||
472 | #define DST_WATERMARK_ADDRESS 0x0050 | ||
473 | |||
474 | |||
475 | static inline u32 ath10k_ce_base_address(unsigned int ce_id) | ||
476 | { | ||
477 | return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id; | ||
478 | } | ||
479 | |||
480 | #define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK | \ | ||
481 | HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \ | ||
482 | HOST_IS_DST_RING_LOW_WATERMARK_MASK | \ | ||
483 | HOST_IS_DST_RING_HIGH_WATERMARK_MASK) | ||
484 | |||
485 | #define CE_ERROR_MASK (MISC_IS_AXI_ERR_MASK | \ | ||
486 | MISC_IS_DST_ADDR_ERR_MASK | \ | ||
487 | MISC_IS_SRC_LEN_ERR_MASK | \ | ||
488 | MISC_IS_DST_MAX_LEN_VIO_MASK | \ | ||
489 | MISC_IS_DST_RING_OVERFLOW_MASK | \ | ||
490 | MISC_IS_SRC_RING_OVERFLOW_MASK) | ||
491 | |||
492 | #define CE_SRC_RING_TO_DESC(baddr, idx) \ | ||
493 | (&(((struct ce_desc *)baddr)[idx])) | ||
494 | |||
495 | #define CE_DEST_RING_TO_DESC(baddr, idx) \ | ||
496 | (&(((struct ce_desc *)baddr)[idx])) | ||
497 | |||
498 | /* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */ | ||
499 | #define CE_RING_DELTA(nentries_mask, fromidx, toidx) \ | ||
500 | (((int)(toidx)-(int)(fromidx)) & (nentries_mask)) | ||
501 | |||
502 | #define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask)) | ||
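
Because nentries is always a power of two, both macros implement modular ring arithmetic with a simple AND instead of a division. A worked, illustrative-only example with an 8-entry ring (function name hypothetical):

/* Illustrative sketch only: ring index arithmetic with nentries = 8. */
static void example_ring_math(void)
{
	unsigned int mask = 8 - 1;	/* nentries must be a power of 2 */
	unsigned int sw_index = 6;
	unsigned int write_index = 1;	/* already wrapped past the end */
	unsigned int used;

	/* filled entries between sw_index and write_index: (1 - 6) & 7 == 3 */
	used = CE_RING_DELTA(mask, sw_index, write_index);

	/* advancing past the last slot wraps back to 0: (7 + 1) & 7 == 0 */
	sw_index = CE_RING_IDX_INCR(mask, 7);

	(void)used;
	(void)sw_index;
}
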
503 | |||
504 | #define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8 | ||
505 | #define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00 | ||
506 | #define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \ | ||
507 | (((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \ | ||
508 | CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB) | ||
509 | #define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000 | ||
510 | |||
511 | #define CE_INTERRUPT_SUMMARY(ar) \ | ||
512 | CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \ | ||
513 | ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \ | ||
514 | CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS)) | ||
515 | |||
516 | #endif /* _CE_H_ */ | ||
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c new file mode 100644 index 000000000000..2b3426b1ff3f --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/core.c | |||
@@ -0,0 +1,665 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #include <linux/module.h> | ||
19 | #include <linux/firmware.h> | ||
20 | |||
21 | #include "core.h" | ||
22 | #include "mac.h" | ||
23 | #include "htc.h" | ||
24 | #include "hif.h" | ||
25 | #include "wmi.h" | ||
26 | #include "bmi.h" | ||
27 | #include "debug.h" | ||
28 | #include "htt.h" | ||
29 | |||
30 | unsigned int ath10k_debug_mask; | ||
31 | static bool uart_print; | ||
32 | static unsigned int ath10k_p2p; | ||
33 | module_param_named(debug_mask, ath10k_debug_mask, uint, 0644); | ||
34 | module_param(uart_print, bool, 0644); | ||
35 | module_param_named(p2p, ath10k_p2p, uint, 0644); | ||
36 | MODULE_PARM_DESC(debug_mask, "Debugging mask"); | ||
37 | MODULE_PARM_DESC(uart_print, "Uart target debugging"); | ||
38 | MODULE_PARM_DESC(p2p, "Enable ath10k P2P support"); | ||
39 | |||
40 | static const struct ath10k_hw_params ath10k_hw_params_list[] = { | ||
41 | { | ||
42 | .id = QCA988X_HW_1_0_VERSION, | ||
43 | .name = "qca988x hw1.0", | ||
44 | .patch_load_addr = QCA988X_HW_1_0_PATCH_LOAD_ADDR, | ||
45 | .fw = { | ||
46 | .dir = QCA988X_HW_1_0_FW_DIR, | ||
47 | .fw = QCA988X_HW_1_0_FW_FILE, | ||
48 | .otp = QCA988X_HW_1_0_OTP_FILE, | ||
49 | .board = QCA988X_HW_1_0_BOARD_DATA_FILE, | ||
50 | }, | ||
51 | }, | ||
52 | { | ||
53 | .id = QCA988X_HW_2_0_VERSION, | ||
54 | .name = "qca988x hw2.0", | ||
55 | .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, | ||
56 | .fw = { | ||
57 | .dir = QCA988X_HW_2_0_FW_DIR, | ||
58 | .fw = QCA988X_HW_2_0_FW_FILE, | ||
59 | .otp = QCA988X_HW_2_0_OTP_FILE, | ||
60 | .board = QCA988X_HW_2_0_BOARD_DATA_FILE, | ||
61 | }, | ||
62 | }, | ||
63 | }; | ||
64 | |||
65 | static void ath10k_send_suspend_complete(struct ath10k *ar) | ||
66 | { | ||
67 | ath10k_dbg(ATH10K_DBG_CORE, "%s\n", __func__); | ||
68 | |||
69 | ar->is_target_paused = true; | ||
70 | wake_up(&ar->event_queue); | ||
71 | } | ||
72 | |||
73 | static int ath10k_check_fw_version(struct ath10k *ar) | ||
74 | { | ||
75 | char version[32]; | ||
76 | |||
77 | if (ar->fw_version_major >= SUPPORTED_FW_MAJOR && | ||
78 | ar->fw_version_minor >= SUPPORTED_FW_MINOR && | ||
79 | ar->fw_version_release >= SUPPORTED_FW_RELEASE && | ||
80 | ar->fw_version_build >= SUPPORTED_FW_BUILD) | ||
81 | return 0; | ||
82 | |||
83 | snprintf(version, sizeof(version), "%u.%u.%u.%u", | ||
84 | SUPPORTED_FW_MAJOR, SUPPORTED_FW_MINOR, | ||
85 | SUPPORTED_FW_RELEASE, SUPPORTED_FW_BUILD); | ||
86 | |||
87 | ath10k_warn("WARNING: Firmware version %s is not officially supported.\n", | ||
88 | ar->hw->wiphy->fw_version); | ||
89 | ath10k_warn("Please upgrade to version %s (or newer)\n", version); | ||
90 | |||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | static int ath10k_init_connect_htc(struct ath10k *ar) | ||
95 | { | ||
96 | int status; | ||
97 | |||
98 | status = ath10k_wmi_connect_htc_service(ar); | ||
99 | if (status) | ||
100 | goto conn_fail; | ||
101 | |||
102 | /* Start HTC */ | ||
103 | status = ath10k_htc_start(ar->htc); | ||
104 | if (status) | ||
105 | goto conn_fail; | ||
106 | |||
107 | /* Wait for WMI event to be ready */ | ||
108 | status = ath10k_wmi_wait_for_service_ready(ar); | ||
109 | if (status <= 0) { | ||
110 | ath10k_warn("wmi service ready event not received"); | ||
111 | status = -ETIMEDOUT; | ||
112 | goto timeout; | ||
113 | } | ||
114 | |||
115 | ath10k_dbg(ATH10K_DBG_CORE, "core wmi ready\n"); | ||
116 | return 0; | ||
117 | |||
118 | timeout: | ||
119 | ath10k_htc_stop(ar->htc); | ||
120 | conn_fail: | ||
121 | return status; | ||
122 | } | ||
123 | |||
124 | static int ath10k_init_configure_target(struct ath10k *ar) | ||
125 | { | ||
126 | u32 param_host; | ||
127 | int ret; | ||
128 | |||
129 | /* tell target which HTC version is used */ | ||
130 | ret = ath10k_bmi_write32(ar, hi_app_host_interest, | ||
131 | HTC_PROTOCOL_VERSION); | ||
132 | if (ret) { | ||
133 | ath10k_err("settings HTC version failed\n"); | ||
134 | return ret; | ||
135 | } | ||
136 | |||
137 | /* set the firmware mode to STA/IBSS/AP */ | ||
138 | ret = ath10k_bmi_read32(ar, hi_option_flag, ¶m_host); | ||
139 | if (ret) { | ||
140 | ath10k_err("setting firmware mode (1/2) failed\n"); | ||
141 | return ret; | ||
142 | } | ||
143 | |||
144 | /* TODO following parameters need to be re-visited. */ | ||
145 | /* num_device */ | ||
146 | param_host |= (1 << HI_OPTION_NUM_DEV_SHIFT); | ||
147 | /* Firmware mode */ | ||
148 | /* FIXME: Why FW_MODE_AP ??.*/ | ||
149 | param_host |= (HI_OPTION_FW_MODE_AP << HI_OPTION_FW_MODE_SHIFT); | ||
150 | /* mac_addr_method */ | ||
151 | param_host |= (1 << HI_OPTION_MAC_ADDR_METHOD_SHIFT); | ||
152 | /* firmware_bridge */ | ||
153 | param_host |= (0 << HI_OPTION_FW_BRIDGE_SHIFT); | ||
154 | /* fwsubmode */ | ||
155 | param_host |= (0 << HI_OPTION_FW_SUBMODE_SHIFT); | ||
156 | |||
157 | ret = ath10k_bmi_write32(ar, hi_option_flag, param_host); | ||
158 | if (ret) { | ||
159 | ath10k_err("setting firmware mode (2/2) failed\n"); | ||
160 | return ret; | ||
161 | } | ||
162 | |||
163 | /* We do all byte-swapping on the host */ | ||
164 | ret = ath10k_bmi_write32(ar, hi_be, 0); | ||
165 | if (ret) { | ||
166 | ath10k_err("setting host CPU BE mode failed\n"); | ||
167 | return ret; | ||
168 | } | ||
169 | |||
170 | /* FW descriptor/Data swap flags */ | ||
171 | ret = ath10k_bmi_write32(ar, hi_fw_swap, 0); | ||
172 | |||
173 | if (ret) { | ||
174 | ath10k_err("setting FW data/desc swap flags failed\n"); | ||
175 | return ret; | ||
176 | } | ||
177 | |||
178 | return 0; | ||
179 | } | ||
180 | |||
181 | static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar, | ||
182 | const char *dir, | ||
183 | const char *file) | ||
184 | { | ||
185 | char filename[100]; | ||
186 | const struct firmware *fw; | ||
187 | int ret; | ||
188 | |||
189 | if (file == NULL) | ||
190 | return ERR_PTR(-ENOENT); | ||
191 | |||
192 | if (dir == NULL) | ||
193 | dir = "."; | ||
194 | |||
195 | snprintf(filename, sizeof(filename), "%s/%s", dir, file); | ||
196 | ret = request_firmware(&fw, filename, ar->dev); | ||
197 | if (ret) | ||
198 | return ERR_PTR(ret); | ||
199 | |||
200 | return fw; | ||
201 | } | ||
202 | |||
203 | static int ath10k_push_board_ext_data(struct ath10k *ar, | ||
204 | const struct firmware *fw) | ||
205 | { | ||
206 | u32 board_data_size = QCA988X_BOARD_DATA_SZ; | ||
207 | u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ; | ||
208 | u32 board_ext_data_addr; | ||
209 | int ret; | ||
210 | |||
211 | ret = ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr); | ||
212 | if (ret) { | ||
213 | ath10k_err("could not read board ext data addr (%d)\n", ret); | ||
214 | return ret; | ||
215 | } | ||
216 | |||
217 | ath10k_dbg(ATH10K_DBG_CORE, | ||
218 | "ath10k: Board extended Data download addr: 0x%x\n", | ||
219 | board_ext_data_addr); | ||
220 | |||
221 | if (board_ext_data_addr == 0) | ||
222 | return 0; | ||
223 | |||
224 | if (fw->size != (board_data_size + board_ext_data_size)) { | ||
225 | ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n", | ||
226 | fw->size, board_data_size, board_ext_data_size); | ||
227 | return -EINVAL; | ||
228 | } | ||
229 | |||
230 | ret = ath10k_bmi_write_memory(ar, board_ext_data_addr, | ||
231 | fw->data + board_data_size, | ||
232 | board_ext_data_size); | ||
233 | if (ret) { | ||
234 | ath10k_err("could not write board ext data (%d)\n", ret); | ||
235 | return ret; | ||
236 | } | ||
237 | |||
238 | ret = ath10k_bmi_write32(ar, hi_board_ext_data_config, | ||
239 | (board_ext_data_size << 16) | 1); | ||
240 | if (ret) { | ||
241 | ath10k_err("could not write board ext data bit (%d)\n", ret); | ||
242 | return ret; | ||
243 | } | ||
244 | |||
245 | return 0; | ||
246 | } | ||
247 | |||
248 | static int ath10k_download_board_data(struct ath10k *ar) | ||
249 | { | ||
250 | u32 board_data_size = QCA988X_BOARD_DATA_SZ; | ||
251 | u32 address; | ||
252 | const struct firmware *fw; | ||
253 | int ret; | ||
254 | |||
255 | fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, | ||
256 | ar->hw_params.fw.board); | ||
257 | if (IS_ERR(fw)) { | ||
258 | ath10k_err("could not fetch board data fw file (%ld)\n", | ||
259 | PTR_ERR(fw)); | ||
260 | return PTR_ERR(fw); | ||
261 | } | ||
262 | |||
263 | ret = ath10k_push_board_ext_data(ar, fw); | ||
264 | if (ret) { | ||
265 | ath10k_err("could not push board ext data (%d)\n", ret); | ||
266 | goto exit; | ||
267 | } | ||
268 | |||
269 | ret = ath10k_bmi_read32(ar, hi_board_data, &address); | ||
270 | if (ret) { | ||
271 | ath10k_err("could not read board data addr (%d)\n", ret); | ||
272 | goto exit; | ||
273 | } | ||
274 | |||
275 | ret = ath10k_bmi_write_memory(ar, address, fw->data, | ||
276 | min_t(u32, board_data_size, fw->size)); | ||
277 | if (ret) { | ||
278 | ath10k_err("could not write board data (%d)\n", ret); | ||
279 | goto exit; | ||
280 | } | ||
281 | |||
282 | ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1); | ||
283 | if (ret) { | ||
284 | ath10k_err("could not write board data bit (%d)\n", ret); | ||
285 | goto exit; | ||
286 | } | ||
287 | |||
288 | exit: | ||
289 | release_firmware(fw); | ||
290 | return ret; | ||
291 | } | ||
292 | |||
293 | static int ath10k_download_and_run_otp(struct ath10k *ar) | ||
294 | { | ||
295 | const struct firmware *fw; | ||
296 | u32 address; | ||
297 | u32 exec_param; | ||
298 | int ret; | ||
299 | |||
300 | /* OTP is optional */ | ||
301 | |||
302 | if (ar->hw_params.fw.otp == NULL) { | ||
303 | ath10k_info("otp file not defined\n"); | ||
304 | return 0; | ||
305 | } | ||
306 | |||
307 | address = ar->hw_params.patch_load_addr; | ||
308 | |||
309 | fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, | ||
310 | ar->hw_params.fw.otp); | ||
311 | if (IS_ERR(fw)) { | ||
312 | ath10k_warn("could not fetch otp (%ld)\n", PTR_ERR(fw)); | ||
313 | return 0; | ||
314 | } | ||
315 | |||
316 | ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size); | ||
317 | if (ret) { | ||
318 | ath10k_err("could not write otp (%d)\n", ret); | ||
319 | goto exit; | ||
320 | } | ||
321 | |||
322 | exec_param = 0; | ||
323 | ret = ath10k_bmi_execute(ar, address, &exec_param); | ||
324 | if (ret) { | ||
325 | ath10k_err("could not execute otp (%d)\n", ret); | ||
326 | goto exit; | ||
327 | } | ||
328 | |||
329 | exit: | ||
330 | release_firmware(fw); | ||
331 | return ret; | ||
332 | } | ||
333 | |||
334 | static int ath10k_download_fw(struct ath10k *ar) | ||
335 | { | ||
336 | const struct firmware *fw; | ||
337 | u32 address; | ||
338 | int ret; | ||
339 | |||
340 | if (ar->hw_params.fw.fw == NULL) | ||
341 | return -EINVAL; | ||
342 | |||
343 | address = ar->hw_params.patch_load_addr; | ||
344 | |||
345 | fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, | ||
346 | ar->hw_params.fw.fw); | ||
347 | if (IS_ERR(fw)) { | ||
348 | ath10k_err("could not fetch fw (%ld)\n", PTR_ERR(fw)); | ||
349 | return PTR_ERR(fw); | ||
350 | } | ||
351 | |||
352 | ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size); | ||
353 | if (ret) { | ||
354 | ath10k_err("could not write fw (%d)\n", ret); | ||
355 | goto exit; | ||
356 | } | ||
357 | |||
358 | exit: | ||
359 | release_firmware(fw); | ||
360 | return ret; | ||
361 | } | ||
362 | |||
363 | static int ath10k_init_download_firmware(struct ath10k *ar) | ||
364 | { | ||
365 | int ret; | ||
366 | |||
367 | ret = ath10k_download_board_data(ar); | ||
368 | if (ret) | ||
369 | return ret; | ||
370 | |||
371 | ret = ath10k_download_and_run_otp(ar); | ||
372 | if (ret) | ||
373 | return ret; | ||
374 | |||
375 | ret = ath10k_download_fw(ar); | ||
376 | if (ret) | ||
377 | return ret; | ||
378 | |||
379 | return ret; | ||
380 | } | ||
381 | |||
382 | static int ath10k_init_uart(struct ath10k *ar) | ||
383 | { | ||
384 | int ret; | ||
385 | |||
386 | /* | ||
387 | * Explicitly setting UART prints to zero as target turns it on | ||
388 | * based on scratch registers. | ||
389 | */ | ||
390 | ret = ath10k_bmi_write32(ar, hi_serial_enable, 0); | ||
391 | if (ret) { | ||
392 | ath10k_warn("could not disable UART prints (%d)\n", ret); | ||
393 | return ret; | ||
394 | } | ||
395 | |||
396 | if (!uart_print) { | ||
397 | ath10k_info("UART prints disabled\n"); | ||
398 | return 0; | ||
399 | } | ||
400 | |||
401 | ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7); | ||
402 | if (ret) { | ||
403 | ath10k_warn("could not enable UART prints (%d)\n", ret); | ||
404 | return ret; | ||
405 | } | ||
406 | |||
407 | ret = ath10k_bmi_write32(ar, hi_serial_enable, 1); | ||
408 | if (ret) { | ||
409 | ath10k_warn("could not enable UART prints (%d)\n", ret); | ||
410 | return ret; | ||
411 | } | ||
412 | |||
413 | ath10k_info("UART prints enabled\n"); | ||
414 | return 0; | ||
415 | } | ||
416 | |||
417 | static int ath10k_init_hw_params(struct ath10k *ar) | ||
418 | { | ||
419 | const struct ath10k_hw_params *uninitialized_var(hw_params); | ||
420 | int i; | ||
421 | |||
422 | for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) { | ||
423 | hw_params = &ath10k_hw_params_list[i]; | ||
424 | |||
425 | if (hw_params->id == ar->target_version) | ||
426 | break; | ||
427 | } | ||
428 | |||
429 | if (i == ARRAY_SIZE(ath10k_hw_params_list)) { | ||
430 | ath10k_err("Unsupported hardware version: 0x%x\n", | ||
431 | ar->target_version); | ||
432 | return -EINVAL; | ||
433 | } | ||
434 | |||
435 | ar->hw_params = *hw_params; | ||
436 | |||
437 | ath10k_info("Hardware name %s version 0x%x\n", | ||
438 | ar->hw_params.name, ar->target_version); | ||
439 | |||
440 | return 0; | ||
441 | } | ||
442 | |||
443 | struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, | ||
444 | enum ath10k_bus bus, | ||
445 | const struct ath10k_hif_ops *hif_ops) | ||
446 | { | ||
447 | struct ath10k *ar; | ||
448 | |||
449 | ar = ath10k_mac_create(); | ||
450 | if (!ar) | ||
451 | return NULL; | ||
452 | |||
453 | ar->ath_common.priv = ar; | ||
454 | ar->ath_common.hw = ar->hw; | ||
455 | |||
456 | ar->p2p = !!ath10k_p2p; | ||
457 | ar->dev = dev; | ||
458 | |||
459 | ar->hif.priv = hif_priv; | ||
460 | ar->hif.ops = hif_ops; | ||
461 | ar->hif.bus = bus; | ||
462 | |||
463 | ar->free_vdev_map = 0xFF; /* 8 vdevs */ | ||
464 | |||
465 | init_completion(&ar->scan.started); | ||
466 | init_completion(&ar->scan.completed); | ||
467 | init_completion(&ar->scan.on_channel); | ||
468 | |||
469 | init_completion(&ar->install_key_done); | ||
470 | init_completion(&ar->vdev_setup_done); | ||
471 | |||
472 | setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar); | ||
473 | |||
474 | ar->workqueue = create_singlethread_workqueue("ath10k_wq"); | ||
475 | if (!ar->workqueue) | ||
476 | goto err_wq; | ||
477 | |||
478 | mutex_init(&ar->conf_mutex); | ||
479 | spin_lock_init(&ar->data_lock); | ||
480 | |||
481 | INIT_LIST_HEAD(&ar->peers); | ||
482 | init_waitqueue_head(&ar->peer_mapping_wq); | ||
483 | |||
484 | init_completion(&ar->offchan_tx_completed); | ||
485 | INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work); | ||
486 | skb_queue_head_init(&ar->offchan_tx_queue); | ||
487 | |||
488 | init_waitqueue_head(&ar->event_queue); | ||
489 | |||
490 | return ar; | ||
491 | |||
492 | err_wq: | ||
493 | ath10k_mac_destroy(ar); | ||
494 | return NULL; | ||
495 | } | ||
496 | EXPORT_SYMBOL(ath10k_core_create); | ||
497 | |||
498 | void ath10k_core_destroy(struct ath10k *ar) | ||
499 | { | ||
500 | flush_workqueue(ar->workqueue); | ||
501 | destroy_workqueue(ar->workqueue); | ||
502 | |||
503 | ath10k_mac_destroy(ar); | ||
504 | } | ||
505 | EXPORT_SYMBOL(ath10k_core_destroy); | ||
506 | |||
507 | |||
508 | int ath10k_core_register(struct ath10k *ar) | ||
509 | { | ||
510 | struct ath10k_htc_ops htc_ops; | ||
511 | struct bmi_target_info target_info; | ||
512 | int status; | ||
513 | |||
514 | memset(&target_info, 0, sizeof(target_info)); | ||
515 | status = ath10k_bmi_get_target_info(ar, &target_info); | ||
516 | if (status) | ||
517 | goto err; | ||
518 | |||
519 | ar->target_version = target_info.version; | ||
520 | ar->hw->wiphy->hw_version = target_info.version; | ||
521 | |||
522 | status = ath10k_init_hw_params(ar); | ||
523 | if (status) | ||
524 | goto err; | ||
525 | |||
526 | if (ath10k_init_configure_target(ar)) { | ||
527 | status = -EINVAL; | ||
528 | goto err; | ||
529 | } | ||
530 | |||
531 | status = ath10k_init_download_firmware(ar); | ||
532 | if (status) | ||
533 | goto err; | ||
534 | |||
535 | status = ath10k_init_uart(ar); | ||
536 | if (status) | ||
537 | goto err; | ||
538 | |||
539 | htc_ops.target_send_suspend_complete = ath10k_send_suspend_complete; | ||
540 | |||
541 | ar->htc = ath10k_htc_create(ar, &htc_ops); | ||
542 | if (IS_ERR(ar->htc)) { | ||
543 | status = PTR_ERR(ar->htc); | ||
544 | ath10k_err("could not create HTC (%d)\n", status); | ||
545 | goto err; | ||
546 | } | ||
547 | |||
548 | status = ath10k_bmi_done(ar); | ||
549 | if (status) | ||
550 | goto err_htc_destroy; | ||
551 | |||
552 | status = ath10k_wmi_attach(ar); | ||
553 | if (status) { | ||
554 | ath10k_err("WMI attach failed: %d\n", status); | ||
555 | goto err_htc_destroy; | ||
556 | } | ||
557 | |||
558 | status = ath10k_htc_wait_target(ar->htc); | ||
559 | if (status) | ||
560 | goto err_wmi_detach; | ||
561 | |||
562 | ar->htt = ath10k_htt_attach(ar); | ||
563 | if (!ar->htt) { | ||
564 | status = -ENOMEM; | ||
565 | goto err_wmi_detach; | ||
566 | } | ||
567 | |||
568 | status = ath10k_init_connect_htc(ar); | ||
569 | if (status) | ||
570 | goto err_htt_detach; | ||
571 | |||
572 | ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version); | ||
573 | |||
574 | status = ath10k_check_fw_version(ar); | ||
575 | if (status) | ||
576 | goto err_disconnect_htc; | ||
577 | |||
578 | status = ath10k_wmi_cmd_init(ar); | ||
579 | if (status) { | ||
580 | ath10k_err("could not send WMI init command (%d)\n", status); | ||
581 | goto err_disconnect_htc; | ||
582 | } | ||
583 | |||
584 | status = ath10k_wmi_wait_for_unified_ready(ar); | ||
585 | if (status <= 0) { | ||
586 | ath10k_err("wmi unified ready event not received\n"); | ||
587 | status = -ETIMEDOUT; | ||
588 | goto err_disconnect_htc; | ||
589 | } | ||
590 | |||
591 | status = ath10k_htt_attach_target(ar->htt); | ||
592 | if (status) | ||
593 | goto err_disconnect_htc; | ||
594 | |||
595 | status = ath10k_mac_register(ar); | ||
596 | if (status) | ||
597 | goto err_disconnect_htc; | ||
598 | |||
599 | status = ath10k_debug_create(ar); | ||
600 | if (status) { | ||
601 | ath10k_err("unable to initialize debugfs\n"); | ||
602 | goto err_unregister_mac; | ||
603 | } | ||
604 | |||
605 | return 0; | ||
606 | |||
607 | err_unregister_mac: | ||
608 | ath10k_mac_unregister(ar); | ||
609 | err_disconnect_htc: | ||
610 | ath10k_htc_stop(ar->htc); | ||
611 | err_htt_detach: | ||
612 | ath10k_htt_detach(ar->htt); | ||
613 | err_wmi_detach: | ||
614 | ath10k_wmi_detach(ar); | ||
615 | err_htc_destroy: | ||
616 | ath10k_htc_destroy(ar->htc); | ||
617 | err: | ||
618 | return status; | ||
619 | } | ||
620 | EXPORT_SYMBOL(ath10k_core_register); | ||
621 | |||
622 | void ath10k_core_unregister(struct ath10k *ar) | ||
623 | { | ||
624 | /* We must unregister from mac80211 before we stop HTC and HIF. | ||
625 | * Otherwise we will fail to submit commands to FW and mac80211 will be | ||
626 | * unhappy about callback failures. */ | ||
627 | ath10k_mac_unregister(ar); | ||
628 | ath10k_htc_stop(ar->htc); | ||
629 | ath10k_htt_detach(ar->htt); | ||
630 | ath10k_wmi_detach(ar); | ||
631 | ath10k_htc_destroy(ar->htc); | ||
632 | } | ||
633 | EXPORT_SYMBOL(ath10k_core_unregister); | ||
634 | |||
635 | int ath10k_core_target_suspend(struct ath10k *ar) | ||
636 | { | ||
637 | int ret; | ||
638 | |||
639 | ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__); | ||
640 | |||
641 | ret = ath10k_wmi_pdev_suspend_target(ar); | ||
642 | if (ret) | ||
643 | ath10k_warn("could not suspend target (%d)\n", ret); | ||
644 | |||
645 | return ret; | ||
646 | } | ||
647 | EXPORT_SYMBOL(ath10k_core_target_suspend); | ||
648 | |||
649 | int ath10k_core_target_resume(struct ath10k *ar) | ||
650 | { | ||
651 | int ret; | ||
652 | |||
653 | ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__); | ||
654 | |||
655 | ret = ath10k_wmi_pdev_resume_target(ar); | ||
656 | if (ret) | ||
657 | ath10k_warn("could not resume target (%d)\n", ret); | ||
658 | |||
659 | return ret; | ||
660 | } | ||
661 | EXPORT_SYMBOL(ath10k_core_target_resume); | ||
662 | |||
663 | MODULE_AUTHOR("Qualcomm Atheros"); | ||
664 | MODULE_DESCRIPTION("Core module for QCA988X PCIe devices."); | ||
665 | MODULE_LICENSE("Dual BSD/GPL"); | ||
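The create/register/unregister entry points exported above are what a bus glue layer is expected to drive: allocate the core together with its HIF callbacks, register it once the interconnect is usable, and tear everything down in the reverse order. A minimal sketch of that flow follows; my_pci_hif_ops, my_pci_probe() and my_pci_remove() are hypothetical stand-ins and not part of this patch.

/* Hypothetical bus-glue sketch built only on the APIs exported above. */
#include "core.h"
#include "hif.h"

static const struct ath10k_hif_ops my_pci_hif_ops;	/* filled in by the bus code */

static int my_pci_probe(struct device *dev, void *hif_priv)
{
	struct ath10k *ar;
	int ret;

	ar = ath10k_core_create(hif_priv, dev, ATH10K_BUS_PCI, &my_pci_hif_ops);
	if (!ar)
		return -ENOMEM;

	/* boots firmware, attaches HTC/WMI/HTT and registers with mac80211 */
	ret = ath10k_core_register(ar);
	if (ret) {
		ath10k_core_destroy(ar);
		return ret;
	}

	return 0;
}

static void my_pci_remove(struct ath10k *ar)
{
	ath10k_core_unregister(ar);	/* mac80211 first, as the comment above notes */
	ath10k_core_destroy(ar);
}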
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h new file mode 100644 index 000000000000..539336d1be4b --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/core.h | |||
@@ -0,0 +1,369 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #ifndef _CORE_H_ | ||
19 | #define _CORE_H_ | ||
20 | |||
21 | #include <linux/completion.h> | ||
22 | #include <linux/if_ether.h> | ||
23 | #include <linux/types.h> | ||
24 | #include <linux/pci.h> | ||
25 | |||
26 | #include "htc.h" | ||
27 | #include "hw.h" | ||
28 | #include "targaddrs.h" | ||
29 | #include "wmi.h" | ||
30 | #include "../ath.h" | ||
31 | #include "../regd.h" | ||
32 | |||
33 | #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB) | ||
34 | #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) | ||
35 | #define WO(_f) ((_f##_OFFSET) >> 2) | ||
36 | |||
37 | #define ATH10K_SCAN_ID 0 | ||
38 | #define WMI_READY_TIMEOUT (5 * HZ) | ||
39 | #define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ) | ||
40 | |||
41 | /* Antenna noise floor */ | ||
42 | #define ATH10K_DEFAULT_NOISE_FLOOR -95 | ||
43 | |||
44 | struct ath10k; | ||
45 | |||
46 | enum ath10k_bus { | ||
47 | ATH10K_BUS_PCI, | ||
48 | }; | ||
49 | |||
50 | struct ath10k_skb_cb { | ||
51 | dma_addr_t paddr; | ||
52 | bool is_mapped; | ||
53 | bool is_aborted; | ||
54 | |||
55 | struct { | ||
56 | u8 vdev_id; | ||
57 | u16 msdu_id; | ||
58 | u8 tid; | ||
59 | bool is_offchan; | ||
60 | bool is_conf; | ||
61 | bool discard; | ||
62 | bool no_ack; | ||
63 | u8 refcount; | ||
64 | struct sk_buff *txfrag; | ||
65 | struct sk_buff *msdu; | ||
66 | } __packed htt; | ||
67 | |||
68 | /* 4 bytes left on 64bit arch */ | ||
69 | } __packed; | ||
70 | |||
71 | static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb) | ||
72 | { | ||
73 | BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) > | ||
74 | IEEE80211_TX_INFO_DRIVER_DATA_SIZE); | ||
75 | return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data; | ||
76 | } | ||
77 | |||
78 | static inline int ath10k_skb_map(struct device *dev, struct sk_buff *skb) | ||
79 | { | ||
80 | if (ATH10K_SKB_CB(skb)->is_mapped) | ||
81 | return -EINVAL; | ||
82 | |||
83 | ATH10K_SKB_CB(skb)->paddr = dma_map_single(dev, skb->data, skb->len, | ||
84 | DMA_TO_DEVICE); | ||
85 | |||
86 | if (unlikely(dma_mapping_error(dev, ATH10K_SKB_CB(skb)->paddr))) | ||
87 | return -EIO; | ||
88 | |||
89 | ATH10K_SKB_CB(skb)->is_mapped = true; | ||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | static inline int ath10k_skb_unmap(struct device *dev, struct sk_buff *skb) | ||
94 | { | ||
95 | if (!ATH10K_SKB_CB(skb)->is_mapped) | ||
96 | return -EINVAL; | ||
97 | |||
98 | dma_unmap_single(dev, ATH10K_SKB_CB(skb)->paddr, skb->len, | ||
99 | DMA_TO_DEVICE); | ||
100 | ATH10K_SKB_CB(skb)->is_mapped = false; | ||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static inline u32 host_interest_item_address(u32 item_offset) | ||
105 | { | ||
106 | return QCA988X_HOST_INTEREST_ADDRESS + item_offset; | ||
107 | } | ||
108 | |||
109 | struct ath10k_bmi { | ||
110 | bool done_sent; | ||
111 | }; | ||
112 | |||
113 | struct ath10k_wmi { | ||
114 | enum ath10k_htc_ep_id eid; | ||
115 | struct completion service_ready; | ||
116 | struct completion unified_ready; | ||
117 | atomic_t pending_tx_count; | ||
118 | wait_queue_head_t wq; | ||
119 | |||
120 | struct sk_buff_head wmi_event_list; | ||
121 | struct work_struct wmi_event_work; | ||
122 | }; | ||
123 | |||
124 | struct ath10k_peer_stat { | ||
125 | u8 peer_macaddr[ETH_ALEN]; | ||
126 | u32 peer_rssi; | ||
127 | u32 peer_tx_rate; | ||
128 | }; | ||
129 | |||
130 | struct ath10k_target_stats { | ||
131 | /* PDEV stats */ | ||
132 | s32 ch_noise_floor; | ||
133 | u32 tx_frame_count; | ||
134 | u32 rx_frame_count; | ||
135 | u32 rx_clear_count; | ||
136 | u32 cycle_count; | ||
137 | u32 phy_err_count; | ||
138 | u32 chan_tx_power; | ||
139 | |||
140 | /* PDEV TX stats */ | ||
141 | s32 comp_queued; | ||
142 | s32 comp_delivered; | ||
143 | s32 msdu_enqued; | ||
144 | s32 mpdu_enqued; | ||
145 | s32 wmm_drop; | ||
146 | s32 local_enqued; | ||
147 | s32 local_freed; | ||
148 | s32 hw_queued; | ||
149 | s32 hw_reaped; | ||
150 | s32 underrun; | ||
151 | s32 tx_abort; | ||
152 | s32 mpdus_requed; | ||
153 | u32 tx_ko; | ||
154 | u32 data_rc; | ||
155 | u32 self_triggers; | ||
156 | u32 sw_retry_failure; | ||
157 | u32 illgl_rate_phy_err; | ||
158 | u32 pdev_cont_xretry; | ||
159 | u32 pdev_tx_timeout; | ||
160 | u32 pdev_resets; | ||
161 | u32 phy_underrun; | ||
162 | u32 txop_ovf; | ||
163 | |||
164 | /* PDEV RX stats */ | ||
165 | s32 mid_ppdu_route_change; | ||
166 | s32 status_rcvd; | ||
167 | s32 r0_frags; | ||
168 | s32 r1_frags; | ||
169 | s32 r2_frags; | ||
170 | s32 r3_frags; | ||
171 | s32 htt_msdus; | ||
172 | s32 htt_mpdus; | ||
173 | s32 loc_msdus; | ||
174 | s32 loc_mpdus; | ||
175 | s32 oversize_amsdu; | ||
176 | s32 phy_errs; | ||
177 | s32 phy_err_drop; | ||
178 | s32 mpdu_errs; | ||
179 | |||
180 | /* VDEV STATS */ | ||
181 | |||
182 | /* PEER STATS */ | ||
183 | u8 peers; | ||
184 | struct ath10k_peer_stat peer_stat[TARGET_NUM_PEERS]; | ||
185 | |||
186 | /* TODO: Beacon filter stats */ | ||
187 | |||
188 | }; | ||
189 | |||
190 | #define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */ | ||
191 | |||
192 | struct ath10k_peer { | ||
193 | struct list_head list; | ||
194 | int vdev_id; | ||
195 | u8 addr[ETH_ALEN]; | ||
196 | DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS); | ||
197 | struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1]; | ||
198 | }; | ||
199 | |||
200 | #define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ) | ||
201 | |||
202 | struct ath10k_vif { | ||
203 | u32 vdev_id; | ||
204 | enum wmi_vdev_type vdev_type; | ||
205 | enum wmi_vdev_subtype vdev_subtype; | ||
206 | u32 beacon_interval; | ||
207 | u32 dtim_period; | ||
208 | |||
209 | struct ath10k *ar; | ||
210 | struct ieee80211_vif *vif; | ||
211 | |||
212 | struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1]; | ||
213 | u8 def_wep_key_index; | ||
214 | |||
215 | u16 tx_seq_no; | ||
216 | |||
217 | union { | ||
218 | struct { | ||
219 | u8 bssid[ETH_ALEN]; | ||
220 | u32 uapsd; | ||
221 | } sta; | ||
222 | struct { | ||
223 | /* 127 stations; wmi limit */ | ||
224 | u8 tim_bitmap[16]; | ||
225 | u8 tim_len; | ||
226 | u32 ssid_len; | ||
227 | u8 ssid[IEEE80211_MAX_SSID_LEN]; | ||
228 | bool hidden_ssid; | ||
229 | /* P2P_IE with NoA attribute for P2P_GO case */ | ||
230 | u32 noa_len; | ||
231 | u8 *noa_data; | ||
232 | } ap; | ||
233 | struct { | ||
234 | u8 bssid[ETH_ALEN]; | ||
235 | } ibss; | ||
236 | } u; | ||
237 | }; | ||
238 | |||
239 | struct ath10k_vif_iter { | ||
240 | u32 vdev_id; | ||
241 | struct ath10k_vif *arvif; | ||
242 | }; | ||
243 | |||
244 | struct ath10k_debug { | ||
245 | struct dentry *debugfs_phy; | ||
246 | |||
247 | struct ath10k_target_stats target_stats; | ||
248 | u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE]; | ||
249 | |||
250 | struct completion event_stats_compl; | ||
251 | }; | ||
252 | |||
253 | struct ath10k { | ||
254 | struct ath_common ath_common; | ||
255 | struct ieee80211_hw *hw; | ||
256 | struct device *dev; | ||
257 | u8 mac_addr[ETH_ALEN]; | ||
258 | |||
259 | u32 target_version; | ||
260 | u8 fw_version_major; | ||
261 | u32 fw_version_minor; | ||
262 | u16 fw_version_release; | ||
263 | u16 fw_version_build; | ||
264 | u32 phy_capability; | ||
265 | u32 hw_min_tx_power; | ||
266 | u32 hw_max_tx_power; | ||
267 | u32 ht_cap_info; | ||
268 | u32 vht_cap_info; | ||
269 | |||
270 | struct targetdef *targetdef; | ||
271 | struct hostdef *hostdef; | ||
272 | |||
273 | bool p2p; | ||
274 | |||
275 | struct { | ||
276 | void *priv; | ||
277 | enum ath10k_bus bus; | ||
278 | const struct ath10k_hif_ops *ops; | ||
279 | } hif; | ||
280 | |||
281 | struct ath10k_wmi wmi; | ||
282 | |||
283 | wait_queue_head_t event_queue; | ||
284 | bool is_target_paused; | ||
285 | |||
286 | struct ath10k_bmi bmi; | ||
287 | |||
288 | struct ath10k_htc *htc; | ||
289 | struct ath10k_htt *htt; | ||
290 | |||
291 | struct ath10k_hw_params { | ||
292 | u32 id; | ||
293 | const char *name; | ||
294 | u32 patch_load_addr; | ||
295 | |||
296 | struct ath10k_hw_params_fw { | ||
297 | const char *dir; | ||
298 | const char *fw; | ||
299 | const char *otp; | ||
300 | const char *board; | ||
301 | } fw; | ||
302 | } hw_params; | ||
303 | |||
304 | struct { | ||
305 | struct completion started; | ||
306 | struct completion completed; | ||
307 | struct completion on_channel; | ||
308 | struct timer_list timeout; | ||
309 | bool is_roc; | ||
310 | bool in_progress; | ||
311 | bool aborting; | ||
312 | int vdev_id; | ||
313 | int roc_freq; | ||
314 | } scan; | ||
315 | |||
316 | struct { | ||
317 | struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; | ||
318 | } mac; | ||
319 | |||
320 | /* should never be NULL; needed for regular htt rx */ | ||
321 | struct ieee80211_channel *rx_channel; | ||
322 | |||
323 | /* valid during scan; needed for mgmt rx during scan */ | ||
324 | struct ieee80211_channel *scan_channel; | ||
325 | |||
326 | int free_vdev_map; | ||
327 | int monitor_vdev_id; | ||
328 | bool monitor_enabled; | ||
329 | bool monitor_present; | ||
330 | unsigned int filter_flags; | ||
331 | |||
332 | struct wmi_pdev_set_wmm_params_arg wmm_params; | ||
333 | struct completion install_key_done; | ||
334 | |||
335 | struct completion vdev_setup_done; | ||
336 | |||
337 | struct workqueue_struct *workqueue; | ||
338 | |||
339 | /* prevents concurrent FW reconfiguration */ | ||
340 | struct mutex conf_mutex; | ||
341 | |||
342 | /* protects shared structure data */ | ||
343 | spinlock_t data_lock; | ||
344 | |||
345 | struct list_head peers; | ||
346 | wait_queue_head_t peer_mapping_wq; | ||
347 | |||
348 | struct work_struct offchan_tx_work; | ||
349 | struct sk_buff_head offchan_tx_queue; | ||
350 | struct completion offchan_tx_completed; | ||
351 | struct sk_buff *offchan_tx_skb; | ||
352 | |||
353 | #ifdef CONFIG_ATH10K_DEBUGFS | ||
354 | struct ath10k_debug debug; | ||
355 | #endif | ||
356 | }; | ||
357 | |||
358 | struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, | ||
359 | enum ath10k_bus bus, | ||
360 | const struct ath10k_hif_ops *hif_ops); | ||
361 | void ath10k_core_destroy(struct ath10k *ar); | ||
362 | |||
363 | int ath10k_core_register(struct ath10k *ar); | ||
364 | void ath10k_core_unregister(struct ath10k *ar); | ||
365 | |||
366 | int ath10k_core_target_suspend(struct ath10k *ar); | ||
367 | int ath10k_core_target_resume(struct ath10k *ar); | ||
368 | |||
369 | #endif /* _CORE_H_ */ | ||
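The MS() and SM() helpers defined near the top of this header are the usual mask-and-shift idiom: each register field FOO is described by a FOO_MASK/FOO_LSB pair, MS() extracts the field value from a raw word, and SM() builds the in-place bits from a field value. A self-contained illustration (the FOO field itself is made up for the example):

#include <assert.h>
#include <stdint.h>

#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)

/* hypothetical 4-bit field occupying bits [7:4] of a 32-bit register */
#define FOO_MASK 0x000000f0
#define FOO_LSB  4

int main(void)
{
	uint32_t reg = 0;

	reg |= SM(0x9, FOO);		/* place the value 9 into the FOO field */
	assert(MS(reg, FOO) == 0x9);	/* and read it back out */
	return 0;
}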
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c new file mode 100644 index 000000000000..499034b873d1 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/debug.c | |||
@@ -0,0 +1,503 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #include <linux/module.h> | ||
19 | #include <linux/debugfs.h> | ||
20 | |||
21 | #include "core.h" | ||
22 | #include "debug.h" | ||
23 | |||
24 | static int ath10k_printk(const char *level, const char *fmt, ...) | ||
25 | { | ||
26 | struct va_format vaf; | ||
27 | va_list args; | ||
28 | int rtn; | ||
29 | |||
30 | va_start(args, fmt); | ||
31 | |||
32 | vaf.fmt = fmt; | ||
33 | vaf.va = &args; | ||
34 | |||
35 | rtn = printk("%sath10k: %pV", level, &vaf); | ||
36 | |||
37 | va_end(args); | ||
38 | |||
39 | return rtn; | ||
40 | } | ||
41 | |||
42 | int ath10k_info(const char *fmt, ...) | ||
43 | { | ||
44 | struct va_format vaf = { | ||
45 | .fmt = fmt, | ||
46 | }; | ||
47 | va_list args; | ||
48 | int ret; | ||
49 | |||
50 | va_start(args, fmt); | ||
51 | vaf.va = &args; | ||
52 | ret = ath10k_printk(KERN_INFO, "%pV", &vaf); | ||
53 | trace_ath10k_log_info(&vaf); | ||
54 | va_end(args); | ||
55 | |||
56 | return ret; | ||
57 | } | ||
58 | EXPORT_SYMBOL(ath10k_info); | ||
59 | |||
60 | int ath10k_err(const char *fmt, ...) | ||
61 | { | ||
62 | struct va_format vaf = { | ||
63 | .fmt = fmt, | ||
64 | }; | ||
65 | va_list args; | ||
66 | int ret; | ||
67 | |||
68 | va_start(args, fmt); | ||
69 | vaf.va = &args; | ||
70 | ret = ath10k_printk(KERN_ERR, "%pV", &vaf); | ||
71 | trace_ath10k_log_err(&vaf); | ||
72 | va_end(args); | ||
73 | |||
74 | return ret; | ||
75 | } | ||
76 | EXPORT_SYMBOL(ath10k_err); | ||
77 | |||
78 | int ath10k_warn(const char *fmt, ...) | ||
79 | { | ||
80 | struct va_format vaf = { | ||
81 | .fmt = fmt, | ||
82 | }; | ||
83 | va_list args; | ||
84 | int ret = 0; | ||
85 | |||
86 | va_start(args, fmt); | ||
87 | vaf.va = &args; | ||
88 | |||
89 | if (net_ratelimit()) | ||
90 | ret = ath10k_printk(KERN_WARNING, "%pV", &vaf); | ||
91 | |||
92 | trace_ath10k_log_warn(&vaf); | ||
93 | |||
94 | va_end(args); | ||
95 | |||
96 | return ret; | ||
97 | } | ||
98 | EXPORT_SYMBOL(ath10k_warn); | ||
99 | |||
100 | #ifdef CONFIG_ATH10K_DEBUGFS | ||
101 | |||
102 | void ath10k_debug_read_service_map(struct ath10k *ar, | ||
103 | void *service_map, | ||
104 | size_t map_size) | ||
105 | { | ||
106 | memcpy(ar->debug.wmi_service_bitmap, service_map, map_size); | ||
107 | } | ||
108 | |||
109 | static ssize_t ath10k_read_wmi_services(struct file *file, | ||
110 | char __user *user_buf, | ||
111 | size_t count, loff_t *ppos) | ||
112 | { | ||
113 | struct ath10k *ar = file->private_data; | ||
114 | char *buf; | ||
115 | unsigned int len = 0, buf_len = 1500; | ||
116 | const char *status; | ||
117 | ssize_t ret_cnt; | ||
118 | int i; | ||
119 | |||
120 | buf = kzalloc(buf_len, GFP_KERNEL); | ||
121 | if (!buf) | ||
122 | return -ENOMEM; | ||
123 | |||
124 | mutex_lock(&ar->conf_mutex); | ||
125 | |||
126 | if (len > buf_len) | ||
127 | len = buf_len; | ||
128 | |||
129 | for (i = 0; i < WMI_SERVICE_LAST; i++) { | ||
130 | if (WMI_SERVICE_IS_ENABLED(ar->debug.wmi_service_bitmap, i)) | ||
131 | status = "enabled"; | ||
132 | else | ||
133 | status = "disabled"; | ||
134 | |||
135 | len += scnprintf(buf + len, buf_len - len, | ||
136 | "0x%02x - %20s - %s\n", | ||
137 | i, wmi_service_name(i), status); | ||
138 | } | ||
139 | |||
140 | ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); | ||
141 | |||
142 | mutex_unlock(&ar->conf_mutex); | ||
143 | |||
144 | kfree(buf); | ||
145 | return ret_cnt; | ||
146 | } | ||
147 | |||
148 | static const struct file_operations fops_wmi_services = { | ||
149 | .read = ath10k_read_wmi_services, | ||
150 | .open = simple_open, | ||
151 | .owner = THIS_MODULE, | ||
152 | .llseek = default_llseek, | ||
153 | }; | ||
154 | |||
155 | void ath10k_debug_read_target_stats(struct ath10k *ar, | ||
156 | struct wmi_stats_event *ev) | ||
157 | { | ||
158 | u8 *tmp = ev->data; | ||
159 | struct ath10k_target_stats *stats; | ||
160 | int num_pdev_stats, num_vdev_stats, num_peer_stats; | ||
161 | struct wmi_pdev_stats *ps; | ||
162 | int i; | ||
163 | |||
164 | mutex_lock(&ar->conf_mutex); | ||
165 | |||
166 | stats = &ar->debug.target_stats; | ||
167 | |||
168 | num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); /* 0 or 1 */ | ||
169 | num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); /* 0 or max vdevs */ | ||
170 | num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */ | ||
171 | |||
172 | if (num_pdev_stats) { | ||
173 | ps = (struct wmi_pdev_stats *)tmp; | ||
174 | |||
175 | stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf); | ||
176 | stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count); | ||
177 | stats->rx_frame_count = __le32_to_cpu(ps->rx_frame_count); | ||
178 | stats->rx_clear_count = __le32_to_cpu(ps->rx_clear_count); | ||
179 | stats->cycle_count = __le32_to_cpu(ps->cycle_count); | ||
180 | stats->phy_err_count = __le32_to_cpu(ps->phy_err_count); | ||
181 | stats->chan_tx_power = __le32_to_cpu(ps->chan_tx_pwr); | ||
182 | |||
183 | stats->comp_queued = __le32_to_cpu(ps->wal.tx.comp_queued); | ||
184 | stats->comp_delivered = | ||
185 | __le32_to_cpu(ps->wal.tx.comp_delivered); | ||
186 | stats->msdu_enqued = __le32_to_cpu(ps->wal.tx.msdu_enqued); | ||
187 | stats->mpdu_enqued = __le32_to_cpu(ps->wal.tx.mpdu_enqued); | ||
188 | stats->wmm_drop = __le32_to_cpu(ps->wal.tx.wmm_drop); | ||
189 | stats->local_enqued = __le32_to_cpu(ps->wal.tx.local_enqued); | ||
190 | stats->local_freed = __le32_to_cpu(ps->wal.tx.local_freed); | ||
191 | stats->hw_queued = __le32_to_cpu(ps->wal.tx.hw_queued); | ||
192 | stats->hw_reaped = __le32_to_cpu(ps->wal.tx.hw_reaped); | ||
193 | stats->underrun = __le32_to_cpu(ps->wal.tx.underrun); | ||
194 | stats->tx_abort = __le32_to_cpu(ps->wal.tx.tx_abort); | ||
195 | stats->mpdus_requed = __le32_to_cpu(ps->wal.tx.mpdus_requed); | ||
196 | stats->tx_ko = __le32_to_cpu(ps->wal.tx.tx_ko); | ||
197 | stats->data_rc = __le32_to_cpu(ps->wal.tx.data_rc); | ||
198 | stats->self_triggers = __le32_to_cpu(ps->wal.tx.self_triggers); | ||
199 | stats->sw_retry_failure = | ||
200 | __le32_to_cpu(ps->wal.tx.sw_retry_failure); | ||
201 | stats->illgl_rate_phy_err = | ||
202 | __le32_to_cpu(ps->wal.tx.illgl_rate_phy_err); | ||
203 | stats->pdev_cont_xretry = | ||
204 | __le32_to_cpu(ps->wal.tx.pdev_cont_xretry); | ||
205 | stats->pdev_tx_timeout = | ||
206 | __le32_to_cpu(ps->wal.tx.pdev_tx_timeout); | ||
207 | stats->pdev_resets = __le32_to_cpu(ps->wal.tx.pdev_resets); | ||
208 | stats->phy_underrun = __le32_to_cpu(ps->wal.tx.phy_underrun); | ||
209 | stats->txop_ovf = __le32_to_cpu(ps->wal.tx.txop_ovf); | ||
210 | |||
211 | stats->mid_ppdu_route_change = | ||
212 | __le32_to_cpu(ps->wal.rx.mid_ppdu_route_change); | ||
213 | stats->status_rcvd = __le32_to_cpu(ps->wal.rx.status_rcvd); | ||
214 | stats->r0_frags = __le32_to_cpu(ps->wal.rx.r0_frags); | ||
215 | stats->r1_frags = __le32_to_cpu(ps->wal.rx.r1_frags); | ||
216 | stats->r2_frags = __le32_to_cpu(ps->wal.rx.r2_frags); | ||
217 | stats->r3_frags = __le32_to_cpu(ps->wal.rx.r3_frags); | ||
218 | stats->htt_msdus = __le32_to_cpu(ps->wal.rx.htt_msdus); | ||
219 | stats->htt_mpdus = __le32_to_cpu(ps->wal.rx.htt_mpdus); | ||
220 | stats->loc_msdus = __le32_to_cpu(ps->wal.rx.loc_msdus); | ||
221 | stats->loc_mpdus = __le32_to_cpu(ps->wal.rx.loc_mpdus); | ||
222 | stats->oversize_amsdu = | ||
223 | __le32_to_cpu(ps->wal.rx.oversize_amsdu); | ||
224 | stats->phy_errs = __le32_to_cpu(ps->wal.rx.phy_errs); | ||
225 | stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop); | ||
226 | stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs); | ||
227 | |||
228 | tmp += sizeof(struct wmi_pdev_stats); | ||
229 | } | ||
230 | |||
231 | /* 0 or max vdevs */ | ||
232 | /* Currently firmware does not support VDEV stats */ | ||
233 | if (num_vdev_stats) { | ||
234 | struct wmi_vdev_stats *vdev_stats; | ||
235 | |||
236 | for (i = 0; i < num_vdev_stats; i++) { | ||
237 | vdev_stats = (struct wmi_vdev_stats *)tmp; | ||
238 | tmp += sizeof(struct wmi_vdev_stats); | ||
239 | } | ||
240 | } | ||
241 | |||
242 | if (num_peer_stats) { | ||
243 | struct wmi_peer_stats *peer_stats; | ||
244 | struct ath10k_peer_stat *s; | ||
245 | |||
246 | stats->peers = num_peer_stats; | ||
247 | |||
248 | for (i = 0; i < num_peer_stats; i++) { | ||
249 | peer_stats = (struct wmi_peer_stats *)tmp; | ||
250 | s = &stats->peer_stat[i]; | ||
251 | |||
252 | WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr, | ||
253 | s->peer_macaddr); | ||
254 | s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi); | ||
255 | s->peer_tx_rate = | ||
256 | __le32_to_cpu(peer_stats->peer_tx_rate); | ||
257 | |||
258 | tmp += sizeof(struct wmi_peer_stats); | ||
259 | } | ||
260 | } | ||
261 | |||
262 | mutex_unlock(&ar->conf_mutex); | ||
263 | complete(&ar->debug.event_stats_compl); | ||
264 | } | ||
265 | |||
266 | static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf, | ||
267 | size_t count, loff_t *ppos) | ||
268 | { | ||
269 | struct ath10k *ar = file->private_data; | ||
270 | struct ath10k_target_stats *fw_stats; | ||
271 | char *buf; | ||
272 | unsigned int len = 0, buf_len = 2500; | ||
273 | ssize_t ret_cnt; | ||
274 | long left; | ||
275 | int i; | ||
276 | int ret; | ||
277 | |||
278 | fw_stats = &ar->debug.target_stats; | ||
279 | |||
280 | buf = kzalloc(buf_len, GFP_KERNEL); | ||
281 | if (!buf) | ||
282 | return -ENOMEM; | ||
283 | |||
284 | ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT); | ||
285 | if (ret) { | ||
286 | ath10k_warn("could not request stats (%d)\n", ret); | ||
287 | kfree(buf); | ||
288 | return -EIO; | ||
289 | } | ||
290 | |||
291 | left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ); | ||
292 | |||
293 | if (left <= 0) { | ||
294 | kfree(buf); | ||
295 | return -ETIMEDOUT; | ||
296 | } | ||
297 | |||
298 | mutex_lock(&ar->conf_mutex); | ||
299 | |||
300 | len += scnprintf(buf + len, buf_len - len, "\n"); | ||
301 | len += scnprintf(buf + len, buf_len - len, "%30s\n", | ||
302 | "ath10k PDEV stats"); | ||
303 | len += scnprintf(buf + len, buf_len - len, "%30s\n\n", | ||
304 | "================="); | ||
305 | |||
306 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
307 | "Channel noise floor", fw_stats->ch_noise_floor); | ||
308 | len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", | ||
309 | "Channel TX power", fw_stats->chan_tx_power); | ||
310 | len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", | ||
311 | "TX frame count", fw_stats->tx_frame_count); | ||
312 | len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", | ||
313 | "RX frame count", fw_stats->rx_frame_count); | ||
314 | len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", | ||
315 | "RX clear count", fw_stats->rx_clear_count); | ||
316 | len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", | ||
317 | "Cycle count", fw_stats->cycle_count); | ||
318 | len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", | ||
319 | "PHY error count", fw_stats->phy_err_count); | ||
320 | |||
321 | len += scnprintf(buf + len, buf_len - len, "\n"); | ||
322 | len += scnprintf(buf + len, buf_len - len, "%30s\n", | ||
323 | "ath10k PDEV TX stats"); | ||
324 | len += scnprintf(buf + len, buf_len - len, "%30s\n\n", | ||
325 | "================="); | ||
326 | |||
327 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
328 | "HTT cookies queued", fw_stats->comp_queued); | ||
329 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
330 | "HTT cookies disp.", fw_stats->comp_delivered); | ||
331 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
332 | "MSDU queued", fw_stats->msdu_enqued); | ||
333 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
334 | "MPDU queued", fw_stats->mpdu_enqued); | ||
335 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
336 | "MSDUs dropped", fw_stats->wmm_drop); | ||
337 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
338 | "Local enqued", fw_stats->local_enqued); | ||
339 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
340 | "Local freed", fw_stats->local_freed); | ||
341 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
342 | "HW queued", fw_stats->hw_queued); | ||
343 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
344 | "PPDUs reaped", fw_stats->hw_reaped); | ||
345 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
346 | "Num underruns", fw_stats->underrun); | ||
347 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
348 | "PPDUs cleaned", fw_stats->tx_abort); | ||
349 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
350 | "MPDUs requed", fw_stats->mpdus_requed); | ||
351 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
352 | "Excessive retries", fw_stats->tx_ko); | ||
353 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
354 | "HW rate", fw_stats->data_rc); | ||
355 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
356 | "Sched self tiggers", fw_stats->self_triggers); | ||
357 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
358 | "Dropped due to SW retries", | ||
359 | fw_stats->sw_retry_failure); | ||
360 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
361 | "Illegal rate phy errors", | ||
362 | fw_stats->illgl_rate_phy_err); | ||
363 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
364 | "Pdev continous xretry", fw_stats->pdev_cont_xretry); | ||
365 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
366 | "TX timeout", fw_stats->pdev_tx_timeout); | ||
367 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
368 | "PDEV resets", fw_stats->pdev_resets); | ||
369 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
370 | "PHY underrun", fw_stats->phy_underrun); | ||
371 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
372 | "MPDU is more than txop limit", fw_stats->txop_ovf); | ||
373 | |||
374 | len += scnprintf(buf + len, buf_len - len, "\n"); | ||
375 | len += scnprintf(buf + len, buf_len - len, "%30s\n", | ||
376 | "ath10k PDEV RX stats"); | ||
377 | len += scnprintf(buf + len, buf_len - len, "%30s\n\n", | ||
378 | "================="); | ||
379 | |||
380 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
381 | "Mid PPDU route change", | ||
382 | fw_stats->mid_ppdu_route_change); | ||
383 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
384 | "Tot. number of statuses", fw_stats->status_rcvd); | ||
385 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
386 | "Extra frags on rings 0", fw_stats->r0_frags); | ||
387 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
388 | "Extra frags on rings 1", fw_stats->r1_frags); | ||
389 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
390 | "Extra frags on rings 2", fw_stats->r2_frags); | ||
391 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
392 | "Extra frags on rings 3", fw_stats->r3_frags); | ||
393 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
394 | "MSDUs delivered to HTT", fw_stats->htt_msdus); | ||
395 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
396 | "MPDUs delivered to HTT", fw_stats->htt_mpdus); | ||
397 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
398 | "MSDUs delivered to stack", fw_stats->loc_msdus); | ||
399 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
400 | "MPDUs delivered to stack", fw_stats->loc_mpdus); | ||
401 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
402 | "Oversized AMSUs", fw_stats->oversize_amsdu); | ||
403 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
404 | "PHY errors", fw_stats->phy_errs); | ||
405 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
406 | "PHY errors drops", fw_stats->phy_err_drop); | ||
407 | len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", | ||
408 | "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs); | ||
409 | |||
410 | len += scnprintf(buf + len, buf_len - len, "\n"); | ||
411 | len += scnprintf(buf + len, buf_len - len, "%30s\n", | ||
412 | "ath10k PEER stats"); | ||
413 | len += scnprintf(buf + len, buf_len - len, "%30s\n\n", | ||
414 | "================="); | ||
415 | |||
416 | for (i = 0; i < fw_stats->peers; i++) { | ||
417 | len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", | ||
418 | "Peer MAC address", | ||
419 | fw_stats->peer_stat[i].peer_macaddr); | ||
420 | len += scnprintf(buf + len, buf_len - len, "%30s %u\n", | ||
421 | "Peer RSSI", fw_stats->peer_stat[i].peer_rssi); | ||
422 | len += scnprintf(buf + len, buf_len - len, "%30s %u\n", | ||
423 | "Peer TX rate", | ||
424 | fw_stats->peer_stat[i].peer_tx_rate); | ||
425 | len += scnprintf(buf + len, buf_len - len, "\n"); | ||
426 | } | ||
427 | |||
428 | if (len > buf_len) | ||
429 | len = buf_len; | ||
430 | |||
431 | ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); | ||
432 | |||
433 | mutex_unlock(&ar->conf_mutex); | ||
434 | |||
435 | kfree(buf); | ||
436 | return ret_cnt; | ||
437 | } | ||
438 | |||
439 | static const struct file_operations fops_fw_stats = { | ||
440 | .read = ath10k_read_fw_stats, | ||
441 | .open = simple_open, | ||
442 | .owner = THIS_MODULE, | ||
443 | .llseek = default_llseek, | ||
444 | }; | ||
445 | |||
446 | int ath10k_debug_create(struct ath10k *ar) | ||
447 | { | ||
448 | ar->debug.debugfs_phy = debugfs_create_dir("ath10k", | ||
449 | ar->hw->wiphy->debugfsdir); | ||
450 | |||
451 | if (!ar->debug.debugfs_phy) | ||
452 | return -ENOMEM; | ||
453 | |||
454 | init_completion(&ar->debug.event_stats_compl); | ||
455 | |||
456 | debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar, | ||
457 | &fops_fw_stats); | ||
458 | |||
459 | debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar, | ||
460 | &fops_wmi_services); | ||
461 | |||
462 | return 0; | ||
463 | } | ||
464 | #endif /* CONFIG_ATH10K_DEBUGFS */ | ||
465 | |||
466 | #ifdef CONFIG_ATH10K_DEBUG | ||
467 | void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...) | ||
468 | { | ||
469 | struct va_format vaf; | ||
470 | va_list args; | ||
471 | |||
472 | va_start(args, fmt); | ||
473 | |||
474 | vaf.fmt = fmt; | ||
475 | vaf.va = &args; | ||
476 | |||
477 | if (ath10k_debug_mask & mask) | ||
478 | ath10k_printk(KERN_DEBUG, "%pV", &vaf); | ||
479 | |||
480 | trace_ath10k_log_dbg(mask, &vaf); | ||
481 | |||
482 | va_end(args); | ||
483 | } | ||
484 | EXPORT_SYMBOL(ath10k_dbg); | ||
485 | |||
486 | void ath10k_dbg_dump(enum ath10k_debug_mask mask, | ||
487 | const char *msg, const char *prefix, | ||
488 | const void *buf, size_t len) | ||
489 | { | ||
490 | if (ath10k_debug_mask & mask) { | ||
491 | if (msg) | ||
492 | ath10k_dbg(mask, "%s\n", msg); | ||
493 | |||
494 | print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len); | ||
495 | } | ||
496 | |||
497 | /* tracing code doesn't like null strings :/ */ | ||
498 | trace_ath10k_log_dbg_dump(msg ? msg : "", prefix ? prefix : "", | ||
499 | buf, len); | ||
500 | } | ||
501 | EXPORT_SYMBOL(ath10k_dbg_dump); | ||
502 | |||
503 | #endif /* CONFIG_ATH10K_DEBUG */ | ||
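Both debugfs read handlers above use the same bounded-formatting idiom: accumulate lines into a fixed kernel buffer with scnprintf(), whose return value is the number of bytes actually written (so the running length can never pass the buffer size), then copy the result out with simple_read_from_buffer(). A stripped-down sketch of that pattern, with made-up counters standing in for the real target stats:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* Sketch of the scnprintf accumulation idiom used by the readers above.
 * Because scnprintf() reports what was written rather than what was
 * wanted, 'len' stays within 'buf_len' and overflow simply truncates. */
static ssize_t my_read_two_counters(char __user *user_buf, size_t count,
				    loff_t *ppos, u32 tx_frames, u32 rx_frames)
{
	unsigned int len = 0, buf_len = 256;
	ssize_t ret;
	char *buf;

	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "TX frame count", tx_frames);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "RX frame count", rx_frames);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);
	return ret;
}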
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h new file mode 100644 index 000000000000..168140c54028 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/debug.h | |||
@@ -0,0 +1,90 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #ifndef _DEBUG_H_ | ||
19 | #define _DEBUG_H_ | ||
20 | |||
21 | #include <linux/types.h> | ||
22 | #include "trace.h" | ||
23 | |||
24 | enum ath10k_debug_mask { | ||
25 | ATH10K_DBG_PCI = 0x00000001, | ||
26 | ATH10K_DBG_WMI = 0x00000002, | ||
27 | ATH10K_DBG_HTC = 0x00000004, | ||
28 | ATH10K_DBG_HTT = 0x00000008, | ||
29 | ATH10K_DBG_MAC = 0x00000010, | ||
30 | ATH10K_DBG_CORE = 0x00000020, | ||
31 | ATH10K_DBG_PCI_DUMP = 0x00000040, | ||
32 | ATH10K_DBG_HTT_DUMP = 0x00000080, | ||
33 | ATH10K_DBG_MGMT = 0x00000100, | ||
34 | ATH10K_DBG_DATA = 0x00000200, | ||
35 | ATH10K_DBG_ANY = 0xffffffff, | ||
36 | }; | ||
37 | |||
38 | extern unsigned int ath10k_debug_mask; | ||
39 | |||
40 | extern __printf(1, 2) int ath10k_info(const char *fmt, ...); | ||
41 | extern __printf(1, 2) int ath10k_err(const char *fmt, ...); | ||
42 | extern __printf(1, 2) int ath10k_warn(const char *fmt, ...); | ||
43 | |||
44 | #ifdef CONFIG_ATH10K_DEBUGFS | ||
45 | int ath10k_debug_create(struct ath10k *ar); | ||
46 | void ath10k_debug_read_service_map(struct ath10k *ar, | ||
47 | void *service_map, | ||
48 | size_t map_size); | ||
49 | void ath10k_debug_read_target_stats(struct ath10k *ar, | ||
50 | struct wmi_stats_event *ev); | ||
51 | |||
52 | #else | ||
53 | static inline int ath10k_debug_create(struct ath10k *ar) | ||
54 | { | ||
55 | return 0; | ||
56 | } | ||
57 | |||
58 | static inline void ath10k_debug_read_service_map(struct ath10k *ar, | ||
59 | void *service_map, | ||
60 | size_t map_size) | ||
61 | { | ||
62 | } | ||
63 | |||
64 | static inline void ath10k_debug_read_target_stats(struct ath10k *ar, | ||
65 | struct wmi_stats_event *ev) | ||
66 | { | ||
67 | } | ||
68 | #endif /* CONFIG_ATH10K_DEBUGFS */ | ||
69 | |||
70 | #ifdef CONFIG_ATH10K_DEBUG | ||
71 | extern __printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask, | ||
72 | const char *fmt, ...); | ||
73 | void ath10k_dbg_dump(enum ath10k_debug_mask mask, | ||
74 | const char *msg, const char *prefix, | ||
75 | const void *buf, size_t len); | ||
76 | #else /* CONFIG_ATH10K_DEBUG */ | ||
77 | |||
78 | static inline int ath10k_dbg(enum ath10k_debug_mask dbg_mask, | ||
79 | const char *fmt, ...) | ||
80 | { | ||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | static inline void ath10k_dbg_dump(enum ath10k_debug_mask mask, | ||
85 | const char *msg, const char *prefix, | ||
86 | const void *buf, size_t len) | ||
87 | { | ||
88 | } | ||
89 | #endif /* CONFIG_ATH10K_DEBUG */ | ||
90 | #endif /* _DEBUG_H_ */ | ||
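For completeness, ath10k_debug_mask is an ordinary bitmask over the domains in the enum above, so several domains can be enabled at once and ATH10K_DBG_ANY (all bits set) matches whenever any debugging is enabled. A tiny illustrative snippet; the direct assignment stands in for however the mask is actually configured (typically a module parameter), and my_log_example() is made up:

#include <linux/printk.h>
#include "debug.h"

/* illustrative only: enable the WMI and HTC domains together */
static unsigned int my_debug_mask = ATH10K_DBG_WMI | ATH10K_DBG_HTC;

static void my_log_example(void)
{
	if (my_debug_mask & ATH10K_DBG_HTC)
		pr_debug("HTC debugging enabled\n");

	/* ATH10K_DBG_ANY is 0xffffffff, so it matches any non-zero mask */
	if (my_debug_mask & ATH10K_DBG_ANY)
		pr_debug("some debugging enabled\n");
}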
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h new file mode 100644 index 000000000000..73a24d44d1b4 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/hif.h | |||
@@ -0,0 +1,137 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #ifndef _HIF_H_ | ||
19 | #define _HIF_H_ | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include "core.h" | ||
23 | |||
24 | struct ath10k_hif_cb { | ||
25 | int (*tx_completion)(struct ath10k *ar, | ||
26 | struct sk_buff *wbuf, | ||
27 | unsigned transfer_id); | ||
28 | int (*rx_completion)(struct ath10k *ar, | ||
29 | struct sk_buff *wbuf, | ||
30 | u8 pipe_id); | ||
31 | }; | ||
32 | |||
33 | struct ath10k_hif_ops { | ||
34 | /* Send the head of a buffer to HIF for transmission to the target. */ | ||
35 | int (*send_head)(struct ath10k *ar, u8 pipe_id, | ||
36 | unsigned int transfer_id, | ||
37 | unsigned int nbytes, | ||
38 | struct sk_buff *buf); | ||
39 | |||
40 | /* | ||
41 | * API to handle HIF-specific BMI message exchanges, this API is | ||
42 | * synchronous and only allowed to be called from a context that | ||
43 | * can block (sleep) | ||
44 | */ | ||
45 | int (*exchange_bmi_msg)(struct ath10k *ar, | ||
46 | void *request, u32 request_len, | ||
47 | void *response, u32 *response_len); | ||
48 | |||
49 | int (*start)(struct ath10k *ar); | ||
50 | |||
51 | void (*stop)(struct ath10k *ar); | ||
52 | |||
53 | int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id, | ||
54 | u8 *ul_pipe, u8 *dl_pipe, | ||
55 | int *ul_is_polled, int *dl_is_polled); | ||
56 | |||
57 | void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe); | ||
58 | |||
59 | /* | ||
60 | * Check if prior sends have completed. | ||
61 | * | ||
62 | * Check whether the pipe in question has any completed | ||
63 | * sends that have not yet been processed. | ||
64 | * This function is only relevant for HIF pipes that are configured | ||
65 | * to be polled rather than interrupt-driven. | ||
66 | */ | ||
67 | void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force); | ||
68 | |||
69 | void (*init)(struct ath10k *ar, | ||
70 | struct ath10k_hif_cb *callbacks); | ||
71 | |||
72 | u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id); | ||
73 | }; | ||
74 | |||
75 | |||
76 | static inline int ath10k_hif_send_head(struct ath10k *ar, u8 pipe_id, | ||
77 | unsigned int transfer_id, | ||
78 | unsigned int nbytes, | ||
79 | struct sk_buff *buf) | ||
80 | { | ||
81 | return ar->hif.ops->send_head(ar, pipe_id, transfer_id, nbytes, buf); | ||
82 | } | ||
83 | |||
84 | static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar, | ||
85 | void *request, u32 request_len, | ||
86 | void *response, u32 *response_len) | ||
87 | { | ||
88 | return ar->hif.ops->exchange_bmi_msg(ar, request, request_len, | ||
89 | response, response_len); | ||
90 | } | ||
91 | |||
92 | static inline int ath10k_hif_start(struct ath10k *ar) | ||
93 | { | ||
94 | return ar->hif.ops->start(ar); | ||
95 | } | ||
96 | |||
97 | static inline void ath10k_hif_stop(struct ath10k *ar) | ||
98 | { | ||
99 | return ar->hif.ops->stop(ar); | ||
100 | } | ||
101 | |||
102 | static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar, | ||
103 | u16 service_id, | ||
104 | u8 *ul_pipe, u8 *dl_pipe, | ||
105 | int *ul_is_polled, | ||
106 | int *dl_is_polled) | ||
107 | { | ||
108 | return ar->hif.ops->map_service_to_pipe(ar, service_id, | ||
109 | ul_pipe, dl_pipe, | ||
110 | ul_is_polled, dl_is_polled); | ||
111 | } | ||
112 | |||
113 | static inline void ath10k_hif_get_default_pipe(struct ath10k *ar, | ||
114 | u8 *ul_pipe, u8 *dl_pipe) | ||
115 | { | ||
116 | ar->hif.ops->get_default_pipe(ar, ul_pipe, dl_pipe); | ||
117 | } | ||
118 | |||
119 | static inline void ath10k_hif_send_complete_check(struct ath10k *ar, | ||
120 | u8 pipe_id, int force) | ||
121 | { | ||
122 | ar->hif.ops->send_complete_check(ar, pipe_id, force); | ||
123 | } | ||
124 | |||
125 | static inline void ath10k_hif_init(struct ath10k *ar, | ||
126 | struct ath10k_hif_cb *callbacks) | ||
127 | { | ||
128 | ar->hif.ops->init(ar, callbacks); | ||
129 | } | ||
130 | |||
131 | static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar, | ||
132 | u8 pipe_id) | ||
133 | { | ||
134 | return ar->hif.ops->get_free_queue_number(ar, pipe_id); | ||
135 | } | ||
136 | |||
137 | #endif /* _HIF_H_ */ | ||
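The static inline wrappers above simply dispatch through ar->hif.ops, so a bus backend only has to populate the ops table and hand it to ath10k_core_create(). A hypothetical skeleton of such a backend; every my_* symbol is a stand-in, and only the shape of the table comes from this header:

#include "hif.h"

static int my_hif_start(struct ath10k *ar)
{
	/* bring up the bus-specific transport */
	return 0;
}

static void my_hif_stop(struct ath10k *ar)
{
	/* quiesce the transport */
}

static int my_hif_send_head(struct ath10k *ar, u8 pipe_id,
			    unsigned int transfer_id, unsigned int nbytes,
			    struct sk_buff *buf)
{
	/* hand the buffer to the bus-specific TX path for 'pipe_id' */
	return 0;
}

static const struct ath10k_hif_ops my_hif_ops = {
	.start		= my_hif_start,
	.stop		= my_hif_stop,
	.send_head	= my_hif_send_head,
	/* .exchange_bmi_msg, .map_service_to_pipe, .get_default_pipe,
	 * .send_complete_check, .init and .get_free_queue_number would be
	 * filled in the same way by a real backend. */
};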
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c new file mode 100644 index 000000000000..74363c949392 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/htc.c | |||
@@ -0,0 +1,1000 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #include "core.h" | ||
19 | #include "hif.h" | ||
20 | #include "debug.h" | ||
21 | |||
22 | /********/ | ||
23 | /* Send */ | ||
24 | /********/ | ||
25 | |||
26 | static inline void ath10k_htc_send_complete_check(struct ath10k_htc_ep *ep, | ||
27 | int force) | ||
28 | { | ||
29 | /* | ||
30 | 	 * Check whether HIF has any prior sends that have finished | ||
31 | 	 * but have not yet had their post-processing done. | ||
32 | */ | ||
33 | ath10k_hif_send_complete_check(ep->htc->ar, ep->ul_pipe_id, force); | ||
34 | } | ||
35 | |||
36 | static void ath10k_htc_control_tx_complete(struct ath10k *ar, | ||
37 | struct sk_buff *skb) | ||
38 | { | ||
39 | kfree_skb(skb); | ||
40 | } | ||
41 | |||
42 | static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar) | ||
43 | { | ||
44 | struct sk_buff *skb; | ||
45 | struct ath10k_skb_cb *skb_cb; | ||
46 | |||
47 | skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE); | ||
48 | if (!skb) { | ||
49 | ath10k_warn("Unable to allocate ctrl skb\n"); | ||
50 | return NULL; | ||
51 | } | ||
52 | |||
53 | skb_reserve(skb, 20); /* FIXME: why 20 bytes? */ | ||
54 | WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); | ||
55 | |||
56 | skb_cb = ATH10K_SKB_CB(skb); | ||
57 | memset(skb_cb, 0, sizeof(*skb_cb)); | ||
58 | |||
59 | ath10k_dbg(ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb); | ||
60 | return skb; | ||
61 | } | ||
62 | |||
63 | static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc, | ||
64 | struct sk_buff *skb) | ||
65 | { | ||
66 | ath10k_skb_unmap(htc->ar->dev, skb); | ||
67 | skb_pull(skb, sizeof(struct ath10k_htc_hdr)); | ||
68 | } | ||
69 | |||
70 | static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep, | ||
71 | struct sk_buff *skb) | ||
72 | { | ||
73 | ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__, | ||
74 | ep->eid, skb); | ||
75 | |||
76 | ath10k_htc_restore_tx_skb(ep->htc, skb); | ||
77 | |||
78 | if (!ep->ep_ops.ep_tx_complete) { | ||
79 | ath10k_warn("no tx handler for eid %d\n", ep->eid); | ||
80 | dev_kfree_skb_any(skb); | ||
81 | return; | ||
82 | } | ||
83 | |||
84 | ep->ep_ops.ep_tx_complete(ep->htc->ar, skb); | ||
85 | } | ||
86 | |||
87 | /* assumes tx_lock is held */ | ||
88 | static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep) | ||
89 | { | ||
90 | if (!ep->tx_credit_flow_enabled) | ||
91 | return false; | ||
92 | if (ep->tx_credits >= ep->tx_credits_per_max_message) | ||
93 | return false; | ||
94 | |||
95 | ath10k_dbg(ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n", | ||
96 | ep->eid); | ||
97 | return true; | ||
98 | } | ||
99 | |||
100 | static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep, | ||
101 | struct sk_buff *skb) | ||
102 | { | ||
103 | struct ath10k_htc_hdr *hdr; | ||
104 | |||
105 | hdr = (struct ath10k_htc_hdr *)skb->data; | ||
106 | memset(hdr, 0, sizeof(*hdr)); | ||
107 | |||
108 | hdr->eid = ep->eid; | ||
109 | hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr)); | ||
110 | |||
111 | spin_lock_bh(&ep->htc->tx_lock); | ||
112 | hdr->seq_no = ep->seq_no++; | ||
113 | |||
114 | if (ath10k_htc_ep_need_credit_update(ep)) | ||
115 | hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE; | ||
116 | |||
117 | spin_unlock_bh(&ep->htc->tx_lock); | ||
118 | } | ||
119 | |||
120 | static int ath10k_htc_issue_skb(struct ath10k_htc *htc, | ||
121 | struct ath10k_htc_ep *ep, | ||
122 | struct sk_buff *skb, | ||
123 | u8 credits) | ||
124 | { | ||
125 | struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); | ||
126 | int ret; | ||
127 | |||
128 | ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__, | ||
129 | ep->eid, skb); | ||
130 | |||
131 | ath10k_htc_prepare_tx_skb(ep, skb); | ||
132 | |||
133 | ret = ath10k_skb_map(htc->ar->dev, skb); | ||
134 | if (ret) | ||
135 | goto err; | ||
136 | |||
137 | ret = ath10k_hif_send_head(htc->ar, | ||
138 | ep->ul_pipe_id, | ||
139 | ep->eid, | ||
140 | skb->len, | ||
141 | skb); | ||
142 | if (unlikely(ret)) | ||
143 | goto err; | ||
144 | |||
145 | return 0; | ||
146 | err: | ||
147 | ath10k_warn("HTC issue failed: %d\n", ret); | ||
148 | |||
149 | spin_lock_bh(&htc->tx_lock); | ||
150 | ep->tx_credits += credits; | ||
151 | spin_unlock_bh(&htc->tx_lock); | ||
152 | |||
153 | /* this is the simplest way to handle out-of-resources for non-credit | ||
154 | * based endpoints. credit based endpoints can still get -ENOSR, but | ||
155 | * this is highly unlikely as credit reservation should prevent that */ | ||
156 | if (ret == -ENOSR) { | ||
157 | spin_lock_bh(&htc->tx_lock); | ||
158 | __skb_queue_head(&ep->tx_queue, skb); | ||
159 | spin_unlock_bh(&htc->tx_lock); | ||
160 | |||
161 | return ret; | ||
162 | } | ||
163 | |||
164 | skb_cb->is_aborted = true; | ||
165 | ath10k_htc_notify_tx_completion(ep, skb); | ||
166 | |||
167 | return ret; | ||
168 | } | ||
169 | |||
170 | static struct sk_buff *ath10k_htc_get_skb_credit_based(struct ath10k_htc *htc, | ||
171 | struct ath10k_htc_ep *ep, | ||
172 | u8 *credits) | ||
173 | { | ||
174 | struct sk_buff *skb; | ||
175 | struct ath10k_skb_cb *skb_cb; | ||
176 | int credits_required; | ||
177 | int remainder; | ||
178 | unsigned int transfer_len; | ||
179 | |||
180 | lockdep_assert_held(&htc->tx_lock); | ||
181 | |||
182 | skb = __skb_dequeue(&ep->tx_queue); | ||
183 | if (!skb) | ||
184 | return NULL; | ||
185 | |||
186 | skb_cb = ATH10K_SKB_CB(skb); | ||
187 | transfer_len = skb->len; | ||
188 | |||
189 | if (likely(transfer_len <= htc->target_credit_size)) { | ||
190 | credits_required = 1; | ||
191 | } else { | ||
192 | /* figure out how many credits this message requires */ | ||
193 | credits_required = transfer_len / htc->target_credit_size; | ||
194 | remainder = transfer_len % htc->target_credit_size; | ||
195 | |||
196 | if (remainder) | ||
197 | credits_required++; | ||
198 | } | ||
199 | |||
200 | ath10k_dbg(ATH10K_DBG_HTC, "Credits required %d got %d\n", | ||
201 | credits_required, ep->tx_credits); | ||
202 | |||
203 | if (ep->tx_credits < credits_required) { | ||
204 | __skb_queue_head(&ep->tx_queue, skb); | ||
205 | return NULL; | ||
206 | } | ||
207 | |||
208 | ep->tx_credits -= credits_required; | ||
209 | *credits = credits_required; | ||
210 | return skb; | ||
211 | } | ||
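The credit calculation above is a ceiling division: one credit per target_credit_size bytes, rounded up, so for example a 1200-byte message against a 512-byte credit size costs three credits. For any non-empty message this is exactly what the kernel's DIV_ROUND_UP() computes (a zero-length message would differ, since the code above always charges at least one credit):

#include <linux/kernel.h>

/* Equivalent ceiling-division form of the credit arithmetic above,
 * valid for transfer_len > 0. */
static int my_credits_required(unsigned int transfer_len,
			       unsigned int target_credit_size)
{
	return DIV_ROUND_UP(transfer_len, target_credit_size);
	/* e.g. DIV_ROUND_UP(1200, 512) == 3 */
}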
212 | |||
213 | static void ath10k_htc_send_work(struct work_struct *work) | ||
214 | { | ||
215 | struct ath10k_htc_ep *ep = container_of(work, | ||
216 | struct ath10k_htc_ep, send_work); | ||
217 | struct ath10k_htc *htc = ep->htc; | ||
218 | struct sk_buff *skb; | ||
219 | u8 credits = 0; | ||
220 | int ret; | ||
221 | |||
222 | while (true) { | ||
223 | if (ep->ul_is_polled) | ||
224 | ath10k_htc_send_complete_check(ep, 0); | ||
225 | |||
226 | spin_lock_bh(&htc->tx_lock); | ||
227 | if (ep->tx_credit_flow_enabled) | ||
228 | skb = ath10k_htc_get_skb_credit_based(htc, ep, | ||
229 | &credits); | ||
230 | else | ||
231 | skb = __skb_dequeue(&ep->tx_queue); | ||
232 | spin_unlock_bh(&htc->tx_lock); | ||
233 | |||
234 | if (!skb) | ||
235 | break; | ||
236 | |||
237 | ret = ath10k_htc_issue_skb(htc, ep, skb, credits); | ||
238 | if (ret == -ENOSR) | ||
239 | break; | ||
240 | } | ||
241 | } | ||
242 | |||
243 | int ath10k_htc_send(struct ath10k_htc *htc, | ||
244 | enum ath10k_htc_ep_id eid, | ||
245 | struct sk_buff *skb) | ||
246 | { | ||
247 | struct ath10k_htc_ep *ep = &htc->endpoint[eid]; | ||
248 | |||
249 | if (eid >= ATH10K_HTC_EP_COUNT) { | ||
250 | ath10k_warn("Invalid endpoint id: %d\n", eid); | ||
251 | return -ENOENT; | ||
252 | } | ||
253 | |||
254 | skb_push(skb, sizeof(struct ath10k_htc_hdr)); | ||
255 | |||
256 | spin_lock_bh(&htc->tx_lock); | ||
257 | __skb_queue_tail(&ep->tx_queue, skb); | ||
258 | spin_unlock_bh(&htc->tx_lock); | ||
259 | |||
260 | queue_work(htc->ar->workqueue, &ep->send_work); | ||
261 | return 0; | ||
262 | } | ||
263 | |||
264 | static int ath10k_htc_tx_completion_handler(struct ath10k *ar, | ||
265 | struct sk_buff *skb, | ||
266 | unsigned int eid) | ||
267 | { | ||
268 | struct ath10k_htc *htc = ar->htc; | ||
269 | struct ath10k_htc_ep *ep = &htc->endpoint[eid]; | ||
270 | bool stopping; | ||
271 | |||
272 | ath10k_htc_notify_tx_completion(ep, skb); | ||
273 | /* the skb now belongs to the completion handler */ | ||
274 | |||
275 | spin_lock_bh(&htc->tx_lock); | ||
276 | stopping = htc->stopping; | ||
277 | spin_unlock_bh(&htc->tx_lock); | ||
278 | |||
279 | if (!ep->tx_credit_flow_enabled && !stopping) | ||
280 | /* | ||
281 | * note: when using TX credit flow, the re-checking of | ||
282 | * queues happens when credits flow back from the target. | ||
283 | * in the non-TX credit case, we recheck after the packet | ||
284 | * completes | ||
285 | */ | ||
286 | queue_work(ar->workqueue, &ep->send_work); | ||
287 | |||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | /* flush endpoint TX queue */ | ||
292 | static void ath10k_htc_flush_endpoint_tx(struct ath10k_htc *htc, | ||
293 | struct ath10k_htc_ep *ep) | ||
294 | { | ||
295 | struct sk_buff *skb; | ||
296 | struct ath10k_skb_cb *skb_cb; | ||
297 | |||
298 | spin_lock_bh(&htc->tx_lock); | ||
299 | for (;;) { | ||
300 | skb = __skb_dequeue(&ep->tx_queue); | ||
301 | if (!skb) | ||
302 | break; | ||
303 | |||
304 | skb_cb = ATH10K_SKB_CB(skb); | ||
305 | skb_cb->is_aborted = true; | ||
306 | ath10k_htc_notify_tx_completion(ep, skb); | ||
307 | } | ||
308 | spin_unlock_bh(&htc->tx_lock); | ||
309 | |||
310 | cancel_work_sync(&ep->send_work); | ||
311 | } | ||
312 | |||
313 | /***********/ | ||
314 | /* Receive */ | ||
315 | /***********/ | ||
316 | |||
317 | static void | ||
318 | ath10k_htc_process_credit_report(struct ath10k_htc *htc, | ||
319 | const struct ath10k_htc_credit_report *report, | ||
320 | int len, | ||
321 | enum ath10k_htc_ep_id eid) | ||
322 | { | ||
323 | struct ath10k_htc_ep *ep; | ||
324 | int i, n_reports; | ||
325 | |||
326 | if (len % sizeof(*report)) | ||
327 | ath10k_warn("Uneven credit report len %d", len); | ||
328 | |||
329 | n_reports = len / sizeof(*report); | ||
330 | |||
331 | spin_lock_bh(&htc->tx_lock); | ||
332 | for (i = 0; i < n_reports; i++, report++) { | ||
333 | if (report->eid >= ATH10K_HTC_EP_COUNT) | ||
334 | break; | ||
335 | |||
336 | ath10k_dbg(ATH10K_DBG_HTC, "ep %d got %d credits\n", | ||
337 | report->eid, report->credits); | ||
338 | |||
339 | ep = &htc->endpoint[report->eid]; | ||
340 | ep->tx_credits += report->credits; | ||
341 | |||
342 | if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue)) | ||
343 | queue_work(htc->ar->workqueue, &ep->send_work); | ||
344 | } | ||
345 | spin_unlock_bh(&htc->tx_lock); | ||
346 | } | ||
347 | |||
348 | static int ath10k_htc_process_trailer(struct ath10k_htc *htc, | ||
349 | u8 *buffer, | ||
350 | int length, | ||
351 | enum ath10k_htc_ep_id src_eid) | ||
352 | { | ||
353 | int status = 0; | ||
354 | struct ath10k_htc_record *record; | ||
355 | u8 *orig_buffer; | ||
356 | int orig_length; | ||
357 | size_t len; | ||
358 | |||
359 | orig_buffer = buffer; | ||
360 | orig_length = length; | ||
361 | |||
362 | while (length > 0) { | ||
363 | record = (struct ath10k_htc_record *)buffer; | ||
364 | |||
365 | if (length < sizeof(record->hdr)) { | ||
366 | status = -EINVAL; | ||
367 | break; | ||
368 | } | ||
369 | |||
370 | if (record->hdr.len > length) { | ||
371 | /* no room left in buffer for record */ | ||
372 | ath10k_warn("Invalid record length: %d\n", | ||
373 | record->hdr.len); | ||
374 | status = -EINVAL; | ||
375 | break; | ||
376 | } | ||
377 | |||
378 | switch (record->hdr.id) { | ||
379 | case ATH10K_HTC_RECORD_CREDITS: | ||
380 | len = sizeof(struct ath10k_htc_credit_report); | ||
381 | if (record->hdr.len < len) { | ||
382 | ath10k_warn("Credit report too long\n"); | ||
383 | status = -EINVAL; | ||
384 | break; | ||
385 | } | ||
386 | ath10k_htc_process_credit_report(htc, | ||
387 | record->credit_report, | ||
388 | record->hdr.len, | ||
389 | src_eid); | ||
390 | break; | ||
391 | default: | ||
392 | ath10k_warn("Unhandled record: id:%d length:%d\n", | ||
393 | record->hdr.id, record->hdr.len); | ||
394 | break; | ||
395 | } | ||
396 | |||
397 | if (status) | ||
398 | break; | ||
399 | |||
400 | /* multiple records may be present in a trailer */ | ||
401 | buffer += sizeof(record->hdr) + record->hdr.len; | ||
402 | length -= sizeof(record->hdr) + record->hdr.len; | ||
403 | } | ||
404 | |||
405 | if (status) | ||
406 | ath10k_dbg_dump(ATH10K_DBG_HTC, "htc rx bad trailer", "", | ||
407 | orig_buffer, orig_length); | ||
408 | |||
409 | return status; | ||
410 | } | ||
411 | |||
412 | static int ath10k_htc_rx_completion_handler(struct ath10k *ar, | ||
413 | struct sk_buff *skb, | ||
414 | u8 pipe_id) | ||
415 | { | ||
416 | int status = 0; | ||
417 | struct ath10k_htc *htc = ar->htc; | ||
418 | struct ath10k_htc_hdr *hdr; | ||
419 | struct ath10k_htc_ep *ep; | ||
420 | u16 payload_len; | ||
421 | u32 trailer_len = 0; | ||
422 | size_t min_len; | ||
423 | u8 eid; | ||
424 | bool trailer_present; | ||
425 | |||
426 | hdr = (struct ath10k_htc_hdr *)skb->data; | ||
427 | skb_pull(skb, sizeof(*hdr)); | ||
428 | |||
429 | eid = hdr->eid; | ||
430 | |||
431 | if (eid >= ATH10K_HTC_EP_COUNT) { | ||
432 | ath10k_warn("HTC Rx: invalid eid %d\n", eid); | ||
433 | ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad header", "", | ||
434 | hdr, sizeof(*hdr)); | ||
435 | status = -EINVAL; | ||
436 | goto out; | ||
437 | } | ||
438 | |||
439 | ep = &htc->endpoint[eid]; | ||
440 | |||
441 | /* | ||
442 | * If this endpoint that received a message from the target has | ||
443 | * a to-target HIF pipe whose send completions are polled rather | ||
444 | * than interrupt-driven, this is a good point to ask HIF to check | ||
445 | * whether it has any completed sends to handle. | ||
446 | */ | ||
447 | if (ep->ul_is_polled) | ||
448 | ath10k_htc_send_complete_check(ep, 1); | ||
449 | |||
450 | payload_len = __le16_to_cpu(hdr->len); | ||
451 | |||
452 | if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) { | ||
453 | ath10k_warn("HTC rx frame too long, len: %zu\n", | ||
454 | payload_len + sizeof(*hdr)); | ||
455 | ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len", "", | ||
456 | hdr, sizeof(*hdr)); | ||
457 | status = -EINVAL; | ||
458 | goto out; | ||
459 | } | ||
460 | |||
461 | if (skb->len < payload_len) { | ||
462 | ath10k_dbg(ATH10K_DBG_HTC, | ||
463 | "HTC Rx: insufficient length, got %d, expected %d\n", | ||
464 | skb->len, payload_len); | ||
465 | ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len", | ||
466 | "", hdr, sizeof(*hdr)); | ||
467 | status = -EINVAL; | ||
468 | goto out; | ||
469 | } | ||
470 | |||
471 | /* get flags to check for trailer */ | ||
472 | trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT; | ||
473 | if (trailer_present) { | ||
474 | u8 *trailer; | ||
475 | |||
476 | trailer_len = hdr->trailer_len; | ||
477 | min_len = sizeof(struct ath10k_ath10k_htc_record_hdr); | ||
478 | |||
479 | if ((trailer_len < min_len) || | ||
480 | (trailer_len > payload_len)) { | ||
481 | ath10k_warn("Invalid trailer length: %d\n", | ||
482 | trailer_len); | ||
483 | status = -EPROTO; | ||
484 | goto out; | ||
485 | } | ||
486 | |||
487 | trailer = (u8 *)hdr; | ||
488 | trailer += sizeof(*hdr); | ||
489 | trailer += payload_len; | ||
490 | trailer -= trailer_len; | ||
491 | status = ath10k_htc_process_trailer(htc, trailer, | ||
492 | trailer_len, hdr->eid); | ||
493 | if (status) | ||
494 | goto out; | ||
495 | |||
496 | skb_trim(skb, skb->len - trailer_len); | ||
497 | } | ||
498 | |||
499 | if (((int)payload_len - (int)trailer_len) <= 0) | ||
500 | /* zero length packet with trailer data, just drop these */ | ||
501 | goto out; | ||
502 | |||
503 | if (eid == ATH10K_HTC_EP_0) { | ||
504 | struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data; | ||
505 | |||
506 | switch (__le16_to_cpu(msg->hdr.message_id)) { | ||
507 | default: | ||
508 | /* handle HTC control message */ | ||
509 | if (completion_done(&htc->ctl_resp)) { | ||
510 | /* | ||
511 | * this is a fatal error, target should not be | ||
512 | * sending unsolicited messages on the ep 0 | ||
513 | */ | ||
514 | ath10k_warn("HTC rx ctrl still processing\n"); | ||
515 | status = -EINVAL; | ||
516 | complete(&htc->ctl_resp); | ||
517 | goto out; | ||
518 | } | ||
519 | |||
520 | htc->control_resp_len = | ||
521 | min_t(int, skb->len, | ||
522 | ATH10K_HTC_MAX_CTRL_MSG_LEN); | ||
523 | |||
524 | memcpy(htc->control_resp_buffer, skb->data, | ||
525 | htc->control_resp_len); | ||
526 | |||
527 | complete(&htc->ctl_resp); | ||
528 | break; | ||
529 | case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE: | ||
530 | htc->htc_ops.target_send_suspend_complete(ar); | ||
531 | } | ||
532 | goto out; | ||
533 | } | ||
534 | |||
535 | ath10k_dbg(ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n", | ||
536 | eid, skb); | ||
537 | ep->ep_ops.ep_rx_complete(ar, skb); | ||
538 | |||
539 | /* skb is now owned by the rx completion handler */ | ||
540 | skb = NULL; | ||
541 | out: | ||
542 | kfree_skb(skb); | ||
543 | |||
544 | return status; | ||
545 | } | ||
546 | |||
547 | static void ath10k_htc_control_rx_complete(struct ath10k *ar, | ||
548 | struct sk_buff *skb) | ||
549 | { | ||
550 | /* This is unexpected. FW is not supposed to send regular rx on this | ||
551 | * endpoint. */ | ||
552 | ath10k_warn("unexpected htc rx\n"); | ||
553 | kfree_skb(skb); | ||
554 | } | ||
555 | |||
556 | /***************/ | ||
557 | /* Init/Deinit */ | ||
558 | /***************/ | ||
559 | |||
560 | static const char *htc_service_name(enum ath10k_htc_svc_id id) | ||
561 | { | ||
562 | switch (id) { | ||
563 | case ATH10K_HTC_SVC_ID_RESERVED: | ||
564 | return "Reserved"; | ||
565 | case ATH10K_HTC_SVC_ID_RSVD_CTRL: | ||
566 | return "Control"; | ||
567 | case ATH10K_HTC_SVC_ID_WMI_CONTROL: | ||
568 | return "WMI"; | ||
569 | case ATH10K_HTC_SVC_ID_WMI_DATA_BE: | ||
570 | return "DATA BE"; | ||
571 | case ATH10K_HTC_SVC_ID_WMI_DATA_BK: | ||
572 | return "DATA BK"; | ||
573 | case ATH10K_HTC_SVC_ID_WMI_DATA_VI: | ||
574 | return "DATA VI"; | ||
575 | case ATH10K_HTC_SVC_ID_WMI_DATA_VO: | ||
576 | return "DATA VO"; | ||
577 | case ATH10K_HTC_SVC_ID_NMI_CONTROL: | ||
578 | return "NMI Control"; | ||
579 | case ATH10K_HTC_SVC_ID_NMI_DATA: | ||
580 | return "NMI Data"; | ||
581 | case ATH10K_HTC_SVC_ID_HTT_DATA_MSG: | ||
582 | return "HTT Data"; | ||
583 | case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS: | ||
584 | return "RAW"; | ||
585 | } | ||
586 | |||
587 | return "Unknown"; | ||
588 | } | ||
589 | |||
590 | static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc) | ||
591 | { | ||
592 | struct ath10k_htc_ep *ep; | ||
593 | int i; | ||
594 | |||
595 | for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) { | ||
596 | ep = &htc->endpoint[i]; | ||
597 | ep->service_id = ATH10K_HTC_SVC_ID_UNUSED; | ||
598 | ep->max_ep_message_len = 0; | ||
599 | ep->max_tx_queue_depth = 0; | ||
600 | ep->eid = i; | ||
601 | skb_queue_head_init(&ep->tx_queue); | ||
602 | ep->htc = htc; | ||
603 | ep->tx_credit_flow_enabled = true; | ||
604 | INIT_WORK(&ep->send_work, ath10k_htc_send_work); | ||
605 | } | ||
606 | } | ||
607 | |||
608 | static void ath10k_htc_setup_target_buffer_assignments(struct ath10k_htc *htc) | ||
609 | { | ||
610 | struct ath10k_htc_svc_tx_credits *entry; | ||
611 | |||
612 | entry = &htc->service_tx_alloc[0]; | ||
613 | |||
614 | /* | ||
615 | * for PCIE allocate all credits/HTC buffers to WMI. | ||
616 | * no buffers are used/required for data. data always | ||
617 | * remains on host. | ||
618 | */ | ||
619 | entry++; | ||
620 | entry->service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL; | ||
621 | entry->credit_allocation = htc->total_transmit_credits; | ||
622 | } | ||
623 | |||
624 | static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc, | ||
625 | u16 service_id) | ||
626 | { | ||
627 | u8 allocation = 0; | ||
628 | int i; | ||
629 | |||
630 | for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) { | ||
631 | if (htc->service_tx_alloc[i].service_id == service_id) | ||
632 | allocation = | ||
633 | htc->service_tx_alloc[i].credit_allocation; | ||
634 | } | ||
635 | |||
636 | return allocation; | ||
637 | } | ||
638 | |||
639 | int ath10k_htc_wait_target(struct ath10k_htc *htc) | ||
640 | { | ||
641 | int status = 0; | ||
642 | struct ath10k_htc_svc_conn_req conn_req; | ||
643 | struct ath10k_htc_svc_conn_resp conn_resp; | ||
644 | struct ath10k_htc_msg *msg; | ||
645 | u16 message_id; | ||
646 | u16 credit_count; | ||
647 | u16 credit_size; | ||
648 | |||
649 | INIT_COMPLETION(htc->ctl_resp); | ||
650 | |||
651 | status = ath10k_hif_start(htc->ar); | ||
652 | if (status) { | ||
653 | ath10k_err("could not start HIF (%d)\n", status); | ||
654 | goto err_start; | ||
655 | } | ||
656 | |||
657 | status = wait_for_completion_timeout(&htc->ctl_resp, | ||
658 | ATH10K_HTC_WAIT_TIMEOUT_HZ); | ||
659 | if (status <= 0) { | ||
660 | if (status == 0) | ||
661 | status = -ETIMEDOUT; | ||
662 | |||
663 | ath10k_err("ctl_resp never came in (%d)\n", status); | ||
664 | goto err_target; | ||
665 | } | ||
666 | |||
667 | if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) { | ||
668 | ath10k_err("Invalid HTC ready msg len:%d\n", | ||
669 | htc->control_resp_len); | ||
670 | |||
671 | status = -ECOMM; | ||
672 | goto err_target; | ||
673 | } | ||
674 | |||
675 | msg = (struct ath10k_htc_msg *)htc->control_resp_buffer; | ||
676 | message_id = __le16_to_cpu(msg->hdr.message_id); | ||
677 | credit_count = __le16_to_cpu(msg->ready.credit_count); | ||
678 | credit_size = __le16_to_cpu(msg->ready.credit_size); | ||
679 | |||
680 | if (message_id != ATH10K_HTC_MSG_READY_ID) { | ||
681 | ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id); | ||
682 | status = -ECOMM; | ||
683 | goto err_target; | ||
684 | } | ||
685 | |||
686 | htc->total_transmit_credits = credit_count; | ||
687 | htc->target_credit_size = credit_size; | ||
688 | |||
689 | ath10k_dbg(ATH10K_DBG_HTC, | ||
690 | "Target ready! transmit resources: %d size:%d\n", | ||
691 | htc->total_transmit_credits, | ||
692 | htc->target_credit_size); | ||
693 | |||
694 | if ((htc->total_transmit_credits == 0) || | ||
695 | (htc->target_credit_size == 0)) { | ||
696 | status = -ECOMM; | ||
697 | ath10k_err("Invalid credit size received\n"); | ||
698 | goto err_target; | ||
699 | } | ||
700 | |||
701 | ath10k_htc_setup_target_buffer_assignments(htc); | ||
702 | |||
703 | /* setup our pseudo HTC control endpoint connection */ | ||
704 | memset(&conn_req, 0, sizeof(conn_req)); | ||
705 | memset(&conn_resp, 0, sizeof(conn_resp)); | ||
706 | conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete; | ||
707 | conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete; | ||
708 | conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS; | ||
709 | conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL; | ||
710 | |||
711 | /* connect fake service */ | ||
712 | status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp); | ||
713 | if (status) { | ||
714 | ath10k_err("could not connect to htc service (%d)\n", status); | ||
715 | goto err_target; | ||
716 | } | ||
717 | |||
718 | return 0; | ||
719 | err_target: | ||
720 | ath10k_hif_stop(htc->ar); | ||
721 | err_start: | ||
722 | return status; | ||
723 | } | ||
724 | |||
725 | int ath10k_htc_connect_service(struct ath10k_htc *htc, | ||
726 | struct ath10k_htc_svc_conn_req *conn_req, | ||
727 | struct ath10k_htc_svc_conn_resp *conn_resp) | ||
728 | { | ||
729 | struct ath10k_htc_msg *msg; | ||
730 | struct ath10k_htc_conn_svc *req_msg; | ||
731 | struct ath10k_htc_conn_svc_response resp_msg_dummy; | ||
732 | struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy; | ||
733 | enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT; | ||
734 | struct ath10k_htc_ep *ep; | ||
735 | struct sk_buff *skb; | ||
736 | unsigned int max_msg_size = 0; | ||
737 | int length, status; | ||
738 | bool disable_credit_flow_ctrl = false; | ||
739 | u16 message_id, service_id, flags = 0; | ||
740 | u8 tx_alloc = 0; | ||
741 | |||
742 | /* special case for HTC pseudo control service */ | ||
743 | if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) { | ||
744 | disable_credit_flow_ctrl = true; | ||
745 | assigned_eid = ATH10K_HTC_EP_0; | ||
746 | max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN; | ||
747 | memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy)); | ||
748 | goto setup; | ||
749 | } | ||
750 | |||
751 | tx_alloc = ath10k_htc_get_credit_allocation(htc, | ||
752 | conn_req->service_id); | ||
753 | if (!tx_alloc) | ||
754 | ath10k_warn("HTC Service %s does not allocate target credits\n", | ||
755 | htc_service_name(conn_req->service_id)); | ||
756 | |||
757 | skb = ath10k_htc_build_tx_ctrl_skb(htc->ar); | ||
758 | if (!skb) { | ||
759 | ath10k_err("Failed to allocate HTC packet\n"); | ||
760 | return -ENOMEM; | ||
761 | } | ||
762 | |||
763 | length = sizeof(msg->hdr) + sizeof(msg->connect_service); | ||
764 | skb_put(skb, length); | ||
765 | memset(skb->data, 0, length); | ||
766 | |||
767 | msg = (struct ath10k_htc_msg *)skb->data; | ||
768 | msg->hdr.message_id = | ||
769 | __cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID); | ||
770 | |||
771 | flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC); | ||
772 | |||
773 | req_msg = &msg->connect_service; | ||
774 | req_msg->flags = __cpu_to_le16(flags); | ||
775 | req_msg->service_id = __cpu_to_le16(conn_req->service_id); | ||
776 | |||
777 | /* Only enable credit flow control for WMI ctrl service */ | ||
778 | if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) { | ||
779 | flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL; | ||
780 | disable_credit_flow_ctrl = true; | ||
781 | } | ||
782 | |||
783 | INIT_COMPLETION(htc->ctl_resp); | ||
784 | |||
785 | status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb); | ||
786 | if (status) { | ||
787 | kfree_skb(skb); | ||
788 | return status; | ||
789 | } | ||
790 | |||
791 | /* wait for response */ | ||
792 | status = wait_for_completion_timeout(&htc->ctl_resp, | ||
793 | ATH10K_HTC_CONN_SVC_TIMEOUT_HZ); | ||
794 | if (status <= 0) { | ||
795 | if (status == 0) | ||
796 | status = -ETIMEDOUT; | ||
797 | ath10k_err("Service connect timeout: %d\n", status); | ||
798 | return status; | ||
799 | } | ||
800 | |||
801 | /* we controlled the buffer creation, so it's aligned */ | ||
802 | msg = (struct ath10k_htc_msg *)htc->control_resp_buffer; | ||
803 | resp_msg = &msg->connect_service_response; | ||
804 | message_id = __le16_to_cpu(msg->hdr.message_id); | ||
805 | service_id = __le16_to_cpu(resp_msg->service_id); | ||
806 | |||
807 | if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) || | ||
808 | (htc->control_resp_len < sizeof(msg->hdr) + | ||
809 | sizeof(msg->connect_service_response))) { | ||
810 | ath10k_err("Invalid resp message ID 0x%x", message_id); | ||
811 | return -EPROTO; | ||
812 | } | ||
813 | |||
814 | ath10k_dbg(ATH10K_DBG_HTC, | ||
815 | "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n", | ||
816 | htc_service_name(service_id), | ||
817 | resp_msg->status, resp_msg->eid); | ||
818 | |||
819 | conn_resp->connect_resp_code = resp_msg->status; | ||
820 | |||
821 | /* check response status */ | ||
822 | if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) { | ||
823 | ath10k_err("HTC Service %s connect request failed: 0x%x)\n", | ||
824 | htc_service_name(service_id), | ||
825 | resp_msg->status); | ||
826 | return -EPROTO; | ||
827 | } | ||
828 | |||
829 | assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid; | ||
830 | max_msg_size = __le16_to_cpu(resp_msg->max_msg_size); | ||
831 | |||
832 | setup: | ||
833 | |||
834 | if (assigned_eid >= ATH10K_HTC_EP_COUNT) | ||
835 | return -EPROTO; | ||
836 | |||
837 | if (max_msg_size == 0) | ||
838 | return -EPROTO; | ||
839 | |||
840 | ep = &htc->endpoint[assigned_eid]; | ||
841 | ep->eid = assigned_eid; | ||
842 | |||
843 | if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED) | ||
844 | return -EPROTO; | ||
845 | |||
846 | /* return assigned endpoint to caller */ | ||
847 | conn_resp->eid = assigned_eid; | ||
848 | conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size); | ||
849 | |||
850 | /* setup the endpoint */ | ||
851 | ep->service_id = conn_req->service_id; | ||
852 | ep->max_tx_queue_depth = conn_req->max_send_queue_depth; | ||
853 | ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size); | ||
854 | ep->tx_credits = tx_alloc; | ||
855 | ep->tx_credit_size = htc->target_credit_size; | ||
856 | ep->tx_credits_per_max_message = ep->max_ep_message_len / | ||
857 | htc->target_credit_size; | ||
858 | |||
859 | if (ep->max_ep_message_len % htc->target_credit_size) | ||
860 | ep->tx_credits_per_max_message++; | ||
861 | |||
862 | /* copy all the callbacks */ | ||
863 | ep->ep_ops = conn_req->ep_ops; | ||
864 | |||
865 | status = ath10k_hif_map_service_to_pipe(htc->ar, | ||
866 | ep->service_id, | ||
867 | &ep->ul_pipe_id, | ||
868 | &ep->dl_pipe_id, | ||
869 | &ep->ul_is_polled, | ||
870 | &ep->dl_is_polled); | ||
871 | if (status) | ||
872 | return status; | ||
873 | |||
874 | ath10k_dbg(ATH10K_DBG_HTC, | ||
875 | "HTC service: %s UL pipe: %d DL pipe: %d eid: %d ready\n", | ||
876 | htc_service_name(ep->service_id), ep->ul_pipe_id, | ||
877 | ep->dl_pipe_id, ep->eid); | ||
878 | |||
879 | ath10k_dbg(ATH10K_DBG_HTC, | ||
880 | "EP %d UL polled: %d, DL polled: %d\n", | ||
881 | ep->eid, ep->ul_is_polled, ep->dl_is_polled); | ||
882 | |||
883 | if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) { | ||
884 | ep->tx_credit_flow_enabled = false; | ||
885 | ath10k_dbg(ATH10K_DBG_HTC, | ||
886 | "HTC service: %s eid: %d TX flow control disabled\n", | ||
887 | htc_service_name(ep->service_id), assigned_eid); | ||
888 | } | ||
889 | |||
890 | return status; | ||
891 | } | ||
892 | |||
893 | struct sk_buff *ath10k_htc_alloc_skb(int size) | ||
894 | { | ||
895 | struct sk_buff *skb; | ||
896 | |||
897 | skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr)); | ||
898 | if (!skb) { | ||
899 | ath10k_warn("could not allocate HTC tx skb\n"); | ||
900 | return NULL; | ||
901 | } | ||
902 | |||
903 | skb_reserve(skb, sizeof(struct ath10k_htc_hdr)); | ||
904 | |||
905 | /* FW/HTC requires 4-byte aligned streams */ | ||
906 | if (!IS_ALIGNED((unsigned long)skb->data, 4)) | ||
907 | ath10k_warn("Unaligned HTC tx skb\n"); | ||
908 | |||
909 | return skb; | ||
910 | } | ||
911 | |||
912 | int ath10k_htc_start(struct ath10k_htc *htc) | ||
913 | { | ||
914 | struct sk_buff *skb; | ||
915 | int status = 0; | ||
916 | struct ath10k_htc_msg *msg; | ||
917 | |||
918 | skb = ath10k_htc_build_tx_ctrl_skb(htc->ar); | ||
919 | if (!skb) | ||
920 | return -ENOMEM; | ||
921 | |||
922 | skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext)); | ||
923 | memset(skb->data, 0, skb->len); | ||
924 | |||
925 | msg = (struct ath10k_htc_msg *)skb->data; | ||
926 | msg->hdr.message_id = | ||
927 | __cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID); | ||
928 | |||
929 | ath10k_dbg(ATH10K_DBG_HTC, "HTC is using TX credit flow control\n"); | ||
930 | |||
931 | status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb); | ||
932 | if (status) { | ||
933 | kfree_skb(skb); | ||
934 | return status; | ||
935 | } | ||
936 | |||
937 | return 0; | ||
938 | } | ||
939 | |||
940 | /* | ||
941 | * stop HTC communications, i.e. stop interrupt reception, and flush all | ||
942 | * queued buffers | ||
943 | */ | ||
944 | void ath10k_htc_stop(struct ath10k_htc *htc) | ||
945 | { | ||
946 | int i; | ||
947 | struct ath10k_htc_ep *ep; | ||
948 | |||
949 | spin_lock_bh(&htc->tx_lock); | ||
950 | htc->stopping = true; | ||
951 | spin_unlock_bh(&htc->tx_lock); | ||
952 | |||
953 | for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) { | ||
954 | ep = &htc->endpoint[i]; | ||
955 | ath10k_htc_flush_endpoint_tx(htc, ep); | ||
956 | } | ||
957 | |||
958 | ath10k_hif_stop(htc->ar); | ||
959 | ath10k_htc_reset_endpoint_states(htc); | ||
960 | } | ||
961 | |||
962 | /* registered target arrival callback from the HIF layer */ | ||
963 | struct ath10k_htc *ath10k_htc_create(struct ath10k *ar, | ||
964 | struct ath10k_htc_ops *htc_ops) | ||
965 | { | ||
966 | struct ath10k_hif_cb htc_callbacks; | ||
967 | struct ath10k_htc_ep *ep = NULL; | ||
968 | struct ath10k_htc *htc = NULL; | ||
969 | |||
970 | /* FIXME: use struct ath10k instead */ | ||
971 | htc = kzalloc(sizeof(struct ath10k_htc), GFP_KERNEL); | ||
972 | if (!htc) | ||
973 | return ERR_PTR(-ENOMEM); | ||
974 | |||
975 | spin_lock_init(&htc->tx_lock); | ||
976 | |||
977 | memcpy(&htc->htc_ops, htc_ops, sizeof(struct ath10k_htc_ops)); | ||
978 | |||
979 | ath10k_htc_reset_endpoint_states(htc); | ||
980 | |||
981 | /* setup HIF layer callbacks */ | ||
982 | htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler; | ||
983 | htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler; | ||
984 | htc->ar = ar; | ||
985 | |||
986 | /* Get HIF default pipe for HTC message exchange */ | ||
987 | ep = &htc->endpoint[ATH10K_HTC_EP_0]; | ||
988 | |||
989 | ath10k_hif_init(ar, &htc_callbacks); | ||
990 | ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id); | ||
991 | |||
992 | init_completion(&htc->ctl_resp); | ||
993 | |||
994 | return htc; | ||
995 | } | ||
996 | |||
997 | void ath10k_htc_destroy(struct ath10k_htc *htc) | ||
998 | { | ||
999 | kfree(htc); | ||
1000 | } | ||
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h new file mode 100644 index 000000000000..fa45844b59fb --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/htc.h | |||
@@ -0,0 +1,368 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #ifndef _HTC_H_ | ||
19 | #define _HTC_H_ | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/list.h> | ||
23 | #include <linux/bug.h> | ||
24 | #include <linux/skbuff.h> | ||
25 | #include <linux/semaphore.h> | ||
26 | #include <linux/timer.h> | ||
27 | |||
28 | struct ath10k; | ||
29 | |||
30 | /****************/ | ||
31 | /* HTC protocol */ | ||
32 | /****************/ | ||
33 | |||
34 | /* | ||
35 | * HTC - host-target control protocol | ||
36 | * | ||
37 | * tx packets are generally <htc_hdr><payload> | ||
38 | * rx packets are more complex: <htc_hdr><payload><trailer> | ||
39 | * | ||
40 | * The payload + trailer length is stored in len. | ||
41 | * To get the payload-only length, subtract trailer_len from len. | ||
42 | * | ||
43 | * Trailer contains (possibly) multiple <htc_record>. | ||
44 | * Each record is an id-len-value tuple. | ||
45 | * | ||
46 | * HTC header flags, control_byte0 and control_byte1 | ||
47 | * have different meanings depending on whether the | ||
48 | * packet is tx or rx. | ||
49 | * | ||
50 | * Alignment: htc_hdr, payload and trailer are | ||
51 | * 4-byte aligned. | ||
52 | */ | ||
53 | |||
54 | enum ath10k_htc_tx_flags { | ||
55 | ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01, | ||
56 | ATH10K_HTC_FLAG_SEND_BUNDLE = 0x02 | ||
57 | }; | ||
58 | |||
59 | enum ath10k_htc_rx_flags { | ||
60 | ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02, | ||
61 | ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0 | ||
62 | }; | ||
63 | |||
64 | struct ath10k_htc_hdr { | ||
65 | u8 eid; /* @enum ath10k_htc_ep_id */ | ||
66 | u8 flags; /* @enum ath10k_htc_tx_flags, ath10k_htc_rx_flags */ | ||
67 | __le16 len; | ||
68 | union { | ||
69 | u8 trailer_len; /* for rx */ | ||
70 | u8 control_byte0; | ||
71 | } __packed; | ||
72 | union { | ||
73 | u8 seq_no; /* for tx */ | ||
74 | u8 control_byte1; | ||
75 | } __packed; | ||
76 | u8 pad0; | ||
77 | u8 pad1; | ||
78 | } __packed __aligned(4); | ||
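
/*
 * Illustrative sketch, not part of the original patch: given the layout
 * described in the comment above, the payload-only length of a received
 * frame is hdr->len minus the trailer length (when a trailer is present).
 * The helper name below is hypothetical.
 */
static inline u16 ath10k_htc_hdr_payload_len(const struct ath10k_htc_hdr *hdr)
{
	u16 len = __le16_to_cpu(hdr->len);

	/* the trailer (if any) sits at the very end of the payload area */
	if (hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT)
		len -= hdr->trailer_len;

	return len;
}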
79 | |||
80 | enum ath10k_ath10k_htc_msg_id { | ||
81 | ATH10K_HTC_MSG_READY_ID = 1, | ||
82 | ATH10K_HTC_MSG_CONNECT_SERVICE_ID = 2, | ||
83 | ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3, | ||
84 | ATH10K_HTC_MSG_SETUP_COMPLETE_ID = 4, | ||
85 | ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID = 5, | ||
86 | ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE = 6 | ||
87 | }; | ||
88 | |||
89 | enum ath10k_htc_version { | ||
90 | ATH10K_HTC_VERSION_2P0 = 0x00, /* 2.0 */ | ||
91 | ATH10K_HTC_VERSION_2P1 = 0x01, /* 2.1 */ | ||
92 | }; | ||
93 | |||
94 | enum ath10k_htc_conn_flags { | ||
95 | ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH = 0x0, | ||
96 | ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF = 0x1, | ||
97 | ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2, | ||
98 | ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY = 0x3, | ||
99 | #define ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK 0x3 | ||
100 | ATH10K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 1 << 2, | ||
101 | ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 1 << 3 | ||
102 | #define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_MASK 0xFF00 | ||
103 | #define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_LSB 8 | ||
104 | }; | ||
105 | |||
106 | enum ath10k_htc_conn_svc_status { | ||
107 | ATH10K_HTC_CONN_SVC_STATUS_SUCCESS = 0, | ||
108 | ATH10K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1, | ||
109 | ATH10K_HTC_CONN_SVC_STATUS_FAILED = 2, | ||
110 | ATH10K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3, | ||
111 | ATH10K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4 | ||
112 | }; | ||
113 | |||
114 | struct ath10k_ath10k_htc_msg_hdr { | ||
115 | __le16 message_id; /* @enum htc_message_id */ | ||
116 | } __packed; | ||
117 | |||
118 | struct ath10k_htc_unknown { | ||
119 | u8 pad0; | ||
120 | u8 pad1; | ||
121 | } __packed; | ||
122 | |||
123 | struct ath10k_htc_ready { | ||
124 | __le16 credit_count; | ||
125 | __le16 credit_size; | ||
126 | u8 max_endpoints; | ||
127 | u8 pad0; | ||
128 | } __packed; | ||
129 | |||
130 | struct ath10k_htc_ready_extended { | ||
131 | struct ath10k_htc_ready base; | ||
132 | u8 htc_version; /* @enum ath10k_htc_version */ | ||
133 | u8 max_msgs_per_htc_bundle; | ||
134 | u8 pad0; | ||
135 | u8 pad1; | ||
136 | } __packed; | ||
137 | |||
138 | struct ath10k_htc_conn_svc { | ||
139 | __le16 service_id; | ||
140 | __le16 flags; /* @enum ath10k_htc_conn_flags */ | ||
141 | u8 pad0; | ||
142 | u8 pad1; | ||
143 | } __packed; | ||
144 | |||
145 | struct ath10k_htc_conn_svc_response { | ||
146 | __le16 service_id; | ||
147 | u8 status; /* @enum ath10k_htc_conn_svc_status */ | ||
148 | u8 eid; | ||
149 | __le16 max_msg_size; | ||
150 | } __packed; | ||
151 | |||
152 | struct ath10k_htc_setup_complete_extended { | ||
153 | u8 pad0; | ||
154 | u8 pad1; | ||
155 | __le32 flags; /* @enum htc_setup_complete_flags */ | ||
156 | u8 max_msgs_per_bundled_recv; | ||
157 | u8 pad2; | ||
158 | u8 pad3; | ||
159 | u8 pad4; | ||
160 | } __packed; | ||
161 | |||
162 | struct ath10k_htc_msg { | ||
163 | struct ath10k_ath10k_htc_msg_hdr hdr; | ||
164 | union { | ||
165 | /* host-to-target */ | ||
166 | struct ath10k_htc_conn_svc connect_service; | ||
167 | struct ath10k_htc_ready ready; | ||
168 | struct ath10k_htc_ready_extended ready_ext; | ||
169 | struct ath10k_htc_unknown unknown; | ||
170 | struct ath10k_htc_setup_complete_extended setup_complete_ext; | ||
171 | |||
172 | /* target-to-host */ | ||
173 | struct ath10k_htc_conn_svc_response connect_service_response; | ||
174 | }; | ||
175 | } __packed __aligned(4); | ||
176 | |||
177 | enum ath10k_ath10k_htc_record_id { | ||
178 | ATH10K_HTC_RECORD_NULL = 0, | ||
179 | ATH10K_HTC_RECORD_CREDITS = 1 | ||
180 | }; | ||
181 | |||
182 | struct ath10k_ath10k_htc_record_hdr { | ||
183 | u8 id; /* @enum ath10k_ath10k_htc_record_id */ | ||
184 | u8 len; | ||
185 | u8 pad0; | ||
186 | u8 pad1; | ||
187 | } __packed; | ||
188 | |||
189 | struct ath10k_htc_credit_report { | ||
190 | u8 eid; /* @enum ath10k_htc_ep_id */ | ||
191 | u8 credits; | ||
192 | u8 pad0; | ||
193 | u8 pad1; | ||
194 | } __packed; | ||
195 | |||
196 | struct ath10k_htc_record { | ||
197 | struct ath10k_ath10k_htc_record_hdr hdr; | ||
198 | union { | ||
199 | struct ath10k_htc_credit_report credit_report[0]; | ||
200 | u8 payload[0]; | ||
201 | }; | ||
202 | } __packed __aligned(4); | ||
203 | |||
204 | /* | ||
205 | * note: the trailer offset is dynamic depending | ||
206 | * on payload length. this is only a struct layout draft | ||
207 | */ | ||
208 | struct ath10k_htc_frame { | ||
209 | struct ath10k_htc_hdr hdr; | ||
210 | union { | ||
211 | struct ath10k_htc_msg msg; | ||
212 | u8 payload[0]; | ||
213 | }; | ||
214 | struct ath10k_htc_record trailer[0]; | ||
215 | } __packed __aligned(4); | ||
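
/*
 * Illustrative sketch, not part of the original patch: locating the trailer
 * of a received frame given the dynamic offset noted above. This mirrors the
 * pointer arithmetic in ath10k_htc_rx_completion_handler(); the helper name
 * is hypothetical and assumes the trailer flag is set.
 */
static inline u8 *ath10k_htc_hdr_trailer(struct ath10k_htc_hdr *hdr)
{
	u16 payload_len = __le16_to_cpu(hdr->len);

	/* the trailer is the last trailer_len bytes of the payload area */
	return (u8 *)hdr + sizeof(*hdr) + payload_len - hdr->trailer_len;
}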
216 | |||
217 | |||
218 | /*******************/ | ||
219 | /* Host-side stuff */ | ||
220 | /*******************/ | ||
221 | |||
222 | enum ath10k_htc_svc_gid { | ||
223 | ATH10K_HTC_SVC_GRP_RSVD = 0, | ||
224 | ATH10K_HTC_SVC_GRP_WMI = 1, | ||
225 | ATH10K_HTC_SVC_GRP_NMI = 2, | ||
226 | ATH10K_HTC_SVC_GRP_HTT = 3, | ||
227 | |||
228 | ATH10K_HTC_SVC_GRP_TEST = 254, | ||
229 | ATH10K_HTC_SVC_GRP_LAST = 255, | ||
230 | }; | ||
231 | |||
232 | #define SVC(group, idx) \ | ||
233 | (int)(((int)(group) << 8) | (int)(idx)) | ||
234 | |||
235 | enum ath10k_htc_svc_id { | ||
236 | /* NOTE: service ID of 0x0000 is reserved and should never be used */ | ||
237 | ATH10K_HTC_SVC_ID_RESERVED = 0x0000, | ||
238 | ATH10K_HTC_SVC_ID_UNUSED = ATH10K_HTC_SVC_ID_RESERVED, | ||
239 | |||
240 | ATH10K_HTC_SVC_ID_RSVD_CTRL = SVC(ATH10K_HTC_SVC_GRP_RSVD, 1), | ||
241 | ATH10K_HTC_SVC_ID_WMI_CONTROL = SVC(ATH10K_HTC_SVC_GRP_WMI, 0), | ||
242 | ATH10K_HTC_SVC_ID_WMI_DATA_BE = SVC(ATH10K_HTC_SVC_GRP_WMI, 1), | ||
243 | ATH10K_HTC_SVC_ID_WMI_DATA_BK = SVC(ATH10K_HTC_SVC_GRP_WMI, 2), | ||
244 | ATH10K_HTC_SVC_ID_WMI_DATA_VI = SVC(ATH10K_HTC_SVC_GRP_WMI, 3), | ||
245 | ATH10K_HTC_SVC_ID_WMI_DATA_VO = SVC(ATH10K_HTC_SVC_GRP_WMI, 4), | ||
246 | |||
247 | ATH10K_HTC_SVC_ID_NMI_CONTROL = SVC(ATH10K_HTC_SVC_GRP_NMI, 0), | ||
248 | ATH10K_HTC_SVC_ID_NMI_DATA = SVC(ATH10K_HTC_SVC_GRP_NMI, 1), | ||
249 | |||
250 | ATH10K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 0), | ||
251 | |||
252 | /* raw stream service (i.e. flash, tcmd, calibration apps) */ | ||
253 | ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH10K_HTC_SVC_GRP_TEST, 0), | ||
254 | }; | ||
255 | |||
256 | #undef SVC | ||
257 | |||
258 | enum ath10k_htc_ep_id { | ||
259 | ATH10K_HTC_EP_UNUSED = -1, | ||
260 | ATH10K_HTC_EP_0 = 0, | ||
261 | ATH10K_HTC_EP_1 = 1, | ||
262 | ATH10K_HTC_EP_2, | ||
263 | ATH10K_HTC_EP_3, | ||
264 | ATH10K_HTC_EP_4, | ||
265 | ATH10K_HTC_EP_5, | ||
266 | ATH10K_HTC_EP_6, | ||
267 | ATH10K_HTC_EP_7, | ||
268 | ATH10K_HTC_EP_8, | ||
269 | ATH10K_HTC_EP_COUNT, | ||
270 | }; | ||
271 | |||
272 | struct ath10k_htc_ops { | ||
273 | void (*target_send_suspend_complete)(struct ath10k *ar); | ||
274 | }; | ||
275 | |||
276 | struct ath10k_htc_ep_ops { | ||
277 | void (*ep_tx_complete)(struct ath10k *, struct sk_buff *); | ||
278 | void (*ep_rx_complete)(struct ath10k *, struct sk_buff *); | ||
279 | }; | ||
280 | |||
281 | /* service connection information */ | ||
282 | struct ath10k_htc_svc_conn_req { | ||
283 | u16 service_id; | ||
284 | struct ath10k_htc_ep_ops ep_ops; | ||
285 | int max_send_queue_depth; | ||
286 | }; | ||
287 | |||
288 | /* service connection response information */ | ||
289 | struct ath10k_htc_svc_conn_resp { | ||
290 | u8 buffer_len; | ||
291 | u8 actual_len; | ||
292 | enum ath10k_htc_ep_id eid; | ||
293 | unsigned int max_msg_len; | ||
294 | u8 connect_resp_code; | ||
295 | }; | ||
296 | |||
297 | #define ATH10K_NUM_CONTROL_TX_BUFFERS 2 | ||
298 | #define ATH10K_HTC_MAX_LEN 4096 | ||
299 | #define ATH10K_HTC_MAX_CTRL_MSG_LEN 256 | ||
300 | #define ATH10K_HTC_WAIT_TIMEOUT_HZ (1*HZ) | ||
301 | #define ATH10K_HTC_CONTROL_BUFFER_SIZE (ATH10K_HTC_MAX_CTRL_MSG_LEN + \ | ||
302 | sizeof(struct ath10k_htc_hdr)) | ||
303 | #define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1*HZ) | ||
304 | |||
305 | struct ath10k_htc_ep { | ||
306 | struct ath10k_htc *htc; | ||
307 | enum ath10k_htc_ep_id eid; | ||
308 | enum ath10k_htc_svc_id service_id; | ||
309 | struct ath10k_htc_ep_ops ep_ops; | ||
310 | |||
311 | int max_tx_queue_depth; | ||
312 | int max_ep_message_len; | ||
313 | u8 ul_pipe_id; | ||
314 | u8 dl_pipe_id; | ||
315 | int ul_is_polled; /* call HIF to get tx completions */ | ||
316 | int dl_is_polled; /* call HIF to fetch rx (not implemented) */ | ||
317 | |||
318 | struct sk_buff_head tx_queue; | ||
319 | |||
320 | u8 seq_no; /* for debugging */ | ||
321 | int tx_credits; | ||
322 | int tx_credit_size; | ||
323 | int tx_credits_per_max_message; | ||
324 | bool tx_credit_flow_enabled; | ||
325 | |||
326 | struct work_struct send_work; | ||
327 | }; | ||
328 | |||
329 | struct ath10k_htc_svc_tx_credits { | ||
330 | u16 service_id; | ||
331 | u8 credit_allocation; | ||
332 | }; | ||
333 | |||
334 | struct ath10k_htc { | ||
335 | struct ath10k *ar; | ||
336 | struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT]; | ||
337 | |||
338 | /* protects endpoint and stopping fields */ | ||
339 | spinlock_t tx_lock; | ||
340 | |||
341 | struct ath10k_htc_ops htc_ops; | ||
342 | |||
343 | u8 control_resp_buffer[ATH10K_HTC_MAX_CTRL_MSG_LEN]; | ||
344 | int control_resp_len; | ||
345 | |||
346 | struct completion ctl_resp; | ||
347 | |||
348 | int total_transmit_credits; | ||
349 | struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT]; | ||
350 | int target_credit_size; | ||
351 | |||
352 | bool stopping; | ||
353 | }; | ||
354 | |||
355 | struct ath10k_htc *ath10k_htc_create(struct ath10k *ar, | ||
356 | struct ath10k_htc_ops *htc_ops); | ||
357 | int ath10k_htc_wait_target(struct ath10k_htc *htc); | ||
358 | int ath10k_htc_start(struct ath10k_htc *htc); | ||
359 | int ath10k_htc_connect_service(struct ath10k_htc *htc, | ||
360 | struct ath10k_htc_svc_conn_req *conn_req, | ||
361 | struct ath10k_htc_svc_conn_resp *conn_resp); | ||
362 | int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid, | ||
363 | struct sk_buff *packet); | ||
364 | void ath10k_htc_stop(struct ath10k_htc *htc); | ||
365 | void ath10k_htc_destroy(struct ath10k_htc *htc); | ||
366 | struct sk_buff *ath10k_htc_alloc_skb(int size); | ||
367 | |||
368 | #endif | ||
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c new file mode 100644 index 000000000000..185a5468a2f2 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/htt.c | |||
@@ -0,0 +1,152 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #include <linux/slab.h> | ||
19 | |||
20 | #include "htt.h" | ||
21 | #include "core.h" | ||
22 | #include "debug.h" | ||
23 | |||
24 | static int ath10k_htt_htc_attach(struct ath10k_htt *htt) | ||
25 | { | ||
26 | struct ath10k_htc_svc_conn_req conn_req; | ||
27 | struct ath10k_htc_svc_conn_resp conn_resp; | ||
28 | int status; | ||
29 | |||
30 | memset(&conn_req, 0, sizeof(conn_req)); | ||
31 | memset(&conn_resp, 0, sizeof(conn_resp)); | ||
32 | |||
33 | conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete; | ||
34 | conn_req.ep_ops.ep_rx_complete = ath10k_htt_t2h_msg_handler; | ||
35 | |||
36 | /* connect to control service */ | ||
37 | conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG; | ||
38 | |||
39 | status = ath10k_htc_connect_service(htt->ar->htc, &conn_req, | ||
40 | &conn_resp); | ||
41 | |||
42 | if (status) | ||
43 | return status; | ||
44 | |||
45 | htt->eid = conn_resp.eid; | ||
46 | |||
47 | return 0; | ||
48 | } | ||
49 | |||
50 | struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar) | ||
51 | { | ||
52 | struct ath10k_htt *htt; | ||
53 | int ret; | ||
54 | |||
55 | htt = kzalloc(sizeof(*htt), GFP_KERNEL); | ||
56 | if (!htt) | ||
57 | return NULL; | ||
58 | |||
59 | htt->ar = ar; | ||
60 | htt->max_throughput_mbps = 800; | ||
61 | |||
62 | /* | ||
63 | * Connect to HTC service. | ||
64 | * This has to be done before calling ath10k_htt_rx_attach, | ||
65 | * since ath10k_htt_rx_attach involves sending a rx ring configure | ||
66 | * message to the target. | ||
67 | */ | ||
68 | if (ath10k_htt_htc_attach(htt)) | ||
69 | goto err_htc_attach; | ||
70 | |||
71 | ret = ath10k_htt_tx_attach(htt); | ||
72 | if (ret) { | ||
73 | ath10k_err("could not attach htt tx (%d)\n", ret); | ||
74 | goto err_htc_attach; | ||
75 | } | ||
76 | |||
77 | if (ath10k_htt_rx_attach(htt)) | ||
78 | goto err_rx_attach; | ||
79 | |||
80 | /* | ||
81 | * Prefetch enough data to satisfy target | ||
82 | * classification engine. | ||
83 | * This is for LL chips. HL chips will probably | ||
84 | * transfer the whole frame in the tx fragment. | ||
85 | */ | ||
86 | htt->prefetch_len = | ||
87 | 36 + /* 802.11 + qos + ht */ | ||
88 | 4 + /* 802.1q */ | ||
89 | 8 + /* llc snap */ | ||
90 | 2; /* ip4 dscp or ip6 priority */ | ||
91 | |||
92 | return htt; | ||
93 | |||
94 | err_rx_attach: | ||
95 | ath10k_htt_tx_detach(htt); | ||
96 | err_htc_attach: | ||
97 | kfree(htt); | ||
98 | return NULL; | ||
99 | } | ||
100 | |||
101 | #define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ) | ||
102 | |||
103 | static int ath10k_htt_verify_version(struct ath10k_htt *htt) | ||
104 | { | ||
105 | ath10k_dbg(ATH10K_DBG_HTT, | ||
106 | "htt target version %d.%d; host version %d.%d\n", | ||
107 | htt->target_version_major, | ||
108 | htt->target_version_minor, | ||
109 | HTT_CURRENT_VERSION_MAJOR, | ||
110 | HTT_CURRENT_VERSION_MINOR); | ||
111 | |||
112 | if (htt->target_version_major != HTT_CURRENT_VERSION_MAJOR) { | ||
113 | ath10k_err("htt major versions are incompatible!\n"); | ||
114 | return -ENOTSUPP; | ||
115 | } | ||
116 | |||
117 | if (htt->target_version_minor != HTT_CURRENT_VERSION_MINOR) | ||
118 | ath10k_warn("htt minor version differ but still compatible\n"); | ||
119 | |||
120 | return 0; | ||
121 | } | ||
122 | |||
123 | int ath10k_htt_attach_target(struct ath10k_htt *htt) | ||
124 | { | ||
125 | int status; | ||
126 | |||
127 | init_completion(&htt->target_version_received); | ||
128 | |||
129 | status = ath10k_htt_h2t_ver_req_msg(htt); | ||
130 | if (status) | ||
131 | return status; | ||
132 | |||
133 | status = wait_for_completion_timeout(&htt->target_version_received, | ||
134 | HTT_TARGET_VERSION_TIMEOUT_HZ); | ||
135 | if (status <= 0) { | ||
136 | ath10k_warn("htt version request timed out\n"); | ||
137 | return -ETIMEDOUT; | ||
138 | } | ||
139 | |||
140 | status = ath10k_htt_verify_version(htt); | ||
141 | if (status) | ||
142 | return status; | ||
143 | |||
144 | return ath10k_htt_send_rx_ring_cfg_ll(htt); | ||
145 | } | ||
146 | |||
147 | void ath10k_htt_detach(struct ath10k_htt *htt) | ||
148 | { | ||
149 | ath10k_htt_rx_detach(htt); | ||
150 | ath10k_htt_tx_detach(htt); | ||
151 | kfree(htt); | ||
152 | } | ||
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h new file mode 100644 index 000000000000..a7a7aa040536 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/htt.h | |||
@@ -0,0 +1,1338 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #ifndef _HTT_H_ | ||
19 | #define _HTT_H_ | ||
20 | |||
21 | #include <linux/bug.h> | ||
22 | |||
23 | #include "core.h" | ||
24 | #include "htc.h" | ||
25 | #include "rx_desc.h" | ||
26 | |||
27 | #define HTT_CURRENT_VERSION_MAJOR 2 | ||
28 | #define HTT_CURRENT_VERSION_MINOR 1 | ||
29 | |||
30 | enum htt_dbg_stats_type { | ||
31 | HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0, | ||
32 | HTT_DBG_STATS_RX_REORDER = 1 << 1, | ||
33 | HTT_DBG_STATS_RX_RATE_INFO = 1 << 2, | ||
34 | HTT_DBG_STATS_TX_PPDU_LOG = 1 << 3, | ||
35 | HTT_DBG_STATS_TX_RATE_INFO = 1 << 4, | ||
36 | /* bits 5-23 currently reserved */ | ||
37 | |||
38 | HTT_DBG_NUM_STATS /* keep this last */ | ||
39 | }; | ||
40 | |||
41 | enum htt_h2t_msg_type { /* host-to-target */ | ||
42 | HTT_H2T_MSG_TYPE_VERSION_REQ = 0, | ||
43 | HTT_H2T_MSG_TYPE_TX_FRM = 1, | ||
44 | HTT_H2T_MSG_TYPE_RX_RING_CFG = 2, | ||
45 | HTT_H2T_MSG_TYPE_STATS_REQ = 3, | ||
46 | HTT_H2T_MSG_TYPE_SYNC = 4, | ||
47 | HTT_H2T_MSG_TYPE_AGGR_CFG = 5, | ||
48 | HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6, | ||
49 | HTT_H2T_MSG_TYPE_MGMT_TX = 7, | ||
50 | |||
51 | HTT_H2T_NUM_MSGS /* keep this last */ | ||
52 | }; | ||
53 | |||
54 | struct htt_cmd_hdr { | ||
55 | u8 msg_type; | ||
56 | } __packed; | ||
57 | |||
58 | struct htt_ver_req { | ||
59 | u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)]; | ||
60 | } __packed; | ||
61 | |||
62 | /* | ||
63 | * HTT tx MSDU descriptor | ||
64 | * | ||
65 | * The HTT tx MSDU descriptor is created by the host HTT SW for each | ||
66 | * tx MSDU. The HTT tx MSDU descriptor contains the information that | ||
67 | * the target firmware needs for the FW's tx processing, particularly | ||
68 | * for creating the HW msdu descriptor. | ||
69 | * The same HTT tx descriptor is used for HL and LL systems, though | ||
70 | * a few fields within the tx descriptor are used only by LL or | ||
71 | * only by HL. | ||
72 | * The HTT tx descriptor is defined in two manners: by a struct with | ||
73 | * bitfields, and by a series of [dword offset, bit mask, bit shift] | ||
74 | * definitions. | ||
75 | * The target should use the struct def, for simplicity and clarity, | ||
76 | * but the host shall use the bit-mask + bit-shift defs, to be endian- | ||
77 | * neutral. Specifically, the host shall use the get/set macros built | ||
78 | * around the mask + shift defs. | ||
79 | */ | ||
80 | struct htt_data_tx_desc_frag { | ||
81 | __le32 paddr; | ||
82 | __le32 len; | ||
83 | } __packed; | ||
84 | |||
85 | enum htt_data_tx_desc_flags0 { | ||
86 | HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0, | ||
87 | HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1, | ||
88 | HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT = 1 << 2, | ||
89 | HTT_DATA_TX_DESC_FLAGS0_NO_CLASSIFY = 1 << 3, | ||
90 | HTT_DATA_TX_DESC_FLAGS0_RSVD0 = 1 << 4 | ||
91 | #define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK 0xE0 | ||
92 | #define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB 5 | ||
93 | }; | ||
94 | |||
95 | enum htt_data_tx_desc_flags1 { | ||
96 | #define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_BITS 6 | ||
97 | #define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK 0x003F | ||
98 | #define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB 0 | ||
99 | #define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_BITS 5 | ||
100 | #define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK 0x07C0 | ||
101 | #define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB 6 | ||
102 | HTT_DATA_TX_DESC_FLAGS1_POSTPONED = 1 << 11, | ||
103 | HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH = 1 << 12, | ||
104 | HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13, | ||
105 | HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14, | ||
106 | HTT_DATA_TX_DESC_FLAGS1_RSVD1 = 1 << 15 | ||
107 | }; | ||
108 | |||
109 | enum htt_data_tx_ext_tid { | ||
110 | HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16, | ||
111 | HTT_DATA_TX_EXT_TID_MGMT = 17, | ||
112 | HTT_DATA_TX_EXT_TID_INVALID = 31 | ||
113 | }; | ||
114 | |||
115 | #define HTT_INVALID_PEERID 0xFFFF | ||
116 | |||
117 | /* | ||
118 | * htt_data_tx_desc - used for data tx path | ||
119 | * | ||
120 | * Note: vdev_id irrelevant for pkt_type == raw and no_classify == 1. | ||
121 | * ext_tid: for qos-data frames (0-15), see %HTT_DATA_TX_EXT_TID_ | ||
122 | * for special kinds of tids | ||
123 | * postponed: only for HL hosts. indicates if this is a resend | ||
124 | * (HL hosts manage queues on the host) | ||
125 | * more_in_batch: only for HL hosts. indicates if more packets are | ||
126 | * pending. this allows target to wait and aggregate | ||
127 | */ | ||
128 | struct htt_data_tx_desc { | ||
129 | u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */ | ||
130 | __le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */ | ||
131 | __le16 len; | ||
132 | __le16 id; | ||
133 | __le32 frags_paddr; | ||
134 | __le32 peerid; | ||
135 | u8 prefetch[0]; /* start of frame, for FW classification engine */ | ||
136 | } __packed; | ||
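
/*
 * Illustrative sketch, not part of the original patch: the endian-neutral
 * mask + shift access style the descriptor comment above calls for, shown
 * for the flags0 pkt_type field. The helper name is hypothetical.
 */
static inline void htt_data_tx_desc_set_pkt_type(struct htt_data_tx_desc *desc,
						 u8 pkt_type)
{
	u8 flags0 = desc->flags0;

	/* clear then set the pkt_type bits using the mask/LSB defines */
	flags0 &= ~HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK;
	flags0 |= (pkt_type << HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB) &
		  HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK;

	desc->flags0 = flags0;
}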
137 | |||
138 | enum htt_rx_ring_flags { | ||
139 | HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0, | ||
140 | HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1, | ||
141 | HTT_RX_RING_FLAGS_PPDU_START = 1 << 2, | ||
142 | HTT_RX_RING_FLAGS_PPDU_END = 1 << 3, | ||
143 | HTT_RX_RING_FLAGS_MPDU_START = 1 << 4, | ||
144 | HTT_RX_RING_FLAGS_MPDU_END = 1 << 5, | ||
145 | HTT_RX_RING_FLAGS_MSDU_START = 1 << 6, | ||
146 | HTT_RX_RING_FLAGS_MSDU_END = 1 << 7, | ||
147 | HTT_RX_RING_FLAGS_RX_ATTENTION = 1 << 8, | ||
148 | HTT_RX_RING_FLAGS_FRAG_INFO = 1 << 9, | ||
149 | HTT_RX_RING_FLAGS_UNICAST_RX = 1 << 10, | ||
150 | HTT_RX_RING_FLAGS_MULTICAST_RX = 1 << 11, | ||
151 | HTT_RX_RING_FLAGS_CTRL_RX = 1 << 12, | ||
152 | HTT_RX_RING_FLAGS_MGMT_RX = 1 << 13, | ||
153 | HTT_RX_RING_FLAGS_NULL_RX = 1 << 14, | ||
154 | HTT_RX_RING_FLAGS_PHY_DATA_RX = 1 << 15 | ||
155 | }; | ||
156 | |||
157 | struct htt_rx_ring_setup_ring { | ||
158 | __le32 fw_idx_shadow_reg_paddr; | ||
159 | __le32 rx_ring_base_paddr; | ||
160 | __le16 rx_ring_len; /* in 4-byte words */ | ||
161 | __le16 rx_ring_bufsize; /* rx skb size - in bytes */ | ||
162 | __le16 flags; /* %HTT_RX_RING_FLAGS_ */ | ||
163 | __le16 fw_idx_init_val; | ||
164 | |||
165 | /* the following offsets are in 4-byte units */ | ||
166 | __le16 mac80211_hdr_offset; | ||
167 | __le16 msdu_payload_offset; | ||
168 | __le16 ppdu_start_offset; | ||
169 | __le16 ppdu_end_offset; | ||
170 | __le16 mpdu_start_offset; | ||
171 | __le16 mpdu_end_offset; | ||
172 | __le16 msdu_start_offset; | ||
173 | __le16 msdu_end_offset; | ||
174 | __le16 rx_attention_offset; | ||
175 | __le16 frag_info_offset; | ||
176 | } __packed; | ||
177 | |||
178 | struct htt_rx_ring_setup_hdr { | ||
179 | u8 num_rings; /* supported values: 1, 2 */ | ||
180 | __le16 rsvd0; | ||
181 | } __packed; | ||
182 | |||
183 | struct htt_rx_ring_setup { | ||
184 | struct htt_rx_ring_setup_hdr hdr; | ||
185 | struct htt_rx_ring_setup_ring rings[0]; | ||
186 | } __packed; | ||
187 | |||
188 | /* | ||
189 | * htt_stats_req - request target to send specified statistics | ||
190 | * | ||
191 | * @msg_type: hardcoded %HTT_H2T_MSG_TYPE_STATS_REQ | ||
192 | * @upload_types: see %htt_dbg_stats_type. this is actually a 24-bit field, | ||
193 | *                so make sure it's little-endian. | ||
194 | * @reset_types: see %htt_dbg_stats_type. this is actually a 24-bit field, | ||
195 | *               so make sure it's little-endian. | ||
196 | * @cfg_val: stat_type specific configuration | ||
197 | * @stat_type: see %htt_dbg_stats_type | ||
198 | * @cookie_lsb: used for confirmation message from target->host | ||
199 | * @cookie_msb: ditto as %cookie | ||
200 | */ | ||
201 | struct htt_stats_req { | ||
202 | u8 upload_types[3]; | ||
203 | u8 rsvd0; | ||
204 | u8 reset_types[3]; | ||
205 | struct { | ||
206 | u8 mpdu_bytes; | ||
207 | u8 mpdu_num_msdus; | ||
208 | u8 msdu_bytes; | ||
209 | } __packed; | ||
210 | u8 stat_type; | ||
211 | __le32 cookie_lsb; | ||
212 | __le32 cookie_msb; | ||
213 | } __packed; | ||
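
/*
 * Illustrative sketch, not part of the original patch: packing the 24-bit
 * upload_types bitmask little-endian, as the comment above requires. The
 * helper name is hypothetical.
 */
static inline void htt_stats_req_set_upload_types(struct htt_stats_req *req,
						  u32 stats_type_mask)
{
	/* only the low 24 bits are meaningful, stored least significant first */
	req->upload_types[0] = (stats_type_mask >> 0) & 0xff;
	req->upload_types[1] = (stats_type_mask >> 8) & 0xff;
	req->upload_types[2] = (stats_type_mask >> 16) & 0xff;
}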
214 | |||
215 | #define HTT_STATS_REQ_CFG_STAT_TYPE_INVALID 0xff | ||
216 | |||
217 | /* | ||
218 | * htt_oob_sync_req - request out-of-band sync | ||
219 | * | ||
220 | * The HTT SYNC tells the target to suspend processing of subsequent | ||
221 | * HTT host-to-target messages until some other target agent locally | ||
222 | * informs the target HTT FW that the current sync counter is equal to | ||
223 | * or greater than (in a modulo sense) the sync counter specified in | ||
224 | * the SYNC message. | ||
225 | * | ||
226 | * This allows other host-target components to synchronize their operation | ||
227 | * with HTT, e.g. to ensure that tx frames don't get transmitted until a | ||
228 | * security key has been downloaded to and activated by the target. | ||
229 | * In the absence of any explicit synchronization counter value | ||
230 | * specification, the target HTT FW will use zero as the default current | ||
231 | * sync value. | ||
232 | * | ||
233 | * The HTT target FW will suspend its host->target message processing as long | ||
234 | * as 0 < (in-band sync counter - out-of-band sync counter) & 0xff < 128. | ||
235 | */ | ||
236 | struct htt_oob_sync_req { | ||
237 | u8 sync_count; | ||
238 | __le16 rsvd0; | ||
239 | } __packed; | ||
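
/*
 * Illustrative sketch, not part of the original patch: the modulo-sense
 * comparison described above, i.e. the target keeps host->target message
 * processing suspended while 0 < ((in-band - out-of-band) & 0xff) < 128.
 * The helper name is hypothetical.
 */
static inline bool htt_oob_sync_still_suspended(u8 inband_count, u8 oob_count)
{
	u8 delta = (inband_count - oob_count) & 0xff;

	return delta > 0 && delta < 128;
}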
240 | |||
241 | #define HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_MASK 0x1F | ||
242 | #define HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_LSB 0 | ||
243 | |||
244 | struct htt_aggr_conf { | ||
245 | u8 max_num_ampdu_subframes; | ||
246 | union { | ||
247 | /* don't use bitfields; their layout is implementation-defined */ | ||
248 | u8 flags; /* see %HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_ */ | ||
249 | u8 max_num_amsdu_subframes:5; | ||
250 | } __packed; | ||
251 | } __packed; | ||
252 | |||
253 | #define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32 | ||
254 | |||
255 | struct htt_mgmt_tx_desc { | ||
256 | u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)]; | ||
257 | __le32 msdu_paddr; | ||
258 | __le32 desc_id; | ||
259 | __le32 len; | ||
260 | __le32 vdev_id; | ||
261 | u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN]; | ||
262 | } __packed; | ||
263 | |||
264 | enum htt_mgmt_tx_status { | ||
265 | HTT_MGMT_TX_STATUS_OK = 0, | ||
266 | HTT_MGMT_TX_STATUS_RETRY = 1, | ||
267 | HTT_MGMT_TX_STATUS_DROP = 2 | ||
268 | }; | ||
269 | |||
270 | /*=== target -> host messages ===============================================*/ | ||
271 | |||
272 | |||
273 | enum htt_t2h_msg_type { | ||
274 | HTT_T2H_MSG_TYPE_VERSION_CONF = 0x0, | ||
275 | HTT_T2H_MSG_TYPE_RX_IND = 0x1, | ||
276 | HTT_T2H_MSG_TYPE_RX_FLUSH = 0x2, | ||
277 | HTT_T2H_MSG_TYPE_PEER_MAP = 0x3, | ||
278 | HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4, | ||
279 | HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5, | ||
280 | HTT_T2H_MSG_TYPE_RX_DELBA = 0x6, | ||
281 | HTT_T2H_MSG_TYPE_TX_COMPL_IND = 0x7, | ||
282 | HTT_T2H_MSG_TYPE_PKTLOG = 0x8, | ||
283 | HTT_T2H_MSG_TYPE_STATS_CONF = 0x9, | ||
284 | HTT_T2H_MSG_TYPE_RX_FRAG_IND = 0xa, | ||
285 | HTT_T2H_MSG_TYPE_SEC_IND = 0xb, | ||
286 | HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, | ||
287 | HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd, | ||
288 | HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe, | ||
289 | HTT_T2H_MSG_TYPE_TEST, | ||
290 | /* keep this last */ | ||
291 | HTT_T2H_NUM_MSGS | ||
292 | }; | ||
293 | |||
294 | /* | ||
295 | * htt_resp_hdr - header for target-to-host messages | ||
296 | * | ||
297 | * msg_type: see htt_t2h_msg_type | ||
298 | */ | ||
299 | struct htt_resp_hdr { | ||
300 | u8 msg_type; | ||
301 | } __packed; | ||
302 | |||
303 | #define HTT_RESP_HDR_MSG_TYPE_OFFSET 0 | ||
304 | #define HTT_RESP_HDR_MSG_TYPE_MASK 0xff | ||
305 | #define HTT_RESP_HDR_MSG_TYPE_LSB 0 | ||
306 | |||
307 | /* htt_ver_resp - response sent for htt_ver_req */ | ||
308 | struct htt_ver_resp { | ||
309 | u8 minor; | ||
310 | u8 major; | ||
311 | u8 rsvd0; | ||
312 | } __packed; | ||
313 | |||
314 | struct htt_mgmt_tx_completion { | ||
315 | u8 rsvd0; | ||
316 | u8 rsvd1; | ||
317 | u8 rsvd2; | ||
318 | __le32 desc_id; | ||
319 | __le32 status; | ||
320 | } __packed; | ||
321 | |||
322 | #define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x3F) | ||
323 | #define HTT_RX_INDICATION_INFO0_EXT_TID_LSB (0) | ||
324 | #define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 6) | ||
325 | #define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 7) | ||
326 | |||
327 | #define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK 0x0000003F | ||
328 | #define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB 0 | ||
329 | #define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_MASK 0x00000FC0 | ||
330 | #define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_LSB 6 | ||
331 | #define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_MASK 0x0003F000 | ||
332 | #define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_LSB 12 | ||
333 | #define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_MASK 0x00FC0000 | ||
334 | #define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_LSB 18 | ||
335 | #define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK 0xFF000000 | ||
336 | #define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB 24 | ||
337 | |||
338 | struct htt_rx_indication_hdr { | ||
339 | u8 info0; /* %HTT_RX_INDICATION_INFO0_ */ | ||
340 | __le16 peer_id; | ||
341 | __le32 info1; /* %HTT_RX_INDICATION_INFO1_ */ | ||
342 | } __packed; | ||
343 | |||
344 | #define HTT_RX_INDICATION_INFO0_PHY_ERR_VALID (1 << 0) | ||
345 | #define HTT_RX_INDICATION_INFO0_LEGACY_RATE_MASK (0x1E) | ||
346 | #define HTT_RX_INDICATION_INFO0_LEGACY_RATE_LSB (1) | ||
347 | #define HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK (1 << 5) | ||
348 | #define HTT_RX_INDICATION_INFO0_END_VALID (1 << 6) | ||
349 | #define HTT_RX_INDICATION_INFO0_START_VALID (1 << 7) | ||
350 | |||
351 | #define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_MASK 0x00FFFFFF | ||
352 | #define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_LSB 0 | ||
353 | #define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_MASK 0xFF000000 | ||
354 | #define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_LSB 24 | ||
355 | |||
356 | #define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_MASK 0x00FFFFFF | ||
357 | #define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_LSB 0 | ||
358 | #define HTT_RX_INDICATION_INFO2_SERVICE_MASK 0xFF000000 | ||
359 | #define HTT_RX_INDICATION_INFO2_SERVICE_LSB 24 | ||
360 | |||
361 | enum htt_rx_legacy_rate { | ||
362 | HTT_RX_OFDM_48 = 0, | ||
363 | HTT_RX_OFDM_24 = 1, | ||
364 | HTT_RX_OFDM_12, | ||
365 | HTT_RX_OFDM_6, | ||
366 | HTT_RX_OFDM_54, | ||
367 | HTT_RX_OFDM_36, | ||
368 | HTT_RX_OFDM_18, | ||
369 | HTT_RX_OFDM_9, | ||
370 | |||
371 | /* long preamble */ | ||
372 | HTT_RX_CCK_11_LP = 0, | ||
373 | HTT_RX_CCK_5_5_LP = 1, | ||
374 | HTT_RX_CCK_2_LP, | ||
375 | HTT_RX_CCK_1_LP, | ||
376 | /* short preamble */ | ||
377 | HTT_RX_CCK_11_SP, | ||
378 | HTT_RX_CCK_5_5_SP, | ||
379 | HTT_RX_CCK_2_SP | ||
380 | }; | ||
381 | |||
382 | enum htt_rx_legacy_rate_type { | ||
383 | HTT_RX_LEGACY_RATE_OFDM = 0, | ||
384 | HTT_RX_LEGACY_RATE_CCK | ||
385 | }; | ||
386 | |||
387 | enum htt_rx_preamble_type { | ||
388 | HTT_RX_LEGACY = 0x4, | ||
389 | HTT_RX_HT = 0x8, | ||
390 | HTT_RX_HT_WITH_TXBF = 0x9, | ||
391 | HTT_RX_VHT = 0xC, | ||
392 | HTT_RX_VHT_WITH_TXBF = 0xD, | ||
393 | }; | ||
394 | |||
395 | /* | ||
396 | * Fields: phy_err_valid, phy_err_code, tsf, | ||
397 | * usec_timestamp, sub_usec_timestamp | ||
398 | * ..are valid only if end_valid == 1. | ||
399 | * | ||
400 | * Fields: rssi_chains, legacy_rate_type, | ||
401 | * legacy_rate_cck, preamble_type, service, | ||
402 | * vht_sig_* | ||
403 | * ..are valid only if start_valid == 1. | ||

404 | */ | ||
405 | struct htt_rx_indication_ppdu { | ||
406 | u8 combined_rssi; | ||
407 | u8 sub_usec_timestamp; | ||
408 | u8 phy_err_code; | ||
409 | u8 info0; /* HTT_RX_INDICATION_INFO0_ */ | ||
410 | struct { | ||
411 | u8 pri20_db; | ||
412 | u8 ext20_db; | ||
413 | u8 ext40_db; | ||
414 | u8 ext80_db; | ||
415 | } __packed rssi_chains[4]; | ||
416 | __le32 tsf; | ||
417 | __le32 usec_timestamp; | ||
418 | __le32 info1; /* HTT_RX_INDICATION_INFO1_ */ | ||
419 | __le32 info2; /* HTT_RX_INDICATION_INFO2_ */ | ||
420 | } __packed; | ||
421 | |||
422 | enum htt_rx_mpdu_status { | ||
423 | HTT_RX_IND_MPDU_STATUS_UNKNOWN = 0x0, | ||
424 | HTT_RX_IND_MPDU_STATUS_OK, | ||
425 | HTT_RX_IND_MPDU_STATUS_ERR_FCS, | ||
426 | HTT_RX_IND_MPDU_STATUS_ERR_DUP, | ||
427 | HTT_RX_IND_MPDU_STATUS_ERR_REPLAY, | ||
428 | HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER, | ||
429 | /* only accept EAPOL frames */ | ||
430 | HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER, | ||
431 | HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC, | ||
432 | /* Non-data in promiscuous mode */ | ||
433 | HTT_RX_IND_MPDU_STATUS_MGMT_CTRL, | ||
434 | HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR, | ||
435 | HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR, | ||
436 | HTT_RX_IND_MPDU_STATUS_MPDU_LENGTH_ERR, | ||
437 | HTT_RX_IND_MPDU_STATUS_ENCRYPT_REQUIRED_ERR, | ||
438 | HTT_RX_IND_MPDU_STATUS_PRIVACY_ERR, | ||
439 | |||
440 | /* | ||
441 | * MISC: discard for unspecified reasons. | ||
442 | * Leave this enum value last. | ||
443 | */ | ||
444 | HTT_RX_IND_MPDU_STATUS_ERR_MISC = 0xFF | ||
445 | }; | ||
446 | |||
447 | struct htt_rx_indication_mpdu_range { | ||
448 | u8 mpdu_count; | ||
449 | u8 mpdu_range_status; /* %htt_rx_mpdu_status */ | ||
450 | u8 pad0; | ||
451 | u8 pad1; | ||
452 | } __packed; | ||
453 | |||
454 | struct htt_rx_indication_prefix { | ||
455 | __le16 fw_rx_desc_bytes; | ||
456 | u8 pad0; | ||
457 | u8 pad1; | ||
458 | }; | ||
459 | |||
460 | struct htt_rx_indication { | ||
461 | struct htt_rx_indication_hdr hdr; | ||
462 | struct htt_rx_indication_ppdu ppdu; | ||
463 | struct htt_rx_indication_prefix prefix; | ||
464 | |||
465 | /* | ||
466 | * the following fields are both dynamically sized, so | ||
467 | * take care addressing them | ||
468 | */ | ||
469 | |||
470 | /* the size of this is %fw_rx_desc_bytes */ | ||
471 | struct fw_rx_desc_base fw_desc; | ||
472 | |||
473 | /* | ||
474 | * %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4) | ||
475 | * and has %num_mpdu_ranges elements. | ||
476 | */ | ||
477 | struct htt_rx_indication_mpdu_range mpdu_ranges[0]; | ||
478 | } __packed; | ||
479 | |||
480 | static inline struct htt_rx_indication_mpdu_range * | ||
481 | htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind) | ||
482 | { | ||
483 | void *ptr = rx_ind; | ||
484 | |||
485 | ptr += sizeof(rx_ind->hdr) | ||
486 | + sizeof(rx_ind->ppdu) | ||
487 | + sizeof(rx_ind->prefix) | ||
488 | + roundup(__le16_to_cpu(rx_ind->prefix.fw_rx_desc_bytes), 4); | ||
489 | return ptr; | ||
490 | } | ||
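A usage sketch for the accessor above, assuming num_mpdu_ranges was already extracted from hdr.info1 and using the kernel's pr_debug() purely for illustration:

static void htt_rx_ind_dump_ranges(struct htt_rx_indication *rx_ind,
				   int num_mpdu_ranges)
{
	struct htt_rx_indication_mpdu_range *ranges =
		htt_rx_ind_get_mpdu_ranges(rx_ind);
	int i;

	for (i = 0; i < num_mpdu_ranges; i++)
		pr_debug("range %d: %d mpdus, status %d\n", i,
			 ranges[i].mpdu_count, ranges[i].mpdu_range_status);
}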
491 | |||
492 | enum htt_rx_flush_mpdu_status { | ||
493 | HTT_RX_FLUSH_MPDU_DISCARD = 0, | ||
494 | HTT_RX_FLUSH_MPDU_REORDER = 1, | ||
495 | }; | ||
496 | |||
497 | /* | ||
498 | * htt_rx_flush - discard or reorder given range of mpdus | ||
499 | * | ||
500 | * Note: host must check if all sequence numbers between | ||
501 | * [seq_num_start, seq_num_end-1] are valid. | ||
502 | */ | ||
503 | struct htt_rx_flush { | ||
504 | __le16 peer_id; | ||
505 | u8 tid; | ||
506 | u8 rsvd0; | ||
507 | u8 mpdu_status; /* %htt_rx_flush_mpdu_status */ | ||
508 | u8 seq_num_start; /* it is 6 LSBs of 802.11 seq no */ | ||
509 | u8 seq_num_end; /* it is 6 LSBs of 802.11 seq no */ | ||
510 | }; | ||
511 | |||
512 | struct htt_rx_peer_map { | ||
513 | u8 vdev_id; | ||
514 | __le16 peer_id; | ||
515 | u8 addr[6]; | ||
516 | u8 rsvd0; | ||
517 | u8 rsvd1; | ||
518 | } __packed; | ||
519 | |||
520 | struct htt_rx_peer_unmap { | ||
521 | u8 rsvd0; | ||
522 | __le16 peer_id; | ||
523 | } __packed; | ||
524 | |||
525 | enum htt_security_types { | ||
526 | HTT_SECURITY_NONE, | ||
527 | HTT_SECURITY_WEP128, | ||
528 | HTT_SECURITY_WEP104, | ||
529 | HTT_SECURITY_WEP40, | ||
530 | HTT_SECURITY_TKIP, | ||
531 | HTT_SECURITY_TKIP_NOMIC, | ||
532 | HTT_SECURITY_AES_CCMP, | ||
533 | HTT_SECURITY_WAPI, | ||
534 | |||
535 | HTT_NUM_SECURITY_TYPES /* keep this last! */ | ||
536 | }; | ||
537 | |||
538 | enum htt_security_flags { | ||
539 | #define HTT_SECURITY_TYPE_MASK 0x7F | ||
540 | #define HTT_SECURITY_TYPE_LSB 0 | ||
541 | HTT_SECURITY_IS_UNICAST = 1 << 7 | ||
542 | }; | ||
543 | |||
544 | struct htt_security_indication { | ||
545 | union { | ||
546 | /* don't use bitfields; undefined behaviour */ | ||
547 | u8 flags; /* %htt_security_flags */ | ||
548 | struct { | ||
549 | u8 security_type:7, /* %htt_security_types */ | ||
550 | is_unicast:1; | ||
551 | } __packed; | ||
552 | } __packed; | ||
553 | __le16 peer_id; | ||
554 | u8 michael_key[8]; | ||
555 | u8 wapi_rsc[16]; | ||
556 | } __packed; | ||
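Following the note above about avoiding bitfields, a sketch of decoding the flags octet with the mask definitions instead (the helper names are hypothetical):

static inline enum htt_security_types
htt_sec_ind_type(const struct htt_security_indication *ind)
{
	return (ind->flags & HTT_SECURITY_TYPE_MASK) >> HTT_SECURITY_TYPE_LSB;
}

static inline bool
htt_sec_ind_is_unicast(const struct htt_security_indication *ind)
{
	return !!(ind->flags & HTT_SECURITY_IS_UNICAST);
}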
557 | |||
558 | #define HTT_RX_BA_INFO0_TID_MASK 0x000F | ||
559 | #define HTT_RX_BA_INFO0_TID_LSB 0 | ||
560 | #define HTT_RX_BA_INFO0_PEER_ID_MASK 0xFFF0 | ||
561 | #define HTT_RX_BA_INFO0_PEER_ID_LSB 4 | ||
562 | |||
563 | struct htt_rx_addba { | ||
564 | u8 window_size; | ||
565 | __le16 info0; /* %HTT_RX_BA_INFO0_ */ | ||
566 | } __packed; | ||
567 | |||
568 | struct htt_rx_delba { | ||
569 | u8 rsvd0; | ||
570 | __le16 info0; /* %HTT_RX_BA_INFO0_ */ | ||
571 | } __packed; | ||
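A small sketch (hypothetical helper) of pulling the TID and peer id out of the shared INFO0 word used by both the ADDBA and DELBA events:

static inline void htt_rx_ba_decode(__le16 info0, int *tid, int *peer_id)
{
	u16 v = __le16_to_cpu(info0);

	*tid = (v & HTT_RX_BA_INFO0_TID_MASK) >> HTT_RX_BA_INFO0_TID_LSB;
	*peer_id = (v & HTT_RX_BA_INFO0_PEER_ID_MASK) >>
		   HTT_RX_BA_INFO0_PEER_ID_LSB;
}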
572 | |||
573 | enum htt_data_tx_status { | ||
574 | HTT_DATA_TX_STATUS_OK = 0, | ||
575 | HTT_DATA_TX_STATUS_DISCARD = 1, | ||
576 | HTT_DATA_TX_STATUS_NO_ACK = 2, | ||
577 | HTT_DATA_TX_STATUS_POSTPONE = 3, /* HL only */ | ||
578 | HTT_DATA_TX_STATUS_DOWNLOAD_FAIL = 128 | ||
579 | }; | ||
580 | |||
581 | enum htt_data_tx_flags { | ||
582 | #define HTT_DATA_TX_STATUS_MASK 0x07 | ||
583 | #define HTT_DATA_TX_STATUS_LSB 0 | ||
584 | #define HTT_DATA_TX_TID_MASK 0x78 | ||
585 | #define HTT_DATA_TX_TID_LSB 3 | ||
586 | HTT_DATA_TX_TID_INVALID = 1 << 7 | ||
587 | }; | ||
588 | |||
589 | #define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF | ||
590 | |||
591 | struct htt_data_tx_completion { | ||
592 | union { | ||
593 | u8 flags; | ||
594 | struct { | ||
595 | u8 status:3, | ||
596 | tid:4, | ||
597 | tid_invalid:1; | ||
598 | } __packed; | ||
599 | } __packed; | ||
600 | u8 num_msdus; | ||
601 | u8 rsvd0; | ||
602 | __le16 msdus[0]; /* variable length based on %num_msdus */ | ||
603 | } __packed; | ||
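A hedged walk over one data tx completion using the flag masks above; the logging is illustrative only:

static void htt_data_tx_compl_dump(const struct htt_data_tx_completion *ev)
{
	int status = (ev->flags & HTT_DATA_TX_STATUS_MASK) >>
		     HTT_DATA_TX_STATUS_LSB;
	int i;

	for (i = 0; i < ev->num_msdus; i++) {
		u16 msdu_id = __le16_to_cpu(ev->msdus[i]);

		if (msdu_id == HTT_TX_COMPL_INV_MSDU_ID)
			continue;
		pr_debug("msdu %d completed, status %d\n", msdu_id, status);
	}
}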
604 | |||
605 | struct htt_tx_compl_ind_base { | ||
606 | u32 hdr; | ||
607 | u16 payload[1/*or more*/]; | ||
608 | } __packed; | ||
609 | |||
610 | struct htt_rc_tx_done_params { | ||
611 | u32 rate_code; | ||
612 | u32 rate_code_flags; | ||
613 | u32 flags; | ||
614 | u32 num_enqued; /* 1 for non-AMPDU */ | ||
615 | u32 num_retries; | ||
616 | u32 num_failed; /* for AMPDU */ | ||
617 | u32 ack_rssi; | ||
618 | u32 time_stamp; | ||
619 | u32 is_probe; | ||
620 | }; | ||
621 | |||
622 | struct htt_rc_update { | ||
623 | u8 vdev_id; | ||
624 | __le16 peer_id; | ||
625 | u8 addr[6]; | ||
626 | u8 num_elems; | ||
627 | u8 rsvd0; | ||
628 | struct htt_rc_tx_done_params params[0]; /* variable length %num_elems */ | ||
629 | } __packed; | ||
630 | |||
631 | /* see htt_rx_indication for similar fields and descriptions */ | ||
632 | struct htt_rx_fragment_indication { | ||
633 | union { | ||
634 | u8 info0; /* %HTT_RX_FRAG_IND_INFO0_ */ | ||
635 | struct { | ||
636 | u8 ext_tid:5, | ||
637 | flush_valid:1; | ||
638 | } __packed; | ||
639 | } __packed; | ||
640 | __le16 peer_id; | ||
641 | __le32 info1; /* %HTT_RX_FRAG_IND_INFO1_ */ | ||
642 | __le16 fw_rx_desc_bytes; | ||
643 | __le16 rsvd0; | ||
644 | |||
645 | u8 fw_msdu_rx_desc[0]; | ||
646 | } __packed; | ||
647 | |||
648 | #define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK 0x1F | ||
649 | #define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB 0 | ||
650 | #define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK 0x20 | ||
651 | #define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_LSB 5 | ||
652 | |||
653 | #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_MASK 0x0000003F | ||
654 | #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_LSB 0 | ||
655 | #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0 | ||
656 | #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6 | ||
657 | |||
658 | /* | ||
659 | * target -> host test message definition | ||
660 | * | ||
661 | * The following field definitions describe the format of the test | ||
662 | * message sent from the target to the host. | ||
663 | * The message consists of a 4-octet header, followed by a variable | ||
664 | * number of 32-bit integer values, followed by a variable number | ||
665 | * of 8-bit character values. | ||
666 | * | ||
667 | * |31 16|15 8|7 0| | ||
668 | * |-----------------------------------------------------------| | ||
669 | * | num chars | num ints | msg type | | ||
670 | * |-----------------------------------------------------------| | ||
671 | * | int 0 | | ||
672 | * |-----------------------------------------------------------| | ||
673 | * | int 1 | | ||
674 | * |-----------------------------------------------------------| | ||
675 | * | ... | | ||
676 | * |-----------------------------------------------------------| | ||
677 | * | char 3 | char 2 | char 1 | char 0 | | ||
678 | * |-----------------------------------------------------------| | ||
679 | * | | | ... | char 4 | | ||
680 | * |-----------------------------------------------------------| | ||
681 | * - MSG_TYPE | ||
682 | * Bits 7:0 | ||
683 | * Purpose: identifies this as a test message | ||
684 | * Value: HTT_MSG_TYPE_TEST | ||
685 | * - NUM_INTS | ||
686 | * Bits 15:8 | ||
687 | * Purpose: indicate how many 32-bit integers follow the message header | ||
688 | * - NUM_CHARS | ||
689 | * Bits 31:16 | ||
690 | * Purpose: indicate how many 8-bit characters follow the series of integers | ||
691 | */ | ||
692 | struct htt_rx_test { | ||
693 | u8 num_ints; | ||
694 | __le16 num_chars; | ||
695 | |||
696 | /* payload consists of 2 lists: | ||
697 | * a) num_ints * sizeof(__le32) | ||
698 | * b) num_chars * sizeof(u8) aligned to 4 bytes */ | ||
699 | u8 payload[0]; | ||
700 | } __packed; | ||
701 | |||
702 | static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test) | ||
703 | { | ||
704 | return (__le32 *)rx_test->payload; | ||
705 | } | ||
706 | |||
707 | static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test) | ||
708 | { | ||
709 | return rx_test->payload + (rx_test->num_ints * sizeof(__le32)); | ||
710 | } | ||
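A usage sketch for the two accessors above; num_ints and num_chars come straight from the struct, and pr_debug() is used just for illustration:

static void htt_rx_test_dump(struct htt_rx_test *rx_test)
{
	__le32 *ints = htt_rx_test_get_ints(rx_test);
	u8 *chars = htt_rx_test_get_chars(rx_test);
	int i;

	for (i = 0; i < rx_test->num_ints; i++)
		pr_debug("int %d: %u\n", i, __le32_to_cpu(ints[i]));

	for (i = 0; i < __le16_to_cpu(rx_test->num_chars); i++)
		pr_debug("char %d: 0x%02x\n", i, chars[i]);
}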
711 | |||
712 | /* | ||
713 | * target -> host packet log message | ||
714 | * | ||
715 | * The following field definitions describe the format of the packet log | ||
716 | * message sent from the target to the host. | ||
717 | * The message consists of a 4-octet header, followed by a variable number | ||
718 | * of 32-bit payload values. | ||
719 | * | ||
720 | * |31 24|23 16|15 8|7 0| | ||
721 | * |-----------------------------------------------------------| | ||
722 | * | | | | msg type | | ||
723 | * |-----------------------------------------------------------| | ||
724 | * | payload | | ||
725 | * |-----------------------------------------------------------| | ||
726 | * - MSG_TYPE | ||
727 | * Bits 7:0 | ||
728 | * Purpose: identifies this as a packet log message | ||
729 | * Value: HTT_MSG_TYPE_PACKETLOG | ||
730 | */ | ||
731 | struct htt_pktlog_msg { | ||
732 | u8 pad[3]; | ||
733 | __le32 payload[1 /* or more */]; | ||
734 | } __packed; | ||
735 | |||
736 | struct htt_dbg_stats_rx_reorder_stats { | ||
737 | /* Non QoS MPDUs received */ | ||
738 | __le32 deliver_non_qos; | ||
739 | |||
740 | /* MPDUs received in-order */ | ||
741 | __le32 deliver_in_order; | ||
742 | |||
743 | /* Flush due to reorder timer expired */ | ||
744 | __le32 deliver_flush_timeout; | ||
745 | |||
746 | /* Flush due to move out of window */ | ||
747 | __le32 deliver_flush_oow; | ||
748 | |||
749 | /* Flush due to DELBA */ | ||
750 | __le32 deliver_flush_delba; | ||
751 | |||
752 | /* MPDUs dropped due to FCS error */ | ||
753 | __le32 fcs_error; | ||
754 | |||
755 | /* MPDUs dropped due to monitor mode non-data packet */ | ||
756 | __le32 mgmt_ctrl; | ||
757 | |||
758 | /* MPDUs dropped due to invalid peer */ | ||
759 | __le32 invalid_peer; | ||
760 | |||
761 | /* MPDUs dropped due to duplication (non aggregation) */ | ||
762 | __le32 dup_non_aggr; | ||
763 | |||
764 | /* MPDUs dropped because they were processed before */ | ||
765 | __le32 dup_past; | ||
766 | |||
767 | /* MPDUs dropped due to duplicate in reorder queue */ | ||
768 | __le32 dup_in_reorder; | ||
769 | |||
770 | /* Reorder timeout happened */ | ||
771 | __le32 reorder_timeout; | ||
772 | |||
773 | /* invalid bar ssn */ | ||
774 | __le32 invalid_bar_ssn; | ||
775 | |||
776 | /* reorder reset due to bar ssn */ | ||
777 | __le32 ssn_reset; | ||
778 | }; | ||
779 | |||
780 | struct htt_dbg_stats_wal_tx_stats { | ||
781 | /* Num HTT cookies queued to dispatch list */ | ||
782 | __le32 comp_queued; | ||
783 | |||
784 | /* Num HTT cookies dispatched */ | ||
785 | __le32 comp_delivered; | ||
786 | |||
787 | /* Num MSDU queued to WAL */ | ||
788 | __le32 msdu_enqued; | ||
789 | |||
790 | /* Num MPDU queue to WAL */ | ||
791 | __le32 mpdu_enqued; | ||
792 | |||
793 | /* Num MSDUs dropped by WMM limit */ | ||
794 | __le32 wmm_drop; | ||
795 | |||
796 | /* Num Local frames queued */ | ||
797 | __le32 local_enqued; | ||
798 | |||
799 | /* Num Local frames done */ | ||
800 | __le32 local_freed; | ||
801 | |||
802 | /* Num queued to HW */ | ||
803 | __le32 hw_queued; | ||
804 | |||
805 | /* Num PPDU reaped from HW */ | ||
806 | __le32 hw_reaped; | ||
807 | |||
808 | /* Num underruns */ | ||
809 | __le32 underrun; | ||
810 | |||
811 | /* Num PPDUs cleaned up in TX abort */ | ||
812 | __le32 tx_abort; | ||
813 | |||
814 | /* Num MPDUs requeued by SW */ | ||
815 | __le32 mpdus_requed; | ||
816 | |||
817 | /* excessive retries */ | ||
818 | __le32 tx_ko; | ||
819 | |||
820 | /* data hw rate code */ | ||
821 | __le32 data_rc; | ||
822 | |||
823 | /* Scheduler self triggers */ | ||
824 | __le32 self_triggers; | ||
825 | |||
826 | /* frames dropped due to excessive sw retries */ | ||
827 | __le32 sw_retry_failure; | ||
828 | |||
829 | /* illegal rate phy errors */ | ||
830 | __le32 illgl_rate_phy_err; | ||
831 | |||
832 | /* wal pdev continuous xretry */ | ||
833 | __le32 pdev_cont_xretry; | ||
834 | |||
835 | /* wal pdev tx timeouts */ | ||
836 | __le32 pdev_tx_timeout; | ||
837 | |||
838 | /* wal pdev resets */ | ||
839 | __le32 pdev_resets; | ||
840 | |||
841 | __le32 phy_underrun; | ||
842 | |||
843 | /* MPDU is more than txop limit */ | ||
844 | __le32 txop_ovf; | ||
845 | } __packed; | ||
846 | |||
847 | struct htt_dbg_stats_wal_rx_stats { | ||
848 | /* Counts any change in ring routing mid-ppdu */ | ||
849 | __le32 mid_ppdu_route_change; | ||
850 | |||
851 | /* Total number of statuses processed */ | ||
852 | __le32 status_rcvd; | ||
853 | |||
854 | /* Extra frags on rings 0-3 */ | ||
855 | __le32 r0_frags; | ||
856 | __le32 r1_frags; | ||
857 | __le32 r2_frags; | ||
858 | __le32 r3_frags; | ||
859 | |||
860 | /* MSDUs / MPDUs delivered to HTT */ | ||
861 | __le32 htt_msdus; | ||
862 | __le32 htt_mpdus; | ||
863 | |||
864 | /* MSDUs / MPDUs delivered to local stack */ | ||
865 | __le32 loc_msdus; | ||
866 | __le32 loc_mpdus; | ||
867 | |||
868 | /* AMSDUs that have more MSDUs than the status ring size */ | ||
869 | __le32 oversize_amsdu; | ||
870 | |||
871 | /* Number of PHY errors */ | ||
872 | __le32 phy_errs; | ||
873 | |||
874 | /* Number of PHY errors drops */ | ||
875 | __le32 phy_err_drop; | ||
876 | |||
877 | /* Number of mpdu errors - FCS, MIC, ENC etc. */ | ||
878 | __le32 mpdu_errs; | ||
879 | } __packed; | ||
880 | |||
881 | struct htt_dbg_stats_wal_peer_stats { | ||
882 | __le32 dummy; /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */ | ||
883 | } __packed; | ||
884 | |||
885 | struct htt_dbg_stats_wal_pdev_txrx { | ||
886 | struct htt_dbg_stats_wal_tx_stats tx_stats; | ||
887 | struct htt_dbg_stats_wal_rx_stats rx_stats; | ||
888 | struct htt_dbg_stats_wal_peer_stats peer_stats; | ||
889 | } __packed; | ||
890 | |||
891 | struct htt_dbg_stats_rx_rate_info { | ||
892 | __le32 mcs[10]; | ||
893 | __le32 sgi[10]; | ||
894 | __le32 nss[4]; | ||
895 | __le32 stbc[10]; | ||
896 | __le32 bw[3]; | ||
897 | __le32 pream[6]; | ||
898 | __le32 ldpc; | ||
899 | __le32 txbf; | ||
900 | }; | ||
901 | |||
902 | /* | ||
903 | * htt_dbg_stats_status - | ||
904 | * present - The requested stats have been delivered in full. | ||
905 | * This indicates that either the stats information was contained | ||
906 | * in its entirety within this message, or else this message | ||
907 | * completes the delivery of the requested stats info that was | ||
908 | * partially delivered through earlier STATS_CONF messages. | ||
909 | * partial - The requested stats have been delivered in part. | ||
910 | * One or more subsequent STATS_CONF messages with the same | ||
911 | * cookie value will be sent to deliver the remainder of the | ||
912 | * information. | ||
913 | * error - The requested stats could not be delivered, for example due | ||
914 | * to a shortage of memory to construct a message holding the | ||
915 | * requested stats. | ||
916 | * invalid - The requested stat type is either not recognized, or the | ||
917 | * target is configured to not gather the stats type in question. | ||
918 | * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - | ||
919 | * series_done - This special value indicates that no further stats info | ||
920 | * elements are present within a series of stats info elems | ||
921 | * (within a stats upload confirmation message). | ||
922 | */ | ||
923 | enum htt_dbg_stats_status { | ||
924 | HTT_DBG_STATS_STATUS_PRESENT = 0, | ||
925 | HTT_DBG_STATS_STATUS_PARTIAL = 1, | ||
926 | HTT_DBG_STATS_STATUS_ERROR = 2, | ||
927 | HTT_DBG_STATS_STATUS_INVALID = 3, | ||
928 | HTT_DBG_STATS_STATUS_SERIES_DONE = 7 | ||
929 | }; | ||
930 | |||
931 | /* | ||
932 | * target -> host statistics upload | ||
933 | * | ||
934 | * The following field definitions describe the format of the HTT target | ||
935 | * to host stats upload confirmation message. | ||
936 | * The message contains a cookie echoed from the HTT host->target stats | ||
937 | * upload request, which identifies which request the confirmation is | ||
938 | * for, and a series of tag-length-value stats information elements. | ||
939 | * The tag-length header for each stats info element also includes a | ||
940 | * status field, to indicate whether the request for the stat type in | ||
941 | * question was fully met, partially met, unable to be met, or invalid | ||
942 | * (if the stat type in question is disabled in the target). | ||
943 | * A special value of all 1's in this status field is used to indicate | ||
944 | * the end of the series of stats info elements. | ||
945 | * | ||
946 | * | ||
947 | * |31 16|15 8|7 5|4 0| | ||
948 | * |------------------------------------------------------------| | ||
949 | * | reserved | msg type | | ||
950 | * |------------------------------------------------------------| | ||
951 | * | cookie LSBs | | ||
952 | * |------------------------------------------------------------| | ||
953 | * | cookie MSBs | | ||
954 | * |------------------------------------------------------------| | ||
955 | * | stats entry length | reserved | S |stat type| | ||
956 | * |------------------------------------------------------------| | ||
957 | * | | | ||
958 | * | type-specific stats info | | ||
959 | * | | | ||
960 | * |------------------------------------------------------------| | ||
961 | * | stats entry length | reserved | S |stat type| | ||
962 | * |------------------------------------------------------------| | ||
963 | * | | | ||
964 | * | type-specific stats info | | ||
965 | * | | | ||
966 | * |------------------------------------------------------------| | ||
967 | * | n/a | reserved | 111 | n/a | | ||
968 | * |------------------------------------------------------------| | ||
969 | * Header fields: | ||
970 | * - MSG_TYPE | ||
971 | * Bits 7:0 | ||
972 | * Purpose: identifies this is a statistics upload confirmation message | ||
973 | * Value: 0x9 | ||
974 | * - COOKIE_LSBS | ||
975 | * Bits 31:0 | ||
976 | * Purpose: Provide a mechanism to match a target->host stats confirmation | ||
977 | * message with its preceding host->target stats request message. | ||
978 | * Value: LSBs of the opaque cookie specified by the host-side requestor | ||
979 | * - COOKIE_MSBS | ||
980 | * Bits 31:0 | ||
981 | * Purpose: Provide a mechanism to match a target->host stats confirmation | ||
982 | * message with its preceding host->target stats request message. | ||
983 | * Value: MSBs of the opaque cookie specified by the host-side requestor | ||
984 | * | ||
985 | * Stats Information Element tag-length header fields: | ||
986 | * - STAT_TYPE | ||
987 | * Bits 4:0 | ||
988 | * Purpose: identifies the type of statistics info held in the | ||
989 | * following information element | ||
990 | * Value: htt_dbg_stats_type | ||
991 | * - STATUS | ||
992 | * Bits 7:5 | ||
993 | * Purpose: indicate whether the requested stats are present | ||
994 | * Value: htt_dbg_stats_status, including a special value (0x7) to mark | ||
995 | * the completion of the stats entry series | ||
996 | * - LENGTH | ||
997 | * Bits 31:16 | ||
998 | * Purpose: indicate the stats information size | ||
999 | * Value: This field specifies the number of bytes of stats information | ||
1000 | * that follows the element tag-length header. | ||
1001 | * It is expected but not required that this length is a multiple of | ||
1002 | * 4 bytes. Even if the length is not an integer multiple of 4, the | ||
1003 | * subsequent stats entry header will begin on a 4-byte aligned | ||
1004 | * boundary. | ||
1005 | */ | ||
1006 | |||
1007 | #define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_MASK 0x1F | ||
1008 | #define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_LSB 0 | ||
1009 | #define HTT_STATS_CONF_ITEM_INFO_STATUS_MASK 0xE0 | ||
1010 | #define HTT_STATS_CONF_ITEM_INFO_STATUS_LSB 5 | ||
1011 | |||
1012 | struct htt_stats_conf_item { | ||
1013 | union { | ||
1014 | u8 info; | ||
1015 | struct { | ||
1016 | u8 stat_type:5; /* %HTT_DBG_STATS_ */ | ||
1017 | u8 status:3; /* %HTT_DBG_STATS_STATUS_ */ | ||
1018 | } __packed; | ||
1019 | } __packed; | ||
1020 | u8 pad; | ||
1021 | __le16 length; | ||
1022 | u8 payload[0]; /* roundup(length, 4) long */ | ||
1023 | } __packed; | ||
1024 | |||
1025 | struct htt_stats_conf { | ||
1026 | u8 pad[3]; | ||
1027 | __le32 cookie_lsb; | ||
1028 | __le32 cookie_msb; | ||
1029 | |||
1030 | /* each item has variable length! */ | ||
1031 | struct htt_stats_conf_item items[0]; | ||
1032 | } __packed; | ||
1033 | |||
1034 | static inline struct htt_stats_conf_item *htt_stats_conf_next_item( | ||
1035 | const struct htt_stats_conf_item *item) | ||
1036 | { | ||
1037 | return (void *)item + sizeof(*item) + roundup(__le16_to_cpu(item->length), 4); | ||
1038 | } | ||
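An illustrative walk over the item series, stopping at the all-ones status that marks series completion; a real handler would also bound the loop by the overall message length, which is omitted here:

static void htt_stats_conf_walk(struct htt_stats_conf *conf)
{
	const struct htt_stats_conf_item *item = conf->items;
	int status, type;

	for (;;) {
		status = (item->info & HTT_STATS_CONF_ITEM_INFO_STATUS_MASK) >>
			 HTT_STATS_CONF_ITEM_INFO_STATUS_LSB;
		if (status == HTT_DBG_STATS_STATUS_SERIES_DONE)
			break;

		type = (item->info & HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_MASK) >>
		       HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_LSB;
		pr_debug("stats item type %d len %u\n",
			 type, __le16_to_cpu(item->length));

		item = htt_stats_conf_next_item(item);
	}
}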
1039 | /* | ||
1040 | * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank | ||
1041 | * | ||
1042 | * The following field definitions describe the format of the HTT host | ||
1043 | * to target frag_desc/msdu_ext bank configuration message. | ||
1044 | * The message contains the base address and the min and max id of the | ||
1045 | * MSDU_EXT/FRAG_DESC that will be used by the HTT to map MSDU DESC and | ||
1046 | * MSDU_EXT/FRAG_DESC. | ||
1047 | * HTT will use the id in the HTT descriptor instead of sending the frag_desc_ptr. | ||
1048 | * For QCA988X HW the firmware will use fragment_desc_ptr but in WIFI2.0 | ||
1049 | * the hardware does the mapping/translation. | ||
1050 | * | ||
1051 | * The total number of banks that can be configured is 16. | ||
1052 | * | ||
1053 | * This should be called before any TX has been initiated by the HTT. | ||
1054 | * | ||
1055 | * |31 16|15 8|7 5|4 0| | ||
1056 | * |------------------------------------------------------------| | ||
1057 | * | DESC_SIZE | NUM_BANKS | RES |SWP|pdev| msg type | | ||
1058 | * |------------------------------------------------------------| | ||
1059 | * | BANK0_BASE_ADDRESS | | ||
1060 | * |------------------------------------------------------------| | ||
1061 | * | ... | | ||
1062 | * |------------------------------------------------------------| | ||
1063 | * | BANK15_BASE_ADDRESS | | ||
1064 | * |------------------------------------------------------------| | ||
1065 | * | BANK0_MAX_ID | BANK0_MIN_ID | | ||
1066 | * |------------------------------------------------------------| | ||
1067 | * | ... | | ||
1068 | * |------------------------------------------------------------| | ||
1069 | * | BANK15_MAX_ID | BANK15_MIN_ID | | ||
1070 | * |------------------------------------------------------------| | ||
1071 | * Header fields: | ||
1072 | * - MSG_TYPE | ||
1073 | * Bits 7:0 | ||
1074 | * Value: 0x6 | ||
1075 | * - BANKx_BASE_ADDRESS | ||
1076 | * Bits 31:0 | ||
1077 | * Purpose: Provide a mechanism to specify the physical/bus base address | ||
1078 | * of the MSDU_EXT bank. | ||
1079 | * - BANKx_MIN_ID | ||
1080 | * Bits 15:0 | ||
1081 | * Purpose: Provide a mechanism to specify the min index that needs to | ||
1082 | * be mapped. | ||
1083 | * - BANKx_MAX_ID | ||
1084 | * Bits 31:16 | ||
1085 | * Purpose: Provide a mechanism to specify the max index that needs to | ||
1086 | * be mapped. | ||
1087 | */ | ||
1088 | struct htt_frag_desc_bank_id { | ||
1089 | __le16 bank_min_id; | ||
1090 | __le16 bank_max_id; | ||
1091 | } __packed; | ||
1092 | |||
1093 | /* real is 16 but it wouldn't fit in the max htt message size | ||
1094 | * so we use a conservatively safe value for now */ | ||
1095 | #define HTT_FRAG_DESC_BANK_MAX 4 | ||
1096 | |||
1097 | #define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03 | ||
1098 | #define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0 | ||
1099 | #define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP (1 << 2) | ||
1100 | |||
1101 | struct htt_frag_desc_bank_cfg { | ||
1102 | u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */ | ||
1103 | u8 num_banks; | ||
1104 | u8 desc_size; | ||
1105 | __le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX]; | ||
1106 | struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX]; | ||
1107 | } __packed; | ||
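A hedged example of composing the config fields defined above with plain mask arithmetic; the helper and its arguments are hypothetical:

static void htt_frag_desc_bank_cfg_fill(struct htt_frag_desc_bank_cfg *cfg,
					u8 pdev_id, bool swap,
					u8 num_banks, u8 desc_size)
{
	cfg->info = (pdev_id << HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB) &
		    HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK;
	if (swap)
		cfg->info |= HTT_FRAG_DESC_BANK_CFG_INFO_SWAP;

	cfg->num_banks = num_banks;
	cfg->desc_size = desc_size;
	/* bank_base_addrs[] and bank_id[] would be filled per configured bank */
}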
1108 | |||
1109 | union htt_rx_pn_t { | ||
1110 | /* WEP: 24-bit PN */ | ||
1111 | u32 pn24; | ||
1112 | |||
1113 | /* TKIP or CCMP: 48-bit PN */ | ||
1114 | u_int64_t pn48; | ||
1115 | |||
1116 | /* WAPI: 128-bit PN */ | ||
1117 | u_int64_t pn128[2]; | ||
1118 | }; | ||
1119 | |||
1120 | struct htt_cmd { | ||
1121 | struct htt_cmd_hdr hdr; | ||
1122 | union { | ||
1123 | struct htt_ver_req ver_req; | ||
1124 | struct htt_mgmt_tx_desc mgmt_tx; | ||
1125 | struct htt_data_tx_desc data_tx; | ||
1126 | struct htt_rx_ring_setup rx_setup; | ||
1127 | struct htt_stats_req stats_req; | ||
1128 | struct htt_oob_sync_req oob_sync_req; | ||
1129 | struct htt_aggr_conf aggr_conf; | ||
1130 | struct htt_frag_desc_bank_cfg frag_desc_bank_cfg; | ||
1131 | }; | ||
1132 | } __packed; | ||
1133 | |||
1134 | struct htt_resp { | ||
1135 | struct htt_resp_hdr hdr; | ||
1136 | union { | ||
1137 | struct htt_ver_resp ver_resp; | ||
1138 | struct htt_mgmt_tx_completion mgmt_tx_completion; | ||
1139 | struct htt_data_tx_completion data_tx_completion; | ||
1140 | struct htt_rx_indication rx_ind; | ||
1141 | struct htt_rx_fragment_indication rx_frag_ind; | ||
1142 | struct htt_rx_peer_map peer_map; | ||
1143 | struct htt_rx_peer_unmap peer_unmap; | ||
1144 | struct htt_rx_flush rx_flush; | ||
1145 | struct htt_rx_addba rx_addba; | ||
1146 | struct htt_rx_delba rx_delba; | ||
1147 | struct htt_security_indication security_indication; | ||
1148 | struct htt_rc_update rc_update; | ||
1149 | struct htt_rx_test rx_test; | ||
1150 | struct htt_pktlog_msg pktlog_msg; | ||
1151 | struct htt_stats_conf stats_conf; | ||
1152 | }; | ||
1153 | } __packed; | ||
1154 | |||
1155 | |||
1156 | /*** host side structures follow ***/ | ||
1157 | |||
1158 | struct htt_tx_done { | ||
1159 | u32 msdu_id; | ||
1160 | bool discard; | ||
1161 | bool no_ack; | ||
1162 | }; | ||
1163 | |||
1164 | struct htt_peer_map_event { | ||
1165 | u8 vdev_id; | ||
1166 | u16 peer_id; | ||
1167 | u8 addr[ETH_ALEN]; | ||
1168 | }; | ||
1169 | |||
1170 | struct htt_peer_unmap_event { | ||
1171 | u16 peer_id; | ||
1172 | }; | ||
1173 | |||
1174 | struct htt_rx_info { | ||
1175 | struct sk_buff *skb; | ||
1176 | enum htt_rx_mpdu_status status; | ||
1177 | enum htt_rx_mpdu_encrypt_type encrypt_type; | ||
1178 | s8 signal; | ||
1179 | struct { | ||
1180 | u8 info0; | ||
1181 | u32 info1; | ||
1182 | u32 info2; | ||
1183 | } rate; | ||
1184 | bool fcs_err; | ||
1185 | }; | ||
1186 | |||
1187 | struct ath10k_htt { | ||
1188 | struct ath10k *ar; | ||
1189 | enum ath10k_htc_ep_id eid; | ||
1190 | |||
1191 | int max_throughput_mbps; | ||
1192 | u8 target_version_major; | ||
1193 | u8 target_version_minor; | ||
1194 | struct completion target_version_received; | ||
1195 | |||
1196 | struct { | ||
1197 | /* | ||
1198 | * Ring of network buffer objects - This ring is | ||
1199 | * used exclusively by the host SW. This ring | ||
1200 | * mirrors the dev_addrs_ring that is shared | ||
1201 | * between the host SW and the MAC HW. The host SW | ||
1202 | * uses this netbufs ring to locate the network | ||
1203 | * buffer objects whose data buffers the HW has | ||
1204 | * filled. | ||
1205 | */ | ||
1206 | struct sk_buff **netbufs_ring; | ||
1207 | /* | ||
1208 | * Ring of buffer addresses - | ||
1209 | * This ring holds the "physical" device address of the | ||
1210 | * rx buffers the host SW provides for the MAC HW to | ||
1211 | * fill. | ||
1212 | */ | ||
1213 | __le32 *paddrs_ring; | ||
1214 | |||
1215 | /* | ||
1216 | * Base address of ring, as a "physical" device address | ||
1217 | * rather than a CPU address. | ||
1218 | */ | ||
1219 | dma_addr_t base_paddr; | ||
1220 | |||
1221 | /* how many elems in the ring (power of 2) */ | ||
1222 | int size; | ||
1223 | |||
1224 | /* size - 1 */ | ||
1225 | unsigned size_mask; | ||
1226 | |||
1227 | /* how many rx buffers to keep in the ring */ | ||
1228 | int fill_level; | ||
1229 | |||
1230 | /* how many rx buffers (full+empty) are in the ring */ | ||
1231 | int fill_cnt; | ||
1232 | |||
1233 | /* | ||
1234 | * alloc_idx - where HTT SW has deposited empty buffers | ||
1235 | * This is allocated in consistent mem, so that the FW can | ||
1236 | * read this variable, and program the HW's FW_IDX reg with | ||
1237 | * the value of this shadow register. | ||
1238 | */ | ||
1239 | struct { | ||
1240 | __le32 *vaddr; | ||
1241 | dma_addr_t paddr; | ||
1242 | } alloc_idx; | ||
1243 | |||
1244 | /* where HTT SW has processed bufs filled by rx MAC DMA */ | ||
1245 | struct { | ||
1246 | unsigned msdu_payld; | ||
1247 | } sw_rd_idx; | ||
1248 | |||
1249 | /* | ||
1250 | * refill_retry_timer - timer triggered when the ring is | ||
1251 | * not refilled to the level expected | ||
1252 | */ | ||
1253 | struct timer_list refill_retry_timer; | ||
1254 | |||
1255 | /* Protects access to all rx ring buffer state variables */ | ||
1256 | spinlock_t lock; | ||
1257 | } rx_ring; | ||
1258 | |||
1259 | unsigned int prefetch_len; | ||
1260 | |||
1261 | /* Protects access to %pending_tx, %used_msdu_ids */ | ||
1262 | spinlock_t tx_lock; | ||
1263 | int max_num_pending_tx; | ||
1264 | int num_pending_tx; | ||
1265 | struct sk_buff **pending_tx; | ||
1266 | unsigned long *used_msdu_ids; /* bitmap */ | ||
1267 | wait_queue_head_t empty_tx_wq; | ||
1268 | |||
1269 | /* set if host-fw communication goes haywire | ||
1270 | * used to avoid further failures */ | ||
1271 | bool rx_confused; | ||
1272 | }; | ||
1273 | |||
1274 | #define RX_HTT_HDR_STATUS_LEN 64 | ||
1275 | |||
1276 | /* This structure layout is programmed via rx ring setup | ||
1277 | * so that FW knows how to transfer the rx descriptor to the host. | ||
1278 | * Buffers like this are placed on the rx ring. */ | ||
1279 | struct htt_rx_desc { | ||
1280 | union { | ||
1281 | /* This field is filled on the host using the msdu buffer | ||
1282 | * from htt_rx_indication */ | ||
1283 | struct fw_rx_desc_base fw_desc; | ||
1284 | u32 pad; | ||
1285 | } __packed; | ||
1286 | struct { | ||
1287 | struct rx_attention attention; | ||
1288 | struct rx_frag_info frag_info; | ||
1289 | struct rx_mpdu_start mpdu_start; | ||
1290 | struct rx_msdu_start msdu_start; | ||
1291 | struct rx_msdu_end msdu_end; | ||
1292 | struct rx_mpdu_end mpdu_end; | ||
1293 | struct rx_ppdu_start ppdu_start; | ||
1294 | struct rx_ppdu_end ppdu_end; | ||
1295 | } __packed; | ||
1296 | u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN]; | ||
1297 | u8 msdu_payload[0]; | ||
1298 | }; | ||
1299 | |||
1300 | #define HTT_RX_DESC_ALIGN 8 | ||
1301 | |||
1302 | #define HTT_MAC_ADDR_LEN 6 | ||
1303 | |||
1304 | /* | ||
1305 | * FIX THIS | ||
1306 | * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size, | ||
1307 | * rounded up to a cache line size. | ||
1308 | */ | ||
1309 | #define HTT_RX_BUF_SIZE 1920 | ||
1310 | #define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc)) | ||
1311 | |||
1312 | /* | ||
1313 | * DMA_MAP expects the buffer to be an integral number of cache lines. | ||
1314 | * Rather than checking the actual cache line size, this code makes a | ||
1315 | * conservative estimate of what the cache line size could be. | ||
1316 | */ | ||
1317 | #define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */ | ||
1318 | #define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1) | ||
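A small sketch of how the conservative cache-line constants above would typically be used to pad a DMA-mapped length up to a 128-byte multiple:

static inline int ath10k_htt_cache_line_align(int len)
{
	return (len + HTT_MAX_CACHE_LINE_SIZE_MASK) &
	       ~HTT_MAX_CACHE_LINE_SIZE_MASK;
}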
1319 | |||
1320 | struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar); | ||
1321 | int ath10k_htt_attach_target(struct ath10k_htt *htt); | ||
1322 | void ath10k_htt_detach(struct ath10k_htt *htt); | ||
1323 | |||
1324 | int ath10k_htt_tx_attach(struct ath10k_htt *htt); | ||
1325 | void ath10k_htt_tx_detach(struct ath10k_htt *htt); | ||
1326 | int ath10k_htt_rx_attach(struct ath10k_htt *htt); | ||
1327 | void ath10k_htt_rx_detach(struct ath10k_htt *htt); | ||
1328 | void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb); | ||
1329 | void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); | ||
1330 | int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt); | ||
1331 | int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt); | ||
1332 | |||
1333 | void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt); | ||
1334 | int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt); | ||
1335 | void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id); | ||
1336 | int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *); | ||
1337 | int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *); | ||
1338 | #endif | ||
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c new file mode 100644 index 000000000000..de058d7adca8 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c | |||
@@ -0,0 +1,1167 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #include "htc.h" | ||
19 | #include "htt.h" | ||
20 | #include "txrx.h" | ||
21 | #include "debug.h" | ||
22 | |||
23 | #include <linux/log2.h> | ||
24 | |||
25 | /* slightly larger than one large A-MPDU */ | ||
26 | #define HTT_RX_RING_SIZE_MIN 128 | ||
27 | |||
28 | /* roughly 20 ms @ 1 Gbps of 1500B MSDUs */ | ||
29 | #define HTT_RX_RING_SIZE_MAX 2048 | ||
30 | |||
31 | #define HTT_RX_AVG_FRM_BYTES 1000 | ||
32 | |||
33 | /* ms, very conservative */ | ||
34 | #define HTT_RX_HOST_LATENCY_MAX_MS 20 | ||
35 | |||
36 | /* ms, conservative */ | ||
37 | #define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10 | ||
38 | |||
39 | /* when under memory pressure rx ring refill may fail and needs a retry */ | ||
40 | #define HTT_RX_RING_REFILL_RETRY_MS 50 | ||
41 | |||
42 | static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt) | ||
43 | { | ||
44 | int size; | ||
45 | |||
46 | /* | ||
47 | * It is expected that the host CPU will typically be able to | ||
48 | * service the rx indication from one A-MPDU before the rx | ||
49 | * indication from the subsequent A-MPDU happens, roughly 1-2 ms | ||
50 | * later. However, the rx ring should be sized very conservatively, | ||
51 | * to accommodate the worst reasonable delay before the host CPU | ||
52 | * services a rx indication interrupt. | ||
53 | * | ||
54 | * The rx ring need not be kept full of empty buffers. In theory, | ||
55 | * the htt host SW can dynamically track the low-water mark in the | ||
56 | * rx ring, and dynamically adjust the level to which the rx ring | ||
57 | * is filled with empty buffers, to dynamically meet the desired | ||
58 | * low-water mark. | ||
59 | * | ||
60 | * In contrast, it's difficult to resize the rx ring itself, once | ||
61 | * it's in use. Thus, the ring itself should be sized very | ||
62 | * conservatively, while the degree to which the ring is filled | ||
63 | * with empty buffers should be sized moderately conservatively. | ||
64 | */ | ||
65 | |||
66 | /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ | ||
67 | size = | ||
68 | htt->max_throughput_mbps * | ||
69 | 1000 / | ||
70 | (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS; | ||
71 | |||
72 | if (size < HTT_RX_RING_SIZE_MIN) | ||
73 | size = HTT_RX_RING_SIZE_MIN; | ||
74 | |||
75 | if (size > HTT_RX_RING_SIZE_MAX) | ||
76 | size = HTT_RX_RING_SIZE_MAX; | ||
77 | |||
78 | size = roundup_pow_of_two(size); | ||
79 | |||
80 | return size; | ||
81 | } | ||
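To make the sizing concrete (assuming the multiplicative form of the formula, matching the fill-level helper below): at 400 Mbps, 400 * 1000 / (8 * 1000) = 50 frames per millisecond, so covering a 20 ms worst-case host latency needs roughly 50 * 20 = 1000 ring entries; roundup_pow_of_two() then yields a 1024-entry ring, comfortably inside the 128/2048 clamps.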
82 | |||
83 | static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt) | ||
84 | { | ||
85 | int size; | ||
86 | |||
87 | /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ | ||
88 | size = | ||
89 | htt->max_throughput_mbps * | ||
90 | 1000 / | ||
91 | (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS; | ||
92 | |||
93 | /* | ||
94 | * Make sure the fill level is at least 1 less than the ring size. | ||
95 | * Leaving 1 element empty allows the SW to easily distinguish | ||
96 | * between a full ring vs. an empty ring. | ||
97 | */ | ||
98 | if (size >= htt->rx_ring.size) | ||
99 | size = htt->rx_ring.size - 1; | ||
100 | |||
101 | return size; | ||
102 | } | ||
103 | |||
104 | static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt) | ||
105 | { | ||
106 | struct sk_buff *skb; | ||
107 | struct ath10k_skb_cb *cb; | ||
108 | int i; | ||
109 | |||
110 | for (i = 0; i < htt->rx_ring.fill_cnt; i++) { | ||
111 | skb = htt->rx_ring.netbufs_ring[i]; | ||
112 | cb = ATH10K_SKB_CB(skb); | ||
113 | dma_unmap_single(htt->ar->dev, cb->paddr, | ||
114 | skb->len + skb_tailroom(skb), | ||
115 | DMA_FROM_DEVICE); | ||
116 | dev_kfree_skb_any(skb); | ||
117 | } | ||
118 | |||
119 | htt->rx_ring.fill_cnt = 0; | ||
120 | } | ||
121 | |||
122 | static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) | ||
123 | { | ||
124 | struct htt_rx_desc *rx_desc; | ||
125 | struct sk_buff *skb; | ||
126 | dma_addr_t paddr; | ||
127 | int ret = 0, idx; | ||
128 | |||
129 | idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr)); | ||
130 | while (num > 0) { | ||
131 | skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN); | ||
132 | if (!skb) { | ||
133 | ret = -ENOMEM; | ||
134 | goto fail; | ||
135 | } | ||
136 | |||
137 | if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN)) | ||
138 | skb_pull(skb, | ||
139 | PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) - | ||
140 | skb->data); | ||
141 | |||
142 | /* Clear rx_desc attention word before posting to Rx ring */ | ||
143 | rx_desc = (struct htt_rx_desc *)skb->data; | ||
144 | rx_desc->attention.flags = __cpu_to_le32(0); | ||
145 | |||
146 | paddr = dma_map_single(htt->ar->dev, skb->data, | ||
147 | skb->len + skb_tailroom(skb), | ||
148 | DMA_FROM_DEVICE); | ||
149 | |||
150 | if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) { | ||
151 | dev_kfree_skb_any(skb); | ||
152 | ret = -ENOMEM; | ||
153 | goto fail; | ||
154 | } | ||
155 | |||
156 | ATH10K_SKB_CB(skb)->paddr = paddr; | ||
157 | htt->rx_ring.netbufs_ring[idx] = skb; | ||
158 | htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr); | ||
159 | htt->rx_ring.fill_cnt++; | ||
160 | |||
161 | num--; | ||
162 | idx++; | ||
163 | idx &= htt->rx_ring.size_mask; | ||
164 | } | ||
165 | |||
166 | fail: | ||
167 | *(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx); | ||
168 | return ret; | ||
169 | } | ||
170 | |||
171 | static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) | ||
172 | { | ||
173 | lockdep_assert_held(&htt->rx_ring.lock); | ||
174 | return __ath10k_htt_rx_ring_fill_n(htt, num); | ||
175 | } | ||
176 | |||
177 | static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt) | ||
178 | { | ||
179 | int ret, num_to_fill; | ||
180 | |||
181 | spin_lock_bh(&htt->rx_ring.lock); | ||
182 | num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt; | ||
183 | ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill); | ||
184 | if (ret == -ENOMEM) { | ||
185 | /* | ||
186 | * Failed to fill it to the desired level - | ||
187 | * we'll start a timer and try again next time. | ||
188 | * As long as enough buffers are left in the ring for | ||
189 | * another A-MPDU rx, no special recovery is needed. | ||
190 | */ | ||
191 | mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + | ||
192 | msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS)); | ||
193 | } | ||
194 | spin_unlock_bh(&htt->rx_ring.lock); | ||
195 | } | ||
196 | |||
197 | static void ath10k_htt_rx_ring_refill_retry(unsigned long arg) | ||
198 | { | ||
199 | struct ath10k_htt *htt = (struct ath10k_htt *)arg; | ||
200 | ath10k_htt_rx_msdu_buff_replenish(htt); | ||
201 | } | ||
202 | |||
203 | static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt) | ||
204 | { | ||
205 | return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) - | ||
206 | htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask; | ||
207 | } | ||
208 | |||
209 | void ath10k_htt_rx_detach(struct ath10k_htt *htt) | ||
210 | { | ||
211 | int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld; | ||
212 | |||
213 | del_timer_sync(&htt->rx_ring.refill_retry_timer); | ||
214 | |||
215 | while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) { | ||
216 | struct sk_buff *skb = | ||
217 | htt->rx_ring.netbufs_ring[sw_rd_idx]; | ||
218 | struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); | ||
219 | |||
220 | dma_unmap_single(htt->ar->dev, cb->paddr, | ||
221 | skb->len + skb_tailroom(skb), | ||
222 | DMA_FROM_DEVICE); | ||
223 | dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]); | ||
224 | sw_rd_idx++; | ||
225 | sw_rd_idx &= htt->rx_ring.size_mask; | ||
226 | } | ||
227 | |||
228 | dma_free_coherent(htt->ar->dev, | ||
229 | (htt->rx_ring.size * | ||
230 | sizeof(htt->rx_ring.paddrs_ring)), | ||
231 | htt->rx_ring.paddrs_ring, | ||
232 | htt->rx_ring.base_paddr); | ||
233 | |||
234 | dma_free_coherent(htt->ar->dev, | ||
235 | sizeof(*htt->rx_ring.alloc_idx.vaddr), | ||
236 | htt->rx_ring.alloc_idx.vaddr, | ||
237 | htt->rx_ring.alloc_idx.paddr); | ||
238 | |||
239 | kfree(htt->rx_ring.netbufs_ring); | ||
240 | } | ||
241 | |||
242 | static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) | ||
243 | { | ||
244 | int idx; | ||
245 | struct sk_buff *msdu; | ||
246 | |||
247 | spin_lock_bh(&htt->rx_ring.lock); | ||
248 | |||
249 | if (ath10k_htt_rx_ring_elems(htt) == 0) | ||
250 | ath10k_warn("htt rx ring is empty!\n"); | ||
251 | |||
252 | idx = htt->rx_ring.sw_rd_idx.msdu_payld; | ||
253 | msdu = htt->rx_ring.netbufs_ring[idx]; | ||
254 | |||
255 | idx++; | ||
256 | idx &= htt->rx_ring.size_mask; | ||
257 | htt->rx_ring.sw_rd_idx.msdu_payld = idx; | ||
258 | htt->rx_ring.fill_cnt--; | ||
259 | |||
260 | spin_unlock_bh(&htt->rx_ring.lock); | ||
261 | return msdu; | ||
262 | } | ||
263 | |||
264 | static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb) | ||
265 | { | ||
266 | struct sk_buff *next; | ||
267 | |||
268 | while (skb) { | ||
269 | next = skb->next; | ||
270 | dev_kfree_skb_any(skb); | ||
271 | skb = next; | ||
272 | } | ||
273 | } | ||
274 | |||
275 | static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, | ||
276 | u8 **fw_desc, int *fw_desc_len, | ||
277 | struct sk_buff **head_msdu, | ||
278 | struct sk_buff **tail_msdu) | ||
279 | { | ||
280 | int msdu_len, msdu_chaining = 0; | ||
281 | struct sk_buff *msdu; | ||
282 | struct htt_rx_desc *rx_desc; | ||
283 | |||
284 | if (ath10k_htt_rx_ring_elems(htt) == 0) | ||
285 | ath10k_warn("htt rx ring is empty!\n"); | ||
286 | |||
287 | if (htt->rx_confused) { | ||
288 | ath10k_warn("htt is confused. refusing rx\n"); | ||
289 | return 0; | ||
290 | } | ||
291 | |||
292 | msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt); | ||
293 | while (msdu) { | ||
294 | int last_msdu, msdu_len_invalid, msdu_chained; | ||
295 | |||
296 | dma_unmap_single(htt->ar->dev, | ||
297 | ATH10K_SKB_CB(msdu)->paddr, | ||
298 | msdu->len + skb_tailroom(msdu), | ||
299 | DMA_FROM_DEVICE); | ||
300 | |||
301 | ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ", | ||
302 | msdu->data, msdu->len + skb_tailroom(msdu)); | ||
303 | |||
304 | rx_desc = (struct htt_rx_desc *)msdu->data; | ||
305 | |||
306 | /* FIXME: we must report msdu payload since this is what caller | ||
307 | * expects now */ | ||
308 | skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload)); | ||
309 | skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload)); | ||
310 | |||
311 | /* | ||
312 | * Sanity check - confirm the HW is finished filling in the | ||
313 | * rx data. | ||
314 | * If the HW and SW are working correctly, then it's guaranteed | ||
315 | * that the HW's MAC DMA is done before this point in the SW. | ||
316 | * To prevent the case that we handle a stale Rx descriptor, | ||
317 | * just assert for now until we have a way to recover. | ||
318 | */ | ||
319 | if (!(__le32_to_cpu(rx_desc->attention.flags) | ||
320 | & RX_ATTENTION_FLAGS_MSDU_DONE)) { | ||
321 | ath10k_htt_rx_free_msdu_chain(*head_msdu); | ||
322 | *head_msdu = NULL; | ||
323 | msdu = NULL; | ||
324 | ath10k_err("htt rx stopped. cannot recover\n"); | ||
325 | htt->rx_confused = true; | ||
326 | break; | ||
327 | } | ||
328 | |||
329 | /* | ||
330 | * Copy the FW rx descriptor for this MSDU from the rx | ||
331 | * indication message into the MSDU's netbuf. HL uses the | ||
332 | * same rx indication message definition as LL, and simply | ||
333 | * appends new info (fields from the HW rx desc, and the | ||
334 | * MSDU payload itself). So, the offset into the rx | ||
335 | * indication message only has to account for the standard | ||
336 | * offset of the per-MSDU FW rx desc info within the | ||
337 | * message, and how many bytes of the per-MSDU FW rx desc | ||
338 | * info have already been consumed. (And the endianness of | ||
339 | * the host, since for a big-endian host, the rx ind | ||
340 | * message contents, including the per-MSDU rx desc bytes, | ||
341 | * were byteswapped during upload.) | ||
342 | */ | ||
343 | if (*fw_desc_len > 0) { | ||
344 | rx_desc->fw_desc.info0 = **fw_desc; | ||
345 | /* | ||
346 | * The target is expected to only provide the basic | ||
347 | * per-MSDU rx descriptors. Just to be sure, verify | ||
348 | * that the target has not attached extension data | ||
349 | * (e.g. LRO flow ID). | ||
350 | */ | ||
351 | |||
352 | /* or more, if there's extension data */ | ||
353 | (*fw_desc)++; | ||
354 | (*fw_desc_len)--; | ||
355 | } else { | ||
356 | /* | ||
357 | * When an oversized A-MSDU occurs, the FW will lose | ||
358 | * some of the MSDU status - in this case the FW | ||
359 | * descriptors provided will be fewer than the | ||
360 | * actual MSDUs inside this MPDU. Mark the FW | ||
361 | * descriptors so that the frames are still delivered | ||
362 | * to the upper stack if there is no CRC error for this MPDU. | ||
363 | * | ||
364 | * FIX THIS - the FW descriptors are actually for | ||
365 | * MSDUs in the end of this A-MSDU instead of the | ||
366 | * beginning. | ||
367 | */ | ||
368 | rx_desc->fw_desc.info0 = 0; | ||
369 | } | ||
370 | |||
371 | msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags) | ||
372 | & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR | | ||
373 | RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR)); | ||
374 | msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0), | ||
375 | RX_MSDU_START_INFO0_MSDU_LENGTH); | ||
376 | msdu_chained = rx_desc->frag_info.ring2_more_count; | ||
377 | |||
378 | if (msdu_len_invalid) | ||
379 | msdu_len = 0; | ||
380 | |||
381 | skb_trim(msdu, 0); | ||
382 | skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE)); | ||
383 | msdu_len -= msdu->len; | ||
384 | |||
385 | /* FIXME: Do chained buffers include htt_rx_desc or not? */ | ||
386 | while (msdu_chained--) { | ||
387 | struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt); | ||
388 | |||
389 | dma_unmap_single(htt->ar->dev, | ||
390 | ATH10K_SKB_CB(next)->paddr, | ||
391 | next->len + skb_tailroom(next), | ||
392 | DMA_FROM_DEVICE); | ||
393 | |||
394 | ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ", | ||
395 | next->data, | ||
396 | next->len + skb_tailroom(next)); | ||
397 | |||
398 | skb_trim(next, 0); | ||
399 | skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE)); | ||
400 | msdu_len -= next->len; | ||
401 | |||
402 | msdu->next = next; | ||
403 | msdu = next; | ||
404 | msdu_chaining = 1; | ||
405 | } | ||
406 | |||
407 | if (msdu_len > 0) { | ||
408 | /* This may suggest a FW bug */ | ||
409 | ath10k_warn("htt rx msdu len not consumed (%d)\n", | ||
410 | msdu_len); | ||
411 | } | ||
412 | |||
413 | last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) & | ||
414 | RX_MSDU_END_INFO0_LAST_MSDU; | ||
415 | |||
416 | if (last_msdu) { | ||
417 | msdu->next = NULL; | ||
418 | break; | ||
419 | } else { | ||
420 | struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt); | ||
421 | msdu->next = next; | ||
422 | msdu = next; | ||
423 | } | ||
424 | } | ||
425 | *tail_msdu = msdu; | ||
426 | |||
427 | /* | ||
428 | * Don't refill the ring yet. | ||
429 | * | ||
430 | * First, the elements popped here are still in use - it is not | ||
431 | * safe to overwrite them until the matching call to | ||
432 | * mpdu_desc_list_next. Second, for efficiency it is preferable to | ||
433 | * refill the rx ring with 1 PPDU's worth of rx buffers (something | ||
434 | * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers | ||
435 | * (something like 3 buffers). Consequently, we'll rely on the txrx | ||
436 | * SW to tell us when it is done pulling all the PPDU's rx buffers | ||
437 | * out of the rx ring, and then refill it just once. | ||
438 | */ | ||
439 | |||
440 | return msdu_chaining; | ||
441 | } | ||
442 | |||
443 | int ath10k_htt_rx_attach(struct ath10k_htt *htt) | ||
444 | { | ||
445 | dma_addr_t paddr; | ||
446 | void *vaddr; | ||
447 | struct timer_list *timer = &htt->rx_ring.refill_retry_timer; | ||
448 | |||
449 | htt->rx_ring.size = ath10k_htt_rx_ring_size(htt); | ||
450 | if (!is_power_of_2(htt->rx_ring.size)) { | ||
451 | ath10k_warn("htt rx ring size is not power of 2\n"); | ||
452 | return -EINVAL; | ||
453 | } | ||
454 | |||
455 | htt->rx_ring.size_mask = htt->rx_ring.size - 1; | ||
456 | |||
457 | /* | ||
458 | * Set the initial value for the level to which the rx ring | ||
459 | * should be filled, based on the max throughput and the | ||
460 | * worst likely latency for the host to fill the rx ring | ||
461 | * with new buffers. In theory, this fill level can be | ||
462 | * dynamically adjusted from the initial value set here, to | ||
463 | * reflect the actual host latency rather than a | ||
464 | * conservative assumption about the host latency. | ||
465 | */ | ||
466 | htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt); | ||
467 | |||
468 | htt->rx_ring.netbufs_ring = | ||
469 | kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *), | ||
470 | GFP_KERNEL); | ||
471 | if (!htt->rx_ring.netbufs_ring) | ||
472 | goto err_netbuf; | ||
473 | |||
474 | vaddr = dma_alloc_coherent(htt->ar->dev, | ||
475 | (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)), | ||
476 | &paddr, GFP_DMA); | ||
477 | if (!vaddr) | ||
478 | goto err_dma_ring; | ||
479 | |||
480 | htt->rx_ring.paddrs_ring = vaddr; | ||
481 | htt->rx_ring.base_paddr = paddr; | ||
482 | |||
483 | vaddr = dma_alloc_coherent(htt->ar->dev, | ||
484 | sizeof(*htt->rx_ring.alloc_idx.vaddr), | ||
485 | &paddr, GFP_DMA); | ||
486 | if (!vaddr) | ||
487 | goto err_dma_idx; | ||
488 | |||
489 | htt->rx_ring.alloc_idx.vaddr = vaddr; | ||
490 | htt->rx_ring.alloc_idx.paddr = paddr; | ||
491 | htt->rx_ring.sw_rd_idx.msdu_payld = 0; | ||
492 | *htt->rx_ring.alloc_idx.vaddr = 0; | ||
493 | |||
494 | /* Initialize the Rx refill retry timer */ | ||
495 | setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt); | ||
496 | |||
497 | spin_lock_init(&htt->rx_ring.lock); | ||
498 | |||
499 | htt->rx_ring.fill_cnt = 0; | ||
500 | if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level)) | ||
501 | goto err_fill_ring; | ||
502 | |||
503 | ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n", | ||
504 | htt->rx_ring.size, htt->rx_ring.fill_level); | ||
505 | return 0; | ||
506 | |||
507 | err_fill_ring: | ||
508 | ath10k_htt_rx_ring_free(htt); | ||
509 | dma_free_coherent(htt->ar->dev, | ||
510 | sizeof(*htt->rx_ring.alloc_idx.vaddr), | ||
511 | htt->rx_ring.alloc_idx.vaddr, | ||
512 | htt->rx_ring.alloc_idx.paddr); | ||
513 | err_dma_idx: | ||
514 | dma_free_coherent(htt->ar->dev, | ||
515 | (htt->rx_ring.size * | ||
516 | sizeof(htt->rx_ring.paddrs_ring)), | ||
517 | htt->rx_ring.paddrs_ring, | ||
518 | htt->rx_ring.base_paddr); | ||
519 | err_dma_ring: | ||
520 | kfree(htt->rx_ring.netbufs_ring); | ||
521 | err_netbuf: | ||
522 | return -ENOMEM; | ||
523 | } | ||
524 | |||
525 | static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type) | ||
526 | { | ||
527 | switch (type) { | ||
528 | case HTT_RX_MPDU_ENCRYPT_WEP40: | ||
529 | case HTT_RX_MPDU_ENCRYPT_WEP104: | ||
530 | return 4; | ||
531 | case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: | ||
532 | case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */ | ||
533 | case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: | ||
534 | case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */ | ||
535 | case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: | ||
536 | return 8; | ||
537 | case HTT_RX_MPDU_ENCRYPT_NONE: | ||
538 | return 0; | ||
539 | } | ||
540 | |||
541 | ath10k_warn("unknown encryption type %d\n", type); | ||
542 | return 0; | ||
543 | } | ||
544 | |||
545 | static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type) | ||
546 | { | ||
547 | switch (type) { | ||
548 | case HTT_RX_MPDU_ENCRYPT_NONE: | ||
549 | case HTT_RX_MPDU_ENCRYPT_WEP40: | ||
550 | case HTT_RX_MPDU_ENCRYPT_WEP104: | ||
551 | case HTT_RX_MPDU_ENCRYPT_WEP128: | ||
552 | case HTT_RX_MPDU_ENCRYPT_WAPI: | ||
553 | return 0; | ||
554 | case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: | ||
555 | case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: | ||
556 | return 4; | ||
557 | case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: | ||
558 | return 8; | ||
559 | } | ||
560 | |||
561 | ath10k_warn("unknown encryption type %d\n", type); | ||
562 | return 0; | ||
563 | } | ||
564 | |||
565 | /* Applies for first msdu in chain, before altering it. */ | ||
566 | static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb) | ||
567 | { | ||
568 | struct htt_rx_desc *rxd; | ||
569 | enum rx_msdu_decap_format fmt; | ||
570 | |||
571 | rxd = (void *)skb->data - sizeof(*rxd); | ||
572 | fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), | ||
573 | RX_MSDU_START_INFO1_DECAP_FORMAT); | ||
574 | |||
575 | if (fmt == RX_MSDU_DECAP_RAW) | ||
576 | return (void *)skb->data; | ||
577 | else | ||
578 | return (void *)skb->data - RX_HTT_HDR_STATUS_LEN; | ||
579 | } | ||
580 | |||
581 | /* This function only applies for first msdu in an msdu chain */ | ||
582 | static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr) | ||
583 | { | ||
584 | if (ieee80211_is_data_qos(hdr->frame_control)) { | ||
585 | u8 *qc = ieee80211_get_qos_ctl(hdr); | ||
586 | if (qc[0] & 0x80) | ||
587 | return true; | ||
588 | } | ||
589 | return false; | ||
590 | } | ||
591 | |||
592 | static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt, | ||
593 | struct htt_rx_info *info) | ||
594 | { | ||
595 | struct htt_rx_desc *rxd; | ||
596 | struct sk_buff *amsdu; | ||
597 | struct sk_buff *first; | ||
598 | struct ieee80211_hdr *hdr; | ||
599 | struct sk_buff *skb = info->skb; | ||
600 | enum rx_msdu_decap_format fmt; | ||
601 | enum htt_rx_mpdu_encrypt_type enctype; | ||
602 | unsigned int hdr_len; | ||
603 | int crypto_len; | ||
604 | |||
605 | rxd = (void *)skb->data - sizeof(*rxd); | ||
606 | fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), | ||
607 | RX_MSDU_START_INFO1_DECAP_FORMAT); | ||
608 | enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), | ||
609 | RX_MPDU_START_INFO0_ENCRYPT_TYPE); | ||
610 | |||
611 | /* FIXME: No idea what assumptions are safe here. Need logs */ | ||
612 | if ((fmt == RX_MSDU_DECAP_RAW && skb->next) || | ||
613 | (fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) { | ||
614 | ath10k_htt_rx_free_msdu_chain(skb->next); | ||
615 | skb->next = NULL; | ||
616 | return -ENOTSUPP; | ||
617 | } | ||
618 | |||
619 | /* A-MSDU max is a little less than 8K */ | ||
620 | amsdu = dev_alloc_skb(8*1024); | ||
621 | if (!amsdu) { | ||
622 | ath10k_warn("A-MSDU allocation failed\n"); | ||
623 | ath10k_htt_rx_free_msdu_chain(skb->next); | ||
624 | skb->next = NULL; | ||
625 | return -ENOMEM; | ||
626 | } | ||
627 | |||
628 | if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) { | ||
629 | int hdrlen; | ||
630 | |||
631 | hdr = (void *)rxd->rx_hdr_status; | ||
632 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | ||
633 | memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen); | ||
634 | } | ||
635 | |||
636 | first = skb; | ||
637 | while (skb) { | ||
638 | void *decap_hdr; | ||
639 | int decap_len = 0; | ||
640 | |||
641 | rxd = (void *)skb->data - sizeof(*rxd); | ||
642 | fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), | ||
643 | RX_MSDU_START_INFO1_DECAP_FORMAT); | ||
644 | decap_hdr = (void *)rxd->rx_hdr_status; | ||
645 | |||
646 | if (skb == first) { | ||
647 | /* We receive linked A-MSDU subframe skbuffs. The | ||
648 | * first one contains the original 802.11 header (and | ||
649 | * possibly a crypto param) in the RX descriptor. The | ||
650 | * A-MSDU subframe header follows that. Each part is | ||
651 | * aligned to a 4-byte boundary. */ | ||
652 | |||
653 | hdr = (void *)amsdu->data; | ||
654 | hdr_len = ieee80211_hdrlen(hdr->frame_control); | ||
655 | crypto_len = ath10k_htt_rx_crypto_param_len(enctype); | ||
656 | |||
657 | decap_hdr += roundup(hdr_len, 4); | ||
658 | decap_hdr += roundup(crypto_len, 4); | ||
659 | } | ||
660 | |||
661 | if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) { | ||
662 | /* Ethernet2 decap inserts ethernet header in place of | ||
663 | * A-MSDU subframe header. */ | ||
664 | skb_pull(skb, 6 + 6 + 2); | ||
665 | |||
666 | /* A-MSDU subframe header length */ | ||
667 | decap_len += 6 + 6 + 2; | ||
668 | |||
669 | /* Ethernet2 decap also strips the LLC/SNAP so we need | ||
670 | * to re-insert it. The LLC/SNAP follows A-MSDU | ||
671 | * subframe header. */ | ||
672 | /* FIXME: Not all LLCs are 8 bytes long */ | ||
673 | decap_len += 8; | ||
674 | |||
675 | memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len); | ||
676 | } | ||
677 | |||
678 | if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) { | ||
679 | /* Native Wifi decap inserts regular 802.11 header | ||
680 | * in place of A-MSDU subframe header. */ | ||
681 | hdr = (struct ieee80211_hdr *)skb->data; | ||
682 | skb_pull(skb, ieee80211_hdrlen(hdr->frame_control)); | ||
683 | |||
684 | /* A-MSDU subframe header length */ | ||
685 | decap_len += 6 + 6 + 2; | ||
686 | |||
687 | memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len); | ||
688 | } | ||
689 | |||
690 | if (fmt == RX_MSDU_DECAP_RAW) | ||
691 | skb_trim(skb, skb->len - 4); /* remove FCS */ | ||
692 | |||
693 | memcpy(skb_put(amsdu, skb->len), skb->data, skb->len); | ||
694 | |||
695 | /* A-MSDU subframes are padded to 4 bytes, | ||
696 | * but relative to the first subframe, not the whole MPDU */ | ||
697 | if (skb->next && ((decap_len + skb->len) & 3)) { | ||
698 | int padlen = 4 - ((decap_len + skb->len) & 3); | ||
699 | memset(skb_put(amsdu, padlen), 0, padlen); | ||
700 | } | ||
701 | |||
702 | skb = skb->next; | ||
703 | } | ||
704 | |||
705 | info->skb = amsdu; | ||
706 | info->encrypt_type = enctype; | ||
707 | |||
708 | ath10k_htt_rx_free_msdu_chain(first); | ||
709 | |||
710 | return 0; | ||
711 | } | ||
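The padding logic above aligns each subframe to a 4-byte boundary measured against the running A-MSDU length (decap_len + skb->len), not against each skb on its own. A small stand-alone sketch of just that calculation, with made-up lengths:

#include <stdio.h>

/* Pad a subframe (header + payload) up to the next 4-byte boundary,
 * mirroring the "decap_len + skb->len" calculation in the rx path. */
static int amsdu_pad_len(int decap_len, int payload_len)
{
	int rem = (decap_len + payload_len) & 3;

	return rem ? 4 - rem : 0;
}

int main(void)
{
	printf("%d\n", amsdu_pad_len(14, 39)); /* 53 bytes -> 3 bytes of padding */
	printf("%d\n", amsdu_pad_len(14, 50)); /* 64 bytes -> already aligned */
	return 0;
}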
712 | |||
713 | static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info) | ||
714 | { | ||
715 | struct sk_buff *skb = info->skb; | ||
716 | struct htt_rx_desc *rxd; | ||
717 | struct ieee80211_hdr *hdr; | ||
718 | enum rx_msdu_decap_format fmt; | ||
719 | enum htt_rx_mpdu_encrypt_type enctype; | ||
720 | |||
721 | /* This shouldn't happen. If it does then it may be a FW bug. */ | ||
722 | if (skb->next) { | ||
723 | ath10k_warn("received chained non A-MSDU frame\n"); | ||
724 | ath10k_htt_rx_free_msdu_chain(skb->next); | ||
725 | skb->next = NULL; | ||
726 | } | ||
727 | |||
728 | rxd = (void *)skb->data - sizeof(*rxd); | ||
729 | fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), | ||
730 | RX_MSDU_START_INFO1_DECAP_FORMAT); | ||
731 | enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), | ||
732 | RX_MPDU_START_INFO0_ENCRYPT_TYPE); | ||
733 | hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN; | ||
734 | |||
735 | switch (fmt) { | ||
736 | case RX_MSDU_DECAP_RAW: | ||
737 | /* remove trailing FCS */ | ||
738 | skb_trim(skb, skb->len - 4); | ||
739 | break; | ||
740 | case RX_MSDU_DECAP_NATIVE_WIFI: | ||
741 | /* nothing to do here */ | ||
742 | break; | ||
743 | case RX_MSDU_DECAP_ETHERNET2_DIX: | ||
744 | /* macaddr[6] + macaddr[6] + ethertype[2] */ | ||
745 | skb_pull(skb, 6 + 6 + 2); | ||
746 | break; | ||
747 | case RX_MSDU_DECAP_8023_SNAP_LLC: | ||
748 | /* macaddr[6] + macaddr[6] + len[2] */ | ||
749 | /* we don't need this for non-A-MSDU */ | ||
750 | skb_pull(skb, 6 + 6 + 2); | ||
751 | break; | ||
752 | } | ||
753 | |||
754 | if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) { | ||
755 | void *llc; | ||
756 | int llclen; | ||
757 | |||
758 | llclen = 8; | ||
759 | llc = hdr; | ||
760 | llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4); | ||
761 | llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4); | ||
762 | |||
763 | skb_push(skb, llclen); | ||
764 | memcpy(skb->data, llc, llclen); | ||
765 | } | ||
766 | |||
767 | if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) { | ||
768 | int len = ieee80211_hdrlen(hdr->frame_control); | ||
769 | skb_push(skb, len); | ||
770 | memcpy(skb->data, hdr, len); | ||
771 | } | ||
772 | |||
773 | info->skb = skb; | ||
774 | info->encrypt_type = enctype; | ||
775 | return 0; | ||
776 | } | ||
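The MS()/SM() helpers used throughout this file follow the usual mask/LSB convention for packed descriptor words; the real macros and field definitions live in the driver headers. A self-contained sketch of that convention with a hypothetical field:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field definition in the MASK/LSB style the driver uses. */
#define DEMO_DECAP_FORMAT_MASK 0x00000300
#define DEMO_DECAP_FORMAT_LSB  8

/* Extract ("MS") and insert ("SM") a field, mirroring the usual
 * mask/shift convention behind the MS()/SM() helpers. */
#define DEMO_MS(val, field) (((val) & field##_MASK) >> field##_LSB)
#define DEMO_SM(val, field) (((val) << field##_LSB) & field##_MASK)

int main(void)
{
	uint32_t info1 = DEMO_SM(2, DEMO_DECAP_FORMAT); /* pack decap format 2 */

	printf("decap format = %u\n",
	       (unsigned int)DEMO_MS(info1, DEMO_DECAP_FORMAT)); /* prints 2 */
	return 0;
}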
777 | |||
778 | static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb) | ||
779 | { | ||
780 | struct htt_rx_desc *rxd; | ||
781 | u32 flags; | ||
782 | |||
783 | rxd = (void *)skb->data - sizeof(*rxd); | ||
784 | flags = __le32_to_cpu(rxd->attention.flags); | ||
785 | |||
786 | if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR) | ||
787 | return true; | ||
788 | |||
789 | return false; | ||
790 | } | ||
791 | |||
792 | static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb) | ||
793 | { | ||
794 | struct htt_rx_desc *rxd; | ||
795 | u32 flags; | ||
796 | |||
797 | rxd = (void *)skb->data - sizeof(*rxd); | ||
798 | flags = __le32_to_cpu(rxd->attention.flags); | ||
799 | |||
800 | if (flags & RX_ATTENTION_FLAGS_FCS_ERR) | ||
801 | return true; | ||
802 | |||
803 | return false; | ||
804 | } | ||
805 | |||
806 | static void ath10k_htt_rx_handler(struct ath10k_htt *htt, | ||
807 | struct htt_rx_indication *rx) | ||
808 | { | ||
809 | struct htt_rx_info info; | ||
810 | struct htt_rx_indication_mpdu_range *mpdu_ranges; | ||
811 | struct ieee80211_hdr *hdr; | ||
812 | int num_mpdu_ranges; | ||
813 | int fw_desc_len; | ||
814 | u8 *fw_desc; | ||
815 | int i, j; | ||
816 | int ret; | ||
817 | |||
818 | memset(&info, 0, sizeof(info)); | ||
819 | |||
820 | fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes); | ||
821 | fw_desc = (u8 *)&rx->fw_desc; | ||
822 | |||
823 | num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), | ||
824 | HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); | ||
825 | mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx); | ||
826 | |||
827 | ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", | ||
828 | rx, sizeof(*rx) + | ||
829 | (sizeof(struct htt_rx_indication_mpdu_range) * | ||
830 | num_mpdu_ranges)); | ||
831 | |||
832 | for (i = 0; i < num_mpdu_ranges; i++) { | ||
833 | info.status = mpdu_ranges[i].mpdu_range_status; | ||
834 | |||
835 | for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) { | ||
836 | struct sk_buff *msdu_head, *msdu_tail; | ||
837 | enum htt_rx_mpdu_status status; | ||
838 | int msdu_chaining; | ||
839 | |||
840 | msdu_head = NULL; | ||
841 | msdu_tail = NULL; | ||
842 | msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, | ||
843 | &fw_desc, | ||
844 | &fw_desc_len, | ||
845 | &msdu_head, | ||
846 | &msdu_tail); | ||
847 | |||
848 | if (!msdu_head) { | ||
849 | ath10k_warn("htt rx no data!\n"); | ||
850 | continue; | ||
851 | } | ||
852 | |||
853 | if (msdu_head->len == 0) { | ||
854 | ath10k_dbg(ATH10K_DBG_HTT, | ||
855 | "htt rx dropping due to zero-len\n"); | ||
856 | ath10k_htt_rx_free_msdu_chain(msdu_head); | ||
857 | continue; | ||
858 | } | ||
859 | |||
860 | if (ath10k_htt_rx_has_decrypt_err(msdu_head)) { | ||
861 | ath10k_htt_rx_free_msdu_chain(msdu_head); | ||
862 | continue; | ||
863 | } | ||
864 | |||
865 | status = info.status; | ||
866 | |||
867 | /* Skip mgmt frames while we handle this in WMI */ | ||
868 | if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) { | ||
869 | ath10k_htt_rx_free_msdu_chain(msdu_head); | ||
870 | continue; | ||
871 | } | ||
872 | |||
873 | if (status != HTT_RX_IND_MPDU_STATUS_OK && | ||
874 | status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR && | ||
875 | !htt->ar->monitor_enabled) { | ||
876 | ath10k_dbg(ATH10K_DBG_HTT, | ||
877 | "htt rx ignoring frame w/ status %d\n", | ||
878 | status); | ||
879 | ath10k_htt_rx_free_msdu_chain(msdu_head); | ||
880 | continue; | ||
881 | } | ||
882 | |||
883 | /* FIXME: we do not support chaining yet. | ||
884 | * this needs investigation */ | ||
885 | if (msdu_chaining) { | ||
886 | ath10k_warn("msdu_chaining is true\n"); | ||
887 | ath10k_htt_rx_free_msdu_chain(msdu_head); | ||
888 | continue; | ||
889 | } | ||
890 | |||
891 | info.skb = msdu_head; | ||
892 | info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head); | ||
893 | info.signal = ATH10K_DEFAULT_NOISE_FLOOR; | ||
894 | info.signal += rx->ppdu.combined_rssi; | ||
895 | |||
896 | info.rate.info0 = rx->ppdu.info0; | ||
897 | info.rate.info1 = __le32_to_cpu(rx->ppdu.info1); | ||
898 | info.rate.info2 = __le32_to_cpu(rx->ppdu.info2); | ||
899 | |||
900 | hdr = ath10k_htt_rx_skb_get_hdr(msdu_head); | ||
901 | |||
902 | if (ath10k_htt_rx_hdr_is_amsdu(hdr)) | ||
903 | ret = ath10k_htt_rx_amsdu(htt, &info); | ||
904 | else | ||
905 | ret = ath10k_htt_rx_msdu(htt, &info); | ||
906 | |||
907 | if (ret && !info.fcs_err) { | ||
908 | ath10k_warn("error processing msdus %d\n", ret); | ||
909 | dev_kfree_skb_any(info.skb); | ||
910 | continue; | ||
911 | } | ||
912 | |||
913 | if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data)) | ||
914 | ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n"); | ||
915 | |||
916 | ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ", | ||
917 | info.skb->data, info.skb->len); | ||
918 | ath10k_process_rx(htt->ar, &info); | ||
919 | } | ||
920 | } | ||
921 | |||
922 | ath10k_htt_rx_msdu_buff_replenish(htt); | ||
923 | } | ||
924 | |||
925 | static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, | ||
926 | struct htt_rx_fragment_indication *frag) | ||
927 | { | ||
928 | struct sk_buff *msdu_head, *msdu_tail; | ||
929 | struct htt_rx_desc *rxd; | ||
930 | enum rx_msdu_decap_format fmt; | ||
931 | struct htt_rx_info info = {}; | ||
932 | struct ieee80211_hdr *hdr; | ||
933 | int msdu_chaining; | ||
934 | bool tkip_mic_err; | ||
935 | bool decrypt_err; | ||
936 | u8 *fw_desc; | ||
937 | int fw_desc_len, hdrlen, paramlen; | ||
938 | int trim; | ||
939 | |||
940 | fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes); | ||
941 | fw_desc = (u8 *)frag->fw_msdu_rx_desc; | ||
942 | |||
943 | msdu_head = NULL; | ||
944 | msdu_tail = NULL; | ||
945 | msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len, | ||
946 | &msdu_head, &msdu_tail); | ||
947 | |||
948 | ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n"); | ||
949 | |||
950 | if (!msdu_head) { | ||
951 | ath10k_warn("htt rx frag no data\n"); | ||
952 | return; | ||
953 | } | ||
954 | |||
955 | if (msdu_chaining || msdu_head != msdu_tail) { | ||
956 | ath10k_warn("aggregation with fragmentation?!\n"); | ||
957 | ath10k_htt_rx_free_msdu_chain(msdu_head); | ||
958 | return; | ||
959 | } | ||
960 | |||
961 | /* FIXME: implement signal strength */ | ||
962 | |||
963 | hdr = (struct ieee80211_hdr *)msdu_head->data; | ||
964 | rxd = (void *)msdu_head->data - sizeof(*rxd); | ||
965 | tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) & | ||
966 | RX_ATTENTION_FLAGS_TKIP_MIC_ERR); | ||
967 | decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) & | ||
968 | RX_ATTENTION_FLAGS_DECRYPT_ERR); | ||
969 | fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), | ||
970 | RX_MSDU_START_INFO1_DECAP_FORMAT); | ||
971 | |||
972 | if (fmt != RX_MSDU_DECAP_RAW) { | ||
973 | ath10k_warn("we don't support non-raw fragmented rx yet\n"); | ||
974 | dev_kfree_skb_any(msdu_head); | ||
975 | goto end; | ||
976 | } | ||
977 | |||
978 | info.skb = msdu_head; | ||
979 | info.status = HTT_RX_IND_MPDU_STATUS_OK; | ||
980 | info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0), | ||
981 | RX_MPDU_START_INFO0_ENCRYPT_TYPE); | ||
982 | |||
983 | if (tkip_mic_err) { | ||
984 | ath10k_warn("tkip mic error\n"); | ||
985 | info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR; | ||
986 | } | ||
987 | |||
988 | if (decrypt_err) { | ||
989 | ath10k_warn("decryption err in fragmented rx\n"); | ||
990 | dev_kfree_skb_any(info.skb); | ||
991 | goto end; | ||
992 | } | ||
993 | |||
994 | if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) { | ||
995 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | ||
996 | paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type); | ||
997 | |||
998 | /* It is more efficient to move the header than the payload */ | ||
999 | memmove((void *)info.skb->data + paramlen, | ||
1000 | (void *)info.skb->data, | ||
1001 | hdrlen); | ||
1002 | skb_pull(info.skb, paramlen); | ||
1003 | hdr = (struct ieee80211_hdr *)info.skb->data; | ||
1004 | } | ||
1005 | |||
1006 | /* remove trailing FCS */ | ||
1007 | trim = 4; | ||
1008 | |||
1009 | /* remove crypto trailer */ | ||
1010 | trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type); | ||
1011 | |||
1012 | /* last fragment of TKIP frags has MIC */ | ||
1013 | if (!ieee80211_has_morefrags(hdr->frame_control) && | ||
1014 | info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) | ||
1015 | trim += 8; | ||
1016 | |||
1017 | if (trim > info.skb->len) { | ||
1018 | ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n"); | ||
1019 | dev_kfree_skb_any(info.skb); | ||
1020 | goto end; | ||
1021 | } | ||
1022 | |||
1023 | skb_trim(info.skb, info.skb->len - trim); | ||
1024 | |||
1025 | ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ", | ||
1026 | info.skb->data, info.skb->len); | ||
1027 | ath10k_process_rx(htt->ar, &info); | ||
1028 | |||
1029 | end: | ||
1030 | if (fw_desc_len > 0) { | ||
1031 | ath10k_dbg(ATH10K_DBG_HTT, | ||
1032 | "expecting more fragmented rx in one indication %d\n", | ||
1033 | fw_desc_len); | ||
1034 | } | ||
1035 | } | ||
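For protected fragments the code above strips the crypto parameter by sliding the 802.11 header forward with memmove() (cheaper than moving the payload) and then pulling the front of the buffer, before trimming the FCS, the crypto trailer and, for the last TKIP fragment, the MIC from the tail. A rough buffer-only sketch of the header slide, using toy lengths:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* [hdr (3)] [IV/param (2)] [payload (5)] -- toy lengths only */
	char frame[] = "HDRivPAYLO";
	int hdrlen = 3, paramlen = 2;
	int len = (int)strlen(frame);

	/* Slide the header forward over the crypto param, then "pull"
	 * the now-duplicated front bytes, as the fragment rx path does. */
	memmove(frame + paramlen, frame, hdrlen);
	char *data = frame + paramlen;
	int newlen = len - paramlen;

	printf("%.*s\n", newlen, data); /* prints HDRPAYLO */
	return 0;
}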
1036 | |||
1037 | void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) | ||
1038 | { | ||
1039 | struct ath10k_htt *htt = ar->htt; | ||
1040 | struct htt_resp *resp = (struct htt_resp *)skb->data; | ||
1041 | |||
1042 | /* confirm alignment */ | ||
1043 | if (!IS_ALIGNED((unsigned long)skb->data, 4)) | ||
1044 | ath10k_warn("unaligned htt message, expect trouble\n"); | ||
1045 | |||
1046 | ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n", | ||
1047 | resp->hdr.msg_type); | ||
1048 | switch (resp->hdr.msg_type) { | ||
1049 | case HTT_T2H_MSG_TYPE_VERSION_CONF: { | ||
1050 | htt->target_version_major = resp->ver_resp.major; | ||
1051 | htt->target_version_minor = resp->ver_resp.minor; | ||
1052 | complete(&htt->target_version_received); | ||
1053 | break; | ||
1054 | } | ||
1055 | case HTT_T2H_MSG_TYPE_RX_IND: { | ||
1056 | ath10k_htt_rx_handler(htt, &resp->rx_ind); | ||
1057 | break; | ||
1058 | } | ||
1059 | case HTT_T2H_MSG_TYPE_PEER_MAP: { | ||
1060 | struct htt_peer_map_event ev = { | ||
1061 | .vdev_id = resp->peer_map.vdev_id, | ||
1062 | .peer_id = __le16_to_cpu(resp->peer_map.peer_id), | ||
1063 | }; | ||
1064 | memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr)); | ||
1065 | ath10k_peer_map_event(htt, &ev); | ||
1066 | break; | ||
1067 | } | ||
1068 | case HTT_T2H_MSG_TYPE_PEER_UNMAP: { | ||
1069 | struct htt_peer_unmap_event ev = { | ||
1070 | .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id), | ||
1071 | }; | ||
1072 | ath10k_peer_unmap_event(htt, &ev); | ||
1073 | break; | ||
1074 | } | ||
1075 | case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: { | ||
1076 | struct htt_tx_done tx_done = {}; | ||
1077 | int status = __le32_to_cpu(resp->mgmt_tx_completion.status); | ||
1078 | |||
1079 | tx_done.msdu_id = | ||
1080 | __le32_to_cpu(resp->mgmt_tx_completion.desc_id); | ||
1081 | |||
1082 | switch (status) { | ||
1083 | case HTT_MGMT_TX_STATUS_OK: | ||
1084 | break; | ||
1085 | case HTT_MGMT_TX_STATUS_RETRY: | ||
1086 | tx_done.no_ack = true; | ||
1087 | break; | ||
1088 | case HTT_MGMT_TX_STATUS_DROP: | ||
1089 | tx_done.discard = true; | ||
1090 | break; | ||
1091 | } | ||
1092 | |||
1093 | ath10k_txrx_tx_completed(htt, &tx_done); | ||
1094 | break; | ||
1095 | } | ||
1096 | case HTT_T2H_MSG_TYPE_TX_COMPL_IND: { | ||
1097 | struct htt_tx_done tx_done = {}; | ||
1098 | int status = MS(resp->data_tx_completion.flags, | ||
1099 | HTT_DATA_TX_STATUS); | ||
1100 | __le16 msdu_id; | ||
1101 | int i; | ||
1102 | |||
1103 | switch (status) { | ||
1104 | case HTT_DATA_TX_STATUS_NO_ACK: | ||
1105 | tx_done.no_ack = true; | ||
1106 | break; | ||
1107 | case HTT_DATA_TX_STATUS_OK: | ||
1108 | break; | ||
1109 | case HTT_DATA_TX_STATUS_DISCARD: | ||
1110 | case HTT_DATA_TX_STATUS_POSTPONE: | ||
1111 | case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL: | ||
1112 | tx_done.discard = true; | ||
1113 | break; | ||
1114 | default: | ||
1115 | ath10k_warn("unhandled tx completion status %d\n", | ||
1116 | status); | ||
1117 | tx_done.discard = true; | ||
1118 | break; | ||
1119 | } | ||
1120 | |||
1121 | ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", | ||
1122 | resp->data_tx_completion.num_msdus); | ||
1123 | |||
1124 | for (i = 0; i < resp->data_tx_completion.num_msdus; i++) { | ||
1125 | msdu_id = resp->data_tx_completion.msdus[i]; | ||
1126 | tx_done.msdu_id = __le16_to_cpu(msdu_id); | ||
1127 | ath10k_txrx_tx_completed(htt, &tx_done); | ||
1128 | } | ||
1129 | break; | ||
1130 | } | ||
1131 | case HTT_T2H_MSG_TYPE_SEC_IND: { | ||
1132 | struct ath10k *ar = htt->ar; | ||
1133 | struct htt_security_indication *ev = &resp->security_indication; | ||
1134 | |||
1135 | ath10k_dbg(ATH10K_DBG_HTT, | ||
1136 | "sec ind peer_id %d unicast %d type %d\n", | ||
1137 | __le16_to_cpu(ev->peer_id), | ||
1138 | !!(ev->flags & HTT_SECURITY_IS_UNICAST), | ||
1139 | MS(ev->flags, HTT_SECURITY_TYPE)); | ||
1140 | complete(&ar->install_key_done); | ||
1141 | break; | ||
1142 | } | ||
1143 | case HTT_T2H_MSG_TYPE_RX_FRAG_IND: { | ||
1144 | ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", | ||
1145 | skb->data, skb->len); | ||
1146 | ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind); | ||
1147 | break; | ||
1148 | } | ||
1149 | case HTT_T2H_MSG_TYPE_TEST: | ||
1150 | /* FIX THIS */ | ||
1151 | break; | ||
1152 | case HTT_T2H_MSG_TYPE_TX_INSPECT_IND: | ||
1153 | case HTT_T2H_MSG_TYPE_STATS_CONF: | ||
1154 | case HTT_T2H_MSG_TYPE_RX_ADDBA: | ||
1155 | case HTT_T2H_MSG_TYPE_RX_DELBA: | ||
1156 | case HTT_T2H_MSG_TYPE_RX_FLUSH: | ||
1157 | default: | ||
1158 | ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n", | ||
1159 | resp->hdr.msg_type); | ||
1160 | ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", | ||
1161 | skb->data, skb->len); | ||
1162 | break; | ||
1163 | }; | ||
1164 | |||
1165 | /* Free the indication buffer */ | ||
1166 | dev_kfree_skb_any(skb); | ||
1167 | } | ||
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c new file mode 100644 index 000000000000..ef79106db247 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c | |||
@@ -0,0 +1,510 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #include <linux/etherdevice.h> | ||
19 | #include "htt.h" | ||
20 | #include "mac.h" | ||
21 | #include "hif.h" | ||
22 | #include "txrx.h" | ||
23 | #include "debug.h" | ||
24 | |||
25 | void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt) | ||
26 | { | ||
27 | htt->num_pending_tx--; | ||
28 | if (htt->num_pending_tx == htt->max_num_pending_tx - 1) | ||
29 | ieee80211_wake_queues(htt->ar->hw); | ||
30 | } | ||
31 | |||
32 | static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt) | ||
33 | { | ||
34 | spin_lock_bh(&htt->tx_lock); | ||
35 | __ath10k_htt_tx_dec_pending(htt); | ||
36 | spin_unlock_bh(&htt->tx_lock); | ||
37 | } | ||
38 | |||
39 | static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt) | ||
40 | { | ||
41 | int ret = 0; | ||
42 | |||
43 | spin_lock_bh(&htt->tx_lock); | ||
44 | |||
45 | if (htt->num_pending_tx >= htt->max_num_pending_tx) { | ||
46 | ret = -EBUSY; | ||
47 | goto exit; | ||
48 | } | ||
49 | |||
50 | htt->num_pending_tx++; | ||
51 | if (htt->num_pending_tx == htt->max_num_pending_tx) | ||
52 | ieee80211_stop_queues(htt->ar->hw); | ||
53 | |||
54 | exit: | ||
55 | spin_unlock_bh(&htt->tx_lock); | ||
56 | return ret; | ||
57 | } | ||
58 | |||
59 | int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt) | ||
60 | { | ||
61 | int msdu_id; | ||
62 | |||
63 | lockdep_assert_held(&htt->tx_lock); | ||
64 | |||
65 | msdu_id = find_first_zero_bit(htt->used_msdu_ids, | ||
66 | htt->max_num_pending_tx); | ||
67 | if (msdu_id == htt->max_num_pending_tx) | ||
68 | return -ENOBUFS; | ||
69 | |||
70 | ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id); | ||
71 | __set_bit(msdu_id, htt->used_msdu_ids); | ||
72 | return msdu_id; | ||
73 | } | ||
74 | |||
75 | void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id) | ||
76 | { | ||
77 | lockdep_assert_held(&htt->tx_lock); | ||
78 | |||
79 | if (!test_bit(msdu_id, htt->used_msdu_ids)) | ||
80 | ath10k_warn("trying to free unallocated msdu_id %d\n", msdu_id); | ||
81 | |||
82 | ath10k_dbg(ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id); | ||
83 | __clear_bit(msdu_id, htt->used_msdu_ids); | ||
84 | } | ||
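MSDU ids are handed out from a bitmap under tx_lock: find the first clear bit, set it, and clear it again when the frame completes; -ENOBUFS is returned when every id is in use. A user-space analogue using plain bit operations instead of the kernel bitmap helpers:

#include <stdio.h>

#define MAX_PENDING 8

static unsigned long used_ids; /* one bit per msdu id */

static int alloc_id(void)
{
	for (int i = 0; i < MAX_PENDING; i++) {
		if (!(used_ids & (1UL << i))) {
			used_ids |= 1UL << i;   /* mark as in use */
			return i;
		}
	}
	return -1; /* all ids busy, cf. -ENOBUFS in the driver */
}

static void free_id(int id)
{
	used_ids &= ~(1UL << id);
}

int main(void)
{
	int a = alloc_id(), b = alloc_id();

	printf("%d %d\n", a, b);    /* 0 1 */
	free_id(a);
	printf("%d\n", alloc_id()); /* 0 again */
	return 0;
}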
85 | |||
86 | int ath10k_htt_tx_attach(struct ath10k_htt *htt) | ||
87 | { | ||
88 | u8 pipe; | ||
89 | |||
90 | spin_lock_init(&htt->tx_lock); | ||
91 | init_waitqueue_head(&htt->empty_tx_wq); | ||
92 | |||
93 | /* At the beginning the free queue number should hint at the | ||
94 | * maximum queue length */ | ||
95 | pipe = htt->ar->htc->endpoint[htt->eid].ul_pipe_id; | ||
96 | htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar, | ||
97 | pipe); | ||
98 | |||
99 | ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx %d\n", | ||
100 | htt->max_num_pending_tx); | ||
101 | |||
102 | htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) * | ||
103 | htt->max_num_pending_tx, GFP_KERNEL); | ||
104 | if (!htt->pending_tx) | ||
105 | return -ENOMEM; | ||
106 | |||
107 | htt->used_msdu_ids = kzalloc(sizeof(unsigned long) * | ||
108 | BITS_TO_LONGS(htt->max_num_pending_tx), | ||
109 | GFP_KERNEL); | ||
110 | if (!htt->used_msdu_ids) { | ||
111 | kfree(htt->pending_tx); | ||
112 | return -ENOMEM; | ||
113 | } | ||
114 | |||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt) | ||
119 | { | ||
120 | struct sk_buff *txdesc; | ||
121 | int msdu_id; | ||
122 | |||
123 | /* No locks needed. Called after communication with the device has | ||
124 | * been stopped. */ | ||
125 | |||
126 | for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) { | ||
127 | if (!test_bit(msdu_id, htt->used_msdu_ids)) | ||
128 | continue; | ||
129 | |||
130 | txdesc = htt->pending_tx[msdu_id]; | ||
131 | if (!txdesc) | ||
132 | continue; | ||
133 | |||
134 | ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", | ||
135 | msdu_id); | ||
136 | |||
137 | if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0) | ||
138 | ATH10K_SKB_CB(txdesc)->htt.refcount = 1; | ||
139 | |||
140 | ATH10K_SKB_CB(txdesc)->htt.discard = true; | ||
141 | ath10k_txrx_tx_unref(htt, txdesc); | ||
142 | } | ||
143 | } | ||
144 | |||
145 | void ath10k_htt_tx_detach(struct ath10k_htt *htt) | ||
146 | { | ||
147 | ath10k_htt_tx_cleanup_pending(htt); | ||
148 | kfree(htt->pending_tx); | ||
149 | kfree(htt->used_msdu_ids); | ||
150 | return; | ||
151 | } | ||
152 | |||
153 | void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) | ||
154 | { | ||
155 | struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); | ||
156 | struct ath10k_htt *htt = ar->htt; | ||
157 | |||
158 | if (skb_cb->htt.is_conf) { | ||
159 | dev_kfree_skb_any(skb); | ||
160 | return; | ||
161 | } | ||
162 | |||
163 | if (skb_cb->is_aborted) { | ||
164 | skb_cb->htt.discard = true; | ||
165 | |||
166 | /* If the skbuff is aborted we need to make sure we free up | ||
167 | * the tx resources. We can't simply run tx_unref() twice, | ||
168 | * because if the htt tx completion came in earlier we'd access | ||
169 | * unallocated memory */ | ||
170 | if (skb_cb->htt.refcount > 1) | ||
171 | skb_cb->htt.refcount = 1; | ||
172 | } | ||
173 | |||
174 | ath10k_txrx_tx_unref(htt, skb); | ||
175 | } | ||
176 | |||
177 | int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) | ||
178 | { | ||
179 | struct sk_buff *skb; | ||
180 | struct htt_cmd *cmd; | ||
181 | int len = 0; | ||
182 | int ret; | ||
183 | |||
184 | len += sizeof(cmd->hdr); | ||
185 | len += sizeof(cmd->ver_req); | ||
186 | |||
187 | skb = ath10k_htc_alloc_skb(len); | ||
188 | if (!skb) | ||
189 | return -ENOMEM; | ||
190 | |||
191 | skb_put(skb, len); | ||
192 | cmd = (struct htt_cmd *)skb->data; | ||
193 | cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ; | ||
194 | |||
195 | ATH10K_SKB_CB(skb)->htt.is_conf = true; | ||
196 | |||
197 | ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb); | ||
198 | if (ret) { | ||
199 | dev_kfree_skb_any(skb); | ||
200 | return ret; | ||
201 | } | ||
202 | |||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) | ||
207 | { | ||
208 | struct sk_buff *skb; | ||
209 | struct htt_cmd *cmd; | ||
210 | struct htt_rx_ring_setup_ring *ring; | ||
211 | const int num_rx_ring = 1; | ||
212 | u16 flags; | ||
213 | u32 fw_idx; | ||
214 | int len; | ||
215 | int ret; | ||
216 | |||
217 | /* | ||
218 | * the HW expects the buffer to be an integral number of 4-byte | ||
219 | * "words" | ||
220 | */ | ||
221 | BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4)); | ||
222 | BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0); | ||
223 | |||
224 | len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr) | ||
225 | + (sizeof(*ring) * num_rx_ring); | ||
226 | skb = ath10k_htc_alloc_skb(len); | ||
227 | if (!skb) | ||
228 | return -ENOMEM; | ||
229 | |||
230 | skb_put(skb, len); | ||
231 | |||
232 | cmd = (struct htt_cmd *)skb->data; | ||
233 | ring = &cmd->rx_setup.rings[0]; | ||
234 | |||
235 | cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG; | ||
236 | cmd->rx_setup.hdr.num_rings = 1; | ||
237 | |||
238 | /* FIXME: do we need all of this? */ | ||
239 | flags = 0; | ||
240 | flags |= HTT_RX_RING_FLAGS_MAC80211_HDR; | ||
241 | flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD; | ||
242 | flags |= HTT_RX_RING_FLAGS_PPDU_START; | ||
243 | flags |= HTT_RX_RING_FLAGS_PPDU_END; | ||
244 | flags |= HTT_RX_RING_FLAGS_MPDU_START; | ||
245 | flags |= HTT_RX_RING_FLAGS_MPDU_END; | ||
246 | flags |= HTT_RX_RING_FLAGS_MSDU_START; | ||
247 | flags |= HTT_RX_RING_FLAGS_MSDU_END; | ||
248 | flags |= HTT_RX_RING_FLAGS_RX_ATTENTION; | ||
249 | flags |= HTT_RX_RING_FLAGS_FRAG_INFO; | ||
250 | flags |= HTT_RX_RING_FLAGS_UNICAST_RX; | ||
251 | flags |= HTT_RX_RING_FLAGS_MULTICAST_RX; | ||
252 | flags |= HTT_RX_RING_FLAGS_CTRL_RX; | ||
253 | flags |= HTT_RX_RING_FLAGS_MGMT_RX; | ||
254 | flags |= HTT_RX_RING_FLAGS_NULL_RX; | ||
255 | flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX; | ||
256 | |||
257 | fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); | ||
258 | |||
259 | ring->fw_idx_shadow_reg_paddr = | ||
260 | __cpu_to_le32(htt->rx_ring.alloc_idx.paddr); | ||
261 | ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr); | ||
262 | ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size); | ||
263 | ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE); | ||
264 | ring->flags = __cpu_to_le16(flags); | ||
265 | ring->fw_idx_init_val = __cpu_to_le16(fw_idx); | ||
266 | |||
267 | #define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4) | ||
268 | |||
269 | ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status)); | ||
270 | ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload)); | ||
271 | ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start)); | ||
272 | ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end)); | ||
273 | ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start)); | ||
274 | ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end)); | ||
275 | ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start)); | ||
276 | ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end)); | ||
277 | ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention)); | ||
278 | ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info)); | ||
279 | |||
280 | #undef desc_offset | ||
281 | |||
282 | ATH10K_SKB_CB(skb)->htt.is_conf = true; | ||
283 | |||
284 | ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb); | ||
285 | if (ret) { | ||
286 | dev_kfree_skb_any(skb); | ||
287 | return ret; | ||
288 | } | ||
289 | |||
290 | return 0; | ||
291 | } | ||
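The desc_offset() helper above converts a byte offset inside struct htt_rx_desc into the 4-byte word offset that the rx ring setup message expects. A stand-alone illustration of that offsetof()/4 idiom with a made-up descriptor layout:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Toy descriptor layout; only the offset arithmetic matters here. */
struct demo_rx_desc {
	uint32_t attention;
	uint32_t frag_info;
	uint32_t mpdu_start[2];
	uint32_t msdu_start[3];
};

/* Byte offset -> 32-bit word offset, as in the driver's desc_offset(). */
#define desc_offset(x) (offsetof(struct demo_rx_desc, x) / 4)

int main(void)
{
	printf("frag_info word offset: %zu\n", desc_offset(frag_info));   /* 1 */
	printf("msdu_start word offset: %zu\n", desc_offset(msdu_start)); /* 4 */
	return 0;
}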
292 | |||
293 | int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) | ||
294 | { | ||
295 | struct device *dev = htt->ar->dev; | ||
296 | struct ath10k_skb_cb *skb_cb; | ||
297 | struct sk_buff *txdesc = NULL; | ||
298 | struct htt_cmd *cmd; | ||
299 | u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id; | ||
300 | int len = 0; | ||
301 | int msdu_id = -1; | ||
302 | int res; | ||
303 | |||
304 | |||
305 | res = ath10k_htt_tx_inc_pending(htt); | ||
306 | if (res) | ||
307 | return res; | ||
308 | |||
309 | len += sizeof(cmd->hdr); | ||
310 | len += sizeof(cmd->mgmt_tx); | ||
311 | |||
312 | txdesc = ath10k_htc_alloc_skb(len); | ||
313 | if (!txdesc) { | ||
314 | res = -ENOMEM; | ||
315 | goto err; | ||
316 | } | ||
317 | |||
318 | spin_lock_bh(&htt->tx_lock); | ||
319 | msdu_id = ath10k_htt_tx_alloc_msdu_id(htt); | ||
320 | if (msdu_id < 0) { | ||
321 | spin_unlock_bh(&htt->tx_lock); | ||
322 | res = msdu_id; | ||
323 | goto err; | ||
324 | } | ||
325 | htt->pending_tx[msdu_id] = txdesc; | ||
326 | spin_unlock_bh(&htt->tx_lock); | ||
327 | |||
328 | res = ath10k_skb_map(dev, msdu); | ||
329 | if (res) | ||
330 | goto err; | ||
331 | |||
332 | skb_put(txdesc, len); | ||
333 | cmd = (struct htt_cmd *)txdesc->data; | ||
334 | cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX; | ||
335 | cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr); | ||
336 | cmd->mgmt_tx.len = __cpu_to_le32(msdu->len); | ||
337 | cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id); | ||
338 | cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id); | ||
339 | memcpy(cmd->mgmt_tx.hdr, msdu->data, | ||
340 | min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN)); | ||
341 | |||
342 | /* The refcount is decremented by the HTC and HTT completions; once it | ||
343 | * reaches zero the txdesc is freed */ | ||
344 | skb_cb = ATH10K_SKB_CB(txdesc); | ||
345 | skb_cb->htt.msdu_id = msdu_id; | ||
346 | skb_cb->htt.refcount = 2; | ||
347 | skb_cb->htt.msdu = msdu; | ||
348 | |||
349 | res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc); | ||
350 | if (res) | ||
351 | goto err; | ||
352 | |||
353 | return 0; | ||
354 | |||
355 | err: | ||
356 | ath10k_skb_unmap(dev, msdu); | ||
357 | |||
358 | if (txdesc) | ||
359 | dev_kfree_skb_any(txdesc); | ||
360 | if (msdu_id >= 0) { | ||
361 | spin_lock_bh(&htt->tx_lock); | ||
362 | htt->pending_tx[msdu_id] = NULL; | ||
363 | ath10k_htt_tx_free_msdu_id(htt, msdu_id); | ||
364 | spin_unlock_bh(&htt->tx_lock); | ||
365 | } | ||
366 | ath10k_htt_tx_dec_pending(htt); | ||
367 | return res; | ||
368 | } | ||
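Each tx descriptor starts with htt.refcount = 2 because two independent completions (the HTC send completion and the HTT target completion) must both run before the buffers may be released; whichever one drops the count to zero does the freeing. A minimal sketch of that pattern, with locking omitted:

#include <stdio.h>
#include <stdlib.h>

struct txdesc {
	int refcount;  /* starts at 2: HTC completion + HTT completion */
	char *payload;
};

static void tx_unref(struct txdesc *desc)
{
	if (--desc->refcount > 0)
		return;              /* the other completion is still pending */

	free(desc->payload);         /* last reference: release resources */
	free(desc);
	printf("freed\n");
}

int main(void)
{
	struct txdesc *desc = malloc(sizeof(*desc));

	desc->refcount = 2;
	desc->payload = malloc(64);

	tx_unref(desc); /* e.g. HTC tx complete */
	tx_unref(desc); /* e.g. HTT tx complete -> actually frees */
	return 0;
}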
369 | |||
370 | int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) | ||
371 | { | ||
372 | struct device *dev = htt->ar->dev; | ||
373 | struct htt_cmd *cmd; | ||
374 | struct htt_data_tx_desc_frag *tx_frags; | ||
375 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; | ||
376 | struct ath10k_skb_cb *skb_cb; | ||
377 | struct sk_buff *txdesc = NULL; | ||
378 | struct sk_buff *txfrag = NULL; | ||
379 | u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id; | ||
380 | u8 tid; | ||
381 | int prefetch_len, desc_len, frag_len; | ||
382 | dma_addr_t frags_paddr; | ||
383 | int msdu_id = -1; | ||
384 | int res; | ||
385 | u8 flags0; | ||
386 | u16 flags1; | ||
387 | |||
388 | res = ath10k_htt_tx_inc_pending(htt); | ||
389 | if (res) | ||
390 | return res; | ||
391 | |||
392 | prefetch_len = min(htt->prefetch_len, msdu->len); | ||
393 | prefetch_len = roundup(prefetch_len, 4); | ||
394 | |||
395 | desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len; | ||
396 | frag_len = sizeof(*tx_frags) * 2; | ||
397 | |||
398 | txdesc = ath10k_htc_alloc_skb(desc_len); | ||
399 | if (!txdesc) { | ||
400 | res = -ENOMEM; | ||
401 | goto err; | ||
402 | } | ||
403 | |||
404 | txfrag = dev_alloc_skb(frag_len); | ||
405 | if (!txfrag) { | ||
406 | res = -ENOMEM; | ||
407 | goto err; | ||
408 | } | ||
409 | |||
410 | if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) { | ||
411 | ath10k_warn("htt alignment check failed. dropping packet.\n"); | ||
412 | res = -EIO; | ||
413 | goto err; | ||
414 | } | ||
415 | |||
416 | spin_lock_bh(&htt->tx_lock); | ||
417 | msdu_id = ath10k_htt_tx_alloc_msdu_id(htt); | ||
418 | if (msdu_id < 0) { | ||
419 | spin_unlock_bh(&htt->tx_lock); | ||
420 | res = msdu_id; | ||
421 | goto err; | ||
422 | } | ||
423 | htt->pending_tx[msdu_id] = txdesc; | ||
424 | spin_unlock_bh(&htt->tx_lock); | ||
425 | |||
426 | res = ath10k_skb_map(dev, msdu); | ||
427 | if (res) | ||
428 | goto err; | ||
429 | |||
430 | /* the tx fragment list must be terminated with a zero entry */ | ||
431 | skb_put(txfrag, frag_len); | ||
432 | tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data; | ||
433 | tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr); | ||
434 | tx_frags[0].len = __cpu_to_le32(msdu->len); | ||
435 | tx_frags[1].paddr = __cpu_to_le32(0); | ||
436 | tx_frags[1].len = __cpu_to_le32(0); | ||
437 | |||
438 | res = ath10k_skb_map(dev, txfrag); | ||
439 | if (res) | ||
440 | goto err; | ||
441 | |||
442 | ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n", | ||
443 | (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr, | ||
444 | (unsigned long long) ATH10K_SKB_CB(msdu)->paddr); | ||
445 | ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ", | ||
446 | txfrag->data, frag_len); | ||
447 | ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ", | ||
448 | msdu->data, msdu->len); | ||
449 | |||
450 | skb_put(txdesc, desc_len); | ||
451 | cmd = (struct htt_cmd *)txdesc->data; | ||
452 | memset(cmd, 0, desc_len); | ||
453 | |||
454 | tid = ATH10K_SKB_CB(msdu)->htt.tid; | ||
455 | |||
456 | ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid); | ||
457 | |||
458 | flags0 = 0; | ||
459 | if (!ieee80211_has_protected(hdr->frame_control)) | ||
460 | flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; | ||
461 | flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; | ||
462 | flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI, | ||
463 | HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); | ||
464 | |||
465 | flags1 = 0; | ||
466 | flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); | ||
467 | flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); | ||
468 | |||
469 | frags_paddr = ATH10K_SKB_CB(txfrag)->paddr; | ||
470 | |||
471 | cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM; | ||
472 | cmd->data_tx.flags0 = flags0; | ||
473 | cmd->data_tx.flags1 = __cpu_to_le16(flags1); | ||
474 | cmd->data_tx.len = __cpu_to_le16(msdu->len); | ||
475 | cmd->data_tx.id = __cpu_to_le16(msdu_id); | ||
476 | cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr); | ||
477 | cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID); | ||
478 | |||
479 | memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len); | ||
480 | |||
481 | /* The refcount is decremented by the HTC and HTT completions; once it | ||
482 | * reaches zero the txdesc is freed */ | ||
483 | skb_cb = ATH10K_SKB_CB(txdesc); | ||
484 | skb_cb->htt.msdu_id = msdu_id; | ||
485 | skb_cb->htt.refcount = 2; | ||
486 | skb_cb->htt.txfrag = txfrag; | ||
487 | skb_cb->htt.msdu = msdu; | ||
488 | |||
489 | res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc); | ||
490 | if (res) | ||
491 | goto err; | ||
492 | |||
493 | return 0; | ||
494 | err: | ||
495 | if (txfrag) | ||
496 | ath10k_skb_unmap(dev, txfrag); | ||
497 | if (txdesc) | ||
498 | dev_kfree_skb_any(txdesc); | ||
499 | if (txfrag) | ||
500 | dev_kfree_skb_any(txfrag); | ||
501 | if (msdu_id >= 0) { | ||
502 | spin_lock_bh(&htt->tx_lock); | ||
503 | htt->pending_tx[msdu_id] = NULL; | ||
504 | ath10k_htt_tx_free_msdu_id(htt, msdu_id); | ||
505 | spin_unlock_bh(&htt->tx_lock); | ||
506 | } | ||
507 | ath10k_htt_tx_dec_pending(htt); | ||
508 | ath10k_skb_unmap(dev, msdu); | ||
509 | return res; | ||
510 | } | ||
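The data tx path hands the target a two-entry scatter-gather list rather than the frame itself, and the list is terminated by an all-zero entry; that is why two fragment descriptors are allocated for a single-fragment MSDU. A small host-side sketch of building such a list (the structure here is illustrative, and the endianness conversion done by the driver is omitted):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Illustrative DMA fragment descriptor: address + length. */
struct demo_tx_frag {
	uint32_t paddr;
	uint32_t len;
};

int main(void)
{
	struct demo_tx_frag frags[2];
	uint32_t msdu_paddr = 0x12345000; /* made-up DMA address */
	uint32_t msdu_len = 1500;

	frags[0].paddr = msdu_paddr;
	frags[0].len = msdu_len;
	memset(&frags[1], 0, sizeof(frags[1])); /* zero entry terminates the list */

	for (int i = 0; i < 2; i++)
		printf("frag %d: paddr=0x%08x len=%u\n", i,
		       (unsigned int)frags[i].paddr, (unsigned int)frags[i].len);
	return 0;
}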
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h new file mode 100644 index 000000000000..44ed5af0a204 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/hw.h | |||
@@ -0,0 +1,304 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #ifndef _HW_H_ | ||
19 | #define _HW_H_ | ||
20 | |||
21 | #include "targaddrs.h" | ||
22 | |||
23 | /* Supported FW version */ | ||
24 | #define SUPPORTED_FW_MAJOR 1 | ||
25 | #define SUPPORTED_FW_MINOR 0 | ||
26 | #define SUPPORTED_FW_RELEASE 0 | ||
27 | #define SUPPORTED_FW_BUILD 629 | ||
28 | |||
29 | /* QCA988X 1.0 definitions */ | ||
30 | #define QCA988X_HW_1_0_VERSION 0x4000002c | ||
31 | #define QCA988X_HW_1_0_FW_DIR "ath10k/QCA988X/hw1.0" | ||
32 | #define QCA988X_HW_1_0_FW_FILE "firmware.bin" | ||
33 | #define QCA988X_HW_1_0_OTP_FILE "otp.bin" | ||
34 | #define QCA988X_HW_1_0_BOARD_DATA_FILE "board.bin" | ||
35 | #define QCA988X_HW_1_0_PATCH_LOAD_ADDR 0x1234 | ||
36 | |||
37 | /* QCA988X 2.0 definitions */ | ||
38 | #define QCA988X_HW_2_0_VERSION 0x4100016c | ||
39 | #define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0" | ||
40 | #define QCA988X_HW_2_0_FW_FILE "firmware.bin" | ||
41 | #define QCA988X_HW_2_0_OTP_FILE "otp.bin" | ||
42 | #define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin" | ||
43 | #define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234 | ||
44 | |||
45 | /* Known peculiarities: | ||
46 | * - current FW doesn't support raw rx mode (last tested v599) | ||
47 | * - current FW dumps upon raw tx mode (last tested v599) | ||
48 | * - raw appears in nwifi decap, raw and nwifi appear in ethernet decap | ||
49 | * - raw frames have an FCS, nwifi frames don't | ||
50 | * - ethernet frames have the 802.11 header decapped and parts (base hdr, | ||
51 | * cipher param, llc/snap) are aligned to 4-byte boundaries each */ | ||
52 | enum ath10k_hw_txrx_mode { | ||
53 | ATH10K_HW_TXRX_RAW = 0, | ||
54 | ATH10K_HW_TXRX_NATIVE_WIFI = 1, | ||
55 | ATH10K_HW_TXRX_ETHERNET = 2, | ||
56 | }; | ||
57 | |||
58 | enum ath10k_mcast2ucast_mode { | ||
59 | ATH10K_MCAST2UCAST_DISABLED = 0, | ||
60 | ATH10K_MCAST2UCAST_ENABLED = 1, | ||
61 | }; | ||
62 | |||
63 | #define TARGET_NUM_VDEVS 8 | ||
64 | #define TARGET_NUM_PEER_AST 2 | ||
65 | #define TARGET_NUM_WDS_ENTRIES 32 | ||
66 | #define TARGET_DMA_BURST_SIZE 0 | ||
67 | #define TARGET_MAC_AGGR_DELIM 0 | ||
68 | #define TARGET_AST_SKID_LIMIT 16 | ||
69 | #define TARGET_NUM_PEERS 16 | ||
70 | #define TARGET_NUM_OFFLOAD_PEERS 0 | ||
71 | #define TARGET_NUM_OFFLOAD_REORDER_BUFS 0 | ||
72 | #define TARGET_NUM_PEER_KEYS 2 | ||
73 | #define TARGET_NUM_TIDS (2 * ((TARGET_NUM_PEERS) + (TARGET_NUM_VDEVS))) | ||
74 | #define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) | ||
75 | #define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) | ||
76 | #define TARGET_RX_TIMEOUT_LO_PRI 100 | ||
77 | #define TARGET_RX_TIMEOUT_HI_PRI 40 | ||
78 | #define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_ETHERNET | ||
79 | #define TARGET_SCAN_MAX_PENDING_REQS 4 | ||
80 | #define TARGET_BMISS_OFFLOAD_MAX_VDEV 3 | ||
81 | #define TARGET_ROAM_OFFLOAD_MAX_VDEV 3 | ||
82 | #define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES 8 | ||
83 | #define TARGET_GTK_OFFLOAD_MAX_VDEV 3 | ||
84 | #define TARGET_NUM_MCAST_GROUPS 0 | ||
85 | #define TARGET_NUM_MCAST_TABLE_ELEMS 0 | ||
86 | #define TARGET_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED | ||
87 | #define TARGET_TX_DBG_LOG_SIZE 1024 | ||
88 | #define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 0 | ||
89 | #define TARGET_VOW_CONFIG 0 | ||
90 | #define TARGET_NUM_MSDU_DESC (1024 + 400) | ||
91 | #define TARGET_MAX_FRAG_ENTRIES 0 | ||
92 | |||
93 | |||
94 | /* Number of Copy Engines supported */ | ||
95 | #define CE_COUNT 8 | ||
96 | |||
97 | /* | ||
98 | * Total number of PCIe MSI interrupts requested for all interrupt sources. | ||
99 | * The PCIe standard forces this to be a power of 2. | ||
100 | * Some host OSes limit the number of MSI requests that can be granted | ||
101 | * to 8, so for now we abide by this limit and avoid requesting more | ||
102 | * than that. | ||
103 | */ | ||
104 | #define MSI_NUM_REQUEST_LOG2 3 | ||
105 | #define MSI_NUM_REQUEST (1<<MSI_NUM_REQUEST_LOG2) | ||
106 | |||
107 | /* | ||
108 | * Granted MSIs are assigned as follows: | ||
109 | * Firmware uses the first. | ||
110 | * The remaining MSIs, if any, are used by the Copy Engines. | ||
111 | * This mapping is known to both Target firmware and Host software. | ||
112 | * It may be changed as long as Host and Target are kept in sync. | ||
113 | */ | ||
114 | /* MSI for firmware (errors, etc.) */ | ||
115 | #define MSI_ASSIGN_FW 0 | ||
116 | |||
117 | /* MSIs for Copy Engines */ | ||
118 | #define MSI_ASSIGN_CE_INITIAL 1 | ||
119 | #define MSI_ASSIGN_CE_MAX 7 | ||
120 | |||
121 | /* as of IP3.7.1 */ | ||
122 | #define RTC_STATE_V_ON 3 | ||
123 | |||
124 | #define RTC_STATE_COLD_RESET_MASK 0x00000400 | ||
125 | #define RTC_STATE_V_LSB 0 | ||
126 | #define RTC_STATE_V_MASK 0x00000007 | ||
127 | #define RTC_STATE_ADDRESS 0x0000 | ||
128 | #define PCIE_SOC_WAKE_V_MASK 0x00000001 | ||
129 | #define PCIE_SOC_WAKE_ADDRESS 0x0004 | ||
130 | #define PCIE_SOC_WAKE_RESET 0x00000000 | ||
131 | #define SOC_GLOBAL_RESET_ADDRESS 0x0008 | ||
132 | |||
133 | #define RTC_SOC_BASE_ADDRESS 0x00004000 | ||
134 | #define RTC_WMAC_BASE_ADDRESS 0x00005000 | ||
135 | #define MAC_COEX_BASE_ADDRESS 0x00006000 | ||
136 | #define BT_COEX_BASE_ADDRESS 0x00007000 | ||
137 | #define SOC_PCIE_BASE_ADDRESS 0x00008000 | ||
138 | #define SOC_CORE_BASE_ADDRESS 0x00009000 | ||
139 | #define WLAN_UART_BASE_ADDRESS 0x0000c000 | ||
140 | #define WLAN_SI_BASE_ADDRESS 0x00010000 | ||
141 | #define WLAN_GPIO_BASE_ADDRESS 0x00014000 | ||
142 | #define WLAN_ANALOG_INTF_BASE_ADDRESS 0x0001c000 | ||
143 | #define WLAN_MAC_BASE_ADDRESS 0x00020000 | ||
144 | #define EFUSE_BASE_ADDRESS 0x00030000 | ||
145 | #define FPGA_REG_BASE_ADDRESS 0x00039000 | ||
146 | #define WLAN_UART2_BASE_ADDRESS 0x00054c00 | ||
147 | #define CE_WRAPPER_BASE_ADDRESS 0x00057000 | ||
148 | #define CE0_BASE_ADDRESS 0x00057400 | ||
149 | #define CE1_BASE_ADDRESS 0x00057800 | ||
150 | #define CE2_BASE_ADDRESS 0x00057c00 | ||
151 | #define CE3_BASE_ADDRESS 0x00058000 | ||
152 | #define CE4_BASE_ADDRESS 0x00058400 | ||
153 | #define CE5_BASE_ADDRESS 0x00058800 | ||
154 | #define CE6_BASE_ADDRESS 0x00058c00 | ||
155 | #define CE7_BASE_ADDRESS 0x00059000 | ||
156 | #define DBI_BASE_ADDRESS 0x00060000 | ||
157 | #define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000 | ||
158 | #define PCIE_LOCAL_BASE_ADDRESS 0x00080000 | ||
159 | |||
160 | #define SOC_RESET_CONTROL_OFFSET 0x00000000 | ||
161 | #define SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001 | ||
162 | #define SOC_CPU_CLOCK_OFFSET 0x00000020 | ||
163 | #define SOC_CPU_CLOCK_STANDARD_LSB 0 | ||
164 | #define SOC_CPU_CLOCK_STANDARD_MASK 0x00000003 | ||
165 | #define SOC_CLOCK_CONTROL_OFFSET 0x00000028 | ||
166 | #define SOC_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001 | ||
167 | #define SOC_SYSTEM_SLEEP_OFFSET 0x000000c4 | ||
168 | #define SOC_LPO_CAL_OFFSET 0x000000e0 | ||
169 | #define SOC_LPO_CAL_ENABLE_LSB 20 | ||
170 | #define SOC_LPO_CAL_ENABLE_MASK 0x00100000 | ||
171 | |||
172 | #define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008 | ||
173 | #define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004 | ||
174 | #define WLAN_SYSTEM_SLEEP_DISABLE_LSB 0 | ||
175 | #define WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001 | ||
176 | |||
177 | #define WLAN_GPIO_PIN0_ADDRESS 0x00000028 | ||
178 | #define WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800 | ||
179 | #define WLAN_GPIO_PIN1_ADDRESS 0x0000002c | ||
180 | #define WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800 | ||
181 | #define WLAN_GPIO_PIN10_ADDRESS 0x00000050 | ||
182 | #define WLAN_GPIO_PIN11_ADDRESS 0x00000054 | ||
183 | #define WLAN_GPIO_PIN12_ADDRESS 0x00000058 | ||
184 | #define WLAN_GPIO_PIN13_ADDRESS 0x0000005c | ||
185 | |||
186 | #define CLOCK_GPIO_OFFSET 0xffffffff | ||
187 | #define CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0 | ||
188 | #define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0 | ||
189 | |||
190 | #define SI_CONFIG_OFFSET 0x00000000 | ||
191 | #define SI_CONFIG_BIDIR_OD_DATA_LSB 18 | ||
192 | #define SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000 | ||
193 | #define SI_CONFIG_I2C_LSB 16 | ||
194 | #define SI_CONFIG_I2C_MASK 0x00010000 | ||
195 | #define SI_CONFIG_POS_SAMPLE_LSB 7 | ||
196 | #define SI_CONFIG_POS_SAMPLE_MASK 0x00000080 | ||
197 | #define SI_CONFIG_INACTIVE_DATA_LSB 5 | ||
198 | #define SI_CONFIG_INACTIVE_DATA_MASK 0x00000020 | ||
199 | #define SI_CONFIG_INACTIVE_CLK_LSB 4 | ||
200 | #define SI_CONFIG_INACTIVE_CLK_MASK 0x00000010 | ||
201 | #define SI_CONFIG_DIVIDER_LSB 0 | ||
202 | #define SI_CONFIG_DIVIDER_MASK 0x0000000f | ||
203 | #define SI_CS_OFFSET 0x00000004 | ||
204 | #define SI_CS_DONE_ERR_MASK 0x00000400 | ||
205 | #define SI_CS_DONE_INT_MASK 0x00000200 | ||
206 | #define SI_CS_START_LSB 8 | ||
207 | #define SI_CS_START_MASK 0x00000100 | ||
208 | #define SI_CS_RX_CNT_LSB 4 | ||
209 | #define SI_CS_RX_CNT_MASK 0x000000f0 | ||
210 | #define SI_CS_TX_CNT_LSB 0 | ||
211 | #define SI_CS_TX_CNT_MASK 0x0000000f | ||
212 | |||
213 | #define SI_TX_DATA0_OFFSET 0x00000008 | ||
214 | #define SI_TX_DATA1_OFFSET 0x0000000c | ||
215 | #define SI_RX_DATA0_OFFSET 0x00000010 | ||
216 | #define SI_RX_DATA1_OFFSET 0x00000014 | ||
217 | |||
218 | #define CORE_CTRL_CPU_INTR_MASK 0x00002000 | ||
219 | #define CORE_CTRL_ADDRESS 0x0000 | ||
220 | #define PCIE_INTR_ENABLE_ADDRESS 0x0008 | ||
221 | #define PCIE_INTR_CLR_ADDRESS 0x0014 | ||
222 | #define SCRATCH_3_ADDRESS 0x0030 | ||
223 | |||
224 | /* Firmware indications to the Host via SCRATCH_3 register. */ | ||
225 | #define FW_INDICATOR_ADDRESS (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS) | ||
226 | #define FW_IND_EVENT_PENDING 1 | ||
227 | #define FW_IND_INITIALIZED 2 | ||
228 | |||
229 | /* HOST_REG interrupt from firmware */ | ||
230 | #define PCIE_INTR_FIRMWARE_MASK 0x00000400 | ||
231 | #define PCIE_INTR_CE_MASK_ALL 0x0007f800 | ||
232 | |||
233 | #define DRAM_BASE_ADDRESS 0x00400000 | ||
234 | |||
235 | #define MISSING 0 | ||
236 | |||
237 | #define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET | ||
238 | #define WLAN_SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET | ||
239 | #define WLAN_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_OFFSET | ||
240 | #define CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_OFFSET | ||
241 | #define CLOCK_CONTROL_SI0_CLK_MASK SOC_CLOCK_CONTROL_SI0_CLK_MASK | ||
242 | #define RESET_CONTROL_MBOX_RST_MASK MISSING | ||
243 | #define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK | ||
244 | #define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS | ||
245 | #define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS | ||
246 | #define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS | ||
247 | #define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK | ||
248 | #define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK | ||
249 | #define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS | ||
250 | #define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS | ||
251 | #define LOCAL_SCRATCH_OFFSET 0x18 | ||
252 | #define CPU_CLOCK_OFFSET SOC_CPU_CLOCK_OFFSET | ||
253 | #define LPO_CAL_OFFSET SOC_LPO_CAL_OFFSET | ||
254 | #define GPIO_PIN10_OFFSET WLAN_GPIO_PIN10_ADDRESS | ||
255 | #define GPIO_PIN11_OFFSET WLAN_GPIO_PIN11_ADDRESS | ||
256 | #define GPIO_PIN12_OFFSET WLAN_GPIO_PIN12_ADDRESS | ||
257 | #define GPIO_PIN13_OFFSET WLAN_GPIO_PIN13_ADDRESS | ||
258 | #define CPU_CLOCK_STANDARD_LSB SOC_CPU_CLOCK_STANDARD_LSB | ||
259 | #define CPU_CLOCK_STANDARD_MASK SOC_CPU_CLOCK_STANDARD_MASK | ||
260 | #define LPO_CAL_ENABLE_LSB SOC_LPO_CAL_ENABLE_LSB | ||
261 | #define LPO_CAL_ENABLE_MASK SOC_LPO_CAL_ENABLE_MASK | ||
262 | #define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS | ||
263 | #define MBOX_BASE_ADDRESS MISSING | ||
264 | #define INT_STATUS_ENABLE_ERROR_LSB MISSING | ||
265 | #define INT_STATUS_ENABLE_ERROR_MASK MISSING | ||
266 | #define INT_STATUS_ENABLE_CPU_LSB MISSING | ||
267 | #define INT_STATUS_ENABLE_CPU_MASK MISSING | ||
268 | #define INT_STATUS_ENABLE_COUNTER_LSB MISSING | ||
269 | #define INT_STATUS_ENABLE_COUNTER_MASK MISSING | ||
270 | #define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING | ||
271 | #define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING | ||
272 | #define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING | ||
273 | #define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING | ||
274 | #define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING | ||
275 | #define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING | ||
276 | #define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING | ||
277 | #define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING | ||
278 | #define INT_STATUS_ENABLE_ADDRESS MISSING | ||
279 | #define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING | ||
280 | #define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING | ||
281 | #define HOST_INT_STATUS_ADDRESS MISSING | ||
282 | #define CPU_INT_STATUS_ADDRESS MISSING | ||
283 | #define ERROR_INT_STATUS_ADDRESS MISSING | ||
284 | #define ERROR_INT_STATUS_WAKEUP_MASK MISSING | ||
285 | #define ERROR_INT_STATUS_WAKEUP_LSB MISSING | ||
286 | #define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING | ||
287 | #define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING | ||
288 | #define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING | ||
289 | #define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING | ||
290 | #define COUNT_DEC_ADDRESS MISSING | ||
291 | #define HOST_INT_STATUS_CPU_MASK MISSING | ||
292 | #define HOST_INT_STATUS_CPU_LSB MISSING | ||
293 | #define HOST_INT_STATUS_ERROR_MASK MISSING | ||
294 | #define HOST_INT_STATUS_ERROR_LSB MISSING | ||
295 | #define HOST_INT_STATUS_COUNTER_MASK MISSING | ||
296 | #define HOST_INT_STATUS_COUNTER_LSB MISSING | ||
297 | #define RX_LOOKAHEAD_VALID_ADDRESS MISSING | ||
298 | #define WINDOW_DATA_ADDRESS MISSING | ||
299 | #define WINDOW_READ_ADDR_ADDRESS MISSING | ||
300 | #define WINDOW_WRITE_ADDR_ADDRESS MISSING | ||
301 | |||
302 | #define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB) | ||
303 | |||
304 | #endif /* _HW_H_ */ | ||
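Most of the registers above follow the same MASK/LSB pattern, and RTC_STATE_V_GET() at the bottom is the canonical accessor: mask the raw value, then shift it down. A tiny stand-alone example of using it on a made-up register readback (the constants are copied from this header):

#include <stdio.h>
#include <stdint.h>

/* Values copied from hw.h for a self-contained example. */
#define RTC_STATE_V_MASK 0x00000007
#define RTC_STATE_V_LSB  0
#define RTC_STATE_V_ON   3

#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)

int main(void)
{
	uint32_t rtc_state = 0x0000040b; /* made-up raw register readback */

	if (RTC_STATE_V_GET(rtc_state) == RTC_STATE_V_ON)
		printf("SoC is awake\n");
	else
		printf("SoC is not (yet) awake\n");
	return 0;
}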
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c new file mode 100644 index 000000000000..3446c989d6a6 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/mac.c | |||
@@ -0,0 +1,3066 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #include "mac.h" | ||
19 | |||
20 | #include <net/mac80211.h> | ||
21 | #include <linux/etherdevice.h> | ||
22 | |||
23 | #include "core.h" | ||
24 | #include "debug.h" | ||
25 | #include "wmi.h" | ||
26 | #include "htt.h" | ||
27 | #include "txrx.h" | ||
28 | |||
29 | /**********/ | ||
30 | /* Crypto */ | ||
31 | /**********/ | ||
32 | |||
33 | static int ath10k_send_key(struct ath10k_vif *arvif, | ||
34 | struct ieee80211_key_conf *key, | ||
35 | enum set_key_cmd cmd, | ||
36 | const u8 *macaddr) | ||
37 | { | ||
38 | struct wmi_vdev_install_key_arg arg = { | ||
39 | .vdev_id = arvif->vdev_id, | ||
40 | .key_idx = key->keyidx, | ||
41 | .key_len = key->keylen, | ||
42 | .key_data = key->key, | ||
43 | .macaddr = macaddr, | ||
44 | }; | ||
45 | |||
46 | if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) | ||
47 | arg.key_flags = WMI_KEY_PAIRWISE; | ||
48 | else | ||
49 | arg.key_flags = WMI_KEY_GROUP; | ||
50 | |||
51 | switch (key->cipher) { | ||
52 | case WLAN_CIPHER_SUITE_CCMP: | ||
53 | arg.key_cipher = WMI_CIPHER_AES_CCM; | ||
54 | key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; | ||
55 | break; | ||
56 | case WLAN_CIPHER_SUITE_TKIP: | ||
57 | key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; | ||
58 | arg.key_cipher = WMI_CIPHER_TKIP; | ||
59 | arg.key_txmic_len = 8; | ||
60 | arg.key_rxmic_len = 8; | ||
61 | break; | ||
62 | case WLAN_CIPHER_SUITE_WEP40: | ||
63 | case WLAN_CIPHER_SUITE_WEP104: | ||
64 | arg.key_cipher = WMI_CIPHER_WEP; | ||
65 | /* AP/IBSS mode requires the self-key to be groupwise; | ||
66 | * otherwise a pairwise key must be set */ | ||
67 | if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN)) | ||
68 | arg.key_flags = WMI_KEY_PAIRWISE; | ||
69 | break; | ||
70 | default: | ||
71 | ath10k_warn("cipher %d is not supported\n", key->cipher); | ||
72 | return -EOPNOTSUPP; | ||
73 | } | ||
74 | |||
75 | if (cmd == DISABLE_KEY) { | ||
76 | arg.key_cipher = WMI_CIPHER_NONE; | ||
77 | arg.key_data = NULL; | ||
78 | } | ||
79 | |||
80 | return ath10k_wmi_vdev_install_key(arvif->ar, &arg); | ||
81 | } | ||
82 | |||
83 | static int ath10k_install_key(struct ath10k_vif *arvif, | ||
84 | struct ieee80211_key_conf *key, | ||
85 | enum set_key_cmd cmd, | ||
86 | const u8 *macaddr) | ||
87 | { | ||
88 | struct ath10k *ar = arvif->ar; | ||
89 | int ret; | ||
90 | |||
91 | INIT_COMPLETION(ar->install_key_done); | ||
92 | |||
93 | ret = ath10k_send_key(arvif, key, cmd, macaddr); | ||
94 | if (ret) | ||
95 | return ret; | ||
96 | |||
97 | ret = wait_for_completion_timeout(&ar->install_key_done, 3*HZ); | ||
98 | if (ret == 0) | ||
99 | return -ETIMEDOUT; | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, | ||
105 | const u8 *addr) | ||
106 | { | ||
107 | struct ath10k *ar = arvif->ar; | ||
108 | struct ath10k_peer *peer; | ||
109 | int ret; | ||
110 | int i; | ||
111 | |||
112 | lockdep_assert_held(&ar->conf_mutex); | ||
113 | |||
114 | spin_lock_bh(&ar->data_lock); | ||
115 | peer = ath10k_peer_find(ar, arvif->vdev_id, addr); | ||
116 | spin_unlock_bh(&ar->data_lock); | ||
117 | |||
118 | if (!peer) | ||
119 | return -ENOENT; | ||
120 | |||
121 | for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) { | ||
122 | if (arvif->wep_keys[i] == NULL) | ||
123 | continue; | ||
124 | |||
125 | ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY, | ||
126 | addr); | ||
127 | if (ret) | ||
128 | return ret; | ||
129 | |||
130 | peer->keys[i] = arvif->wep_keys[i]; | ||
131 | } | ||
132 | |||
133 | return 0; | ||
134 | } | ||
135 | |||
136 | static int ath10k_clear_peer_keys(struct ath10k_vif *arvif, | ||
137 | const u8 *addr) | ||
138 | { | ||
139 | struct ath10k *ar = arvif->ar; | ||
140 | struct ath10k_peer *peer; | ||
141 | int first_errno = 0; | ||
142 | int ret; | ||
143 | int i; | ||
144 | |||
145 | lockdep_assert_held(&ar->conf_mutex); | ||
146 | |||
147 | spin_lock_bh(&ar->data_lock); | ||
148 | peer = ath10k_peer_find(ar, arvif->vdev_id, addr); | ||
149 | spin_unlock_bh(&ar->data_lock); | ||
150 | |||
151 | if (!peer) | ||
152 | return -ENOENT; | ||
153 | |||
154 | for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { | ||
155 | if (peer->keys[i] == NULL) | ||
156 | continue; | ||
157 | |||
158 | ret = ath10k_install_key(arvif, peer->keys[i], | ||
159 | DISABLE_KEY, addr); | ||
160 | if (ret && first_errno == 0) | ||
161 | first_errno = ret; | ||
162 | |||
163 | if (ret) | ||
164 | ath10k_warn("could not remove peer wep key %d (%d)\n", | ||
165 | i, ret); | ||
166 | |||
167 | peer->keys[i] = NULL; | ||
168 | } | ||
169 | |||
170 | return first_errno; | ||
171 | } | ||
172 | |||
173 | static int ath10k_clear_vdev_key(struct ath10k_vif *arvif, | ||
174 | struct ieee80211_key_conf *key) | ||
175 | { | ||
176 | struct ath10k *ar = arvif->ar; | ||
177 | struct ath10k_peer *peer; | ||
178 | u8 addr[ETH_ALEN]; | ||
179 | int first_errno = 0; | ||
180 | int ret; | ||
181 | int i; | ||
182 | |||
183 | lockdep_assert_held(&ar->conf_mutex); | ||
184 | |||
185 | for (;;) { | ||
186 | /* since ath10k_install_key() waits for completion we can't | ||
187 | * hold data_lock all the time, so we try to remove the keys incrementally */ | ||
188 | spin_lock_bh(&ar->data_lock); | ||
189 | i = 0; | ||
190 | list_for_each_entry(peer, &ar->peers, list) { | ||
191 | for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { | ||
192 | if (peer->keys[i] == key) { | ||
193 | memcpy(addr, peer->addr, ETH_ALEN); | ||
194 | peer->keys[i] = NULL; | ||
195 | break; | ||
196 | } | ||
197 | } | ||
198 | |||
199 | if (i < ARRAY_SIZE(peer->keys)) | ||
200 | break; | ||
201 | } | ||
202 | spin_unlock_bh(&ar->data_lock); | ||
203 | |||
204 | if (i == ARRAY_SIZE(peer->keys)) | ||
205 | break; | ||
206 | |||
207 | ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr); | ||
208 | if (ret && first_errno == 0) | ||
209 | first_errno = ret; | ||
210 | |||
211 | if (ret) | ||
212 | ath10k_warn("could not remove key for %pM\n", addr); | ||
213 | } | ||
214 | |||
215 | return first_errno; | ||
216 | } | ||
217 | |||
218 | |||
219 | /*********************/ | ||
220 | /* General utilities */ | ||
221 | /*********************/ | ||
222 | |||
223 | static inline enum wmi_phy_mode | ||
224 | chan_to_phymode(const struct cfg80211_chan_def *chandef) | ||
225 | { | ||
226 | enum wmi_phy_mode phymode = MODE_UNKNOWN; | ||
227 | |||
228 | switch (chandef->chan->band) { | ||
229 | case IEEE80211_BAND_2GHZ: | ||
230 | switch (chandef->width) { | ||
231 | case NL80211_CHAN_WIDTH_20_NOHT: | ||
232 | phymode = MODE_11G; | ||
233 | break; | ||
234 | case NL80211_CHAN_WIDTH_20: | ||
235 | phymode = MODE_11NG_HT20; | ||
236 | break; | ||
237 | case NL80211_CHAN_WIDTH_40: | ||
238 | phymode = MODE_11NG_HT40; | ||
239 | break; | ||
240 | case NL80211_CHAN_WIDTH_80: | ||
241 | case NL80211_CHAN_WIDTH_80P80: | ||
242 | case NL80211_CHAN_WIDTH_160: | ||
243 | phymode = MODE_UNKNOWN; | ||
244 | break; | ||
245 | } | ||
246 | break; | ||
247 | case IEEE80211_BAND_5GHZ: | ||
248 | switch (chandef->width) { | ||
249 | case NL80211_CHAN_WIDTH_20_NOHT: | ||
250 | phymode = MODE_11A; | ||
251 | break; | ||
252 | case NL80211_CHAN_WIDTH_20: | ||
253 | phymode = MODE_11NA_HT20; | ||
254 | break; | ||
255 | case NL80211_CHAN_WIDTH_40: | ||
256 | phymode = MODE_11NA_HT40; | ||
257 | break; | ||
258 | case NL80211_CHAN_WIDTH_80: | ||
259 | phymode = MODE_11AC_VHT80; | ||
260 | break; | ||
261 | case NL80211_CHAN_WIDTH_80P80: | ||
262 | case NL80211_CHAN_WIDTH_160: | ||
263 | phymode = MODE_UNKNOWN; | ||
264 | break; | ||
265 | } | ||
266 | break; | ||
267 | default: | ||
268 | break; | ||
269 | } | ||
270 | |||
271 | WARN_ON(phymode == MODE_UNKNOWN); | ||
272 | return phymode; | ||
273 | } | ||
274 | |||
275 | static u8 ath10k_parse_mpdudensity(u8 mpdudensity) | ||
276 | { | ||
277 | /* | ||
278 | * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing": | ||
279 | * 0 for no restriction | ||
280 | * 1 for 1/4 us | ||
281 | * 2 for 1/2 us | ||
282 | * 3 for 1 us | ||
283 | * 4 for 2 us | ||
284 | * 5 for 4 us | ||
285 | * 6 for 8 us | ||
286 | * 7 for 16 us | ||
287 | */ | ||
288 | switch (mpdudensity) { | ||
289 | case 0: | ||
290 | return 0; | ||
291 | case 1: | ||
292 | case 2: | ||
293 | case 3: | ||
294 | /* Our lower layer calculations limit our precision to | ||
295 | 1 microsecond */ | ||
296 | return 1; | ||
297 | case 4: | ||
298 | return 2; | ||
299 | case 5: | ||
300 | return 4; | ||
301 | case 6: | ||
302 | return 8; | ||
303 | case 7: | ||
304 | return 16; | ||
305 | default: | ||
306 | return 0; | ||
307 | } | ||
308 | } | ||
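To make the rounding above concrete, here is a small standalone sketch of the density-code-to-microseconds mapping the function implements (codes 1-3 all collapse to 1 us because of the precision limit noted in the comment). The table is derived from the switch statement above, not from the 802.11 spec text.

#include <stdio.h>

int main(void)
{
	/* mirrors the return values of the switch in ath10k_parse_mpdudensity() */
	static const int spacing_us[8] = { 0, 1, 1, 1, 2, 4, 8, 16 };
	int code;

	for (code = 0; code < 8; code++)
		printf("density code %d -> %d us\n", code, spacing_us[code]);

	return 0;
}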
309 | |||
310 | static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr) | ||
311 | { | ||
312 | int ret; | ||
313 | |||
314 | lockdep_assert_held(&ar->conf_mutex); | ||
315 | |||
316 | ret = ath10k_wmi_peer_create(ar, vdev_id, addr); | ||
317 | if (ret) | ||
318 | return ret; | ||
319 | |||
320 | ret = ath10k_wait_for_peer_created(ar, vdev_id, addr); | ||
321 | if (ret) | ||
322 | return ret; | ||
323 | |||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) | ||
328 | { | ||
329 | int ret; | ||
330 | |||
331 | lockdep_assert_held(&ar->conf_mutex); | ||
332 | |||
333 | ret = ath10k_wmi_peer_delete(ar, vdev_id, addr); | ||
334 | if (ret) | ||
335 | return ret; | ||
336 | |||
337 | ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr); | ||
338 | if (ret) | ||
339 | return ret; | ||
340 | |||
341 | return 0; | ||
342 | } | ||
343 | |||
344 | static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) | ||
345 | { | ||
346 | struct ath10k_peer *peer, *tmp; | ||
347 | |||
348 | lockdep_assert_held(&ar->conf_mutex); | ||
349 | |||
350 | spin_lock_bh(&ar->data_lock); | ||
351 | list_for_each_entry_safe(peer, tmp, &ar->peers, list) { | ||
352 | if (peer->vdev_id != vdev_id) | ||
353 | continue; | ||
354 | |||
355 | ath10k_warn("removing stale peer %pM from vdev_id %d\n", | ||
356 | peer->addr, vdev_id); | ||
357 | |||
358 | list_del(&peer->list); | ||
359 | kfree(peer); | ||
360 | } | ||
361 | spin_unlock_bh(&ar->data_lock); | ||
362 | } | ||
363 | |||
364 | /************************/ | ||
365 | /* Interface management */ | ||
366 | /************************/ | ||
367 | |||
368 | static inline int ath10k_vdev_setup_sync(struct ath10k *ar) | ||
369 | { | ||
370 | int ret; | ||
371 | |||
372 | ret = wait_for_completion_timeout(&ar->vdev_setup_done, | ||
373 | ATH10K_VDEV_SETUP_TIMEOUT_HZ); | ||
374 | if (ret == 0) | ||
375 | return -ETIMEDOUT; | ||
376 | |||
377 | return 0; | ||
378 | } | ||
379 | |||
380 | static int ath10k_vdev_start(struct ath10k_vif *arvif) | ||
381 | { | ||
382 | struct ath10k *ar = arvif->ar; | ||
383 | struct ieee80211_conf *conf = &ar->hw->conf; | ||
384 | struct ieee80211_channel *channel = conf->chandef.chan; | ||
385 | struct wmi_vdev_start_request_arg arg = {}; | ||
386 | int ret = 0; | ||
387 | |||
388 | lockdep_assert_held(&ar->conf_mutex); | ||
389 | |||
390 | INIT_COMPLETION(ar->vdev_setup_done); | ||
391 | |||
392 | arg.vdev_id = arvif->vdev_id; | ||
393 | arg.dtim_period = arvif->dtim_period; | ||
394 | arg.bcn_intval = arvif->beacon_interval; | ||
395 | |||
396 | arg.channel.freq = channel->center_freq; | ||
397 | |||
398 | arg.channel.band_center_freq1 = conf->chandef.center_freq1; | ||
399 | |||
400 | arg.channel.mode = chan_to_phymode(&conf->chandef); | ||
401 | |||
402 | arg.channel.min_power = channel->max_power * 3; | ||
403 | arg.channel.max_power = channel->max_power * 4; | ||
404 | arg.channel.max_reg_power = channel->max_reg_power * 4; | ||
405 | arg.channel.max_antenna_gain = channel->max_antenna_gain; | ||
406 | |||
407 | if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { | ||
408 | arg.ssid = arvif->u.ap.ssid; | ||
409 | arg.ssid_len = arvif->u.ap.ssid_len; | ||
410 | arg.hidden_ssid = arvif->u.ap.hidden_ssid; | ||
411 | } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { | ||
412 | arg.ssid = arvif->vif->bss_conf.ssid; | ||
413 | arg.ssid_len = arvif->vif->bss_conf.ssid_len; | ||
414 | } | ||
415 | |||
416 | ret = ath10k_wmi_vdev_start(ar, &arg); | ||
417 | if (ret) { | ||
418 | ath10k_warn("WMI vdev start failed: ret %d\n", ret); | ||
419 | return ret; | ||
420 | } | ||
421 | |||
422 | ret = ath10k_vdev_setup_sync(ar); | ||
423 | if (ret) { | ||
424 | ath10k_warn("vdev setup failed %d\n", ret); | ||
425 | return ret; | ||
426 | } | ||
427 | |||
428 | return ret; | ||
429 | } | ||
430 | |||
431 | static int ath10k_vdev_stop(struct ath10k_vif *arvif) | ||
432 | { | ||
433 | struct ath10k *ar = arvif->ar; | ||
434 | int ret; | ||
435 | |||
436 | lockdep_assert_held(&ar->conf_mutex); | ||
437 | |||
438 | INIT_COMPLETION(ar->vdev_setup_done); | ||
439 | |||
440 | ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id); | ||
441 | if (ret) { | ||
442 | ath10k_warn("WMI vdev stop failed: ret %d\n", ret); | ||
443 | return ret; | ||
444 | } | ||
445 | |||
446 | ret = ath10k_vdev_setup_sync(ar); | ||
447 | if (ret) { | ||
448 | ath10k_warn("vdev setup failed %d\n", ret); | ||
449 | return ret; | ||
450 | } | ||
451 | |||
452 | return ret; | ||
453 | } | ||
454 | |||
455 | static int ath10k_monitor_start(struct ath10k *ar, int vdev_id) | ||
456 | { | ||
457 | struct ieee80211_channel *channel = ar->hw->conf.chandef.chan; | ||
458 | struct wmi_vdev_start_request_arg arg = {}; | ||
459 | enum nl80211_channel_type type; | ||
460 | int ret = 0; | ||
461 | |||
462 | lockdep_assert_held(&ar->conf_mutex); | ||
463 | |||
464 | type = cfg80211_get_chandef_type(&ar->hw->conf.chandef); | ||
465 | |||
466 | arg.vdev_id = vdev_id; | ||
467 | arg.channel.freq = channel->center_freq; | ||
468 | arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1; | ||
469 | |||
470 | /* TODO: set this up dynamically; what happens if we | ||
471 | don't have any vifs? */ | ||
472 | arg.channel.mode = chan_to_phymode(&ar->hw->conf.chandef); | ||
473 | |||
474 | arg.channel.min_power = channel->max_power * 3; | ||
475 | arg.channel.max_power = channel->max_power * 4; | ||
476 | arg.channel.max_reg_power = channel->max_reg_power * 4; | ||
477 | arg.channel.max_antenna_gain = channel->max_antenna_gain; | ||
478 | |||
479 | ret = ath10k_wmi_vdev_start(ar, &arg); | ||
480 | if (ret) { | ||
481 | ath10k_warn("Monitor vdev start failed: ret %d\n", ret); | ||
482 | return ret; | ||
483 | } | ||
484 | |||
485 | ret = ath10k_vdev_setup_sync(ar); | ||
486 | if (ret) { | ||
487 | ath10k_warn("Monitor vdev setup failed %d\n", ret); | ||
488 | return ret; | ||
489 | } | ||
490 | |||
491 | ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); | ||
492 | if (ret) { | ||
493 | ath10k_warn("Monitor vdev up failed: %d\n", ret); | ||
494 | goto vdev_stop; | ||
495 | } | ||
496 | |||
497 | ar->monitor_vdev_id = vdev_id; | ||
498 | ar->monitor_enabled = true; | ||
499 | |||
500 | return 0; | ||
501 | |||
502 | vdev_stop: | ||
503 | ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); | ||
504 | if (ret) | ||
505 | ath10k_warn("Monitor vdev stop failed: %d\n", ret); | ||
506 | |||
507 | return ret; | ||
508 | } | ||
509 | |||
510 | static int ath10k_monitor_stop(struct ath10k *ar) | ||
511 | { | ||
512 | int ret = 0; | ||
513 | |||
514 | lockdep_assert_held(&ar->conf_mutex); | ||
515 | |||
516 | /* For some reason calling ath10k_wmi_vdev_down() here often | ||
517 | * causes the subsequent ath10k_wmi_vdev_stop() to fail. After | ||
518 | * that the monitor vdev cannot be run again without a driver | ||
519 | * reload. We don't see such problems when it is skipped, so | ||
520 | * skip ath10k_wmi_vdev_down() here. | ||
521 | */ | ||
522 | |||
523 | ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); | ||
524 | if (ret) | ||
525 | ath10k_warn("Monitor vdev stop failed: %d\n", ret); | ||
526 | |||
527 | ret = ath10k_vdev_setup_sync(ar); | ||
528 | if (ret) | ||
529 | ath10k_warn("Monitor_down sync failed: %d\n", ret); | ||
530 | |||
531 | ar->monitor_enabled = false; | ||
532 | return ret; | ||
533 | } | ||
534 | |||
535 | static int ath10k_monitor_create(struct ath10k *ar) | ||
536 | { | ||
537 | int bit, ret = 0; | ||
538 | |||
539 | lockdep_assert_held(&ar->conf_mutex); | ||
540 | |||
541 | if (ar->monitor_present) { | ||
542 | ath10k_warn("Monitor mode already enabled\n"); | ||
543 | return 0; | ||
544 | } | ||
545 | |||
546 | bit = ffs(ar->free_vdev_map); | ||
547 | if (bit == 0) { | ||
548 | ath10k_warn("No free VDEV slots\n"); | ||
549 | return -ENOMEM; | ||
550 | } | ||
551 | |||
552 | ar->monitor_vdev_id = bit - 1; | ||
553 | ar->free_vdev_map &= ~(1 << ar->monitor_vdev_id); | ||
554 | |||
555 | ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id, | ||
556 | WMI_VDEV_TYPE_MONITOR, | ||
557 | 0, ar->mac_addr); | ||
558 | if (ret) { | ||
559 | ath10k_warn("WMI vdev monitor create failed: ret %d\n", ret); | ||
560 | goto vdev_fail; | ||
561 | } | ||
562 | |||
563 | ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface created, vdev id: %d\n", | ||
564 | ar->monitor_vdev_id); | ||
565 | |||
566 | ar->monitor_present = true; | ||
567 | return 0; | ||
568 | |||
569 | vdev_fail: | ||
570 | /* | ||
571 | * Restore the ID to the global map. | ||
572 | */ | ||
573 | ar->free_vdev_map |= 1 << (ar->monitor_vdev_id); | ||
574 | return ret; | ||
575 | } | ||
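The free_vdev_map handling above (ffs(), then clearing bit - 1) is a plain bitmap allocator: ffs() returns the 1-based index of the lowest set bit, or 0 when the map is empty. A minimal userspace sketch of the same pattern follows, with an invented initial map value.

#include <stdio.h>
#include <strings.h>

int main(void)
{
	unsigned int free_vdev_map = 0x0b;	/* slots 0, 1 and 3 free (example) */
	int bit = ffs(free_vdev_map);

	if (bit == 0) {
		puts("no free vdev slots");
		return 1;
	}

	/* claim the slot, the same way ath10k_monitor_create() does */
	free_vdev_map &= ~(1u << (bit - 1));
	printf("allocated vdev id %d, map now 0x%02x\n", bit - 1, free_vdev_map);

	return 0;
}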
576 | |||
577 | static int ath10k_monitor_destroy(struct ath10k *ar) | ||
578 | { | ||
579 | int ret = 0; | ||
580 | |||
581 | lockdep_assert_held(&ar->conf_mutex); | ||
582 | |||
583 | if (!ar->monitor_present) | ||
584 | return 0; | ||
585 | |||
586 | ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id); | ||
587 | if (ret) { | ||
588 | ath10k_warn("WMI vdev monitor delete failed: %d\n", ret); | ||
589 | return ret; | ||
590 | } | ||
591 | |||
592 | ar->free_vdev_map |= 1 << (ar->monitor_vdev_id); | ||
593 | ar->monitor_present = false; | ||
594 | |||
595 | ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface destroyed, vdev id: %d\n", | ||
596 | ar->monitor_vdev_id); | ||
597 | return ret; | ||
598 | } | ||
599 | |||
600 | static void ath10k_control_beaconing(struct ath10k_vif *arvif, | ||
601 | struct ieee80211_bss_conf *info) | ||
602 | { | ||
603 | int ret = 0; | ||
604 | |||
605 | if (!info->enable_beacon) { | ||
606 | ath10k_vdev_stop(arvif); | ||
607 | return; | ||
608 | } | ||
609 | |||
610 | arvif->tx_seq_no = 0x1000; | ||
611 | |||
612 | ret = ath10k_vdev_start(arvif); | ||
613 | if (ret) | ||
614 | return; | ||
615 | |||
616 | ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, 0, info->bssid); | ||
617 | if (ret) { | ||
618 | ath10k_warn("Failed to bring up VDEV: %d\n", | ||
619 | arvif->vdev_id); | ||
620 | return; | ||
621 | } | ||
622 | ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d up\n", arvif->vdev_id); | ||
623 | } | ||
624 | |||
625 | static void ath10k_control_ibss(struct ath10k_vif *arvif, | ||
626 | struct ieee80211_bss_conf *info, | ||
627 | const u8 self_peer[ETH_ALEN]) | ||
628 | { | ||
629 | int ret = 0; | ||
630 | |||
631 | if (!info->ibss_joined) { | ||
632 | ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer); | ||
633 | if (ret) | ||
634 | ath10k_warn("Failed to delete IBSS self peer:%pM for VDEV:%d ret:%d\n", | ||
635 | self_peer, arvif->vdev_id, ret); | ||
636 | |||
637 | if (is_zero_ether_addr(arvif->u.ibss.bssid)) | ||
638 | return; | ||
639 | |||
640 | ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, | ||
641 | arvif->u.ibss.bssid); | ||
642 | if (ret) { | ||
643 | ath10k_warn("Failed to delete IBSS BSSID peer:%pM for VDEV:%d ret:%d\n", | ||
644 | arvif->u.ibss.bssid, arvif->vdev_id, ret); | ||
645 | return; | ||
646 | } | ||
647 | |||
648 | memset(arvif->u.ibss.bssid, 0, ETH_ALEN); | ||
649 | |||
650 | return; | ||
651 | } | ||
652 | |||
653 | ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer); | ||
654 | if (ret) { | ||
655 | ath10k_warn("Failed to create IBSS self peer:%pM for VDEV:%d ret:%d\n", | ||
656 | self_peer, arvif->vdev_id, ret); | ||
657 | return; | ||
658 | } | ||
659 | |||
660 | ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, | ||
661 | WMI_VDEV_PARAM_ATIM_WINDOW, | ||
662 | ATH10K_DEFAULT_ATIM); | ||
663 | if (ret) | ||
664 | ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n", | ||
665 | arvif->vdev_id, ret); | ||
666 | } | ||
667 | |||
668 | /* | ||
669 | * Review this when mac80211 gains per-interface powersave support. | ||
670 | */ | ||
671 | static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif) | ||
672 | { | ||
673 | struct ath10k_generic_iter *ar_iter = data; | ||
674 | struct ieee80211_conf *conf = &ar_iter->ar->hw->conf; | ||
675 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | ||
676 | enum wmi_sta_powersave_param param; | ||
677 | enum wmi_sta_ps_mode psmode; | ||
678 | int ret; | ||
679 | |||
680 | if (vif->type != NL80211_IFTYPE_STATION) | ||
681 | return; | ||
682 | |||
683 | if (conf->flags & IEEE80211_CONF_PS) { | ||
684 | psmode = WMI_STA_PS_MODE_ENABLED; | ||
685 | param = WMI_STA_PS_PARAM_INACTIVITY_TIME; | ||
686 | |||
687 | ret = ath10k_wmi_set_sta_ps_param(ar_iter->ar, | ||
688 | arvif->vdev_id, | ||
689 | param, | ||
690 | conf->dynamic_ps_timeout); | ||
691 | if (ret) { | ||
692 | ath10k_warn("Failed to set inactivity time for VDEV: %d\n", | ||
693 | arvif->vdev_id); | ||
694 | return; | ||
695 | } | ||
696 | |||
697 | ar_iter->ret = ret; | ||
698 | } else { | ||
699 | psmode = WMI_STA_PS_MODE_DISABLED; | ||
700 | } | ||
701 | |||
702 | ar_iter->ret = ath10k_wmi_set_psmode(ar_iter->ar, arvif->vdev_id, | ||
703 | psmode); | ||
704 | if (ar_iter->ret) | ||
705 | ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n", | ||
706 | psmode, arvif->vdev_id); | ||
707 | else | ||
708 | ath10k_dbg(ATH10K_DBG_MAC, "Set PS Mode: %d for VDEV: %d\n", | ||
709 | psmode, arvif->vdev_id); | ||
710 | } | ||
711 | |||
712 | /**********************/ | ||
713 | /* Station management */ | ||
714 | /**********************/ | ||
715 | |||
716 | static void ath10k_peer_assoc_h_basic(struct ath10k *ar, | ||
717 | struct ath10k_vif *arvif, | ||
718 | struct ieee80211_sta *sta, | ||
719 | struct ieee80211_bss_conf *bss_conf, | ||
720 | struct wmi_peer_assoc_complete_arg *arg) | ||
721 | { | ||
722 | memcpy(arg->addr, sta->addr, ETH_ALEN); | ||
723 | arg->vdev_id = arvif->vdev_id; | ||
724 | arg->peer_aid = sta->aid; | ||
725 | arg->peer_flags |= WMI_PEER_AUTH; | ||
726 | |||
727 | if (arvif->vdev_type == WMI_VDEV_TYPE_STA) | ||
728 | /* | ||
729 | * The FW seems to have problems with Power Save in STA | ||
730 | * mode when this parameter is set high (e.g. 5). | ||
731 | * Often the FW does not send a NULL frame (with clean P flags) | ||
732 | * even though the beacon signals buffered frames for us. | ||
733 | * Sometimes we have to wait more than 10 seconds before the FW | ||
734 | * wakes up. Often a single ping from the AP to our device | ||
735 | * simply fails (more than 50% of the time). | ||
736 | * | ||
737 | * Setting this FW parameter to 1 seems to make the FW | ||
738 | * check every beacon and wake up immediately once | ||
739 | * buffered data is detected. | ||
740 | */ | ||
741 | arg->peer_listen_intval = 1; | ||
742 | else | ||
743 | arg->peer_listen_intval = ar->hw->conf.listen_interval; | ||
744 | |||
745 | arg->peer_num_spatial_streams = 1; | ||
746 | |||
747 | /* | ||
748 | * The assoc capabilities are available only in managed mode. | ||
749 | */ | ||
750 | if (arvif->vdev_type == WMI_VDEV_TYPE_STA && bss_conf) | ||
751 | arg->peer_caps = bss_conf->assoc_capability; | ||
752 | } | ||
753 | |||
754 | static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, | ||
755 | struct ath10k_vif *arvif, | ||
756 | struct wmi_peer_assoc_complete_arg *arg) | ||
757 | { | ||
758 | struct ieee80211_vif *vif = arvif->vif; | ||
759 | struct ieee80211_bss_conf *info = &vif->bss_conf; | ||
760 | struct cfg80211_bss *bss; | ||
761 | const u8 *rsnie = NULL; | ||
762 | const u8 *wpaie = NULL; | ||
763 | |||
764 | bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan, | ||
765 | info->bssid, NULL, 0, 0, 0); | ||
766 | if (bss) { | ||
767 | const struct cfg80211_bss_ies *ies; | ||
768 | |||
769 | rcu_read_lock(); | ||
770 | rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN); | ||
771 | |||
772 | ies = rcu_dereference(bss->ies); | ||
773 | |||
774 | wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, | ||
775 | WLAN_OUI_TYPE_MICROSOFT_WPA, | ||
776 | ies->data, | ||
777 | ies->len); | ||
778 | rcu_read_unlock(); | ||
779 | cfg80211_put_bss(ar->hw->wiphy, bss); | ||
780 | } | ||
781 | |||
782 | /* FIXME: is basing this on the RSN/WPA IE a correct idea? */ | ||
783 | if (rsnie || wpaie) { | ||
784 | ath10k_dbg(ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__); | ||
785 | arg->peer_flags |= WMI_PEER_NEED_PTK_4_WAY; | ||
786 | } | ||
787 | |||
788 | if (wpaie) { | ||
789 | ath10k_dbg(ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__); | ||
790 | arg->peer_flags |= WMI_PEER_NEED_GTK_2_WAY; | ||
791 | } | ||
792 | } | ||
793 | |||
794 | static void ath10k_peer_assoc_h_rates(struct ath10k *ar, | ||
795 | struct ieee80211_sta *sta, | ||
796 | struct wmi_peer_assoc_complete_arg *arg) | ||
797 | { | ||
798 | struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; | ||
799 | const struct ieee80211_supported_band *sband; | ||
800 | const struct ieee80211_rate *rates; | ||
801 | u32 ratemask; | ||
802 | int i; | ||
803 | |||
804 | sband = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band]; | ||
805 | ratemask = sta->supp_rates[ar->hw->conf.chandef.chan->band]; | ||
806 | rates = sband->bitrates; | ||
807 | |||
808 | rateset->num_rates = 0; | ||
809 | |||
810 | for (i = 0; i < 32; i++, ratemask >>= 1, rates++) { | ||
811 | if (!(ratemask & 1)) | ||
812 | continue; | ||
813 | |||
814 | rateset->rates[rateset->num_rates] = rates->hw_value; | ||
815 | rateset->num_rates++; | ||
816 | } | ||
817 | } | ||
818 | |||
819 | static void ath10k_peer_assoc_h_ht(struct ath10k *ar, | ||
820 | struct ieee80211_sta *sta, | ||
821 | struct wmi_peer_assoc_complete_arg *arg) | ||
822 | { | ||
823 | const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; | ||
824 | int smps; | ||
825 | int i, n; | ||
826 | |||
827 | if (!ht_cap->ht_supported) | ||
828 | return; | ||
829 | |||
830 | arg->peer_flags |= WMI_PEER_HT; | ||
831 | arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + | ||
832 | ht_cap->ampdu_factor)) - 1; | ||
833 | |||
834 | arg->peer_mpdu_density = | ||
835 | ath10k_parse_mpdudensity(ht_cap->ampdu_density); | ||
836 | |||
837 | arg->peer_ht_caps = ht_cap->cap; | ||
838 | arg->peer_rate_caps |= WMI_RC_HT_FLAG; | ||
839 | |||
840 | if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) | ||
841 | arg->peer_flags |= WMI_PEER_LDPC; | ||
842 | |||
843 | if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) { | ||
844 | arg->peer_flags |= WMI_PEER_40MHZ; | ||
845 | arg->peer_rate_caps |= WMI_RC_CW40_FLAG; | ||
846 | } | ||
847 | |||
848 | if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) | ||
849 | arg->peer_rate_caps |= WMI_RC_SGI_FLAG; | ||
850 | |||
851 | if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) | ||
852 | arg->peer_rate_caps |= WMI_RC_SGI_FLAG; | ||
853 | |||
854 | if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) { | ||
855 | arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG; | ||
856 | arg->peer_flags |= WMI_PEER_STBC; | ||
857 | } | ||
858 | |||
859 | if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) { | ||
860 | u32 stbc; | ||
861 | stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC; | ||
862 | stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT; | ||
863 | stbc = stbc << WMI_RC_RX_STBC_FLAG_S; | ||
864 | arg->peer_rate_caps |= stbc; | ||
865 | arg->peer_flags |= WMI_PEER_STBC; | ||
866 | } | ||
867 | |||
868 | smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; | ||
869 | smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; | ||
870 | |||
871 | if (smps == WLAN_HT_CAP_SM_PS_STATIC) { | ||
872 | arg->peer_flags |= WMI_PEER_SPATIAL_MUX; | ||
873 | arg->peer_flags |= WMI_PEER_STATIC_MIMOPS; | ||
874 | } else if (smps == WLAN_HT_CAP_SM_PS_DYNAMIC) { | ||
875 | arg->peer_flags |= WMI_PEER_SPATIAL_MUX; | ||
876 | arg->peer_flags |= WMI_PEER_DYN_MIMOPS; | ||
877 | } | ||
878 | |||
879 | if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2]) | ||
880 | arg->peer_rate_caps |= WMI_RC_TS_FLAG; | ||
881 | else if (ht_cap->mcs.rx_mask[1]) | ||
882 | arg->peer_rate_caps |= WMI_RC_DS_FLAG; | ||
883 | |||
884 | for (i = 0, n = 0; i < IEEE80211_HT_MCS_MASK_LEN*8; i++) | ||
885 | if (ht_cap->mcs.rx_mask[i/8] & (1 << i%8)) | ||
886 | arg->peer_ht_rates.rates[n++] = i; | ||
887 | |||
888 | arg->peer_ht_rates.num_rates = n; | ||
889 | arg->peer_num_spatial_streams = max((n+7) / 8, 1); | ||
890 | |||
891 | ath10k_dbg(ATH10K_DBG_MAC, "mcs cnt %d nss %d\n", | ||
892 | arg->peer_ht_rates.num_rates, | ||
893 | arg->peer_num_spatial_streams); | ||
894 | } | ||
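The last few lines above count the set bits of the HT rx MCS bitmask and then estimate the spatial-stream count as max((n+7)/8, 1), since each stream contributes eight MCS indices. A standalone sketch of that counting, using an invented mask (MCS 0-15 set, i.e. a two-stream peer) and a local MCS_MASK_LEN standing in for the mac80211 mask length:

#include <stdio.h>

#define MCS_MASK_LEN 10		/* stand-in for the mac80211 rx_mask length */

int main(void)
{
	unsigned char rx_mask[MCS_MASK_LEN] = { 0xff, 0xff };	/* MCS 0-15 */
	int i, n = 0, nss;

	for (i = 0; i < MCS_MASK_LEN * 8; i++)
		if (rx_mask[i / 8] & (1 << (i % 8)))
			n++;		/* count supported MCS indices */

	nss = (n + 7) / 8;
	if (nss < 1)
		nss = 1;

	printf("mcs cnt %d nss %d\n", n, nss);	/* prints "mcs cnt 16 nss 2" */
	return 0;
}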
895 | |||
896 | static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar, | ||
897 | struct ath10k_vif *arvif, | ||
898 | struct ieee80211_sta *sta, | ||
899 | struct ieee80211_bss_conf *bss_conf, | ||
900 | struct wmi_peer_assoc_complete_arg *arg) | ||
901 | { | ||
902 | u32 uapsd = 0; | ||
903 | u32 max_sp = 0; | ||
904 | |||
905 | if (sta->wme) | ||
906 | arg->peer_flags |= WMI_PEER_QOS; | ||
907 | |||
908 | if (sta->wme && sta->uapsd_queues) { | ||
909 | ath10k_dbg(ATH10K_DBG_MAC, "uapsd_queues: 0x%X, max_sp: %d\n", | ||
910 | sta->uapsd_queues, sta->max_sp); | ||
911 | |||
912 | arg->peer_flags |= WMI_PEER_APSD; | ||
913 | arg->peer_flags |= WMI_RC_UAPSD_FLAG; | ||
914 | |||
915 | if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) | ||
916 | uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN | | ||
917 | WMI_AP_PS_UAPSD_AC3_TRIGGER_EN; | ||
918 | if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) | ||
919 | uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN | | ||
920 | WMI_AP_PS_UAPSD_AC2_TRIGGER_EN; | ||
921 | if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) | ||
922 | uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN | | ||
923 | WMI_AP_PS_UAPSD_AC1_TRIGGER_EN; | ||
924 | if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) | ||
925 | uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN | | ||
926 | WMI_AP_PS_UAPSD_AC0_TRIGGER_EN; | ||
927 | |||
928 | |||
929 | if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP) | ||
930 | max_sp = sta->max_sp; | ||
931 | |||
932 | ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, | ||
933 | sta->addr, | ||
934 | WMI_AP_PS_PEER_PARAM_UAPSD, | ||
935 | uapsd); | ||
936 | |||
937 | ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, | ||
938 | sta->addr, | ||
939 | WMI_AP_PS_PEER_PARAM_MAX_SP, | ||
940 | max_sp); | ||
941 | |||
942 | /* TODO: set this up based on the STA listen interval and | ||
943 | the beacon interval. We don't know | ||
944 | sta->listen_interval yet - a mac80211 patch is required. | ||
945 | Use 10 seconds for now. */ | ||
946 | ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, | ||
947 | sta->addr, | ||
948 | WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, | ||
949 | 10); | ||
950 | } | ||
951 | } | ||
952 | |||
953 | static void ath10k_peer_assoc_h_qos_sta(struct ath10k *ar, | ||
954 | struct ath10k_vif *arvif, | ||
955 | struct ieee80211_sta *sta, | ||
956 | struct ieee80211_bss_conf *bss_conf, | ||
957 | struct wmi_peer_assoc_complete_arg *arg) | ||
958 | { | ||
959 | if (bss_conf->qos) | ||
960 | arg->peer_flags |= WMI_PEER_QOS; | ||
961 | } | ||
962 | |||
963 | static void ath10k_peer_assoc_h_vht(struct ath10k *ar, | ||
964 | struct ieee80211_sta *sta, | ||
965 | struct wmi_peer_assoc_complete_arg *arg) | ||
966 | { | ||
967 | const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; | ||
968 | |||
969 | if (!vht_cap->vht_supported) | ||
970 | return; | ||
971 | |||
972 | arg->peer_flags |= WMI_PEER_VHT; | ||
973 | |||
974 | arg->peer_vht_caps = vht_cap->cap; | ||
975 | |||
976 | if (sta->bandwidth == IEEE80211_STA_RX_BW_80) | ||
977 | arg->peer_flags |= WMI_PEER_80MHZ; | ||
978 | |||
979 | arg->peer_vht_rates.rx_max_rate = | ||
980 | __le16_to_cpu(vht_cap->vht_mcs.rx_highest); | ||
981 | arg->peer_vht_rates.rx_mcs_set = | ||
982 | __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); | ||
983 | arg->peer_vht_rates.tx_max_rate = | ||
984 | __le16_to_cpu(vht_cap->vht_mcs.tx_highest); | ||
985 | arg->peer_vht_rates.tx_mcs_set = | ||
986 | __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map); | ||
987 | |||
988 | ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer\n"); | ||
989 | } | ||
990 | |||
991 | static void ath10k_peer_assoc_h_qos(struct ath10k *ar, | ||
992 | struct ath10k_vif *arvif, | ||
993 | struct ieee80211_sta *sta, | ||
994 | struct ieee80211_bss_conf *bss_conf, | ||
995 | struct wmi_peer_assoc_complete_arg *arg) | ||
996 | { | ||
997 | switch (arvif->vdev_type) { | ||
998 | case WMI_VDEV_TYPE_AP: | ||
999 | ath10k_peer_assoc_h_qos_ap(ar, arvif, sta, bss_conf, arg); | ||
1000 | break; | ||
1001 | case WMI_VDEV_TYPE_STA: | ||
1002 | ath10k_peer_assoc_h_qos_sta(ar, arvif, sta, bss_conf, arg); | ||
1003 | break; | ||
1004 | default: | ||
1005 | break; | ||
1006 | } | ||
1007 | } | ||
1008 | |||
1009 | static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, | ||
1010 | struct ath10k_vif *arvif, | ||
1011 | struct ieee80211_sta *sta, | ||
1012 | struct wmi_peer_assoc_complete_arg *arg) | ||
1013 | { | ||
1014 | enum wmi_phy_mode phymode = MODE_UNKNOWN; | ||
1015 | |||
1016 | /* FIXME: add VHT */ | ||
1017 | |||
1018 | switch (ar->hw->conf.chandef.chan->band) { | ||
1019 | case IEEE80211_BAND_2GHZ: | ||
1020 | if (sta->ht_cap.ht_supported) { | ||
1021 | if (sta->bandwidth == IEEE80211_STA_RX_BW_40) | ||
1022 | phymode = MODE_11NG_HT40; | ||
1023 | else | ||
1024 | phymode = MODE_11NG_HT20; | ||
1025 | } else { | ||
1026 | phymode = MODE_11G; | ||
1027 | } | ||
1028 | |||
1029 | break; | ||
1030 | case IEEE80211_BAND_5GHZ: | ||
1031 | if (sta->ht_cap.ht_supported) { | ||
1032 | if (sta->bandwidth == IEEE80211_STA_RX_BW_40) | ||
1033 | phymode = MODE_11NA_HT40; | ||
1034 | else | ||
1035 | phymode = MODE_11NA_HT20; | ||
1036 | } else { | ||
1037 | phymode = MODE_11A; | ||
1038 | } | ||
1039 | |||
1040 | break; | ||
1041 | default: | ||
1042 | break; | ||
1043 | } | ||
1044 | |||
1045 | arg->peer_phymode = phymode; | ||
1046 | WARN_ON(phymode == MODE_UNKNOWN); | ||
1047 | } | ||
1048 | |||
1049 | static int ath10k_peer_assoc(struct ath10k *ar, | ||
1050 | struct ath10k_vif *arvif, | ||
1051 | struct ieee80211_sta *sta, | ||
1052 | struct ieee80211_bss_conf *bss_conf) | ||
1053 | { | ||
1054 | struct wmi_peer_assoc_complete_arg arg; | ||
1055 | |||
1056 | memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg)); | ||
1057 | |||
1058 | ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg); | ||
1059 | ath10k_peer_assoc_h_crypto(ar, arvif, &arg); | ||
1060 | ath10k_peer_assoc_h_rates(ar, sta, &arg); | ||
1061 | ath10k_peer_assoc_h_ht(ar, sta, &arg); | ||
1062 | ath10k_peer_assoc_h_vht(ar, sta, &arg); | ||
1063 | ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, &arg); | ||
1064 | ath10k_peer_assoc_h_phymode(ar, arvif, sta, &arg); | ||
1065 | |||
1066 | return ath10k_wmi_peer_assoc(ar, &arg); | ||
1067 | } | ||
1068 | |||
1069 | /* can be called only in mac80211 callbacks due to `key_count` usage */ | ||
1070 | static void ath10k_bss_assoc(struct ieee80211_hw *hw, | ||
1071 | struct ieee80211_vif *vif, | ||
1072 | struct ieee80211_bss_conf *bss_conf) | ||
1073 | { | ||
1074 | struct ath10k *ar = hw->priv; | ||
1075 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | ||
1076 | struct ieee80211_sta *ap_sta; | ||
1077 | int ret; | ||
1078 | |||
1079 | rcu_read_lock(); | ||
1080 | |||
1081 | ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); | ||
1082 | if (!ap_sta) { | ||
1083 | ath10k_warn("Failed to find station entry for %pM\n", | ||
1084 | bss_conf->bssid); | ||
1085 | rcu_read_unlock(); | ||
1086 | return; | ||
1087 | } | ||
1088 | |||
1089 | ret = ath10k_peer_assoc(ar, arvif, ap_sta, bss_conf); | ||
1090 | if (ret) { | ||
1091 | ath10k_warn("Peer assoc failed for %pM\n", bss_conf->bssid); | ||
1092 | rcu_read_unlock(); | ||
1093 | return; | ||
1094 | } | ||
1095 | |||
1096 | rcu_read_unlock(); | ||
1097 | |||
1098 | ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid, | ||
1099 | bss_conf->bssid); | ||
1100 | if (ret) | ||
1101 | ath10k_warn("VDEV: %d up failed: ret %d\n", | ||
1102 | arvif->vdev_id, ret); | ||
1103 | else | ||
1104 | ath10k_dbg(ATH10K_DBG_MAC, | ||
1105 | "VDEV: %d associated, BSSID: %pM, AID: %d\n", | ||
1106 | arvif->vdev_id, bss_conf->bssid, bss_conf->aid); | ||
1107 | } | ||
1108 | |||
1109 | /* | ||
1110 | * FIXME: flush TIDs | ||
1111 | */ | ||
1112 | static void ath10k_bss_disassoc(struct ieee80211_hw *hw, | ||
1113 | struct ieee80211_vif *vif) | ||
1114 | { | ||
1115 | struct ath10k *ar = hw->priv; | ||
1116 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | ||
1117 | int ret; | ||
1118 | |||
1119 | /* | ||
1120 | * For some reason, calling VDEV-DOWN before VDEV-STOP | ||
1121 | * makes the FW send frames via HTT after disassociation. | ||
1122 | * No idea why this happens, even though VDEV-DOWN is supposed | ||
1123 | * to be analogous to link down, so just stop the VDEV. | ||
1124 | */ | ||
1125 | ret = ath10k_vdev_stop(arvif); | ||
1126 | if (!ret) | ||
1127 | ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d stopped\n", | ||
1128 | arvif->vdev_id); | ||
1129 | |||
1130 | /* | ||
1131 | * If we don't call VDEV-DOWN after VDEV-STOP the FW will remain active | ||
1132 | * and report beacons from the previously associated network through HTT. | ||
1133 | * This in turn would trip a mac80211 WARN_ON if we bring down all | ||
1134 | * interfaces, as it expects there is no rx when no interface is | ||
1135 | * running. | ||
1136 | */ | ||
1137 | ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); | ||
1138 | if (ret) | ||
1139 | ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d ath10k_wmi_vdev_down failed (%d)\n", | ||
1140 | arvif->vdev_id, ret); | ||
1141 | |||
1142 | ath10k_wmi_flush_tx(ar); | ||
1143 | |||
1144 | arvif->def_wep_key_index = 0; | ||
1145 | } | ||
1146 | |||
1147 | static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, | ||
1148 | struct ieee80211_sta *sta) | ||
1149 | { | ||
1150 | int ret = 0; | ||
1151 | |||
1152 | ret = ath10k_peer_assoc(ar, arvif, sta, NULL); | ||
1153 | if (ret) { | ||
1154 | ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr); | ||
1155 | return ret; | ||
1156 | } | ||
1157 | |||
1158 | ret = ath10k_install_peer_wep_keys(arvif, sta->addr); | ||
1159 | if (ret) { | ||
1160 | ath10k_warn("could not install peer wep keys (%d)\n", ret); | ||
1161 | return ret; | ||
1162 | } | ||
1163 | |||
1164 | return ret; | ||
1165 | } | ||
1166 | |||
1167 | static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif, | ||
1168 | struct ieee80211_sta *sta) | ||
1169 | { | ||
1170 | int ret = 0; | ||
1171 | |||
1172 | ret = ath10k_clear_peer_keys(arvif, sta->addr); | ||
1173 | if (ret) { | ||
1174 | ath10k_warn("could not clear all peer wep keys (%d)\n", ret); | ||
1175 | return ret; | ||
1176 | } | ||
1177 | |||
1178 | return ret; | ||
1179 | } | ||
1180 | |||
1181 | /**************/ | ||
1182 | /* Regulatory */ | ||
1183 | /**************/ | ||
1184 | |||
1185 | static int ath10k_update_channel_list(struct ath10k *ar) | ||
1186 | { | ||
1187 | struct ieee80211_hw *hw = ar->hw; | ||
1188 | struct ieee80211_supported_band **bands; | ||
1189 | enum ieee80211_band band; | ||
1190 | struct ieee80211_channel *channel; | ||
1191 | struct wmi_scan_chan_list_arg arg = {0}; | ||
1192 | struct wmi_channel_arg *ch; | ||
1193 | bool passive; | ||
1194 | int len; | ||
1195 | int ret; | ||
1196 | int i; | ||
1197 | |||
1198 | bands = hw->wiphy->bands; | ||
1199 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | ||
1200 | if (!bands[band]) | ||
1201 | continue; | ||
1202 | |||
1203 | for (i = 0; i < bands[band]->n_channels; i++) { | ||
1204 | if (bands[band]->channels[i].flags & | ||
1205 | IEEE80211_CHAN_DISABLED) | ||
1206 | continue; | ||
1207 | |||
1208 | arg.n_channels++; | ||
1209 | } | ||
1210 | } | ||
1211 | |||
1212 | len = sizeof(struct wmi_channel_arg) * arg.n_channels; | ||
1213 | arg.channels = kzalloc(len, GFP_KERNEL); | ||
1214 | if (!arg.channels) | ||
1215 | return -ENOMEM; | ||
1216 | |||
1217 | ch = arg.channels; | ||
1218 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | ||
1219 | if (!bands[band]) | ||
1220 | continue; | ||
1221 | |||
1222 | for (i = 0; i < bands[band]->n_channels; i++) { | ||
1223 | channel = &bands[band]->channels[i]; | ||
1224 | |||
1225 | if (channel->flags & IEEE80211_CHAN_DISABLED) | ||
1226 | continue; | ||
1227 | |||
1228 | ch->allow_ht = true; | ||
1229 | |||
1230 | /* FIXME: when should we really allow VHT? */ | ||
1231 | ch->allow_vht = true; | ||
1232 | |||
1233 | ch->allow_ibss = | ||
1234 | !(channel->flags & IEEE80211_CHAN_NO_IBSS); | ||
1235 | |||
1236 | ch->ht40plus = | ||
1237 | !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS); | ||
1238 | |||
1239 | passive = channel->flags & IEEE80211_CHAN_PASSIVE_SCAN; | ||
1240 | ch->passive = passive; | ||
1241 | |||
1242 | ch->freq = channel->center_freq; | ||
1243 | ch->min_power = channel->max_power * 3; | ||
1244 | ch->max_power = channel->max_power * 4; | ||
1245 | ch->max_reg_power = channel->max_reg_power * 4; | ||
1246 | ch->max_antenna_gain = channel->max_antenna_gain; | ||
1247 | ch->reg_class_id = 0; /* FIXME */ | ||
1248 | |||
1249 | /* FIXME: why use only legacy modes, why not any | ||
1250 | * HT/VHT modes? Would that even make any | ||
1251 | * difference? */ | ||
1252 | if (channel->band == IEEE80211_BAND_2GHZ) | ||
1253 | ch->mode = MODE_11G; | ||
1254 | else | ||
1255 | ch->mode = MODE_11A; | ||
1256 | |||
1257 | if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN)) | ||
1258 | continue; | ||
1259 | |||
1260 | ath10k_dbg(ATH10K_DBG_WMI, | ||
1261 | "%s: [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", | ||
1262 | __func__, ch - arg.channels, arg.n_channels, | ||
1263 | ch->freq, ch->max_power, ch->max_reg_power, | ||
1264 | ch->max_antenna_gain, ch->mode); | ||
1265 | |||
1266 | ch++; | ||
1267 | } | ||
1268 | } | ||
1269 | |||
1270 | ret = ath10k_wmi_scan_chan_list(ar, &arg); | ||
1271 | kfree(arg.channels); | ||
1272 | |||
1273 | return ret; | ||
1274 | } | ||
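ath10k_update_channel_list() above uses a two-pass count-then-fill scheme: one walk over the bands to size the array, a kzalloc(), then a second walk to populate it, skipping disabled channels both times. A minimal userspace sketch of that shape, with made-up frequencies standing in for the cfg80211 channel list:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* 0 marks a disabled channel in this toy example */
	const int freqs[] = { 2412, 2417, 0, 2437, 5180 };
	const int n = sizeof(freqs) / sizeof(freqs[0]);
	int i, n_channels = 0;
	int *list;

	for (i = 0; i < n; i++)			/* pass 1: count usable channels */
		if (freqs[i] != 0)
			n_channels++;

	list = calloc(n_channels, sizeof(*list));
	if (!list)
		return 1;

	n_channels = 0;
	for (i = 0; i < n; i++)			/* pass 2: fill the array */
		if (freqs[i] != 0)
			list[n_channels++] = freqs[i];

	printf("%d channels, first %d MHz, last %d MHz\n",
	       n_channels, list[0], list[n_channels - 1]);

	free(list);
	return 0;
}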
1275 | |||
1276 | static void ath10k_reg_notifier(struct wiphy *wiphy, | ||
1277 | struct regulatory_request *request) | ||
1278 | { | ||
1279 | struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); | ||
1280 | struct reg_dmn_pair_mapping *regpair; | ||
1281 | struct ath10k *ar = hw->priv; | ||
1282 | int ret; | ||
1283 | |||
1284 | ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); | ||
1285 | |||
1286 | ret = ath10k_update_channel_list(ar); | ||
1287 | if (ret) | ||
1288 | ath10k_warn("could not update channel list (%d)\n", ret); | ||
1289 | |||
1290 | regpair = ar->ath_common.regulatory.regpair; | ||
1291 | /* The target allows setting a per-band regdomain, but ath_common | ||
1292 | * provides only a combined one */ | ||
1293 | ret = ath10k_wmi_pdev_set_regdomain(ar, | ||
1294 | regpair->regDmnEnum, | ||
1295 | regpair->regDmnEnum, /* 2ghz */ | ||
1296 | regpair->regDmnEnum, /* 5ghz */ | ||
1297 | regpair->reg_2ghz_ctl, | ||
1298 | regpair->reg_5ghz_ctl); | ||
1299 | if (ret) | ||
1300 | ath10k_warn("could not set pdev regdomain (%d)\n", ret); | ||
1301 | } | ||
1302 | |||
1303 | /***************/ | ||
1304 | /* TX handlers */ | ||
1305 | /***************/ | ||
1306 | |||
1307 | /* | ||
1308 | * Frames sent to the FW have to be in "Native Wifi" format. | ||
1309 | * Strip the QoS field from the 802.11 header. | ||
1310 | */ | ||
1311 | static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw, | ||
1312 | struct ieee80211_tx_control *control, | ||
1313 | struct sk_buff *skb) | ||
1314 | { | ||
1315 | struct ieee80211_hdr *hdr = (void *)skb->data; | ||
1316 | u8 *qos_ctl; | ||
1317 | |||
1318 | if (!ieee80211_is_data_qos(hdr->frame_control)) | ||
1319 | return; | ||
1320 | |||
1321 | qos_ctl = ieee80211_get_qos_ctl(hdr); | ||
1322 | memmove(qos_ctl, qos_ctl + IEEE80211_QOS_CTL_LEN, | ||
1323 | skb->len - ieee80211_hdrlen(hdr->frame_control)); | ||
1324 | skb_trim(skb, skb->len - IEEE80211_QOS_CTL_LEN); | ||
1325 | } | ||
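The memmove()/skb_trim() pair above removes the 2-byte QoS control field by sliding the frame payload down over it and shrinking the buffer. A self-contained sketch of the same byte shuffling on a toy buffer (the layout and sizes are illustrative, not real 802.11 framing):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* "HDR" + 2-byte "QC" field + "PAY" payload */
	unsigned char frame[] = { 'H', 'D', 'R', 'Q', 'C', 'P', 'A', 'Y' };
	size_t len = sizeof(frame);
	size_t hdrlen = 5;		/* header length including the QC field */
	size_t qc_off = hdrlen - 2;	/* where the field to strip begins */

	/* slide the payload down over the field ... */
	memmove(frame + qc_off, frame + hdrlen, len - hdrlen);
	len -= 2;			/* ... and trim, as skb_trim() does */

	fwrite(frame, 1, len, stdout);	/* prints "HDRPAY" */
	putchar('\n');

	return 0;
}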
1326 | |||
1327 | static void ath10k_tx_h_update_wep_key(struct sk_buff *skb) | ||
1328 | { | ||
1329 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
1330 | struct ieee80211_vif *vif = info->control.vif; | ||
1331 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | ||
1332 | struct ath10k *ar = arvif->ar; | ||
1333 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
1334 | struct ieee80211_key_conf *key = info->control.hw_key; | ||
1335 | int ret; | ||
1336 | |||
1337 | /* TODO AP mode should be implemented */ | ||
1338 | if (vif->type != NL80211_IFTYPE_STATION) | ||
1339 | return; | ||
1340 | |||
1341 | if (!ieee80211_has_protected(hdr->frame_control)) | ||
1342 | return; | ||
1343 | |||
1344 | if (!key) | ||
1345 | return; | ||
1346 | |||
1347 | if (key->cipher != WLAN_CIPHER_SUITE_WEP40 && | ||
1348 | key->cipher != WLAN_CIPHER_SUITE_WEP104) | ||
1349 | return; | ||
1350 | |||
1351 | if (key->keyidx == arvif->def_wep_key_index) | ||
1352 | return; | ||
1353 | |||
1354 | ath10k_dbg(ATH10K_DBG_MAC, "new wep keyidx will be %d\n", key->keyidx); | ||
1355 | |||
1356 | ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, | ||
1357 | WMI_VDEV_PARAM_DEF_KEYID, | ||
1358 | key->keyidx); | ||
1359 | if (ret) { | ||
1360 | ath10k_warn("could not update wep keyidx (%d)\n", ret); | ||
1361 | return; | ||
1362 | } | ||
1363 | |||
1364 | arvif->def_wep_key_index = key->keyidx; | ||
1365 | } | ||
1366 | |||
1367 | static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb) | ||
1368 | { | ||
1369 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
1370 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
1371 | struct ieee80211_vif *vif = info->control.vif; | ||
1372 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | ||
1373 | |||
1374 | /* This only applies to the P2P_GO case */ | ||
1375 | if (arvif->vdev_type != WMI_VDEV_TYPE_AP || | ||
1376 | arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) | ||
1377 | return; | ||
1378 | |||
1379 | if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) { | ||
1380 | spin_lock_bh(&ar->data_lock); | ||
1381 | if (arvif->u.ap.noa_data) | ||
1382 | if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len, | ||
1383 | GFP_ATOMIC)) | ||
1384 | memcpy(skb_put(skb, arvif->u.ap.noa_len), | ||
1385 | arvif->u.ap.noa_data, | ||
1386 | arvif->u.ap.noa_len); | ||
1387 | spin_unlock_bh(&ar->data_lock); | ||
1388 | } | ||
1389 | } | ||
1390 | |||
1391 | static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb) | ||
1392 | { | ||
1393 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
1394 | int ret; | ||
1395 | |||
1396 | if (ieee80211_is_mgmt(hdr->frame_control)) | ||
1397 | ret = ath10k_htt_mgmt_tx(ar->htt, skb); | ||
1398 | else if (ieee80211_is_nullfunc(hdr->frame_control)) | ||
1399 | /* FW does not report tx status properly for NullFunc frames | ||
1400 | * unless they are sent through mgmt tx path. mac80211 sends | ||
1401 | * those frames when it detects link/beacon loss and depends on | ||
1402 | * the tx status to be correct. */ | ||
1403 | ret = ath10k_htt_mgmt_tx(ar->htt, skb); | ||
1404 | else | ||
1405 | ret = ath10k_htt_tx(ar->htt, skb); | ||
1406 | |||
1407 | if (ret) { | ||
1408 | ath10k_warn("tx failed (%d). dropping packet.\n", ret); | ||
1409 | ieee80211_free_txskb(ar->hw, skb); | ||
1410 | } | ||
1411 | } | ||
1412 | |||
1413 | void ath10k_offchan_tx_purge(struct ath10k *ar) | ||
1414 | { | ||
1415 | struct sk_buff *skb; | ||
1416 | |||
1417 | for (;;) { | ||
1418 | skb = skb_dequeue(&ar->offchan_tx_queue); | ||
1419 | if (!skb) | ||
1420 | break; | ||
1421 | |||
1422 | ieee80211_free_txskb(ar->hw, skb); | ||
1423 | } | ||
1424 | } | ||
1425 | |||
1426 | void ath10k_offchan_tx_work(struct work_struct *work) | ||
1427 | { | ||
1428 | struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work); | ||
1429 | struct ath10k_peer *peer; | ||
1430 | struct ieee80211_hdr *hdr; | ||
1431 | struct sk_buff *skb; | ||
1432 | const u8 *peer_addr; | ||
1433 | int vdev_id; | ||
1434 | int ret; | ||
1435 | |||
1436 | /* FW requirement: We must create a peer before FW will send out | ||
1437 | * an offchannel frame. Otherwise the frame will be stuck and | ||
1438 | * never transmitted. We delete the peer upon tx completion. | ||
1439 | * It is unlikely that a peer for offchannel tx will already be | ||
1440 | * present. However it may be in some rare cases so account for that. | ||
1441 | * Otherwise we might remove a legitimate peer and break stuff. */ | ||
1442 | |||
1443 | for (;;) { | ||
1444 | skb = skb_dequeue(&ar->offchan_tx_queue); | ||
1445 | if (!skb) | ||
1446 | break; | ||
1447 | |||
1448 | mutex_lock(&ar->conf_mutex); | ||
1449 | |||
1450 | ath10k_dbg(ATH10K_DBG_MAC, "processing offchannel skb %p\n", | ||
1451 | skb); | ||
1452 | |||
1453 | hdr = (struct ieee80211_hdr *)skb->data; | ||
1454 | peer_addr = ieee80211_get_DA(hdr); | ||
1455 | vdev_id = ATH10K_SKB_CB(skb)->htt.vdev_id; | ||
1456 | |||
1457 | spin_lock_bh(&ar->data_lock); | ||
1458 | peer = ath10k_peer_find(ar, vdev_id, peer_addr); | ||
1459 | spin_unlock_bh(&ar->data_lock); | ||
1460 | |||
1461 | if (peer) | ||
1462 | ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n", | ||
1463 | peer_addr, vdev_id); | ||
1464 | |||
1465 | if (!peer) { | ||
1466 | ret = ath10k_peer_create(ar, vdev_id, peer_addr); | ||
1467 | if (ret) | ||
1468 | ath10k_warn("peer %pM on vdev %d not created (%d)\n", | ||
1469 | peer_addr, vdev_id, ret); | ||
1470 | } | ||
1471 | |||
1472 | spin_lock_bh(&ar->data_lock); | ||
1473 | INIT_COMPLETION(ar->offchan_tx_completed); | ||
1474 | ar->offchan_tx_skb = skb; | ||
1475 | spin_unlock_bh(&ar->data_lock); | ||
1476 | |||
1477 | ath10k_tx_htt(ar, skb); | ||
1478 | |||
1479 | ret = wait_for_completion_timeout(&ar->offchan_tx_completed, | ||
1480 | 3 * HZ); | ||
1481 | if (ret <= 0) | ||
1482 | ath10k_warn("timed out waiting for offchannel skb %p\n", | ||
1483 | skb); | ||
1484 | |||
1485 | if (!peer) { | ||
1486 | ret = ath10k_peer_delete(ar, vdev_id, peer_addr); | ||
1487 | if (ret) | ||
1488 | ath10k_warn("peer %pM on vdev %d not deleted (%d)\n", | ||
1489 | peer_addr, vdev_id, ret); | ||
1490 | } | ||
1491 | |||
1492 | mutex_unlock(&ar->conf_mutex); | ||
1493 | } | ||
1494 | } | ||
1495 | |||
1496 | /************/ | ||
1497 | /* Scanning */ | ||
1498 | /************/ | ||
1499 | |||
1500 | /* | ||
1501 | * This gets called if we don't get a heartbeat during a scan. | ||
1502 | * This may indicate that the FW has hung and we need to abort the | ||
1503 | * scan manually to prevent cancel_hw_scan() from deadlocking. | ||
1504 | */ | ||
1505 | void ath10k_reset_scan(unsigned long ptr) | ||
1506 | { | ||
1507 | struct ath10k *ar = (struct ath10k *)ptr; | ||
1508 | |||
1509 | spin_lock_bh(&ar->data_lock); | ||
1510 | if (!ar->scan.in_progress) { | ||
1511 | spin_unlock_bh(&ar->data_lock); | ||
1512 | return; | ||
1513 | } | ||
1514 | |||
1515 | ath10k_warn("scan timeout. resetting. fw issue?\n"); | ||
1516 | |||
1517 | if (ar->scan.is_roc) | ||
1518 | ieee80211_remain_on_channel_expired(ar->hw); | ||
1519 | else | ||
1520 | ieee80211_scan_completed(ar->hw, 1 /* aborted */); | ||
1521 | |||
1522 | ar->scan.in_progress = false; | ||
1523 | complete_all(&ar->scan.completed); | ||
1524 | spin_unlock_bh(&ar->data_lock); | ||
1525 | } | ||
1526 | |||
1527 | static int ath10k_abort_scan(struct ath10k *ar) | ||
1528 | { | ||
1529 | struct wmi_stop_scan_arg arg = { | ||
1530 | .req_id = 1, /* FIXME */ | ||
1531 | .req_type = WMI_SCAN_STOP_ONE, | ||
1532 | .u.scan_id = ATH10K_SCAN_ID, | ||
1533 | }; | ||
1534 | int ret; | ||
1535 | |||
1536 | lockdep_assert_held(&ar->conf_mutex); | ||
1537 | |||
1538 | del_timer_sync(&ar->scan.timeout); | ||
1539 | |||
1540 | spin_lock_bh(&ar->data_lock); | ||
1541 | if (!ar->scan.in_progress) { | ||
1542 | spin_unlock_bh(&ar->data_lock); | ||
1543 | return 0; | ||
1544 | } | ||
1545 | |||
1546 | ar->scan.aborting = true; | ||
1547 | spin_unlock_bh(&ar->data_lock); | ||
1548 | |||
1549 | ret = ath10k_wmi_stop_scan(ar, &arg); | ||
1550 | if (ret) { | ||
1551 | ath10k_warn("could not submit wmi stop scan (%d)\n", ret); | ||
1552 | return -EIO; | ||
1553 | } | ||
1554 | |||
1555 | ath10k_wmi_flush_tx(ar); | ||
1556 | |||
1557 | ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ); | ||
1558 | if (ret == 0) | ||
1559 | ath10k_warn("timed out while waiting for scan to stop\n"); | ||
1560 | |||
1561 | /* the scan may complete right after we time out here, so check | ||
1562 | * in_progress and tell mac80211 the scan is completed. If we | ||
1563 | * don't do that and the FW fails to send us a scan completion | ||
1564 | * indication then userspace won't be able to scan anymore */ | ||
1565 | ret = 0; | ||
1566 | |||
1567 | spin_lock_bh(&ar->data_lock); | ||
1568 | if (ar->scan.in_progress) { | ||
1569 | ath10k_warn("could not stop scan. its still in progress\n"); | ||
1570 | ar->scan.in_progress = false; | ||
1571 | ath10k_offchan_tx_purge(ar); | ||
1572 | ret = -ETIMEDOUT; | ||
1573 | } | ||
1574 | spin_unlock_bh(&ar->data_lock); | ||
1575 | |||
1576 | return ret; | ||
1577 | } | ||
1578 | |||
1579 | static int ath10k_start_scan(struct ath10k *ar, | ||
1580 | const struct wmi_start_scan_arg *arg) | ||
1581 | { | ||
1582 | int ret; | ||
1583 | |||
1584 | lockdep_assert_held(&ar->conf_mutex); | ||
1585 | |||
1586 | ret = ath10k_wmi_start_scan(ar, arg); | ||
1587 | if (ret) | ||
1588 | return ret; | ||
1589 | |||
1590 | /* make sure we submit the command so the completion | ||
1591 | * timeout makes sense */ | ||
1592 | ath10k_wmi_flush_tx(ar); | ||
1593 | |||
1594 | ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ); | ||
1595 | if (ret == 0) { | ||
1596 | ath10k_abort_scan(ar); | ||
1597 | return ret; | ||
1598 | } | ||
1599 | |||
1600 | /* the scan can complete earlier, before we even | ||
1601 | * start the timer. In that case the timer handler | ||
1602 | * checks ar->scan.in_progress and bails out if it is | ||
1603 | * false. Add a 200ms margin to account for event/command | ||
1604 | * processing. */ | ||
1605 | mod_timer(&ar->scan.timeout, jiffies + | ||
1606 | msecs_to_jiffies(arg->max_scan_time+200)); | ||
1607 | return 0; | ||
1608 | } | ||
1609 | |||
1610 | /**********************/ | ||
1611 | /* mac80211 callbacks */ | ||
1612 | /**********************/ | ||
1613 | |||
1614 | static void ath10k_tx(struct ieee80211_hw *hw, | ||
1615 | struct ieee80211_tx_control *control, | ||
1616 | struct sk_buff *skb) | ||
1617 | { | ||
1618 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
1619 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
1620 | struct ath10k *ar = hw->priv; | ||
1621 | struct ath10k_vif *arvif = NULL; | ||
1622 | u32 vdev_id = 0; | ||
1623 | u8 tid; | ||
1624 | |||
1625 | if (info->control.vif) { | ||
1626 | arvif = ath10k_vif_to_arvif(info->control.vif); | ||
1627 | vdev_id = arvif->vdev_id; | ||
1628 | } else if (ar->monitor_enabled) { | ||
1629 | vdev_id = ar->monitor_vdev_id; | ||
1630 | } | ||
1631 | |||
1632 | /* We should disable CCK RATE due to P2P */ | ||
1633 | if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) | ||
1634 | ath10k_dbg(ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); | ||
1635 | |||
1636 | /* we must calculate the tid before we apply the qos workaround | ||
1637 | * as we'd otherwise lose the qos control field */ | ||
1638 | tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; | ||
1639 | if (ieee80211_is_data_qos(hdr->frame_control) && | ||
1640 | is_unicast_ether_addr(ieee80211_get_DA(hdr))) { | ||
1641 | u8 *qc = ieee80211_get_qos_ctl(hdr); | ||
1642 | tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; | ||
1643 | } | ||
1644 | |||
1645 | ath10k_tx_h_qos_workaround(hw, control, skb); | ||
1646 | ath10k_tx_h_update_wep_key(skb); | ||
1647 | ath10k_tx_h_add_p2p_noa_ie(ar, skb); | ||
1648 | ath10k_tx_h_seq_no(skb); | ||
1649 | |||
1650 | memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb))); | ||
1651 | ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id; | ||
1652 | ATH10K_SKB_CB(skb)->htt.tid = tid; | ||
1653 | |||
1654 | if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { | ||
1655 | spin_lock_bh(&ar->data_lock); | ||
1656 | ATH10K_SKB_CB(skb)->htt.is_offchan = true; | ||
1657 | ATH10K_SKB_CB(skb)->htt.vdev_id = ar->scan.vdev_id; | ||
1658 | spin_unlock_bh(&ar->data_lock); | ||
1659 | |||
1660 | ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb); | ||
1661 | |||
1662 | skb_queue_tail(&ar->offchan_tx_queue, skb); | ||
1663 | ieee80211_queue_work(hw, &ar->offchan_tx_work); | ||
1664 | return; | ||
1665 | } | ||
1666 | |||
1667 | ath10k_tx_htt(ar, skb); | ||
1668 | } | ||
1669 | |||
1670 | /* | ||
1671 | * Initialize various parameters with default values. | ||
1672 | */ | ||
1673 | static int ath10k_start(struct ieee80211_hw *hw) | ||
1674 | { | ||
1675 | struct ath10k *ar = hw->priv; | ||
1676 | int ret; | ||
1677 | |||
1678 | ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1); | ||
1679 | if (ret) | ||
1680 | ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n", | ||
1681 | ret); | ||
1682 | |||
1683 | ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 0); | ||
1684 | if (ret) | ||
1685 | ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n", | ||
1686 | ret); | ||
1687 | |||
1688 | return 0; | ||
1689 | } | ||
1690 | |||
1691 | static void ath10k_stop(struct ieee80211_hw *hw) | ||
1692 | { | ||
1693 | struct ath10k *ar = hw->priv; | ||
1694 | |||
1695 | /* avoid leaks in case FW never confirms scan for offchannel */ | ||
1696 | cancel_work_sync(&ar->offchan_tx_work); | ||
1697 | ath10k_offchan_tx_purge(ar); | ||
1698 | } | ||
1699 | |||
1700 | static int ath10k_config(struct ieee80211_hw *hw, u32 changed) | ||
1701 | { | ||
1702 | struct ath10k_generic_iter ar_iter; | ||
1703 | struct ath10k *ar = hw->priv; | ||
1704 | struct ieee80211_conf *conf = &hw->conf; | ||
1705 | int ret = 0; | ||
1706 | u32 flags; | ||
1707 | |||
1708 | mutex_lock(&ar->conf_mutex); | ||
1709 | |||
1710 | if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { | ||
1711 | ath10k_dbg(ATH10K_DBG_MAC, "Config channel %d mhz\n", | ||
1712 | conf->chandef.chan->center_freq); | ||
1713 | spin_lock_bh(&ar->data_lock); | ||
1714 | ar->rx_channel = conf->chandef.chan; | ||
1715 | spin_unlock_bh(&ar->data_lock); | ||
1716 | } | ||
1717 | |||
1718 | if (changed & IEEE80211_CONF_CHANGE_PS) { | ||
1719 | memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter)); | ||
1720 | ar_iter.ar = ar; | ||
1721 | flags = IEEE80211_IFACE_ITER_RESUME_ALL; | ||
1722 | |||
1723 | ieee80211_iterate_active_interfaces_atomic(hw, | ||
1724 | flags, | ||
1725 | ath10k_ps_iter, | ||
1726 | &ar_iter); | ||
1727 | |||
1728 | ret = ar_iter.ret; | ||
1729 | } | ||
1730 | |||
1731 | if (changed & IEEE80211_CONF_CHANGE_MONITOR) { | ||
1732 | if (conf->flags & IEEE80211_CONF_MONITOR) | ||
1733 | ret = ath10k_monitor_create(ar); | ||
1734 | else | ||
1735 | ret = ath10k_monitor_destroy(ar); | ||
1736 | } | ||
1737 | |||
1738 | mutex_unlock(&ar->conf_mutex); | ||
1739 | return ret; | ||
1740 | } | ||
1741 | |||
1742 | /* | ||
1743 | * TODO: | ||
1744 | * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE, | ||
1745 | * because we will send mgmt frames without CCK. This requirement | ||
1746 | * for P2P_FIND/GO_NEG should be handled by checking the CCK flag | ||
1747 | * in the TX packet. | ||
1748 | */ | ||
1749 | static int ath10k_add_interface(struct ieee80211_hw *hw, | ||
1750 | struct ieee80211_vif *vif) | ||
1751 | { | ||
1752 | struct ath10k *ar = hw->priv; | ||
1753 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | ||
1754 | enum wmi_sta_powersave_param param; | ||
1755 | int ret = 0; | ||
1756 | u32 value; | ||
1757 | int bit; | ||
1758 | |||
1759 | mutex_lock(&ar->conf_mutex); | ||
1760 | |||
1761 | arvif->ar = ar; | ||
1762 | arvif->vif = vif; | ||
1763 | |||
1764 | if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) { | ||
1765 | ath10k_warn("Only one monitor interface allowed\n"); | ||
1766 | ret = -EBUSY; | ||
1767 | goto exit; | ||
1768 | } | ||
1769 | |||
1770 | bit = ffs(ar->free_vdev_map); | ||
1771 | if (bit == 0) { | ||
1772 | ret = -EBUSY; | ||
1773 | goto exit; | ||
1774 | } | ||
1775 | |||
1776 | arvif->vdev_id = bit - 1; | ||
1777 | arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; | ||
1778 | ar->free_vdev_map &= ~(1 << arvif->vdev_id); | ||
1779 | |||
1780 | if (ar->p2p) | ||
1781 | arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE; | ||
1782 | |||
1783 | switch (vif->type) { | ||
1784 | case NL80211_IFTYPE_UNSPECIFIED: | ||
1785 | case NL80211_IFTYPE_STATION: | ||
1786 | arvif->vdev_type = WMI_VDEV_TYPE_STA; | ||
1787 | if (vif->p2p) | ||
1788 | arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT; | ||
1789 | break; | ||
1790 | case NL80211_IFTYPE_ADHOC: | ||
1791 | arvif->vdev_type = WMI_VDEV_TYPE_IBSS; | ||
1792 | break; | ||
1793 | case NL80211_IFTYPE_AP: | ||
1794 | arvif->vdev_type = WMI_VDEV_TYPE_AP; | ||
1795 | |||
1796 | if (vif->p2p) | ||
1797 | arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO; | ||
1798 | break; | ||
1799 | case NL80211_IFTYPE_MONITOR: | ||
1800 | arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; | ||
1801 | break; | ||
1802 | default: | ||
1803 | WARN_ON(1); | ||
1804 | break; | ||
1805 | } | ||
1806 | |||
1807 | ath10k_dbg(ATH10K_DBG_MAC, "Add interface: id %d type %d subtype %d\n", | ||
1808 | arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype); | ||
1809 | |||
1810 | ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, | ||
1811 | arvif->vdev_subtype, vif->addr); | ||
1812 | if (ret) { | ||
1813 | ath10k_warn("WMI vdev create failed: ret %d\n", ret); | ||
1814 | goto exit; | ||
1815 | } | ||
1816 | |||
1817 | ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, WMI_VDEV_PARAM_DEF_KEYID, | ||
1818 | arvif->def_wep_key_index); | ||
1819 | if (ret) | ||
1820 | ath10k_warn("Failed to set default keyid: %d\n", ret); | ||
1821 | |||
1822 | ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, | ||
1823 | WMI_VDEV_PARAM_TX_ENCAP_TYPE, | ||
1824 | ATH10K_HW_TXRX_NATIVE_WIFI); | ||
1825 | if (ret) | ||
1826 | ath10k_warn("Failed to set TX encap: %d\n", ret); | ||
1827 | |||
1828 | if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { | ||
1829 | ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr); | ||
1830 | if (ret) { | ||
1831 | ath10k_warn("Failed to create peer for AP: %d\n", ret); | ||
1832 | goto exit; | ||
1833 | } | ||
1834 | } | ||
1835 | |||
1836 | if (arvif->vdev_type == WMI_VDEV_TYPE_STA) { | ||
1837 | param = WMI_STA_PS_PARAM_RX_WAKE_POLICY; | ||
1838 | value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; | ||
1839 | ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, | ||
1840 | param, value); | ||
1841 | if (ret) | ||
1842 | ath10k_warn("Failed to set RX wake policy: %d\n", ret); | ||
1843 | |||
1844 | param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD; | ||
1845 | value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS; | ||
1846 | ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, | ||
1847 | param, value); | ||
1848 | if (ret) | ||
1849 | ath10k_warn("Failed to set TX wake thresh: %d\n", ret); | ||
1850 | |||
1851 | param = WMI_STA_PS_PARAM_PSPOLL_COUNT; | ||
1852 | value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX; | ||
1853 | ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, | ||
1854 | param, value); | ||
1855 | if (ret) | ||
1856 | ath10k_warn("Failed to set PSPOLL count: %d\n", ret); | ||
1857 | } | ||
1858 | |||
1859 | if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) | ||
1860 | ar->monitor_present = true; | ||
1861 | |||
1862 | exit: | ||
1863 | mutex_unlock(&ar->conf_mutex); | ||
1864 | return ret; | ||
1865 | } | ||
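ath10k_add_interface() above hands out vdev ids from the free_vdev_map bitmask: ffs() picks the lowest free bit, zero means the map is exhausted (-EBUSY), and ath10k_remove_interface() later sets the bit again. A minimal userspace sketch of that allocation scheme (names here are illustrative, not the driver's):

    /* Standalone sketch of ffs()-based id allocation as used for vdev ids. */
    #include <stdio.h>
    #include <strings.h>                    /* ffs() */

    static unsigned int free_map = 0xff;    /* 8 ids, all free initially */

    static int alloc_id(void)
    {
            int bit = ffs(free_map);

            if (bit == 0)
                    return -1;              /* exhausted; the driver returns -EBUSY */

            free_map &= ~(1u << (bit - 1)); /* mark the id as in use */
            return bit - 1;                 /* ffs() is 1-based, ids are 0-based */
    }

    static void free_id(int id)
    {
            free_map |= 1u << id;           /* cf. ath10k_remove_interface() */
    }

    int main(void)
    {
            printf("first:  %d\n", alloc_id());     /* 0 */
            printf("second: %d\n", alloc_id());     /* 1 */
            free_id(0);
            printf("reused: %d\n", alloc_id());     /* 0 again */
            return 0;
    }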
1866 | |||
1867 | static void ath10k_remove_interface(struct ieee80211_hw *hw, | ||
1868 | struct ieee80211_vif *vif) | ||
1869 | { | ||
1870 | struct ath10k *ar = hw->priv; | ||
1871 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | ||
1872 | int ret; | ||
1873 | |||
1874 | mutex_lock(&ar->conf_mutex); | ||
1875 | |||
1876 | ath10k_dbg(ATH10K_DBG_MAC, "Remove interface: id %d\n", arvif->vdev_id); | ||
1877 | |||
1878 | ar->free_vdev_map |= 1 << (arvif->vdev_id); | ||
1879 | |||
1880 | if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { | ||
1881 | ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr); | ||
1882 | if (ret) | ||
1883 | ath10k_warn("Failed to remove peer for AP: %d\n", ret); | ||
1884 | |||
1885 | kfree(arvif->u.ap.noa_data); | ||
1886 | } | ||
1887 | |||
1888 | ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id); | ||
1889 | if (ret) | ||
1890 | ath10k_warn("WMI vdev delete failed: %d\n", ret); | ||
1891 | |||
1892 | if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) | ||
1893 | ar->monitor_present = false; | ||
1894 | |||
1895 | ath10k_peer_cleanup(ar, arvif->vdev_id); | ||
1896 | |||
1897 | mutex_unlock(&ar->conf_mutex); | ||
1898 | } | ||
1899 | |||
1900 | /* | ||
1901 | * FIXME: Has to be verified. | ||
1902 | */ | ||
1903 | #define SUPPORTED_FILTERS \ | ||
1904 | (FIF_PROMISC_IN_BSS | \ | ||
1905 | FIF_ALLMULTI | \ | ||
1906 | FIF_CONTROL | \ | ||
1907 | FIF_PSPOLL | \ | ||
1908 | FIF_OTHER_BSS | \ | ||
1909 | FIF_BCN_PRBRESP_PROMISC | \ | ||
1910 | FIF_PROBE_REQ | \ | ||
1911 | FIF_FCSFAIL) | ||
1912 | |||
1913 | static void ath10k_configure_filter(struct ieee80211_hw *hw, | ||
1914 | unsigned int changed_flags, | ||
1915 | unsigned int *total_flags, | ||
1916 | u64 multicast) | ||
1917 | { | ||
1918 | struct ath10k *ar = hw->priv; | ||
1919 | int ret; | ||
1920 | |||
1921 | mutex_lock(&ar->conf_mutex); | ||
1922 | |||
1923 | changed_flags &= SUPPORTED_FILTERS; | ||
1924 | *total_flags &= SUPPORTED_FILTERS; | ||
1925 | ar->filter_flags = *total_flags; | ||
1926 | |||
1927 | if ((ar->filter_flags & FIF_PROMISC_IN_BSS) && | ||
1928 | !ar->monitor_enabled) { | ||
1929 | ret = ath10k_monitor_start(ar, ar->monitor_vdev_id); | ||
1930 | if (ret) | ||
1931 | ath10k_warn("Unable to start monitor mode\n"); | ||
1932 | else | ||
1933 | ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode started\n"); | ||
1934 | } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) && | ||
1935 | ar->monitor_enabled) { | ||
1936 | ret = ath10k_monitor_stop(ar); | ||
1937 | if (ret) | ||
1938 | ath10k_warn("Unable to stop monitor mode\n"); | ||
1939 | else | ||
1940 | ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode stopped\n"); | ||
1941 | } | ||
1942 | |||
1943 | mutex_unlock(&ar->conf_mutex); | ||
1944 | } | ||
1945 | |||
1946 | static void ath10k_bss_info_changed(struct ieee80211_hw *hw, | ||
1947 | struct ieee80211_vif *vif, | ||
1948 | struct ieee80211_bss_conf *info, | ||
1949 | u32 changed) | ||
1950 | { | ||
1951 | struct ath10k *ar = hw->priv; | ||
1952 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | ||
1953 | int ret = 0; | ||
1954 | |||
1955 | mutex_lock(&ar->conf_mutex); | ||
1956 | |||
1957 | if (changed & BSS_CHANGED_IBSS) | ||
1958 | ath10k_control_ibss(arvif, info, vif->addr); | ||
1959 | |||
1960 | if (changed & BSS_CHANGED_BEACON_INT) { | ||
1961 | arvif->beacon_interval = info->beacon_int; | ||
1962 | ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, | ||
1963 | WMI_VDEV_PARAM_BEACON_INTERVAL, | ||
1964 | arvif->beacon_interval); | ||
1965 | if (ret) | ||
1966 | ath10k_warn("Failed to set beacon interval for VDEV: %d\n", | ||
1967 | arvif->vdev_id); | ||
1968 | else | ||
1969 | ath10k_dbg(ATH10K_DBG_MAC, | ||
1970 | "Beacon interval: %d set for VDEV: %d\n", | ||
1971 | arvif->beacon_interval, arvif->vdev_id); | ||
1972 | } | ||
1973 | |||
1974 | if (changed & BSS_CHANGED_BEACON) { | ||
1975 | ret = ath10k_wmi_pdev_set_param(ar, | ||
1976 | WMI_PDEV_PARAM_BEACON_TX_MODE, | ||
1977 | WMI_BEACON_STAGGERED_MODE); | ||
1978 | if (ret) | ||
1979 | ath10k_warn("Failed to set beacon mode for VDEV: %d\n", | ||
1980 | arvif->vdev_id); | ||
1981 | else | ||
1982 | ath10k_dbg(ATH10K_DBG_MAC, | ||
1983 | "Set staggered beacon mode for VDEV: %d\n", | ||
1984 | arvif->vdev_id); | ||
1985 | } | ||
1986 | |||
1987 | if (changed & BSS_CHANGED_BEACON_INFO) { | ||
1988 | arvif->dtim_period = info->dtim_period; | ||
1989 | |||
1990 | ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, | ||
1991 | WMI_VDEV_PARAM_DTIM_PERIOD, | ||
1992 | arvif->dtim_period); | ||
1993 | if (ret) | ||
1994 | ath10k_warn("Failed to set dtim period for VDEV: %d\n", | ||
1995 | arvif->vdev_id); | ||
1996 | else | ||
1997 | ath10k_dbg(ATH10K_DBG_MAC, | ||
1998 | "Set dtim period: %d for VDEV: %d\n", | ||
1999 | arvif->dtim_period, arvif->vdev_id); | ||
2000 | } | ||
2001 | |||
2002 | if (changed & BSS_CHANGED_SSID && | ||
2003 | vif->type == NL80211_IFTYPE_AP) { | ||
2004 | arvif->u.ap.ssid_len = info->ssid_len; | ||
2005 | if (info->ssid_len) | ||
2006 | memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len); | ||
2007 | arvif->u.ap.hidden_ssid = info->hidden_ssid; | ||
2008 | } | ||
2009 | |||
2010 | if (changed & BSS_CHANGED_BSSID) { | ||
2011 | if (!is_zero_ether_addr(info->bssid)) { | ||
2012 | ret = ath10k_peer_create(ar, arvif->vdev_id, | ||
2013 | info->bssid); | ||
2014 | if (ret) | ||
2015 | ath10k_warn("Failed to add peer: %pM for VDEV: %d\n", | ||
2016 | info->bssid, arvif->vdev_id); | ||
2017 | else | ||
2018 | ath10k_dbg(ATH10K_DBG_MAC, | ||
2019 | "Added peer: %pM for VDEV: %d\n", | ||
2020 | info->bssid, arvif->vdev_id); | ||
2021 | |||
2022 | |||
2023 | if (vif->type == NL80211_IFTYPE_STATION) { | ||
2024 | /* | ||
2025 | * this is never erased as we use it for crypto key | ||
2026 | * clearing; this is a FW requirement | ||
2027 | */ | ||
2028 | memcpy(arvif->u.sta.bssid, info->bssid, | ||
2029 | ETH_ALEN); | ||
2030 | |||
2031 | ret = ath10k_vdev_start(arvif); | ||
2032 | if (!ret) | ||
2033 | ath10k_dbg(ATH10K_DBG_MAC, | ||
2034 | "VDEV: %d started with BSSID: %pM\n", | ||
2035 | arvif->vdev_id, info->bssid); | ||
2036 | } | ||
2037 | |||
2038 | /* | ||
2039 | * Mac80211 does not keep IBSS bssid when leaving IBSS, | ||
2040 | * so the driver needs to store it. It is needed when leaving | ||
2041 | * IBSS in order to remove BSSID peer. | ||
2042 | */ | ||
2043 | if (vif->type == NL80211_IFTYPE_ADHOC) | ||
2044 | memcpy(arvif->u.ibss.bssid, info->bssid, | ||
2045 | ETH_ALEN); | ||
2046 | } | ||
2047 | } | ||
2048 | |||
2049 | if (changed & BSS_CHANGED_BEACON_ENABLED) | ||
2050 | ath10k_control_beaconing(arvif, info); | ||
2051 | |||
2052 | if (changed & BSS_CHANGED_ERP_CTS_PROT) { | ||
2053 | u32 cts_prot; | ||
2054 | if (info->use_cts_prot) | ||
2055 | cts_prot = 1; | ||
2056 | else | ||
2057 | cts_prot = 0; | ||
2058 | |||
2059 | ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, | ||
2060 | WMI_VDEV_PARAM_ENABLE_RTSCTS, | ||
2061 | cts_prot); | ||
2062 | if (ret) | ||
2063 | ath10k_warn("Failed to set CTS prot for VDEV: %d\n", | ||
2064 | arvif->vdev_id); | ||
2065 | else | ||
2066 | ath10k_dbg(ATH10K_DBG_MAC, | ||
2067 | "Set CTS prot: %d for VDEV: %d\n", | ||
2068 | cts_prot, arvif->vdev_id); | ||
2069 | } | ||
2070 | |||
2071 | if (changed & BSS_CHANGED_ERP_SLOT) { | ||
2072 | u32 slottime; | ||
2073 | if (info->use_short_slot) | ||
2074 | slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */ | ||
2075 | |||
2076 | else | ||
2077 | slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */ | ||
2078 | |||
2079 | ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, | ||
2080 | WMI_VDEV_PARAM_SLOT_TIME, | ||
2081 | slottime); | ||
2082 | if (ret) | ||
2083 | ath10k_warn("Failed to set erp slot for VDEV: %d\n", | ||
2084 | arvif->vdev_id); | ||
2085 | else | ||
2086 | ath10k_dbg(ATH10K_DBG_MAC, | ||
2087 | "Set slottime: %d for VDEV: %d\n", | ||
2088 | slottime, arvif->vdev_id); | ||
2089 | } | ||
2090 | |||
2091 | if (changed & BSS_CHANGED_ERP_PREAMBLE) { | ||
2092 | u32 preamble; | ||
2093 | if (info->use_short_preamble) | ||
2094 | preamble = WMI_VDEV_PREAMBLE_SHORT; | ||
2095 | else | ||
2096 | preamble = WMI_VDEV_PREAMBLE_LONG; | ||
2097 | |||
2098 | ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, | ||
2099 | WMI_VDEV_PARAM_PREAMBLE, | ||
2100 | preamble); | ||
2101 | if (ret) | ||
2102 | ath10k_warn("Failed to set preamble for VDEV: %d\n", | ||
2103 | arvif->vdev_id); | ||
2104 | else | ||
2105 | ath10k_dbg(ATH10K_DBG_MAC, | ||
2106 | "Set preamble: %d for VDEV: %d\n", | ||
2107 | preamble, arvif->vdev_id); | ||
2108 | } | ||
2109 | |||
2110 | if (changed & BSS_CHANGED_ASSOC) { | ||
2111 | if (info->assoc) | ||
2112 | ath10k_bss_assoc(hw, vif, info); | ||
2113 | } | ||
2114 | |||
2115 | mutex_unlock(&ar->conf_mutex); | ||
2116 | } | ||
2117 | |||
2118 | static int ath10k_hw_scan(struct ieee80211_hw *hw, | ||
2119 | struct ieee80211_vif *vif, | ||
2120 | struct cfg80211_scan_request *req) | ||
2121 | { | ||
2122 | struct ath10k *ar = hw->priv; | ||
2123 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | ||
2124 | struct wmi_start_scan_arg arg; | ||
2125 | int ret = 0; | ||
2126 | int i; | ||
2127 | |||
2128 | mutex_lock(&ar->conf_mutex); | ||
2129 | |||
2130 | spin_lock_bh(&ar->data_lock); | ||
2131 | if (ar->scan.in_progress) { | ||
2132 | spin_unlock_bh(&ar->data_lock); | ||
2133 | ret = -EBUSY; | ||
2134 | goto exit; | ||
2135 | } | ||
2136 | |||
2137 | INIT_COMPLETION(ar->scan.started); | ||
2138 | INIT_COMPLETION(ar->scan.completed); | ||
2139 | ar->scan.in_progress = true; | ||
2140 | ar->scan.aborting = false; | ||
2141 | ar->scan.is_roc = false; | ||
2142 | ar->scan.vdev_id = arvif->vdev_id; | ||
2143 | spin_unlock_bh(&ar->data_lock); | ||
2144 | |||
2145 | memset(&arg, 0, sizeof(arg)); | ||
2146 | ath10k_wmi_start_scan_init(ar, &arg); | ||
2147 | arg.vdev_id = arvif->vdev_id; | ||
2148 | arg.scan_id = ATH10K_SCAN_ID; | ||
2149 | |||
2150 | if (!req->no_cck) | ||
2151 | arg.scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES; | ||
2152 | |||
2153 | if (req->ie_len) { | ||
2154 | arg.ie_len = req->ie_len; | ||
2155 | memcpy(arg.ie, req->ie, arg.ie_len); | ||
2156 | } | ||
2157 | |||
2158 | if (req->n_ssids) { | ||
2159 | arg.n_ssids = req->n_ssids; | ||
2160 | for (i = 0; i < arg.n_ssids; i++) { | ||
2161 | arg.ssids[i].len = req->ssids[i].ssid_len; | ||
2162 | arg.ssids[i].ssid = req->ssids[i].ssid; | ||
2163 | } | ||
2164 | } | ||
2165 | |||
2166 | if (req->n_channels) { | ||
2167 | arg.n_channels = req->n_channels; | ||
2168 | for (i = 0; i < arg.n_channels; i++) | ||
2169 | arg.channels[i] = req->channels[i]->center_freq; | ||
2170 | } | ||
2171 | |||
2172 | ret = ath10k_start_scan(ar, &arg); | ||
2173 | if (ret) { | ||
2174 | ath10k_warn("could not start hw scan (%d)\n", ret); | ||
2175 | spin_lock_bh(&ar->data_lock); | ||
2176 | ar->scan.in_progress = false; | ||
2177 | spin_unlock_bh(&ar->data_lock); | ||
2178 | } | ||
2179 | |||
2180 | exit: | ||
2181 | mutex_unlock(&ar->conf_mutex); | ||
2182 | return ret; | ||
2183 | } | ||
2184 | |||
2185 | static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw, | ||
2186 | struct ieee80211_vif *vif) | ||
2187 | { | ||
2188 | struct ath10k *ar = hw->priv; | ||
2189 | int ret; | ||
2190 | |||
2191 | mutex_lock(&ar->conf_mutex); | ||
2192 | ret = ath10k_abort_scan(ar); | ||
2193 | if (ret) { | ||
2194 | ath10k_warn("couldn't abort scan (%d). forcefully sending scan completion to mac80211\n", | ||
2195 | ret); | ||
2196 | ieee80211_scan_completed(hw, 1 /* aborted */); | ||
2197 | } | ||
2198 | mutex_unlock(&ar->conf_mutex); | ||
2199 | } | ||
2200 | |||
2201 | static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, | ||
2202 | struct ieee80211_vif *vif, struct ieee80211_sta *sta, | ||
2203 | struct ieee80211_key_conf *key) | ||
2204 | { | ||
2205 | struct ath10k *ar = hw->priv; | ||
2206 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | ||
2207 | struct ath10k_peer *peer; | ||
2208 | const u8 *peer_addr; | ||
2209 | bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 || | ||
2210 | key->cipher == WLAN_CIPHER_SUITE_WEP104; | ||
2211 | int ret = 0; | ||
2212 | |||
2213 | if (key->keyidx > WMI_MAX_KEY_INDEX) | ||
2214 | return -ENOSPC; | ||
2215 | |||
2216 | mutex_lock(&ar->conf_mutex); | ||
2217 | |||
2218 | if (sta) | ||
2219 | peer_addr = sta->addr; | ||
2220 | else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) | ||
2221 | peer_addr = vif->bss_conf.bssid; | ||
2222 | else | ||
2223 | peer_addr = vif->addr; | ||
2224 | |||
2225 | key->hw_key_idx = key->keyidx; | ||
2226 | |||
2227 | /* the peer should not disappear midway (unless FW goes awry) since | ||
2228 | * we already hold conf_mutex. we just make sure it's there now. */ | ||
2229 | spin_lock_bh(&ar->data_lock); | ||
2230 | peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); | ||
2231 | spin_unlock_bh(&ar->data_lock); | ||
2232 | |||
2233 | if (!peer) { | ||
2234 | if (cmd == SET_KEY) { | ||
2235 | ath10k_warn("cannot install key for non-existent peer %pM\n", | ||
2236 | peer_addr); | ||
2237 | ret = -EOPNOTSUPP; | ||
2238 | goto exit; | ||
2239 | } else { | ||
2240 | /* if the peer doesn't exist there is no key to disable | ||
2241 | * anymore */ | ||
2242 | goto exit; | ||
2243 | } | ||
2244 | } | ||
2245 | |||
2246 | if (is_wep) { | ||
2247 | if (cmd == SET_KEY) | ||
2248 | arvif->wep_keys[key->keyidx] = key; | ||
2249 | else | ||
2250 | arvif->wep_keys[key->keyidx] = NULL; | ||
2251 | |||
2252 | if (cmd == DISABLE_KEY) | ||
2253 | ath10k_clear_vdev_key(arvif, key); | ||
2254 | } | ||
2255 | |||
2256 | ret = ath10k_install_key(arvif, key, cmd, peer_addr); | ||
2257 | if (ret) { | ||
2258 | ath10k_warn("ath10k_install_key failed (%d)\n", ret); | ||
2259 | goto exit; | ||
2260 | } | ||
2261 | |||
2262 | spin_lock_bh(&ar->data_lock); | ||
2263 | peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); | ||
2264 | if (peer && cmd == SET_KEY) | ||
2265 | peer->keys[key->keyidx] = key; | ||
2266 | else if (peer && cmd == DISABLE_KEY) | ||
2267 | peer->keys[key->keyidx] = NULL; | ||
2268 | else if (peer == NULL) | ||
2269 | /* impossible unless FW goes crazy */ | ||
2270 | ath10k_warn("peer %pM disappeared!\n", peer_addr); | ||
2271 | spin_unlock_bh(&ar->data_lock); | ||
2272 | |||
2273 | exit: | ||
2274 | mutex_unlock(&ar->conf_mutex); | ||
2275 | return ret; | ||
2276 | } | ||
2277 | |||
2278 | static int ath10k_sta_state(struct ieee80211_hw *hw, | ||
2279 | struct ieee80211_vif *vif, | ||
2280 | struct ieee80211_sta *sta, | ||
2281 | enum ieee80211_sta_state old_state, | ||
2282 | enum ieee80211_sta_state new_state) | ||
2283 | { | ||
2284 | struct ath10k *ar = hw->priv; | ||
2285 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | ||
2286 | int ret = 0; | ||
2287 | |||
2288 | mutex_lock(&ar->conf_mutex); | ||
2289 | |||
2290 | if (old_state == IEEE80211_STA_NOTEXIST && | ||
2291 | new_state == IEEE80211_STA_NONE && | ||
2292 | vif->type != NL80211_IFTYPE_STATION) { | ||
2293 | /* | ||
2294 | * New station addition. | ||
2295 | */ | ||
2296 | ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr); | ||
2297 | if (ret) | ||
2298 | ath10k_warn("Failed to add peer: %pM for VDEV: %d\n", | ||
2299 | sta->addr, arvif->vdev_id); | ||
2300 | else | ||
2301 | ath10k_dbg(ATH10K_DBG_MAC, | ||
2302 | "Added peer: %pM for VDEV: %d\n", | ||
2303 | sta->addr, arvif->vdev_id); | ||
2304 | } else if ((old_state == IEEE80211_STA_NONE && | ||
2305 | new_state == IEEE80211_STA_NOTEXIST)) { | ||
2306 | /* | ||
2307 | * Existing station deletion. | ||
2308 | */ | ||
2309 | ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); | ||
2310 | if (ret) | ||
2311 | ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n", | ||
2312 | sta->addr, arvif->vdev_id); | ||
2313 | else | ||
2314 | ath10k_dbg(ATH10K_DBG_MAC, | ||
2315 | "Removed peer: %pM for VDEV: %d\n", | ||
2316 | sta->addr, arvif->vdev_id); | ||
2317 | |||
2318 | if (vif->type == NL80211_IFTYPE_STATION) | ||
2319 | ath10k_bss_disassoc(hw, vif); | ||
2320 | } else if (old_state == IEEE80211_STA_AUTH && | ||
2321 | new_state == IEEE80211_STA_ASSOC && | ||
2322 | (vif->type == NL80211_IFTYPE_AP || | ||
2323 | vif->type == NL80211_IFTYPE_ADHOC)) { | ||
2324 | /* | ||
2325 | * New association. | ||
2326 | */ | ||
2327 | ret = ath10k_station_assoc(ar, arvif, sta); | ||
2328 | if (ret) | ||
2329 | ath10k_warn("Failed to associate station: %pM\n", | ||
2330 | sta->addr); | ||
2331 | else | ||
2332 | ath10k_dbg(ATH10K_DBG_MAC, | ||
2333 | "Station %pM moved to assoc state\n", | ||
2334 | sta->addr); | ||
2335 | } else if (old_state == IEEE80211_STA_ASSOC && | ||
2336 | new_state == IEEE80211_STA_AUTH && | ||
2337 | (vif->type == NL80211_IFTYPE_AP || | ||
2338 | vif->type == NL80211_IFTYPE_ADHOC)) { | ||
2339 | /* | ||
2340 | * Disassociation. | ||
2341 | */ | ||
2342 | ret = ath10k_station_disassoc(ar, arvif, sta); | ||
2343 | if (ret) | ||
2344 | ath10k_warn("Failed to disassociate station: %pM\n", | ||
2345 | sta->addr); | ||
2346 | else | ||
2347 | ath10k_dbg(ATH10K_DBG_MAC, | ||
2348 | "Station %pM moved to disassociated state\n", | ||
2349 | sta->addr); | ||
2350 | } | ||
2351 | |||
2352 | mutex_unlock(&ar->conf_mutex); | ||
2353 | return ret; | ||
2354 | } | ||
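ath10k_sta_state() above reacts to only four of the station-state transitions mac80211 can report and leaves the rest alone. A compact restatement of that dispatch (the enum is a simplified stand-in for mac80211's ieee80211_sta_state, not the real one):

    /* Standalone sketch of the transition dispatch in ath10k_sta_state(). */
    #include <stdio.h>

    enum sta_state { ST_NOTEXIST, ST_NONE, ST_AUTH, ST_ASSOC };

    static const char *action(enum sta_state old_st, enum sta_state new_st)
    {
            if (old_st == ST_NOTEXIST && new_st == ST_NONE)
                    return "create firmware peer (non-STA vifs)";
            if (old_st == ST_NONE && new_st == ST_NOTEXIST)
                    return "delete firmware peer (and disassoc on STA vifs)";
            if (old_st == ST_AUTH && new_st == ST_ASSOC)
                    return "associate station (AP/IBSS vifs)";
            if (old_st == ST_ASSOC && new_st == ST_AUTH)
                    return "disassociate station (AP/IBSS vifs)";
            return "no-op";
    }

    int main(void)
    {
            printf("%s\n", action(ST_NOTEXIST, ST_NONE));
            printf("%s\n", action(ST_ASSOC, ST_AUTH));
            printf("%s\n", action(ST_NONE, ST_AUTH));       /* no-op */
            return 0;
    }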
2355 | |||
2356 | static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, | ||
2357 | u16 ac, bool enable) | ||
2358 | { | ||
2359 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | ||
2360 | u32 value = 0; | ||
2361 | int ret = 0; | ||
2362 | |||
2363 | if (arvif->vdev_type != WMI_VDEV_TYPE_STA) | ||
2364 | return 0; | ||
2365 | |||
2366 | switch (ac) { | ||
2367 | case IEEE80211_AC_VO: | ||
2368 | value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | | ||
2369 | WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; | ||
2370 | break; | ||
2371 | case IEEE80211_AC_VI: | ||
2372 | value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | | ||
2373 | WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; | ||
2374 | break; | ||
2375 | case IEEE80211_AC_BE: | ||
2376 | value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN | | ||
2377 | WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; | ||
2378 | break; | ||
2379 | case IEEE80211_AC_BK: | ||
2380 | value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | | ||
2381 | WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; | ||
2382 | break; | ||
2383 | } | ||
2384 | |||
2385 | if (enable) | ||
2386 | arvif->u.sta.uapsd |= value; | ||
2387 | else | ||
2388 | arvif->u.sta.uapsd &= ~value; | ||
2389 | |||
2390 | ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, | ||
2391 | WMI_STA_PS_PARAM_UAPSD, | ||
2392 | arvif->u.sta.uapsd); | ||
2393 | if (ret) { | ||
2394 | ath10k_warn("could not set uapsd params %d\n", ret); | ||
2395 | goto exit; | ||
2396 | } | ||
2397 | |||
2398 | if (arvif->u.sta.uapsd) | ||
2399 | value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD; | ||
2400 | else | ||
2401 | value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; | ||
2402 | |||
2403 | ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, | ||
2404 | WMI_STA_PS_PARAM_RX_WAKE_POLICY, | ||
2405 | value); | ||
2406 | if (ret) | ||
2407 | ath10k_warn("could not set rx wake param %d\n", ret); | ||
2408 | |||
2409 | exit: | ||
2410 | return ret; | ||
2411 | } | ||
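In ath10k_conf_tx_uapsd() above, every access category contributes one delivery-enable and one trigger-enable bit to a single per-station U-APSD mask, so enabling and disabling ACs is plain OR / AND-NOT accumulation, and the RX wake policy switches to poll/U-APSD whenever any bit remains set. A sketch with invented bit values (the real WMI_STA_PS_UAPSD_* constants are firmware-defined and may differ):

    /* Standalone sketch of the U-APSD mask accumulation; bit layout is invented. */
    #include <stdio.h>

    #define UAPSD_AC_BK     (1u << 0 | 1u << 1)     /* delivery + trigger bits */
    #define UAPSD_AC_BE     (1u << 2 | 1u << 3)
    #define UAPSD_AC_VI     (1u << 4 | 1u << 5)
    #define UAPSD_AC_VO     (1u << 6 | 1u << 7)

    int main(void)
    {
            unsigned int uapsd = 0;

            uapsd |= UAPSD_AC_VO;                   /* enable U-APSD for voice */
            uapsd |= UAPSD_AC_VI;                   /* ...and video */
            uapsd &= ~UAPSD_AC_VI;                  /* video disabled again */

            printf("mask = 0x%02x\n", uapsd);       /* 0xc0: only the VO bits remain */
            printf("wake policy = %s\n", uapsd ? "poll/U-APSD" : "wake");
            return 0;
    }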
2412 | |||
2413 | static int ath10k_conf_tx(struct ieee80211_hw *hw, | ||
2414 | struct ieee80211_vif *vif, u16 ac, | ||
2415 | const struct ieee80211_tx_queue_params *params) | ||
2416 | { | ||
2417 | struct ath10k *ar = hw->priv; | ||
2418 | struct wmi_wmm_params_arg *p = NULL; | ||
2419 | int ret; | ||
2420 | |||
2421 | mutex_lock(&ar->conf_mutex); | ||
2422 | |||
2423 | switch (ac) { | ||
2424 | case IEEE80211_AC_VO: | ||
2425 | p = &ar->wmm_params.ac_vo; | ||
2426 | break; | ||
2427 | case IEEE80211_AC_VI: | ||
2428 | p = &ar->wmm_params.ac_vi; | ||
2429 | break; | ||
2430 | case IEEE80211_AC_BE: | ||
2431 | p = &ar->wmm_params.ac_be; | ||
2432 | break; | ||
2433 | case IEEE80211_AC_BK: | ||
2434 | p = &ar->wmm_params.ac_bk; | ||
2435 | break; | ||
2436 | } | ||
2437 | |||
2438 | if (WARN_ON(!p)) { | ||
2439 | ret = -EINVAL; | ||
2440 | goto exit; | ||
2441 | } | ||
2442 | |||
2443 | p->cwmin = params->cw_min; | ||
2444 | p->cwmax = params->cw_max; | ||
2445 | p->aifs = params->aifs; | ||
2446 | |||
2447 | /* | ||
2448 | * The channel time duration programmed in the HW is in absolute | ||
2449 | * microseconds, while mac80211 gives the txop in units of | ||
2450 | * 32 microseconds. | ||
2451 | */ | ||
2452 | p->txop = params->txop * 32; | ||
2453 | |||
2454 | /* FIXME: FW accepts wmm params per hw, not per vif */ | ||
2455 | ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params); | ||
2456 | if (ret) { | ||
2457 | ath10k_warn("could not set wmm params %d\n", ret); | ||
2458 | goto exit; | ||
2459 | } | ||
2460 | |||
2461 | ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd); | ||
2462 | if (ret) | ||
2463 | ath10k_warn("could not set sta uapsd %d\n", ret); | ||
2464 | |||
2465 | exit: | ||
2466 | mutex_unlock(&ar->conf_mutex); | ||
2467 | return ret; | ||
2468 | } | ||
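The txop assignment above is purely a unit conversion: mac80211 passes the TXOP limit in the 32 µs units used by WMM IEs, while the firmware expects absolute microseconds. A tiny worked example (94 and 47 are the customary WMM defaults for AC_VI and AC_VO, quoted only for illustration):

    /* Standalone sketch of the TXOP unit conversion in ath10k_conf_tx(). */
    #include <stdio.h>

    static unsigned int txop_units_to_us(unsigned int txop)
    {
            return txop * 32;               /* WMM encodes TXOP in 32 us units */
    }

    int main(void)
    {
            printf("AC_VI: %u us\n", txop_units_to_us(94)); /* 3008 us */
            printf("AC_VO: %u us\n", txop_units_to_us(47)); /* 1504 us */
            return 0;
    }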
2469 | |||
2470 | #define ATH10K_ROC_TIMEOUT_HZ (2*HZ) | ||
2471 | |||
2472 | static int ath10k_remain_on_channel(struct ieee80211_hw *hw, | ||
2473 | struct ieee80211_vif *vif, | ||
2474 | struct ieee80211_channel *chan, | ||
2475 | int duration, | ||
2476 | enum ieee80211_roc_type type) | ||
2477 | { | ||
2478 | struct ath10k *ar = hw->priv; | ||
2479 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | ||
2480 | struct wmi_start_scan_arg arg; | ||
2481 | int ret; | ||
2482 | |||
2483 | mutex_lock(&ar->conf_mutex); | ||
2484 | |||
2485 | spin_lock_bh(&ar->data_lock); | ||
2486 | if (ar->scan.in_progress) { | ||
2487 | spin_unlock_bh(&ar->data_lock); | ||
2488 | ret = -EBUSY; | ||
2489 | goto exit; | ||
2490 | } | ||
2491 | |||
2492 | INIT_COMPLETION(ar->scan.started); | ||
2493 | INIT_COMPLETION(ar->scan.completed); | ||
2494 | INIT_COMPLETION(ar->scan.on_channel); | ||
2495 | ar->scan.in_progress = true; | ||
2496 | ar->scan.aborting = false; | ||
2497 | ar->scan.is_roc = true; | ||
2498 | ar->scan.vdev_id = arvif->vdev_id; | ||
2499 | ar->scan.roc_freq = chan->center_freq; | ||
2500 | spin_unlock_bh(&ar->data_lock); | ||
2501 | |||
2502 | memset(&arg, 0, sizeof(arg)); | ||
2503 | ath10k_wmi_start_scan_init(ar, &arg); | ||
2504 | arg.vdev_id = arvif->vdev_id; | ||
2505 | arg.scan_id = ATH10K_SCAN_ID; | ||
2506 | arg.n_channels = 1; | ||
2507 | arg.channels[0] = chan->center_freq; | ||
2508 | arg.dwell_time_active = duration; | ||
2509 | arg.dwell_time_passive = duration; | ||
2510 | arg.max_scan_time = 2 * duration; | ||
2511 | arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; | ||
2512 | arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; | ||
2513 | |||
2514 | ret = ath10k_start_scan(ar, &arg); | ||
2515 | if (ret) { | ||
2516 | ath10k_warn("could not start roc scan (%d)\n", ret); | ||
2517 | spin_lock_bh(&ar->data_lock); | ||
2518 | ar->scan.in_progress = false; | ||
2519 | spin_unlock_bh(&ar->data_lock); | ||
2520 | goto exit; | ||
2521 | } | ||
2522 | |||
2523 | ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ); | ||
2524 | if (ret == 0) { | ||
2525 | ath10k_warn("could not switch to channel for roc scan\n"); | ||
2526 | ath10k_abort_scan(ar); | ||
2527 | ret = -ETIMEDOUT; | ||
2528 | goto exit; | ||
2529 | } | ||
2530 | |||
2531 | ret = 0; | ||
2532 | exit: | ||
2533 | mutex_unlock(&ar->conf_mutex); | ||
2534 | return ret; | ||
2535 | } | ||
2536 | |||
2537 | static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) | ||
2538 | { | ||
2539 | struct ath10k *ar = hw->priv; | ||
2540 | |||
2541 | mutex_lock(&ar->conf_mutex); | ||
2542 | ath10k_abort_scan(ar); | ||
2543 | mutex_unlock(&ar->conf_mutex); | ||
2544 | |||
2545 | return 0; | ||
2546 | } | ||
2547 | |||
2548 | /* | ||
2549 | * Both RTS and Fragmentation threshold are interface-specific | ||
2550 | * in ath10k, but device-specific in mac80211. | ||
2551 | */ | ||
2552 | static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif) | ||
2553 | { | ||
2554 | struct ath10k_generic_iter *ar_iter = data; | ||
2555 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | ||
2556 | u32 rts = ar_iter->ar->hw->wiphy->rts_threshold; | ||
2557 | |||
2558 | rts = min_t(u32, rts, ATH10K_RTS_MAX); | ||
2559 | |||
2560 | ar_iter->ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id, | ||
2561 | WMI_VDEV_PARAM_RTS_THRESHOLD, | ||
2562 | rts); | ||
2563 | if (ar_iter->ret) | ||
2564 | ath10k_warn("Failed to set RTS threshold for VDEV: %d\n", | ||
2565 | arvif->vdev_id); | ||
2566 | else | ||
2567 | ath10k_dbg(ATH10K_DBG_MAC, | ||
2568 | "Set RTS threshold: %d for VDEV: %d\n", | ||
2569 | rts, arvif->vdev_id); | ||
2570 | } | ||
2571 | |||
2572 | static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value) | ||
2573 | { | ||
2574 | struct ath10k_generic_iter ar_iter; | ||
2575 | struct ath10k *ar = hw->priv; | ||
2576 | |||
2577 | memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter)); | ||
2578 | ar_iter.ar = ar; | ||
2579 | |||
2580 | mutex_lock(&ar->conf_mutex); | ||
2581 | ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL, | ||
2582 | ath10k_set_rts_iter, &ar_iter); | ||
2583 | mutex_unlock(&ar->conf_mutex); | ||
2584 | |||
2585 | return ar_iter.ret; | ||
2586 | } | ||
2587 | |||
2588 | static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif) | ||
2589 | { | ||
2590 | struct ath10k_generic_iter *ar_iter = data; | ||
2591 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | ||
2592 | u32 frag = ar_iter->ar->hw->wiphy->frag_threshold; | ||
2593 | int ret; | ||
2594 | |||
2595 | frag = clamp_t(u32, frag, | ||
2596 | ATH10K_FRAGMT_THRESHOLD_MIN, | ||
2597 | ATH10K_FRAGMT_THRESHOLD_MAX); | ||
2598 | |||
2599 | ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id, | ||
2600 | WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD, | ||
2601 | frag); | ||
2602 | |||
2603 | ar_iter->ret = ret; | ||
2604 | if (ar_iter->ret) | ||
2605 | ath10k_warn("Failed to set frag threshold for VDEV: %d\n", | ||
2606 | arvif->vdev_id); | ||
2607 | else | ||
2608 | ath10k_dbg(ATH10K_DBG_MAC, | ||
2609 | "Set frag threshold: %d for VDEV: %d\n", | ||
2610 | frag, arvif->vdev_id); | ||
2611 | } | ||
2612 | |||
2613 | static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value) | ||
2614 | { | ||
2615 | struct ath10k_generic_iter ar_iter; | ||
2616 | struct ath10k *ar = hw->priv; | ||
2617 | |||
2618 | memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter)); | ||
2619 | ar_iter.ar = ar; | ||
2620 | |||
2621 | mutex_lock(&ar->conf_mutex); | ||
2622 | ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL, | ||
2623 | ath10k_set_frag_iter, &ar_iter); | ||
2624 | mutex_unlock(&ar->conf_mutex); | ||
2625 | |||
2626 | return ar_iter.ret; | ||
2627 | } | ||
2628 | |||
2629 | static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) | ||
2630 | { | ||
2631 | struct ath10k *ar = hw->priv; | ||
2632 | int ret; | ||
2633 | |||
2634 | /* mac80211 doesn't care if we really xmit queued frames or not; | ||
2635 | * we'll collect those frames either way if we stop/delete vdevs */ | ||
2636 | if (drop) | ||
2637 | return; | ||
2638 | |||
2639 | ret = wait_event_timeout(ar->htt->empty_tx_wq, ({ | ||
2640 | bool empty; | ||
2641 | spin_lock_bh(&ar->htt->tx_lock); | ||
2642 | empty = bitmap_empty(ar->htt->used_msdu_ids, | ||
2643 | ar->htt->max_num_pending_tx); | ||
2644 | spin_unlock_bh(&ar->htt->tx_lock); | ||
2645 | (empty); | ||
2646 | }), ATH10K_FLUSH_TIMEOUT_HZ); | ||
2647 | if (ret <= 0) | ||
2648 | ath10k_warn("tx not flushed\n"); | ||
2649 | } | ||
2650 | |||
2651 | /* TODO: Implement this function properly | ||
2652 | * For now it is needed to reply to Probe Requests in IBSS mode. | ||
2653 | * Probably we need this information from FW. | ||
2654 | */ | ||
2655 | static int ath10k_tx_last_beacon(struct ieee80211_hw *hw) | ||
2656 | { | ||
2657 | return 1; | ||
2658 | } | ||
2659 | |||
2660 | static const struct ieee80211_ops ath10k_ops = { | ||
2661 | .tx = ath10k_tx, | ||
2662 | .start = ath10k_start, | ||
2663 | .stop = ath10k_stop, | ||
2664 | .config = ath10k_config, | ||
2665 | .add_interface = ath10k_add_interface, | ||
2666 | .remove_interface = ath10k_remove_interface, | ||
2667 | .configure_filter = ath10k_configure_filter, | ||
2668 | .bss_info_changed = ath10k_bss_info_changed, | ||
2669 | .hw_scan = ath10k_hw_scan, | ||
2670 | .cancel_hw_scan = ath10k_cancel_hw_scan, | ||
2671 | .set_key = ath10k_set_key, | ||
2672 | .sta_state = ath10k_sta_state, | ||
2673 | .conf_tx = ath10k_conf_tx, | ||
2674 | .remain_on_channel = ath10k_remain_on_channel, | ||
2675 | .cancel_remain_on_channel = ath10k_cancel_remain_on_channel, | ||
2676 | .set_rts_threshold = ath10k_set_rts_threshold, | ||
2677 | .set_frag_threshold = ath10k_set_frag_threshold, | ||
2678 | .flush = ath10k_flush, | ||
2679 | .tx_last_beacon = ath10k_tx_last_beacon, | ||
2680 | }; | ||
2681 | |||
2682 | #define RATETAB_ENT(_rate, _rateid, _flags) { \ | ||
2683 | .bitrate = (_rate), \ | ||
2684 | .flags = (_flags), \ | ||
2685 | .hw_value = (_rateid), \ | ||
2686 | } | ||
2687 | |||
2688 | #define CHAN2G(_channel, _freq, _flags) { \ | ||
2689 | .band = IEEE80211_BAND_2GHZ, \ | ||
2690 | .hw_value = (_channel), \ | ||
2691 | .center_freq = (_freq), \ | ||
2692 | .flags = (_flags), \ | ||
2693 | .max_antenna_gain = 0, \ | ||
2694 | .max_power = 30, \ | ||
2695 | } | ||
2696 | |||
2697 | #define CHAN5G(_channel, _freq, _flags) { \ | ||
2698 | .band = IEEE80211_BAND_5GHZ, \ | ||
2699 | .hw_value = (_channel), \ | ||
2700 | .center_freq = (_freq), \ | ||
2701 | .flags = (_flags), \ | ||
2702 | .max_antenna_gain = 0, \ | ||
2703 | .max_power = 30, \ | ||
2704 | } | ||
2705 | |||
2706 | static const struct ieee80211_channel ath10k_2ghz_channels[] = { | ||
2707 | CHAN2G(1, 2412, 0), | ||
2708 | CHAN2G(2, 2417, 0), | ||
2709 | CHAN2G(3, 2422, 0), | ||
2710 | CHAN2G(4, 2427, 0), | ||
2711 | CHAN2G(5, 2432, 0), | ||
2712 | CHAN2G(6, 2437, 0), | ||
2713 | CHAN2G(7, 2442, 0), | ||
2714 | CHAN2G(8, 2447, 0), | ||
2715 | CHAN2G(9, 2452, 0), | ||
2716 | CHAN2G(10, 2457, 0), | ||
2717 | CHAN2G(11, 2462, 0), | ||
2718 | CHAN2G(12, 2467, 0), | ||
2719 | CHAN2G(13, 2472, 0), | ||
2720 | CHAN2G(14, 2484, 0), | ||
2721 | }; | ||
2722 | |||
2723 | static const struct ieee80211_channel ath10k_5ghz_channels[] = { | ||
2724 | CHAN5G(36, 5180, 14), | ||
2725 | CHAN5G(40, 5200, 15), | ||
2726 | CHAN5G(44, 5220, 16), | ||
2727 | CHAN5G(48, 5240, 17), | ||
2728 | CHAN5G(52, 5260, 18), | ||
2729 | CHAN5G(56, 5280, 19), | ||
2730 | CHAN5G(60, 5300, 20), | ||
2731 | CHAN5G(64, 5320, 21), | ||
2732 | CHAN5G(100, 5500, 22), | ||
2733 | CHAN5G(104, 5520, 23), | ||
2734 | CHAN5G(108, 5540, 24), | ||
2735 | CHAN5G(112, 5560, 25), | ||
2736 | CHAN5G(116, 5580, 26), | ||
2737 | CHAN5G(120, 5600, 27), | ||
2738 | CHAN5G(124, 5620, 28), | ||
2739 | CHAN5G(128, 5640, 29), | ||
2740 | CHAN5G(132, 5660, 30), | ||
2741 | CHAN5G(136, 5680, 31), | ||
2742 | CHAN5G(140, 5700, 32), | ||
2743 | CHAN5G(149, 5745, 33), | ||
2744 | CHAN5G(153, 5765, 34), | ||
2745 | CHAN5G(157, 5785, 35), | ||
2746 | CHAN5G(161, 5805, 36), | ||
2747 | CHAN5G(165, 5825, 37), | ||
2748 | }; | ||
2749 | |||
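The channel tables above follow the standard IEEE numbering: in 2.4 GHz the center frequency is 2407 + 5 * channel MHz, with channel 14 as the lone exception at 2484 MHz, and in 5 GHz it is 5000 + 5 * channel MHz. A quick sketch checking that relation against the entries above:

    /* Standalone sketch verifying the channel/frequency mapping used in
     * ath10k_2ghz_channels[] and ath10k_5ghz_channels[]. */
    #include <stdio.h>

    static int chan2g_freq(int chan)
    {
            return chan == 14 ? 2484 : 2407 + chan * 5;
    }

    static int chan5g_freq(int chan)
    {
            return 5000 + chan * 5;
    }

    int main(void)
    {
            printf("2G ch 1   -> %d MHz\n", chan2g_freq(1));        /* 2412 */
            printf("2G ch 14  -> %d MHz\n", chan2g_freq(14));       /* 2484 */
            printf("5G ch 36  -> %d MHz\n", chan5g_freq(36));       /* 5180 */
            printf("5G ch 165 -> %d MHz\n", chan5g_freq(165));      /* 5825 */
            return 0;
    }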
2750 | static struct ieee80211_rate ath10k_rates[] = { | ||
2751 | /* CCK */ | ||
2752 | RATETAB_ENT(10, 0x82, 0), | ||
2753 | RATETAB_ENT(20, 0x84, 0), | ||
2754 | RATETAB_ENT(55, 0x8b, 0), | ||
2755 | RATETAB_ENT(110, 0x96, 0), | ||
2756 | /* OFDM */ | ||
2757 | RATETAB_ENT(60, 0x0c, 0), | ||
2758 | RATETAB_ENT(90, 0x12, 0), | ||
2759 | RATETAB_ENT(120, 0x18, 0), | ||
2760 | RATETAB_ENT(180, 0x24, 0), | ||
2761 | RATETAB_ENT(240, 0x30, 0), | ||
2762 | RATETAB_ENT(360, 0x48, 0), | ||
2763 | RATETAB_ENT(480, 0x60, 0), | ||
2764 | RATETAB_ENT(540, 0x6c, 0), | ||
2765 | }; | ||
2766 | |||
2767 | #define ath10k_a_rates (ath10k_rates + 4) | ||
2768 | #define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - 4) | ||
2769 | #define ath10k_g_rates (ath10k_rates + 0) | ||
2770 | #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates)) | ||
2771 | |||
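The offsets behind ath10k_a_rates/ath10k_g_rates express one band rule: 2.4 GHz (g) advertises the whole table, while 5 GHz (a) starts four entries in, skipping the CCK rates that are not valid in 5 GHz. The same pointer arithmetic in a standalone sketch (bitrates are in 100 kbit/s units, as in mac80211):

    /* Standalone sketch of the CCK/OFDM split done with the rate-table offsets. */
    #include <stdio.h>

    /* bitrates in 100 kbit/s units, CCK first, then OFDM (as in ath10k_rates[]) */
    static const int rates[] = { 10, 20, 55, 110, 60, 90, 120, 180,
                                 240, 360, 480, 540 };
    #define N_RATES         (int)(sizeof(rates) / sizeof(rates[0]))

    #define A_RATES         (rates + 4)     /* 5 GHz: skip the 4 CCK rates */
    #define A_RATES_SIZE    (N_RATES - 4)
    #define G_RATES         (rates + 0)     /* 2.4 GHz: all rates */
    #define G_RATES_SIZE    N_RATES

    int main(void)
    {
            printf("5 GHz:   %d rates, lowest %d.%d Mbps\n",
                   A_RATES_SIZE, A_RATES[0] / 10, A_RATES[0] % 10);     /* 8, 6.0 */
            printf("2.4 GHz: %d rates, lowest %d.%d Mbps\n",
                   G_RATES_SIZE, G_RATES[0] / 10, G_RATES[0] % 10);     /* 12, 1.0 */
            return 0;
    }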
2772 | struct ath10k *ath10k_mac_create(void) | ||
2773 | { | ||
2774 | struct ieee80211_hw *hw; | ||
2775 | struct ath10k *ar; | ||
2776 | |||
2777 | hw = ieee80211_alloc_hw(sizeof(struct ath10k), &ath10k_ops); | ||
2778 | if (!hw) | ||
2779 | return NULL; | ||
2780 | |||
2781 | ar = hw->priv; | ||
2782 | ar->hw = hw; | ||
2783 | |||
2784 | return ar; | ||
2785 | } | ||
2786 | |||
2787 | void ath10k_mac_destroy(struct ath10k *ar) | ||
2788 | { | ||
2789 | ieee80211_free_hw(ar->hw); | ||
2790 | } | ||
2791 | |||
2792 | static const struct ieee80211_iface_limit ath10k_if_limits[] = { | ||
2793 | { | ||
2794 | .max = 8, | ||
2795 | .types = BIT(NL80211_IFTYPE_STATION) | ||
2796 | | BIT(NL80211_IFTYPE_P2P_CLIENT) | ||
2797 | | BIT(NL80211_IFTYPE_P2P_GO) | ||
2798 | | BIT(NL80211_IFTYPE_AP) | ||
2799 | } | ||
2800 | }; | ||
2801 | |||
2802 | static const struct ieee80211_iface_combination ath10k_if_comb = { | ||
2803 | .limits = ath10k_if_limits, | ||
2804 | .n_limits = ARRAY_SIZE(ath10k_if_limits), | ||
2805 | .max_interfaces = 8, | ||
2806 | .num_different_channels = 1, | ||
2807 | .beacon_int_infra_match = true, | ||
2808 | }; | ||
2809 | |||
2810 | static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) | ||
2811 | { | ||
2812 | struct ieee80211_sta_vht_cap vht_cap = {0}; | ||
2813 | u16 mcs_map; | ||
2814 | |||
2815 | vht_cap.vht_supported = 1; | ||
2816 | vht_cap.cap = ar->vht_cap_info; | ||
2817 | |||
2818 | /* FIXME: check dynamically how many streams board supports */ | ||
2819 | mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | | ||
2820 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | | ||
2821 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 | | ||
2822 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | | ||
2823 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | | ||
2824 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | | ||
2825 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | | ||
2826 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 14; | ||
2827 | |||
2828 | vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); | ||
2829 | vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); | ||
2830 | |||
2831 | return vht_cap; | ||
2832 | } | ||
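The mcs_map built in ath10k_create_vht_cap() packs one 2-bit field per spatial stream into a 16-bit word: streams 1-3 advertise MCS 0-9 and streams 4-8 are marked unsupported (hence the FIXME about detecting the real stream count). A sketch of that encoding, using the same field values as the mac80211 enums (MCS 0-9 = 2, not supported = 3):

    /* Standalone sketch of the per-stream VHT MCS map encoding built above. */
    #include <stdint.h>
    #include <stdio.h>

    #define VHT_MCS_0_9             2
    #define VHT_MCS_NOT_SUPPORTED   3

    static uint16_t build_mcs_map(int nss)          /* nss = supported streams */
    {
            uint16_t map = 0;
            int i;

            for (i = 0; i < 8; i++) {               /* 8 streams, 2 bits each */
                    uint16_t field = (i < nss) ? VHT_MCS_0_9
                                               : VHT_MCS_NOT_SUPPORTED;
                    map |= field << (i * 2);
            }
            return map;
    }

    int main(void)
    {
            /* 3 streams, as hard-coded in ath10k_create_vht_cap() */
            printf("mcs_map = 0x%04x\n", (unsigned)build_mcs_map(3));   /* 0xffea */
            return 0;
    }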
2833 | |||
2834 | static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) | ||
2835 | { | ||
2836 | int i; | ||
2837 | struct ieee80211_sta_ht_cap ht_cap = {0}; | ||
2838 | |||
2839 | if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED)) | ||
2840 | return ht_cap; | ||
2841 | |||
2842 | ht_cap.ht_supported = 1; | ||
2843 | ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; | ||
2844 | ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; | ||
2845 | ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; | ||
2846 | ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; | ||
2847 | ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT; | ||
2848 | |||
2849 | if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI) | ||
2850 | ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; | ||
2851 | |||
2852 | if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI) | ||
2853 | ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; | ||
2854 | |||
2855 | if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) { | ||
2856 | u32 smps; | ||
2857 | |||
2858 | smps = WLAN_HT_CAP_SM_PS_DYNAMIC; | ||
2859 | smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; | ||
2860 | |||
2861 | ht_cap.cap |= smps; | ||
2862 | } | ||
2863 | |||
2864 | if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC) | ||
2865 | ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; | ||
2866 | |||
2867 | if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) { | ||
2868 | u32 stbc; | ||
2869 | |||
2870 | stbc = ar->ht_cap_info; | ||
2871 | stbc &= WMI_HT_CAP_RX_STBC; | ||
2872 | stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT; | ||
2873 | stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT; | ||
2874 | stbc &= IEEE80211_HT_CAP_RX_STBC; | ||
2875 | |||
2876 | ht_cap.cap |= stbc; | ||
2877 | } | ||
2878 | |||
2879 | if (ar->ht_cap_info & WMI_HT_CAP_LDPC) | ||
2880 | ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; | ||
2881 | |||
2882 | if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT) | ||
2883 | ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT; | ||
2884 | |||
2885 | /* max AMSDU is implicitly taken from vht_cap_info */ | ||
2886 | if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK) | ||
2887 | ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; | ||
2888 | |||
2889 | for (i = 0; i < WMI_MAX_SPATIAL_STREAM; i++) | ||
2890 | ht_cap.mcs.rx_mask[i] = 0xFF; | ||
2891 | |||
2892 | ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; | ||
2893 | |||
2894 | return ht_cap; | ||
2895 | } | ||
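The RX STBC handling in ath10k_get_ht_cap() is a field relocation: the firmware reports the supported STBC stream count at its own bit position, and the same two-bit value has to be re-placed at bits 8-9 of the IEEE HT capability info (IEEE80211_HT_CAP_RX_STBC). A generic sketch of the mask/shift/shift/mask sequence; the WMI-side mask and shift below are assumed values, for illustration only:

    /* Standalone sketch of the RX STBC field relocation in ath10k_get_ht_cap().
     * The WMI-side mask/shift are illustrative, not the real WMI values. */
    #include <stdint.h>
    #include <stdio.h>

    #define WMI_RX_STBC_MASK        0x00000030      /* assumed source position */
    #define WMI_RX_STBC_SHIFT       4
    #define HT_CAP_RX_STBC_MASK     0x0300          /* IEEE HT cap info, bits 8-9 */
    #define HT_CAP_RX_STBC_SHIFT    8

    static uint16_t relocate_rx_stbc(uint32_t wmi_ht_cap)
    {
            uint32_t stbc = wmi_ht_cap;

            stbc &= WMI_RX_STBC_MASK;               /* isolate the 2-bit count */
            stbc >>= WMI_RX_STBC_SHIFT;             /* normalize to bit 0 */
            stbc <<= HT_CAP_RX_STBC_SHIFT;          /* move to the IEEE position */
            stbc &= HT_CAP_RX_STBC_MASK;            /* clamp to the field width */

            return (uint16_t)stbc;
    }

    int main(void)
    {
            /* firmware says "2 STBC streams" at the assumed source position */
            printf("ht_cap bits = 0x%04x\n",
                   (unsigned)relocate_rx_stbc(2u << 4));        /* 0x0200 */
            return 0;
    }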
2896 | |||
2897 | |||
2898 | static void ath10k_get_arvif_iter(void *data, u8 *mac, | ||
2899 | struct ieee80211_vif *vif) | ||
2900 | { | ||
2901 | struct ath10k_vif_iter *arvif_iter = data; | ||
2902 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | ||
2903 | |||
2904 | if (arvif->vdev_id == arvif_iter->vdev_id) | ||
2905 | arvif_iter->arvif = arvif; | ||
2906 | } | ||
2907 | |||
2908 | struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id) | ||
2909 | { | ||
2910 | struct ath10k_vif_iter arvif_iter; | ||
2911 | u32 flags; | ||
2912 | |||
2913 | memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter)); | ||
2914 | arvif_iter.vdev_id = vdev_id; | ||
2915 | |||
2916 | flags = IEEE80211_IFACE_ITER_RESUME_ALL; | ||
2917 | ieee80211_iterate_active_interfaces_atomic(ar->hw, | ||
2918 | flags, | ||
2919 | ath10k_get_arvif_iter, | ||
2920 | &arvif_iter); | ||
2921 | if (!arvif_iter.arvif) { | ||
2922 | ath10k_warn("No VIF found for VDEV: %d\n", vdev_id); | ||
2923 | return NULL; | ||
2924 | } | ||
2925 | |||
2926 | return arvif_iter.arvif; | ||
2927 | } | ||
2928 | |||
2929 | int ath10k_mac_register(struct ath10k *ar) | ||
2930 | { | ||
2931 | struct ieee80211_supported_band *band; | ||
2932 | struct ieee80211_sta_vht_cap vht_cap; | ||
2933 | struct ieee80211_sta_ht_cap ht_cap; | ||
2934 | void *channels; | ||
2935 | int ret; | ||
2936 | |||
2937 | SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr); | ||
2938 | |||
2939 | SET_IEEE80211_DEV(ar->hw, ar->dev); | ||
2940 | |||
2941 | ht_cap = ath10k_get_ht_cap(ar); | ||
2942 | vht_cap = ath10k_create_vht_cap(ar); | ||
2943 | |||
2944 | if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { | ||
2945 | channels = kmemdup(ath10k_2ghz_channels, | ||
2946 | sizeof(ath10k_2ghz_channels), | ||
2947 | GFP_KERNEL); | ||
2948 | if (!channels) | ||
2949 | return -ENOMEM; | ||
2950 | |||
2951 | band = &ar->mac.sbands[IEEE80211_BAND_2GHZ]; | ||
2952 | band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels); | ||
2953 | band->channels = channels; | ||
2954 | band->n_bitrates = ath10k_g_rates_size; | ||
2955 | band->bitrates = ath10k_g_rates; | ||
2956 | band->ht_cap = ht_cap; | ||
2957 | |||
2958 | /* vht is not supported in 2.4 GHz */ | ||
2959 | |||
2960 | ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band; | ||
2961 | } | ||
2962 | |||
2963 | if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { | ||
2964 | channels = kmemdup(ath10k_5ghz_channels, | ||
2965 | sizeof(ath10k_5ghz_channels), | ||
2966 | GFP_KERNEL); | ||
2967 | if (!channels) { | ||
2968 | if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { | ||
2969 | band = &ar->mac.sbands[IEEE80211_BAND_2GHZ]; | ||
2970 | kfree(band->channels); | ||
2971 | } | ||
2972 | return -ENOMEM; | ||
2973 | } | ||
2974 | |||
2975 | band = &ar->mac.sbands[IEEE80211_BAND_5GHZ]; | ||
2976 | band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels); | ||
2977 | band->channels = channels; | ||
2978 | band->n_bitrates = ath10k_a_rates_size; | ||
2979 | band->bitrates = ath10k_a_rates; | ||
2980 | band->ht_cap = ht_cap; | ||
2981 | band->vht_cap = vht_cap; | ||
2982 | ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band; | ||
2983 | } | ||
2984 | |||
2985 | ar->hw->wiphy->interface_modes = | ||
2986 | BIT(NL80211_IFTYPE_STATION) | | ||
2987 | BIT(NL80211_IFTYPE_ADHOC) | | ||
2988 | BIT(NL80211_IFTYPE_AP) | | ||
2989 | BIT(NL80211_IFTYPE_P2P_CLIENT) | | ||
2990 | BIT(NL80211_IFTYPE_P2P_GO); | ||
2991 | |||
2992 | ar->hw->flags = IEEE80211_HW_SIGNAL_DBM | | ||
2993 | IEEE80211_HW_SUPPORTS_PS | | ||
2994 | IEEE80211_HW_SUPPORTS_DYNAMIC_PS | | ||
2995 | IEEE80211_HW_SUPPORTS_UAPSD | | ||
2996 | IEEE80211_HW_MFP_CAPABLE | | ||
2997 | IEEE80211_HW_REPORTS_TX_ACK_STATUS | | ||
2998 | IEEE80211_HW_HAS_RATE_CONTROL | | ||
2999 | IEEE80211_HW_SUPPORTS_STATIC_SMPS | | ||
3000 | IEEE80211_HW_WANT_MONITOR_VIF | | ||
3001 | IEEE80211_HW_AP_LINK_PS; | ||
3002 | |||
3003 | if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) | ||
3004 | ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS; | ||
3005 | |||
3006 | if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) { | ||
3007 | ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; | ||
3008 | ar->hw->flags |= IEEE80211_HW_TX_AMPDU_SETUP_IN_HW; | ||
3009 | } | ||
3010 | |||
3011 | ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID; | ||
3012 | ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN; | ||
3013 | |||
3014 | ar->hw->vif_data_size = sizeof(struct ath10k_vif); | ||
3015 | |||
3016 | ar->hw->channel_change_time = 5000; | ||
3017 | ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL; | ||
3018 | |||
3019 | ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; | ||
3020 | ar->hw->wiphy->max_remain_on_channel_duration = 5000; | ||
3021 | |||
3022 | ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; | ||
3023 | /* | ||
3024 | * on LL hardware, queues are managed entirely by the FW, | ||
3025 | * so we only advertise to mac80211 that we can do the queues thing | ||
3026 | */ | ||
3027 | ar->hw->queues = 4; | ||
3028 | |||
3029 | ar->hw->wiphy->iface_combinations = &ath10k_if_comb; | ||
3030 | ar->hw->wiphy->n_iface_combinations = 1; | ||
3031 | |||
3032 | ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy, | ||
3033 | ath10k_reg_notifier); | ||
3034 | if (ret) { | ||
3035 | ath10k_err("Regulatory initialization failed\n"); | ||
3036 | return ret; | ||
3037 | } | ||
3038 | |||
3039 | ret = ieee80211_register_hw(ar->hw); | ||
3040 | if (ret) { | ||
3041 | ath10k_err("ieee80211 registration failed: %d\n", ret); | ||
3042 | return ret; | ||
3043 | } | ||
3044 | |||
3045 | if (!ath_is_world_regd(&ar->ath_common.regulatory)) { | ||
3046 | ret = regulatory_hint(ar->hw->wiphy, | ||
3047 | ar->ath_common.regulatory.alpha2); | ||
3048 | if (ret) | ||
3049 | goto exit; | ||
3050 | } | ||
3051 | |||
3052 | return 0; | ||
3053 | exit: | ||
3054 | ieee80211_unregister_hw(ar->hw); | ||
3055 | return ret; | ||
3056 | } | ||
3057 | |||
3058 | void ath10k_mac_unregister(struct ath10k *ar) | ||
3059 | { | ||
3060 | ieee80211_unregister_hw(ar->hw); | ||
3061 | |||
3062 | kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels); | ||
3063 | kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels); | ||
3064 | |||
3065 | SET_IEEE80211_DEV(ar->hw, NULL); | ||
3066 | } | ||
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h new file mode 100644 index 000000000000..27fc92e58829 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/mac.h | |||
@@ -0,0 +1,61 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #ifndef _MAC_H_ | ||
19 | #define _MAC_H_ | ||
20 | |||
21 | #include <net/mac80211.h> | ||
22 | #include "core.h" | ||
23 | |||
24 | struct ath10k_generic_iter { | ||
25 | struct ath10k *ar; | ||
26 | int ret; | ||
27 | }; | ||
28 | |||
29 | struct ath10k *ath10k_mac_create(void); | ||
30 | void ath10k_mac_destroy(struct ath10k *ar); | ||
31 | int ath10k_mac_register(struct ath10k *ar); | ||
32 | void ath10k_mac_unregister(struct ath10k *ar); | ||
33 | struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id); | ||
34 | void ath10k_reset_scan(unsigned long ptr); | ||
35 | void ath10k_offchan_tx_purge(struct ath10k *ar); | ||
36 | void ath10k_offchan_tx_work(struct work_struct *work); | ||
37 | |||
38 | static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif) | ||
39 | { | ||
40 | return (struct ath10k_vif *)vif->drv_priv; | ||
41 | } | ||
42 | |||
43 | static inline void ath10k_tx_h_seq_no(struct sk_buff *skb) | ||
44 | { | ||
45 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
46 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
47 | struct ieee80211_vif *vif = info->control.vif; | ||
48 | struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); | ||
49 | |||
50 | if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { | ||
51 | if (arvif->tx_seq_no == 0) | ||
52 | arvif->tx_seq_no = 0x1000; | ||
53 | |||
54 | if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) | ||
55 | arvif->tx_seq_no += 0x10; | ||
56 | hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); | ||
57 | hdr->seq_ctrl |= cpu_to_le16(arvif->tx_seq_no); | ||
58 | } | ||
59 | } | ||
60 | |||
61 | #endif /* _MAC_H_ */ | ||
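The +0x10 step in ath10k_tx_h_seq_no() above comes from the 802.11 Sequence Control layout: the fragment number sits in bits 0-3 (IEEE80211_SCTL_FRAG, 0x000f) and the sequence number in bits 4-15, so adding 0x10 advances the sequence number by one while the AND/OR pair preserves the frame's fragment bits. A standalone sketch of that arithmetic:

    /* Standalone sketch of the Sequence Control arithmetic in ath10k_tx_h_seq_no(). */
    #include <stdint.h>
    #include <stdio.h>

    #define SCTL_FRAG       0x000f  /* fragment number, bits 0-3  */
    #define SCTL_SEQ        0xfff0  /* sequence number, bits 4-15 */

    int main(void)
    {
            uint16_t seq_ctrl = 0;
            uint16_t tx_seq_no = 0x1000;    /* driver's starting value */

            tx_seq_no += 0x10;              /* first fragment of a new MSDU: seq + 1 */

            seq_ctrl &= SCTL_FRAG;          /* keep the fragment number... */
            seq_ctrl |= tx_seq_no;          /* ...install the new sequence number */

            printf("seq_ctrl = 0x%04x (seq %u, frag %u)\n",
                   (unsigned)seq_ctrl,
                   (unsigned)((seq_ctrl & SCTL_SEQ) >> 4),
                   (unsigned)(seq_ctrl & SCTL_FRAG));
            return 0;
    }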
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c new file mode 100644 index 000000000000..c8e905669701 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/pci.c | |||
@@ -0,0 +1,2506 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #include <linux/pci.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/spinlock.h> | ||
22 | |||
23 | #include "core.h" | ||
24 | #include "debug.h" | ||
25 | |||
26 | #include "targaddrs.h" | ||
27 | #include "bmi.h" | ||
28 | |||
29 | #include "hif.h" | ||
30 | #include "htc.h" | ||
31 | |||
32 | #include "ce.h" | ||
33 | #include "pci.h" | ||
34 | |||
35 | unsigned int ath10k_target_ps; | ||
36 | module_param(ath10k_target_ps, uint, 0644); | ||
37 | MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option"); | ||
38 | |||
39 | #define QCA988X_1_0_DEVICE_ID (0xabcd) | ||
40 | #define QCA988X_2_0_DEVICE_ID (0x003c) | ||
41 | |||
42 | static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = { | ||
43 | { PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */ | ||
44 | { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ | ||
45 | {0} | ||
46 | }; | ||
47 | |||
48 | static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address, | ||
49 | u32 *data); | ||
50 | |||
51 | static void ath10k_pci_process_ce(struct ath10k *ar); | ||
52 | static int ath10k_pci_post_rx(struct ath10k *ar); | ||
53 | static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info, | ||
54 | int num); | ||
55 | static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info); | ||
56 | static void ath10k_pci_stop_ce(struct ath10k *ar); | ||
57 | |||
58 | static const struct ce_attr host_ce_config_wlan[] = { | ||
59 | /* host->target HTC control and raw streams */ | ||
60 | { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,}, | ||
61 | /* could be moved to share CE3 */ | ||
62 | /* target->host HTT + HTC control */ | ||
63 | { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,}, | ||
64 | /* target->host WMI */ | ||
65 | { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,}, | ||
66 | /* host->target WMI */ | ||
67 | { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, | ||
68 | /* host->target HTT */ | ||
69 | { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0, | ||
70 | CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,}, | ||
71 | /* unused */ | ||
72 | { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, | ||
73 | /* Target autonomous hif_memcpy */ | ||
74 | { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, | ||
75 | /* ce_diag, the Diagnostic Window */ | ||
76 | { /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,}, | ||
77 | }; | ||
78 | |||
79 | /* Target firmware's Copy Engine configuration. */ | ||
80 | static const struct ce_pipe_config target_ce_config_wlan[] = { | ||
81 | /* host->target HTC control and raw streams */ | ||
82 | { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,}, | ||
83 | /* target->host HTT + HTC control */ | ||
84 | { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,}, | ||
85 | /* target->host WMI */ | ||
86 | { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, | ||
87 | /* host->target WMI */ | ||
88 | { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, | ||
89 | /* host->target HTT */ | ||
90 | { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,}, | ||
91 | /* NB: 50% of src nentries, since tx has 2 frags */ | ||
92 | /* unused */ | ||
93 | { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, | ||
94 | /* Reserved for target autonomous hif_memcpy */ | ||
95 | { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,}, | ||
96 | /* CE7 used only by Host */ | ||
97 | }; | ||
98 | |||
99 | /* | ||
100 | * Diagnostic read/write access is provided for startup/config/debug usage. | ||
101 | * Caller must guarantee proper alignment, when applicable, and that there | ||
102 | * is only a single user at any given time. | ||
103 | */ | ||
104 | static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, | ||
105 | int nbytes) | ||
106 | { | ||
107 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
108 | int ret = 0; | ||
109 | u32 buf; | ||
110 | unsigned int completed_nbytes, orig_nbytes, remaining_bytes; | ||
111 | unsigned int id; | ||
112 | unsigned int flags; | ||
113 | struct ce_state *ce_diag; | ||
114 | /* Host buffer address in CE space */ | ||
115 | u32 ce_data; | ||
116 | dma_addr_t ce_data_base = 0; | ||
117 | void *data_buf = NULL; | ||
118 | int i; | ||
119 | |||
120 | /* | ||
121 | * This code cannot handle reads to non-memory space. Redirect to the | ||
122 | * register read function, but preserve the multi-word read | ||
123 | * capability of this function. | ||
124 | */ | ||
125 | if (address < DRAM_BASE_ADDRESS) { | ||
126 | if (!IS_ALIGNED(address, 4) || | ||
127 | !IS_ALIGNED((unsigned long)data, 4)) | ||
128 | return -EIO; | ||
129 | |||
130 | while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access( | ||
131 | ar, address, (u32 *)data)) == 0)) { | ||
132 | nbytes -= sizeof(u32); | ||
133 | address += sizeof(u32); | ||
134 | data += sizeof(u32); | ||
135 | } | ||
136 | return ret; | ||
137 | } | ||
138 | |||
139 | ce_diag = ar_pci->ce_diag; | ||
140 | |||
141 | /* | ||
142 | * Allocate a temporary bounce buffer to hold caller's data | ||
143 | * to be DMA'ed from Target. This guarantees | ||
144 | * 1) 4-byte alignment | ||
145 | * 2) Buffer in DMA-able space | ||
146 | */ | ||
147 | orig_nbytes = nbytes; | ||
148 | data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev, | ||
149 | orig_nbytes, | ||
150 | &ce_data_base); | ||
151 | |||
152 | if (!data_buf) { | ||
153 | ret = -ENOMEM; | ||
154 | goto done; | ||
155 | } | ||
156 | memset(data_buf, 0, orig_nbytes); | ||
157 | |||
158 | remaining_bytes = orig_nbytes; | ||
159 | ce_data = ce_data_base; | ||
160 | while (remaining_bytes) { | ||
161 | nbytes = min_t(unsigned int, remaining_bytes, | ||
162 | DIAG_TRANSFER_LIMIT); | ||
163 | |||
164 | ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data); | ||
165 | if (ret != 0) | ||
166 | goto done; | ||
167 | |||
168 | /* Request CE to send from Target(!) address to Host buffer */ | ||
169 | /* | ||
170 | * The address supplied by the caller is in the | ||
171 | * Target CPU virtual address space. | ||
172 | * | ||
173 | * In order to use this address with the diagnostic CE, | ||
174 | * convert it from Target CPU virtual address space | ||
175 | * to CE address space | ||
176 | */ | ||
177 | ath10k_pci_wake(ar); | ||
178 | address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, | ||
179 | address); | ||
180 | ath10k_pci_sleep(ar); | ||
181 | |||
182 | ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, | ||
183 | 0); | ||
184 | if (ret) | ||
185 | goto done; | ||
186 | |||
187 | i = 0; | ||
188 | while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf, | ||
189 | &completed_nbytes, | ||
190 | &id) != 0) { | ||
191 | mdelay(1); | ||
192 | if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { | ||
193 | ret = -EBUSY; | ||
194 | goto done; | ||
195 | } | ||
196 | } | ||
197 | |||
198 | if (nbytes != completed_nbytes) { | ||
199 | ret = -EIO; | ||
200 | goto done; | ||
201 | } | ||
202 | |||
203 | if (buf != (u32) address) { | ||
204 | ret = -EIO; | ||
205 | goto done; | ||
206 | } | ||
207 | |||
208 | i = 0; | ||
209 | while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf, | ||
210 | &completed_nbytes, | ||
211 | &id, &flags) != 0) { | ||
212 | mdelay(1); | ||
213 | |||
214 | if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { | ||
215 | ret = -EBUSY; | ||
216 | goto done; | ||
217 | } | ||
218 | } | ||
219 | |||
220 | if (nbytes != completed_nbytes) { | ||
221 | ret = -EIO; | ||
222 | goto done; | ||
223 | } | ||
224 | |||
225 | if (buf != ce_data) { | ||
226 | ret = -EIO; | ||
227 | goto done; | ||
228 | } | ||
229 | |||
230 | remaining_bytes -= nbytes; | ||
231 | address += nbytes; | ||
232 | ce_data += nbytes; | ||
233 | } | ||
234 | |||
235 | done: | ||
236 | if (ret == 0) { | ||
237 | /* Copy data from allocated DMA buf to caller's buf */ | ||
238 | WARN_ON_ONCE(orig_nbytes & 3); | ||
239 | for (i = 0; i < orig_nbytes / sizeof(__le32); i++) { | ||
240 | ((u32 *)data)[i] = | ||
241 | __le32_to_cpu(((__le32 *)data_buf)[i]); | ||
242 | } | ||
243 | } else | ||
244 | ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", | ||
245 | __func__, address); | ||
246 | |||
247 | if (data_buf) | ||
248 | pci_free_consistent(ar_pci->pdev, orig_nbytes, | ||
249 | data_buf, ce_data_base); | ||
250 | |||
251 | return ret; | ||
252 | } | ||
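| /* | ||
| * The read above proceeds in chunks of at most DIAG_TRANSFER_LIMIT bytes: | ||
| * for each chunk a slot of the DMA bounce buffer is posted as a receive | ||
| * buffer on the diag CE, a send is issued from the converted target | ||
| * address, and both completions are polled for up to | ||
| * DIAG_ACCESS_CE_TIMEOUT_MS before the loop advances. On success the | ||
| * bounce buffer is converted from little-endian into the caller's buffer. | ||
| */ | ||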
253 | |||
254 | /* Read 4-byte aligned data from Target memory or register */ | ||
255 | static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address, | ||
256 | u32 *data) | ||
257 | { | ||
258 | /* Assume range doesn't cross this boundary */ | ||
259 | if (address >= DRAM_BASE_ADDRESS) | ||
260 | return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32)); | ||
261 | |||
262 | ath10k_pci_wake(ar); | ||
263 | *data = ath10k_pci_read32(ar, address); | ||
264 | ath10k_pci_sleep(ar); | ||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, | ||
269 | const void *data, int nbytes) | ||
270 | { | ||
271 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
272 | int ret = 0; | ||
273 | u32 buf; | ||
274 | unsigned int completed_nbytes, orig_nbytes, remaining_bytes; | ||
275 | unsigned int id; | ||
276 | unsigned int flags; | ||
277 | struct ce_state *ce_diag; | ||
278 | void *data_buf = NULL; | ||
279 | u32 ce_data; /* Host buffer address in CE space */ | ||
280 | dma_addr_t ce_data_base = 0; | ||
281 | int i; | ||
282 | |||
283 | ce_diag = ar_pci->ce_diag; | ||
284 | |||
285 | /* | ||
286 | * Allocate a temporary bounce buffer to hold caller's data | ||
287 | * to be DMA'ed to Target. This guarantees | ||
288 | * 1) 4-byte alignment | ||
289 | * 2) Buffer in DMA-able space | ||
290 | */ | ||
291 | orig_nbytes = nbytes; | ||
292 | data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev, | ||
293 | orig_nbytes, | ||
294 | &ce_data_base); | ||
295 | if (!data_buf) { | ||
296 | ret = -ENOMEM; | ||
297 | goto done; | ||
298 | } | ||
299 | |||
300 | /* Copy caller's data to allocated DMA buf */ | ||
301 | WARN_ON_ONCE(orig_nbytes & 3); | ||
302 | for (i = 0; i < orig_nbytes / sizeof(__le32); i++) | ||
303 | ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]); | ||
304 | |||
305 | /* | ||
306 | * The address supplied by the caller is in the | ||
307 | * Target CPU virtual address space. | ||
308 | * | ||
309 | * In order to use this address with the diagnostic CE, | ||
310 | * convert it from Target CPU virtual address space | ||
311 | * to CE address space | ||
314 | */ | ||
315 | ath10k_pci_wake(ar); | ||
316 | address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address); | ||
317 | ath10k_pci_sleep(ar); | ||
318 | |||
319 | remaining_bytes = orig_nbytes; | ||
320 | ce_data = ce_data_base; | ||
321 | while (remaining_bytes) { | ||
322 | /* FIXME: check cast */ | ||
323 | nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); | ||
324 | |||
325 | /* Set up to receive directly into Target(!) address */ | ||
326 | ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address); | ||
327 | if (ret != 0) | ||
328 | goto done; | ||
329 | |||
330 | /* | ||
331 | * Request CE to send caller-supplied data that | ||
332 | * was copied to bounce buffer to Target(!) address. | ||
333 | */ | ||
334 | ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data, | ||
335 | nbytes, 0, 0); | ||
336 | if (ret != 0) | ||
337 | goto done; | ||
338 | |||
339 | i = 0; | ||
340 | while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf, | ||
341 | &completed_nbytes, | ||
342 | &id) != 0) { | ||
343 | mdelay(1); | ||
344 | |||
345 | if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { | ||
346 | ret = -EBUSY; | ||
347 | goto done; | ||
348 | } | ||
349 | } | ||
350 | |||
351 | if (nbytes != completed_nbytes) { | ||
352 | ret = -EIO; | ||
353 | goto done; | ||
354 | } | ||
355 | |||
356 | if (buf != ce_data) { | ||
357 | ret = -EIO; | ||
358 | goto done; | ||
359 | } | ||
360 | |||
361 | i = 0; | ||
362 | while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf, | ||
363 | &completed_nbytes, | ||
364 | &id, &flags) != 0) { | ||
365 | mdelay(1); | ||
366 | |||
367 | if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { | ||
368 | ret = -EBUSY; | ||
369 | goto done; | ||
370 | } | ||
371 | } | ||
372 | |||
373 | if (nbytes != completed_nbytes) { | ||
374 | ret = -EIO; | ||
375 | goto done; | ||
376 | } | ||
377 | |||
378 | if (buf != address) { | ||
379 | ret = -EIO; | ||
380 | goto done; | ||
381 | } | ||
382 | |||
383 | remaining_bytes -= nbytes; | ||
384 | address += nbytes; | ||
385 | ce_data += nbytes; | ||
386 | } | ||
387 | |||
388 | done: | ||
389 | if (data_buf) { | ||
390 | pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf, | ||
391 | ce_data_base); | ||
392 | } | ||
393 | |||
394 | if (ret != 0) | ||
395 | ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__, | ||
396 | address); | ||
397 | |||
398 | return ret; | ||
399 | } | ||
400 | |||
401 | /* Write a 4-byte value to Target memory or a register */ | ||
402 | static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address, | ||
403 | u32 data) | ||
404 | { | ||
405 | /* Assume range doesn't cross this boundary */ | ||
406 | if (address >= DRAM_BASE_ADDRESS) | ||
407 | return ath10k_pci_diag_write_mem(ar, address, &data, | ||
408 | sizeof(u32)); | ||
409 | |||
410 | ath10k_pci_wake(ar); | ||
411 | ath10k_pci_write32(ar, address, data); | ||
412 | ath10k_pci_sleep(ar); | ||
413 | return 0; | ||
414 | } | ||
415 | |||
416 | static bool ath10k_pci_target_is_awake(struct ath10k *ar) | ||
417 | { | ||
418 | void __iomem *mem = ath10k_pci_priv(ar)->mem; | ||
419 | u32 val; | ||
420 | val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + | ||
421 | RTC_STATE_ADDRESS); | ||
422 | return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON); | ||
423 | } | ||
424 | |||
425 | static void ath10k_pci_wait(struct ath10k *ar) | ||
426 | { | ||
427 | int n = 100; | ||
428 | |||
429 | while (n-- && !ath10k_pci_target_is_awake(ar)) | ||
430 | msleep(10); | ||
431 | |||
432 | if (n < 0) | ||
433 | ath10k_warn("Unable to wakeup target\n"); | ||
434 | } | ||
435 | |||
436 | void ath10k_do_pci_wake(struct ath10k *ar) | ||
437 | { | ||
438 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
439 | void __iomem *pci_addr = ar_pci->mem; | ||
440 | int tot_delay = 0; | ||
441 | int curr_delay = 5; | ||
442 | |||
443 | if (atomic_read(&ar_pci->keep_awake_count) == 0) { | ||
444 | /* Force AWAKE */ | ||
445 | iowrite32(PCIE_SOC_WAKE_V_MASK, | ||
446 | pci_addr + PCIE_LOCAL_BASE_ADDRESS + | ||
447 | PCIE_SOC_WAKE_ADDRESS); | ||
448 | } | ||
449 | atomic_inc(&ar_pci->keep_awake_count); | ||
450 | |||
451 | if (ar_pci->verified_awake) | ||
452 | return; | ||
453 | |||
454 | for (;;) { | ||
455 | if (ath10k_pci_target_is_awake(ar)) { | ||
456 | ar_pci->verified_awake = true; | ||
457 | break; | ||
458 | } | ||
459 | |||
460 | if (tot_delay > PCIE_WAKE_TIMEOUT) { | ||
461 | ath10k_warn("target takes too long to wake up (awake count %d)\n", | ||
462 | atomic_read(&ar_pci->keep_awake_count)); | ||
463 | break; | ||
464 | } | ||
465 | |||
466 | udelay(curr_delay); | ||
467 | tot_delay += curr_delay; | ||
468 | |||
469 | if (curr_delay < 50) | ||
470 | curr_delay += 5; | ||
471 | } | ||
472 | } | ||
473 | |||
474 | void ath10k_do_pci_sleep(struct ath10k *ar) | ||
475 | { | ||
476 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
477 | void __iomem *pci_addr = ar_pci->mem; | ||
478 | |||
479 | if (atomic_dec_and_test(&ar_pci->keep_awake_count)) { | ||
480 | /* Allow sleep */ | ||
481 | ar_pci->verified_awake = false; | ||
482 | iowrite32(PCIE_SOC_WAKE_RESET, | ||
483 | pci_addr + PCIE_LOCAL_BASE_ADDRESS + | ||
484 | PCIE_SOC_WAKE_ADDRESS); | ||
485 | } | ||
486 | } | ||
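| /* | ||
| * ath10k_do_pci_wake() and ath10k_do_pci_sleep() form a refcounted pair: | ||
| * the first waker forces PCIE_SOC_WAKE_V_MASK and polls the RTC state | ||
| * with an increasing delay (5 us steps, capped at 50 us) for up to | ||
| * PCIE_WAKE_TIMEOUT; only when keep_awake_count drops back to zero is | ||
| * PCIE_SOC_WAKE_RESET written so the target may sleep again. | ||
| */ | ||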
487 | |||
488 | /* | ||
489 | * FIXME: Handle OOM properly. | ||
490 | */ | ||
491 | static inline | ||
492 | struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info) | ||
493 | { | ||
494 | struct ath10k_pci_compl *compl = NULL; | ||
495 | |||
496 | spin_lock_bh(&pipe_info->pipe_lock); | ||
497 | if (list_empty(&pipe_info->compl_free)) { | ||
498 | ath10k_warn("Completion buffers are full\n"); | ||
499 | goto exit; | ||
500 | } | ||
501 | compl = list_first_entry(&pipe_info->compl_free, | ||
502 | struct ath10k_pci_compl, list); | ||
503 | list_del(&compl->list); | ||
504 | exit: | ||
505 | spin_unlock_bh(&pipe_info->pipe_lock); | ||
506 | return compl; | ||
507 | } | ||
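| /* | ||
| * When the free list is empty, get_free_compl() returns NULL and the CE | ||
| * callbacks below stop draining further completions for that invocation, | ||
| * which is what the FIXME above refers to. | ||
| */ | ||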
508 | |||
509 | /* Called by lower (CE) layer when a send to Target completes. */ | ||
510 | static void ath10k_pci_ce_send_done(struct ce_state *ce_state, | ||
511 | void *transfer_context, | ||
512 | u32 ce_data, | ||
513 | unsigned int nbytes, | ||
514 | unsigned int transfer_id) | ||
515 | { | ||
516 | struct ath10k *ar = ce_state->ar; | ||
517 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
518 | struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id]; | ||
519 | struct ath10k_pci_compl *compl; | ||
520 | bool process = false; | ||
521 | |||
522 | do { | ||
523 | /* | ||
524 | * For the send completion of an item in sendlist, just | ||
525 | * increment num_sends_allowed. The upper layer callback will | ||
526 | * be triggered when the last fragment has been sent. | ||
527 | */ | ||
528 | if (transfer_context == CE_SENDLIST_ITEM_CTXT) { | ||
529 | spin_lock_bh(&pipe_info->pipe_lock); | ||
530 | pipe_info->num_sends_allowed++; | ||
531 | spin_unlock_bh(&pipe_info->pipe_lock); | ||
532 | continue; | ||
533 | } | ||
534 | |||
535 | compl = get_free_compl(pipe_info); | ||
536 | if (!compl) | ||
537 | break; | ||
538 | |||
539 | compl->send_or_recv = HIF_CE_COMPLETE_SEND; | ||
540 | compl->ce_state = ce_state; | ||
541 | compl->pipe_info = pipe_info; | ||
542 | compl->transfer_context = transfer_context; | ||
543 | compl->nbytes = nbytes; | ||
544 | compl->transfer_id = transfer_id; | ||
545 | compl->flags = 0; | ||
546 | |||
547 | /* | ||
548 | * Add the completion to the processing queue. | ||
549 | */ | ||
550 | spin_lock_bh(&ar_pci->compl_lock); | ||
551 | list_add_tail(&compl->list, &ar_pci->compl_process); | ||
552 | spin_unlock_bh(&ar_pci->compl_lock); | ||
553 | |||
554 | process = true; | ||
555 | } while (ath10k_ce_completed_send_next(ce_state, | ||
556 | &transfer_context, | ||
557 | &ce_data, &nbytes, | ||
558 | &transfer_id) == 0); | ||
559 | |||
560 | /* | ||
561 | * If only some of the items within a sendlist have completed, | ||
562 | * don't invoke completion processing until the entire sendlist | ||
563 | * has been sent. | ||
564 | */ | ||
565 | if (!process) | ||
566 | return; | ||
567 | |||
568 | ath10k_pci_process_ce(ar); | ||
569 | } | ||
570 | |||
571 | /* Called by lower (CE) layer when data is received from the Target. */ | ||
572 | static void ath10k_pci_ce_recv_data(struct ce_state *ce_state, | ||
573 | void *transfer_context, u32 ce_data, | ||
574 | unsigned int nbytes, | ||
575 | unsigned int transfer_id, | ||
576 | unsigned int flags) | ||
577 | { | ||
578 | struct ath10k *ar = ce_state->ar; | ||
579 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
580 | struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id]; | ||
581 | struct ath10k_pci_compl *compl; | ||
582 | struct sk_buff *skb; | ||
583 | |||
584 | do { | ||
585 | compl = get_free_compl(pipe_info); | ||
586 | if (!compl) | ||
587 | break; | ||
588 | |||
589 | compl->send_or_recv = HIF_CE_COMPLETE_RECV; | ||
590 | compl->ce_state = ce_state; | ||
591 | compl->pipe_info = pipe_info; | ||
592 | compl->transfer_context = transfer_context; | ||
593 | compl->nbytes = nbytes; | ||
594 | compl->transfer_id = transfer_id; | ||
595 | compl->flags = flags; | ||
596 | |||
597 | skb = transfer_context; | ||
598 | dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, | ||
599 | skb->len + skb_tailroom(skb), | ||
600 | DMA_FROM_DEVICE); | ||
601 | /* | ||
602 | * Add the completion to the processing queue. | ||
603 | */ | ||
604 | spin_lock_bh(&ar_pci->compl_lock); | ||
605 | list_add_tail(&compl->list, &ar_pci->compl_process); | ||
606 | spin_unlock_bh(&ar_pci->compl_lock); | ||
607 | |||
608 | } while (ath10k_ce_completed_recv_next(ce_state, | ||
609 | &transfer_context, | ||
610 | &ce_data, &nbytes, | ||
611 | &transfer_id, | ||
612 | &flags) == 0); | ||
613 | |||
614 | ath10k_pci_process_ce(ar); | ||
615 | } | ||
616 | |||
617 | /* Send the first 'bytes' bytes of the buffer (capped at the skb length) */ | ||
618 | static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id, | ||
619 | unsigned int transfer_id, | ||
620 | unsigned int bytes, struct sk_buff *nbuf) | ||
621 | { | ||
622 | struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf); | ||
623 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
624 | struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]); | ||
625 | struct ce_state *ce_hdl = pipe_info->ce_hdl; | ||
626 | struct ce_sendlist sendlist; | ||
627 | unsigned int len; | ||
628 | u32 flags = 0; | ||
629 | int ret; | ||
630 | |||
631 | memset(&sendlist, 0, sizeof(struct ce_sendlist)); | ||
632 | |||
633 | len = min(bytes, nbuf->len); | ||
634 | bytes -= len; | ||
635 | |||
636 | if (len & 3) | ||
637 | ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len); | ||
638 | |||
639 | ath10k_dbg(ATH10K_DBG_PCI, | ||
640 | "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n", | ||
641 | nbuf->data, (unsigned long long) skb_cb->paddr, | ||
642 | nbuf->len, len); | ||
643 | ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, | ||
644 | "ath10k tx: data: ", | ||
645 | nbuf->data, nbuf->len); | ||
646 | |||
647 | ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags); | ||
648 | |||
649 | /* Make sure we have resources to handle this request */ | ||
650 | spin_lock_bh(&pipe_info->pipe_lock); | ||
651 | if (!pipe_info->num_sends_allowed) { | ||
652 | ath10k_warn("Pipe: %d is full\n", pipe_id); | ||
653 | spin_unlock_bh(&pipe_info->pipe_lock); | ||
654 | return -ENOSR; | ||
655 | } | ||
656 | pipe_info->num_sends_allowed--; | ||
657 | spin_unlock_bh(&pipe_info->pipe_lock); | ||
658 | |||
659 | ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id); | ||
660 | if (ret) | ||
661 | ath10k_warn("CE send failed: %p\n", nbuf); | ||
662 | |||
663 | return ret; | ||
664 | } | ||
665 | |||
666 | static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) | ||
667 | { | ||
668 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
669 | struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]); | ||
670 | int ret; | ||
671 | |||
672 | spin_lock_bh(&pipe_info->pipe_lock); | ||
673 | ret = pipe_info->num_sends_allowed; | ||
674 | spin_unlock_bh(&pipe_info->pipe_lock); | ||
675 | |||
676 | return ret; | ||
677 | } | ||
678 | |||
679 | static void ath10k_pci_hif_dump_area(struct ath10k *ar) | ||
680 | { | ||
681 | u32 reg_dump_area = 0; | ||
682 | u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {}; | ||
683 | u32 host_addr; | ||
684 | int ret; | ||
685 | u32 i; | ||
686 | |||
687 | ath10k_err("firmware crashed!\n"); | ||
688 | ath10k_err("hardware name %s version 0x%x\n", | ||
689 | ar->hw_params.name, ar->target_version); | ||
690 | ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major, | ||
691 | ar->fw_version_minor, ar->fw_version_release, | ||
692 | ar->fw_version_build); | ||
693 | |||
694 | host_addr = host_interest_item_address(HI_ITEM(hi_failure_state)); | ||
695 | if (ath10k_pci_diag_read_mem(ar, host_addr, | ||
696 | ®_dump_area, sizeof(u32)) != 0) { | ||
697 | ath10k_warn("could not read hi_failure_state\n"); | ||
698 | return; | ||
699 | } | ||
700 | |||
701 | ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area); | ||
702 | |||
703 | ret = ath10k_pci_diag_read_mem(ar, reg_dump_area, | ||
704 | ®_dump_values[0], | ||
705 | REG_DUMP_COUNT_QCA988X * sizeof(u32)); | ||
706 | if (ret != 0) { | ||
707 | ath10k_err("could not dump FW Dump Area\n"); | ||
708 | return; | ||
709 | } | ||
710 | |||
711 | BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4); | ||
712 | |||
713 | ath10k_err("target Register Dump\n"); | ||
714 | for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4) | ||
715 | ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n", | ||
716 | i, | ||
717 | reg_dump_values[i], | ||
718 | reg_dump_values[i + 1], | ||
719 | reg_dump_values[i + 2], | ||
720 | reg_dump_values[i + 3]); | ||
721 | } | ||
722 | |||
723 | static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, | ||
724 | int force) | ||
725 | { | ||
726 | if (!force) { | ||
727 | int resources; | ||
728 | /* | ||
729 | * Decide whether to actually poll for completions, or just | ||
730 | * wait for a later chance. | ||
731 | * If there seem to be plenty of resources left, then just wait | ||
732 | * since checking involves reading a CE register, which is a | ||
733 | * relatively expensive operation. | ||
734 | */ | ||
735 | resources = ath10k_pci_hif_get_free_queue_number(ar, pipe); | ||
736 | |||
737 | /* | ||
738 | * If at least 50% of the total resources are still available, | ||
739 | * don't bother checking again yet. | ||
740 | */ | ||
741 | if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1)) | ||
742 | return; | ||
743 | } | ||
744 | ath10k_ce_per_engine_service(ar, pipe); | ||
745 | } | ||
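| /* | ||
| * Worked example of the heuristic above: the host->target WMI pipe (CE3) | ||
| * has 32 source entries in host_ce_config_wlan, so unless force is set | ||
| * the CE is only serviced once 16 or fewer sends remain available. | ||
| */ | ||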
746 | |||
747 | static void ath10k_pci_hif_post_init(struct ath10k *ar, | ||
748 | struct ath10k_hif_cb *callbacks) | ||
749 | { | ||
750 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
751 | |||
752 | ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); | ||
753 | |||
754 | memcpy(&ar_pci->msg_callbacks_current, callbacks, | ||
755 | sizeof(ar_pci->msg_callbacks_current)); | ||
756 | } | ||
757 | |||
758 | static int ath10k_pci_start_ce(struct ath10k *ar) | ||
759 | { | ||
760 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
761 | struct ce_state *ce_diag = ar_pci->ce_diag; | ||
762 | const struct ce_attr *attr; | ||
763 | struct hif_ce_pipe_info *pipe_info; | ||
764 | struct ath10k_pci_compl *compl; | ||
765 | int i, pipe_num, completions, disable_interrupts; | ||
766 | |||
767 | spin_lock_init(&ar_pci->compl_lock); | ||
768 | INIT_LIST_HEAD(&ar_pci->compl_process); | ||
769 | |||
770 | for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { | ||
771 | pipe_info = &ar_pci->pipe_info[pipe_num]; | ||
772 | |||
773 | spin_lock_init(&pipe_info->pipe_lock); | ||
774 | INIT_LIST_HEAD(&pipe_info->compl_free); | ||
775 | |||
776 | /* Handle Diagnostic CE specially */ | ||
777 | if (pipe_info->ce_hdl == ce_diag) | ||
778 | continue; | ||
779 | |||
780 | attr = &host_ce_config_wlan[pipe_num]; | ||
781 | completions = 0; | ||
782 | |||
783 | if (attr->src_nentries) { | ||
784 | disable_interrupts = attr->flags & CE_ATTR_DIS_INTR; | ||
785 | ath10k_ce_send_cb_register(pipe_info->ce_hdl, | ||
786 | ath10k_pci_ce_send_done, | ||
787 | disable_interrupts); | ||
788 | completions += attr->src_nentries; | ||
789 | pipe_info->num_sends_allowed = attr->src_nentries - 1; | ||
790 | } | ||
791 | |||
792 | if (attr->dest_nentries) { | ||
793 | ath10k_ce_recv_cb_register(pipe_info->ce_hdl, | ||
794 | ath10k_pci_ce_recv_data); | ||
795 | completions += attr->dest_nentries; | ||
796 | } | ||
797 | |||
798 | if (completions == 0) | ||
799 | continue; | ||
800 | |||
801 | for (i = 0; i < completions; i++) { | ||
802 | compl = kmalloc(sizeof(struct ath10k_pci_compl), | ||
803 | GFP_KERNEL); | ||
804 | if (!compl) { | ||
805 | ath10k_warn("No memory for completion state\n"); | ||
806 | ath10k_pci_stop_ce(ar); | ||
807 | return -ENOMEM; | ||
808 | } | ||
809 | |||
810 | compl->send_or_recv = HIF_CE_COMPLETE_FREE; | ||
811 | list_add_tail(&compl->list, &pipe_info->compl_free); | ||
812 | } | ||
813 | } | ||
814 | |||
815 | return 0; | ||
816 | } | ||
817 | |||
818 | static void ath10k_pci_stop_ce(struct ath10k *ar) | ||
819 | { | ||
820 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
821 | struct ath10k_pci_compl *compl; | ||
822 | struct sk_buff *skb; | ||
823 | int i; | ||
824 | |||
825 | ath10k_ce_disable_interrupts(ar); | ||
826 | |||
827 | /* Cancel the pending tasklet */ | ||
828 | tasklet_kill(&ar_pci->intr_tq); | ||
829 | |||
830 | for (i = 0; i < CE_COUNT; i++) | ||
831 | tasklet_kill(&ar_pci->pipe_info[i].intr); | ||
832 | |||
833 | /* Mark pending completions as aborted, so that upper layers free up | ||
834 | * their associated resources */ | ||
835 | spin_lock_bh(&ar_pci->compl_lock); | ||
836 | list_for_each_entry(compl, &ar_pci->compl_process, list) { | ||
837 | skb = (struct sk_buff *)compl->transfer_context; | ||
838 | ATH10K_SKB_CB(skb)->is_aborted = true; | ||
839 | } | ||
840 | spin_unlock_bh(&ar_pci->compl_lock); | ||
841 | } | ||
842 | |||
843 | static void ath10k_pci_cleanup_ce(struct ath10k *ar) | ||
844 | { | ||
845 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
846 | struct ath10k_pci_compl *compl, *tmp; | ||
847 | struct hif_ce_pipe_info *pipe_info; | ||
848 | struct sk_buff *netbuf; | ||
849 | int pipe_num; | ||
850 | |||
851 | /* Free pending completions. */ | ||
852 | spin_lock_bh(&ar_pci->compl_lock); | ||
853 | if (!list_empty(&ar_pci->compl_process)) | ||
854 | ath10k_warn("pending completions still present! possible memory leaks.\n"); | ||
855 | |||
856 | list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) { | ||
857 | list_del(&compl->list); | ||
858 | netbuf = (struct sk_buff *)compl->transfer_context; | ||
859 | dev_kfree_skb_any(netbuf); | ||
860 | kfree(compl); | ||
861 | } | ||
862 | spin_unlock_bh(&ar_pci->compl_lock); | ||
863 | |||
864 | /* Free unused completions for each pipe. */ | ||
865 | for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { | ||
866 | pipe_info = &ar_pci->pipe_info[pipe_num]; | ||
867 | |||
868 | spin_lock_bh(&pipe_info->pipe_lock); | ||
869 | list_for_each_entry_safe(compl, tmp, | ||
870 | &pipe_info->compl_free, list) { | ||
871 | list_del(&compl->list); | ||
872 | kfree(compl); | ||
873 | } | ||
874 | spin_unlock_bh(&pipe_info->pipe_lock); | ||
875 | } | ||
876 | } | ||
877 | |||
878 | static void ath10k_pci_process_ce(struct ath10k *ar) | ||
879 | { | ||
880 | struct ath10k_pci *ar_pci = ar->hif.priv; | ||
881 | struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current; | ||
882 | struct ath10k_pci_compl *compl; | ||
883 | struct sk_buff *skb; | ||
884 | unsigned int nbytes; | ||
885 | int ret, send_done = 0; | ||
886 | |||
887 | /* Upper layers aren't ready to handle tx/rx completions in parallel so | ||
888 | * we must serialize all completion processing. */ | ||
889 | |||
890 | spin_lock_bh(&ar_pci->compl_lock); | ||
891 | if (ar_pci->compl_processing) { | ||
892 | spin_unlock_bh(&ar_pci->compl_lock); | ||
893 | return; | ||
894 | } | ||
895 | ar_pci->compl_processing = true; | ||
896 | spin_unlock_bh(&ar_pci->compl_lock); | ||
897 | |||
898 | for (;;) { | ||
899 | spin_lock_bh(&ar_pci->compl_lock); | ||
900 | if (list_empty(&ar_pci->compl_process)) { | ||
901 | spin_unlock_bh(&ar_pci->compl_lock); | ||
902 | break; | ||
903 | } | ||
904 | compl = list_first_entry(&ar_pci->compl_process, | ||
905 | struct ath10k_pci_compl, list); | ||
906 | list_del(&compl->list); | ||
907 | spin_unlock_bh(&ar_pci->compl_lock); | ||
908 | |||
909 | if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) { | ||
910 | cb->tx_completion(ar, | ||
911 | compl->transfer_context, | ||
912 | compl->transfer_id); | ||
913 | send_done = 1; | ||
914 | } else { | ||
915 | ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1); | ||
916 | if (ret) { | ||
917 | ath10k_warn("Unable to post recv buffer for pipe: %d\n", | ||
918 | compl->pipe_info->pipe_num); | ||
919 | break; | ||
920 | } | ||
921 | |||
922 | skb = (struct sk_buff *)compl->transfer_context; | ||
923 | nbytes = compl->nbytes; | ||
924 | |||
925 | ath10k_dbg(ATH10K_DBG_PCI, | ||
926 | "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n", | ||
927 | skb, nbytes); | ||
928 | ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, | ||
929 | "ath10k rx: ", skb->data, nbytes); | ||
930 | |||
931 | if (skb->len + skb_tailroom(skb) >= nbytes) { | ||
932 | skb_trim(skb, 0); | ||
933 | skb_put(skb, nbytes); | ||
934 | cb->rx_completion(ar, skb, | ||
935 | compl->pipe_info->pipe_num); | ||
936 | } else { | ||
937 | ath10k_warn("rxed more than expected (nbytes %d, max %d)", | ||
938 | nbytes, | ||
939 | skb->len + skb_tailroom(skb)); | ||
940 | } | ||
941 | } | ||
942 | |||
943 | compl->send_or_recv = HIF_CE_COMPLETE_FREE; | ||
944 | |||
945 | /* | ||
946 | * Add completion back to the pipe's free list. | ||
947 | */ | ||
948 | spin_lock_bh(&compl->pipe_info->pipe_lock); | ||
949 | list_add_tail(&compl->list, &compl->pipe_info->compl_free); | ||
950 | compl->pipe_info->num_sends_allowed += send_done; | ||
951 | spin_unlock_bh(&compl->pipe_info->pipe_lock); | ||
952 | } | ||
953 | |||
954 | spin_lock_bh(&ar_pci->compl_lock); | ||
955 | ar_pci->compl_processing = false; | ||
956 | spin_unlock_bh(&ar_pci->compl_lock); | ||
957 | } | ||
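| /* | ||
| * ath10k_pci_process_ce() is the single drain point for compl_process: | ||
| * the compl_processing flag keeps the drain single-threaded, send | ||
| * completions are handed to the HIF tx_completion callback, and each | ||
| * receive completion re-posts one rx buffer on its pipe before the skb | ||
| * is passed up via rx_completion. | ||
| */ | ||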
958 | |||
959 | /* TODO - temporary mapping while we have too few CE's */ | ||
960 | static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, | ||
961 | u16 service_id, u8 *ul_pipe, | ||
962 | u8 *dl_pipe, int *ul_is_polled, | ||
963 | int *dl_is_polled) | ||
964 | { | ||
965 | int ret = 0; | ||
966 | |||
967 | /* polling for received messages not supported */ | ||
968 | *dl_is_polled = 0; | ||
969 | |||
970 | switch (service_id) { | ||
971 | case ATH10K_HTC_SVC_ID_HTT_DATA_MSG: | ||
972 | /* | ||
973 | * Host->target HTT gets its own pipe, so it can be polled | ||
974 | * while other pipes are interrupt driven. | ||
975 | */ | ||
976 | *ul_pipe = 4; | ||
977 | /* | ||
978 | * Use the same target->host pipe for HTC ctrl, HTC raw | ||
979 | * streams, and HTT. | ||
980 | */ | ||
981 | *dl_pipe = 1; | ||
982 | break; | ||
983 | |||
984 | case ATH10K_HTC_SVC_ID_RSVD_CTRL: | ||
985 | case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS: | ||
986 | /* | ||
987 | * Note: HTC_RAW_STREAMS_SVC is currently unused, and | ||
988 | * HTC_CTRL_RSVD_SVC could share the same pipe as the | ||
989 | * WMI services. So, if another CE is needed, change | ||
990 | * this to *ul_pipe = 3, which frees up CE 0. | ||
991 | */ | ||
992 | /* *ul_pipe = 3; */ | ||
993 | *ul_pipe = 0; | ||
994 | *dl_pipe = 1; | ||
995 | break; | ||
996 | |||
997 | case ATH10K_HTC_SVC_ID_WMI_DATA_BK: | ||
998 | case ATH10K_HTC_SVC_ID_WMI_DATA_BE: | ||
999 | case ATH10K_HTC_SVC_ID_WMI_DATA_VI: | ||
1000 | case ATH10K_HTC_SVC_ID_WMI_DATA_VO: | ||
1001 | |||
1002 | case ATH10K_HTC_SVC_ID_WMI_CONTROL: | ||
1003 | *ul_pipe = 3; | ||
1004 | *dl_pipe = 2; | ||
1005 | break; | ||
1006 | |||
1007 | /* pipe 5 unused */ | ||
1008 | /* pipe 6 reserved */ | ||
1009 | /* pipe 7 reserved */ | ||
1010 | |||
1011 | default: | ||
1012 | ret = -1; | ||
1013 | break; | ||
1014 | } | ||
1015 | *ul_is_polled = | ||
1016 | (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0; | ||
1017 | |||
1018 | return ret; | ||
1019 | } | ||
1020 | |||
1021 | static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, | ||
1022 | u8 *ul_pipe, u8 *dl_pipe) | ||
1023 | { | ||
1024 | int ul_is_polled, dl_is_polled; | ||
1025 | |||
1026 | (void)ath10k_pci_hif_map_service_to_pipe(ar, | ||
1027 | ATH10K_HTC_SVC_ID_RSVD_CTRL, | ||
1028 | ul_pipe, | ||
1029 | dl_pipe, | ||
1030 | &ul_is_polled, | ||
1031 | &dl_is_polled); | ||
1032 | } | ||
1033 | |||
1034 | static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info, | ||
1035 | int num) | ||
1036 | { | ||
1037 | struct ath10k *ar = pipe_info->hif_ce_state; | ||
1038 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1039 | struct ce_state *ce_state = pipe_info->ce_hdl; | ||
1040 | struct sk_buff *skb; | ||
1041 | dma_addr_t ce_data; | ||
1042 | int i, ret = 0; | ||
1043 | |||
1044 | if (pipe_info->buf_sz == 0) | ||
1045 | return 0; | ||
1046 | |||
1047 | for (i = 0; i < num; i++) { | ||
1048 | skb = dev_alloc_skb(pipe_info->buf_sz); | ||
1049 | if (!skb) { | ||
1050 | ath10k_warn("could not allocate skbuff for pipe %d\n", | ||
1051 | pipe_info->pipe_num); | ||
1052 | ret = -ENOMEM; | ||
1053 | goto err; | ||
1054 | } | ||
1055 | |||
1056 | WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); | ||
1057 | |||
1058 | ce_data = dma_map_single(ar->dev, skb->data, | ||
1059 | skb->len + skb_tailroom(skb), | ||
1060 | DMA_FROM_DEVICE); | ||
1061 | |||
1062 | if (unlikely(dma_mapping_error(ar->dev, ce_data))) { | ||
1063 | ath10k_warn("could not dma map skbuff\n"); | ||
1064 | dev_kfree_skb_any(skb); | ||
1065 | ret = -EIO; | ||
1066 | goto err; | ||
1067 | } | ||
1068 | |||
1069 | ATH10K_SKB_CB(skb)->paddr = ce_data; | ||
1070 | |||
1071 | pci_dma_sync_single_for_device(ar_pci->pdev, ce_data, | ||
1072 | pipe_info->buf_sz, | ||
1073 | PCI_DMA_FROMDEVICE); | ||
1074 | |||
1075 | ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb, | ||
1076 | ce_data); | ||
1077 | if (ret) { | ||
1078 | ath10k_warn("could not enqueue to pipe %d (%d)\n", | ||
1079 | pipe_info->pipe_num, ret); | ||
1080 | goto err; | ||
1081 | } | ||
1082 | } | ||
1083 | |||
1084 | return ret; | ||
1085 | |||
1086 | err: | ||
1087 | ath10k_pci_rx_pipe_cleanup(pipe_info); | ||
1088 | return ret; | ||
1089 | } | ||
1090 | |||
1091 | static int ath10k_pci_post_rx(struct ath10k *ar) | ||
1092 | { | ||
1093 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1094 | struct hif_ce_pipe_info *pipe_info; | ||
1095 | const struct ce_attr *attr; | ||
1096 | int pipe_num, ret = 0; | ||
1097 | |||
1098 | for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { | ||
1099 | pipe_info = &ar_pci->pipe_info[pipe_num]; | ||
1100 | attr = &host_ce_config_wlan[pipe_num]; | ||
1101 | |||
1102 | if (attr->dest_nentries == 0) | ||
1103 | continue; | ||
1104 | |||
1105 | ret = ath10k_pci_post_rx_pipe(pipe_info, | ||
1106 | attr->dest_nentries - 1); | ||
1107 | if (ret) { | ||
1108 | ath10k_warn("Unable to replenish recv buffers for pipe: %d\n", | ||
1109 | pipe_num); | ||
1110 | |||
1111 | for (; pipe_num >= 0; pipe_num--) { | ||
1112 | pipe_info = &ar_pci->pipe_info[pipe_num]; | ||
1113 | ath10k_pci_rx_pipe_cleanup(pipe_info); | ||
1114 | } | ||
1115 | return ret; | ||
1116 | } | ||
1117 | } | ||
1118 | |||
1119 | return 0; | ||
1120 | } | ||
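| /* | ||
| * Only dest_nentries - 1 buffers are posted per pipe above, presumably | ||
| * so the destination ring is never completely full and a full ring can | ||
| * still be told apart from an empty one. | ||
| */ | ||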
1121 | |||
1122 | static int ath10k_pci_hif_start(struct ath10k *ar) | ||
1123 | { | ||
1124 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1125 | int ret; | ||
1126 | |||
1127 | ret = ath10k_pci_start_ce(ar); | ||
1128 | if (ret) { | ||
1129 | ath10k_warn("could not start CE (%d)\n", ret); | ||
1130 | return ret; | ||
1131 | } | ||
1132 | |||
1133 | /* Post buffers once to start things off. */ | ||
1134 | ret = ath10k_pci_post_rx(ar); | ||
1135 | if (ret) { | ||
1136 | ath10k_warn("could not post rx pipes (%d)\n", ret); | ||
1137 | return ret; | ||
1138 | } | ||
1139 | |||
1140 | ar_pci->started = 1; | ||
1141 | return 0; | ||
1142 | } | ||
1143 | |||
1144 | static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info) | ||
1145 | { | ||
1146 | struct ath10k *ar; | ||
1147 | struct ath10k_pci *ar_pci; | ||
1148 | struct ce_state *ce_hdl; | ||
1149 | u32 buf_sz; | ||
1150 | struct sk_buff *netbuf; | ||
1151 | u32 ce_data; | ||
1152 | |||
1153 | buf_sz = pipe_info->buf_sz; | ||
1154 | |||
1155 | /* Unused Copy Engine */ | ||
1156 | if (buf_sz == 0) | ||
1157 | return; | ||
1158 | |||
1159 | ar = pipe_info->hif_ce_state; | ||
1160 | ar_pci = ath10k_pci_priv(ar); | ||
1161 | |||
1162 | if (!ar_pci->started) | ||
1163 | return; | ||
1164 | |||
1165 | ce_hdl = pipe_info->ce_hdl; | ||
1166 | |||
1167 | while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf, | ||
1168 | &ce_data) == 0) { | ||
1169 | dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr, | ||
1170 | netbuf->len + skb_tailroom(netbuf), | ||
1171 | DMA_FROM_DEVICE); | ||
1172 | dev_kfree_skb_any(netbuf); | ||
1173 | } | ||
1174 | } | ||
1175 | |||
1176 | static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info) | ||
1177 | { | ||
1178 | struct ath10k *ar; | ||
1179 | struct ath10k_pci *ar_pci; | ||
1180 | struct ce_state *ce_hdl; | ||
1181 | struct sk_buff *netbuf; | ||
1182 | u32 ce_data; | ||
1183 | unsigned int nbytes; | ||
1184 | unsigned int id; | ||
1185 | u32 buf_sz; | ||
1186 | |||
1187 | buf_sz = pipe_info->buf_sz; | ||
1188 | |||
1189 | /* Unused Copy Engine */ | ||
1190 | if (buf_sz == 0) | ||
1191 | return; | ||
1192 | |||
1193 | ar = pipe_info->hif_ce_state; | ||
1194 | ar_pci = ath10k_pci_priv(ar); | ||
1195 | |||
1196 | if (!ar_pci->started) | ||
1197 | return; | ||
1198 | |||
1199 | ce_hdl = pipe_info->ce_hdl; | ||
1200 | |||
1201 | while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf, | ||
1202 | &ce_data, &nbytes, &id) == 0) { | ||
1203 | if (netbuf != CE_SENDLIST_ITEM_CTXT) { | ||
1204 | /* | ||
1205 | * Indicate the completion to higher layer to free | ||
1206 | * the buffer | ||
1207 | */ | ||
1208 | ATH10K_SKB_CB(netbuf)->is_aborted = true; | ||
1209 | ar_pci->msg_callbacks_current.tx_completion(ar, | ||
1210 | netbuf, | ||
1211 | id); | ||
| } | ||
1212 | } | ||
1213 | } | ||
1214 | |||
1215 | /* | ||
1216 | * Cleanup residual buffers for device shutdown: | ||
1217 | * buffers that were enqueued for receive | ||
1218 | * buffers that were to be sent | ||
1219 | * Note: Buffers that had completed but which were | ||
1220 | * not yet processed are on a completion queue. They | ||
1221 | * are handled when the completion thread shuts down. | ||
1222 | */ | ||
1223 | static void ath10k_pci_buffer_cleanup(struct ath10k *ar) | ||
1224 | { | ||
1225 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1226 | int pipe_num; | ||
1227 | |||
1228 | for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { | ||
1229 | struct hif_ce_pipe_info *pipe_info; | ||
1230 | |||
1231 | pipe_info = &ar_pci->pipe_info[pipe_num]; | ||
1232 | ath10k_pci_rx_pipe_cleanup(pipe_info); | ||
1233 | ath10k_pci_tx_pipe_cleanup(pipe_info); | ||
1234 | } | ||
1235 | } | ||
1236 | |||
1237 | static void ath10k_pci_ce_deinit(struct ath10k *ar) | ||
1238 | { | ||
1239 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1240 | struct hif_ce_pipe_info *pipe_info; | ||
1241 | int pipe_num; | ||
1242 | |||
1243 | for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { | ||
1244 | pipe_info = &ar_pci->pipe_info[pipe_num]; | ||
1245 | if (pipe_info->ce_hdl) { | ||
1246 | ath10k_ce_deinit(pipe_info->ce_hdl); | ||
1247 | pipe_info->ce_hdl = NULL; | ||
1248 | pipe_info->buf_sz = 0; | ||
1249 | } | ||
1250 | } | ||
1251 | } | ||
1252 | |||
1253 | static void ath10k_pci_hif_stop(struct ath10k *ar) | ||
1254 | { | ||
1255 | ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); | ||
1256 | |||
1257 | ath10k_pci_stop_ce(ar); | ||
1258 | |||
1259 | /* At this point, asynchronous threads are stopped, the target should | ||
1260 | * not DMA nor interrupt. We process the leftovers and then free | ||
1261 | * everything else up. */ | ||
1262 | |||
1263 | ath10k_pci_process_ce(ar); | ||
1264 | ath10k_pci_cleanup_ce(ar); | ||
1265 | ath10k_pci_buffer_cleanup(ar); | ||
1266 | ath10k_pci_ce_deinit(ar); | ||
1267 | } | ||
1268 | |||
1269 | static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, | ||
1270 | void *req, u32 req_len, | ||
1271 | void *resp, u32 *resp_len) | ||
1272 | { | ||
1273 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1274 | struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl; | ||
1275 | struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl; | ||
1276 | dma_addr_t req_paddr = 0; | ||
1277 | dma_addr_t resp_paddr = 0; | ||
1278 | struct bmi_xfer xfer = {}; | ||
1279 | void *treq, *tresp = NULL; | ||
1280 | int ret = 0; | ||
1281 | |||
1282 | if (resp && !resp_len) | ||
1283 | return -EINVAL; | ||
1284 | |||
1285 | if (resp && resp_len && *resp_len == 0) | ||
1286 | return -EINVAL; | ||
1287 | |||
1288 | treq = kmemdup(req, req_len, GFP_KERNEL); | ||
1289 | if (!treq) | ||
1290 | return -ENOMEM; | ||
1291 | |||
1292 | req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE); | ||
1293 | ret = dma_mapping_error(ar->dev, req_paddr); | ||
1294 | if (ret) | ||
1295 | goto err_dma; | ||
1296 | |||
1297 | if (resp && resp_len) { | ||
1298 | tresp = kzalloc(*resp_len, GFP_KERNEL); | ||
1299 | if (!tresp) { | ||
1300 | ret = -ENOMEM; | ||
1301 | goto err_req; | ||
1302 | } | ||
1303 | |||
1304 | resp_paddr = dma_map_single(ar->dev, tresp, *resp_len, | ||
1305 | DMA_FROM_DEVICE); | ||
1306 | ret = dma_mapping_error(ar->dev, resp_paddr); | ||
1307 | if (ret) | ||
1308 | goto err_req; | ||
1309 | |||
1310 | xfer.wait_for_resp = true; | ||
1311 | xfer.resp_len = 0; | ||
1312 | |||
1313 | ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr); | ||
1314 | } | ||
1315 | |||
1316 | init_completion(&xfer.done); | ||
1317 | |||
1318 | ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0); | ||
1319 | if (ret) | ||
1320 | goto err_resp; | ||
1321 | |||
1322 | ret = wait_for_completion_timeout(&xfer.done, | ||
1323 | BMI_COMMUNICATION_TIMEOUT_HZ); | ||
1324 | if (ret <= 0) { | ||
1325 | u32 unused_buffer; | ||
1326 | unsigned int unused_nbytes; | ||
1327 | unsigned int unused_id; | ||
1328 | |||
1329 | ret = -ETIMEDOUT; | ||
1330 | ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer, | ||
1331 | &unused_nbytes, &unused_id); | ||
1332 | } else { | ||
1333 | /* non-zero means we did not time out */ | ||
1334 | ret = 0; | ||
1335 | } | ||
1336 | |||
1337 | err_resp: | ||
1338 | if (resp) { | ||
1339 | u32 unused_buffer; | ||
1340 | |||
1341 | ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer); | ||
1342 | dma_unmap_single(ar->dev, resp_paddr, | ||
1343 | *resp_len, DMA_FROM_DEVICE); | ||
1344 | } | ||
1345 | err_req: | ||
1346 | dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE); | ||
1347 | |||
1348 | if (ret == 0 && resp_len) { | ||
1349 | *resp_len = min(*resp_len, xfer.resp_len); | ||
1350 | memcpy(resp, tresp, *resp_len); | ||
1351 | } | ||
1352 | err_dma: | ||
1353 | kfree(treq); | ||
1354 | kfree(tresp); | ||
1355 | |||
1356 | return ret; | ||
1357 | } | ||
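| /* | ||
| * BMI exchange flow used above: the request is duplicated into a | ||
| * DMA-mapped buffer and sent on BMI_CE_NUM_TO_TARG; when a response is | ||
| * expected a DMA buffer is pre-posted on BMI_CE_NUM_TO_HOST, and the | ||
| * caller blocks on xfer.done (completed by the BMI send/recv callbacks | ||
| * below) for up to BMI_COMMUNICATION_TIMEOUT_HZ before the pending send | ||
| * is cancelled and -ETIMEDOUT returned. | ||
| */ | ||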
1358 | |||
1359 | static void ath10k_pci_bmi_send_done(struct ce_state *ce_state, | ||
1360 | void *transfer_context, | ||
1361 | u32 data, | ||
1362 | unsigned int nbytes, | ||
1363 | unsigned int transfer_id) | ||
1364 | { | ||
1365 | struct bmi_xfer *xfer = transfer_context; | ||
1366 | |||
1367 | if (xfer->wait_for_resp) | ||
1368 | return; | ||
1369 | |||
1370 | complete(&xfer->done); | ||
1371 | } | ||
1372 | |||
1373 | static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state, | ||
1374 | void *transfer_context, | ||
1375 | u32 data, | ||
1376 | unsigned int nbytes, | ||
1377 | unsigned int transfer_id, | ||
1378 | unsigned int flags) | ||
1379 | { | ||
1380 | struct bmi_xfer *xfer = transfer_context; | ||
1381 | |||
1382 | if (!xfer->wait_for_resp) { | ||
1383 | ath10k_warn("unexpected: BMI data received; ignoring\n"); | ||
1384 | return; | ||
1385 | } | ||
1386 | |||
1387 | xfer->resp_len = nbytes; | ||
1388 | complete(&xfer->done); | ||
1389 | } | ||
1390 | |||
1391 | /* | ||
1392 | * Map from service/endpoint to Copy Engine. | ||
1393 | * This table is derived from the CE_PCI TABLE, above. | ||
1394 | * It is passed to the Target at startup for use by firmware. | ||
1395 | */ | ||
1396 | static const struct service_to_pipe target_service_to_ce_map_wlan[] = { | ||
1397 | { | ||
1398 | ATH10K_HTC_SVC_ID_WMI_DATA_VO, | ||
1399 | PIPEDIR_OUT, /* out = UL = host -> target */ | ||
1400 | 3, | ||
1401 | }, | ||
1402 | { | ||
1403 | ATH10K_HTC_SVC_ID_WMI_DATA_VO, | ||
1404 | PIPEDIR_IN, /* in = DL = target -> host */ | ||
1405 | 2, | ||
1406 | }, | ||
1407 | { | ||
1408 | ATH10K_HTC_SVC_ID_WMI_DATA_BK, | ||
1409 | PIPEDIR_OUT, /* out = UL = host -> target */ | ||
1410 | 3, | ||
1411 | }, | ||
1412 | { | ||
1413 | ATH10K_HTC_SVC_ID_WMI_DATA_BK, | ||
1414 | PIPEDIR_IN, /* in = DL = target -> host */ | ||
1415 | 2, | ||
1416 | }, | ||
1417 | { | ||
1418 | ATH10K_HTC_SVC_ID_WMI_DATA_BE, | ||
1419 | PIPEDIR_OUT, /* out = UL = host -> target */ | ||
1420 | 3, | ||
1421 | }, | ||
1422 | { | ||
1423 | ATH10K_HTC_SVC_ID_WMI_DATA_BE, | ||
1424 | PIPEDIR_IN, /* in = DL = target -> host */ | ||
1425 | 2, | ||
1426 | }, | ||
1427 | { | ||
1428 | ATH10K_HTC_SVC_ID_WMI_DATA_VI, | ||
1429 | PIPEDIR_OUT, /* out = UL = host -> target */ | ||
1430 | 3, | ||
1431 | }, | ||
1432 | { | ||
1433 | ATH10K_HTC_SVC_ID_WMI_DATA_VI, | ||
1434 | PIPEDIR_IN, /* in = DL = target -> host */ | ||
1435 | 2, | ||
1436 | }, | ||
1437 | { | ||
1438 | ATH10K_HTC_SVC_ID_WMI_CONTROL, | ||
1439 | PIPEDIR_OUT, /* out = UL = host -> target */ | ||
1440 | 3, | ||
1441 | }, | ||
1442 | { | ||
1443 | ATH10K_HTC_SVC_ID_WMI_CONTROL, | ||
1444 | PIPEDIR_IN, /* in = DL = target -> host */ | ||
1445 | 2, | ||
1446 | }, | ||
1447 | { | ||
1448 | ATH10K_HTC_SVC_ID_RSVD_CTRL, | ||
1449 | PIPEDIR_OUT, /* out = UL = host -> target */ | ||
1450 | 0, /* could be moved to 3 (share with WMI) */ | ||
1451 | }, | ||
1452 | { | ||
1453 | ATH10K_HTC_SVC_ID_RSVD_CTRL, | ||
1454 | PIPEDIR_IN, /* in = DL = target -> host */ | ||
1455 | 1, | ||
1456 | }, | ||
1457 | { | ||
1458 | ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */ | ||
1459 | PIPEDIR_OUT, /* out = UL = host -> target */ | ||
1460 | 0, | ||
1461 | }, | ||
1462 | { | ||
1463 | ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */ | ||
1464 | PIPEDIR_IN, /* in = DL = target -> host */ | ||
1465 | 1, | ||
1466 | }, | ||
1467 | { | ||
1468 | ATH10K_HTC_SVC_ID_HTT_DATA_MSG, | ||
1469 | PIPEDIR_OUT, /* out = UL = host -> target */ | ||
1470 | 4, | ||
1471 | }, | ||
1472 | { | ||
1473 | ATH10K_HTC_SVC_ID_HTT_DATA_MSG, | ||
1474 | PIPEDIR_IN, /* in = DL = target -> host */ | ||
1475 | 1, | ||
1476 | }, | ||
1477 | |||
1478 | /* (Additions here) */ | ||
1479 | |||
1480 | { /* Must be last */ | ||
1481 | 0, | ||
1482 | 0, | ||
1483 | 0, | ||
1484 | }, | ||
1485 | }; | ||
1486 | |||
1487 | /* | ||
1488 | * Send an interrupt to the device to wake up the Target CPU | ||
1489 | * so it has an opportunity to notice any changed state. | ||
1490 | */ | ||
1491 | static int ath10k_pci_wake_target_cpu(struct ath10k *ar) | ||
1492 | { | ||
1493 | int ret; | ||
1494 | u32 core_ctrl; | ||
1495 | |||
1496 | ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS | | ||
1497 | CORE_CTRL_ADDRESS, | ||
1498 | &core_ctrl); | ||
1499 | if (ret) { | ||
1500 | ath10k_warn("Unable to read core ctrl\n"); | ||
1501 | return ret; | ||
1502 | } | ||
1503 | |||
1504 | /* A_INUM_FIRMWARE interrupt to Target CPU */ | ||
1505 | core_ctrl |= CORE_CTRL_CPU_INTR_MASK; | ||
1506 | |||
1507 | ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS | | ||
1508 | CORE_CTRL_ADDRESS, | ||
1509 | core_ctrl); | ||
1510 | if (ret) | ||
1511 | ath10k_warn("Unable to set interrupt mask\n"); | ||
1512 | |||
1513 | return ret; | ||
1514 | } | ||
1515 | |||
1516 | static int ath10k_pci_init_config(struct ath10k *ar) | ||
1517 | { | ||
1518 | u32 interconnect_targ_addr; | ||
1519 | u32 pcie_state_targ_addr = 0; | ||
1520 | u32 pipe_cfg_targ_addr = 0; | ||
1521 | u32 svc_to_pipe_map = 0; | ||
1522 | u32 pcie_config_flags = 0; | ||
1523 | u32 ealloc_value; | ||
1524 | u32 ealloc_targ_addr; | ||
1525 | u32 flag2_value; | ||
1526 | u32 flag2_targ_addr; | ||
1527 | int ret = 0; | ||
1528 | |||
1529 | /* Download to Target the CE Config and the service-to-CE map */ | ||
1530 | interconnect_targ_addr = | ||
1531 | host_interest_item_address(HI_ITEM(hi_interconnect_state)); | ||
1532 | |||
1533 | /* Supply Target-side CE configuration */ | ||
1534 | ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr, | ||
1535 | &pcie_state_targ_addr); | ||
1536 | if (ret != 0) { | ||
1537 | ath10k_err("Failed to get pcie state addr: %d\n", ret); | ||
1538 | return ret; | ||
1539 | } | ||
1540 | |||
1541 | if (pcie_state_targ_addr == 0) { | ||
1542 | ret = -EIO; | ||
1543 | ath10k_err("Invalid pcie state addr\n"); | ||
1544 | return ret; | ||
1545 | } | ||
1546 | |||
1547 | ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr + | ||
1548 | offsetof(struct pcie_state, | ||
1549 | pipe_cfg_addr), | ||
1550 | &pipe_cfg_targ_addr); | ||
1551 | if (ret != 0) { | ||
1552 | ath10k_err("Failed to get pipe cfg addr: %d\n", ret); | ||
1553 | return ret; | ||
1554 | } | ||
1555 | |||
1556 | if (pipe_cfg_targ_addr == 0) { | ||
1557 | ret = -EIO; | ||
1558 | ath10k_err("Invalid pipe cfg addr\n"); | ||
1559 | return ret; | ||
1560 | } | ||
1561 | |||
1562 | ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr, | ||
1563 | target_ce_config_wlan, | ||
1564 | sizeof(target_ce_config_wlan)); | ||
1565 | |||
1566 | if (ret != 0) { | ||
1567 | ath10k_err("Failed to write pipe cfg: %d\n", ret); | ||
1568 | return ret; | ||
1569 | } | ||
1570 | |||
1571 | ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr + | ||
1572 | offsetof(struct pcie_state, | ||
1573 | svc_to_pipe_map), | ||
1574 | &svc_to_pipe_map); | ||
1575 | if (ret != 0) { | ||
1576 | ath10k_err("Failed to get svc/pipe map: %d\n", ret); | ||
1577 | return ret; | ||
1578 | } | ||
1579 | |||
1580 | if (svc_to_pipe_map == 0) { | ||
1581 | ret = -EIO; | ||
1582 | ath10k_err("Invalid svc_to_pipe map\n"); | ||
1583 | return ret; | ||
1584 | } | ||
1585 | |||
1586 | ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map, | ||
1587 | target_service_to_ce_map_wlan, | ||
1588 | sizeof(target_service_to_ce_map_wlan)); | ||
1589 | if (ret != 0) { | ||
1590 | ath10k_err("Failed to write svc/pipe map: %d\n", ret); | ||
1591 | return ret; | ||
1592 | } | ||
1593 | |||
1594 | ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr + | ||
1595 | offsetof(struct pcie_state, | ||
1596 | config_flags), | ||
1597 | &pcie_config_flags); | ||
1598 | if (ret != 0) { | ||
1599 | ath10k_err("Failed to get pcie config_flags: %d\n", ret); | ||
1600 | return ret; | ||
1601 | } | ||
1602 | |||
1603 | pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1; | ||
1604 | |||
1605 | ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr + | ||
1606 | offsetof(struct pcie_state, config_flags), | ||
1607 | &pcie_config_flags, | ||
1608 | sizeof(pcie_config_flags)); | ||
1609 | if (ret != 0) { | ||
1610 | ath10k_err("Failed to write pcie config_flags: %d\n", ret); | ||
1611 | return ret; | ||
1612 | } | ||
1613 | |||
1614 | /* configure early allocation */ | ||
1615 | ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc)); | ||
1616 | |||
1617 | ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value); | ||
1618 | if (ret != 0) { | ||
1619 | ath10k_err("Faile to get early alloc val: %d\n", ret); | ||
1620 | return ret; | ||
1621 | } | ||
1622 | |||
1623 | /* first bank is switched to IRAM */ | ||
1624 | ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) & | ||
1625 | HI_EARLY_ALLOC_MAGIC_MASK); | ||
1626 | ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) & | ||
1627 | HI_EARLY_ALLOC_IRAM_BANKS_MASK); | ||
1628 | |||
1629 | ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value); | ||
1630 | if (ret != 0) { | ||
1631 | ath10k_err("Failed to set early alloc val: %d\n", ret); | ||
1632 | return ret; | ||
1633 | } | ||
1634 | |||
1635 | /* Tell Target to proceed with initialization */ | ||
1636 | flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2)); | ||
1637 | |||
1638 | ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value); | ||
1639 | if (ret != 0) { | ||
1640 | ath10k_err("Failed to get option val: %d\n", ret); | ||
1641 | return ret; | ||
1642 | } | ||
1643 | |||
1644 | flag2_value |= HI_OPTION_EARLY_CFG_DONE; | ||
1645 | |||
1646 | ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value); | ||
1647 | if (ret != 0) { | ||
1648 | ath10k_err("Failed to set option val: %d\n", ret); | ||
1649 | return ret; | ||
1650 | } | ||
1651 | |||
1652 | return 0; | ||
1653 | } | ||
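| /* | ||
| * Summary of the sequence above: locate the target's pcie_state block | ||
| * via hi_interconnect_state, download the CE pipe configuration and the | ||
| * service-to-CE map, clear the PCIe L1 enable flag in config_flags, set | ||
| * up early allocation (one IRAM bank), and finally set | ||
| * HI_OPTION_EARLY_CFG_DONE so the target proceeds with initialization. | ||
| */ | ||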
1654 | |||
1655 | |||
1656 | |||
1657 | static int ath10k_pci_ce_init(struct ath10k *ar) | ||
1658 | { | ||
1659 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1660 | struct hif_ce_pipe_info *pipe_info; | ||
1661 | const struct ce_attr *attr; | ||
1662 | int pipe_num; | ||
1663 | |||
1664 | for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { | ||
1665 | pipe_info = &ar_pci->pipe_info[pipe_num]; | ||
1666 | pipe_info->pipe_num = pipe_num; | ||
1667 | pipe_info->hif_ce_state = ar; | ||
1668 | attr = &host_ce_config_wlan[pipe_num]; | ||
1669 | |||
1670 | pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr); | ||
1671 | if (pipe_info->ce_hdl == NULL) { | ||
1672 | ath10k_err("Unable to initialize CE for pipe: %d\n", | ||
1673 | pipe_num); | ||
1674 | |||
1675 | /* It is safe to call this here; it checks whether ce_hdl | ||
1676 | * is valid for each pipe. */ | ||
1677 | ath10k_pci_ce_deinit(ar); | ||
1678 | return -1; | ||
1679 | } | ||
1680 | |||
1681 | if (pipe_num == ar_pci->ce_count - 1) { | ||
1682 | /* | ||
1683 | * Reserve the last CE for | ||
1684 | * diagnostic window support | ||
1685 | */ | ||
1686 | ar_pci->ce_diag = | ||
1687 | ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl; | ||
1688 | continue; | ||
1689 | } | ||
1690 | |||
1691 | pipe_info->buf_sz = (size_t) (attr->src_sz_max); | ||
1692 | } | ||
1693 | |||
1694 | /* | ||
1695 | * Initially, establish CE completion handlers for use with BMI. | ||
1696 | * These are overwritten with generic handlers after we exit BMI phase. | ||
1697 | */ | ||
1698 | pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG]; | ||
1699 | ath10k_ce_send_cb_register(pipe_info->ce_hdl, | ||
1700 | ath10k_pci_bmi_send_done, 0); | ||
1701 | |||
1702 | pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST]; | ||
1703 | ath10k_ce_recv_cb_register(pipe_info->ce_hdl, | ||
1704 | ath10k_pci_bmi_recv_data); | ||
1705 | |||
1706 | return 0; | ||
1707 | } | ||
1708 | |||
1709 | static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar) | ||
1710 | { | ||
1711 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1712 | u32 fw_indicator_address, fw_indicator; | ||
1713 | |||
1714 | ath10k_pci_wake(ar); | ||
1715 | |||
1716 | fw_indicator_address = ar_pci->fw_indicator_address; | ||
1717 | fw_indicator = ath10k_pci_read32(ar, fw_indicator_address); | ||
1718 | |||
1719 | if (fw_indicator & FW_IND_EVENT_PENDING) { | ||
1720 | /* ACK: clear Target-side pending event */ | ||
1721 | ath10k_pci_write32(ar, fw_indicator_address, | ||
1722 | fw_indicator & ~FW_IND_EVENT_PENDING); | ||
1723 | |||
1724 | if (ar_pci->started) { | ||
1725 | ath10k_pci_hif_dump_area(ar); | ||
1726 | } else { | ||
1727 | /* | ||
1728 | * Probable Target failure before we're prepared | ||
1729 | * to handle it. Generally unexpected. | ||
1730 | */ | ||
1731 | ath10k_warn("early firmware event indicated\n"); | ||
1732 | } | ||
1733 | } | ||
1734 | |||
1735 | ath10k_pci_sleep(ar); | ||
1736 | } | ||
1737 | |||
1738 | static const struct ath10k_hif_ops ath10k_pci_hif_ops = { | ||
1739 | .send_head = ath10k_pci_hif_send_head, | ||
1740 | .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, | ||
1741 | .start = ath10k_pci_hif_start, | ||
1742 | .stop = ath10k_pci_hif_stop, | ||
1743 | .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe, | ||
1744 | .get_default_pipe = ath10k_pci_hif_get_default_pipe, | ||
1745 | .send_complete_check = ath10k_pci_hif_send_complete_check, | ||
1746 | .init = ath10k_pci_hif_post_init, | ||
1747 | .get_free_queue_number = ath10k_pci_hif_get_free_queue_number, | ||
1748 | }; | ||
1749 | |||
1750 | static void ath10k_pci_ce_tasklet(unsigned long ptr) | ||
1751 | { | ||
1752 | struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr; | ||
1753 | struct ath10k_pci *ar_pci = pipe->ar_pci; | ||
1754 | |||
1755 | ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num); | ||
1756 | } | ||
1757 | |||
1758 | static void ath10k_msi_err_tasklet(unsigned long data) | ||
1759 | { | ||
1760 | struct ath10k *ar = (struct ath10k *)data; | ||
1761 | |||
1762 | ath10k_pci_fw_interrupt_handler(ar); | ||
1763 | } | ||
1764 | |||
1765 | /* | ||
1766 | * Handler for a per-engine interrupt on a PARTICULAR CE. | ||
1767 | * This is used in cases where each CE has a private MSI interrupt. | ||
1768 | */ | ||
1769 | static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg) | ||
1770 | { | ||
1771 | struct ath10k *ar = arg; | ||
1772 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1773 | int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL; | ||
1774 | |||
1775 | if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) { | ||
1776 | ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id); | ||
1777 | return IRQ_HANDLED; | ||
1778 | } | ||
1779 | |||
1780 | /* | ||
1781 | * NOTE: We are able to derive ce_id from irq because we | ||
1782 | * use a one-to-one mapping for CE's 0..5. | ||
1783 | * CE's 6 & 7 do not use interrupts at all. | ||
1784 | * | ||
1785 | * This mapping must be kept in sync with the mapping | ||
1786 | * used by firmware. | ||
1787 | */ | ||
1788 | tasklet_schedule(&ar_pci->pipe_info[ce_id].intr); | ||
1789 | return IRQ_HANDLED; | ||
1790 | } | ||
1791 | |||
1792 | static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg) | ||
1793 | { | ||
1794 | struct ath10k *ar = arg; | ||
1795 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1796 | |||
1797 | tasklet_schedule(&ar_pci->msi_fw_err); | ||
1798 | return IRQ_HANDLED; | ||
1799 | } | ||
1800 | |||
1801 | /* | ||
1802 | * Top-level interrupt handler for all PCI interrupts from a Target. | ||
1803 | * When a block of MSI interrupts is allocated, this top-level handler | ||
1804 | * is not used; instead, we directly call the correct sub-handler. | ||
1805 | */ | ||
1806 | static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg) | ||
1807 | { | ||
1808 | struct ath10k *ar = arg; | ||
1809 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1810 | |||
1811 | if (ar_pci->num_msi_intrs == 0) { | ||
1812 | /* | ||
1813 | * IMPORTANT: INTR_CLR register has to be set after | ||
1814 | * INTR_ENABLE is set to 0, otherwise the interrupt cannot | ||
1815 | * really be cleared. | ||
1816 | */ | ||
1817 | iowrite32(0, ar_pci->mem + | ||
1818 | (SOC_CORE_BASE_ADDRESS | | ||
1819 | PCIE_INTR_ENABLE_ADDRESS)); | ||
1820 | iowrite32(PCIE_INTR_FIRMWARE_MASK | | ||
1821 | PCIE_INTR_CE_MASK_ALL, | ||
1822 | ar_pci->mem + (SOC_CORE_BASE_ADDRESS | | ||
1823 | PCIE_INTR_CLR_ADDRESS)); | ||
1824 | /* | ||
1825 | * IMPORTANT: this extra read transaction is required to | ||
1826 | * flush the posted write buffer. | ||
1827 | */ | ||
1828 | (void) ioread32(ar_pci->mem + | ||
1829 | (SOC_CORE_BASE_ADDRESS | | ||
1830 | PCIE_INTR_ENABLE_ADDRESS)); | ||
1831 | } | ||
1832 | |||
1833 | tasklet_schedule(&ar_pci->intr_tq); | ||
1834 | |||
1835 | return IRQ_HANDLED; | ||
1836 | } | ||
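The disable/clear/flush ordering in the legacy path above is easy to get wrong, so here is the same three-step sequence factored into a standalone helper purely as a reading aid (a hypothetical helper, not part of this patch; it uses only registers already referenced in this file):

/* Hypothetical helper: mask legacy interrupts, ack any pending firmware
 * and per-CE interrupts, then flush the posted writes, exactly as the
 * handler above does inline. */
static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        /* 1) Mask all sources first; INTR_CLR only takes effect afterwards. */
        iowrite32(0, ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
                                    PCIE_INTR_ENABLE_ADDRESS));

        /* 2) Ack the firmware interrupt and all CE interrupts. */
        iowrite32(PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL,
                  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
                                 PCIE_INTR_CLR_ADDRESS));

        /* 3) Dummy read to flush the posted writes above. */
        (void)ioread32(ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
                                      PCIE_INTR_ENABLE_ADDRESS));
}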
1837 | |||
1838 | static void ath10k_pci_tasklet(unsigned long data) | ||
1839 | { | ||
1840 | struct ath10k *ar = (struct ath10k *)data; | ||
1841 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1842 | |||
1843 | ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */ | ||
1844 | ath10k_ce_per_engine_service_any(ar); | ||
1845 | |||
1846 | if (ar_pci->num_msi_intrs == 0) { | ||
1847 | /* Enable Legacy PCI line interrupts */ | ||
1848 | iowrite32(PCIE_INTR_FIRMWARE_MASK | | ||
1849 | PCIE_INTR_CE_MASK_ALL, | ||
1850 | ar_pci->mem + (SOC_CORE_BASE_ADDRESS | | ||
1851 | PCIE_INTR_ENABLE_ADDRESS)); | ||
1852 | /* | ||
1853 | * IMPORTANT: this extra read transaction is required to | ||
1854 | * flush the posted write buffer | ||
1855 | */ | ||
1856 | (void) ioread32(ar_pci->mem + | ||
1857 | (SOC_CORE_BASE_ADDRESS | | ||
1858 | PCIE_INTR_ENABLE_ADDRESS)); | ||
1859 | } | ||
1860 | } | ||
1861 | |||
1862 | static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num) | ||
1863 | { | ||
1864 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1865 | int ret; | ||
1866 | int i; | ||
1867 | |||
1868 | ret = pci_enable_msi_block(ar_pci->pdev, num); | ||
1869 | if (ret) | ||
1870 | return ret; | ||
1871 | |||
1872 | ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, | ||
1873 | ath10k_pci_msi_fw_handler, | ||
1874 | IRQF_SHARED, "ath10k_pci", ar); | ||
1875 | if (ret) | ||
1876 | return ret; | ||
1877 | |||
1878 | for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) { | ||
1879 | ret = request_irq(ar_pci->pdev->irq + i, | ||
1880 | ath10k_pci_per_engine_handler, | ||
1881 | IRQF_SHARED, "ath10k_pci", ar); | ||
1882 | if (ret) { | ||
1883 | ath10k_warn("request_irq(%d) failed %d\n", | ||
1884 | ar_pci->pdev->irq + i, ret); | ||
1885 | |||
1886 | for (; i >= MSI_ASSIGN_CE_INITIAL; i--) | ||
1887 | free_irq(ar_pci->pdev->irq + i, ar); | ||
1888 | |||
1889 | pci_disable_msi(ar_pci->pdev); | ||
1890 | return ret; | ||
1891 | } | ||
1892 | } | ||
1893 | |||
1894 | ath10k_info("MSI-X interrupt handling (%d intrs)\n", num); | ||
1895 | return 0; | ||
1896 | } | ||
1897 | |||
1898 | static int ath10k_pci_start_intr_msi(struct ath10k *ar) | ||
1899 | { | ||
1900 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1901 | int ret; | ||
1902 | |||
1903 | ret = pci_enable_msi(ar_pci->pdev); | ||
1904 | if (ret < 0) | ||
1905 | return ret; | ||
1906 | |||
1907 | ret = request_irq(ar_pci->pdev->irq, | ||
1908 | ath10k_pci_interrupt_handler, | ||
1909 | IRQF_SHARED, "ath10k_pci", ar); | ||
1910 | if (ret < 0) { | ||
1911 | pci_disable_msi(ar_pci->pdev); | ||
1912 | return ret; | ||
1913 | } | ||
1914 | |||
1915 | ath10k_info("MSI interrupt handling\n"); | ||
1916 | return 0; | ||
1917 | } | ||
1918 | |||
1919 | static int ath10k_pci_start_intr_legacy(struct ath10k *ar) | ||
1920 | { | ||
1921 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1922 | int ret; | ||
1923 | |||
1924 | ret = request_irq(ar_pci->pdev->irq, | ||
1925 | ath10k_pci_interrupt_handler, | ||
1926 | IRQF_SHARED, "ath10k_pci", ar); | ||
1927 | if (ret < 0) | ||
1928 | return ret; | ||
1929 | |||
1930 | /* | ||
1931 | * Make sure to wake the Target before enabling Legacy | ||
1932 | * Interrupt. | ||
1933 | */ | ||
1934 | iowrite32(PCIE_SOC_WAKE_V_MASK, | ||
1935 | ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + | ||
1936 | PCIE_SOC_WAKE_ADDRESS); | ||
1937 | |||
1938 | ath10k_pci_wait(ar); | ||
1939 | |||
1940 | /* | ||
1941 | * A potential race occurs here: The CORE_BASE write | ||
1942 | * depends on target correctly decoding AXI address but | ||
1943 | * host won't know when target writes BAR to CORE_CTRL. | ||
1944 | * This write might get lost if target has NOT written BAR. | ||
1945 | * For now, fix the race by repeating the write in the | ||
1946 | * synchronization check below. | ||
1947 | */ | ||
1948 | iowrite32(PCIE_INTR_FIRMWARE_MASK | | ||
1949 | PCIE_INTR_CE_MASK_ALL, | ||
1950 | ar_pci->mem + (SOC_CORE_BASE_ADDRESS | | ||
1951 | PCIE_INTR_ENABLE_ADDRESS)); | ||
1952 | iowrite32(PCIE_SOC_WAKE_RESET, | ||
1953 | ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + | ||
1954 | PCIE_SOC_WAKE_ADDRESS); | ||
1955 | |||
1956 | ath10k_info("legacy interrupt handling\n"); | ||
1957 | return 0; | ||
1958 | } | ||
1959 | |||
1960 | static int ath10k_pci_start_intr(struct ath10k *ar) | ||
1961 | { | ||
1962 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
1963 | int num = MSI_NUM_REQUEST; | ||
1964 | int ret; | ||
1965 | int i; | ||
1966 | |||
1967 | tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar); | ||
1968 | tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet, | ||
1969 | (unsigned long) ar); | ||
1970 | |||
1971 | for (i = 0; i < CE_COUNT; i++) { | ||
1972 | ar_pci->pipe_info[i].ar_pci = ar_pci; | ||
1973 | tasklet_init(&ar_pci->pipe_info[i].intr, | ||
1974 | ath10k_pci_ce_tasklet, | ||
1975 | (unsigned long)&ar_pci->pipe_info[i]); | ||
1976 | } | ||
1977 | |||
1978 | if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features)) | ||
1979 | num = 1; | ||
1980 | |||
1981 | if (num > 1) { | ||
1982 | ret = ath10k_pci_start_intr_msix(ar, num); | ||
1983 | if (ret == 0) | ||
1984 | goto exit; | ||
1985 | |||
1986 | ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret); | ||
1987 | num = 1; | ||
1988 | } | ||
1989 | |||
1990 | if (num == 1) { | ||
1991 | ret = ath10k_pci_start_intr_msi(ar); | ||
1992 | if (ret == 0) | ||
1993 | goto exit; | ||
1994 | |||
1995 | ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n", | ||
1996 | ret); | ||
1997 | num = 0; | ||
1998 | } | ||
1999 | |||
2000 | ret = ath10k_pci_start_intr_legacy(ar); | ||
2001 | |||
2002 | exit: | ||
2003 | ar_pci->num_msi_intrs = num; | ||
2004 | ar_pci->ce_count = CE_COUNT; | ||
2005 | return ret; | ||
2006 | } | ||
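For comparison only: on kernels that provide pci_alloc_irq_vectors() (an API that did not exist when this patch was written), the same multi-MSI -> single MSI -> legacy fallback chain can be expressed in one call. A hedged sketch, assuming the modern PCI IRQ API is available:

/* Sketch only: request up to MSI_NUM_REQUEST vectors and let the PCI core
 * fall back to a single MSI or a legacy INTx line automatically. */
static int ath10k_pci_alloc_irq_sketch(struct pci_dev *pdev)
{
        int nvec;

        nvec = pci_alloc_irq_vectors(pdev, 1, MSI_NUM_REQUEST,
                                     PCI_IRQ_MSIX | PCI_IRQ_MSI |
                                     PCI_IRQ_LEGACY);
        if (nvec < 0)
                return nvec;

        /* pci_irq_vector() maps a vector index to a Linux irq number. */
        return pci_irq_vector(pdev, 0);
}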
2007 | |||
2008 | static void ath10k_pci_stop_intr(struct ath10k *ar) | ||
2009 | { | ||
2010 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
2011 | int i; | ||
2012 | |||
2013 | /* There's at least one interrupt regardless of whether it's legacy INTR, | ||
2014 | * MSI or MSI-X */ | ||
2015 | for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) | ||
2016 | free_irq(ar_pci->pdev->irq + i, ar); | ||
2017 | |||
2018 | if (ar_pci->num_msi_intrs > 0) | ||
2019 | pci_disable_msi(ar_pci->pdev); | ||
2020 | } | ||
2021 | |||
2022 | static int ath10k_pci_reset_target(struct ath10k *ar) | ||
2023 | { | ||
2024 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
2025 | int wait_limit = 300; /* 3 sec */ | ||
2026 | |||
2027 | /* Wait for Target to finish initialization before we proceed. */ | ||
2028 | iowrite32(PCIE_SOC_WAKE_V_MASK, | ||
2029 | ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + | ||
2030 | PCIE_SOC_WAKE_ADDRESS); | ||
2031 | |||
2032 | ath10k_pci_wait(ar); | ||
2033 | |||
2034 | while (wait_limit-- && | ||
2035 | !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) & | ||
2036 | FW_IND_INITIALIZED)) { | ||
2037 | if (ar_pci->num_msi_intrs == 0) | ||
2038 | /* Fix potential race by repeating CORE_BASE writes */ | ||
2039 | iowrite32(PCIE_INTR_FIRMWARE_MASK | | ||
2040 | PCIE_INTR_CE_MASK_ALL, | ||
2041 | ar_pci->mem + (SOC_CORE_BASE_ADDRESS | | ||
2042 | PCIE_INTR_ENABLE_ADDRESS)); | ||
2043 | mdelay(10); | ||
2044 | } | ||
2045 | |||
2046 | if (wait_limit < 0) { | ||
2047 | ath10k_err("Target stalled\n"); | ||
2048 | iowrite32(PCIE_SOC_WAKE_RESET, | ||
2049 | ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + | ||
2050 | PCIE_SOC_WAKE_ADDRESS); | ||
2051 | return -EIO; | ||
2052 | } | ||
2053 | |||
2054 | iowrite32(PCIE_SOC_WAKE_RESET, | ||
2055 | ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + | ||
2056 | PCIE_SOC_WAKE_ADDRESS); | ||
2057 | |||
2058 | return 0; | ||
2059 | } | ||
2060 | |||
2061 | static void ath10k_pci_device_reset(struct ath10k_pci *ar_pci) | ||
2062 | { | ||
2063 | struct ath10k *ar = ar_pci->ar; | ||
2064 | void __iomem *mem = ar_pci->mem; | ||
2065 | int i; | ||
2066 | u32 val; | ||
2067 | |||
2068 | if (!SOC_GLOBAL_RESET_ADDRESS) | ||
2069 | return; | ||
2070 | |||
2071 | if (!mem) | ||
2072 | return; | ||
2073 | |||
2074 | ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, | ||
2075 | PCIE_SOC_WAKE_V_MASK); | ||
2076 | for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { | ||
2077 | if (ath10k_pci_target_is_awake(ar)) | ||
2078 | break; | ||
2079 | msleep(1); | ||
2080 | } | ||
2081 | |||
2082 | /* Put Target, including PCIe, into RESET. */ | ||
2083 | val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS); | ||
2084 | val |= 1; | ||
2085 | ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val); | ||
2086 | |||
2087 | for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { | ||
2088 | if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) & | ||
2089 | RTC_STATE_COLD_RESET_MASK) | ||
2090 | break; | ||
2091 | msleep(1); | ||
2092 | } | ||
2093 | |||
2094 | /* Pull Target, including PCIe, out of RESET. */ | ||
2095 | val &= ~1; | ||
2096 | ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val); | ||
2097 | |||
2098 | for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { | ||
2099 | if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) & | ||
2100 | RTC_STATE_COLD_RESET_MASK)) | ||
2101 | break; | ||
2102 | msleep(1); | ||
2103 | } | ||
2104 | |||
2105 | ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET); | ||
2106 | } | ||
2107 | |||
2108 | static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci) | ||
2109 | { | ||
2110 | int i; | ||
2111 | |||
2112 | for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) { | ||
2113 | if (!test_bit(i, ar_pci->features)) | ||
2114 | continue; | ||
2115 | |||
2116 | switch (i) { | ||
2117 | case ATH10K_PCI_FEATURE_MSI_X: | ||
2118 | ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n"); | ||
2119 | break; | ||
2120 | case ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND: | ||
2121 | ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n"); | ||
2122 | break; | ||
2123 | } | ||
2124 | } | ||
2125 | } | ||
2126 | |||
2127 | static int ath10k_pci_probe(struct pci_dev *pdev, | ||
2128 | const struct pci_device_id *pci_dev) | ||
2129 | { | ||
2130 | void __iomem *mem; | ||
2131 | int ret = 0; | ||
2132 | struct ath10k *ar; | ||
2133 | struct ath10k_pci *ar_pci; | ||
2134 | u32 lcr_val; | ||
2135 | |||
2136 | ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); | ||
2137 | |||
2138 | ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL); | ||
2139 | if (ar_pci == NULL) | ||
2140 | return -ENOMEM; | ||
2141 | |||
2142 | ar_pci->pdev = pdev; | ||
2143 | ar_pci->dev = &pdev->dev; | ||
2144 | |||
2145 | switch (pci_dev->device) { | ||
2146 | case QCA988X_1_0_DEVICE_ID: | ||
2147 | set_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features); | ||
2148 | break; | ||
2149 | case QCA988X_2_0_DEVICE_ID: | ||
2150 | set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features); | ||
2151 | break; | ||
2152 | default: | ||
2153 | ret = -ENODEV; | ||
2154 | ath10k_err("Unkown device ID: %d\n", pci_dev->device); | ||
2155 | goto err_ar_pci; | ||
2156 | } | ||
2157 | |||
2158 | ath10k_pci_dump_features(ar_pci); | ||
2159 | |||
2160 | ar = ath10k_core_create(ar_pci, ar_pci->dev, ATH10K_BUS_PCI, | ||
2161 | &ath10k_pci_hif_ops); | ||
2162 | if (!ar) { | ||
2163 | ath10k_err("ath10k_core_create failed!\n"); | ||
2164 | ret = -EINVAL; | ||
2165 | goto err_ar_pci; | ||
2166 | } | ||
2167 | |||
2168 | /* Enable QCA988X_1.0 HW workarounds */ | ||
2169 | if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) | ||
2170 | spin_lock_init(&ar_pci->hw_v1_workaround_lock); | ||
2171 | |||
2172 | ar_pci->ar = ar; | ||
2173 | ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS; | ||
2174 | atomic_set(&ar_pci->keep_awake_count, 0); | ||
2175 | |||
2176 | pci_set_drvdata(pdev, ar); | ||
2177 | |||
2178 | /* | ||
2179 | * Without any knowledge of the Host, the Target may have been reset or | ||
2180 | * power cycled and its Config Space may no longer reflect the PCI | ||
2181 | * address space that was assigned earlier by the PCI infrastructure. | ||
2182 | * Refresh it now. | ||
2183 | */ | ||
2184 | ret = pci_assign_resource(pdev, BAR_NUM); | ||
2185 | if (ret) { | ||
2186 | ath10k_err("cannot assign PCI space: %d\n", ret); | ||
2187 | goto err_ar; | ||
2188 | } | ||
2189 | |||
2190 | ret = pci_enable_device(pdev); | ||
2191 | if (ret) { | ||
2192 | ath10k_err("cannot enable PCI device: %d\n", ret); | ||
2193 | goto err_ar; | ||
2194 | } | ||
2195 | |||
2196 | /* Request MMIO resources */ | ||
2197 | ret = pci_request_region(pdev, BAR_NUM, "ath"); | ||
2198 | if (ret) { | ||
2199 | ath10k_err("PCI MMIO reservation error: %d\n", ret); | ||
2200 | goto err_device; | ||
2201 | } | ||
2202 | |||
2203 | /* | ||
2204 | * Target structures have a limit of 32 bit DMA pointers. | ||
2205 | * DMA pointers can be wider than 32 bits by default on some systems. | ||
2206 | */ | ||
2207 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
2208 | if (ret) { | ||
2209 | ath10k_err("32-bit DMA not available: %d\n", ret); | ||
2210 | goto err_region; | ||
2211 | } | ||
2212 | |||
2213 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
2214 | if (ret) { | ||
2215 | ath10k_err("cannot enable 32-bit consistent DMA\n"); | ||
2216 | goto err_region; | ||
2217 | } | ||
2218 | |||
2219 | /* Set bus master bit in PCI_COMMAND to enable DMA */ | ||
2220 | pci_set_master(pdev); | ||
2221 | |||
2222 | /* | ||
2223 | * Temporary FIX: disable ASPM | ||
2224 | * Will be removed after the OTP is programmed | ||
2225 | */ | ||
2226 | pci_read_config_dword(pdev, 0x80, &lcr_val); | ||
2227 | pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00)); | ||
2228 | |||
2229 | /* Arrange for access to Target SoC registers. */ | ||
2230 | mem = pci_iomap(pdev, BAR_NUM, 0); | ||
2231 | if (!mem) { | ||
2232 | ath10k_err("PCI iomap error\n"); | ||
2233 | ret = -EIO; | ||
2234 | goto err_master; | ||
2235 | } | ||
2236 | |||
2237 | ar_pci->mem = mem; | ||
2238 | |||
2239 | spin_lock_init(&ar_pci->ce_lock); | ||
2240 | |||
2241 | ar_pci->cacheline_sz = dma_get_cache_alignment(); | ||
2242 | |||
2243 | ret = ath10k_pci_start_intr(ar); | ||
2244 | if (ret) { | ||
2245 | ath10k_err("could not start interrupt handling (%d)\n", ret); | ||
2246 | goto err_iomap; | ||
2247 | } | ||
2248 | |||
2249 | /* | ||
2250 | * Bring the target up cleanly. | ||
2251 | * | ||
2252 | * The target may be in an undefined state with an AUX-powered Target | ||
2253 | * and a Host in WoW mode. If the Host crashes, loses power, or is | ||
2254 | * restarted (without unloading the driver) then the Target is left | ||
2255 | * (aux) powered and running. On a subsequent driver load, the Target | ||
2256 | * is in an unexpected state. We try to catch that here in order to | ||
2257 | * reset the Target and retry the probe. | ||
2258 | */ | ||
2259 | ath10k_pci_device_reset(ar_pci); | ||
2260 | |||
2261 | ret = ath10k_pci_reset_target(ar); | ||
2262 | if (ret) | ||
2263 | goto err_intr; | ||
2264 | |||
2265 | if (ath10k_target_ps) { | ||
2266 | ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n"); | ||
2267 | } else { | ||
2268 | /* Force AWAKE forever */ | ||
2269 | ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n"); | ||
2270 | ath10k_do_pci_wake(ar); | ||
2271 | } | ||
2272 | |||
2273 | ret = ath10k_pci_ce_init(ar); | ||
2274 | if (ret) | ||
2275 | goto err_intr; | ||
2276 | |||
2277 | ret = ath10k_pci_init_config(ar); | ||
2278 | if (ret) | ||
2279 | goto err_ce; | ||
2280 | |||
2281 | ret = ath10k_pci_wake_target_cpu(ar); | ||
2282 | if (ret) { | ||
2283 | ath10k_err("could not wake up target CPU (%d)\n", ret); | ||
2284 | goto err_ce; | ||
2285 | } | ||
2286 | |||
2287 | ret = ath10k_core_register(ar); | ||
2288 | if (ret) { | ||
2289 | ath10k_err("could not register driver core (%d)\n", ret); | ||
2290 | goto err_ce; | ||
2291 | } | ||
2292 | |||
2293 | return 0; | ||
2294 | |||
2295 | err_ce: | ||
2296 | ath10k_pci_ce_deinit(ar); | ||
2297 | err_intr: | ||
2298 | ath10k_pci_stop_intr(ar); | ||
2299 | err_iomap: | ||
2300 | pci_iounmap(pdev, mem); | ||
2301 | err_master: | ||
2302 | pci_clear_master(pdev); | ||
2303 | err_region: | ||
2304 | pci_release_region(pdev, BAR_NUM); | ||
2305 | err_device: | ||
2306 | pci_disable_device(pdev); | ||
2307 | err_ar: | ||
2308 | pci_set_drvdata(pdev, NULL); | ||
2309 | ath10k_core_destroy(ar); | ||
2310 | err_ar_pci: | ||
2311 | /* call HIF PCI free here */ | ||
2312 | kfree(ar_pci); | ||
2313 | |||
2314 | return ret; | ||
2315 | } | ||
2316 | |||
2317 | static void ath10k_pci_remove(struct pci_dev *pdev) | ||
2318 | { | ||
2319 | struct ath10k *ar = pci_get_drvdata(pdev); | ||
2320 | struct ath10k_pci *ar_pci; | ||
2321 | |||
2322 | ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); | ||
2323 | |||
2324 | if (!ar) | ||
2325 | return; | ||
2326 | |||
2327 | ar_pci = ath10k_pci_priv(ar); | ||
2328 | |||
2329 | if (!ar_pci) | ||
2330 | return; | ||
2331 | |||
2332 | tasklet_kill(&ar_pci->msi_fw_err); | ||
2333 | |||
2334 | ath10k_core_unregister(ar); | ||
2335 | ath10k_pci_stop_intr(ar); | ||
2336 | |||
2337 | pci_set_drvdata(pdev, NULL); | ||
2338 | pci_iounmap(pdev, ar_pci->mem); | ||
2339 | pci_release_region(pdev, BAR_NUM); | ||
2340 | pci_clear_master(pdev); | ||
2341 | pci_disable_device(pdev); | ||
2342 | |||
2343 | ath10k_core_destroy(ar); | ||
2344 | kfree(ar_pci); | ||
2345 | } | ||
2346 | |||
2347 | #if defined(CONFIG_PM_SLEEP) | ||
2348 | |||
2349 | #define ATH10K_PCI_PM_CONTROL 0x44 | ||
2350 | |||
2351 | static int ath10k_pci_suspend(struct device *device) | ||
2352 | { | ||
2353 | struct pci_dev *pdev = to_pci_dev(device); | ||
2354 | struct ath10k *ar = pci_get_drvdata(pdev); | ||
2355 | struct ath10k_pci *ar_pci; | ||
2356 | u32 val; | ||
2357 | int ret, retval; | ||
2358 | |||
2359 | ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); | ||
2360 | |||
2361 | if (!ar) | ||
2362 | return -ENODEV; | ||
2363 | |||
2364 | ar_pci = ath10k_pci_priv(ar); | ||
2365 | if (!ar_pci) | ||
2366 | return -ENODEV; | ||
2367 | |||
2368 | if (ath10k_core_target_suspend(ar)) | ||
2369 | return -EBUSY; | ||
2370 | |||
2371 | ret = wait_event_interruptible_timeout(ar->event_queue, | ||
2372 | ar->is_target_paused == true, | ||
2373 | 1 * HZ); | ||
2374 | if (ret < 0) { | ||
2375 | ath10k_warn("suspend interrupted (%d)\n", ret); | ||
2376 | retval = ret; | ||
2377 | goto resume; | ||
2378 | } else if (ret == 0) { | ||
2379 | ath10k_warn("suspend timed out - target pause event never came\n"); | ||
2380 | retval = -EIO; | ||
2381 | goto resume; | ||
2382 | } | ||
2383 | |||
2384 | /* | ||
2385 | * Reset is_target_paused so the host checks it again on the | ||
2386 | * next suspend; otherwise it stays true, the host skips the | ||
2387 | * wait above, and the target asserts because the host has | ||
2388 | * already suspended. | ||
2389 | */ | ||
2390 | ar->is_target_paused = false; | ||
2391 | |||
2392 | pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val); | ||
2393 | |||
2394 | if ((val & 0x000000ff) != 0x3) { | ||
2395 | pci_save_state(pdev); | ||
2396 | pci_disable_device(pdev); | ||
2397 | pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL, | ||
2398 | (val & 0xffffff00) | 0x03); | ||
2399 | } | ||
2400 | |||
2401 | return 0; | ||
2402 | resume: | ||
2403 | ret = ath10k_core_target_resume(ar); | ||
2404 | if (ret) | ||
2405 | ath10k_warn("could not resume (%d)\n", ret); | ||
2406 | |||
2407 | return retval; | ||
2408 | } | ||
2409 | |||
2410 | static int ath10k_pci_resume(struct device *device) | ||
2411 | { | ||
2412 | struct pci_dev *pdev = to_pci_dev(device); | ||
2413 | struct ath10k *ar = pci_get_drvdata(pdev); | ||
2414 | struct ath10k_pci *ar_pci; | ||
2415 | int ret; | ||
2416 | u32 val; | ||
2417 | |||
2418 | ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); | ||
2419 | |||
2420 | if (!ar) | ||
2421 | return -ENODEV; | ||
2422 | ar_pci = ath10k_pci_priv(ar); | ||
2423 | |||
2424 | if (!ar_pci) | ||
2425 | return -ENODEV; | ||
2426 | |||
2427 | ret = pci_enable_device(pdev); | ||
2428 | if (ret) { | ||
2429 | ath10k_warn("cannot enable PCI device: %d\n", ret); | ||
2430 | return ret; | ||
2431 | } | ||
2432 | |||
2433 | pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val); | ||
2434 | |||
2435 | if ((val & 0x000000ff) != 0) { | ||
2436 | pci_restore_state(pdev); | ||
2437 | pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL, | ||
2438 | val & 0xffffff00); | ||
2439 | /* | ||
2440 | * Suspend/Resume resets the PCI configuration space, | ||
2441 | * so we have to re-disable the RETRY_TIMEOUT register (0x41) | ||
2442 | * to keep PCI Tx retries from interfering with C3 CPU state | ||
2443 | */ | ||
2444 | pci_read_config_dword(pdev, 0x40, &val); | ||
2445 | |||
2446 | if ((val & 0x0000ff00) != 0) | ||
2447 | pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); | ||
2448 | } | ||
2449 | |||
2450 | ret = ath10k_core_target_resume(ar); | ||
2451 | if (ret) | ||
2452 | ath10k_warn("target resume failed: %d\n", ret); | ||
2453 | |||
2454 | return ret; | ||
2455 | } | ||
2456 | |||
2457 | static SIMPLE_DEV_PM_OPS(ath10k_dev_pm_ops, | ||
2458 | ath10k_pci_suspend, | ||
2459 | ath10k_pci_resume); | ||
2460 | |||
2461 | #define ATH10K_PCI_PM_OPS (&ath10k_dev_pm_ops) | ||
2462 | |||
2463 | #else | ||
2464 | |||
2465 | #define ATH10K_PCI_PM_OPS NULL | ||
2466 | |||
2467 | #endif /* CONFIG_PM_SLEEP */ | ||
2468 | |||
2469 | MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); | ||
2470 | |||
2471 | static struct pci_driver ath10k_pci_driver = { | ||
2472 | .name = "ath10k_pci", | ||
2473 | .id_table = ath10k_pci_id_table, | ||
2474 | .probe = ath10k_pci_probe, | ||
2475 | .remove = ath10k_pci_remove, | ||
2476 | .driver.pm = ATH10K_PCI_PM_OPS, | ||
2477 | }; | ||
2478 | |||
2479 | static int __init ath10k_pci_init(void) | ||
2480 | { | ||
2481 | int ret; | ||
2482 | |||
2483 | ret = pci_register_driver(&ath10k_pci_driver); | ||
2484 | if (ret) | ||
2485 | ath10k_err("pci_register_driver failed [%d]\n", ret); | ||
2486 | |||
2487 | return ret; | ||
2488 | } | ||
2489 | module_init(ath10k_pci_init); | ||
2490 | |||
2491 | static void __exit ath10k_pci_exit(void) | ||
2492 | { | ||
2493 | pci_unregister_driver(&ath10k_pci_driver); | ||
2494 | } | ||
2495 | |||
2496 | module_exit(ath10k_pci_exit); | ||
2497 | |||
2498 | MODULE_AUTHOR("Qualcomm Atheros"); | ||
2499 | MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); | ||
2500 | MODULE_LICENSE("Dual BSD/GPL"); | ||
2501 | MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE); | ||
2502 | MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE); | ||
2503 | MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE); | ||
2504 | MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE); | ||
2505 | MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE); | ||
2506 | MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); | ||
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h new file mode 100644 index 000000000000..d2a055a07dc6 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/pci.h | |||
@@ -0,0 +1,355 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #ifndef _PCI_H_ | ||
19 | #define _PCI_H_ | ||
20 | |||
21 | #include <linux/interrupt.h> | ||
22 | |||
23 | #include "hw.h" | ||
24 | #include "ce.h" | ||
25 | |||
26 | /* FW dump area */ | ||
27 | #define REG_DUMP_COUNT_QCA988X 60 | ||
28 | |||
29 | /* | ||
30 | * maximum number of bytes that can be handled atomically by DiagRead/DiagWrite | ||
31 | */ | ||
32 | #define DIAG_TRANSFER_LIMIT 2048 | ||
33 | |||
34 | /* | ||
35 | * maximum number of bytes that can be | ||
36 | * handled atomically by DiagRead/DiagWrite | ||
37 | */ | ||
38 | #define DIAG_TRANSFER_LIMIT 2048 | ||
39 | |||
40 | struct bmi_xfer { | ||
41 | struct completion done; | ||
42 | bool wait_for_resp; | ||
43 | u32 resp_len; | ||
44 | }; | ||
45 | |||
46 | struct ath10k_pci_compl { | ||
47 | struct list_head list; | ||
48 | int send_or_recv; | ||
49 | struct ce_state *ce_state; | ||
50 | struct hif_ce_pipe_info *pipe_info; | ||
51 | void *transfer_context; | ||
52 | unsigned int nbytes; | ||
53 | unsigned int transfer_id; | ||
54 | unsigned int flags; | ||
55 | }; | ||
56 | |||
57 | /* compl_state.send_or_recv */ | ||
58 | #define HIF_CE_COMPLETE_FREE 0 | ||
59 | #define HIF_CE_COMPLETE_SEND 1 | ||
60 | #define HIF_CE_COMPLETE_RECV 2 | ||
61 | |||
62 | /* | ||
63 | * PCI-specific Target state | ||
64 | * | ||
65 | * NOTE: Structure is shared between Host software and Target firmware! | ||
66 | * | ||
67 | * Much of this may be of interest to the Host so | ||
68 | * HOST_INTEREST->hi_interconnect_state points here | ||
69 | * (and all members are 32-bit quantities in order to | ||
70 | * facilitate Host access). In particular, Host software is | ||
71 | * required to initialize pipe_cfg_addr and svc_to_pipe_map. | ||
72 | */ | ||
73 | struct pcie_state { | ||
74 | /* Pipe configuration Target address */ | ||
75 | /* NB: ce_pipe_config[CE_COUNT] */ | ||
76 | u32 pipe_cfg_addr; | ||
77 | |||
78 | /* Service to pipe map Target address */ | ||
79 | /* NB: service_to_pipe[PIPE_TO_CE_MAP_CN] */ | ||
80 | u32 svc_to_pipe_map; | ||
81 | |||
82 | /* number of MSI interrupts requested */ | ||
83 | u32 msi_requested; | ||
84 | |||
85 | /* number of MSI interrupts granted */ | ||
86 | u32 msi_granted; | ||
87 | |||
88 | /* Message Signalled Interrupt address */ | ||
89 | u32 msi_addr; | ||
90 | |||
91 | /* Base data */ | ||
92 | u32 msi_data; | ||
93 | |||
94 | /* | ||
95 | * Data for firmware interrupt; | ||
96 | * MSI data for other interrupts are | ||
97 | * in various SoC registers | ||
98 | */ | ||
99 | u32 msi_fw_intr_data; | ||
100 | |||
101 | /* PCIE_PWR_METHOD_* */ | ||
102 | u32 power_mgmt_method; | ||
103 | |||
104 | /* PCIE_CONFIG_FLAG_* */ | ||
105 | u32 config_flags; | ||
106 | }; | ||
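As the comment above notes, the Host is required to initialize pipe_cfg_addr and svc_to_pipe_map in this shared structure. A rough sketch of that step, assuming a diagnostic write helper of the shape ath10k_pci_diag_write32(ar, target_addr, value) (a hypothetical name; the real diagnostic helpers live elsewhere in pci.c) and Target addresses obtained earlier during init:

/* Sketch: publish the Target-side addresses of the pipe configuration
 * table and the service-to-pipe map into the shared pcie_state struct.
 * pcie_state_addr, pipe_cfg_addr and svc_to_pipe_addr are assumed to be
 * Target addresses already known to the Host. */
static int ath10k_pci_publish_pcie_state(struct ath10k *ar,
                                         u32 pcie_state_addr,
                                         u32 pipe_cfg_addr,
                                         u32 svc_to_pipe_addr)
{
        int ret;

        ret = ath10k_pci_diag_write32(ar, pcie_state_addr +
                                      offsetof(struct pcie_state,
                                               pipe_cfg_addr),
                                      pipe_cfg_addr);
        if (ret)
                return ret;

        return ath10k_pci_diag_write32(ar, pcie_state_addr +
                                       offsetof(struct pcie_state,
                                                svc_to_pipe_map),
                                       svc_to_pipe_addr);
}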
107 | |||
108 | /* PCIE_CONFIG_FLAG definitions */ | ||
109 | #define PCIE_CONFIG_FLAG_ENABLE_L1 0x0000001 | ||
110 | |||
111 | /* Host software's Copy Engine configuration. */ | ||
112 | #define CE_ATTR_FLAGS 0 | ||
113 | |||
114 | /* | ||
115 | * Configuration information for a Copy Engine pipe. | ||
116 | * Passed from Host to Target during startup (one per CE). | ||
117 | * | ||
118 | * NOTE: Structure is shared between Host software and Target firmware! | ||
119 | */ | ||
120 | struct ce_pipe_config { | ||
121 | u32 pipenum; | ||
122 | u32 pipedir; | ||
123 | u32 nentries; | ||
124 | u32 nbytes_max; | ||
125 | u32 flags; | ||
126 | u32 reserved; | ||
127 | }; | ||
128 | |||
129 | /* | ||
130 | * Directions for interconnect pipe configuration. | ||
131 | * These definitions may be used during configuration and are shared | ||
132 | * between Host and Target. | ||
133 | * | ||
134 | * Pipe Directions are relative to the Host, so PIPEDIR_IN means | ||
135 | * "coming IN over air through Target to Host" as with a WiFi Rx operation. | ||
136 | * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air" | ||
137 | * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man" | ||
138 | * Target since things that are "PIPEDIR_OUT" are coming IN to the Target | ||
139 | * over the interconnect. | ||
140 | */ | ||
141 | #define PIPEDIR_NONE 0 | ||
142 | #define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */ | ||
143 | #define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */ | ||
144 | #define PIPEDIR_INOUT 3 /* bidirectional */ | ||
145 | |||
146 | /* Establish a mapping between a service/direction and a pipe. */ | ||
147 | struct service_to_pipe { | ||
148 | u32 service_id; | ||
149 | u32 pipedir; | ||
150 | u32 pipenum; | ||
151 | }; | ||
152 | |||
153 | enum ath10k_pci_features { | ||
154 | ATH10K_PCI_FEATURE_MSI_X = 0, | ||
155 | ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND = 1, | ||
156 | |||
157 | /* keep last */ | ||
158 | ATH10K_PCI_FEATURE_COUNT | ||
159 | }; | ||
160 | |||
161 | /* Per-pipe state. */ | ||
162 | struct hif_ce_pipe_info { | ||
163 | /* Handle of underlying Copy Engine */ | ||
164 | struct ce_state *ce_hdl; | ||
165 | |||
166 | /* Our pipe number; facilitates use of pipe_info ptrs. */ | ||
167 | u8 pipe_num; | ||
168 | |||
169 | /* Convenience back pointer to hif_ce_state. */ | ||
170 | struct ath10k *hif_ce_state; | ||
171 | |||
172 | size_t buf_sz; | ||
173 | |||
174 | /* protects compl_free and num_send_allowed */ | ||
175 | spinlock_t pipe_lock; | ||
176 | |||
177 | /* List of free CE completion slots */ | ||
178 | struct list_head compl_free; | ||
179 | |||
180 | /* Limit the number of outstanding send requests. */ | ||
181 | int num_sends_allowed; | ||
182 | |||
183 | struct ath10k_pci *ar_pci; | ||
184 | struct tasklet_struct intr; | ||
185 | }; | ||
186 | |||
187 | struct ath10k_pci { | ||
188 | struct pci_dev *pdev; | ||
189 | struct device *dev; | ||
190 | struct ath10k *ar; | ||
191 | void __iomem *mem; | ||
192 | int cacheline_sz; | ||
193 | |||
194 | DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT); | ||
195 | |||
196 | /* | ||
197 | * Number of MSI interrupts granted, 0 --> using legacy PCI line | ||
198 | * interrupts. | ||
199 | */ | ||
200 | int num_msi_intrs; | ||
201 | |||
202 | struct tasklet_struct intr_tq; | ||
203 | struct tasklet_struct msi_fw_err; | ||
204 | |||
205 | /* Number of Copy Engines supported */ | ||
206 | unsigned int ce_count; | ||
207 | |||
208 | int started; | ||
209 | |||
210 | atomic_t keep_awake_count; | ||
211 | bool verified_awake; | ||
212 | |||
213 | /* List of CE completions to be processed */ | ||
214 | struct list_head compl_process; | ||
215 | |||
216 | /* protects compl_processing and compl_process */ | ||
217 | spinlock_t compl_lock; | ||
218 | |||
219 | bool compl_processing; | ||
220 | |||
221 | struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX]; | ||
222 | |||
223 | struct ath10k_hif_cb msg_callbacks_current; | ||
224 | |||
225 | /* Target address used to signal a pending firmware event */ | ||
226 | u32 fw_indicator_address; | ||
227 | |||
228 | /* Copy Engine used for Diagnostic Accesses */ | ||
229 | struct ce_state *ce_diag; | ||
230 | |||
231 | /* FIXME: document what this really protects */ | ||
232 | spinlock_t ce_lock; | ||
233 | |||
234 | /* Map CE id to ce_state */ | ||
235 | struct ce_state *ce_id_to_state[CE_COUNT_MAX]; | ||
236 | |||
237 | /* makes sure that dummy reads are atomic */ | ||
238 | spinlock_t hw_v1_workaround_lock; | ||
239 | }; | ||
240 | |||
241 | static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) | ||
242 | { | ||
243 | return ar->hif.priv; | ||
244 | } | ||
245 | |||
246 | static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr) | ||
247 | { | ||
248 | return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr); | ||
249 | } | ||
250 | |||
251 | static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val) | ||
252 | { | ||
253 | iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr); | ||
254 | } | ||
255 | |||
256 | #define ATH_PCI_RESET_WAIT_MAX 10 /* ms */ | ||
257 | #define PCIE_WAKE_TIMEOUT 5000 /* 5ms */ | ||
258 | |||
259 | #define BAR_NUM 0 | ||
260 | |||
261 | #define CDC_WAR_MAGIC_STR 0xceef0000 | ||
262 | #define CDC_WAR_DATA_CE 4 | ||
263 | |||
264 | /* | ||
265 | * TODO: Should be a function call specific to each Target-type. | ||
266 | * This convoluted macro converts from Target CPU Virtual Address Space to CE | ||
267 | * Address Space. As part of this process, we conservatively fetch the current | ||
268 | * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space | ||
269 | * for this device; but that's not guaranteed. | ||
270 | */ | ||
271 | #define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \ | ||
272 | (((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS| \ | ||
273 | CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \ | ||
274 | 0x100000 | ((addr) & 0xfffff)) | ||
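The TODO above suggests turning this macro into a function call. A minimal sketch of what such a static inline could look like (same arithmetic as the macro, shown only for readability; the name is hypothetical):

/* Sketch of the macro above as a static inline: read the current PCIE_BAR
 * value from CORE_CTRL and splice it with the low 20 bits of the Target
 * CPU address. */
static inline u32 ath10k_pci_targ_cpu_to_ce_addr_sketch(void __iomem *pci_addr,
                                                        u32 addr)
{
        u32 bar = ioread32(pci_addr + (SOC_CORE_BASE_ADDRESS |
                                       CORE_CTRL_ADDRESS));

        return ((bar & 0x7ff) << 21) | 0x100000 | (addr & 0xfffff);
}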
275 | |||
276 | /* Wait up to this many ms for a Diagnostic Access CE operation to complete */ | ||
277 | #define DIAG_ACCESS_CE_TIMEOUT_MS 10 | ||
278 | |||
279 | /* | ||
280 | * This API allows the Host to access Target registers directly | ||
281 | * and relatively efficiently over PCIe. | ||
282 | * This allows the Host to avoid extra overhead associated with | ||
283 | * sending a message to firmware and waiting for a response message | ||
284 | * from firmware, as is done on other interconnects. | ||
285 | * | ||
286 | * Yet there is some complexity with direct accesses because the | ||
287 | * Target's power state is not known a priori. The Host must issue | ||
288 | * special PCIe reads/writes in order to explicitly wake the Target | ||
289 | * and to verify that it is awake and will remain awake. | ||
290 | * | ||
291 | * Usage: | ||
292 | * | ||
293 | * Use ath10k_pci_read32 and ath10k_pci_write32 to access Target space. | ||
294 | * These calls must be bracketed by ath10k_pci_wake and | ||
295 | * ath10k_pci_sleep. A single BEGIN/END pair is adequate for | ||
296 | * multiple READ/WRITE operations. | ||
297 | * | ||
298 | * Use ath10k_pci_wake to put the Target in a state in | ||
299 | * which it is legal for the Host to directly access it. This | ||
300 | * may involve waking the Target from a low power state, which | ||
301 | * may take up to 2 ms! | ||
302 | * | ||
303 | * Use ath10k_pci_sleep to tell the Target that as far as | ||
304 | * this code path is concerned, it no longer needs to remain | ||
305 | * directly accessible. BEGIN/END is under a reference counter; | ||
306 | * multiple code paths may issue BEGIN/END on a single targid. | ||
307 | */ | ||
308 | static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, | ||
309 | u32 value) | ||
310 | { | ||
311 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
312 | void __iomem *addr = ar_pci->mem; | ||
313 | |||
314 | if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) { | ||
315 | unsigned long irq_flags; | ||
316 | |||
317 | spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags); | ||
318 | |||
319 | ioread32(addr+offset+4); /* 3rd read prior to write */ | ||
320 | ioread32(addr+offset+4); /* 2nd read prior to write */ | ||
321 | ioread32(addr+offset+4); /* 1st read prior to write */ | ||
322 | iowrite32(value, addr+offset); | ||
323 | |||
324 | spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock, | ||
325 | irq_flags); | ||
326 | } else { | ||
327 | iowrite32(value, addr+offset); | ||
328 | } | ||
329 | } | ||
330 | |||
331 | static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) | ||
332 | { | ||
333 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | ||
334 | |||
335 | return ioread32(ar_pci->mem + offset); | ||
336 | } | ||
337 | |||
338 | extern unsigned int ath10k_target_ps; | ||
339 | |||
340 | void ath10k_do_pci_wake(struct ath10k *ar); | ||
341 | void ath10k_do_pci_sleep(struct ath10k *ar); | ||
342 | |||
343 | static inline void ath10k_pci_wake(struct ath10k *ar) | ||
344 | { | ||
345 | if (ath10k_target_ps) | ||
346 | ath10k_do_pci_wake(ar); | ||
347 | } | ||
348 | |||
349 | static inline void ath10k_pci_sleep(struct ath10k *ar) | ||
350 | { | ||
351 | if (ath10k_target_ps) | ||
352 | ath10k_do_pci_sleep(ar); | ||
353 | } | ||
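A minimal usage sketch of the wake/sleep bracketing described in the comment block above (illustrative only; the helper name is hypothetical, and FW_INDICATOR_ADDRESS is just one register that pci.c reads this way):

/* Sketch: one wake/sleep bracket is enough for several register accesses. */
static inline u32 ath10k_pci_read_fw_indicator_sketch(struct ath10k *ar)
{
        u32 val;

        ath10k_pci_wake(ar);            /* BEGIN: make the Target accessible */
        val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
        ath10k_pci_sleep(ar);           /* END: drop our keep-awake reference */

        return val;
}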
354 | |||
355 | #endif /* _PCI_H_ */ | ||
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h new file mode 100644 index 000000000000..bfec6c8f2ecb --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/rx_desc.h | |||
@@ -0,0 +1,990 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #ifndef _RX_DESC_H_ | ||
19 | #define _RX_DESC_H_ | ||
20 | |||
21 | enum rx_attention_flags { | ||
22 | RX_ATTENTION_FLAGS_FIRST_MPDU = 1 << 0, | ||
23 | RX_ATTENTION_FLAGS_LAST_MPDU = 1 << 1, | ||
24 | RX_ATTENTION_FLAGS_MCAST_BCAST = 1 << 2, | ||
25 | RX_ATTENTION_FLAGS_PEER_IDX_INVALID = 1 << 3, | ||
26 | RX_ATTENTION_FLAGS_PEER_IDX_TIMEOUT = 1 << 4, | ||
27 | RX_ATTENTION_FLAGS_POWER_MGMT = 1 << 5, | ||
28 | RX_ATTENTION_FLAGS_NON_QOS = 1 << 6, | ||
29 | RX_ATTENTION_FLAGS_NULL_DATA = 1 << 7, | ||
30 | RX_ATTENTION_FLAGS_MGMT_TYPE = 1 << 8, | ||
31 | RX_ATTENTION_FLAGS_CTRL_TYPE = 1 << 9, | ||
32 | RX_ATTENTION_FLAGS_MORE_DATA = 1 << 10, | ||
33 | RX_ATTENTION_FLAGS_EOSP = 1 << 11, | ||
34 | RX_ATTENTION_FLAGS_U_APSD_TRIGGER = 1 << 12, | ||
35 | RX_ATTENTION_FLAGS_FRAGMENT = 1 << 13, | ||
36 | RX_ATTENTION_FLAGS_ORDER = 1 << 14, | ||
37 | RX_ATTENTION_FLAGS_CLASSIFICATION = 1 << 15, | ||
38 | RX_ATTENTION_FLAGS_OVERFLOW_ERR = 1 << 16, | ||
39 | RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR = 1 << 17, | ||
40 | RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL = 1 << 18, | ||
41 | RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL = 1 << 19, | ||
42 | RX_ATTENTION_FLAGS_SA_IDX_INVALID = 1 << 20, | ||
43 | RX_ATTENTION_FLAGS_DA_IDX_INVALID = 1 << 21, | ||
44 | RX_ATTENTION_FLAGS_SA_IDX_TIMEOUT = 1 << 22, | ||
45 | RX_ATTENTION_FLAGS_DA_IDX_TIMEOUT = 1 << 23, | ||
46 | RX_ATTENTION_FLAGS_ENCRYPT_REQUIRED = 1 << 24, | ||
47 | RX_ATTENTION_FLAGS_DIRECTED = 1 << 25, | ||
48 | RX_ATTENTION_FLAGS_BUFFER_FRAGMENT = 1 << 26, | ||
49 | RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR = 1 << 27, | ||
50 | RX_ATTENTION_FLAGS_TKIP_MIC_ERR = 1 << 28, | ||
51 | RX_ATTENTION_FLAGS_DECRYPT_ERR = 1 << 29, | ||
52 | RX_ATTENTION_FLAGS_FCS_ERR = 1 << 30, | ||
53 | RX_ATTENTION_FLAGS_MSDU_DONE = 1 << 31, | ||
54 | }; | ||
55 | |||
56 | struct rx_attention { | ||
57 | __le32 flags; /* %RX_ATTENTION_FLAGS_ */ | ||
58 | } __packed; | ||
59 | |||
60 | /* | ||
61 | * first_mpdu | ||
62 | * Indicates the first MSDU of the PPDU. If both first_mpdu | ||
63 | * and last_mpdu are set in the MSDU then this is not an | ||
64 | * A-MPDU frame but a standalone MPDU. Interior MPDUs in an | ||
65 | * A-MPDU shall have both first_mpdu and last_mpdu bits set to | ||
66 | * 0. The PPDU start status will only be valid when this bit | ||
67 | * is set. | ||
68 | * | ||
69 | * last_mpdu | ||
70 | * Indicates the last MSDU of the last MPDU of the PPDU. The | ||
71 | * PPDU end status will only be valid when this bit is set. | ||
72 | * | ||
73 | * mcast_bcast | ||
74 | * Multicast / broadcast indicator. Only set when the MAC | ||
75 | * address 1 bit 0 is set indicating mcast/bcast and the BSSID | ||
76 | * matches one of the 4 BSSID registers. Only set when | ||
77 | * first_msdu is set. | ||
78 | * | ||
79 | * peer_idx_invalid | ||
80 | * Indicates no matching entries within the max search | ||
81 | * count. Only set when first_msdu is set. | ||
82 | * | ||
83 | * peer_idx_timeout | ||
84 | * Indicates an unsuccessful search for the peer index due to | ||
85 | * timeout. Only set when first_msdu is set. | ||
86 | * | ||
87 | * power_mgmt | ||
88 | * Power management bit set in the 802.11 header. Only set | ||
89 | * when first_msdu is set. | ||
90 | * | ||
91 | * non_qos | ||
92 | * Set if packet is not a QoS data frame. Only set when | ||
93 | * first_msdu is set. | ||
94 | * | ||
95 | * null_data | ||
96 | * Set if frame type indicates either null data or QoS null | ||
97 | * data format. Only set when first_msdu is set. | ||
98 | * | ||
99 | * mgmt_type | ||
100 | * Set if packet is a management packet. Only set when | ||
101 | * first_msdu is set. | ||
102 | * | ||
103 | * ctrl_type | ||
104 | * Set if packet is a control packet. Only set when first_msdu | ||
105 | * is set. | ||
106 | * | ||
107 | * more_data | ||
108 | * Set if more bit in frame control is set. Only set when | ||
109 | * first_msdu is set. | ||
110 | * | ||
111 | * eosp | ||
112 | * Set if the EOSP (end of service period) bit in the QoS | ||
113 | * control field is set. Only set when first_msdu is set. | ||
114 | * | ||
115 | * u_apsd_trigger | ||
116 | * Set if packet is U-APSD trigger. Key table will have bits | ||
117 | * per TID to indicate U-APSD trigger. | ||
118 | * | ||
119 | * fragment | ||
120 | * Indicates that this is an 802.11 fragment frame. This is | ||
121 | * set when either the more_frag bit is set in the frame | ||
122 | * control or the fragment number is not zero. Only set when | ||
123 | * first_msdu is set. | ||
124 | * | ||
125 | * order | ||
126 | * Set if the order bit in the frame control is set. Only set | ||
127 | * when first_msdu is set. | ||
128 | * | ||
129 | * classification | ||
130 | * Indicates that this status has a corresponding MSDU that | ||
131 | * requires FW processing. The OLE will have classification | ||
132 | * ring mask registers which will indicate the ring(s) for | ||
133 | * packets and descriptors which need FW attention. | ||
134 | * | ||
135 | * overflow_err | ||
136 | * PCU Receive FIFO does not have enough space to store the | ||
137 | * full receive packet. Enough space is reserved in the | ||
138 | * receive FIFO so the status can still be written. This MPDU | ||
139 | * and the remaining packets in the PPDU will be filtered and | ||
140 | * no Ack response will be transmitted. | ||
141 | * | ||
142 | * msdu_length_err | ||
143 | * Indicates that the MSDU length from the 802.3 encapsulated | ||
144 | * length field extends beyond the MPDU boundary. | ||
145 | * | ||
146 | * tcp_udp_chksum_fail | ||
147 | * Indicates that the computed checksum (tcp_udp_chksum) did | ||
148 | * not match the checksum in the TCP/UDP header. | ||
149 | * | ||
150 | * ip_chksum_fail | ||
151 | * Indicates that the computed checksum did not match the | ||
152 | * checksum in the IP header. | ||
153 | * | ||
154 | * sa_idx_invalid | ||
155 | * Indicates no matching entry was found in the address search | ||
156 | * table for the source MAC address. | ||
157 | * | ||
158 | * da_idx_invalid | ||
159 | * Indicates no matching entry was found in the address search | ||
160 | * table for the destination MAC address. | ||
161 | * | ||
162 | * sa_idx_timeout | ||
163 | * Indicates an unsuccessful search for the source MAC address | ||
164 | * due to the expiring of the search timer. | ||
165 | * | ||
166 | * da_idx_timeout | ||
167 | * Indicates an unsuccessful search for the destination MAC | ||
168 | * address due to the expiring of the search timer. | ||
169 | * | ||
170 | * encrypt_required | ||
171 | * Indicates that this data type frame is not encrypted even if | ||
172 | * the policy for this MPDU requires encryption as indicated in | ||
173 | * the peer table key type. | ||
174 | * | ||
175 | * directed | ||
176 | * MPDU is a directed packet which means that the RA matched | ||
177 | * our STA addresses. In proxySTA it means that the TA matched | ||
178 | * an entry in our address search table with the corresponding | ||
179 | * 'no_ack' bit in the address search entry cleared. | ||
180 | * | ||
181 | * buffer_fragment | ||
182 | * Indicates that at least one of the rx buffers has been | ||
183 | * fragmented. If set the FW should look at the rx_frag_info | ||
184 | * descriptor described below. | ||
185 | * | ||
186 | * mpdu_length_err | ||
187 | * Indicates that the MPDU was prematurely terminated | ||
188 | * resulting in a truncated MPDU. Don't trust the MPDU length | ||
189 | * field. | ||
190 | * | ||
191 | * tkip_mic_err | ||
192 | * Indicates that the MPDU Michael integrity check failed | ||
193 | * | ||
194 | * decrypt_err | ||
195 | * Indicates that the MPDU decrypt integrity check failed | ||
196 | * | ||
197 | * fcs_err | ||
198 | * Indicates that the MPDU FCS check failed | ||
199 | * | ||
200 | * msdu_done | ||
201 | * If set indicates that the RX packet data, RX header data, RX | ||
202 | * PPDU start descriptor, RX MPDU start/end descriptor, RX MSDU | ||
203 | * start/end descriptors and RX Attention descriptor are all | ||
204 | * valid. This bit must be in the last octet of the | ||
205 | * descriptor. | ||
206 | */ | ||
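For clarity, a short hedged example of how a host driver might test these attention bits; the helper name is hypothetical, and since rx_attention.flags is little-endian it has to be converted before masking:

/* Sketch: check that the descriptor is complete and the frame passed
 * FCS, decryption and TKIP MIC checks. */
static bool ath10k_rx_attention_frame_ok_sketch(struct rx_attention *attn)
{
        u32 flags = __le32_to_cpu(attn->flags);

        if (!(flags & RX_ATTENTION_FLAGS_MSDU_DONE))
                return false;   /* descriptor not fully written yet */

        return !(flags & (RX_ATTENTION_FLAGS_FCS_ERR |
                          RX_ATTENTION_FLAGS_DECRYPT_ERR |
                          RX_ATTENTION_FLAGS_TKIP_MIC_ERR));
}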
207 | |||
208 | struct rx_frag_info { | ||
209 | u8 ring0_more_count; | ||
210 | u8 ring1_more_count; | ||
211 | u8 ring2_more_count; | ||
212 | u8 ring3_more_count; | ||
213 | } __packed; | ||
214 | |||
215 | /* | ||
216 | * ring0_more_count | ||
217 | * Indicates the number of more buffers associated with RX DMA | ||
218 | * ring 0. Field is filled in by the RX_DMA. | ||
219 | * | ||
220 | * ring1_more_count | ||
221 | * Indicates the number of more buffers associated with RX DMA | ||
222 | * ring 1. Field is filled in by the RX_DMA. | ||
223 | * | ||
224 | * ring2_more_count | ||
225 | * Indicates the number of more buffers associated with RX DMA | ||
226 | * ring 2. Field is filled in by the RX_DMA. | ||
227 | * | ||
228 | * ring3_more_count | ||
229 | * Indicates the number of more buffers associated with RX DMA | ||
230 | * ring 3. Field is filled in by the RX_DMA. | ||
231 | */ | ||
232 | |||
233 | enum htt_rx_mpdu_encrypt_type { | ||
234 | HTT_RX_MPDU_ENCRYPT_WEP40 = 0, | ||
235 | HTT_RX_MPDU_ENCRYPT_WEP104 = 1, | ||
236 | HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC = 2, | ||
237 | HTT_RX_MPDU_ENCRYPT_WEP128 = 3, | ||
238 | HTT_RX_MPDU_ENCRYPT_TKIP_WPA = 4, | ||
239 | HTT_RX_MPDU_ENCRYPT_WAPI = 5, | ||
240 | HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2 = 6, | ||
241 | HTT_RX_MPDU_ENCRYPT_NONE = 7, | ||
242 | }; | ||
243 | |||
244 | #define RX_MPDU_START_INFO0_PEER_IDX_MASK 0x000007ff | ||
245 | #define RX_MPDU_START_INFO0_PEER_IDX_LSB 0 | ||
246 | #define RX_MPDU_START_INFO0_SEQ_NUM_MASK 0x0fff0000 | ||
247 | #define RX_MPDU_START_INFO0_SEQ_NUM_LSB 16 | ||
248 | #define RX_MPDU_START_INFO0_ENCRYPT_TYPE_MASK 0xf0000000 | ||
249 | #define RX_MPDU_START_INFO0_ENCRYPT_TYPE_LSB 28 | ||
250 | #define RX_MPDU_START_INFO0_FROM_DS (1 << 11) | ||
251 | #define RX_MPDU_START_INFO0_TO_DS (1 << 12) | ||
252 | #define RX_MPDU_START_INFO0_ENCRYPTED (1 << 13) | ||
253 | #define RX_MPDU_START_INFO0_RETRY (1 << 14) | ||
254 | #define RX_MPDU_START_INFO0_TXBF_H_INFO (1 << 15) | ||
255 | |||
256 | #define RX_MPDU_START_INFO1_TID_MASK 0xf0000000 | ||
257 | #define RX_MPDU_START_INFO1_TID_LSB 28 | ||
258 | #define RX_MPDU_START_INFO1_DIRECTED (1 << 16) | ||
259 | |||
260 | struct rx_mpdu_start { | ||
261 | __le32 info0; | ||
262 | union { | ||
263 | struct { | ||
264 | __le32 pn31_0; | ||
265 | __le32 info1; /* %RX_MPDU_START_INFO1_ */ | ||
266 | } __packed; | ||
267 | struct { | ||
268 | u8 pn[6]; | ||
269 | } __packed; | ||
270 | } __packed; | ||
271 | } __packed; | ||
272 | |||
273 | /* | ||
274 | * peer_idx | ||
275 | * The index of the address search table which associated with | ||
276 | * the peer table entry corresponding to this MPDU. Only valid | ||
277 | * when first_msdu is set. | ||
278 | * | ||
279 | * fr_ds | ||
280 | * Set if the from DS bit is set in the frame control. Only | ||
281 | * valid when first_msdu is set. | ||
282 | * | ||
283 | * to_ds | ||
284 | * Set if the to DS bit is set in the frame control. Only | ||
285 | * valid when first_msdu is set. | ||
286 | * | ||
287 | * encrypted | ||
288 | * Protected bit from the frame control. Only valid when | ||
289 | * first_msdu is set. | ||
290 | * | ||
291 | * retry | ||
292 | * Retry bit from the frame control. Only valid when | ||
293 | * first_msdu is set. | ||
294 | * | ||
295 | * txbf_h_info | ||
296 | * The MPDU data will contain H information. Primarily used | ||
297 | * for debug. | ||
298 | * | ||
299 | * seq_num | ||
300 | * The sequence number from the 802.11 header. Only valid when | ||
301 | * first_msdu is set. | ||
302 | * | ||
303 | * encrypt_type | ||
304 | * Indicates type of decrypt cipher used (as defined in the | ||
305 | * peer table) | ||
306 | * 0: WEP40 | ||
307 | * 1: WEP104 | ||
308 | * 2: TKIP without MIC | ||
309 | * 3: WEP128 | ||
310 | * 4: TKIP (WPA) | ||
311 | * 5: WAPI | ||
312 | * 6: AES-CCM (WPA2) | ||
313 | * 7: No cipher | ||
314 | * Only valid when first_msdu is set | ||
315 | * | ||
316 | * pn_31_0 | ||
317 | * Bits [31:0] of the PN number extracted from the IV field | ||
318 | * WEP: IV = {key_id_octet, pn2, pn1, pn0}. Only pn[23:0] is | ||
319 | * valid. | ||
320 | * TKIP: IV = {pn5, pn4, pn3, pn2, key_id_octet, pn0, | ||
321 | * WEPSeed[1], pn1}. Only pn[47:0] is valid. | ||
322 | * AES-CCM: IV = {pn5, pn4, pn3, pn2, key_id_octet, 0x0, pn1, | ||
323 | * pn0}. Only pn[47:0] is valid. | ||
324 | * WAPI: IV = {key_id_octet, 0x0, pn15, pn14, pn13, pn12, pn11, | ||
325 | * pn10, pn9, pn8, pn7, pn6, pn5, pn4, pn3, pn2, pn1, pn0}. | ||
326 | * The ext_wapi_pn[127:48] in the rx_msdu_misc descriptor and | ||
327 | * pn[47:0] are valid. | ||
328 | * Only valid when first_msdu is set. | ||
329 | * | ||
330 | * pn_47_32 | ||
331 | * Bits [47:32] of the PN number. See description for | ||
332 | * pn_31_0. The remaining PN fields are in the rx_msdu_end | ||
333 | * descriptor | ||
334 | * | ||
335 | * pn | ||
336 | * Use this field to access the pn without worrying about | ||
337 | * byte-order and bitmasking/bitshifting. | ||
338 | * | ||
339 | * directed | ||
340 | * See definition in RX attention descriptor | ||
341 | * | ||
342 | * reserved_2 | ||
343 | * Reserved: HW should fill with zero. FW should ignore. | ||
344 | * | ||
345 | * tid | ||
346 | * The TID field in the QoS control field | ||
347 | */ | ||
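A hedged example of extracting the bit-packed fields described above from info0, using the MASK/LSB definitions and plain shifts (the helper name is hypothetical):

/* Sketch: pull peer index, sequence number and cipher type out of
 * rx_mpdu_start.info0. */
static void ath10k_rx_mpdu_start_parse_sketch(struct rx_mpdu_start *mpdu,
                                              u16 *peer_idx, u16 *seq_num,
                                              enum htt_rx_mpdu_encrypt_type *enc)
{
        u32 info0 = __le32_to_cpu(mpdu->info0);

        *peer_idx = (info0 & RX_MPDU_START_INFO0_PEER_IDX_MASK) >>
                    RX_MPDU_START_INFO0_PEER_IDX_LSB;
        *seq_num = (info0 & RX_MPDU_START_INFO0_SEQ_NUM_MASK) >>
                   RX_MPDU_START_INFO0_SEQ_NUM_LSB;
        *enc = (info0 & RX_MPDU_START_INFO0_ENCRYPT_TYPE_MASK) >>
               RX_MPDU_START_INFO0_ENCRYPT_TYPE_LSB;
}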
348 | |||
349 | #define RX_MPDU_END_INFO0_RESERVED_0_MASK 0x00001fff | ||
350 | #define RX_MPDU_END_INFO0_RESERVED_0_LSB 0 | ||
351 | #define RX_MPDU_END_INFO0_POST_DELIM_CNT_MASK 0x0fff0000 | ||
352 | #define RX_MPDU_END_INFO0_POST_DELIM_CNT_LSB 16 | ||
353 | #define RX_MPDU_END_INFO0_OVERFLOW_ERR (1 << 13) | ||
354 | #define RX_MPDU_END_INFO0_LAST_MPDU (1 << 14) | ||
355 | #define RX_MPDU_END_INFO0_POST_DELIM_ERR (1 << 15) | ||
356 | #define RX_MPDU_END_INFO0_MPDU_LENGTH_ERR (1 << 28) | ||
357 | #define RX_MPDU_END_INFO0_TKIP_MIC_ERR (1 << 29) | ||
358 | #define RX_MPDU_END_INFO0_DECRYPT_ERR (1 << 30) | ||
359 | #define RX_MPDU_END_INFO0_FCS_ERR (1 << 31) | ||
360 | |||
361 | struct rx_mpdu_end { | ||
362 | __le32 info0; | ||
363 | } __packed; | ||
364 | |||
365 | /* | ||
366 | * reserved_0 | ||
367 | * Reserved | ||
368 | * | ||
369 | * overflow_err | ||
370 | * PCU Receive FIFO does not have enough space to store the | ||
371 | * full receive packet. Enough space is reserved in the | ||
372 | * receive FIFO so the status can still be written. This MPDU | ||
373 | * and the remaining packets in the PPDU will be filtered and | ||
374 | * no Ack response will be transmitted. | ||
375 | * | ||
376 | * last_mpdu | ||
377 | * Indicates that this is the last MPDU of a PPDU. | ||
378 | * | ||
379 | * post_delim_err | ||
380 | * Indicates that a delimiter FCS error occurred after this | ||
381 | * MPDU before the next MPDU. Only valid when last_msdu is | ||
382 | * set. | ||
383 | * | ||
384 | * post_delim_cnt | ||
385 | * Count of the delimiters after this MPDU. This requires the | ||
386 | * last MPDU to be held until all the EOF descriptors have been | ||
387 | * received. This may be inefficient in the future when | ||
388 | * ML-MIMO is used. Only valid when last_mpdu is set. | ||
389 | * | ||
390 | * mpdu_length_err | ||
391 | * See definition in RX attention descriptor | ||
392 | * | ||
393 | * tkip_mic_err | ||
394 | * See definition in RX attention descriptor | ||
395 | * | ||
396 | * decrypt_err | ||
397 | * See definition in RX attention descriptor | ||
398 | * | ||
399 | * fcs_err | ||
400 | * See definition in RX attention descriptor | ||
401 | */ | ||
402 | |||
403 | #define RX_MSDU_START_INFO0_MSDU_LENGTH_MASK 0x00003fff | ||
404 | #define RX_MSDU_START_INFO0_MSDU_LENGTH_LSB 0 | ||
405 | #define RX_MSDU_START_INFO0_IP_OFFSET_MASK 0x000fc000 | ||
406 | #define RX_MSDU_START_INFO0_IP_OFFSET_LSB 14 | ||
407 | #define RX_MSDU_START_INFO0_RING_MASK_MASK 0x00f00000 | ||
408 | #define RX_MSDU_START_INFO0_RING_MASK_LSB 20 | ||
409 | #define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_MASK 0x7f000000 | ||
410 | #define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_LSB 24 | ||
411 | |||
412 | #define RX_MSDU_START_INFO1_MSDU_NUMBER_MASK 0x000000ff | ||
413 | #define RX_MSDU_START_INFO1_MSDU_NUMBER_LSB 0 | ||
414 | #define RX_MSDU_START_INFO1_DECAP_FORMAT_MASK 0x00000300 | ||
415 | #define RX_MSDU_START_INFO1_DECAP_FORMAT_LSB 8 | ||
416 | #define RX_MSDU_START_INFO1_SA_IDX_MASK 0x07ff0000 | ||
417 | #define RX_MSDU_START_INFO1_SA_IDX_LSB 16 | ||
418 | #define RX_MSDU_START_INFO1_IPV4_PROTO (1 << 10) | ||
419 | #define RX_MSDU_START_INFO1_IPV6_PROTO (1 << 11) | ||
420 | #define RX_MSDU_START_INFO1_TCP_PROTO (1 << 12) | ||
421 | #define RX_MSDU_START_INFO1_UDP_PROTO (1 << 13) | ||
422 | #define RX_MSDU_START_INFO1_IP_FRAG (1 << 14) | ||
423 | #define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15) | ||
424 | |||
425 | enum rx_msdu_decap_format { | ||
426 | RX_MSDU_DECAP_RAW = 0, | ||
427 | RX_MSDU_DECAP_NATIVE_WIFI = 1, | ||
428 | RX_MSDU_DECAP_ETHERNET2_DIX = 2, | ||
429 | RX_MSDU_DECAP_8023_SNAP_LLC = 3 | ||
430 | }; | ||
431 | |||
432 | struct rx_msdu_start { | ||
433 | __le32 info0; /* %RX_MSDU_START_INFO0_ */ | ||
434 | __le32 flow_id_crc; | ||
435 | __le32 info1; /* %RX_MSDU_START_INFO1_ */ | ||
436 | } __packed; | ||
437 | |||
438 | /* | ||
439 | * msdu_length | ||
440 | * MSDU length in bytes after decapsulation. This field is | ||
441 | * also valid for MPDU frames without an A-MSDU, where it | ||
442 | * still represents the MSDU length after decapsulation. | ||
443 | * | ||
444 | * ip_offset | ||
445 | * Indicates the IP offset in bytes from the start of the | ||
446 | * packet after decapsulation. Only valid if ipv4_proto or | ||
447 | * ipv6_proto is set. | ||
448 | * | ||
449 | * ring_mask | ||
450 | * Indicates the destination RX rings for this MSDU. | ||
451 | * | ||
452 | * tcp_udp_offset | ||
453 | * Indicates the offset in bytes to the start of TCP or UDP | ||
454 | * header from the start of the IP header after decapsulation. | ||
455 | * Only valid if tcp_proto or udp_proto is set. The value 0 | ||
456 | * indicates that the offset is longer than 127 bytes. | ||
457 | * | ||
458 | * reserved_0c | ||
459 | * Reserved: HW should fill with zero. FW should ignore. | ||
460 | * | ||
461 | * flow_id_crc | ||
462 | * The flow_id_crc runs CRC32 on the following information: | ||
463 | * IPv4 option: dest_addr[31:0], src_addr [31:0], {24'b0, | ||
464 | * protocol[7:0]}. | ||
465 | * IPv6 option: dest_addr[127:0], src_addr [127:0], {24'b0, | ||
466 | * next_header[7:0]} | ||
467 | * UDP case: src_port[15:0], dest_port[15:0] | ||
468 | * TCP case: src_port[15:0], dest_port[15:0], | ||
469 | * {header_length[3:0], 6'b0, flags[5:0], window_size[15:0]}, | ||
470 | * {16'b0, urgent_ptr[15:0]}, all options except 32-bit | ||
471 | * timestamp. | ||
472 | * | ||
473 | * msdu_number | ||
474 | * Indicates the MSDU number within an MPDU. This value is | ||
475 | * reset to zero at the start of each MPDU. If the number of | ||
476 | * MSDUs exceeds 255 this number will wrap using modulo 256. | ||
477 | * | ||
478 | * decap_format | ||
479 | * Indicates the format after decapsulation: | ||
480 | * 0: RAW: No decapsulation | ||
481 | * 1: Native WiFi | ||
482 | * 2: Ethernet 2 (DIX) | ||
483 | * 3: 802.3 (SNAP/LLC) | ||
484 | * | ||
485 | * ipv4_proto | ||
486 | * Set if L2 layer indicates IPv4 protocol. | ||
487 | * | ||
488 | * ipv6_proto | ||
489 | * Set if L2 layer indicates IPv6 protocol. | ||
490 | * | ||
491 | * tcp_proto | ||
492 | * Set if the ipv4_proto or ipv6_proto are set and the IP | ||
493 | * protocol indicates TCP. | ||
494 | * | ||
495 | * udp_proto | ||
496 | * Set if the ipv4_proto or ipv6_proto are set and the IP | ||
497 | * protocol indicates UDP. | ||
498 | * | ||
499 | * ip_frag | ||
500 | * Indicates that either the IP More frag bit is set or IP frag | ||
501 | * number is non-zero. If set indicates that this is a | ||
502 | * fragmented IP packet. | ||
503 | * | ||
504 | * tcp_only_ack | ||
505 | * Set if only the TCP Ack bit is set in the TCP flags and if | ||
506 | * the TCP payload is 0. | ||
507 | * | ||
508 | * sa_idx | ||
509 | * The offset in the address table which matches the MAC source | ||
510 | * address. | ||
511 | * | ||
512 | * reserved_2b | ||
513 | * Reserved: HW should fill with zero. FW should ignore. | ||
514 | */ | ||
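As a sketch (hypothetical helper names, not part of the patch), the *_MASK/*_LSB pairs above would typically be used like this to pull fields out of rx_msdu_start:

/* Hypothetical helpers (illustration only). */
static inline u32 ath10k_rx_msdu_start_len(const struct rx_msdu_start *start)
{
	u32 info0 = __le32_to_cpu(start->info0);

	return (info0 & RX_MSDU_START_INFO0_MSDU_LENGTH_MASK) >>
	       RX_MSDU_START_INFO0_MSDU_LENGTH_LSB;
}

static inline enum rx_msdu_decap_format
ath10k_rx_msdu_start_decap(const struct rx_msdu_start *start)
{
	u32 info1 = __le32_to_cpu(start->info1);

	return (info1 & RX_MSDU_START_INFO1_DECAP_FORMAT_MASK) >>
	       RX_MSDU_START_INFO1_DECAP_FORMAT_LSB;
}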
515 | |||
516 | #define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_MASK 0x00003fff | ||
517 | #define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_LSB 0 | ||
518 | #define RX_MSDU_END_INFO0_FIRST_MSDU (1 << 14) | ||
519 | #define RX_MSDU_END_INFO0_LAST_MSDU (1 << 15) | ||
520 | #define RX_MSDU_END_INFO0_PRE_DELIM_ERR (1 << 30) | ||
521 | #define RX_MSDU_END_INFO0_RESERVED_3B (1 << 31) | ||
522 | |||
523 | struct rx_msdu_end { | ||
524 | __le16 ip_hdr_cksum; | ||
525 | __le16 tcp_hdr_cksum; | ||
526 | u8 key_id_octet; | ||
527 | u8 classification_filter; | ||
528 | u8 wapi_pn[10]; | ||
529 | __le32 info0; | ||
530 | } __packed; | ||
531 | |||
532 | /* | ||
533 | *ip_hdr_chksum | ||
534 | * This can include the IP header checksum or the pseudo header | ||
535 | * checksum used by TCP/UDP checksum. | ||
536 | * | ||
537 | *tcp_udp_chksum | ||
538 | * The value of the computed TCP/UDP checksum. A mode bit | ||
539 | * selects whether this checksum is the full checksum or the | ||
540 | * partial checksum which does not include the pseudo header. | ||
541 | * | ||
542 | *key_id_octet | ||
543 | * The key ID octet from the IV. Only valid when first_msdu is | ||
544 | * set. | ||
545 | * | ||
546 | *classification_filter | ||
547 | * Indicates the number classification filter rule | ||
548 | * | ||
549 | *ext_wapi_pn_63_48 | ||
550 | * Extension PN (packet number) which is only used by WAPI. | ||
551 | * This corresponds to WAPI PN bits [63:48] (pn6 and pn7). The | ||
552 | * WAPI PN bits [63:0] are in the pn field of the rx_mpdu_start | ||
553 | * descriptor. | ||
554 | * | ||
555 | *ext_wapi_pn_95_64 | ||
556 | * Extension PN (packet number) which is only used by WAPI. | ||
557 | * This corresponds to WAPI PN bits [95:64] (pn8, pn9, pn10 and | ||
558 | * pn11). | ||
559 | * | ||
560 | *ext_wapi_pn_127_96 | ||
561 | * Extension PN (packet number) which is only used by WAPI. | ||
562 | * This corresponds to WAPI PN bits [127:96] (pn12, pn13, pn14, | ||
563 | * pn15). | ||
564 | * | ||
565 | *reported_mpdu_length | ||
566 | * MPDU length before decapsulation. Only valid when | ||
567 | * first_msdu is set. This field is taken directly from the | ||
568 | * length field of the A-MPDU delimiter or the preamble length | ||
569 | * field for non-A-MPDU frames. | ||
570 | * | ||
571 | *first_msdu | ||
572 | * Indicates the first MSDU of an A-MSDU. If both first_msdu | ||
573 | * and last_msdu are set in the MSDU then this is a | ||
574 | * non-aggregated MSDU frame: a normal MPDU. Interior MSDUs in | ||
575 | * an A-MSDU shall have both bits set to 0. | ||
576 | * | ||
577 | *last_msdu | ||
578 | * Indicates the last MSDU of the A-MSDU. MPDU end status is | ||
579 | * only valid when last_msdu is set. | ||
580 | * | ||
581 | *reserved_3a | ||
582 | * Reserved: HW should fill with zero. FW should ignore. | ||
583 | * | ||
584 | *pre_delim_err | ||
585 | * Indicates that the first delimiter had a FCS failure. Only | ||
586 | * valid when first_mpdu and first_msdu are set. | ||
587 | * | ||
588 | *reserved_3b | ||
589 | * Reserved: HW should fill with zero. FW should ignore. | ||
590 | */ | ||
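Illustrative only: the first_msdu/last_msdu pair marks A-MSDU boundaries, so a hypothetical consumer might detect a non-aggregated MSDU as follows:

/* Hypothetical helper (illustration): standalone MSDU, i.e. a plain MPDU. */
static inline bool ath10k_rx_msdu_is_standalone(const struct rx_msdu_end *end)
{
	u32 info0 = __le32_to_cpu(end->info0);

	/* first_msdu and last_msdu both set => non-aggregated MSDU */
	return (info0 & RX_MSDU_END_INFO0_FIRST_MSDU) &&
	       (info0 & RX_MSDU_END_INFO0_LAST_MSDU);
}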
591 | |||
592 | #define RX_PPDU_START_SIG_RATE_SELECT_OFDM 0 | ||
593 | #define RX_PPDU_START_SIG_RATE_SELECT_CCK 1 | ||
594 | |||
595 | #define RX_PPDU_START_SIG_RATE_OFDM_48 0 | ||
596 | #define RX_PPDU_START_SIG_RATE_OFDM_24 1 | ||
597 | #define RX_PPDU_START_SIG_RATE_OFDM_12 2 | ||
598 | #define RX_PPDU_START_SIG_RATE_OFDM_6 3 | ||
599 | #define RX_PPDU_START_SIG_RATE_OFDM_54 4 | ||
600 | #define RX_PPDU_START_SIG_RATE_OFDM_36 5 | ||
601 | #define RX_PPDU_START_SIG_RATE_OFDM_18 6 | ||
602 | #define RX_PPDU_START_SIG_RATE_OFDM_9 7 | ||
603 | |||
604 | #define RX_PPDU_START_SIG_RATE_CCK_LP_11 0 | ||
605 | #define RX_PPDU_START_SIG_RATE_CCK_LP_5_5 1 | ||
606 | #define RX_PPDU_START_SIG_RATE_CCK_LP_2 2 | ||
607 | #define RX_PPDU_START_SIG_RATE_CCK_LP_1 3 | ||
608 | #define RX_PPDU_START_SIG_RATE_CCK_SP_11 4 | ||
609 | #define RX_PPDU_START_SIG_RATE_CCK_SP_5_5 5 | ||
610 | #define RX_PPDU_START_SIG_RATE_CCK_SP_2 6 | ||
611 | |||
612 | #define HTT_RX_PPDU_START_PREAMBLE_LEGACY 0x04 | ||
613 | #define HTT_RX_PPDU_START_PREAMBLE_HT 0x08 | ||
614 | #define HTT_RX_PPDU_START_PREAMBLE_HT_WITH_TXBF 0x09 | ||
615 | #define HTT_RX_PPDU_START_PREAMBLE_VHT 0x0C | ||
616 | #define HTT_RX_PPDU_START_PREAMBLE_VHT_WITH_TXBF 0x0D | ||
617 | |||
618 | #define RX_PPDU_START_INFO0_IS_GREENFIELD (1 << 0) | ||
619 | |||
620 | #define RX_PPDU_START_INFO1_L_SIG_RATE_MASK 0x0000000f | ||
621 | #define RX_PPDU_START_INFO1_L_SIG_RATE_LSB 0 | ||
622 | #define RX_PPDU_START_INFO1_L_SIG_LENGTH_MASK 0x0001ffe0 | ||
623 | #define RX_PPDU_START_INFO1_L_SIG_LENGTH_LSB 5 | ||
624 | #define RX_PPDU_START_INFO1_L_SIG_TAIL_MASK 0x00fc0000 | ||
625 | #define RX_PPDU_START_INFO1_L_SIG_TAIL_LSB 18 | ||
626 | #define RX_PPDU_START_INFO1_PREAMBLE_TYPE_MASK 0xff000000 | ||
627 | #define RX_PPDU_START_INFO1_PREAMBLE_TYPE_LSB 24 | ||
628 | #define RX_PPDU_START_INFO1_L_SIG_RATE_SELECT (1 << 4) | ||
629 | #define RX_PPDU_START_INFO1_L_SIG_PARITY (1 << 17) | ||
630 | |||
631 | #define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_MASK 0x00ffffff | ||
632 | #define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_LSB 0 | ||
633 | |||
634 | #define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_MASK 0x00ffffff | ||
635 | #define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_LSB 0 | ||
636 | #define RX_PPDU_START_INFO3_TXBF_H_INFO (1 << 24) | ||
637 | |||
638 | #define RX_PPDU_START_INFO4_VHT_SIG_B_MASK 0x1fffffff | ||
639 | #define RX_PPDU_START_INFO4_VHT_SIG_B_LSB 0 | ||
640 | |||
641 | #define RX_PPDU_START_INFO5_SERVICE_MASK 0x0000ffff | ||
642 | #define RX_PPDU_START_INFO5_SERVICE_LSB 0 | ||
643 | |||
644 | struct rx_ppdu_start { | ||
645 | struct { | ||
646 | u8 pri20_mhz; | ||
647 | u8 ext20_mhz; | ||
648 | u8 ext40_mhz; | ||
649 | u8 ext80_mhz; | ||
650 | } rssi_chains[4]; | ||
651 | u8 rssi_comb; | ||
652 | __le16 rsvd0; | ||
653 | u8 info0; /* %RX_PPDU_START_INFO0_ */ | ||
654 | __le32 info1; /* %RX_PPDU_START_INFO1_ */ | ||
655 | __le32 info2; /* %RX_PPDU_START_INFO2_ */ | ||
656 | __le32 info3; /* %RX_PPDU_START_INFO3_ */ | ||
657 | __le32 info4; /* %RX_PPDU_START_INFO4_ */ | ||
658 | __le32 info5; /* %RX_PPDU_START_INFO5_ */ | ||
659 | } __packed; | ||
660 | |||
661 | /* | ||
662 | * rssi_chain0_pri20 | ||
663 | * RSSI of RX PPDU on chain 0 of primary 20 MHz bandwidth. | ||
664 | * Value of 0x80 indicates invalid. | ||
665 | * | ||
666 | * rssi_chain0_sec20 | ||
667 | * RSSI of RX PPDU on chain 0 of secondary 20 MHz bandwidth. | ||
668 | * Value of 0x80 indicates invalid. | ||
669 | * | ||
670 | * rssi_chain0_sec40 | ||
671 | * RSSI of RX PPDU on chain 0 of secondary 40 MHz bandwidth. | ||
672 | * Value of 0x80 indicates invalid. | ||
673 | * | ||
674 | * rssi_chain0_sec80 | ||
675 | * RSSI of RX PPDU on chain 0 of secondary 80 MHz bandwidth. | ||
676 | * Value of 0x80 indicates invalid. | ||
677 | * | ||
678 | * rssi_chain1_pri20 | ||
679 | * RSSI of RX PPDU on chain 1 of primary 20 MHz bandwidth. | ||
680 | * Value of 0x80 indicates invalid. | ||
681 | * | ||
682 | * rssi_chain1_sec20 | ||
683 | * RSSI of RX PPDU on chain 1 of secondary 20 MHz bandwidth. | ||
684 | * Value of 0x80 indicates invalid. | ||
685 | * | ||
686 | * rssi_chain1_sec40 | ||
687 | * RSSI of RX PPDU on chain 1 of secondary 40 MHz bandwidth. | ||
688 | * Value of 0x80 indicates invalid. | ||
689 | * | ||
690 | * rssi_chain1_sec80 | ||
691 | * RSSI of RX PPDU on chain 1 of secondary 80 MHz bandwidth. | ||
692 | * Value of 0x80 indicates invalid. | ||
693 | * | ||
694 | * rssi_chain2_pri20 | ||
695 | * RSSI of RX PPDU on chain 2 of primary 20 MHz bandwidth. | ||
696 | * Value of 0x80 indicates invalid. | ||
697 | * | ||
698 | * rssi_chain2_sec20 | ||
699 | * RSSI of RX PPDU on chain 2 of secondary 20 MHz bandwidth. | ||
700 | * Value of 0x80 indicates invalid. | ||
701 | * | ||
702 | * rssi_chain2_sec40 | ||
703 | * RSSI of RX PPDU on chain 2 of secondary 40 MHz bandwidth. | ||
704 | * Value of 0x80 indicates invalid. | ||
705 | * | ||
706 | * rssi_chain2_sec80 | ||
707 | * RSSI of RX PPDU on chain 2 of secondary 80 MHz bandwidth. | ||
708 | * Value of 0x80 indicates invalid. | ||
709 | * | ||
710 | * rssi_chain3_pri20 | ||
711 | * RSSI of RX PPDU on chain 3 of primary 20 MHz bandwidth. | ||
712 | * Value of 0x80 indicates invalid. | ||
713 | * | ||
714 | * rssi_chain3_sec20 | ||
715 | * RSSI of RX PPDU on chain 3 of secondary 20 MHz bandwidth. | ||
716 | * Value of 0x80 indicates invalid. | ||
717 | * | ||
718 | * rssi_chain3_sec40 | ||
719 | * RSSI of RX PPDU on chain 3 of secondary 40 MHz bandwidth. | ||
720 | * Value of 0x80 indicates invalid. | ||
721 | * | ||
722 | * rssi_chain3_sec80 | ||
723 | * RSSI of RX PPDU on chain 3 of secondary 80 MHz bandwidth. | ||
724 | * Value of 0x80 indicates invalid. | ||
725 | * | ||
726 | * rssi_comb | ||
727 | * The combined RSSI of RX PPDU of all active chains and | ||
728 | * bandwidths. Value of 0x80 indicates invalid. | ||
729 | * | ||
730 | * reserved_4a | ||
731 | * Reserved: HW should fill with 0, FW should ignore. | ||
732 | * | ||
733 | * is_greenfield | ||
734 | * Do we really support this? | ||
735 | * | ||
736 | * reserved_4b | ||
737 | * Reserved: HW should fill with 0, FW should ignore. | ||
738 | * | ||
739 | * l_sig_rate | ||
740 | * If l_sig_rate_select is 0: | ||
741 | * 0x8: OFDM 48 Mbps | ||
742 | * 0x9: OFDM 24 Mbps | ||
743 | * 0xA: OFDM 12 Mbps | ||
744 | * 0xB: OFDM 6 Mbps | ||
745 | * 0xC: OFDM 54 Mbps | ||
746 | * 0xD: OFDM 36 Mbps | ||
747 | * 0xE: OFDM 18 Mbps | ||
748 | * 0xF: OFDM 9 Mbps | ||
749 | * If l_sig_rate_select is 1: | ||
750 | * 0x8: CCK 11 Mbps long preamble | ||
751 | * 0x9: CCK 5.5 Mbps long preamble | ||
752 | * 0xA: CCK 2 Mbps long preamble | ||
753 | * 0xB: CCK 1 Mbps long preamble | ||
754 | * 0xC: CCK 11 Mbps short preamble | ||
755 | * 0xD: CCK 5.5 Mbps short preamble | ||
756 | * 0xE: CCK 2 Mbps short preamble | ||
757 | * | ||
758 | * l_sig_rate_select | ||
759 | * Legacy signal rate select. If set then l_sig_rate indicates | ||
760 | * CCK rates. If clear then l_sig_rate indicates OFDM rates. | ||
761 | * | ||
762 | * l_sig_length | ||
763 | * Length of legacy frame in octets. | ||
764 | * | ||
765 | * l_sig_parity | ||
766 | * Odd parity over l_sig_rate and l_sig_length | ||
767 | * | ||
768 | * l_sig_tail | ||
769 | * Tail bits for Viterbi decoder | ||
770 | * | ||
771 | * preamble_type | ||
772 | * Indicates the type of preamble ahead: | ||
773 | * 0x4: Legacy (OFDM/CCK) | ||
774 | * 0x8: HT | ||
775 | * 0x9: HT with TxBF | ||
776 | * 0xC: VHT | ||
777 | * 0xD: VHT with TxBF | ||
778 | * 0x80 - 0xFF: Reserved for special baseband data types such | ||
779 | * as radar and spectral scan. | ||
780 | * | ||
781 | * ht_sig_vht_sig_a_1 | ||
782 | * If preamble_type == 0x8 or 0x9 | ||
783 | * HT-SIG (first 24 bits) | ||
784 | * If preamble_type == 0xC or 0xD | ||
785 | * VHT-SIG A (first 24 bits) | ||
786 | * Else | ||
787 | * Reserved | ||
788 | * | ||
789 | * reserved_6 | ||
790 | * Reserved: HW should fill with 0, FW should ignore. | ||
791 | * | ||
792 | * ht_sig_vht_sig_a_2 | ||
793 | * If preamble_type == 0x8 or 0x9 | ||
794 | * HT-SIG (last 24 bits) | ||
795 | * If preamble_type == 0xC or 0xD | ||
796 | * VHT-SIG A (last 24 bits) | ||
797 | * Else | ||
798 | * Reserved | ||
799 | * | ||
800 | * txbf_h_info | ||
801 | * Indicates that the packet data carries H information which | ||
802 | * is used for TxBF debug. | ||
803 | * | ||
804 | * reserved_7 | ||
805 | * Reserved: HW should fill with 0, FW should ignore. | ||
806 | * | ||
807 | * vht_sig_b | ||
808 | * WiFi 1.0 and WiFi 2.0 will likely have this field set to | ||
809 | * all 0s since the BB does not plan on decoding VHT SIG-B. | ||
810 | * | ||
811 | * reserved_8 | ||
812 | * Reserved: HW should fill with 0, FW should ignore. | ||
813 | * | ||
814 | * service | ||
815 | * Service field from BB for OFDM, HT and VHT packets. CCK | ||
816 | * packets will have service field of 0. | ||
817 | * | ||
818 | * reserved_9 | ||
819 | * Reserved: HW should fill with 0, FW should ignore. | ||
820 | */ | ||
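A minimal sketch (hypothetical helper, not in this patch) of how the L-SIG fields described above could be unpacked from info1:

/* Illustration only: pull the legacy L-SIG fields out of rx_ppdu_start. */
static inline void ath10k_rx_ppdu_start_l_sig(const struct rx_ppdu_start *ppdu,
					      u32 *rate, u32 *length,
					      bool *is_cck)
{
	u32 info1 = __le32_to_cpu(ppdu->info1);

	*rate = (info1 & RX_PPDU_START_INFO1_L_SIG_RATE_MASK) >>
		RX_PPDU_START_INFO1_L_SIG_RATE_LSB;
	*length = (info1 & RX_PPDU_START_INFO1_L_SIG_LENGTH_MASK) >>
		  RX_PPDU_START_INFO1_L_SIG_LENGTH_LSB;
	*is_cck = !!(info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT);
}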
821 | |||
822 | |||
823 | #define RX_PPDU_END_FLAGS_PHY_ERR (1 << 0) | ||
824 | #define RX_PPDU_END_FLAGS_RX_LOCATION (1 << 1) | ||
825 | #define RX_PPDU_END_FLAGS_TXBF_H_INFO (1 << 2) | ||
826 | |||
827 | #define RX_PPDU_END_INFO0_RX_ANTENNA_MASK 0x00ffffff | ||
828 | #define RX_PPDU_END_INFO0_RX_ANTENNA_LSB 0 | ||
829 | #define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24) | ||
830 | #define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25) | ||
831 | |||
832 | #define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15) | ||
833 | |||
834 | struct rx_ppdu_end { | ||
835 | __le32 evm_p0; | ||
836 | __le32 evm_p1; | ||
837 | __le32 evm_p2; | ||
838 | __le32 evm_p3; | ||
839 | __le32 evm_p4; | ||
840 | __le32 evm_p5; | ||
841 | __le32 evm_p6; | ||
842 | __le32 evm_p7; | ||
843 | __le32 evm_p8; | ||
844 | __le32 evm_p9; | ||
845 | __le32 evm_p10; | ||
846 | __le32 evm_p11; | ||
847 | __le32 evm_p12; | ||
848 | __le32 evm_p13; | ||
849 | __le32 evm_p14; | ||
850 | __le32 evm_p15; | ||
851 | __le32 tsf_timestamp; | ||
852 | __le32 wb_timestamp; | ||
853 | u8 locationing_timestamp; | ||
854 | u8 phy_err_code; | ||
855 | __le16 flags; /* %RX_PPDU_END_FLAGS_ */ | ||
856 | __le32 info0; /* %RX_PPDU_END_INFO0_ */ | ||
857 | __le16 bb_length; | ||
858 | __le16 info1; /* %RX_PPDU_END_INFO1_ */ | ||
859 | } __packed; | ||
860 | |||
861 | /* | ||
862 | * evm_p0 | ||
863 | * EVM for pilot 0. Contain EVM for streams: 0, 1, 2 and 3. | ||
864 | * | ||
865 | * evm_p1 | ||
866 | * EVM for pilot 1. Contain EVM for streams: 0, 1, 2 and 3. | ||
867 | * | ||
868 | * evm_p2 | ||
869 | * EVM for pilot 2. Contain EVM for streams: 0, 1, 2 and 3. | ||
870 | * | ||
871 | * evm_p3 | ||
872 | * EVM for pilot 3. Contain EVM for streams: 0, 1, 2 and 3. | ||
873 | * | ||
874 | * evm_p4 | ||
875 | * EVM for pilot 4. Contain EVM for streams: 0, 1, 2 and 3. | ||
876 | * | ||
877 | * evm_p5 | ||
878 | * EVM for pilot 5. Contain EVM for streams: 0, 1, 2 and 3. | ||
879 | * | ||
880 | * evm_p6 | ||
881 | * EVM for pilot 6. Contain EVM for streams: 0, 1, 2 and 3. | ||
882 | * | ||
883 | * evm_p7 | ||
884 | * EVM for pilot 7. Contain EVM for streams: 0, 1, 2 and 3. | ||
885 | * | ||
886 | * evm_p8 | ||
887 | * EVM for pilot 8. Contain EVM for streams: 0, 1, 2 and 3. | ||
888 | * | ||
889 | * evm_p9 | ||
890 | * EVM for pilot 9. Contain EVM for streams: 0, 1, 2 and 3. | ||
891 | * | ||
892 | * evm_p10 | ||
893 | * EVM for pilot 10. Contain EVM for streams: 0, 1, 2 and 3. | ||
894 | * | ||
895 | * evm_p11 | ||
896 | * EVM for pilot 11. Contain EVM for streams: 0, 1, 2 and 3. | ||
897 | * | ||
898 | * evm_p12 | ||
899 | * EVM for pilot 12. Contain EVM for streams: 0, 1, 2 and 3. | ||
900 | * | ||
901 | * evm_p13 | ||
902 | * EVM for pilot 13. Contain EVM for streams: 0, 1, 2 and 3. | ||
903 | * | ||
904 | * evm_p14 | ||
905 | * EVM for pilot 14. Contain EVM for streams: 0, 1, 2 and 3. | ||
906 | * | ||
907 | * evm_p15 | ||
908 | * EVM for pilot 15. Contain EVM for streams: 0, 1, 2 and 3. | ||
909 | * | ||
910 | * tsf_timestamp | ||
911 | * Receive TSF timestamp sampled on the rising edge of | ||
912 | * rx_clear. For PHY errors this may be the current TSF when | ||
913 | * phy_error is asserted if the rx_clear does not assert before | ||
914 | * the end of the PHY error. | ||
915 | * | ||
916 | * wb_timestamp | ||
917 | * WLAN/BT timestamp is a 1 usec resolution timestamp which | ||
918 | * does not get updated based on receive beacon like TSF. The | ||
919 | * same rules for capturing tsf_timestamp are used to capture | ||
920 | * the wb_timestamp. | ||
921 | * | ||
922 | * locationing_timestamp | ||
923 | * Timestamp used for locationing. This timestamp is used to | ||
924 | * indicate fractions of usec. For example if the MAC clock is | ||
925 | * running at 80 MHz, the timestamp will increment every 12.5 | ||
926 | * nsec. The value starts at 0 and increments to 79 and | ||
927 | * returns to 0 and repeats. This information is valid for | ||
928 | * every PPDU. This information can be used in conjunction | ||
929 | * with wb_timestamp to capture large delta times. | ||
930 | * | ||
931 | * phy_err_code | ||
932 | * See section 1.10.8.1.2 for the list of PHY error codes. | ||
933 | * | ||
934 | * phy_err | ||
935 | * Indicates a PHY error was detected for this PPDU. | ||
936 | * | ||
937 | * rx_location | ||
938 | * Indicates that location information was requested. | ||
939 | * | ||
940 | * txbf_h_info | ||
941 | * Indicates that the packet data carries H information which | ||
942 | * is used for TxBF debug. | ||
943 | * | ||
944 | * reserved_18 | ||
945 | * Reserved: HW should fill with 0, FW should ignore. | ||
946 | * | ||
947 | * rx_antenna | ||
948 | * Receive antenna value | ||
949 | * | ||
950 | * tx_ht_vht_ack | ||
951 | * Indicates that a HT or VHT Ack/BA frame was transmitted in | ||
952 | * response to this receive packet. | ||
953 | * | ||
954 | * bb_captured_channel | ||
955 | * Indicates that the BB has captured a channel dump. FW can | ||
956 | * then read the channel dump memory. This may indicate that | ||
957 | * the channel was captured either based on PCU setting the | ||
958 | * capture_channel bit in the BB descriptor or FW setting the | ||
959 | * capture_channel mode bit. | ||
960 | * | ||
961 | * reserved_19 | ||
962 | * Reserved: HW should fill with 0, FW should ignore. | ||
963 | * | ||
964 | * bb_length | ||
965 | * Indicates the number of bytes of baseband information for | ||
966 | * PPDUs where the BB descriptor preamble type is 0x80 to 0xFF | ||
967 | * which indicates that this is not a normal PPDU but rather | ||
968 | * contains baseband debug information. | ||
969 | * | ||
970 | * reserved_20 | ||
971 | * Reserved: HW should fill with 0, FW should ignore. | ||
972 | * | ||
973 | * ppdu_done | ||
974 | * PPDU end status is only valid when ppdu_done bit is set. | ||
975 | * Every time HW sets this bit in memory FW/SW must clear this | ||
976 | * bit in memory. FW will initialize all ppdu_done dwords | ||
977 | * to 0. | ||
978 | */ | ||
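Illustrative only: PPDU end status must be gated on ppdu_done, e.g. with a hypothetical helper such as:

/* Illustration only: PPDU end status is valid only once ppdu_done is set. */
static inline bool ath10k_rx_ppdu_end_is_done(const struct rx_ppdu_end *end)
{
	return !!(__le16_to_cpu(end->info1) & RX_PPDU_END_INFO1_PPDU_DONE);
}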
979 | |||
980 | #define FW_RX_DESC_INFO0_DISCARD (1 << 0) | ||
981 | #define FW_RX_DESC_INFO0_FORWARD (1 << 1) | ||
982 | #define FW_RX_DESC_INFO0_INSPECT (1 << 5) | ||
983 | #define FW_RX_DESC_INFO0_EXT_MASK 0xC0 | ||
984 | #define FW_RX_DESC_INFO0_EXT_LSB 6 | ||
985 | |||
986 | struct fw_rx_desc_base { | ||
987 | u8 info0; | ||
988 | } __packed; | ||
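Illustrative only (hypothetical helpers): decoding the per-MSDU firmware RX byte defined above:

/* Illustration only. */
static inline bool ath10k_fw_rx_desc_discard(const struct fw_rx_desc_base *fwd)
{
	return !!(fwd->info0 & FW_RX_DESC_INFO0_DISCARD);
}

static inline u8 ath10k_fw_rx_desc_ext(const struct fw_rx_desc_base *fwd)
{
	return (fwd->info0 & FW_RX_DESC_INFO0_EXT_MASK) >>
	       FW_RX_DESC_INFO0_EXT_LSB;
}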
989 | |||
990 | #endif /* _RX_DESC_H_ */ | ||
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h new file mode 100644 index 000000000000..be7ba1e78afe --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/targaddrs.h | |||
@@ -0,0 +1,449 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #ifndef __TARGADDRS_H__ | ||
19 | #define __TARGADDRS_H__ | ||
20 | |||
21 | /* | ||
22 | * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the | ||
23 | * host_interest structure. It must match the address of the _host_interest | ||
24 | * symbol (see linker script). | ||
25 | * | ||
26 | * Host Interest is shared between Host and Target in order to coordinate | ||
27 | * between the two, and is intended to remain constant (with additions only | ||
28 | * at the end) across software releases. | ||
29 | * | ||
30 | * All addresses are available here so that it's possible to | ||
31 | * write a single binary that works with all Target Types. | ||
32 | * May be used in assembler code as well as C. | ||
33 | */ | ||
34 | #define QCA988X_HOST_INTEREST_ADDRESS 0x00400800 | ||
35 | #define HOST_INTEREST_MAX_SIZE 0x200 | ||
36 | |||
37 | /* | ||
38 | * These are items that the Host may need to access via BMI or via the | ||
39 | * Diagnostic Window. The position of items in this structure must remain | ||
40 | * constant across firmware revisions! Types for each item must be fixed | ||
41 | * size across target and host platforms. More items may be added at the end. | ||
42 | */ | ||
43 | struct host_interest { | ||
44 | /* | ||
45 | * Pointer to application-defined area, if any. | ||
46 | * Set by Target application during startup. | ||
47 | */ | ||
48 | u32 hi_app_host_interest; /* 0x00 */ | ||
49 | |||
50 | /* Pointer to register dump area, valid after Target crash. */ | ||
51 | u32 hi_failure_state; /* 0x04 */ | ||
52 | |||
53 | /* Pointer to debug logging header */ | ||
54 | u32 hi_dbglog_hdr; /* 0x08 */ | ||
55 | |||
56 | u32 hi_unused0c; /* 0x0c */ | ||
57 | |||
58 | /* | ||
59 | * General-purpose flag bits, similar to SOC_OPTION_* flags. | ||
60 | * Can be used by application rather than by OS. | ||
61 | */ | ||
62 | u32 hi_option_flag; /* 0x10 */ | ||
63 | |||
64 | /* | ||
65 | * Boolean that determines whether or not to | ||
66 | * display messages on the serial port. | ||
67 | */ | ||
68 | u32 hi_serial_enable; /* 0x14 */ | ||
69 | |||
70 | /* Start address of DataSet index, if any */ | ||
71 | u32 hi_dset_list_head; /* 0x18 */ | ||
72 | |||
73 | /* Override Target application start address */ | ||
74 | u32 hi_app_start; /* 0x1c */ | ||
75 | |||
76 | /* Clock and voltage tuning */ | ||
77 | u32 hi_skip_clock_init; /* 0x20 */ | ||
78 | u32 hi_core_clock_setting; /* 0x24 */ | ||
79 | u32 hi_cpu_clock_setting; /* 0x28 */ | ||
80 | u32 hi_system_sleep_setting; /* 0x2c */ | ||
81 | u32 hi_xtal_control_setting; /* 0x30 */ | ||
82 | u32 hi_pll_ctrl_setting_24ghz; /* 0x34 */ | ||
83 | u32 hi_pll_ctrl_setting_5ghz; /* 0x38 */ | ||
84 | u32 hi_ref_voltage_trim_setting; /* 0x3c */ | ||
85 | u32 hi_clock_info; /* 0x40 */ | ||
86 | |||
87 | /* Host uses BE CPU or not */ | ||
88 | u32 hi_be; /* 0x44 */ | ||
89 | |||
90 | u32 hi_stack; /* normal stack */ /* 0x48 */ | ||
91 | u32 hi_err_stack; /* error stack */ /* 0x4c */ | ||
92 | u32 hi_desired_cpu_speed_hz; /* 0x50 */ | ||
93 | |||
94 | /* Pointer to Board Data */ | ||
95 | u32 hi_board_data; /* 0x54 */ | ||
96 | |||
97 | /* | ||
98 | * Indication of Board Data state: | ||
99 | * 0: board data is not yet initialized. | ||
100 | * 1: board data is initialized; unknown size | ||
101 | * >1: number of bytes of initialized board data | ||
102 | */ | ||
103 | u32 hi_board_data_initialized; /* 0x58 */ | ||
104 | |||
105 | u32 hi_dset_ram_index_table; /* 0x5c */ | ||
106 | |||
107 | u32 hi_desired_baud_rate; /* 0x60 */ | ||
108 | u32 hi_dbglog_config; /* 0x64 */ | ||
109 | u32 hi_end_ram_reserve_sz; /* 0x68 */ | ||
110 | u32 hi_mbox_io_block_sz; /* 0x6c */ | ||
111 | |||
112 | u32 hi_num_bpatch_streams; /* 0x70 -- unused */ | ||
113 | u32 hi_mbox_isr_yield_limit; /* 0x74 */ | ||
114 | |||
115 | u32 hi_refclk_hz; /* 0x78 */ | ||
116 | u32 hi_ext_clk_detected; /* 0x7c */ | ||
117 | u32 hi_dbg_uart_txpin; /* 0x80 */ | ||
118 | u32 hi_dbg_uart_rxpin; /* 0x84 */ | ||
119 | u32 hi_hci_uart_baud; /* 0x88 */ | ||
120 | u32 hi_hci_uart_pin_assignments; /* 0x8C */ | ||
121 | |||
122 | u32 hi_hci_uart_baud_scale_val; /* 0x90 */ | ||
123 | u32 hi_hci_uart_baud_step_val; /* 0x94 */ | ||
124 | |||
125 | u32 hi_allocram_start; /* 0x98 */ | ||
126 | u32 hi_allocram_sz; /* 0x9c */ | ||
127 | u32 hi_hci_bridge_flags; /* 0xa0 */ | ||
128 | u32 hi_hci_uart_support_pins; /* 0xa4 */ | ||
129 | |||
130 | u32 hi_hci_uart_pwr_mgmt_params; /* 0xa8 */ | ||
131 | |||
132 | /* | ||
133 | * 0xa8 - [1]: 0 = UART FC active low, 1 = UART FC active high | ||
134 | * [31:16]: wakeup timeout in ms | ||
135 | */ | ||
136 | /* Pointer to extended board Data */ | ||
137 | u32 hi_board_ext_data; /* 0xac */ | ||
138 | u32 hi_board_ext_data_config; /* 0xb0 */ | ||
139 | /* | ||
140 | * Bit [0] : valid | ||
141 | * Bit [31:16]: size | ||
142 | */ | ||
143 | /* | ||
144 | * hi_reset_flag controls what is preserved across a target reset, | ||
145 | * such as restoring app_start after a warm reset, preserving the | ||
146 | * host interest area, or preserving ROM data, literals etc. | ||
147 | */ | ||
148 | u32 hi_reset_flag; /* 0xb4 */ | ||
149 | /* indicate hi_reset_flag is valid */ | ||
150 | u32 hi_reset_flag_valid; /* 0xb8 */ | ||
151 | u32 hi_hci_uart_pwr_mgmt_params_ext; /* 0xbc */ | ||
152 | /* 0xbc - [31:0]: idle timeout in ms */ | ||
153 | /* ACS flags */ | ||
154 | u32 hi_acs_flags; /* 0xc0 */ | ||
155 | u32 hi_console_flags; /* 0xc4 */ | ||
156 | u32 hi_nvram_state; /* 0xc8 */ | ||
157 | u32 hi_option_flag2; /* 0xcc */ | ||
158 | |||
159 | /* If non-zero, override values sent to Host in WMI_READY event. */ | ||
160 | u32 hi_sw_version_override; /* 0xd0 */ | ||
161 | u32 hi_abi_version_override; /* 0xd4 */ | ||
162 | |||
163 | /* | ||
164 | * Percentage of high priority RX traffic to total expected RX traffic | ||
165 | * applicable only to ar6004 | ||
166 | */ | ||
167 | u32 hi_hp_rx_traffic_ratio; /* 0xd8 */ | ||
168 | |||
169 | /* test applications flags */ | ||
170 | u32 hi_test_apps_related; /* 0xdc */ | ||
171 | /* location of test script */ | ||
172 | u32 hi_ota_testscript; /* 0xe0 */ | ||
173 | /* location of CAL data */ | ||
174 | u32 hi_cal_data; /* 0xe4 */ | ||
175 | |||
176 | /* Number of packet log buffers */ | ||
177 | u32 hi_pktlog_num_buffers; /* 0xe8 */ | ||
178 | |||
179 | /* wow extension configuration */ | ||
180 | u32 hi_wow_ext_config; /* 0xec */ | ||
181 | u32 hi_pwr_save_flags; /* 0xf0 */ | ||
182 | |||
183 | /* Spatial Multiplexing Power Save (SMPS) options */ | ||
184 | u32 hi_smps_options; /* 0xf4 */ | ||
185 | |||
186 | /* Interconnect-specific state */ | ||
187 | u32 hi_interconnect_state; /* 0xf8 */ | ||
188 | |||
189 | /* Coex configuration flags */ | ||
190 | u32 hi_coex_config; /* 0xfc */ | ||
191 | |||
192 | /* Early allocation support */ | ||
193 | u32 hi_early_alloc; /* 0x100 */ | ||
194 | /* FW swap field */ | ||
195 | /* | ||
196 | * Bits of this 32bit word will be used to pass specific swap | ||
197 | * instruction to FW | ||
198 | */ | ||
199 | /* | ||
200 | * Bit 0 -- AP Nart descriptor no swap. When this bit is set | ||
201 | * FW will not swap the TX descriptor, meaning packets are | ||
202 | * formed on the target processor. | ||
203 | */ | ||
204 | /* Bit 1 - unused */ | ||
205 | u32 hi_fw_swap; /* 0x104 */ | ||
206 | } __packed; | ||
207 | |||
208 | #define HI_ITEM(item) offsetof(struct host_interest, item) | ||
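For illustration (not part of this patch): combined with the HOST_INTEREST address above, HI_ITEM() gives the target RAM address of a structure member; the helper below is made up and the driver's actual accessor may differ.

/* Hypothetical helper (illustration only). */
static inline u32 ath10k_hi_item_addr(u32 item_offset)
{
	return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
}

/* e.g. address of hi_board_data: ath10k_hi_item_addr(HI_ITEM(hi_board_data)) */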
209 | |||
210 | /* Bits defined in hi_option_flag */ | ||
211 | |||
212 | /* Enable timer workaround */ | ||
213 | #define HI_OPTION_TIMER_WAR 0x01 | ||
214 | /* Limit BMI command credits */ | ||
215 | #define HI_OPTION_BMI_CRED_LIMIT 0x02 | ||
216 | /* Relay Dot11 hdr to/from host */ | ||
217 | #define HI_OPTION_RELAY_DOT11_HDR 0x04 | ||
218 | /* MAC addr method: 0 - locally administered, 1 - globally unique addrs */ | ||
219 | #define HI_OPTION_MAC_ADDR_METHOD 0x08 | ||
220 | /* Firmware Bridging */ | ||
221 | #define HI_OPTION_FW_BRIDGE 0x10 | ||
222 | /* Enable CPU profiling */ | ||
223 | #define HI_OPTION_ENABLE_PROFILE 0x20 | ||
224 | /* Disable debug logging */ | ||
225 | #define HI_OPTION_DISABLE_DBGLOG 0x40 | ||
226 | /* Skip Era Tracking */ | ||
227 | #define HI_OPTION_SKIP_ERA_TRACKING 0x80 | ||
228 | /* Disable PAPRD (debug) */ | ||
229 | #define HI_OPTION_PAPRD_DISABLE 0x100 | ||
230 | #define HI_OPTION_NUM_DEV_LSB 0x200 | ||
231 | #define HI_OPTION_NUM_DEV_MSB 0x800 | ||
232 | #define HI_OPTION_DEV_MODE_LSB 0x1000 | ||
233 | #define HI_OPTION_DEV_MODE_MSB 0x8000000 | ||
234 | /* Disable LowFreq Timer Stabilization */ | ||
235 | #define HI_OPTION_NO_LFT_STBL 0x10000000 | ||
236 | /* Skip regulatory scan */ | ||
237 | #define HI_OPTION_SKIP_REG_SCAN 0x20000000 | ||
238 | /* | ||
239 | * Do regulatory scan during init before | ||
240 | * sending WMI ready event to host | ||
241 | */ | ||
242 | #define HI_OPTION_INIT_REG_SCAN 0x40000000 | ||
243 | |||
244 | /* REV6: Do not adjust memory map */ | ||
245 | #define HI_OPTION_SKIP_MEMMAP 0x80000000 | ||
246 | |||
247 | #define HI_OPTION_MAC_ADDR_METHOD_SHIFT 3 | ||
248 | |||
249 | /* 2 bits of hi_option_flag are used to represent 3 modes */ | ||
250 | #define HI_OPTION_FW_MODE_IBSS 0x0 /* IBSS Mode */ | ||
251 | #define HI_OPTION_FW_MODE_BSS_STA 0x1 /* STA Mode */ | ||
252 | #define HI_OPTION_FW_MODE_AP 0x2 /* AP Mode */ | ||
253 | #define HI_OPTION_FW_MODE_BT30AMP 0x3 /* BT30 AMP Mode */ | ||
254 | |||
255 | /* 2 bits of hi_option_flag are used to represent 4 submodes */ | ||
256 | #define HI_OPTION_FW_SUBMODE_NONE 0x0 /* Normal mode */ | ||
257 | #define HI_OPTION_FW_SUBMODE_P2PDEV 0x1 /* p2p device mode */ | ||
258 | #define HI_OPTION_FW_SUBMODE_P2PCLIENT 0x2 /* p2p client mode */ | ||
259 | #define HI_OPTION_FW_SUBMODE_P2PGO 0x3 /* p2p go mode */ | ||
260 | |||
261 | /* Num dev Mask */ | ||
262 | #define HI_OPTION_NUM_DEV_MASK 0x7 | ||
263 | #define HI_OPTION_NUM_DEV_SHIFT 0x9 | ||
264 | |||
265 | /* firmware bridging */ | ||
266 | #define HI_OPTION_FW_BRIDGE_SHIFT 0x04 | ||
267 | |||
268 | /* | ||
269 | Fw Mode/SubMode Mask | ||
270 | |-----------------------------------------------------------------------------| | ||
271 | | SUB | SUB | SUB | SUB | | | | | | ||
272 | |MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0]| | ||
273 | | (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) | | ||
274 | |-----------------------------------------------------------------------------| | ||
275 | */ | ||
276 | #define HI_OPTION_FW_MODE_BITS 0x2 | ||
277 | #define HI_OPTION_FW_MODE_MASK 0x3 | ||
278 | #define HI_OPTION_FW_MODE_SHIFT 0xC | ||
279 | #define HI_OPTION_ALL_FW_MODE_MASK 0xFF | ||
280 | |||
281 | #define HI_OPTION_FW_SUBMODE_BITS 0x2 | ||
282 | #define HI_OPTION_FW_SUBMODE_MASK 0x3 | ||
283 | #define HI_OPTION_FW_SUBMODE_SHIFT 0x14 | ||
284 | #define HI_OPTION_ALL_FW_SUBMODE_MASK 0xFF00 | ||
285 | #define HI_OPTION_ALL_FW_SUBMODE_SHIFT 0x8 | ||
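Illustrative only: a hypothetical sketch of how the mode/submode shift and mask pairs above combine into a hi_option_flag value for device 0:

/* Illustration only: pack mode/submode for device 0. */
static inline u32 ath10k_hi_option_dev0(u32 fw_mode, u32 fw_submode)
{
	return ((fw_mode & HI_OPTION_FW_MODE_MASK) <<
		HI_OPTION_FW_MODE_SHIFT) |
	       ((fw_submode & HI_OPTION_FW_SUBMODE_MASK) <<
		HI_OPTION_FW_SUBMODE_SHIFT);
}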
286 | |||
287 | |||
288 | /* hi_option_flag2 options */ | ||
289 | #define HI_OPTION_OFFLOAD_AMSDU 0x01 | ||
290 | #define HI_OPTION_DFS_SUPPORT 0x02 /* Enable DFS support */ | ||
291 | #define HI_OPTION_ENABLE_RFKILL 0x04 /* RFKill Enable Feature*/ | ||
292 | #define HI_OPTION_RADIO_RETENTION_DISABLE 0x08 /* Disable radio retention */ | ||
293 | #define HI_OPTION_EARLY_CFG_DONE 0x10 /* Early configuration is complete */ | ||
294 | |||
295 | #define HI_OPTION_RF_KILL_SHIFT 0x2 | ||
296 | #define HI_OPTION_RF_KILL_MASK 0x1 | ||
297 | |||
298 | /* hi_reset_flag */ | ||
299 | /* preserve App Start address */ | ||
300 | #define HI_RESET_FLAG_PRESERVE_APP_START 0x01 | ||
301 | /* preserve host interest */ | ||
302 | #define HI_RESET_FLAG_PRESERVE_HOST_INTEREST 0x02 | ||
303 | /* preserve ROM data */ | ||
304 | #define HI_RESET_FLAG_PRESERVE_ROMDATA 0x04 | ||
305 | #define HI_RESET_FLAG_PRESERVE_NVRAM_STATE 0x08 | ||
306 | #define HI_RESET_FLAG_PRESERVE_BOOT_INFO 0x10 | ||
307 | #define HI_RESET_FLAG_WARM_RESET 0x20 | ||
308 | |||
309 | /* define hi_fw_swap bits */ | ||
310 | #define HI_DESC_IN_FW_BIT 0x01 | ||
311 | |||
312 | /* indicate the reset flag is valid */ | ||
313 | #define HI_RESET_FLAG_IS_VALID 0x12345678 | ||
314 | |||
315 | /* ACS is enabled */ | ||
316 | #define HI_ACS_FLAGS_ENABLED (1 << 0) | ||
317 | /* Use physical WWAN device */ | ||
318 | #define HI_ACS_FLAGS_USE_WWAN (1 << 1) | ||
319 | /* Use test VAP */ | ||
320 | #define HI_ACS_FLAGS_TEST_VAP (1 << 2) | ||
321 | |||
322 | /* | ||
323 | * CONSOLE FLAGS | ||
324 | * | ||
325 | * Bit Range Meaning | ||
326 | * --------- -------------------------------- | ||
327 | * 2..0 UART ID (0 = Default) | ||
328 | * 3 Baud Select (0 = 9600, 1 = 115200) | ||
329 | * 30..4 Reserved | ||
330 | * 31 Enable Console | ||
331 | * | ||
332 | */ | ||
333 | |||
334 | #define HI_CONSOLE_FLAGS_ENABLE (1 << 31) | ||
335 | #define HI_CONSOLE_FLAGS_UART_MASK (0x7) | ||
336 | #define HI_CONSOLE_FLAGS_UART_SHIFT 0 | ||
337 | #define HI_CONSOLE_FLAGS_BAUD_SELECT (1 << 3) | ||
338 | |||
339 | /* SM power save options */ | ||
340 | #define HI_SMPS_ALLOW_MASK (0x00000001) | ||
341 | #define HI_SMPS_MODE_MASK (0x00000002) | ||
342 | #define HI_SMPS_MODE_STATIC (0x00000000) | ||
343 | #define HI_SMPS_MODE_DYNAMIC (0x00000002) | ||
344 | #define HI_SMPS_DISABLE_AUTO_MODE (0x00000004) | ||
345 | #define HI_SMPS_DATA_THRESH_MASK (0x000007f8) | ||
346 | #define HI_SMPS_DATA_THRESH_SHIFT (3) | ||
347 | #define HI_SMPS_RSSI_THRESH_MASK (0x0007f800) | ||
348 | #define HI_SMPS_RSSI_THRESH_SHIFT (11) | ||
349 | #define HI_SMPS_LOWPWR_CM_MASK (0x00380000) | ||
350 | #define HI_SMPS_LOWPWR_CM_SHIFT (15) | ||
351 | #define HI_SMPS_HIPWR_CM_MASK (0x03c00000) | ||
352 | #define HI_SMPS_HIPWR_CM_SHIFT (19) | ||
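Illustrative only: a hypothetical example hi_smps_options value built from the definitions above (SMPS allowed, static mode, data threshold of 10):

/* Hypothetical example value (illustration only). */
#define HI_SMPS_EXAMPLE_OPTIONS \
	(HI_SMPS_ALLOW_MASK | HI_SMPS_MODE_STATIC | \
	 ((10 << HI_SMPS_DATA_THRESH_SHIFT) & HI_SMPS_DATA_THRESH_MASK))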
353 | |||
354 | /* | ||
355 | * WOW Extension configuration | ||
356 | * | ||
357 | * Bit Range Meaning | ||
358 | * --------- -------------------------------- | ||
359 | * 8..0 Size of each WOW pattern (max 511) | ||
360 | * 15..9 Number of patterns per list (max 127) | ||
361 | * 17..16 Number of lists (max 4) | ||
362 | * 30..18 Reserved | ||
363 | * 31 Enabled | ||
364 | * | ||
365 | * set values (except enable) to zeros for default settings | ||
366 | */ | ||
367 | |||
368 | #define HI_WOW_EXT_ENABLED_MASK (1 << 31) | ||
369 | #define HI_WOW_EXT_NUM_LIST_SHIFT 16 | ||
370 | #define HI_WOW_EXT_NUM_LIST_MASK (0x3 << HI_WOW_EXT_NUM_LIST_SHIFT) | ||
371 | #define HI_WOW_EXT_NUM_PATTERNS_SHIFT 9 | ||
372 | #define HI_WOW_EXT_NUM_PATTERNS_MASK (0x7F << HI_WOW_EXT_NUM_PATTERNS_SHIFT) | ||
373 | #define HI_WOW_EXT_PATTERN_SIZE_SHIFT 0 | ||
374 | #define HI_WOW_EXT_PATTERN_SIZE_MASK (0x1FF << HI_WOW_EXT_PATTERN_SIZE_SHIFT) | ||
375 | |||
376 | #define HI_WOW_EXT_MAKE_CONFIG(num_lists, count, size) \ | ||
377 | ((((num_lists) << HI_WOW_EXT_NUM_LIST_SHIFT) & \ | ||
378 | HI_WOW_EXT_NUM_LIST_MASK) | \ | ||
379 | (((count) << HI_WOW_EXT_NUM_PATTERNS_SHIFT) & \ | ||
380 | HI_WOW_EXT_NUM_PATTERNS_MASK) | \ | ||
381 | (((size) << HI_WOW_EXT_PATTERN_SIZE_SHIFT) & \ | ||
382 | HI_WOW_EXT_PATTERN_SIZE_MASK)) | ||
383 | |||
384 | #define HI_WOW_EXT_GET_NUM_LISTS(config) \ | ||
385 | (((config) & HI_WOW_EXT_NUM_LIST_MASK) >> HI_WOW_EXT_NUM_LIST_SHIFT) | ||
386 | #define HI_WOW_EXT_GET_NUM_PATTERNS(config) \ | ||
387 | (((config) & HI_WOW_EXT_NUM_PATTERNS_MASK) >> \ | ||
388 | HI_WOW_EXT_NUM_PATTERNS_SHIFT) | ||
389 | #define HI_WOW_EXT_GET_PATTERN_SIZE(config) \ | ||
390 | (((config) & HI_WOW_EXT_PATTERN_SIZE_MASK) >> \ | ||
391 | HI_WOW_EXT_PATTERN_SIZE_SHIFT) | ||
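Illustrative only: a hypothetical sketch of composing a WOW extension config with the macros above; HI_WOW_EXT_GET_NUM_LISTS() applied to the result would return 2 again.

/* Illustration only. */
static inline u32 ath10k_wow_ext_example_config(void)
{
	/* 2 pattern lists, 16 patterns per list, 128 bytes per pattern */
	return HI_WOW_EXT_ENABLED_MASK | HI_WOW_EXT_MAKE_CONFIG(2, 16, 128);
}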
392 | |||
393 | /* | ||
394 | * Early allocation configuration | ||
395 | * Supports RAM bank configuration before BMI is done, which eases memory | ||
396 | * allocation at a very early stage. | ||
397 | * Bit Range Meaning | ||
398 | * --------- ---------------------------------- | ||
399 | * [0:3] number of bank assigned to be IRAM | ||
400 | * [4:15] reserved | ||
401 | * [16:31] magic number | ||
402 | * | ||
403 | * Note: | ||
404 | * 1. the target firmware checks the magic number and, if it matches, | ||
405 | *    considers bits [0:15] valid and uses them to calculate the end of | ||
406 | *    DRAM. The early allocation is located in that area and may be | ||
407 | *    reclaimed when necessary. | ||
408 | * 2. if no magic number is found, the early allocation happens at the | ||
409 | *    "_end" symbol of ROM, which is located before the app-data and | ||
410 | *    might NOT be re-claimable. If this is adopted, the linker script | ||
411 | *    should keep this in mind to avoid data corruption. | ||
412 | */ | ||
413 | #define HI_EARLY_ALLOC_MAGIC 0x6d8a | ||
414 | #define HI_EARLY_ALLOC_MAGIC_MASK 0xffff0000 | ||
415 | #define HI_EARLY_ALLOC_MAGIC_SHIFT 16 | ||
416 | #define HI_EARLY_ALLOC_IRAM_BANKS_MASK 0x0000000f | ||
417 | #define HI_EARLY_ALLOC_IRAM_BANKS_SHIFT 0 | ||
418 | |||
419 | #define HI_EARLY_ALLOC_VALID() \ | ||
420 | ((((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_MAGIC_MASK) >> \ | ||
421 | HI_EARLY_ALLOC_MAGIC_SHIFT) == (HI_EARLY_ALLOC_MAGIC)) | ||
422 | #define HI_EARLY_ALLOC_GET_IRAM_BANKS() \ | ||
423 | (((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_IRAM_BANKS_MASK) \ | ||
424 | >> HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) | ||
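Illustrative only: a hypothetical example hi_early_alloc value carrying the magic number and claiming 2 IRAM banks:

/* Hypothetical example value (illustration only). */
#define HI_EARLY_ALLOC_EXAMPLE \
	((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) | \
	 (2 & HI_EARLY_ALLOC_IRAM_BANKS_MASK))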
425 | |||
426 | /* power save flag bit definitions */ | ||
427 | #define HI_PWR_SAVE_LPL_ENABLED 0x1 | ||
428 | /* b1-b3 reserved */ | ||
429 | /* b4-b5: dev0 LPL type: 0 - none | ||
430 |  *        1 - Reduce Pwr Search | ||
431 |  *        2 - Reduce Pwr Listen */ | ||
432 | /* b6-b7: dev1 LPL type and so on for max 8 devices */ | ||
433 | #define HI_PWR_SAVE_LPL_DEV0_LSB 4 | ||
434 | #define HI_PWR_SAVE_LPL_DEV_MASK 0x3 | ||
435 | /* power save related utility macros */ | ||
436 | #define HI_LPL_ENABLED() \ | ||
437 | ((HOST_INTEREST->hi_pwr_save_flags & HI_PWR_SAVE_LPL_ENABLED)) | ||
438 | #define HI_DEV_LPL_TYPE_GET(_devix) \ | ||
439 | (HOST_INTEREST->hi_pwr_save_flags & ((HI_PWR_SAVE_LPL_DEV_MASK) << \ | ||
440 | (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix)*2))) | ||
441 | |||
442 | #define HOST_INTEREST_SMPS_IS_ALLOWED() \ | ||
443 | ((HOST_INTEREST->hi_smps_options & HI_SMPS_ALLOW_MASK)) | ||
444 | |||
445 | /* Reserve 1024 bytes for extended board data */ | ||
446 | #define QCA988X_BOARD_DATA_SZ 7168 | ||
447 | #define QCA988X_BOARD_EXT_DATA_SZ 0 | ||
448 | |||
449 | #endif /* __TARGADDRS_H__ */ | ||
diff --git a/drivers/net/wireless/ath/ath10k/trace.c b/drivers/net/wireless/ath/ath10k/trace.c new file mode 100644 index 000000000000..4a31e2c6fbd4 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/trace.c | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2012 Qualcomm Atheros, Inc. | ||
3 | * | ||
4 | * Permission to use, copy, modify, and/or distribute this software for any | ||
5 | * purpose with or without fee is hereby granted, provided that the above | ||
6 | * copyright notice and this permission notice appear in all copies. | ||
7 | * | ||
8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
11 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
13 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
15 | */ | ||
16 | |||
17 | #include <linux/module.h> | ||
18 | |||
19 | #define CREATE_TRACE_POINTS | ||
20 | #include "trace.h" | ||
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h new file mode 100644 index 000000000000..85e806bf7257 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/trace.h | |||
@@ -0,0 +1,170 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) | ||
19 | |||
20 | #include <linux/tracepoint.h> | ||
21 | |||
22 | #define _TRACE_H_ | ||
23 | |||
24 | /* create empty functions when tracing is disabled */ | ||
25 | #if !defined(CONFIG_ATH10K_TRACING) | ||
26 | #undef TRACE_EVENT | ||
27 | #define TRACE_EVENT(name, proto, ...) \ | ||
28 | static inline void trace_ ## name(proto) {} | ||
29 | #undef DECLARE_EVENT_CLASS | ||
30 | #define DECLARE_EVENT_CLASS(...) | ||
31 | #undef DEFINE_EVENT | ||
32 | #define DEFINE_EVENT(evt_class, name, proto, ...) \ | ||
33 | static inline void trace_ ## name(proto) {} | ||
34 | #endif /* !CONFIG_ATH10K_TRACING || __CHECKER__ */ | ||
35 | |||
36 | #undef TRACE_SYSTEM | ||
37 | #define TRACE_SYSTEM ath10k | ||
38 | |||
39 | #define ATH10K_MSG_MAX 200 | ||
40 | |||
41 | DECLARE_EVENT_CLASS(ath10k_log_event, | ||
42 | TP_PROTO(struct va_format *vaf), | ||
43 | TP_ARGS(vaf), | ||
44 | TP_STRUCT__entry( | ||
45 | __dynamic_array(char, msg, ATH10K_MSG_MAX) | ||
46 | ), | ||
47 | TP_fast_assign( | ||
48 | WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), | ||
49 | ATH10K_MSG_MAX, | ||
50 | vaf->fmt, | ||
51 | *vaf->va) >= ATH10K_MSG_MAX); | ||
52 | ), | ||
53 | TP_printk("%s", __get_str(msg)) | ||
54 | ); | ||
55 | |||
56 | DEFINE_EVENT(ath10k_log_event, ath10k_log_err, | ||
57 | TP_PROTO(struct va_format *vaf), | ||
58 | TP_ARGS(vaf) | ||
59 | ); | ||
60 | |||
61 | DEFINE_EVENT(ath10k_log_event, ath10k_log_warn, | ||
62 | TP_PROTO(struct va_format *vaf), | ||
63 | TP_ARGS(vaf) | ||
64 | ); | ||
65 | |||
66 | DEFINE_EVENT(ath10k_log_event, ath10k_log_info, | ||
67 | TP_PROTO(struct va_format *vaf), | ||
68 | TP_ARGS(vaf) | ||
69 | ); | ||
70 | |||
71 | TRACE_EVENT(ath10k_log_dbg, | ||
72 | TP_PROTO(unsigned int level, struct va_format *vaf), | ||
73 | TP_ARGS(level, vaf), | ||
74 | TP_STRUCT__entry( | ||
75 | __field(unsigned int, level) | ||
76 | __dynamic_array(char, msg, ATH10K_MSG_MAX) | ||
77 | ), | ||
78 | TP_fast_assign( | ||
79 | __entry->level = level; | ||
80 | WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), | ||
81 | ATH10K_MSG_MAX, | ||
82 | vaf->fmt, | ||
83 | *vaf->va) >= ATH10K_MSG_MAX); | ||
84 | ), | ||
85 | TP_printk("%s", __get_str(msg)) | ||
86 | ); | ||
87 | |||
88 | TRACE_EVENT(ath10k_log_dbg_dump, | ||
89 | TP_PROTO(const char *msg, const char *prefix, | ||
90 | const void *buf, size_t buf_len), | ||
91 | |||
92 | TP_ARGS(msg, prefix, buf, buf_len), | ||
93 | |||
94 | TP_STRUCT__entry( | ||
95 | __string(msg, msg) | ||
96 | __string(prefix, prefix) | ||
97 | __field(size_t, buf_len) | ||
98 | __dynamic_array(u8, buf, buf_len) | ||
99 | ), | ||
100 | |||
101 | TP_fast_assign( | ||
102 | __assign_str(msg, msg); | ||
103 | __assign_str(prefix, prefix); | ||
104 | __entry->buf_len = buf_len; | ||
105 | memcpy(__get_dynamic_array(buf), buf, buf_len); | ||
106 | ), | ||
107 | |||
108 | TP_printk( | ||
109 | "%s/%s\n", __get_str(prefix), __get_str(msg) | ||
110 | ) | ||
111 | ); | ||
112 | |||
113 | TRACE_EVENT(ath10k_wmi_cmd, | ||
114 | TP_PROTO(int id, void *buf, size_t buf_len), | ||
115 | |||
116 | TP_ARGS(id, buf, buf_len), | ||
117 | |||
118 | TP_STRUCT__entry( | ||
119 | __field(unsigned int, id) | ||
120 | __field(size_t, buf_len) | ||
121 | __dynamic_array(u8, buf, buf_len) | ||
122 | ), | ||
123 | |||
124 | TP_fast_assign( | ||
125 | __entry->id = id; | ||
126 | __entry->buf_len = buf_len; | ||
127 | memcpy(__get_dynamic_array(buf), buf, buf_len); | ||
128 | ), | ||
129 | |||
130 | TP_printk( | ||
131 | "id %d len %zu", | ||
132 | __entry->id, | ||
133 | __entry->buf_len | ||
134 | ) | ||
135 | ); | ||
136 | |||
137 | TRACE_EVENT(ath10k_wmi_event, | ||
138 | TP_PROTO(int id, void *buf, size_t buf_len), | ||
139 | |||
140 | TP_ARGS(id, buf, buf_len), | ||
141 | |||
142 | TP_STRUCT__entry( | ||
143 | __field(unsigned int, id) | ||
144 | __field(size_t, buf_len) | ||
145 | __dynamic_array(u8, buf, buf_len) | ||
146 | ), | ||
147 | |||
148 | TP_fast_assign( | ||
149 | __entry->id = id; | ||
150 | __entry->buf_len = buf_len; | ||
151 | memcpy(__get_dynamic_array(buf), buf, buf_len); | ||
152 | ), | ||
153 | |||
154 | TP_printk( | ||
155 | "id %d len %zu", | ||
156 | __entry->id, | ||
157 | __entry->buf_len | ||
158 | ) | ||
159 | ); | ||
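For context only (not part of this file): the generated trace_ath10k_wmi_cmd()/trace_ath10k_wmi_event() calls would be invoked from the driver's WMI paths roughly as sketched below; cmd_id, event_id and skb are assumed locals at the call site.

/*
 * Sketch of assumed call sites (illustration only):
 *
 *	trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);
 *	trace_ath10k_wmi_event(event_id, skb->data, skb->len);
 */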
160 | |||
161 | #endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/ | ||
162 | |||
163 | /* we don't want to use include/trace/events */ | ||
164 | #undef TRACE_INCLUDE_PATH | ||
165 | #define TRACE_INCLUDE_PATH . | ||
166 | #undef TRACE_INCLUDE_FILE | ||
167 | #define TRACE_INCLUDE_FILE trace | ||
168 | |||
169 | /* This part must be outside protection */ | ||
170 | #include <trace/define_trace.h> | ||
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c new file mode 100644 index 000000000000..68b6faefd1d8 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/txrx.c | |||
@@ -0,0 +1,417 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #include "core.h" | ||
19 | #include "txrx.h" | ||
20 | #include "htt.h" | ||
21 | #include "mac.h" | ||
22 | #include "debug.h" | ||
23 | |||
24 | static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb) | ||
25 | { | ||
26 | if (!ATH10K_SKB_CB(skb)->htt.is_offchan) | ||
27 | return; | ||
28 | |||
29 | /* If the original wait_for_completion() timed out before | ||
30 | * {data,mgmt}_tx_completed() was called then we could complete | ||
31 | * offchan_tx_completed for a different skb. Prevent this by using | ||
32 | * offchan_tx_skb. */ | ||
33 | spin_lock_bh(&ar->data_lock); | ||
34 | if (ar->offchan_tx_skb != skb) { | ||
35 | ath10k_warn("completed old offchannel frame\n"); | ||
36 | goto out; | ||
37 | } | ||
38 | |||
39 | complete(&ar->offchan_tx_completed); | ||
40 | ar->offchan_tx_skb = NULL; /* just for sanity */ | ||
41 | |||
42 | ath10k_dbg(ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb); | ||
43 | out: | ||
44 | spin_unlock_bh(&ar->data_lock); | ||
45 | } | ||
46 | |||
47 | void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc) | ||
48 | { | ||
49 | struct device *dev = htt->ar->dev; | ||
50 | struct ieee80211_tx_info *info; | ||
51 | struct sk_buff *txfrag = ATH10K_SKB_CB(txdesc)->htt.txfrag; | ||
52 | struct sk_buff *msdu = ATH10K_SKB_CB(txdesc)->htt.msdu; | ||
53 | int ret; | ||
54 | |||
55 | if (ATH10K_SKB_CB(txdesc)->htt.refcount == 0) | ||
56 | return; | ||
57 | |||
58 | ATH10K_SKB_CB(txdesc)->htt.refcount--; | ||
59 | |||
60 | if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0) | ||
61 | return; | ||
62 | |||
63 | if (txfrag) { | ||
64 | ret = ath10k_skb_unmap(dev, txfrag); | ||
65 | if (ret) | ||
66 | ath10k_warn("txfrag unmap failed (%d)\n", ret); | ||
67 | |||
68 | dev_kfree_skb_any(txfrag); | ||
69 | } | ||
70 | |||
71 | ret = ath10k_skb_unmap(dev, msdu); | ||
72 | if (ret) | ||
73 | ath10k_warn("data skb unmap failed (%d)\n", ret); | ||
74 | |||
75 | ath10k_report_offchan_tx(htt->ar, msdu); | ||
76 | |||
77 | info = IEEE80211_SKB_CB(msdu); | ||
78 | memset(&info->status, 0, sizeof(info->status)); | ||
79 | |||
80 | if (ATH10K_SKB_CB(txdesc)->htt.discard) { | ||
81 | ieee80211_free_txskb(htt->ar->hw, msdu); | ||
82 | goto exit; | ||
83 | } | ||
84 | |||
85 | if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) | ||
86 | info->flags |= IEEE80211_TX_STAT_ACK; | ||
87 | |||
88 | if (ATH10K_SKB_CB(txdesc)->htt.no_ack) | ||
89 | info->flags &= ~IEEE80211_TX_STAT_ACK; | ||
90 | |||
91 | ieee80211_tx_status(htt->ar->hw, msdu); | ||
92 | /* we do not own the msdu anymore */ | ||
93 | |||
94 | exit: | ||
95 | spin_lock_bh(&htt->tx_lock); | ||
96 | htt->pending_tx[ATH10K_SKB_CB(txdesc)->htt.msdu_id] = NULL; | ||
97 | ath10k_htt_tx_free_msdu_id(htt, ATH10K_SKB_CB(txdesc)->htt.msdu_id); | ||
98 | __ath10k_htt_tx_dec_pending(htt); | ||
99 | if (bitmap_empty(htt->used_msdu_ids, htt->max_num_pending_tx)) | ||
100 | wake_up(&htt->empty_tx_wq); | ||
101 | spin_unlock_bh(&htt->tx_lock); | ||
102 | |||
103 | dev_kfree_skb_any(txdesc); | ||
104 | } | ||
105 | |||
106 | void ath10k_txrx_tx_completed(struct ath10k_htt *htt, | ||
107 | const struct htt_tx_done *tx_done) | ||
108 | { | ||
109 | struct sk_buff *txdesc; | ||
110 | |||
111 | ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n", | ||
112 | tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack); | ||
113 | |||
114 | if (tx_done->msdu_id >= htt->max_num_pending_tx) { | ||
115 | ath10k_warn("warning: msdu_id %d too big, ignoring\n", | ||
116 | tx_done->msdu_id); | ||
117 | return; | ||
118 | } | ||
119 | |||
120 | txdesc = htt->pending_tx[tx_done->msdu_id]; | ||
121 | |||
122 | ATH10K_SKB_CB(txdesc)->htt.discard = tx_done->discard; | ||
123 | ATH10K_SKB_CB(txdesc)->htt.no_ack = tx_done->no_ack; | ||
124 | |||
125 | ath10k_txrx_tx_unref(htt, txdesc); | ||
126 | } | ||
127 | |||
128 | static const u8 rx_legacy_rate_idx[] = { | ||
129 | 3, /* 0x00 - 11Mbps */ | ||
130 | 2, /* 0x01 - 5.5Mbps */ | ||
131 | 1, /* 0x02 - 2Mbps */ | ||
132 | 0, /* 0x03 - 1Mbps */ | ||
133 | 3, /* 0x04 - 11Mbps */ | ||
134 | 2, /* 0x05 - 5.5Mbps */ | ||
135 | 1, /* 0x06 - 2Mbps */ | ||
136 | 0, /* 0x07 - 1Mbps */ | ||
137 | 10, /* 0x08 - 48Mbps */ | ||
138 | 8, /* 0x09 - 24Mbps */ | ||
139 | 6, /* 0x0A - 12Mbps */ | ||
140 | 4, /* 0x0B - 6Mbps */ | ||
141 | 11, /* 0x0C - 54Mbps */ | ||
142 | 9, /* 0x0D - 36Mbps */ | ||
143 | 7, /* 0x0E - 18Mbps */ | ||
144 | 5, /* 0x0F - 9Mbps */ | ||
145 | }; | ||
146 | |||
147 | static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info, | ||
148 | enum ieee80211_band band, | ||
149 | struct ieee80211_rx_status *status) | ||
150 | { | ||
151 | u8 cck, rate, rate_idx, bw, sgi, mcs, nss; | ||
152 | u8 info0 = info->rate.info0; | ||
153 | u32 info1 = info->rate.info1; | ||
154 | u32 info2 = info->rate.info2; | ||
155 | u8 preamble = 0; | ||
156 | |||
157 | /* Check if valid fields */ | ||
158 | if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID)) | ||
159 | return; | ||
160 | |||
161 | preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE); | ||
162 | |||
163 | switch (preamble) { | ||
164 | case HTT_RX_LEGACY: | ||
165 | cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK; | ||
166 | rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE); | ||
167 | rate_idx = 0; | ||
168 | |||
169 | if (rate < 0x08 || rate > 0x0F) | ||
170 | break; | ||
171 | |||
172 | switch (band) { | ||
173 | case IEEE80211_BAND_2GHZ: | ||
174 | if (cck) | ||
175 | rate &= ~BIT(3); | ||
176 | rate_idx = rx_legacy_rate_idx[rate]; | ||
177 | break; | ||
178 | case IEEE80211_BAND_5GHZ: | ||
179 | rate_idx = rx_legacy_rate_idx[rate]; | ||
180 | /* We use the same rate table that is registered | ||
181 |    with the HW - ath10k_rates[]. In the 5GHz case | ||
182 |    skip the CCK rates, hence the -4 here */ | ||
183 | rate_idx -= 4; | ||
184 | break; | ||
185 | default: | ||
186 | break; | ||
187 | } | ||
188 | |||
189 | status->rate_idx = rate_idx; | ||
190 | break; | ||
191 | case HTT_RX_HT: | ||
192 | case HTT_RX_HT_WITH_TXBF: | ||
193 | /* HT-SIG - Table 20-11 in info1 and info2 */ | ||
194 | mcs = info1 & 0x1F; | ||
195 | nss = mcs >> 3; | ||
196 | bw = (info1 >> 7) & 1; | ||
197 | sgi = (info2 >> 7) & 1; | ||
198 | |||
199 | status->rate_idx = mcs; | ||
200 | status->flag |= RX_FLAG_HT; | ||
201 | if (sgi) | ||
202 | status->flag |= RX_FLAG_SHORT_GI; | ||
203 | if (bw) | ||
204 | status->flag |= RX_FLAG_40MHZ; | ||
205 | break; | ||
206 | case HTT_RX_VHT: | ||
207 | case HTT_RX_VHT_WITH_TXBF: | ||
208 | /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2 | ||
209 | TODO check this */ | ||
210 | mcs = (info2 >> 4) & 0x0F; | ||
211 | nss = (info1 >> 10) & 0x07; | ||
212 | bw = info1 & 3; | ||
213 | sgi = info2 & 1; | ||
214 | |||
215 | status->rate_idx = mcs; | ||
216 | status->vht_nss = nss; | ||
217 | |||
218 | if (sgi) | ||
219 | status->flag |= RX_FLAG_SHORT_GI; | ||
220 | |||
221 | switch (bw) { | ||
222 | /* 20MHZ */ | ||
223 | case 0: | ||
224 | break; | ||
225 | /* 40MHZ */ | ||
226 | case 1: | ||
227 | status->flag |= RX_FLAG_40MHZ; | ||
228 | break; | ||
229 | /* 80MHZ */ | ||
230 | case 2: | ||
231 | status->flag |= RX_FLAG_80MHZ; | ||
232 | } | ||
233 | |||
234 | status->flag |= RX_FLAG_VHT; | ||
235 | break; | ||
236 | default: | ||
237 | break; | ||
238 | } | ||
239 | } | ||
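
For reference, a minimal userspace sketch of the legacy-rate lookup above: it copies the rx_legacy_rate_idx[] mapping and shows how the CCK bit is masked off on 2 GHz and how the -4 offset removes the CCK entries on 5 GHz. The helper and the band flag are illustrative, not driver API.

#include <stdio.h>
#include <stdint.h>

/* Same mapping as rx_legacy_rate_idx[] above: HW legacy rate code -> index
 * into a bitrate table that lists the four CCK rates first, then OFDM. */
static const uint8_t legacy_rate_idx[16] = {
        3, 2, 1, 0, 3, 2, 1, 0,         /* 0x00-0x07: CCK 11/5.5/2/1 Mbps */
        10, 8, 6, 4, 11, 9, 7, 5,       /* 0x08-0x0F: OFDM 48/24/12/6/54/36/18/9 */
};

/* Illustrative helper: is_5ghz is 0 for 2 GHz, 1 for 5 GHz (assumption). */
static int legacy_rate_to_idx(uint8_t rate, int cck, int is_5ghz)
{
        if (rate < 0x08 || rate > 0x0F)
                return -1;                      /* same validity window as above */

        if (is_5ghz)
                return legacy_rate_idx[rate] - 4;       /* 5 GHz table has no CCK */

        if (cck)
                rate &= ~(1u << 3);             /* 2 GHz CCK codes carry bit 3 set */

        return legacy_rate_idx[rate];
}

int main(void)
{
        /* 0x0C is the 54 Mbps code: index 11 on 2 GHz, 7 on 5 GHz. */
        printf("2GHz idx=%d\n", legacy_rate_to_idx(0x0C, 0, 0));
        printf("5GHz idx=%d\n", legacy_rate_to_idx(0x0C, 0, 1));
        return 0;
}
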
240 | |||
241 | void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info) | ||
242 | { | ||
243 | struct ieee80211_rx_status *status; | ||
244 | struct ieee80211_channel *ch; | ||
245 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)info->skb->data; | ||
246 | |||
247 | status = IEEE80211_SKB_RXCB(info->skb); | ||
248 | memset(status, 0, sizeof(*status)); | ||
249 | |||
250 | if (info->encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) { | ||
251 | status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED | | ||
252 | RX_FLAG_MMIC_STRIPPED; | ||
253 | hdr->frame_control = __cpu_to_le16( | ||
254 | __le16_to_cpu(hdr->frame_control) & | ||
255 | ~IEEE80211_FCTL_PROTECTED); | ||
256 | } | ||
257 | |||
258 | if (info->status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) | ||
259 | status->flag |= RX_FLAG_MMIC_ERROR; | ||
260 | |||
261 | if (info->fcs_err) | ||
262 | status->flag |= RX_FLAG_FAILED_FCS_CRC; | ||
263 | |||
264 | status->signal = info->signal; | ||
265 | |||
266 | spin_lock_bh(&ar->data_lock); | ||
267 | ch = ar->scan_channel; | ||
268 | if (!ch) | ||
269 | ch = ar->rx_channel; | ||
270 | spin_unlock_bh(&ar->data_lock); | ||
271 | |||
272 | if (!ch) { | ||
273 | ath10k_warn("no channel configured; ignoring frame!\n"); | ||
274 | dev_kfree_skb_any(info->skb); | ||
275 | return; | ||
276 | } | ||
277 | |||
278 | process_rx_rates(ar, info, ch->band, status); | ||
279 | status->band = ch->band; | ||
280 | status->freq = ch->center_freq; | ||
281 | |||
282 | ath10k_dbg(ATH10K_DBG_DATA, | ||
283 | "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u\n", | ||
284 | info->skb, | ||
285 | info->skb->len, | ||
286 | status->flag == 0 ? "legacy" : "", | ||
287 | status->flag & RX_FLAG_HT ? "ht" : "", | ||
288 | status->flag & RX_FLAG_VHT ? "vht" : "", | ||
289 | status->flag & RX_FLAG_40MHZ ? "40" : "", | ||
290 | status->flag & RX_FLAG_80MHZ ? "80" : "", | ||
291 | status->flag & RX_FLAG_SHORT_GI ? "sgi " : "", | ||
292 | status->rate_idx, | ||
293 | status->vht_nss, | ||
294 | status->freq, | ||
295 | status->band); | ||
296 | |||
297 | ieee80211_rx(ar->hw, info->skb); | ||
298 | } | ||
299 | |||
300 | struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, | ||
301 | const u8 *addr) | ||
302 | { | ||
303 | struct ath10k_peer *peer; | ||
304 | |||
305 | lockdep_assert_held(&ar->data_lock); | ||
306 | |||
307 | list_for_each_entry(peer, &ar->peers, list) { | ||
308 | if (peer->vdev_id != vdev_id) | ||
309 | continue; | ||
310 | if (memcmp(peer->addr, addr, ETH_ALEN)) | ||
311 | continue; | ||
312 | |||
313 | return peer; | ||
314 | } | ||
315 | |||
316 | return NULL; | ||
317 | } | ||
318 | |||
319 | static struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, | ||
320 | int peer_id) | ||
321 | { | ||
322 | struct ath10k_peer *peer; | ||
323 | |||
324 | lockdep_assert_held(&ar->data_lock); | ||
325 | |||
326 | list_for_each_entry(peer, &ar->peers, list) | ||
327 | if (test_bit(peer_id, peer->peer_ids)) | ||
328 | return peer; | ||
329 | |||
330 | return NULL; | ||
331 | } | ||
332 | |||
333 | static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id, | ||
334 | const u8 *addr, bool expect_mapped) | ||
335 | { | ||
336 | int ret; | ||
337 | |||
338 | ret = wait_event_timeout(ar->peer_mapping_wq, ({ | ||
339 | bool mapped; | ||
340 | |||
341 | spin_lock_bh(&ar->data_lock); | ||
342 | mapped = !!ath10k_peer_find(ar, vdev_id, addr); | ||
343 | spin_unlock_bh(&ar->data_lock); | ||
344 | |||
345 | mapped == expect_mapped; | ||
346 | }), 3*HZ); | ||
347 | |||
348 | if (ret <= 0) | ||
349 | return -ETIMEDOUT; | ||
350 | |||
351 | return 0; | ||
352 | } | ||
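
The helper above relies on wait_event_timeout() re-checking the peer list under ar->data_lock each time the mapping waitqueue is woken. A rough userspace analogue of that wait/signal pattern, sketched with a pthread mutex and condition variable (all names here are illustrative, not driver code):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

/* Illustrative state guarded by a lock, standing in for ar->data_lock
 * and the peer list. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool peer_mapped;

/* Wait until peer_mapped == expect_mapped or the timeout expires,
 * mirroring the wait_event_timeout() usage above. */
static int wait_for_peer_state(bool expect_mapped, int timeout_sec)
{
        struct timespec deadline;
        int rc = 0;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += timeout_sec;

        pthread_mutex_lock(&lock);
        while (peer_mapped != expect_mapped && rc != ETIMEDOUT)
                rc = pthread_cond_timedwait(&cond, &lock, &deadline);
        rc = (peer_mapped == expect_mapped) ? 0 : -ETIMEDOUT;
        pthread_mutex_unlock(&lock);

        return rc;
}

/* The event side (peer map/unmap) updates the flag and signals waiters. */
static void set_peer_mapped(bool mapped)
{
        pthread_mutex_lock(&lock);
        peer_mapped = mapped;
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        set_peer_mapped(true);
        return wait_for_peer_state(true, 3);    /* returns 0 immediately */
}
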
353 | |||
354 | int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr) | ||
355 | { | ||
356 | return ath10k_wait_for_peer_common(ar, vdev_id, addr, true); | ||
357 | } | ||
358 | |||
359 | int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr) | ||
360 | { | ||
361 | return ath10k_wait_for_peer_common(ar, vdev_id, addr, false); | ||
362 | } | ||
363 | |||
364 | void ath10k_peer_map_event(struct ath10k_htt *htt, | ||
365 | struct htt_peer_map_event *ev) | ||
366 | { | ||
367 | struct ath10k *ar = htt->ar; | ||
368 | struct ath10k_peer *peer; | ||
369 | |||
370 | spin_lock_bh(&ar->data_lock); | ||
371 | peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr); | ||
372 | if (!peer) { | ||
373 | peer = kzalloc(sizeof(*peer), GFP_ATOMIC); | ||
374 | if (!peer) | ||
375 | goto exit; | ||
376 | |||
377 | peer->vdev_id = ev->vdev_id; | ||
378 | memcpy(peer->addr, ev->addr, ETH_ALEN); | ||
379 | list_add(&peer->list, &ar->peers); | ||
380 | wake_up(&ar->peer_mapping_wq); | ||
381 | } | ||
382 | |||
383 | ath10k_dbg(ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n", | ||
384 | ev->vdev_id, ev->addr, ev->peer_id); | ||
385 | |||
386 | set_bit(ev->peer_id, peer->peer_ids); | ||
387 | exit: | ||
388 | spin_unlock_bh(&ar->data_lock); | ||
389 | } | ||
390 | |||
391 | void ath10k_peer_unmap_event(struct ath10k_htt *htt, | ||
392 | struct htt_peer_unmap_event *ev) | ||
393 | { | ||
394 | struct ath10k *ar = htt->ar; | ||
395 | struct ath10k_peer *peer; | ||
396 | |||
397 | spin_lock_bh(&ar->data_lock); | ||
398 | peer = ath10k_peer_find_by_id(ar, ev->peer_id); | ||
399 | if (!peer) { | ||
400 | ath10k_warn("unknown peer id %d\n", ev->peer_id); | ||
401 | goto exit; | ||
402 | } | ||
403 | |||
404 | ath10k_dbg(ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n", | ||
405 | peer->vdev_id, peer->addr, ev->peer_id); | ||
406 | |||
407 | clear_bit(ev->peer_id, peer->peer_ids); | ||
408 | |||
409 | if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) { | ||
410 | list_del(&peer->list); | ||
411 | kfree(peer); | ||
412 | wake_up(&ar->peer_mapping_wq); | ||
413 | } | ||
414 | |||
415 | exit: | ||
416 | spin_unlock_bh(&ar->data_lock); | ||
417 | } | ||
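
The map/unmap handlers above keep one ath10k_peer entry per (vdev id, MAC address) pair and track firmware peer ids in a bitmap; the entry is freed once the last id is cleared. A self-contained sketch of the (vdev, MAC) lookup rule using a plain singly-linked list instead of the kernel list API (illustrative only):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Illustrative stand-in for struct ath10k_peer: one node per (vdev, MAC). */
struct peer {
        struct peer *next;
        int vdev_id;
        uint8_t addr[ETH_ALEN];
};

/* Same lookup rule as ath10k_peer_find(): both vdev id and MAC must match. */
static struct peer *peer_find(struct peer *head, int vdev_id,
                              const uint8_t *addr)
{
        for (; head; head = head->next) {
                if (head->vdev_id != vdev_id)
                        continue;
                if (memcmp(head->addr, addr, ETH_ALEN))
                        continue;
                return head;
        }
        return NULL;
}

int main(void)
{
        struct peer p1 = { .next = NULL, .vdev_id = 0,
                           .addr = { 0x00, 0x03, 0x7f, 0x01, 0x02, 0x03 } };
        struct peer p0 = { .next = &p1, .vdev_id = 1,
                           .addr = { 0x00, 0x03, 0x7f, 0xaa, 0xbb, 0xcc } };
        const uint8_t want[ETH_ALEN] = { 0x00, 0x03, 0x7f, 0x01, 0x02, 0x03 };

        printf("found: %s\n", peer_find(&p0, 0, want) ? "yes" : "no");
        return 0;
}
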
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h new file mode 100644 index 000000000000..e78632a76df7 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/txrx.h | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | #ifndef _TXRX_H_ | ||
18 | #define _TXRX_H_ | ||
19 | |||
20 | #include "htt.h" | ||
21 | |||
22 | void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc); | ||
23 | void ath10k_txrx_tx_completed(struct ath10k_htt *htt, | ||
24 | const struct htt_tx_done *tx_done); | ||
25 | void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info); | ||
26 | |||
27 | struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, | ||
28 | const u8 *addr); | ||
29 | int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, | ||
30 | const u8 *addr); | ||
31 | int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, | ||
32 | const u8 *addr); | ||
33 | |||
34 | void ath10k_peer_map_event(struct ath10k_htt *htt, | ||
35 | struct htt_peer_map_event *ev); | ||
36 | void ath10k_peer_unmap_event(struct ath10k_htt *htt, | ||
37 | struct htt_peer_unmap_event *ev); | ||
38 | |||
39 | #endif | ||
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c new file mode 100644 index 000000000000..7d4b7987422d --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/wmi.c | |||
@@ -0,0 +1,2081 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #include <linux/skbuff.h> | ||
19 | |||
20 | #include "core.h" | ||
21 | #include "htc.h" | ||
22 | #include "debug.h" | ||
23 | #include "wmi.h" | ||
24 | #include "mac.h" | ||
25 | |||
26 | void ath10k_wmi_flush_tx(struct ath10k *ar) | ||
27 | { | ||
28 | int ret; | ||
29 | |||
30 | ret = wait_event_timeout(ar->wmi.wq, | ||
31 | atomic_read(&ar->wmi.pending_tx_count) == 0, | ||
32 | 5*HZ); | ||
33 | if (atomic_read(&ar->wmi.pending_tx_count) == 0) | ||
34 | return; | ||
35 | |||
36 | if (ret == 0) | ||
37 | ret = -ETIMEDOUT; | ||
38 | |||
39 | if (ret < 0) | ||
40 | ath10k_warn("wmi flush failed (%d)\n", ret); | ||
41 | } | ||
42 | |||
43 | int ath10k_wmi_wait_for_service_ready(struct ath10k *ar) | ||
44 | { | ||
45 | int ret; | ||
46 | ret = wait_for_completion_timeout(&ar->wmi.service_ready, | ||
47 | WMI_SERVICE_READY_TIMEOUT_HZ); | ||
48 | return ret; | ||
49 | } | ||
50 | |||
51 | int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar) | ||
52 | { | ||
53 | int ret; | ||
54 | ret = wait_for_completion_timeout(&ar->wmi.unified_ready, | ||
55 | WMI_UNIFIED_READY_TIMEOUT_HZ); | ||
56 | return ret; | ||
57 | } | ||
58 | |||
59 | static struct sk_buff *ath10k_wmi_alloc_skb(u32 len) | ||
60 | { | ||
61 | struct sk_buff *skb; | ||
62 | u32 round_len = roundup(len, 4); | ||
63 | |||
64 | skb = ath10k_htc_alloc_skb(WMI_SKB_HEADROOM + round_len); | ||
65 | if (!skb) | ||
66 | return NULL; | ||
67 | |||
68 | skb_reserve(skb, WMI_SKB_HEADROOM); | ||
69 | if (!IS_ALIGNED((unsigned long)skb->data, 4)) | ||
70 | ath10k_warn("Unaligned WMI skb\n"); | ||
71 | |||
72 | skb_put(skb, round_len); | ||
73 | memset(skb->data, 0, round_len); | ||
74 | |||
75 | return skb; | ||
76 | } | ||
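
The allocator above rounds the requested length up to a 4-byte multiple, reserves WMI_SKB_HEADROOM in front of the payload, and zeroes the payload; the headroom is what later lets the command header be pushed without reallocating. A plain-C sketch of that buffer layout, with an illustrative headroom value rather than the driver's constant:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define HEADROOM 32     /* illustrative stand-in for WMI_SKB_HEADROOM */

struct buf {
        uint8_t *start; /* allocation start */
        uint8_t *data;  /* payload start, after the reserved headroom */
        size_t len;     /* payload length */
};

/* Round len up to a 4-byte multiple, like roundup(len, 4) above. */
static size_t round4(size_t len)
{
        return (len + 3) & ~(size_t)3;
}

static struct buf *buf_alloc(size_t len)
{
        size_t round_len = round4(len);
        struct buf *b = malloc(sizeof(*b));

        if (!b)
                return NULL;
        b->start = calloc(1, HEADROOM + round_len);
        if (!b->start) {
                free(b);
                return NULL;
        }
        b->data = b->start + HEADROOM;  /* "skb_reserve" */
        b->len = round_len;             /* "skb_put" of the zeroed payload */
        return b;
}

/* "skb_push": move data back into the headroom to prepend a header. */
static uint8_t *buf_push(struct buf *b, size_t hdr_len)
{
        if ((size_t)(b->data - b->start) < hdr_len)
                return NULL;
        b->data -= hdr_len;
        b->len += hdr_len;
        return b->data;
}

int main(void)
{
        struct buf *b = buf_alloc(10);  /* payload rounded up to 12 */

        if (!b)
                return 1;
        printf("payload %zu, header fits: %s\n", b->len,
               buf_push(b, 4) ? "yes" : "no");
        free(b->start);
        free(b);
        return 0;
}
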
77 | |||
78 | static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) | ||
79 | { | ||
80 | dev_kfree_skb(skb); | ||
81 | |||
82 | if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0) | ||
83 | wake_up(&ar->wmi.wq); | ||
84 | } | ||
85 | |||
86 | /* WMI command API */ | ||
87 | static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, | ||
88 | enum wmi_cmd_id cmd_id) | ||
89 | { | ||
90 | struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); | ||
91 | struct wmi_cmd_hdr *cmd_hdr; | ||
92 | int status; | ||
93 | u32 cmd = 0; | ||
94 | |||
95 | if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL) | ||
96 | return -ENOMEM; | ||
97 | |||
98 | cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID); | ||
99 | |||
100 | cmd_hdr = (struct wmi_cmd_hdr *)skb->data; | ||
101 | cmd_hdr->cmd_id = __cpu_to_le32(cmd); | ||
102 | |||
103 | if (atomic_add_return(1, &ar->wmi.pending_tx_count) > | ||
104 | WMI_MAX_PENDING_TX_COUNT) { | ||
105 | /* avoid using up memory when FW hangs */ | ||
106 | atomic_dec(&ar->wmi.pending_tx_count); | ||
107 | return -EBUSY; | ||
108 | } | ||
109 | |||
110 | memset(skb_cb, 0, sizeof(*skb_cb)); | ||
111 | |||
112 | trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len); | ||
113 | |||
114 | status = ath10k_htc_send(ar->htc, ar->wmi.eid, skb); | ||
115 | if (status) { | ||
116 | dev_kfree_skb_any(skb); | ||
117 | atomic_dec(&ar->wmi.pending_tx_count); | ||
118 | return status; | ||
119 | } | ||
120 | |||
121 | return 0; | ||
122 | } | ||
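
The send path above increments a pending-TX counter before handing the command to HTC and refuses new commands once the count exceeds WMI_MAX_PENDING_TX_COUNT, so a hung firmware cannot exhaust host memory. A minimal sketch of that throttle using C11 atomics (the limit and names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define MAX_PENDING 128 /* illustrative stand-in for WMI_MAX_PENDING_TX_COUNT */

static atomic_int pending_tx;

/* Returns 0 if the caller may transmit, -1 ("EBUSY") if too many commands
 * are already in flight; mirrors the increment-then-check logic above. */
static int tx_reserve(void)
{
        if (atomic_fetch_add(&pending_tx, 1) + 1 > MAX_PENDING) {
                atomic_fetch_sub(&pending_tx, 1);
                return -1;
        }
        return 0;
}

/* Called from the TX-completion path; the last completion is where the
 * driver wakes anyone flushing and waiting for the counter to hit zero. */
static void tx_complete(void)
{
        if (atomic_fetch_sub(&pending_tx, 1) - 1 == 0)
                ;       /* wake_up(&ar->wmi.wq) in the driver */
}

int main(void)
{
        if (tx_reserve() == 0) {
                /* ... hand the command to the transport ... */
                tx_complete();
        }
        printf("pending now %d\n", atomic_load(&pending_tx));
        return 0;
}
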
123 | |||
124 | static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb) | ||
125 | { | ||
126 | struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data; | ||
127 | enum wmi_scan_event_type event_type; | ||
128 | enum wmi_scan_completion_reason reason; | ||
129 | u32 freq; | ||
130 | u32 req_id; | ||
131 | u32 scan_id; | ||
132 | u32 vdev_id; | ||
133 | |||
134 | event_type = __le32_to_cpu(event->event_type); | ||
135 | reason = __le32_to_cpu(event->reason); | ||
136 | freq = __le32_to_cpu(event->channel_freq); | ||
137 | req_id = __le32_to_cpu(event->scan_req_id); | ||
138 | scan_id = __le32_to_cpu(event->scan_id); | ||
139 | vdev_id = __le32_to_cpu(event->vdev_id); | ||
140 | |||
141 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENTID\n"); | ||
142 | ath10k_dbg(ATH10K_DBG_WMI, | ||
143 | "scan event type %d reason %d freq %d req_id %d " | ||
144 | "scan_id %d vdev_id %d\n", | ||
145 | event_type, reason, freq, req_id, scan_id, vdev_id); | ||
146 | |||
147 | spin_lock_bh(&ar->data_lock); | ||
148 | |||
149 | switch (event_type) { | ||
150 | case WMI_SCAN_EVENT_STARTED: | ||
151 | ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_STARTED\n"); | ||
152 | if (ar->scan.in_progress && ar->scan.is_roc) | ||
153 | ieee80211_ready_on_channel(ar->hw); | ||
154 | |||
155 | complete(&ar->scan.started); | ||
156 | break; | ||
157 | case WMI_SCAN_EVENT_COMPLETED: | ||
158 | ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_COMPLETED\n"); | ||
159 | switch (reason) { | ||
160 | case WMI_SCAN_REASON_COMPLETED: | ||
161 | ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_COMPLETED\n"); | ||
162 | break; | ||
163 | case WMI_SCAN_REASON_CANCELLED: | ||
164 | ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_CANCELLED\n"); | ||
165 | break; | ||
166 | case WMI_SCAN_REASON_PREEMPTED: | ||
167 | ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_PREEMPTED\n"); | ||
168 | break; | ||
169 | case WMI_SCAN_REASON_TIMEDOUT: | ||
170 | ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_TIMEDOUT\n"); | ||
171 | break; | ||
172 | default: | ||
173 | break; | ||
174 | } | ||
175 | |||
176 | ar->scan_channel = NULL; | ||
177 | if (!ar->scan.in_progress) { | ||
178 | ath10k_warn("no scan requested, ignoring\n"); | ||
179 | break; | ||
180 | } | ||
181 | |||
182 | if (ar->scan.is_roc) { | ||
183 | ath10k_offchan_tx_purge(ar); | ||
184 | |||
185 | if (!ar->scan.aborting) | ||
186 | ieee80211_remain_on_channel_expired(ar->hw); | ||
187 | } else { | ||
188 | ieee80211_scan_completed(ar->hw, ar->scan.aborting); | ||
189 | } | ||
190 | |||
191 | del_timer(&ar->scan.timeout); | ||
192 | complete_all(&ar->scan.completed); | ||
193 | ar->scan.in_progress = false; | ||
194 | break; | ||
195 | case WMI_SCAN_EVENT_BSS_CHANNEL: | ||
196 | ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_BSS_CHANNEL\n"); | ||
197 | ar->scan_channel = NULL; | ||
198 | break; | ||
199 | case WMI_SCAN_EVENT_FOREIGN_CHANNEL: | ||
200 | ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_FOREIGN_CHANNEL\n"); | ||
201 | ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq); | ||
202 | if (ar->scan.in_progress && ar->scan.is_roc && | ||
203 | ar->scan.roc_freq == freq) { | ||
204 | complete(&ar->scan.on_channel); | ||
205 | } | ||
206 | break; | ||
207 | case WMI_SCAN_EVENT_DEQUEUED: | ||
208 | ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_DEQUEUED\n"); | ||
209 | break; | ||
210 | case WMI_SCAN_EVENT_PREEMPTED: | ||
211 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_PREEMPTED\n"); | ||
212 | break; | ||
213 | case WMI_SCAN_EVENT_START_FAILED: | ||
214 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_START_FAILED\n"); | ||
215 | break; | ||
216 | default: | ||
217 | break; | ||
218 | } | ||
219 | |||
220 | spin_unlock_bh(&ar->data_lock); | ||
221 | return 0; | ||
222 | } | ||
223 | |||
224 | static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode) | ||
225 | { | ||
226 | enum ieee80211_band band; | ||
227 | |||
228 | switch (phy_mode) { | ||
229 | case MODE_11A: | ||
230 | case MODE_11NA_HT20: | ||
231 | case MODE_11NA_HT40: | ||
232 | case MODE_11AC_VHT20: | ||
233 | case MODE_11AC_VHT40: | ||
234 | case MODE_11AC_VHT80: | ||
235 | band = IEEE80211_BAND_5GHZ; | ||
236 | break; | ||
237 | case MODE_11G: | ||
238 | case MODE_11B: | ||
239 | case MODE_11GONLY: | ||
240 | case MODE_11NG_HT20: | ||
241 | case MODE_11NG_HT40: | ||
242 | case MODE_11AC_VHT20_2G: | ||
243 | case MODE_11AC_VHT40_2G: | ||
244 | case MODE_11AC_VHT80_2G: | ||
245 | default: | ||
246 | band = IEEE80211_BAND_2GHZ; | ||
247 | } | ||
248 | |||
249 | return band; | ||
250 | } | ||
251 | |||
252 | static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band) | ||
253 | { | ||
254 | u8 rate_idx = 0; | ||
255 | |||
256 | /* rate in Kbps */ | ||
257 | switch (rate) { | ||
258 | case 1000: | ||
259 | rate_idx = 0; | ||
260 | break; | ||
261 | case 2000: | ||
262 | rate_idx = 1; | ||
263 | break; | ||
264 | case 5500: | ||
265 | rate_idx = 2; | ||
266 | break; | ||
267 | case 11000: | ||
268 | rate_idx = 3; | ||
269 | break; | ||
270 | case 6000: | ||
271 | rate_idx = 4; | ||
272 | break; | ||
273 | case 9000: | ||
274 | rate_idx = 5; | ||
275 | break; | ||
276 | case 12000: | ||
277 | rate_idx = 6; | ||
278 | break; | ||
279 | case 18000: | ||
280 | rate_idx = 7; | ||
281 | break; | ||
282 | case 24000: | ||
283 | rate_idx = 8; | ||
284 | break; | ||
285 | case 36000: | ||
286 | rate_idx = 9; | ||
287 | break; | ||
288 | case 48000: | ||
289 | rate_idx = 10; | ||
290 | break; | ||
291 | case 54000: | ||
292 | rate_idx = 11; | ||
293 | break; | ||
294 | default: | ||
295 | break; | ||
296 | } | ||
297 | |||
298 | if (band == IEEE80211_BAND_5GHZ) { | ||
299 | if (rate_idx > 3) | ||
300 | /* Omit CCK rates */ | ||
301 | rate_idx -= 4; | ||
302 | else | ||
303 | rate_idx = 0; | ||
304 | } | ||
305 | |||
306 | return rate_idx; | ||
307 | } | ||
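
get_rate_idx() above maps a bitrate in kbps onto an index into a table where the four CCK rates occupy slots 0-3 and the OFDM rates slots 4-11; on 5 GHz the CCK slots are dropped. A compact, table-driven sketch of the same mapping (illustrative, not a drop-in replacement for the switch):

#include <stdio.h>
#include <stdint.h>

/* kbps values in the same index order the switch above produces:
 * CCK 1/2/5.5/11, then OFDM 6/9/12/18/24/36/48/54. */
static const uint32_t rate_kbps[] = {
        1000, 2000, 5500, 11000,
        6000, 9000, 12000, 18000, 24000, 36000, 48000, 54000,
};

static uint8_t rate_to_idx(uint32_t kbps, int is_5ghz)
{
        uint8_t idx = 0;
        unsigned int i;

        for (i = 0; i < sizeof(rate_kbps) / sizeof(rate_kbps[0]); i++)
                if (rate_kbps[i] == kbps)
                        idx = i;

        if (is_5ghz)
                idx = (idx > 3) ? idx - 4 : 0;  /* 5 GHz table omits CCK */

        return idx;
}

int main(void)
{
        printf("24000 kbps: 2GHz idx %u, 5GHz idx %u\n",
               rate_to_idx(24000, 0), rate_to_idx(24000, 1));
        return 0;
}
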
308 | |||
309 | static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) | ||
310 | { | ||
311 | struct wmi_mgmt_rx_event *event = (struct wmi_mgmt_rx_event *)skb->data; | ||
312 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | ||
313 | struct ieee80211_hdr *hdr; | ||
314 | u32 rx_status; | ||
315 | u32 channel; | ||
316 | u32 phy_mode; | ||
317 | u32 snr; | ||
318 | u32 rate; | ||
319 | u32 buf_len; | ||
320 | u16 fc; | ||
321 | |||
322 | channel = __le32_to_cpu(event->hdr.channel); | ||
323 | buf_len = __le32_to_cpu(event->hdr.buf_len); | ||
324 | rx_status = __le32_to_cpu(event->hdr.status); | ||
325 | snr = __le32_to_cpu(event->hdr.snr); | ||
326 | phy_mode = __le32_to_cpu(event->hdr.phy_mode); | ||
327 | rate = __le32_to_cpu(event->hdr.rate); | ||
328 | |||
329 | memset(status, 0, sizeof(*status)); | ||
330 | |||
331 | ath10k_dbg(ATH10K_DBG_MGMT, | ||
332 | "event mgmt rx status %08x\n", rx_status); | ||
333 | |||
334 | if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) { | ||
335 | dev_kfree_skb(skb); | ||
336 | return 0; | ||
337 | } | ||
338 | |||
339 | if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) { | ||
340 | dev_kfree_skb(skb); | ||
341 | return 0; | ||
342 | } | ||
343 | |||
344 | if (rx_status & WMI_RX_STATUS_ERR_CRC) | ||
345 | status->flag |= RX_FLAG_FAILED_FCS_CRC; | ||
346 | if (rx_status & WMI_RX_STATUS_ERR_MIC) | ||
347 | status->flag |= RX_FLAG_MMIC_ERROR; | ||
348 | |||
349 | status->band = phy_mode_to_band(phy_mode); | ||
350 | status->freq = ieee80211_channel_to_frequency(channel, status->band); | ||
351 | status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR; | ||
352 | status->rate_idx = get_rate_idx(rate, status->band); | ||
353 | |||
354 | skb_pull(skb, sizeof(event->hdr)); | ||
355 | |||
356 | hdr = (struct ieee80211_hdr *)skb->data; | ||
357 | fc = le16_to_cpu(hdr->frame_control); | ||
358 | |||
359 | if (fc & IEEE80211_FCTL_PROTECTED) { | ||
360 | status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED | | ||
361 | RX_FLAG_MMIC_STRIPPED; | ||
362 | hdr->frame_control = __cpu_to_le16(fc & | ||
363 | ~IEEE80211_FCTL_PROTECTED); | ||
364 | } | ||
365 | |||
366 | ath10k_dbg(ATH10K_DBG_MGMT, | ||
367 | "event mgmt rx skb %p len %d ftype %02x stype %02x\n", | ||
368 | skb, skb->len, | ||
369 | fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); | ||
370 | |||
371 | ath10k_dbg(ATH10K_DBG_MGMT, | ||
372 | "event mgmt rx freq %d band %d snr %d, rate_idx %d\n", | ||
373 | status->freq, status->band, status->signal, | ||
374 | status->rate_idx); | ||
375 | |||
376 | /* | ||
377 | * Packets from HTC arrive aligned to 4-byte boundaries | ||
378 | * because they may originally come in along with a trailer. | ||
379 | */ | ||
380 | skb_trim(skb, buf_len); | ||
381 | |||
382 | ieee80211_rx(ar->hw, skb); | ||
383 | return 0; | ||
384 | } | ||
385 | |||
386 | static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) | ||
387 | { | ||
388 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_CHAN_INFO_EVENTID\n"); | ||
389 | } | ||
390 | |||
391 | static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb) | ||
392 | { | ||
393 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n"); | ||
394 | } | ||
395 | |||
396 | static void ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb) | ||
397 | { | ||
398 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_MESG_EVENTID\n"); | ||
399 | } | ||
400 | |||
401 | static void ath10k_wmi_event_update_stats(struct ath10k *ar, | ||
402 | struct sk_buff *skb) | ||
403 | { | ||
404 | struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data; | ||
405 | |||
406 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n"); | ||
407 | |||
408 | ath10k_debug_read_target_stats(ar, ev); | ||
409 | } | ||
410 | |||
411 | static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, | ||
412 | struct sk_buff *skb) | ||
413 | { | ||
414 | struct wmi_vdev_start_response_event *ev; | ||
415 | |||
416 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n"); | ||
417 | |||
418 | ev = (struct wmi_vdev_start_response_event *)skb->data; | ||
419 | |||
420 | if (WARN_ON(__le32_to_cpu(ev->status))) | ||
421 | return; | ||
422 | |||
423 | complete(&ar->vdev_setup_done); | ||
424 | } | ||
425 | |||
426 | static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, | ||
427 | struct sk_buff *skb) | ||
428 | { | ||
429 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n"); | ||
430 | complete(&ar->vdev_setup_done); | ||
431 | } | ||
432 | |||
433 | static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, | ||
434 | struct sk_buff *skb) | ||
435 | { | ||
436 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_PEER_STA_KICKOUT_EVENTID\n"); | ||
437 | } | ||
438 | |||
439 | /* | ||
440 | * FIXME | ||
441 | * | ||
442 | * We don't report the sleep state of connected stations | ||
443 | * to mac80211. Because of this, mac80211 can't fill in the | ||
444 | * TIM IE correctly. | ||
445 | * | ||
446 | * There is no known way of getting nullfunc frames that carry | ||
447 | * a sleep transition from connected stations - these do not | ||
448 | * seem to be forwarded from the target to the host. There also | ||
449 | * doesn't seem to be a dedicated event for that. So the | ||
450 | * only way left to do this would be to read the tim_bitmap | ||
451 | * during SWBA. | ||
452 | * | ||
453 | * We could probably try using the tim_bitmap from SWBA to tell | ||
454 | * mac80211 which stations are asleep and which are not. The | ||
455 | * problem is that calling mac80211 functions so many times | ||
456 | * could take too long and make us miss the deadline for | ||
457 | * submitting the beacon to the target. | ||
458 | * | ||
459 | * So as a workaround we try to extend the TIM IE if there | ||
460 | * is unicast traffic buffered for stations with aid > 7, and | ||
461 | * fill it in ourselves. | ||
462 | */ | ||
463 | static void ath10k_wmi_update_tim(struct ath10k *ar, | ||
464 | struct ath10k_vif *arvif, | ||
465 | struct sk_buff *bcn, | ||
466 | struct wmi_bcn_info *bcn_info) | ||
467 | { | ||
468 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data; | ||
469 | struct ieee80211_tim_ie *tim; | ||
470 | u8 *ies, *ie; | ||
471 | u8 ie_len, pvm_len; | ||
472 | |||
473 | /* if the next SWBA has tim_changed cleared, the tim_bitmap is garbage; | ||
474 | * we must copy the bitmap when it changes and reuse it later */ | ||
475 | if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) { | ||
476 | int i; | ||
477 | |||
478 | BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) != | ||
479 | sizeof(bcn_info->tim_info.tim_bitmap)); | ||
480 | |||
481 | for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) { | ||
482 | __le32 t = bcn_info->tim_info.tim_bitmap[i / 4]; | ||
483 | u32 v = __le32_to_cpu(t); | ||
484 | arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF; | ||
485 | } | ||
486 | |||
487 | /* FW reports the length as either 0 or 16, | ||
488 | * so we calculate it on our own */ | ||
489 | arvif->u.ap.tim_len = 0; | ||
490 | for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) | ||
491 | if (arvif->u.ap.tim_bitmap[i]) | ||
492 | arvif->u.ap.tim_len = i; | ||
493 | |||
494 | arvif->u.ap.tim_len++; | ||
495 | } | ||
496 | |||
497 | ies = bcn->data; | ||
498 | ies += ieee80211_hdrlen(hdr->frame_control); | ||
499 | ies += 12; /* fixed parameters */ | ||
500 | |||
501 | ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies, | ||
502 | (u8 *)skb_tail_pointer(bcn) - ies); | ||
503 | if (!ie) { | ||
504 | /* highly unlikely for mac80211 */ | ||
505 | ath10k_warn("no tim ie found\n"); | ||
506 | return; | ||
507 | } | ||
508 | |||
509 | tim = (void *)ie + 2; | ||
510 | ie_len = ie[1]; | ||
511 | pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */ | ||
512 | |||
513 | if (pvm_len < arvif->u.ap.tim_len) { | ||
514 | int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len; | ||
515 | int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len); | ||
516 | void *next_ie = ie + 2 + ie_len; | ||
517 | |||
518 | if (skb_put(bcn, expand_size)) { | ||
519 | memmove(next_ie + expand_size, next_ie, move_size); | ||
520 | |||
521 | ie[1] += expand_size; | ||
522 | ie_len += expand_size; | ||
523 | pvm_len += expand_size; | ||
524 | } else { | ||
525 | ath10k_warn("tim expansion failed\n"); | ||
526 | } | ||
527 | } | ||
528 | |||
529 | if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) { | ||
530 | ath10k_warn("tim pvm length is too great (%d)\n", pvm_len); | ||
531 | return; | ||
532 | } | ||
533 | |||
534 | tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast); | ||
535 | memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len); | ||
536 | |||
537 | ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n", | ||
538 | tim->dtim_count, tim->dtim_period, | ||
539 | tim->bitmap_ctrl, pvm_len); | ||
540 | } | ||
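
The TIM update above unpacks the firmware's 32-bit little-endian tim_bitmap words into bytes (i / 4 selects the word, (i % 4) * 8 the byte within it) and then measures the bitmap up to the last non-zero byte, since the firmware reports the length as either 0 or 16. A standalone sketch of that unpacking with illustrative values:

#include <stdint.h>
#include <stdio.h>

#define TIM_BITMAP_WORDS 4
#define TIM_BITMAP_BYTES (TIM_BITMAP_WORDS * 4)

/* Unpack 32-bit words into bytes and return the length of the bitmap up
 * to (and including) the last non-zero byte. */
static int unpack_tim(const uint32_t *words, uint8_t *bytes)
{
        int i, len = 0;

        for (i = 0; i < TIM_BITMAP_BYTES; i++) {
                bytes[i] = (words[i / 4] >> ((i % 4) * 8)) & 0xFF;
                if (bytes[i])
                        len = i;
        }

        return len + 1;
}

int main(void)
{
        /* Example: AIDs 1 and 9 buffered -> bits 1 and 9 set. */
        const uint32_t words[TIM_BITMAP_WORDS] = { 0x00000202, 0, 0, 0 };
        uint8_t bytes[TIM_BITMAP_BYTES];

        printf("tim_len = %d, byte0 = 0x%02x, byte1 = 0x%02x\n",
               unpack_tim(words, bytes), bytes[0], bytes[1]);
        return 0;
}
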
541 | |||
542 | static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len, | ||
543 | struct wmi_p2p_noa_info *noa) | ||
544 | { | ||
545 | struct ieee80211_p2p_noa_attr *noa_attr; | ||
546 | u8 ctwindow_oppps = noa->ctwindow_oppps; | ||
547 | u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET; | ||
548 | bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT); | ||
549 | __le16 *noa_attr_len; | ||
550 | u16 attr_len; | ||
551 | u8 noa_descriptors = noa->num_descriptors; | ||
552 | int i; | ||
553 | |||
554 | /* P2P IE */ | ||
555 | data[0] = WLAN_EID_VENDOR_SPECIFIC; | ||
556 | data[1] = len - 2; | ||
557 | data[2] = (WLAN_OUI_WFA >> 16) & 0xff; | ||
558 | data[3] = (WLAN_OUI_WFA >> 8) & 0xff; | ||
559 | data[4] = (WLAN_OUI_WFA >> 0) & 0xff; | ||
560 | data[5] = WLAN_OUI_TYPE_WFA_P2P; | ||
561 | |||
562 | /* NOA ATTR */ | ||
563 | data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE; | ||
564 | noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */ | ||
565 | noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9]; | ||
566 | |||
567 | noa_attr->index = noa->index; | ||
568 | noa_attr->oppps_ctwindow = ctwindow; | ||
569 | if (oppps) | ||
570 | noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT; | ||
571 | |||
572 | for (i = 0; i < noa_descriptors; i++) { | ||
573 | noa_attr->desc[i].count = | ||
574 | __le32_to_cpu(noa->descriptors[i].type_count); | ||
575 | noa_attr->desc[i].duration = noa->descriptors[i].duration; | ||
576 | noa_attr->desc[i].interval = noa->descriptors[i].interval; | ||
577 | noa_attr->desc[i].start_time = noa->descriptors[i].start_time; | ||
578 | } | ||
579 | |||
580 | attr_len = 2; /* index + oppps_ctwindow */ | ||
581 | attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc); | ||
582 | *noa_attr_len = __cpu_to_le16(attr_len); | ||
583 | } | ||
584 | |||
585 | static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa) | ||
586 | { | ||
587 | u32 len = 0; | ||
588 | u8 noa_descriptors = noa->num_descriptors; | ||
589 | u8 opp_ps_info = noa->ctwindow_oppps; | ||
590 | bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT); | ||
591 | |||
592 | |||
593 | if (!noa_descriptors && !opps_enabled) | ||
594 | return len; | ||
595 | |||
596 | len += 1 + 1 + 4; /* EID + len + OUI */ | ||
597 | len += 1 + 2; /* noa attr + attr len */ | ||
598 | len += 1 + 1; /* index + oppps_ctwindow */ | ||
599 | len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc); | ||
600 | |||
601 | return len; | ||
602 | } | ||
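
The length computed above is 6 bytes of vendor IE header (EID, length, 3-byte WFA OUI, OUI type), 3 bytes of attribute header (attribute ID plus 2-byte length), 2 bytes for index and CTWindow/OppPS, plus one descriptor block per NoA entry. A small worked example, under the assumption that one packed NoA descriptor is 13 bytes (a 1-byte count plus three 32-bit fields):

#include <stdio.h>

/* Assumed size of one NoA descriptor: 1-byte count + three 32-bit fields,
 * packed (13 bytes). Adjust if the real struct differs. */
#define NOA_DESC_SIZE 13

static unsigned int noa_ie_len(unsigned int num_desc, int oppps_enabled)
{
        unsigned int len = 0;

        if (!num_desc && !oppps_enabled)
                return 0;

        len += 1 + 1 + 4;       /* EID + len + OUI(3) + OUI type(1) */
        len += 1 + 2;           /* attribute ID + attribute length */
        len += 1 + 1;           /* index + CTWindow/OppPS */
        len += num_desc * NOA_DESC_SIZE;

        return len;
}

int main(void)
{
        /* Two descriptors with opportunistic PS enabled -> 11 + 26 = 37 bytes. */
        printf("NoA IE length: %u\n", noa_ie_len(2, 1));
        return 0;
}
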
603 | |||
604 | static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif, | ||
605 | struct sk_buff *bcn, | ||
606 | struct wmi_bcn_info *bcn_info) | ||
607 | { | ||
608 | struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info; | ||
609 | u8 *new_data, *old_data = arvif->u.ap.noa_data; | ||
610 | u32 new_len; | ||
611 | |||
612 | if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) | ||
613 | return; | ||
614 | |||
615 | ath10k_dbg(ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed); | ||
616 | if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) { | ||
617 | new_len = ath10k_p2p_calc_noa_ie_len(noa); | ||
618 | if (!new_len) | ||
619 | goto cleanup; | ||
620 | |||
621 | new_data = kmalloc(new_len, GFP_ATOMIC); | ||
622 | if (!new_data) | ||
623 | goto cleanup; | ||
624 | |||
625 | ath10k_p2p_fill_noa_ie(new_data, new_len, noa); | ||
626 | |||
627 | spin_lock_bh(&ar->data_lock); | ||
628 | arvif->u.ap.noa_data = new_data; | ||
629 | arvif->u.ap.noa_len = new_len; | ||
630 | spin_unlock_bh(&ar->data_lock); | ||
631 | kfree(old_data); | ||
632 | } | ||
633 | |||
634 | if (arvif->u.ap.noa_data) | ||
635 | if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC)) | ||
636 | memcpy(skb_put(bcn, arvif->u.ap.noa_len), | ||
637 | arvif->u.ap.noa_data, | ||
638 | arvif->u.ap.noa_len); | ||
639 | return; | ||
640 | |||
641 | cleanup: | ||
642 | spin_lock_bh(&ar->data_lock); | ||
643 | arvif->u.ap.noa_data = NULL; | ||
644 | arvif->u.ap.noa_len = 0; | ||
645 | spin_unlock_bh(&ar->data_lock); | ||
646 | kfree(old_data); | ||
647 | } | ||
648 | |||
649 | |||
650 | static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) | ||
651 | { | ||
652 | struct wmi_host_swba_event *ev; | ||
653 | u32 map; | ||
654 | int i = -1; | ||
655 | struct wmi_bcn_info *bcn_info; | ||
656 | struct ath10k_vif *arvif; | ||
657 | struct wmi_bcn_tx_arg arg; | ||
658 | struct sk_buff *bcn; | ||
659 | int vdev_id = 0; | ||
660 | int ret; | ||
661 | |||
662 | ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n"); | ||
663 | |||
664 | ev = (struct wmi_host_swba_event *)skb->data; | ||
665 | map = __le32_to_cpu(ev->vdev_map); | ||
666 | |||
667 | ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n" | ||
668 | "-vdev map 0x%x\n", | ||
669 | map); | ||
670 | |||
671 | for (; map; map >>= 1, vdev_id++) { | ||
672 | if (!(map & 0x1)) | ||
673 | continue; | ||
674 | |||
675 | i++; | ||
676 | |||
677 | if (i >= WMI_MAX_AP_VDEV) { | ||
678 | ath10k_warn("swba has corrupted vdev map\n"); | ||
679 | break; | ||
680 | } | ||
681 | |||
682 | bcn_info = &ev->bcn_info[i]; | ||
683 | |||
684 | ath10k_dbg(ATH10K_DBG_MGMT, | ||
685 | "-bcn_info[%d]:\n" | ||
686 | "--tim_len %d\n" | ||
687 | "--tim_mcast %d\n" | ||
688 | "--tim_changed %d\n" | ||
689 | "--tim_num_ps_pending %d\n" | ||
690 | "--tim_bitmap 0x%08x%08x%08x%08x\n", | ||
691 | i, | ||
692 | __le32_to_cpu(bcn_info->tim_info.tim_len), | ||
693 | __le32_to_cpu(bcn_info->tim_info.tim_mcast), | ||
694 | __le32_to_cpu(bcn_info->tim_info.tim_changed), | ||
695 | __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending), | ||
696 | __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]), | ||
697 | __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]), | ||
698 | __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]), | ||
699 | __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0])); | ||
700 | |||
701 | arvif = ath10k_get_arvif(ar, vdev_id); | ||
702 | if (arvif == NULL) { | ||
703 | ath10k_warn("no vif for vdev_id %d found\n", vdev_id); | ||
704 | continue; | ||
705 | } | ||
706 | |||
707 | bcn = ieee80211_beacon_get(ar->hw, arvif->vif); | ||
708 | if (!bcn) { | ||
709 | ath10k_warn("could not get mac80211 beacon\n"); | ||
710 | continue; | ||
711 | } | ||
712 | |||
713 | ath10k_tx_h_seq_no(bcn); | ||
714 | ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info); | ||
715 | ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info); | ||
716 | |||
717 | arg.vdev_id = arvif->vdev_id; | ||
718 | arg.tx_rate = 0; | ||
719 | arg.tx_power = 0; | ||
720 | arg.bcn = bcn->data; | ||
721 | arg.bcn_len = bcn->len; | ||
722 | |||
723 | ret = ath10k_wmi_beacon_send(ar, &arg); | ||
724 | if (ret) | ||
725 | ath10k_warn("could not send beacon (%d)\n", ret); | ||
726 | |||
727 | dev_kfree_skb_any(bcn); | ||
728 | } | ||
729 | } | ||
730 | |||
731 | static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, | ||
732 | struct sk_buff *skb) | ||
733 | { | ||
734 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n"); | ||
735 | } | ||
736 | |||
737 | static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) | ||
738 | { | ||
739 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_PHYERR_EVENTID\n"); | ||
740 | } | ||
741 | |||
742 | static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb) | ||
743 | { | ||
744 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n"); | ||
745 | } | ||
746 | |||
747 | static void ath10k_wmi_event_profile_match(struct ath10k *ar, | ||
748 | struct sk_buff *skb) | ||
749 | { | ||
750 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n"); | ||
751 | } | ||
752 | |||
753 | static void ath10k_wmi_event_debug_print(struct ath10k *ar, | ||
754 | struct sk_buff *skb) | ||
755 | { | ||
756 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_PRINT_EVENTID\n"); | ||
757 | } | ||
758 | |||
759 | static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb) | ||
760 | { | ||
761 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n"); | ||
762 | } | ||
763 | |||
764 | static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, | ||
765 | struct sk_buff *skb) | ||
766 | { | ||
767 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n"); | ||
768 | } | ||
769 | |||
770 | static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar, | ||
771 | struct sk_buff *skb) | ||
772 | { | ||
773 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n"); | ||
774 | } | ||
775 | |||
776 | static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar, | ||
777 | struct sk_buff *skb) | ||
778 | { | ||
779 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n"); | ||
780 | } | ||
781 | |||
782 | static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, | ||
783 | struct sk_buff *skb) | ||
784 | { | ||
785 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n"); | ||
786 | } | ||
787 | |||
788 | static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, | ||
789 | struct sk_buff *skb) | ||
790 | { | ||
791 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n"); | ||
792 | } | ||
793 | |||
794 | static void ath10k_wmi_event_dcs_interference(struct ath10k *ar, | ||
795 | struct sk_buff *skb) | ||
796 | { | ||
797 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n"); | ||
798 | } | ||
799 | |||
800 | static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, | ||
801 | struct sk_buff *skb) | ||
802 | { | ||
803 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n"); | ||
804 | } | ||
805 | |||
806 | static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, | ||
807 | struct sk_buff *skb) | ||
808 | { | ||
809 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n"); | ||
810 | } | ||
811 | |||
812 | static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, | ||
813 | struct sk_buff *skb) | ||
814 | { | ||
815 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n"); | ||
816 | } | ||
817 | |||
818 | static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, | ||
819 | struct sk_buff *skb) | ||
820 | { | ||
821 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n"); | ||
822 | } | ||
823 | |||
824 | static void ath10k_wmi_event_delba_complete(struct ath10k *ar, | ||
825 | struct sk_buff *skb) | ||
826 | { | ||
827 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n"); | ||
828 | } | ||
829 | |||
830 | static void ath10k_wmi_event_addba_complete(struct ath10k *ar, | ||
831 | struct sk_buff *skb) | ||
832 | { | ||
833 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n"); | ||
834 | } | ||
835 | |||
836 | static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar, | ||
837 | struct sk_buff *skb) | ||
838 | { | ||
839 | ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n"); | ||
840 | } | ||
841 | |||
842 | static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, | ||
843 | struct sk_buff *skb) | ||
844 | { | ||
845 | struct wmi_service_ready_event *ev = (void *)skb->data; | ||
846 | |||
847 | if (skb->len < sizeof(*ev)) { | ||
848 | ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n", | ||
849 | skb->len, sizeof(*ev)); | ||
850 | return; | ||
851 | } | ||
852 | |||
853 | ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power); | ||
854 | ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power); | ||
855 | ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info); | ||
856 | ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info); | ||
857 | ar->fw_version_major = | ||
858 | (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24; | ||
859 | ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff); | ||
860 | ar->fw_version_release = | ||
861 | (__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16; | ||
862 | ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff); | ||
863 | ar->phy_capability = __le32_to_cpu(ev->phy_capability); | ||
864 | |||
865 | ar->ath_common.regulatory.current_rd = | ||
866 | __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd); | ||
867 | |||
868 | ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap, | ||
869 | sizeof(ev->wmi_service_bitmap)); | ||
870 | |||
871 | if (strlen(ar->hw->wiphy->fw_version) == 0) { | ||
872 | snprintf(ar->hw->wiphy->fw_version, | ||
873 | sizeof(ar->hw->wiphy->fw_version), | ||
874 | "%u.%u.%u.%u", | ||
875 | ar->fw_version_major, | ||
876 | ar->fw_version_minor, | ||
877 | ar->fw_version_release, | ||
878 | ar->fw_version_build); | ||
879 | } | ||
880 | |||
881 | /* FIXME: it would probably be better to support this */ | ||
882 | if (__le32_to_cpu(ev->num_mem_reqs) > 0) { | ||
883 | ath10k_warn("target requested %d memory chunks; ignoring\n", | ||
884 | __le32_to_cpu(ev->num_mem_reqs)); | ||
885 | } | ||
886 | |||
887 | ath10k_dbg(ATH10K_DBG_WMI, | ||
888 | "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u\n", | ||
889 | __le32_to_cpu(ev->sw_version), | ||
890 | __le32_to_cpu(ev->sw_version_1), | ||
891 | __le32_to_cpu(ev->abi_version), | ||
892 | __le32_to_cpu(ev->phy_capability), | ||
893 | __le32_to_cpu(ev->ht_cap_info), | ||
894 | __le32_to_cpu(ev->vht_cap_info), | ||
895 | __le32_to_cpu(ev->vht_supp_mcs), | ||
896 | __le32_to_cpu(ev->sys_cap_info), | ||
897 | __le32_to_cpu(ev->num_mem_reqs)); | ||
898 | |||
899 | complete(&ar->wmi.service_ready); | ||
900 | } | ||
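
The service-ready handler above splits the firmware version across two words: major in the top byte of sw_version, minor in its low 24 bits, and release/build in the high/low halves of sw_version_1. A standalone sketch of the same unpacking with illustrative input values:

#include <stdint.h>
#include <stdio.h>

struct fw_version {
        unsigned int major, minor, release, build;
};

/* Same shifts and masks as the service-ready handler above. */
static struct fw_version unpack_fw_version(uint32_t sw_version,
                                           uint32_t sw_version_1)
{
        struct fw_version v;

        v.major   = (sw_version & 0xff000000) >> 24;
        v.minor   =  sw_version & 0x00ffffff;
        v.release = (sw_version_1 & 0xffff0000) >> 16;
        v.build   =  sw_version_1 & 0x0000ffff;

        return v;
}

int main(void)
{
        struct fw_version v = unpack_fw_version(0x01000009, 0x00020004);

        printf("%u.%u.%u.%u\n", v.major, v.minor, v.release, v.build);
        return 0;
}
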
901 | |||
902 | static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb) | ||
903 | { | ||
904 | struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data; | ||
905 | |||
906 | if (WARN_ON(skb->len < sizeof(*ev))) | ||
907 | return -EINVAL; | ||
908 | |||
909 | memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN); | ||
910 | |||
911 | ath10k_dbg(ATH10K_DBG_WMI, | ||
912 | "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n", | ||
913 | __le32_to_cpu(ev->sw_version), | ||
914 | __le32_to_cpu(ev->abi_version), | ||
915 | ev->mac_addr.addr, | ||
916 | __le32_to_cpu(ev->status)); | ||
917 | |||
918 | complete(&ar->wmi.unified_ready); | ||
919 | return 0; | ||
920 | } | ||
921 | |||
922 | static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb) | ||
923 | { | ||
924 | struct wmi_cmd_hdr *cmd_hdr; | ||
925 | enum wmi_event_id id; | ||
926 | u16 len; | ||
927 | |||
928 | cmd_hdr = (struct wmi_cmd_hdr *)skb->data; | ||
929 | id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); | ||
930 | |||
931 | if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) | ||
932 | return; | ||
933 | |||
934 | len = skb->len; | ||
935 | |||
936 | trace_ath10k_wmi_event(id, skb->data, skb->len); | ||
937 | |||
938 | switch (id) { | ||
939 | case WMI_MGMT_RX_EVENTID: | ||
940 | ath10k_wmi_event_mgmt_rx(ar, skb); | ||
941 | /* mgmt_rx() owns the skb now! */ | ||
942 | return; | ||
943 | case WMI_SCAN_EVENTID: | ||
944 | ath10k_wmi_event_scan(ar, skb); | ||
945 | break; | ||
946 | case WMI_CHAN_INFO_EVENTID: | ||
947 | ath10k_wmi_event_chan_info(ar, skb); | ||
948 | break; | ||
949 | case WMI_ECHO_EVENTID: | ||
950 | ath10k_wmi_event_echo(ar, skb); | ||
951 | break; | ||
952 | case WMI_DEBUG_MESG_EVENTID: | ||
953 | ath10k_wmi_event_debug_mesg(ar, skb); | ||
954 | break; | ||
955 | case WMI_UPDATE_STATS_EVENTID: | ||
956 | ath10k_wmi_event_update_stats(ar, skb); | ||
957 | break; | ||
958 | case WMI_VDEV_START_RESP_EVENTID: | ||
959 | ath10k_wmi_event_vdev_start_resp(ar, skb); | ||
960 | break; | ||
961 | case WMI_VDEV_STOPPED_EVENTID: | ||
962 | ath10k_wmi_event_vdev_stopped(ar, skb); | ||
963 | break; | ||
964 | case WMI_PEER_STA_KICKOUT_EVENTID: | ||
965 | ath10k_wmi_event_peer_sta_kickout(ar, skb); | ||
966 | break; | ||
967 | case WMI_HOST_SWBA_EVENTID: | ||
968 | ath10k_wmi_event_host_swba(ar, skb); | ||
969 | break; | ||
970 | case WMI_TBTTOFFSET_UPDATE_EVENTID: | ||
971 | ath10k_wmi_event_tbttoffset_update(ar, skb); | ||
972 | break; | ||
973 | case WMI_PHYERR_EVENTID: | ||
974 | ath10k_wmi_event_phyerr(ar, skb); | ||
975 | break; | ||
976 | case WMI_ROAM_EVENTID: | ||
977 | ath10k_wmi_event_roam(ar, skb); | ||
978 | break; | ||
979 | case WMI_PROFILE_MATCH: | ||
980 | ath10k_wmi_event_profile_match(ar, skb); | ||
981 | break; | ||
982 | case WMI_DEBUG_PRINT_EVENTID: | ||
983 | ath10k_wmi_event_debug_print(ar, skb); | ||
984 | break; | ||
985 | case WMI_PDEV_QVIT_EVENTID: | ||
986 | ath10k_wmi_event_pdev_qvit(ar, skb); | ||
987 | break; | ||
988 | case WMI_WLAN_PROFILE_DATA_EVENTID: | ||
989 | ath10k_wmi_event_wlan_profile_data(ar, skb); | ||
990 | break; | ||
991 | case WMI_RTT_MEASUREMENT_REPORT_EVENTID: | ||
992 | ath10k_wmi_event_rtt_measurement_report(ar, skb); | ||
993 | break; | ||
994 | case WMI_TSF_MEASUREMENT_REPORT_EVENTID: | ||
995 | ath10k_wmi_event_tsf_measurement_report(ar, skb); | ||
996 | break; | ||
997 | case WMI_RTT_ERROR_REPORT_EVENTID: | ||
998 | ath10k_wmi_event_rtt_error_report(ar, skb); | ||
999 | break; | ||
1000 | case WMI_WOW_WAKEUP_HOST_EVENTID: | ||
1001 | ath10k_wmi_event_wow_wakeup_host(ar, skb); | ||
1002 | break; | ||
1003 | case WMI_DCS_INTERFERENCE_EVENTID: | ||
1004 | ath10k_wmi_event_dcs_interference(ar, skb); | ||
1005 | break; | ||
1006 | case WMI_PDEV_TPC_CONFIG_EVENTID: | ||
1007 | ath10k_wmi_event_pdev_tpc_config(ar, skb); | ||
1008 | break; | ||
1009 | case WMI_PDEV_FTM_INTG_EVENTID: | ||
1010 | ath10k_wmi_event_pdev_ftm_intg(ar, skb); | ||
1011 | break; | ||
1012 | case WMI_GTK_OFFLOAD_STATUS_EVENTID: | ||
1013 | ath10k_wmi_event_gtk_offload_status(ar, skb); | ||
1014 | break; | ||
1015 | case WMI_GTK_REKEY_FAIL_EVENTID: | ||
1016 | ath10k_wmi_event_gtk_rekey_fail(ar, skb); | ||
1017 | break; | ||
1018 | case WMI_TX_DELBA_COMPLETE_EVENTID: | ||
1019 | ath10k_wmi_event_delba_complete(ar, skb); | ||
1020 | break; | ||
1021 | case WMI_TX_ADDBA_COMPLETE_EVENTID: | ||
1022 | ath10k_wmi_event_addba_complete(ar, skb); | ||
1023 | break; | ||
1024 | case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID: | ||
1025 | ath10k_wmi_event_vdev_install_key_complete(ar, skb); | ||
1026 | break; | ||
1027 | case WMI_SERVICE_READY_EVENTID: | ||
1028 | ath10k_wmi_service_ready_event_rx(ar, skb); | ||
1029 | break; | ||
1030 | case WMI_READY_EVENTID: | ||
1031 | ath10k_wmi_ready_event_rx(ar, skb); | ||
1032 | break; | ||
1033 | default: | ||
1034 | ath10k_warn("Unknown eventid: %d\n", id); | ||
1035 | break; | ||
1036 | } | ||
1037 | |||
1038 | dev_kfree_skb(skb); | ||
1039 | } | ||
1040 | |||
1041 | static void ath10k_wmi_event_work(struct work_struct *work) | ||
1042 | { | ||
1043 | struct ath10k *ar = container_of(work, struct ath10k, | ||
1044 | wmi.wmi_event_work); | ||
1045 | struct sk_buff *skb; | ||
1046 | |||
1047 | for (;;) { | ||
1048 | skb = skb_dequeue(&ar->wmi.wmi_event_list); | ||
1049 | if (!skb) | ||
1050 | break; | ||
1051 | |||
1052 | ath10k_wmi_event_process(ar, skb); | ||
1053 | } | ||
1054 | } | ||
1055 | |||
1056 | static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb) | ||
1057 | { | ||
1058 | struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data; | ||
1059 | enum wmi_event_id event_id; | ||
1060 | |||
1061 | event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); | ||
1062 | |||
1063 | /* some events must be handled ASAP and thus | ||
1064 | * can't be deferred to a worker thread */ | ||
1065 | switch (event_id) { | ||
1066 | case WMI_HOST_SWBA_EVENTID: | ||
1067 | case WMI_MGMT_RX_EVENTID: | ||
1068 | ath10k_wmi_event_process(ar, skb); | ||
1069 | return; | ||
1070 | default: | ||
1071 | break; | ||
1072 | } | ||
1073 | |||
1074 | skb_queue_tail(&ar->wmi.wmi_event_list, skb); | ||
1075 | queue_work(ar->workqueue, &ar->wmi.wmi_event_work); | ||
1076 | } | ||
1077 | |||
1078 | /* WMI Initialization functions */ | ||
1079 | int ath10k_wmi_attach(struct ath10k *ar) | ||
1080 | { | ||
1081 | init_completion(&ar->wmi.service_ready); | ||
1082 | init_completion(&ar->wmi.unified_ready); | ||
1083 | init_waitqueue_head(&ar->wmi.wq); | ||
1084 | |||
1085 | skb_queue_head_init(&ar->wmi.wmi_event_list); | ||
1086 | INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work); | ||
1087 | |||
1088 | return 0; | ||
1089 | } | ||
1090 | |||
1091 | void ath10k_wmi_detach(struct ath10k *ar) | ||
1092 | { | ||
1093 | /* HTC should've drained the packets already */ | ||
1094 | if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0)) | ||
1095 | ath10k_warn("there are still pending packets\n"); | ||
1096 | |||
1097 | cancel_work_sync(&ar->wmi.wmi_event_work); | ||
1098 | skb_queue_purge(&ar->wmi.wmi_event_list); | ||
1099 | } | ||
1100 | |||
1101 | int ath10k_wmi_connect_htc_service(struct ath10k *ar) | ||
1102 | { | ||
1103 | int status; | ||
1104 | struct ath10k_htc_svc_conn_req conn_req; | ||
1105 | struct ath10k_htc_svc_conn_resp conn_resp; | ||
1106 | |||
1107 | memset(&conn_req, 0, sizeof(conn_req)); | ||
1108 | memset(&conn_resp, 0, sizeof(conn_resp)); | ||
1109 | |||
1110 | /* these fields are the same for all service endpoints */ | ||
1111 | conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete; | ||
1112 | conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx; | ||
1113 | |||
1114 | /* connect to control service */ | ||
1115 | conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL; | ||
1116 | |||
1117 | status = ath10k_htc_connect_service(ar->htc, &conn_req, &conn_resp); | ||
1118 | if (status) { | ||
1119 | ath10k_warn("failed to connect to WMI CONTROL service status: %d\n", | ||
1120 | status); | ||
1121 | return status; | ||
1122 | } | ||
1123 | |||
1124 | ar->wmi.eid = conn_resp.eid; | ||
1125 | return 0; | ||
1126 | } | ||
1127 | |||
1128 | int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, | ||
1129 | u16 rd5g, u16 ctl2g, u16 ctl5g) | ||
1130 | { | ||
1131 | struct wmi_pdev_set_regdomain_cmd *cmd; | ||
1132 | struct sk_buff *skb; | ||
1133 | |||
1134 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
1135 | if (!skb) | ||
1136 | return -ENOMEM; | ||
1137 | |||
1138 | cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data; | ||
1139 | cmd->reg_domain = __cpu_to_le32(rd); | ||
1140 | cmd->reg_domain_2G = __cpu_to_le32(rd2g); | ||
1141 | cmd->reg_domain_5G = __cpu_to_le32(rd5g); | ||
1142 | cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g); | ||
1143 | cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g); | ||
1144 | |||
1145 | ath10k_dbg(ATH10K_DBG_WMI, | ||
1146 | "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n", | ||
1147 | rd, rd2g, rd5g, ctl2g, ctl5g); | ||
1148 | |||
1149 | return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_REGDOMAIN_CMDID); | ||
1150 | } | ||
1151 | |||
1152 | int ath10k_wmi_pdev_set_channel(struct ath10k *ar, | ||
1153 | const struct wmi_channel_arg *arg) | ||
1154 | { | ||
1155 | struct wmi_set_channel_cmd *cmd; | ||
1156 | struct sk_buff *skb; | ||
1157 | |||
1158 | if (arg->passive) | ||
1159 | return -EINVAL; | ||
1160 | |||
1161 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
1162 | if (!skb) | ||
1163 | return -ENOMEM; | ||
1164 | |||
1165 | cmd = (struct wmi_set_channel_cmd *)skb->data; | ||
1166 | cmd->chan.mhz = __cpu_to_le32(arg->freq); | ||
1167 | cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq); | ||
1168 | cmd->chan.mode = arg->mode; | ||
1169 | cmd->chan.min_power = arg->min_power; | ||
1170 | cmd->chan.max_power = arg->max_power; | ||
1171 | cmd->chan.reg_power = arg->max_reg_power; | ||
1172 | cmd->chan.reg_classid = arg->reg_class_id; | ||
1173 | cmd->chan.antenna_max = arg->max_antenna_gain; | ||
1174 | |||
1175 | ath10k_dbg(ATH10K_DBG_WMI, | ||
1176 | "wmi set channel mode %d freq %d\n", | ||
1177 | arg->mode, arg->freq); | ||
1178 | |||
1179 | return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_CHANNEL_CMDID); | ||
1180 | } | ||
1181 | |||
1182 | int ath10k_wmi_pdev_suspend_target(struct ath10k *ar) | ||
1183 | { | ||
1184 | struct wmi_pdev_suspend_cmd *cmd; | ||
1185 | struct sk_buff *skb; | ||
1186 | |||
1187 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
1188 | if (!skb) | ||
1189 | return -ENOMEM; | ||
1190 | |||
1191 | cmd = (struct wmi_pdev_suspend_cmd *)skb->data; | ||
1192 | cmd->suspend_opt = WMI_PDEV_SUSPEND; | ||
1193 | |||
1194 | return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SUSPEND_CMDID); | ||
1195 | } | ||
1196 | |||
1197 | int ath10k_wmi_pdev_resume_target(struct ath10k *ar) | ||
1198 | { | ||
1199 | struct sk_buff *skb; | ||
1200 | |||
1201 | skb = ath10k_wmi_alloc_skb(0); | ||
1202 | if (skb == NULL) | ||
1203 | return -ENOMEM; | ||
1204 | |||
1205 | return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_RESUME_CMDID); | ||
1206 | } | ||
1207 | |||
1208 | int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id, | ||
1209 | u32 value) | ||
1210 | { | ||
1211 | struct wmi_pdev_set_param_cmd *cmd; | ||
1212 | struct sk_buff *skb; | ||
1213 | |||
1214 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
1215 | if (!skb) | ||
1216 | return -ENOMEM; | ||
1217 | |||
1218 | cmd = (struct wmi_pdev_set_param_cmd *)skb->data; | ||
1219 | cmd->param_id = __cpu_to_le32(id); | ||
1220 | cmd->param_value = __cpu_to_le32(value); | ||
1221 | |||
1222 | ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n", | ||
1223 | id, value); | ||
1224 | return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_PARAM_CMDID); | ||
1225 | } | ||
1226 | |||
1227 | int ath10k_wmi_cmd_init(struct ath10k *ar) | ||
1228 | { | ||
1229 | struct wmi_init_cmd *cmd; | ||
1230 | struct sk_buff *buf; | ||
1231 | struct wmi_resource_config config = {}; | ||
1232 | u32 val; | ||
1233 | |||
1234 | config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS); | ||
1235 | config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS); | ||
1236 | config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS); | ||
1237 | |||
1238 | config.num_offload_reorder_bufs = | ||
1239 | __cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS); | ||
1240 | |||
1241 | config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS); | ||
1242 | config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS); | ||
1243 | config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT); | ||
1244 | config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK); | ||
1245 | config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK); | ||
1246 | config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI); | ||
1247 | config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI); | ||
1248 | config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI); | ||
1249 | config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI); | ||
1250 | config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE); | ||
1251 | |||
1252 | config.scan_max_pending_reqs = | ||
1253 | __cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS); | ||
1254 | |||
1255 | config.bmiss_offload_max_vdev = | ||
1256 | __cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV); | ||
1257 | |||
1258 | config.roam_offload_max_vdev = | ||
1259 | __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV); | ||
1260 | |||
1261 | config.roam_offload_max_ap_profiles = | ||
1262 | __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES); | ||
1263 | |||
1264 | config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS); | ||
1265 | config.num_mcast_table_elems = | ||
1266 | __cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS); | ||
1267 | |||
1268 | config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE); | ||
1269 | config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE); | ||
1270 | config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES); | ||
1271 | config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE); | ||
1272 | config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM); | ||
1273 | |||
1274 | val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK; | ||
1275 | config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val); | ||
1276 | |||
1277 | config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG); | ||
1278 | |||
1279 | config.gtk_offload_max_vdev = | ||
1280 | __cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV); | ||
1281 | |||
1282 | config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC); | ||
1283 | config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES); | ||
1284 | |||
1285 | buf = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
1286 | if (!buf) | ||
1287 | return -ENOMEM; | ||
1288 | |||
1289 | cmd = (struct wmi_init_cmd *)buf->data; | ||
1290 | cmd->num_host_mem_chunks = 0; | ||
1291 | memcpy(&cmd->resource_config, &config, sizeof(config)); | ||
1292 | |||
1293 | ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n"); | ||
1294 | return ath10k_wmi_cmd_send(ar, buf, WMI_INIT_CMDID); | ||
1295 | } | ||
1296 | |||
1297 | static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg) | ||
1298 | { | ||
1299 | int len; | ||
1300 | |||
1301 | len = sizeof(struct wmi_start_scan_cmd); | ||
1302 | |||
1303 | if (arg->ie_len) { | ||
1304 | if (!arg->ie) | ||
1305 | return -EINVAL; | ||
1306 | if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN) | ||
1307 | return -EINVAL; | ||
1308 | |||
1309 | len += sizeof(struct wmi_ie_data); | ||
1310 | len += roundup(arg->ie_len, 4); | ||
1311 | } | ||
1312 | |||
1313 | if (arg->n_channels) { | ||
1314 | if (!arg->channels) | ||
1315 | return -EINVAL; | ||
1316 | if (arg->n_channels > ARRAY_SIZE(arg->channels)) | ||
1317 | return -EINVAL; | ||
1318 | |||
1319 | len += sizeof(struct wmi_chan_list); | ||
1320 | len += sizeof(__le32) * arg->n_channels; | ||
1321 | } | ||
1322 | |||
1323 | if (arg->n_ssids) { | ||
1324 | if (!arg->ssids) | ||
1325 | return -EINVAL; | ||
1326 | if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID) | ||
1327 | return -EINVAL; | ||
1328 | |||
1329 | len += sizeof(struct wmi_ssid_list); | ||
1330 | len += sizeof(struct wmi_ssid) * arg->n_ssids; | ||
1331 | } | ||
1332 | |||
1333 | if (arg->n_bssids) { | ||
1334 | if (!arg->bssids) | ||
1335 | return -EINVAL; | ||
1336 | if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID) | ||
1337 | return -EINVAL; | ||
1338 | |||
1339 | len += sizeof(struct wmi_bssid_list); | ||
1340 | len += sizeof(struct wmi_mac_addr) * arg->n_bssids; | ||
1341 | } | ||
1342 | |||
1343 | return len; | ||
1344 | } | ||
1345 | |||
1346 | int ath10k_wmi_start_scan(struct ath10k *ar, | ||
1347 | const struct wmi_start_scan_arg *arg) | ||
1348 | { | ||
1349 | struct wmi_start_scan_cmd *cmd; | ||
1350 | struct sk_buff *skb; | ||
1351 | struct wmi_ie_data *ie; | ||
1352 | struct wmi_chan_list *channels; | ||
1353 | struct wmi_ssid_list *ssids; | ||
1354 | struct wmi_bssid_list *bssids; | ||
1355 | u32 scan_id; | ||
1356 | u32 scan_req_id; | ||
1357 | int off; | ||
1358 | int len = 0; | ||
1359 | int i; | ||
1360 | |||
1361 | len = ath10k_wmi_start_scan_calc_len(arg); | ||
1362 | if (len < 0) | ||
1363 | return len; /* len contains error code here */ | ||
1364 | |||
1365 | skb = ath10k_wmi_alloc_skb(len); | ||
1366 | if (!skb) | ||
1367 | return -ENOMEM; | ||
1368 | |||
1369 | scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX; | ||
1370 | scan_id |= arg->scan_id; | ||
1371 | |||
1372 | scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX; | ||
1373 | scan_req_id |= arg->scan_req_id; | ||
1374 | |||
1375 | cmd = (struct wmi_start_scan_cmd *)skb->data; | ||
1376 | cmd->scan_id = __cpu_to_le32(scan_id); | ||
1377 | cmd->scan_req_id = __cpu_to_le32(scan_req_id); | ||
1378 | cmd->vdev_id = __cpu_to_le32(arg->vdev_id); | ||
1379 | cmd->scan_priority = __cpu_to_le32(arg->scan_priority); | ||
1380 | cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events); | ||
1381 | cmd->dwell_time_active = __cpu_to_le32(arg->dwell_time_active); | ||
1382 | cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive); | ||
1383 | cmd->min_rest_time = __cpu_to_le32(arg->min_rest_time); | ||
1384 | cmd->max_rest_time = __cpu_to_le32(arg->max_rest_time); | ||
1385 | cmd->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time); | ||
1386 | cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time); | ||
1387 | cmd->idle_time = __cpu_to_le32(arg->idle_time); | ||
1388 | cmd->max_scan_time = __cpu_to_le32(arg->max_scan_time); | ||
1389 | cmd->probe_delay = __cpu_to_le32(arg->probe_delay); | ||
1390 | cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags); | ||
1391 | |||
1392 | /* TLV list starts after fields included in the struct */ | ||
1393 | off = sizeof(*cmd); | ||
1394 | |||
1395 | if (arg->n_channels) { | ||
1396 | channels = (void *)skb->data + off; | ||
1397 | channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG); | ||
1398 | channels->num_chan = __cpu_to_le32(arg->n_channels); | ||
1399 | |||
1400 | for (i = 0; i < arg->n_channels; i++) | ||
1401 | channels->channel_list[i] = | ||
1402 | __cpu_to_le32(arg->channels[i]); | ||
1403 | |||
1404 | off += sizeof(*channels); | ||
1405 | off += sizeof(__le32) * arg->n_channels; | ||
1406 | } | ||
1407 | |||
1408 | if (arg->n_ssids) { | ||
1409 | ssids = (void *)skb->data + off; | ||
1410 | ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG); | ||
1411 | ssids->num_ssids = __cpu_to_le32(arg->n_ssids); | ||
1412 | |||
1413 | for (i = 0; i < arg->n_ssids; i++) { | ||
1414 | ssids->ssids[i].ssid_len = | ||
1415 | __cpu_to_le32(arg->ssids[i].len); | ||
1416 | memcpy(&ssids->ssids[i].ssid, | ||
1417 | arg->ssids[i].ssid, | ||
1418 | arg->ssids[i].len); | ||
1419 | } | ||
1420 | |||
1421 | off += sizeof(*ssids); | ||
1422 | off += sizeof(struct wmi_ssid) * arg->n_ssids; | ||
1423 | } | ||
1424 | |||
1425 | if (arg->n_bssids) { | ||
1426 | bssids = (void *)skb->data + off; | ||
1427 | bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG); | ||
1428 | bssids->num_bssid = __cpu_to_le32(arg->n_bssids); | ||
1429 | |||
1430 | for (i = 0; i < arg->n_bssids; i++) | ||
1431 | memcpy(&bssids->bssid_list[i], | ||
1432 | arg->bssids[i].bssid, | ||
1433 | ETH_ALEN); | ||
1434 | |||
1435 | off += sizeof(*bssids); | ||
1436 | off += sizeof(struct wmi_mac_addr) * arg->n_bssids; | ||
1437 | } | ||
1438 | |||
1439 | if (arg->ie_len) { | ||
1440 | ie = (void *)skb->data + off; | ||
1441 | ie->tag = __cpu_to_le32(WMI_IE_TAG); | ||
1442 | ie->ie_len = __cpu_to_le32(arg->ie_len); | ||
1443 | memcpy(ie->ie_data, arg->ie, arg->ie_len); | ||
1444 | |||
1445 | off += sizeof(*ie); | ||
1446 | off += roundup(arg->ie_len, 4); | ||
1447 | } | ||
1448 | |||
1449 | if (off != skb->len) { | ||
1450 | dev_kfree_skb(skb); | ||
1451 | return -EINVAL; | ||
1452 | } | ||
1453 | |||
1454 | ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n"); | ||
1455 | return ath10k_wmi_cmd_send(ar, skb, WMI_START_SCAN_CMDID); | ||
1456 | } | ||
1457 | |||
1458 | void ath10k_wmi_start_scan_init(struct ath10k *ar, | ||
1459 | struct wmi_start_scan_arg *arg) | ||
1460 | { | ||
1461 | /* setup commonly used values */ | ||
1462 | arg->scan_req_id = 1; | ||
1463 | arg->scan_priority = WMI_SCAN_PRIORITY_LOW; | ||
1464 | arg->dwell_time_active = 50; | ||
1465 | arg->dwell_time_passive = 150; | ||
1466 | arg->min_rest_time = 50; | ||
1467 | arg->max_rest_time = 500; | ||
1468 | arg->repeat_probe_time = 0; | ||
1469 | arg->probe_spacing_time = 0; | ||
1470 | arg->idle_time = 0; | ||
1471 | arg->max_scan_time = 5000; | ||
1472 | arg->probe_delay = 5; | ||
1473 | arg->notify_scan_events = WMI_SCAN_EVENT_STARTED | ||
1474 | | WMI_SCAN_EVENT_COMPLETED | ||
1475 | | WMI_SCAN_EVENT_BSS_CHANNEL | ||
1476 | | WMI_SCAN_EVENT_FOREIGN_CHANNEL | ||
1477 | | WMI_SCAN_EVENT_DEQUEUED; | ||
1478 | arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES; | ||
1479 | arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT; | ||
1480 | arg->n_bssids = 1; | ||
1481 | arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF"; | ||
1482 | } | ||
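
ath10k_wmi_start_scan_init() above fills in commonly useful defaults, including the wildcard BSSID, so a caller only needs to override the fields it cares about before calling ath10k_wmi_start_scan(). A minimal, hypothetical caller sketch (the vdev id, scan id and channel frequency are illustrative, not taken from this patch):

	static int example_single_channel_scan(struct ath10k *ar, u32 vdev_id)
	{
		struct wmi_start_scan_arg arg;

		memset(&arg, 0, sizeof(arg));
		ath10k_wmi_start_scan_init(ar, &arg);	/* defaults + wildcard BSSID */

		arg.vdev_id = vdev_id;
		arg.scan_id = 1;	/* low bits only; the host prefix is OR'ed
					 * in by ath10k_wmi_start_scan() */
		arg.n_channels = 1;
		arg.channels[0] = 2412;	/* frequency in MHz */

		return ath10k_wmi_start_scan(ar, &arg);
	}
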
1483 | |||
1484 | int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg) | ||
1485 | { | ||
1486 | struct wmi_stop_scan_cmd *cmd; | ||
1487 | struct sk_buff *skb; | ||
1488 | u32 scan_id; | ||
1489 | u32 req_id; | ||
1490 | |||
1491 | if (arg->req_id > 0xFFF) | ||
1492 | return -EINVAL; | ||
1493 | if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF) | ||
1494 | return -EINVAL; | ||
1495 | |||
1496 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
1497 | if (!skb) | ||
1498 | return -ENOMEM; | ||
1499 | |||
1500 | scan_id = arg->u.scan_id; | ||
1501 | scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX; | ||
1502 | |||
1503 | req_id = arg->req_id; | ||
1504 | req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX; | ||
1505 | |||
1506 | cmd = (struct wmi_stop_scan_cmd *)skb->data; | ||
1507 | cmd->req_type = __cpu_to_le32(arg->req_type); | ||
1508 | cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id); | ||
1509 | cmd->scan_id = __cpu_to_le32(scan_id); | ||
1510 | cmd->scan_req_id = __cpu_to_le32(req_id); | ||
1511 | |||
1512 | ath10k_dbg(ATH10K_DBG_WMI, | ||
1513 | "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n", | ||
1514 | arg->req_id, arg->req_type, arg->u.scan_id); | ||
1515 | return ath10k_wmi_cmd_send(ar, skb, WMI_STOP_SCAN_CMDID); | ||
1516 | } | ||
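
A hypothetical counterpart to the scan sketch above, stopping a single scan by id; the req_id and scan_id must match what was used when the scan was started (before the host prefixes, which ath10k_wmi_stop_scan() applies itself):

	static int example_stop_scan(struct ath10k *ar)
	{
		struct wmi_stop_scan_arg arg = {
			.req_id    = 1,			/* requestor id used at start */
			.req_type  = WMI_SCAN_STOP_ONE,
			.u.scan_id = 1,			/* scan id used at start */
		};

		return ath10k_wmi_stop_scan(ar, &arg);
	}
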
1517 | |||
1518 | int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id, | ||
1519 | enum wmi_vdev_type type, | ||
1520 | enum wmi_vdev_subtype subtype, | ||
1521 | const u8 macaddr[ETH_ALEN]) | ||
1522 | { | ||
1523 | struct wmi_vdev_create_cmd *cmd; | ||
1524 | struct sk_buff *skb; | ||
1525 | |||
1526 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
1527 | if (!skb) | ||
1528 | return -ENOMEM; | ||
1529 | |||
1530 | cmd = (struct wmi_vdev_create_cmd *)skb->data; | ||
1531 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
1532 | cmd->vdev_type = __cpu_to_le32(type); | ||
1533 | cmd->vdev_subtype = __cpu_to_le32(subtype); | ||
1534 | memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN); | ||
1535 | |||
1536 | ath10k_dbg(ATH10K_DBG_WMI, | ||
1537 | "WMI vdev create: id %d type %d subtype %d macaddr %pM\n", | ||
1538 | vdev_id, type, subtype, macaddr); | ||
1539 | |||
1540 | return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_CREATE_CMDID); | ||
1541 | } | ||
1542 | |||
1543 | int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id) | ||
1544 | { | ||
1545 | struct wmi_vdev_delete_cmd *cmd; | ||
1546 | struct sk_buff *skb; | ||
1547 | |||
1548 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
1549 | if (!skb) | ||
1550 | return -ENOMEM; | ||
1551 | |||
1552 | cmd = (struct wmi_vdev_delete_cmd *)skb->data; | ||
1553 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
1554 | |||
1555 | ath10k_dbg(ATH10K_DBG_WMI, | ||
1556 | "WMI vdev delete id %d\n", vdev_id); | ||
1557 | |||
1558 | return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DELETE_CMDID); | ||
1559 | } | ||
1560 | |||
1561 | static int ath10k_wmi_vdev_start_restart(struct ath10k *ar, | ||
1562 | const struct wmi_vdev_start_request_arg *arg, | ||
1563 | enum wmi_cmd_id cmd_id) | ||
1564 | { | ||
1565 | struct wmi_vdev_start_request_cmd *cmd; | ||
1566 | struct sk_buff *skb; | ||
1567 | const char *cmdname; | ||
1568 | u32 flags = 0; | ||
1569 | |||
1570 | if (cmd_id != WMI_VDEV_START_REQUEST_CMDID && | ||
1571 | cmd_id != WMI_VDEV_RESTART_REQUEST_CMDID) | ||
1572 | return -EINVAL; | ||
1573 | if (WARN_ON(arg->ssid && arg->ssid_len == 0)) | ||
1574 | return -EINVAL; | ||
1575 | if (WARN_ON(arg->hidden_ssid && !arg->ssid)) | ||
1576 | return -EINVAL; | ||
1577 | if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid))) | ||
1578 | return -EINVAL; | ||
1579 | |||
1580 | if (cmd_id == WMI_VDEV_START_REQUEST_CMDID) | ||
1581 | cmdname = "start"; | ||
1582 | else if (cmd_id == WMI_VDEV_RESTART_REQUEST_CMDID) | ||
1583 | cmdname = "restart"; | ||
1584 | else | ||
1585 | return -EINVAL; /* should not happen, we already check cmd_id */ | ||
1586 | |||
1587 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
1588 | if (!skb) | ||
1589 | return -ENOMEM; | ||
1590 | |||
1591 | if (arg->hidden_ssid) | ||
1592 | flags |= WMI_VDEV_START_HIDDEN_SSID; | ||
1593 | if (arg->pmf_enabled) | ||
1594 | flags |= WMI_VDEV_START_PMF_ENABLED; | ||
1595 | |||
1596 | cmd = (struct wmi_vdev_start_request_cmd *)skb->data; | ||
1597 | cmd->vdev_id = __cpu_to_le32(arg->vdev_id); | ||
1598 | cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack); | ||
1599 | cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval); | ||
1600 | cmd->dtim_period = __cpu_to_le32(arg->dtim_period); | ||
1601 | cmd->flags = __cpu_to_le32(flags); | ||
1602 | cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate); | ||
1603 | cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power); | ||
1604 | |||
1605 | if (arg->ssid) { | ||
1606 | cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len); | ||
1607 | memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len); | ||
1608 | } | ||
1609 | |||
1610 | cmd->chan.mhz = __cpu_to_le32(arg->channel.freq); | ||
1611 | |||
1612 | cmd->chan.band_center_freq1 = | ||
1613 | __cpu_to_le32(arg->channel.band_center_freq1); | ||
1614 | |||
1615 | cmd->chan.mode = arg->channel.mode; | ||
1616 | cmd->chan.min_power = arg->channel.min_power; | ||
1617 | cmd->chan.max_power = arg->channel.max_power; | ||
1618 | cmd->chan.reg_power = arg->channel.max_reg_power; | ||
1619 | cmd->chan.reg_classid = arg->channel.reg_class_id; | ||
1620 | cmd->chan.antenna_max = arg->channel.max_antenna_gain; | ||
1621 | |||
1622 | ath10k_dbg(ATH10K_DBG_WMI, | ||
1623 | "wmi vdev %s id 0x%x freq %d, mode %d, ch_flags: 0x%X, " | ||
1624 | "max_power: %d\n", cmdname, arg->vdev_id, arg->channel.freq, | ||
1625 | arg->channel.mode, flags, arg->channel.max_power); | ||
1626 | |||
1627 | return ath10k_wmi_cmd_send(ar, skb, cmd_id); | ||
1628 | } | ||
1629 | |||
1630 | int ath10k_wmi_vdev_start(struct ath10k *ar, | ||
1631 | const struct wmi_vdev_start_request_arg *arg) | ||
1632 | { | ||
1633 | return ath10k_wmi_vdev_start_restart(ar, arg, | ||
1634 | WMI_VDEV_START_REQUEST_CMDID); | ||
1635 | } | ||
1636 | |||
1637 | int ath10k_wmi_vdev_restart(struct ath10k *ar, | ||
1638 | const struct wmi_vdev_start_request_arg *arg) | ||
1639 | { | ||
1640 | return ath10k_wmi_vdev_start_restart(ar, arg, | ||
1641 | WMI_VDEV_RESTART_REQUEST_CMDID); | ||
1642 | } | ||
1643 | |||
1644 | int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id) | ||
1645 | { | ||
1646 | struct wmi_vdev_stop_cmd *cmd; | ||
1647 | struct sk_buff *skb; | ||
1648 | |||
1649 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
1650 | if (!skb) | ||
1651 | return -ENOMEM; | ||
1652 | |||
1653 | cmd = (struct wmi_vdev_stop_cmd *)skb->data; | ||
1654 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
1655 | |||
1656 | ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id); | ||
1657 | |||
1658 | return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_STOP_CMDID); | ||
1659 | } | ||
1660 | |||
1661 | int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid) | ||
1662 | { | ||
1663 | struct wmi_vdev_up_cmd *cmd; | ||
1664 | struct sk_buff *skb; | ||
1665 | |||
1666 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
1667 | if (!skb) | ||
1668 | return -ENOMEM; | ||
1669 | |||
1670 | cmd = (struct wmi_vdev_up_cmd *)skb->data; | ||
1671 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
1672 | cmd->vdev_assoc_id = __cpu_to_le32(aid); | ||
1673 | memcpy(cmd->vdev_bssid.addr, bssid, ETH_ALEN); | ||
1674 | |||
1675 | ath10k_dbg(ATH10K_DBG_WMI, | ||
1676 | "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n", | ||
1677 | vdev_id, aid, bssid); | ||
1678 | |||
1679 | return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_UP_CMDID); | ||
1680 | } | ||
1681 | |||
1682 | int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id) | ||
1683 | { | ||
1684 | struct wmi_vdev_down_cmd *cmd; | ||
1685 | struct sk_buff *skb; | ||
1686 | |||
1687 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
1688 | if (!skb) | ||
1689 | return -ENOMEM; | ||
1690 | |||
1691 | cmd = (struct wmi_vdev_down_cmd *)skb->data; | ||
1692 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
1693 | |||
1694 | ath10k_dbg(ATH10K_DBG_WMI, | ||
1695 | "wmi mgmt vdev down id 0x%x\n", vdev_id); | ||
1696 | |||
1697 | return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DOWN_CMDID); | ||
1698 | } | ||
1699 | |||
1700 | int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, | ||
1701 | enum wmi_vdev_param param_id, u32 param_value) | ||
1702 | { | ||
1703 | struct wmi_vdev_set_param_cmd *cmd; | ||
1704 | struct sk_buff *skb; | ||
1705 | |||
1706 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
1707 | if (!skb) | ||
1708 | return -ENOMEM; | ||
1709 | |||
1710 | cmd = (struct wmi_vdev_set_param_cmd *)skb->data; | ||
1711 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
1712 | cmd->param_id = __cpu_to_le32(param_id); | ||
1713 | cmd->param_value = __cpu_to_le32(param_value); | ||
1714 | |||
1715 | ath10k_dbg(ATH10K_DBG_WMI, | ||
1716 | "wmi vdev id 0x%x set param %d value %d\n", | ||
1717 | vdev_id, param_id, param_value); | ||
1718 | |||
1719 | return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_SET_PARAM_CMDID); | ||
1720 | } | ||
1721 | |||
1722 | int ath10k_wmi_vdev_install_key(struct ath10k *ar, | ||
1723 | const struct wmi_vdev_install_key_arg *arg) | ||
1724 | { | ||
1725 | struct wmi_vdev_install_key_cmd *cmd; | ||
1726 | struct sk_buff *skb; | ||
1727 | |||
1728 | if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL) | ||
1729 | return -EINVAL; | ||
1730 | if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL) | ||
1731 | return -EINVAL; | ||
1732 | |||
1733 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->key_len); | ||
1734 | if (!skb) | ||
1735 | return -ENOMEM; | ||
1736 | |||
1737 | cmd = (struct wmi_vdev_install_key_cmd *)skb->data; | ||
1738 | cmd->vdev_id = __cpu_to_le32(arg->vdev_id); | ||
1739 | cmd->key_idx = __cpu_to_le32(arg->key_idx); | ||
1740 | cmd->key_flags = __cpu_to_le32(arg->key_flags); | ||
1741 | cmd->key_cipher = __cpu_to_le32(arg->key_cipher); | ||
1742 | cmd->key_len = __cpu_to_le32(arg->key_len); | ||
1743 | cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len); | ||
1744 | cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len); | ||
1745 | |||
1746 | if (arg->macaddr) | ||
1747 | memcpy(cmd->peer_macaddr.addr, arg->macaddr, ETH_ALEN); | ||
1748 | if (arg->key_data) | ||
1749 | memcpy(cmd->key_data, arg->key_data, arg->key_len); | ||
1750 | |||
1751 | return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID); | ||
1752 | } | ||
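
A hypothetical sketch of installing a pairwise CCMP key for a peer. The key flag and cipher constants (WMI_KEY_PAIRWISE, WMI_CIPHER_AES_CCM) are assumed to be among the definitions in wmi.h; the key material itself would come from mac80211:

	static int example_install_ccmp_key(struct ath10k *ar, u32 vdev_id,
					    const u8 *peer, const u8 *key,
					    u32 key_len)
	{
		struct wmi_vdev_install_key_arg arg = {
			.vdev_id    = vdev_id,
			.key_idx    = 0,
			.key_flags  = WMI_KEY_PAIRWISE,		/* assumed constant */
			.key_cipher = WMI_CIPHER_AES_CCM,	/* assumed constant */
			.key_len    = key_len,
			.macaddr    = peer,
			.key_data   = key,
		};

		return ath10k_wmi_vdev_install_key(ar, &arg);
	}
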
1753 | |||
1754 | int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, | ||
1755 | const u8 peer_addr[ETH_ALEN]) | ||
1756 | { | ||
1757 | struct wmi_peer_create_cmd *cmd; | ||
1758 | struct sk_buff *skb; | ||
1759 | |||
1760 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
1761 | if (!skb) | ||
1762 | return -ENOMEM; | ||
1763 | |||
1764 | cmd = (struct wmi_peer_create_cmd *)skb->data; | ||
1765 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
1766 | memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); | ||
1767 | |||
1768 | ath10k_dbg(ATH10K_DBG_WMI, | ||
1769 | "wmi peer create vdev_id %d peer_addr %pM\n", | ||
1770 | vdev_id, peer_addr); | ||
1771 | return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_CREATE_CMDID); | ||
1772 | } | ||
1773 | |||
1774 | int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, | ||
1775 | const u8 peer_addr[ETH_ALEN]) | ||
1776 | { | ||
1777 | struct wmi_peer_delete_cmd *cmd; | ||
1778 | struct sk_buff *skb; | ||
1779 | |||
1780 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
1781 | if (!skb) | ||
1782 | return -ENOMEM; | ||
1783 | |||
1784 | cmd = (struct wmi_peer_delete_cmd *)skb->data; | ||
1785 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
1786 | memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); | ||
1787 | |||
1788 | ath10k_dbg(ATH10K_DBG_WMI, | ||
1789 | "wmi peer delete vdev_id %d peer_addr %pM\n", | ||
1790 | vdev_id, peer_addr); | ||
1791 | return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_DELETE_CMDID); | ||
1792 | } | ||
1793 | |||
1794 | int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, | ||
1795 | const u8 peer_addr[ETH_ALEN], u32 tid_bitmap) | ||
1796 | { | ||
1797 | struct wmi_peer_flush_tids_cmd *cmd; | ||
1798 | struct sk_buff *skb; | ||
1799 | |||
1800 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
1801 | if (!skb) | ||
1802 | return -ENOMEM; | ||
1803 | |||
1804 | cmd = (struct wmi_peer_flush_tids_cmd *)skb->data; | ||
1805 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
1806 | cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap); | ||
1807 | memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); | ||
1808 | |||
1809 | ath10k_dbg(ATH10K_DBG_WMI, | ||
1810 | "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n", | ||
1811 | vdev_id, peer_addr, tid_bitmap); | ||
1812 | return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_FLUSH_TIDS_CMDID); | ||
1813 | } | ||
1814 | |||
1815 | int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, | ||
1816 | const u8 *peer_addr, enum wmi_peer_param param_id, | ||
1817 | u32 param_value) | ||
1818 | { | ||
1819 | struct wmi_peer_set_param_cmd *cmd; | ||
1820 | struct sk_buff *skb; | ||
1821 | |||
1822 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
1823 | if (!skb) | ||
1824 | return -ENOMEM; | ||
1825 | |||
1826 | cmd = (struct wmi_peer_set_param_cmd *)skb->data; | ||
1827 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
1828 | cmd->param_id = __cpu_to_le32(param_id); | ||
1829 | cmd->param_value = __cpu_to_le32(param_value); | ||
1830 | memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); | ||
1831 | |||
1832 | ath10k_dbg(ATH10K_DBG_WMI, | ||
1833 | "wmi vdev %d peer 0x%pM set param %d value %d\n", | ||
1834 | vdev_id, peer_addr, param_id, param_value); | ||
1835 | |||
1836 | return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_SET_PARAM_CMDID); | ||
1837 | } | ||
1838 | |||
1839 | int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id, | ||
1840 | enum wmi_sta_ps_mode psmode) | ||
1841 | { | ||
1842 | struct wmi_sta_powersave_mode_cmd *cmd; | ||
1843 | struct sk_buff *skb; | ||
1844 | |||
1845 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
1846 | if (!skb) | ||
1847 | return -ENOMEM; | ||
1848 | |||
1849 | cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data; | ||
1850 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
1851 | cmd->sta_ps_mode = __cpu_to_le32(psmode); | ||
1852 | |||
1853 | ath10k_dbg(ATH10K_DBG_WMI, | ||
1854 | "wmi set powersave id 0x%x mode %d\n", | ||
1855 | vdev_id, psmode); | ||
1856 | |||
1857 | return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_MODE_CMDID); | ||
1858 | } | ||
1859 | |||
1860 | int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id, | ||
1861 | enum wmi_sta_powersave_param param_id, | ||
1862 | u32 value) | ||
1863 | { | ||
1864 | struct wmi_sta_powersave_param_cmd *cmd; | ||
1865 | struct sk_buff *skb; | ||
1866 | |||
1867 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
1868 | if (!skb) | ||
1869 | return -ENOMEM; | ||
1870 | |||
1871 | cmd = (struct wmi_sta_powersave_param_cmd *)skb->data; | ||
1872 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
1873 | cmd->param_id = __cpu_to_le32(param_id); | ||
1874 | cmd->param_value = __cpu_to_le32(value); | ||
1875 | |||
1876 | ath10k_dbg(ATH10K_DBG_WMI, | ||
1877 | "wmi sta ps param vdev_id 0x%x param %d value %d\n", | ||
1878 | vdev_id, param_id, value); | ||
1879 | return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_PARAM_CMDID); | ||
1880 | } | ||
1881 | |||
1882 | int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac, | ||
1883 | enum wmi_ap_ps_peer_param param_id, u32 value) | ||
1884 | { | ||
1885 | struct wmi_ap_ps_peer_cmd *cmd; | ||
1886 | struct sk_buff *skb; | ||
1887 | |||
1888 | if (!mac) | ||
1889 | return -EINVAL; | ||
1890 | |||
1891 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
1892 | if (!skb) | ||
1893 | return -ENOMEM; | ||
1894 | |||
1895 | cmd = (struct wmi_ap_ps_peer_cmd *)skb->data; | ||
1896 | cmd->vdev_id = __cpu_to_le32(vdev_id); | ||
1897 | cmd->param_id = __cpu_to_le32(param_id); | ||
1898 | cmd->param_value = __cpu_to_le32(value); | ||
1899 | memcpy(&cmd->peer_macaddr, mac, ETH_ALEN); | ||
1900 | |||
1901 | ath10k_dbg(ATH10K_DBG_WMI, | ||
1902 | "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n", | ||
1903 | vdev_id, param_id, value, mac); | ||
1904 | |||
1905 | return ath10k_wmi_cmd_send(ar, skb, WMI_AP_PS_PEER_PARAM_CMDID); | ||
1906 | } | ||
1907 | |||
1908 | int ath10k_wmi_scan_chan_list(struct ath10k *ar, | ||
1909 | const struct wmi_scan_chan_list_arg *arg) | ||
1910 | { | ||
1911 | struct wmi_scan_chan_list_cmd *cmd; | ||
1912 | struct sk_buff *skb; | ||
1913 | struct wmi_channel_arg *ch; | ||
1914 | struct wmi_channel *ci; | ||
1915 | int len; | ||
1916 | int i; | ||
1917 | |||
1918 | len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel); | ||
1919 | |||
1920 | skb = ath10k_wmi_alloc_skb(len); | ||
1921 | if (!skb) | ||
1922 | return -ENOMEM; | ||
1923 | |||
1924 | cmd = (struct wmi_scan_chan_list_cmd *)skb->data; | ||
1925 | cmd->num_scan_chans = __cpu_to_le32(arg->n_channels); | ||
1926 | |||
1927 | for (i = 0; i < arg->n_channels; i++) { | ||
1928 | u32 flags = 0; | ||
1929 | |||
1930 | ch = &arg->channels[i]; | ||
1931 | ci = &cmd->chan_info[i]; | ||
1932 | |||
1933 | if (ch->passive) | ||
1934 | flags |= WMI_CHAN_FLAG_PASSIVE; | ||
1935 | if (ch->allow_ibss) | ||
1936 | flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED; | ||
1937 | if (ch->allow_ht) | ||
1938 | flags |= WMI_CHAN_FLAG_ALLOW_HT; | ||
1939 | if (ch->allow_vht) | ||
1940 | flags |= WMI_CHAN_FLAG_ALLOW_VHT; | ||
1941 | if (ch->ht40plus) | ||
1942 | flags |= WMI_CHAN_FLAG_HT40_PLUS; | ||
1943 | |||
1944 | ci->mhz = __cpu_to_le32(ch->freq); | ||
1945 | ci->band_center_freq1 = __cpu_to_le32(ch->freq); | ||
1946 | ci->band_center_freq2 = 0; | ||
1947 | ci->min_power = ch->min_power; | ||
1948 | ci->max_power = ch->max_power; | ||
1949 | ci->reg_power = ch->max_reg_power; | ||
1950 | ci->antenna_max = 0; | ||
1952 | |||
1953 | /* mode & flags share storage */ | ||
1954 | ci->mode = ch->mode; | ||
1955 | ci->flags |= __cpu_to_le32(flags); | ||
1956 | } | ||
1957 | |||
1958 | return ath10k_wmi_cmd_send(ar, skb, WMI_SCAN_CHAN_LIST_CMDID); | ||
1959 | } | ||
1960 | |||
1961 | int ath10k_wmi_peer_assoc(struct ath10k *ar, | ||
1962 | const struct wmi_peer_assoc_complete_arg *arg) | ||
1963 | { | ||
1964 | struct wmi_peer_assoc_complete_cmd *cmd; | ||
1965 | struct sk_buff *skb; | ||
1966 | |||
1967 | if (arg->peer_mpdu_density > 16) | ||
1968 | return -EINVAL; | ||
1969 | if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES) | ||
1970 | return -EINVAL; | ||
1971 | if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES) | ||
1972 | return -EINVAL; | ||
1973 | |||
1974 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
1975 | if (!skb) | ||
1976 | return -ENOMEM; | ||
1977 | |||
1978 | cmd = (struct wmi_peer_assoc_complete_cmd *)skb->data; | ||
1979 | cmd->vdev_id = __cpu_to_le32(arg->vdev_id); | ||
1980 | cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1); | ||
1981 | cmd->peer_associd = __cpu_to_le32(arg->peer_aid); | ||
1982 | cmd->peer_flags = __cpu_to_le32(arg->peer_flags); | ||
1983 | cmd->peer_caps = __cpu_to_le32(arg->peer_caps); | ||
1984 | cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval); | ||
1985 | cmd->peer_ht_caps = __cpu_to_le32(arg->peer_ht_caps); | ||
1986 | cmd->peer_max_mpdu = __cpu_to_le32(arg->peer_max_mpdu); | ||
1987 | cmd->peer_mpdu_density = __cpu_to_le32(arg->peer_mpdu_density); | ||
1988 | cmd->peer_rate_caps = __cpu_to_le32(arg->peer_rate_caps); | ||
1989 | cmd->peer_nss = __cpu_to_le32(arg->peer_num_spatial_streams); | ||
1990 | cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps); | ||
1991 | cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode); | ||
1992 | |||
1993 | memcpy(cmd->peer_macaddr.addr, arg->addr, ETH_ALEN); | ||
1994 | |||
1995 | cmd->peer_legacy_rates.num_rates = | ||
1996 | __cpu_to_le32(arg->peer_legacy_rates.num_rates); | ||
1997 | memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates, | ||
1998 | arg->peer_legacy_rates.num_rates); | ||
1999 | |||
2000 | cmd->peer_ht_rates.num_rates = | ||
2001 | __cpu_to_le32(arg->peer_ht_rates.num_rates); | ||
2002 | memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates, | ||
2003 | arg->peer_ht_rates.num_rates); | ||
2004 | |||
2005 | cmd->peer_vht_rates.rx_max_rate = | ||
2006 | __cpu_to_le32(arg->peer_vht_rates.rx_max_rate); | ||
2007 | cmd->peer_vht_rates.rx_mcs_set = | ||
2008 | __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set); | ||
2009 | cmd->peer_vht_rates.tx_max_rate = | ||
2010 | __cpu_to_le32(arg->peer_vht_rates.tx_max_rate); | ||
2011 | cmd->peer_vht_rates.tx_mcs_set = | ||
2012 | __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set); | ||
2013 | |||
2014 | return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID); | ||
2015 | } | ||
2016 | |||
2017 | int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg) | ||
2018 | { | ||
2019 | struct wmi_bcn_tx_cmd *cmd; | ||
2020 | struct sk_buff *skb; | ||
2021 | |||
2022 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len); | ||
2023 | if (!skb) | ||
2024 | return -ENOMEM; | ||
2025 | |||
2026 | cmd = (struct wmi_bcn_tx_cmd *)skb->data; | ||
2027 | cmd->hdr.vdev_id = __cpu_to_le32(arg->vdev_id); | ||
2028 | cmd->hdr.tx_rate = __cpu_to_le32(arg->tx_rate); | ||
2029 | cmd->hdr.tx_power = __cpu_to_le32(arg->tx_power); | ||
2030 | cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len); | ||
2031 | memcpy(cmd->bcn, arg->bcn, arg->bcn_len); | ||
2032 | |||
2033 | return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID); | ||
2034 | } | ||
2035 | |||
2036 | static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params, | ||
2037 | const struct wmi_wmm_params_arg *arg) | ||
2038 | { | ||
2039 | params->cwmin = __cpu_to_le32(arg->cwmin); | ||
2040 | params->cwmax = __cpu_to_le32(arg->cwmax); | ||
2041 | params->aifs = __cpu_to_le32(arg->aifs); | ||
2042 | params->txop = __cpu_to_le32(arg->txop); | ||
2043 | params->acm = __cpu_to_le32(arg->acm); | ||
2044 | params->no_ack = __cpu_to_le32(arg->no_ack); | ||
2045 | } | ||
2046 | |||
2047 | int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, | ||
2048 | const struct wmi_pdev_set_wmm_params_arg *arg) | ||
2049 | { | ||
2050 | struct wmi_pdev_set_wmm_params *cmd; | ||
2051 | struct sk_buff *skb; | ||
2052 | |||
2053 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
2054 | if (!skb) | ||
2055 | return -ENOMEM; | ||
2056 | |||
2057 | cmd = (struct wmi_pdev_set_wmm_params *)skb->data; | ||
2058 | ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be); | ||
2059 | ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk); | ||
2060 | ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi); | ||
2061 | ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo); | ||
2062 | |||
2063 | ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n"); | ||
2064 | return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_WMM_PARAMS_CMDID); | ||
2065 | } | ||
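
A hypothetical caller sketch; the numbers are the usual 802.11 best-effort defaults and are shown only for illustration:

	static int example_set_wmm(struct ath10k *ar)
	{
		struct wmi_pdev_set_wmm_params_arg arg = {};

		/* best-effort AC; the other three ACs are filled the same way */
		arg.ac_be.aifs   = 3;
		arg.ac_be.cwmin  = 15;
		arg.ac_be.cwmax  = 1023;
		arg.ac_be.txop   = 0;
		arg.ac_be.acm    = 0;
		arg.ac_be.no_ack = 0;

		return ath10k_wmi_pdev_set_wmm_params(ar, &arg);
	}
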
2066 | |||
2067 | int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id) | ||
2068 | { | ||
2069 | struct wmi_request_stats_cmd *cmd; | ||
2070 | struct sk_buff *skb; | ||
2071 | |||
2072 | skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); | ||
2073 | if (!skb) | ||
2074 | return -ENOMEM; | ||
2075 | |||
2076 | cmd = (struct wmi_request_stats_cmd *)skb->data; | ||
2077 | cmd->stats_id = __cpu_to_le32(stats_id); | ||
2078 | |||
2079 | ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id); | ||
2080 | return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID); | ||
2081 | } | ||
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h new file mode 100644 index 000000000000..9555f5a0e041 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/wmi.h | |||
@@ -0,0 +1,3052 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | ||
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | ||
4 | * | ||
5 | * Permission to use, copy, modify, and/or distribute this software for any | ||
6 | * purpose with or without fee is hereby granted, provided that the above | ||
7 | * copyright notice and this permission notice appear in all copies. | ||
8 | * | ||
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
16 | */ | ||
17 | |||
18 | #ifndef _WMI_H_ | ||
19 | #define _WMI_H_ | ||
20 | |||
21 | #include <linux/types.h> | ||
22 | #include <net/mac80211.h> | ||
23 | |||
24 | /* | ||
25 | * This file specifies the WMI interface for the Unified Software | ||
26 | * Architecture. | ||
27 | * | ||
28 | * It includes definitions of all the commands and events. Commands are | ||
29 | * messages from the host to the target. Events and Replies are messages | ||
30 | * from the target to the host. | ||
31 | * | ||
32 | * Responsibility for the correctness of WMI commands lies with the host | ||
33 | * driver; the target is not required to validate parameters for value, | ||
34 | * proper range, or anything else. | ||
35 | * | ||
36 | * Guidelines for extending this interface are below. | ||
37 | * | ||
38 | * 1. Add new WMI commands ONLY within the specified range - 0x9000 - 0x9fff | ||
39 | * | ||
40 | * 2. Use ONLY u32 type for defining member variables within WMI | ||
41 | * command/event structures. Do not use u8, u16, bool or | ||
42 | * enum types within these structures. | ||
43 | * | ||
44 | * 3. DO NOT define bit fields within structures. Implement bit fields | ||
45 | * using masks if necessary. Do not use the programming language's bit | ||
46 | * field definition. | ||
47 | * | ||
48 | * 4. Define macros for encode/decode of u8, u16 fields within | ||
49 | * the u32 variables. Use these macros for set/get of these fields. | ||
50 | * Try to use this to optimize the structure without bloating it with | ||
51 | * u32 variables for every lower sized field. | ||
52 | * | ||
53 | * 5. Do not use PACK/UNPACK attributes for the structures as each member | ||
54 | * variable is already 4-byte aligned by virtue of being a u32 | ||
55 | * type. | ||
56 | * | ||
57 | * 6. Comment each parameter of the WMI command/event structures using | ||
58 | * two stars at the beginning of the C comment (instead of one) to | ||
59 | * enable HTML documentation generation with Doxygen. | ||
60 | * | ||
61 | */ | ||
62 | |||
63 | /* Control Path */ | ||
64 | struct wmi_cmd_hdr { | ||
65 | __le32 cmd_id; | ||
66 | } __packed; | ||
67 | |||
68 | #define WMI_CMD_HDR_CMD_ID_MASK 0x00FFFFFF | ||
69 | #define WMI_CMD_HDR_CMD_ID_LSB 0 | ||
70 | #define WMI_CMD_HDR_PLT_PRIV_MASK 0xFF000000 | ||
71 | #define WMI_CMD_HDR_PLT_PRIV_LSB 24 | ||
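
Per guideline 4 above, sub-fields packed into a __le32 are accessed through mask/LSB macro pairs. A hypothetical pair of helpers (not part of this patch) showing how the 24-bit command id would be packed into and read back out of wmi_cmd_hdr:

	static inline void wmi_cmd_hdr_set_cmd_id(struct wmi_cmd_hdr *hdr, u32 cmd_id)
	{
		hdr->cmd_id = __cpu_to_le32((cmd_id << WMI_CMD_HDR_CMD_ID_LSB) &
					    WMI_CMD_HDR_CMD_ID_MASK);
	}

	static inline u32 wmi_cmd_hdr_get_cmd_id(const struct wmi_cmd_hdr *hdr)
	{
		return (__le32_to_cpu(hdr->cmd_id) & WMI_CMD_HDR_CMD_ID_MASK) >>
		       WMI_CMD_HDR_CMD_ID_LSB;
	}
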
72 | |||
73 | #define HTC_PROTOCOL_VERSION 0x0002 | ||
74 | #define WMI_PROTOCOL_VERSION 0x0002 | ||
75 | |||
76 | enum wmi_service_id { | ||
77 | WMI_SERVICE_BEACON_OFFLOAD = 0, /* beacon offload */ | ||
78 | WMI_SERVICE_SCAN_OFFLOAD, /* scan offload */ | ||
79 | WMI_SERVICE_ROAM_OFFLOAD, /* roam offload */ | ||
80 | WMI_SERVICE_BCN_MISS_OFFLOAD, /* beacon miss offload */ | ||
81 | WMI_SERVICE_STA_PWRSAVE, /* fake sleep + basic power save */ | ||
82 | WMI_SERVICE_STA_ADVANCED_PWRSAVE, /* uapsd, pspoll, force sleep */ | ||
83 | WMI_SERVICE_AP_UAPSD, /* uapsd on AP */ | ||
84 | WMI_SERVICE_AP_DFS, /* DFS on AP */ | ||
85 | WMI_SERVICE_11AC, /* supports 11ac */ | ||
86 | WMI_SERVICE_BLOCKACK, /* Supports triggering ADDBA/DELBA from host*/ | ||
87 | WMI_SERVICE_PHYERR, /* PHY error */ | ||
88 | WMI_SERVICE_BCN_FILTER, /* Beacon filter support */ | ||
89 | WMI_SERVICE_RTT, /* RTT (round trip time) support */ | ||
90 | WMI_SERVICE_RATECTRL, /* Rate-control */ | ||
91 | WMI_SERVICE_WOW, /* WOW Support */ | ||
92 | WMI_SERVICE_RATECTRL_CACHE, /* Rate-control caching */ | ||
93 | WMI_SERVICE_IRAM_TIDS, /* TIDs in IRAM */ | ||
94 | WMI_SERVICE_ARPNS_OFFLOAD, /* ARP NS Offload support */ | ||
95 | WMI_SERVICE_NLO, /* Network list offload service */ | ||
96 | WMI_SERVICE_GTK_OFFLOAD, /* GTK offload */ | ||
97 | WMI_SERVICE_SCAN_SCH, /* Scan Scheduler Service */ | ||
98 | WMI_SERVICE_CSA_OFFLOAD, /* CSA offload service */ | ||
99 | WMI_SERVICE_CHATTER, /* Chatter service */ | ||
100 | WMI_SERVICE_COEX_FREQAVOID, /* FW report freq range to avoid */ | ||
101 | WMI_SERVICE_PACKET_POWER_SAVE, /* packet power save service */ | ||
102 | WMI_SERVICE_FORCE_FW_HANG, /* To test fw recovery mechanism */ | ||
103 | WMI_SERVICE_GPIO, /* GPIO service */ | ||
104 | WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, /* Modulated DTIM support */ | ||
105 | WMI_STA_UAPSD_BASIC_AUTO_TRIG, /* UAPSD AC Trigger Generation */ | ||
106 | WMI_STA_UAPSD_VAR_AUTO_TRIG, /* -do- */ | ||
107 | WMI_SERVICE_STA_KEEP_ALIVE, /* STA keep alive mechanism support */ | ||
108 | WMI_SERVICE_TX_ENCAP, /* Packet type for TX encapsulation */ | ||
109 | |||
110 | WMI_SERVICE_LAST, | ||
111 | WMI_MAX_SERVICE = 64 /* max service */ | ||
112 | }; | ||
113 | |||
114 | static inline char *wmi_service_name(int service_id) | ||
115 | { | ||
116 | switch (service_id) { | ||
117 | case WMI_SERVICE_BEACON_OFFLOAD: | ||
118 | return "BEACON_OFFLOAD"; | ||
119 | case WMI_SERVICE_SCAN_OFFLOAD: | ||
120 | return "SCAN_OFFLOAD"; | ||
121 | case WMI_SERVICE_ROAM_OFFLOAD: | ||
122 | return "ROAM_OFFLOAD"; | ||
123 | case WMI_SERVICE_BCN_MISS_OFFLOAD: | ||
124 | return "BCN_MISS_OFFLOAD"; | ||
125 | case WMI_SERVICE_STA_PWRSAVE: | ||
126 | return "STA_PWRSAVE"; | ||
127 | case WMI_SERVICE_STA_ADVANCED_PWRSAVE: | ||
128 | return "STA_ADVANCED_PWRSAVE"; | ||
129 | case WMI_SERVICE_AP_UAPSD: | ||
130 | return "AP_UAPSD"; | ||
131 | case WMI_SERVICE_AP_DFS: | ||
132 | return "AP_DFS"; | ||
133 | case WMI_SERVICE_11AC: | ||
134 | return "11AC"; | ||
135 | case WMI_SERVICE_BLOCKACK: | ||
136 | return "BLOCKACK"; | ||
137 | case WMI_SERVICE_PHYERR: | ||
138 | return "PHYERR"; | ||
139 | case WMI_SERVICE_BCN_FILTER: | ||
140 | return "BCN_FILTER"; | ||
141 | case WMI_SERVICE_RTT: | ||
142 | return "RTT"; | ||
143 | case WMI_SERVICE_RATECTRL: | ||
144 | return "RATECTRL"; | ||
145 | case WMI_SERVICE_WOW: | ||
146 | return "WOW"; | ||
147 | case WMI_SERVICE_RATECTRL_CACHE: | ||
148 | return "RATECTRL CACHE"; | ||
149 | case WMI_SERVICE_IRAM_TIDS: | ||
150 | return "IRAM TIDS"; | ||
151 | case WMI_SERVICE_ARPNS_OFFLOAD: | ||
152 | return "ARPNS_OFFLOAD"; | ||
153 | case WMI_SERVICE_NLO: | ||
154 | return "NLO"; | ||
155 | case WMI_SERVICE_GTK_OFFLOAD: | ||
156 | return "GTK_OFFLOAD"; | ||
157 | case WMI_SERVICE_SCAN_SCH: | ||
158 | return "SCAN_SCH"; | ||
159 | case WMI_SERVICE_CSA_OFFLOAD: | ||
160 | return "CSA_OFFLOAD"; | ||
161 | case WMI_SERVICE_CHATTER: | ||
162 | return "CHATTER"; | ||
163 | case WMI_SERVICE_COEX_FREQAVOID: | ||
164 | return "COEX_FREQAVOID"; | ||
165 | case WMI_SERVICE_PACKET_POWER_SAVE: | ||
166 | return "PACKET_POWER_SAVE"; | ||
167 | case WMI_SERVICE_FORCE_FW_HANG: | ||
168 | return "FORCE FW HANG"; | ||
169 | case WMI_SERVICE_GPIO: | ||
170 | return "GPIO"; | ||
171 | case WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM: | ||
172 | return "MODULATED DTIM"; | ||
173 | case WMI_STA_UAPSD_BASIC_AUTO_TRIG: | ||
174 | return "BASIC UAPSD"; | ||
175 | case WMI_STA_UAPSD_VAR_AUTO_TRIG: | ||
176 | return "VAR UAPSD"; | ||
177 | case WMI_SERVICE_STA_KEEP_ALIVE: | ||
178 | return "STA KEEP ALIVE"; | ||
179 | case WMI_SERVICE_TX_ENCAP: | ||
180 | return "TX ENCAP"; | ||
181 | default: | ||
182 | return "UNKNOWN SERVICE"; | ||
183 | } | ||
184 | } | ||
185 | |||
186 | |||
187 | #define WMI_SERVICE_BM_SIZE \ | ||
188 | ((WMI_MAX_SERVICE + sizeof(u32) - 1)/sizeof(u32)) | ||
189 | |||
190 | /* 2 word representation of MAC addr */ | ||
191 | struct wmi_mac_addr { | ||
192 | union { | ||
193 | u8 addr[6]; | ||
194 | struct { | ||
195 | u32 word0; | ||
196 | u32 word1; | ||
197 | } __packed; | ||
198 | } __packed; | ||
199 | } __packed; | ||
200 | |||
201 | /* macro to convert MAC address from WMI word format to char array */ | ||
202 | #define WMI_MAC_ADDR_TO_CHAR_ARRAY(pwmi_mac_addr, c_macaddr) do { \ | ||
203 | (c_macaddr)[0] = ((pwmi_mac_addr)->word0) & 0xff; \ | ||
204 | (c_macaddr)[1] = (((pwmi_mac_addr)->word0) >> 8) & 0xff; \ | ||
205 | (c_macaddr)[2] = (((pwmi_mac_addr)->word0) >> 16) & 0xff; \ | ||
206 | (c_macaddr)[3] = (((pwmi_mac_addr)->word0) >> 24) & 0xff; \ | ||
207 | (c_macaddr)[4] = ((pwmi_mac_addr)->word1) & 0xff; \ | ||
208 | (c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \ | ||
209 | } while (0) | ||
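
A short hypothetical use of the macro, converting a firmware-supplied address back into the usual 6-byte form for logging:

	static void example_log_wmi_addr(const struct wmi_mac_addr *wmi_addr)
	{
		u8 addr[ETH_ALEN];

		WMI_MAC_ADDR_TO_CHAR_ARRAY(wmi_addr, addr);
		ath10k_dbg(ATH10K_DBG_WMI, "peer %pM\n", addr);
	}
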
210 | |||
211 | /* | ||
212 | * wmi command groups. | ||
213 | */ | ||
214 | enum wmi_cmd_group { | ||
215 | /* 0 to 2 are reserved */ | ||
216 | WMI_GRP_START = 0x3, | ||
217 | WMI_GRP_SCAN = WMI_GRP_START, | ||
218 | WMI_GRP_PDEV, | ||
219 | WMI_GRP_VDEV, | ||
220 | WMI_GRP_PEER, | ||
221 | WMI_GRP_MGMT, | ||
222 | WMI_GRP_BA_NEG, | ||
223 | WMI_GRP_STA_PS, | ||
224 | WMI_GRP_DFS, | ||
225 | WMI_GRP_ROAM, | ||
226 | WMI_GRP_OFL_SCAN, | ||
227 | WMI_GRP_P2P, | ||
228 | WMI_GRP_AP_PS, | ||
229 | WMI_GRP_RATE_CTRL, | ||
230 | WMI_GRP_PROFILE, | ||
231 | WMI_GRP_SUSPEND, | ||
232 | WMI_GRP_BCN_FILTER, | ||
233 | WMI_GRP_WOW, | ||
234 | WMI_GRP_RTT, | ||
235 | WMI_GRP_SPECTRAL, | ||
236 | WMI_GRP_STATS, | ||
237 | WMI_GRP_ARP_NS_OFL, | ||
238 | WMI_GRP_NLO_OFL, | ||
239 | WMI_GRP_GTK_OFL, | ||
240 | WMI_GRP_CSA_OFL, | ||
241 | WMI_GRP_CHATTER, | ||
242 | WMI_GRP_TID_ADDBA, | ||
243 | WMI_GRP_MISC, | ||
244 | WMI_GRP_GPIO, | ||
245 | }; | ||
246 | |||
247 | #define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1) | ||
248 | #define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1) | ||
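
As a worked example of this encoding: WMI_GRP_SCAN is 0x3, so the first command of the scan group is (0x3 << 12) | 0x1 = 0x3001, and the members that follow it in each enum block simply increment from there. A hypothetical compile-time check spelling this out:

	static inline void wmi_cmd_grp_layout_example(void)
	{
		/* first scan-group command: (0x3 << 12) | 0x1 == 0x3001 */
		BUILD_BUG_ON(WMI_CMD_GRP(WMI_GRP_SCAN) != 0x3001);
		/* first pdev-group command: (0x4 << 12) | 0x1 == 0x4001 */
		BUILD_BUG_ON(WMI_CMD_GRP(WMI_GRP_PDEV) != 0x4001);
	}
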
249 | |||
250 | /* Command IDs and command events. */ | ||
251 | enum wmi_cmd_id { | ||
252 | WMI_INIT_CMDID = 0x1, | ||
253 | |||
254 | /* Scan specific commands */ | ||
255 | WMI_START_SCAN_CMDID = WMI_CMD_GRP(WMI_GRP_SCAN), | ||
256 | WMI_STOP_SCAN_CMDID, | ||
257 | WMI_SCAN_CHAN_LIST_CMDID, | ||
258 | WMI_SCAN_SCH_PRIO_TBL_CMDID, | ||
259 | |||
260 | /* PDEV (physical device) specific commands */ | ||
261 | WMI_PDEV_SET_REGDOMAIN_CMDID = WMI_CMD_GRP(WMI_GRP_PDEV), | ||
262 | WMI_PDEV_SET_CHANNEL_CMDID, | ||
263 | WMI_PDEV_SET_PARAM_CMDID, | ||
264 | WMI_PDEV_PKTLOG_ENABLE_CMDID, | ||
265 | WMI_PDEV_PKTLOG_DISABLE_CMDID, | ||
266 | WMI_PDEV_SET_WMM_PARAMS_CMDID, | ||
267 | WMI_PDEV_SET_HT_CAP_IE_CMDID, | ||
268 | WMI_PDEV_SET_VHT_CAP_IE_CMDID, | ||
269 | WMI_PDEV_SET_DSCP_TID_MAP_CMDID, | ||
270 | WMI_PDEV_SET_QUIET_MODE_CMDID, | ||
271 | WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID, | ||
272 | WMI_PDEV_GET_TPC_CONFIG_CMDID, | ||
273 | WMI_PDEV_SET_BASE_MACADDR_CMDID, | ||
274 | |||
275 | /* VDEV (virtual device) specific commands */ | ||
276 | WMI_VDEV_CREATE_CMDID = WMI_CMD_GRP(WMI_GRP_VDEV), | ||
277 | WMI_VDEV_DELETE_CMDID, | ||
278 | WMI_VDEV_START_REQUEST_CMDID, | ||
279 | WMI_VDEV_RESTART_REQUEST_CMDID, | ||
280 | WMI_VDEV_UP_CMDID, | ||
281 | WMI_VDEV_STOP_CMDID, | ||
282 | WMI_VDEV_DOWN_CMDID, | ||
283 | WMI_VDEV_SET_PARAM_CMDID, | ||
284 | WMI_VDEV_INSTALL_KEY_CMDID, | ||
285 | |||
286 | /* peer specific commands */ | ||
287 | WMI_PEER_CREATE_CMDID = WMI_CMD_GRP(WMI_GRP_PEER), | ||
288 | WMI_PEER_DELETE_CMDID, | ||
289 | WMI_PEER_FLUSH_TIDS_CMDID, | ||
290 | WMI_PEER_SET_PARAM_CMDID, | ||
291 | WMI_PEER_ASSOC_CMDID, | ||
292 | WMI_PEER_ADD_WDS_ENTRY_CMDID, | ||
293 | WMI_PEER_REMOVE_WDS_ENTRY_CMDID, | ||
294 | WMI_PEER_MCAST_GROUP_CMDID, | ||
295 | |||
296 | /* beacon/management specific commands */ | ||
297 | WMI_BCN_TX_CMDID = WMI_CMD_GRP(WMI_GRP_MGMT), | ||
298 | WMI_PDEV_SEND_BCN_CMDID, | ||
299 | WMI_BCN_TMPL_CMDID, | ||
300 | WMI_BCN_FILTER_RX_CMDID, | ||
301 | WMI_PRB_REQ_FILTER_RX_CMDID, | ||
302 | WMI_MGMT_TX_CMDID, | ||
303 | WMI_PRB_TMPL_CMDID, | ||
304 | |||
305 | /* commands to control BA negotiation directly from the host. */ | ||
306 | WMI_ADDBA_CLEAR_RESP_CMDID = WMI_CMD_GRP(WMI_GRP_BA_NEG), | ||
307 | WMI_ADDBA_SEND_CMDID, | ||
308 | WMI_ADDBA_STATUS_CMDID, | ||
309 | WMI_DELBA_SEND_CMDID, | ||
310 | WMI_ADDBA_SET_RESP_CMDID, | ||
311 | WMI_SEND_SINGLEAMSDU_CMDID, | ||
312 | |||
313 | /* Station power save specific config */ | ||
314 | WMI_STA_POWERSAVE_MODE_CMDID = WMI_CMD_GRP(WMI_GRP_STA_PS), | ||
315 | WMI_STA_POWERSAVE_PARAM_CMDID, | ||
316 | WMI_STA_MIMO_PS_MODE_CMDID, | ||
317 | |||
318 | /** DFS-specific commands */ | ||
319 | WMI_PDEV_DFS_ENABLE_CMDID = WMI_CMD_GRP(WMI_GRP_DFS), | ||
320 | WMI_PDEV_DFS_DISABLE_CMDID, | ||
321 | |||
322 | /* Roaming specific commands */ | ||
323 | WMI_ROAM_SCAN_MODE = WMI_CMD_GRP(WMI_GRP_ROAM), | ||
324 | WMI_ROAM_SCAN_RSSI_THRESHOLD, | ||
325 | WMI_ROAM_SCAN_PERIOD, | ||
326 | WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, | ||
327 | WMI_ROAM_AP_PROFILE, | ||
328 | |||
329 | /* offload scan specific commands */ | ||
330 | WMI_OFL_SCAN_ADD_AP_PROFILE = WMI_CMD_GRP(WMI_GRP_OFL_SCAN), | ||
331 | WMI_OFL_SCAN_REMOVE_AP_PROFILE, | ||
332 | WMI_OFL_SCAN_PERIOD, | ||
333 | |||
334 | /* P2P specific commands */ | ||
335 | WMI_P2P_DEV_SET_DEVICE_INFO = WMI_CMD_GRP(WMI_GRP_P2P), | ||
336 | WMI_P2P_DEV_SET_DISCOVERABILITY, | ||
337 | WMI_P2P_GO_SET_BEACON_IE, | ||
338 | WMI_P2P_GO_SET_PROBE_RESP_IE, | ||
339 | WMI_P2P_SET_VENDOR_IE_DATA_CMDID, | ||
340 | |||
341 | /* AP power save specific config */ | ||
342 | WMI_AP_PS_PEER_PARAM_CMDID = WMI_CMD_GRP(WMI_GRP_AP_PS), | ||
343 | WMI_AP_PS_PEER_UAPSD_COEX_CMDID, | ||
344 | |||
345 | /* Rate-control specific commands */ | ||
346 | WMI_PEER_RATE_RETRY_SCHED_CMDID = | ||
347 | WMI_CMD_GRP(WMI_GRP_RATE_CTRL), | ||
348 | |||
349 | /* WLAN Profiling commands. */ | ||
350 | WMI_WLAN_PROFILE_TRIGGER_CMDID = WMI_CMD_GRP(WMI_GRP_PROFILE), | ||
351 | WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID, | ||
352 | WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, | ||
353 | WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, | ||
354 | WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, | ||
355 | |||
356 | /* Suspend resume command Ids */ | ||
357 | WMI_PDEV_SUSPEND_CMDID = WMI_CMD_GRP(WMI_GRP_SUSPEND), | ||
358 | WMI_PDEV_RESUME_CMDID, | ||
359 | |||
360 | /* Beacon filter commands */ | ||
361 | WMI_ADD_BCN_FILTER_CMDID = WMI_CMD_GRP(WMI_GRP_BCN_FILTER), | ||
362 | WMI_RMV_BCN_FILTER_CMDID, | ||
363 | |||
364 | /* WOW Specific WMI commands*/ | ||
365 | WMI_WOW_ADD_WAKE_PATTERN_CMDID = WMI_CMD_GRP(WMI_GRP_WOW), | ||
366 | WMI_WOW_DEL_WAKE_PATTERN_CMDID, | ||
367 | WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, | ||
368 | WMI_WOW_ENABLE_CMDID, | ||
369 | WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, | ||
370 | |||
371 | /* RTT measurement related cmd */ | ||
372 | WMI_RTT_MEASREQ_CMDID = WMI_CMD_GRP(WMI_GRP_RTT), | ||
373 | WMI_RTT_TSF_CMDID, | ||
374 | |||
375 | /* spectral scan commands */ | ||
376 | WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID = WMI_CMD_GRP(WMI_GRP_SPECTRAL), | ||
377 | WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, | ||
378 | |||
379 | /* F/W stats */ | ||
380 | WMI_REQUEST_STATS_CMDID = WMI_CMD_GRP(WMI_GRP_STATS), | ||
381 | |||
382 | /* ARP OFFLOAD REQUEST*/ | ||
383 | WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_CMD_GRP(WMI_GRP_ARP_NS_OFL), | ||
384 | |||
385 | /* Network list offload config */ | ||
386 | WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_NLO_OFL), | ||
387 | |||
388 | /* GTK offload Specific WMI commands*/ | ||
389 | WMI_GTK_OFFLOAD_CMDID = WMI_CMD_GRP(WMI_GRP_GTK_OFL), | ||
390 | |||
391 | /* CSA offload Specific WMI commands*/ | ||
392 | WMI_CSA_OFFLOAD_ENABLE_CMDID = WMI_CMD_GRP(WMI_GRP_CSA_OFL), | ||
393 | WMI_CSA_OFFLOAD_CHANSWITCH_CMDID, | ||
394 | |||
395 | /* Chatter commands*/ | ||
396 | WMI_CHATTER_SET_MODE_CMDID = WMI_CMD_GRP(WMI_GRP_CHATTER), | ||
397 | |||
398 | /* addba specific commands */ | ||
399 | WMI_PEER_TID_ADDBA_CMDID = WMI_CMD_GRP(WMI_GRP_TID_ADDBA), | ||
400 | WMI_PEER_TID_DELBA_CMDID, | ||
401 | |||
402 | /* set station DTIM powersave method */ | ||
403 | WMI_STA_DTIM_PS_METHOD_CMDID, | ||
404 | /* Configure the Station UAPSD AC Auto Trigger Parameters */ | ||
405 | WMI_STA_UAPSD_AUTO_TRIG_CMDID, | ||
406 | |||
407 | /* STA Keep alive parameter configuration, | ||
408 | Requires WMI_SERVICE_STA_KEEP_ALIVE */ | ||
409 | WMI_STA_KEEPALIVE_CMD, | ||
410 | |||
411 | /* misc command group */ | ||
412 | WMI_ECHO_CMDID = WMI_CMD_GRP(WMI_GRP_MISC), | ||
413 | WMI_PDEV_UTF_CMDID, | ||
414 | WMI_DBGLOG_CFG_CMDID, | ||
415 | WMI_PDEV_QVIT_CMDID, | ||
416 | WMI_PDEV_FTM_INTG_CMDID, | ||
417 | WMI_VDEV_SET_KEEPALIVE_CMDID, | ||
418 | WMI_VDEV_GET_KEEPALIVE_CMDID, | ||
419 | |||
420 | /* GPIO Configuration */ | ||
421 | WMI_GPIO_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_GPIO), | ||
422 | WMI_GPIO_OUTPUT_CMDID, | ||
423 | }; | ||
424 | |||
425 | enum wmi_event_id { | ||
426 | WMI_SERVICE_READY_EVENTID = 0x1, | ||
427 | WMI_READY_EVENTID, | ||
428 | |||
429 | /* Scan specific events */ | ||
430 | WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN), | ||
431 | |||
432 | /* PDEV specific events */ | ||
433 | WMI_PDEV_TPC_CONFIG_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PDEV), | ||
434 | WMI_CHAN_INFO_EVENTID, | ||
435 | WMI_PHYERR_EVENTID, | ||
436 | |||
437 | /* VDEV specific events */ | ||
438 | WMI_VDEV_START_RESP_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_VDEV), | ||
439 | WMI_VDEV_STOPPED_EVENTID, | ||
440 | WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID, | ||
441 | |||
442 | /* peer specific events */ | ||
443 | WMI_PEER_STA_KICKOUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PEER), | ||
444 | |||
445 | /* beacon/mgmt specific events */ | ||
446 | WMI_MGMT_RX_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MGMT), | ||
447 | WMI_HOST_SWBA_EVENTID, | ||
448 | WMI_TBTTOFFSET_UPDATE_EVENTID, | ||
449 | |||
450 | /* ADDBA Related WMI Events*/ | ||
451 | WMI_TX_DELBA_COMPLETE_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_BA_NEG), | ||
452 | WMI_TX_ADDBA_COMPLETE_EVENTID, | ||
453 | |||
454 | /* Roam event to trigger roaming on host */ | ||
455 | WMI_ROAM_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_ROAM), | ||
456 | WMI_PROFILE_MATCH, | ||
457 | |||
458 | /* WoW */ | ||
459 | WMI_WOW_WAKEUP_HOST_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_WOW), | ||
460 | |||
461 | /* RTT */ | ||
462 | WMI_RTT_MEASUREMENT_REPORT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_RTT), | ||
463 | WMI_TSF_MEASUREMENT_REPORT_EVENTID, | ||
464 | WMI_RTT_ERROR_REPORT_EVENTID, | ||
465 | |||
466 | /* GTK offload */ | ||
467 | WMI_GTK_OFFLOAD_STATUS_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GTK_OFL), | ||
468 | WMI_GTK_REKEY_FAIL_EVENTID, | ||
469 | |||
470 | /* CSA IE received event */ | ||
471 | WMI_CSA_HANDLING_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_CSA_OFL), | ||
472 | |||
473 | /* Misc events */ | ||
474 | WMI_ECHO_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MISC), | ||
475 | WMI_PDEV_UTF_EVENTID, | ||
476 | WMI_DEBUG_MESG_EVENTID, | ||
477 | WMI_UPDATE_STATS_EVENTID, | ||
478 | WMI_DEBUG_PRINT_EVENTID, | ||
479 | WMI_DCS_INTERFERENCE_EVENTID, | ||
480 | WMI_PDEV_QVIT_EVENTID, | ||
481 | WMI_WLAN_PROFILE_DATA_EVENTID, | ||
482 | WMI_PDEV_FTM_INTG_EVENTID, | ||
483 | WMI_WLAN_FREQ_AVOID_EVENTID, | ||
484 | WMI_VDEV_GET_KEEPALIVE_EVENTID, | ||
485 | |||
486 | /* GPIO Event */ | ||
487 | WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO), | ||
488 | }; | ||
489 | |||
490 | enum wmi_phy_mode { | ||
491 | MODE_11A = 0, /* 11a Mode */ | ||
492 | MODE_11G = 1, /* 11b/g Mode */ | ||
493 | MODE_11B = 2, /* 11b Mode */ | ||
494 | MODE_11GONLY = 3, /* 11g only Mode */ | ||
495 | MODE_11NA_HT20 = 4, /* 11a HT20 mode */ | ||
496 | MODE_11NG_HT20 = 5, /* 11g HT20 mode */ | ||
497 | MODE_11NA_HT40 = 6, /* 11a HT40 mode */ | ||
498 | MODE_11NG_HT40 = 7, /* 11g HT40 mode */ | ||
499 | MODE_11AC_VHT20 = 8, | ||
500 | MODE_11AC_VHT40 = 9, | ||
501 | MODE_11AC_VHT80 = 10, | ||
502 | /* MODE_11AC_VHT160 = 11, */ | ||
503 | MODE_11AC_VHT20_2G = 11, | ||
504 | MODE_11AC_VHT40_2G = 12, | ||
505 | MODE_11AC_VHT80_2G = 13, | ||
506 | MODE_UNKNOWN = 14, | ||
507 | MODE_MAX = 14 | ||
508 | }; | ||
509 | |||
510 | #define WMI_CHAN_LIST_TAG 0x1 | ||
511 | #define WMI_SSID_LIST_TAG 0x2 | ||
512 | #define WMI_BSSID_LIST_TAG 0x3 | ||
513 | #define WMI_IE_TAG 0x4 | ||
514 | |||
515 | struct wmi_channel { | ||
516 | __le32 mhz; | ||
517 | __le32 band_center_freq1; | ||
518 | __le32 band_center_freq2; /* valid for 11ac, 80plus80 */ | ||
519 | union { | ||
520 | __le32 flags; /* WMI_CHAN_FLAG_ */ | ||
521 | struct { | ||
522 | u8 mode; /* only 6 LSBs */ | ||
523 | } __packed; | ||
524 | } __packed; | ||
525 | union { | ||
526 | __le32 reginfo0; | ||
527 | struct { | ||
528 | u8 min_power; | ||
529 | u8 max_power; | ||
530 | u8 reg_power; | ||
531 | u8 reg_classid; | ||
532 | } __packed; | ||
533 | } __packed; | ||
534 | union { | ||
535 | __le32 reginfo1; | ||
536 | struct { | ||
537 | u8 antenna_max; | ||
538 | } __packed; | ||
539 | } __packed; | ||
540 | } __packed; | ||
541 | |||
542 | struct wmi_channel_arg { | ||
543 | u32 freq; | ||
544 | u32 band_center_freq1; | ||
545 | bool passive; | ||
546 | bool allow_ibss; | ||
547 | bool allow_ht; | ||
548 | bool allow_vht; | ||
549 | bool ht40plus; | ||
550 | /* note: power unit is 1/4th of dBm */ | ||
551 | u32 min_power; | ||
552 | u32 max_power; | ||
553 | u32 max_reg_power; | ||
554 | u32 max_antenna_gain; | ||
555 | u32 reg_class_id; | ||
556 | enum wmi_phy_mode mode; | ||
557 | }; | ||
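
A hypothetical initializer (values purely illustrative) describing 5 GHz channel 36 as an HT20 channel; this structure is consumed as an array by ath10k_wmi_scan_chan_list() and, embedded in the start request argument, by ath10k_wmi_vdev_start():

	static const struct wmi_channel_arg example_chan_36 = {
		.freq			= 5180,	/* MHz */
		.band_center_freq1	= 5180,
		.allow_ht		= true,
		.allow_vht		= true,
		.max_power		= 30,	/* see the 1/4 dBm note above */
		.max_reg_power		= 30,
		.max_antenna_gain	= 0,
		.mode			= MODE_11NA_HT20,
	};
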
558 | |||
559 | enum wmi_channel_change_cause { | ||
560 | WMI_CHANNEL_CHANGE_CAUSE_NONE = 0, | ||
561 | WMI_CHANNEL_CHANGE_CAUSE_CSA, | ||
562 | }; | ||
563 | |||
564 | #define WMI_CHAN_FLAG_HT40_PLUS (1 << 6) | ||
565 | #define WMI_CHAN_FLAG_PASSIVE (1 << 7) | ||
566 | #define WMI_CHAN_FLAG_ADHOC_ALLOWED (1 << 8) | ||
567 | #define WMI_CHAN_FLAG_AP_DISABLED (1 << 9) | ||
568 | #define WMI_CHAN_FLAG_DFS (1 << 10) | ||
569 | #define WMI_CHAN_FLAG_ALLOW_HT (1 << 11) | ||
570 | #define WMI_CHAN_FLAG_ALLOW_VHT (1 << 12) | ||
571 | |||
572 | /* Indicate reason for channel switch */ | ||
573 | #define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13) | ||
574 | |||
575 | #define WMI_MAX_SPATIAL_STREAM 3 | ||
576 | |||
577 | /* HT Capabilities*/ | ||
578 | #define WMI_HT_CAP_ENABLED 0x0001 /* HT Enabled/ disabled */ | ||
579 | #define WMI_HT_CAP_HT20_SGI 0x0002 /* Short Guard Interval with HT20 */ | ||
580 | #define WMI_HT_CAP_DYNAMIC_SMPS 0x0004 /* Dynamic MIMO powersave */ | ||
581 | #define WMI_HT_CAP_TX_STBC 0x0008 /* B3 TX STBC */ | ||
582 | #define WMI_HT_CAP_TX_STBC_MASK_SHIFT 3 | ||
583 | #define WMI_HT_CAP_RX_STBC 0x0030 /* B4-B5 RX STBC */ | ||
584 | #define WMI_HT_CAP_RX_STBC_MASK_SHIFT 4 | ||
585 | #define WMI_HT_CAP_LDPC 0x0040 /* LDPC supported */ | ||
586 | #define WMI_HT_CAP_L_SIG_TXOP_PROT 0x0080 /* L-SIG TXOP Protection */ | ||
587 | #define WMI_HT_CAP_MPDU_DENSITY 0x0700 /* MPDU Density */ | ||
588 | #define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8 | ||
589 | #define WMI_HT_CAP_HT40_SGI 0x0800 | ||
590 | |||
591 | #define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED | \ | ||
592 | WMI_HT_CAP_HT20_SGI | \ | ||
593 | WMI_HT_CAP_HT40_SGI | \ | ||
594 | WMI_HT_CAP_TX_STBC | \ | ||
595 | WMI_HT_CAP_RX_STBC | \ | ||
596 | WMI_HT_CAP_LDPC) | ||
597 | |||
598 | |||
599 | /* | ||
600 | * The WMI_VHT_CAP_* masks map to the IEEE 802.11ac VHT capability | ||
601 | * information field. Fields not defined here are unsupported or reserved. | ||
602 | * Do not change these masks; if you have to add a new one, follow the | ||
603 | * bit layout specified by the 802.11ac draft. | ||
604 | */ | ||
605 | |||
606 | #define WMI_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003 | ||
607 | #define WMI_VHT_CAP_RX_LDPC 0x00000010 | ||
608 | #define WMI_VHT_CAP_SGI_80MHZ 0x00000020 | ||
609 | #define WMI_VHT_CAP_TX_STBC 0x00000080 | ||
610 | #define WMI_VHT_CAP_RX_STBC_MASK 0x00000300 | ||
611 | #define WMI_VHT_CAP_RX_STBC_MASK_SHIFT 8 | ||
612 | #define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000 | ||
613 | #define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT 23 | ||
614 | #define WMI_VHT_CAP_RX_FIXED_ANT 0x10000000 | ||
615 | #define WMI_VHT_CAP_TX_FIXED_ANT 0x20000000 | ||
616 | |||
617 | /* The following also apply to the maximum HT A-MSDU length */ | ||
618 | #define WMI_VHT_CAP_MAX_MPDU_LEN_3839 0x00000000 | ||
619 | #define WMI_VHT_CAP_MAX_MPDU_LEN_7935 0x00000001 | ||
620 | #define WMI_VHT_CAP_MAX_MPDU_LEN_11454 0x00000002 | ||
621 | |||
622 | #define WMI_VHT_CAP_DEFAULT_ALL (WMI_VHT_CAP_MAX_MPDU_LEN_11454 | \ | ||
623 | WMI_VHT_CAP_RX_LDPC | \ | ||
624 | WMI_VHT_CAP_SGI_80MHZ | \ | ||
625 | WMI_VHT_CAP_TX_STBC | \ | ||
626 | WMI_VHT_CAP_RX_STBC_MASK | \ | ||
627 | WMI_VHT_CAP_MAX_AMPDU_LEN_EXP | \ | ||
628 | WMI_VHT_CAP_RX_FIXED_ANT | \ | ||
629 | WMI_VHT_CAP_TX_FIXED_ANT) | ||
630 | |||
631 | /* | ||
632 | * Interested readers should refer to the Rx/Tx MCS Map definition in | ||
633 | * 802.11ac. | ||
634 | */ | ||
635 | #define WMI_VHT_MAX_MCS_4_SS_MASK(r, ss) ((3 & (r)) << (((ss) - 1) << 1)) | ||
636 | #define WMI_VHT_MAX_SUPP_RATE_MASK 0x1fff0000 | ||
637 | #define WMI_VHT_MAX_SUPP_RATE_MASK_SHIFT 16 | ||
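
A worked example of the per-spatial-stream mask above: each stream occupies a 2-bit field, and a field value of 2 means "MCS 0-9 supported" in the 802.11ac Rx/Tx MCS map encoding, so an MCS map advertising MCS 0-9 on the first two streams could be built as follows (illustrative only):

	u32 mcs_map = WMI_VHT_MAX_MCS_4_SS_MASK(2, 1) |	/* bits 1:0 = 0x2 */
		      WMI_VHT_MAX_MCS_4_SS_MASK(2, 2);	/* bits 3:2 = 0x2 */
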
638 | |||
639 | enum { | ||
640 | REGDMN_MODE_11A = 0x00001, /* 11a channels */ | ||
641 | REGDMN_MODE_TURBO = 0x00002, /* 11a turbo-only channels */ | ||
642 | REGDMN_MODE_11B = 0x00004, /* 11b channels */ | ||
643 | REGDMN_MODE_PUREG = 0x00008, /* 11g channels (OFDM only) */ | ||
644 | REGDMN_MODE_11G = 0x00008, /* XXX historical */ | ||
645 | REGDMN_MODE_108G = 0x00020, /* 11g+Turbo channels */ | ||
646 | REGDMN_MODE_108A = 0x00040, /* 11a+Turbo channels */ | ||
647 | REGDMN_MODE_XR = 0x00100, /* XR channels */ | ||
648 | REGDMN_MODE_11A_HALF_RATE = 0x00200, /* 11A half rate channels */ | ||
649 | REGDMN_MODE_11A_QUARTER_RATE = 0x00400, /* 11A quarter rate channels */ | ||
650 | REGDMN_MODE_11NG_HT20 = 0x00800, /* 11N-G HT20 channels */ | ||
651 | REGDMN_MODE_11NA_HT20 = 0x01000, /* 11N-A HT20 channels */ | ||
652 | REGDMN_MODE_11NG_HT40PLUS = 0x02000, /* 11N-G HT40 + channels */ | ||
653 | REGDMN_MODE_11NG_HT40MINUS = 0x04000, /* 11N-G HT40 - channels */ | ||
654 | REGDMN_MODE_11NA_HT40PLUS = 0x08000, /* 11N-A HT40 + channels */ | ||
655 | REGDMN_MODE_11NA_HT40MINUS = 0x10000, /* 11N-A HT40 - channels */ | ||
656 | REGDMN_MODE_11AC_VHT20 = 0x20000, /* 5Ghz, VHT20 */ | ||
657 | REGDMN_MODE_11AC_VHT40PLUS = 0x40000, /* 5Ghz, VHT40 + channels */ | ||
658 | REGDMN_MODE_11AC_VHT40MINUS = 0x80000, /* 5Ghz VHT40 - channels */ | ||
659 | REGDMN_MODE_11AC_VHT80 = 0x100000, /* 5Ghz, VHT80 channels */ | ||
660 | REGDMN_MODE_ALL = 0xffffffff | ||
661 | }; | ||
662 | |||
663 | #define REGDMN_CAP1_CHAN_HALF_RATE 0x00000001 | ||
664 | #define REGDMN_CAP1_CHAN_QUARTER_RATE 0x00000002 | ||
665 | #define REGDMN_CAP1_CHAN_HAL49GHZ 0x00000004 | ||
666 | |||
667 | /* regulatory capabilities */ | ||
668 | #define REGDMN_EEPROM_EEREGCAP_EN_FCC_MIDBAND 0x0040 | ||
669 | #define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_EVEN 0x0080 | ||
670 | #define REGDMN_EEPROM_EEREGCAP_EN_KK_U2 0x0100 | ||
671 | #define REGDMN_EEPROM_EEREGCAP_EN_KK_MIDBAND 0x0200 | ||
672 | #define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_ODD 0x0400 | ||
673 | #define REGDMN_EEPROM_EEREGCAP_EN_KK_NEW_11A 0x0800 | ||
674 | |||
675 | struct hal_reg_capabilities { | ||
676 | /* regdomain value specified in EEPROM */ | ||
677 | __le32 eeprom_rd; | ||
678 | /* regdomain extension */ | ||
679 | __le32 eeprom_rd_ext; | ||
680 | /* CAP1 capabilities bit map. */ | ||
681 | __le32 regcap1; | ||
682 | /* REGDMN EEPROM CAP. */ | ||
683 | __le32 regcap2; | ||
684 | /* REGDMN MODE */ | ||
685 | __le32 wireless_modes; | ||
686 | __le32 low_2ghz_chan; | ||
687 | __le32 high_2ghz_chan; | ||
688 | __le32 low_5ghz_chan; | ||
689 | __le32 high_5ghz_chan; | ||
690 | } __packed; | ||
691 | |||
692 | enum wlan_mode_capability { | ||
693 | WHAL_WLAN_11A_CAPABILITY = 0x1, | ||
694 | WHAL_WLAN_11G_CAPABILITY = 0x2, | ||
695 | WHAL_WLAN_11AG_CAPABILITY = 0x3, | ||
696 | }; | ||
697 | |||
698 | /* structure used by FW for requesting host memory */ | ||
699 | struct wlan_host_mem_req { | ||
700 | /* ID of the request */ | ||
701 | __le32 req_id; | ||
702 | /* size of the of each unit */ | ||
703 | __le32 unit_size; | ||
704 | /* flags to indicate that | ||
705 | * the number of units is dependent | ||
706 | * on the number of resources (num vdevs, num peers, etc.) | ||
707 | */ | ||
708 | __le32 num_unit_info; | ||
709 | /* | ||
710 | * actual number of units to allocate. If the flags in num_unit_info | ||
711 | * indicate that the number of units is tied to the number of a | ||
712 | * particular resource to allocate, then the num_units field is set | ||
713 | * to 0 and the host will derive the number of units from the number | ||
714 | * of resources it is requesting. | ||
715 | */ | ||
716 | __le32 num_units; | ||
717 | } __packed; | ||
718 | |||
719 | #define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id) \ | ||
720 | ((((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \ | ||
721 | (1 << ((svc_id)%(sizeof(u32))))) != 0) | ||
722 | |||
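| /* | ||
|  * Editor's illustrative sketch (not part of this patch): example use of | ||
|  * WMI_SERVICE_IS_ENABLED on the service bitmap carried by the service | ||
|  * ready event below.  The bitmap is assumed to already be in host byte | ||
|  * order and svc_id to be one of the firmware service identifiers; the | ||
|  * wmi_example_* helper name is hypothetical. | ||
|  */ | ||
| static inline bool wmi_example_service_enabled(const u32 *wmi_svc_bmap, int svc_id) | ||
| { | ||
|         return WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id); | ||
| } | ||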
723 | /* | ||
724 | * The following struct holds optional payload for | ||
725 | * wmi_service_ready_event, e.g. for 11ac the target passes some of | ||
726 | * the device capabilities to the host. | ||
727 | */ | ||
728 | struct wmi_service_ready_event { | ||
729 | __le32 sw_version; | ||
730 | __le32 sw_version_1; | ||
731 | __le32 abi_version; | ||
732 | /* WMI_PHY_CAPABILITY */ | ||
733 | __le32 phy_capability; | ||
734 | /* Maximum number of frag table entries that SW will populate less 1 */ | ||
735 | __le32 max_frag_entry; | ||
736 | __le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE]; | ||
737 | __le32 num_rf_chains; | ||
738 | /* | ||
739 | * The following field is only valid for service type | ||
740 | * WMI_SERVICE_11AC | ||
741 | */ | ||
742 | __le32 ht_cap_info; /* WMI HT Capability */ | ||
743 | __le32 vht_cap_info; /* VHT capability info field of 802.11ac */ | ||
744 | __le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */ | ||
745 | __le32 hw_min_tx_power; | ||
746 | __le32 hw_max_tx_power; | ||
747 | struct hal_reg_capabilities hal_reg_capabilities; | ||
748 | __le32 sys_cap_info; | ||
749 | __le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */ | ||
750 | /* | ||
751 | * Max beacon and Probe Response IE offload size | ||
752 | * (includes optional P2P IEs) | ||
753 | */ | ||
754 | __le32 max_bcn_ie_size; | ||
755 | /* | ||
756 | * request to host to allocate a chunk of memory and pass it down to | ||
757 | * FW via WMI_INIT. FW uses this as FW extension memory for saving its | ||
758 | * data structures. Only valid for low latency interfaces like PCIE | ||
759 | * where FW can access this memory directly or by DMA. | ||
760 | */ | ||
761 | __le32 num_mem_reqs; | ||
762 | struct wlan_host_mem_req mem_reqs[1]; | ||
763 | } __packed; | ||
764 | |||
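| /* | ||
|  * Editor's illustrative sketch (not part of this patch): walking the | ||
|  * variable-length mem_reqs[] array at the tail of the service ready event | ||
|  * above.  The event buffer is assumed to have been length-checked already; | ||
|  * the wmi_example_* helper name is hypothetical. | ||
|  */ | ||
| static inline void wmi_example_walk_mem_reqs(const struct wmi_service_ready_event *ev) | ||
| { | ||
|         int i, n = __le32_to_cpu(ev->num_mem_reqs); | ||
| |||
|         for (i = 0; i < n; i++) { | ||
|                 const struct wlan_host_mem_req *req = &ev->mem_reqs[i]; | ||
|                 /* req->req_id, req->unit_size and req->num_units describe one pool */ | ||
|                 (void)req; | ||
|         } | ||
| } | ||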
765 | /* | ||
766 | * status consists of the init status/reason in the lower 16 bits and | ||
767 | * the ID of the module that returned the status in the upper 16 bits | ||
768 | */ | ||
769 | #define WLAN_INIT_STATUS_SUCCESS 0x0 | ||
770 | #define WLAN_GET_INIT_STATUS_REASON(status) ((status) & 0xffff) | ||
771 | #define WLAN_GET_INIT_STATUS_MODULE_ID(status) (((status) >> 16) & 0xffff) | ||
772 | |||
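| /* | ||
|  * Editor's illustrative sketch (not part of this patch): decoding an init | ||
|  * status word with the accessors above.  The helper name is hypothetical. | ||
|  */ | ||
| static inline bool wlan_example_init_ok(u32 status) | ||
| { | ||
|         /* WLAN_GET_INIT_STATUS_MODULE_ID(status) names the reporting module */ | ||
|         return WLAN_GET_INIT_STATUS_REASON(status) == WLAN_INIT_STATUS_SUCCESS; | ||
| } | ||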
773 | #define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ) | ||
774 | #define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ) | ||
775 | |||
776 | struct wmi_ready_event { | ||
777 | __le32 sw_version; | ||
778 | __le32 abi_version; | ||
779 | struct wmi_mac_addr mac_addr; | ||
780 | __le32 status; | ||
781 | } __packed; | ||
782 | |||
783 | struct wmi_resource_config { | ||
784 | /* number of virtual devices (VAPs) to support */ | ||
785 | __le32 num_vdevs; | ||
786 | |||
787 | /* number of peer nodes to support */ | ||
788 | __le32 num_peers; | ||
789 | |||
790 | /* | ||
791 | * In offload mode the target supports features like WOW, chatter and | ||
792 | * other protocol offloads. In order to support them some | ||
793 | * functionalities like reorder buffering and PN checking need to be | ||
794 | * done in the target. This determines the maximum number of peers | ||
795 | * supported by the target in offload mode. | ||
796 | */ | ||
797 | __le32 num_offload_peers; | ||
798 | |||
799 | /* For target-based RX reordering */ | ||
800 | __le32 num_offload_reorder_bufs; | ||
801 | |||
802 | /* number of keys per peer */ | ||
803 | __le32 num_peer_keys; | ||
804 | |||
805 | /* total number of TX/RX data TIDs */ | ||
806 | __le32 num_tids; | ||
807 | |||
808 | /* | ||
809 | * max skid for resolving hash collisions | ||
810 | * | ||
811 | * The address search table is sparse, so that if two MAC addresses | ||
812 | * result in the same hash value, the second of these conflicting | ||
813 | * entries can slide to the next index in the address search table, | ||
814 | * and use it, if it is unoccupied. This ast_skid_limit parameter | ||
815 | * specifies the upper bound on how many subsequent indices to search | ||
816 | * over to find an unoccupied space. | ||
817 | */ | ||
818 | __le32 ast_skid_limit; | ||
819 | |||
820 | /* | ||
821 | * the nominal chain mask for transmit | ||
822 | * | ||
823 | * The chain mask may be modified dynamically, e.g. to operate AP | ||
824 | * tx with a reduced number of chains if no clients are associated. | ||
825 | * This configuration parameter specifies the nominal chain-mask that | ||
826 | * should be used when not operating with a reduced set of tx chains. | ||
827 | */ | ||
828 | __le32 tx_chain_mask; | ||
829 | |||
830 | /* | ||
831 | * the nominal chain mask for receive | ||
832 | * | ||
833 | * The chain mask may be modified dynamically, e.g. for a client | ||
834 | * to use a reduced number of chains for receive if the traffic to | ||
835 | * the client is low enough that it doesn't require downlink MIMO | ||
836 | * or antenna diversity. | ||
837 | * This configuration parameter specifies the nominal chain-mask that | ||
838 | * should be used when not operating with a reduced set of rx chains. | ||
839 | */ | ||
840 | __le32 rx_chain_mask; | ||
841 | |||
842 | /* | ||
843 | * what rx reorder timeout (ms) to use for the AC | ||
844 | * | ||
845 | * Each WMM access class (voice, video, best-effort, background) will | ||
846 | * have its own timeout value to dictate how long to wait for missing | ||
847 | * rx MPDUs to arrive before flushing subsequent MPDUs that have | ||
848 | * already been received. | ||
849 | * This parameter specifies the timeout in milliseconds for each | ||
850 | * class. | ||
851 | */ | ||
852 | __le32 rx_timeout_pri_vi; | ||
853 | __le32 rx_timeout_pri_vo; | ||
854 | __le32 rx_timeout_pri_be; | ||
855 | __le32 rx_timeout_pri_bk; | ||
856 | |||
857 | /* | ||
858 | * what mode the rx should decap packets to | ||
859 | * | ||
860 | * MAC can decap to RAW (no decap), native wifi or Ethernet types | ||
861 | * This setting also determines the default TX behavior; however, TX | ||
862 | * behavior can be modified on a per-VAP basis during VAP init | ||
863 | */ | ||
864 | __le32 rx_decap_mode; | ||
865 | |||
866 | /* maximum number of scan requests that can be queued */ | ||
867 | __le32 scan_max_pending_reqs; | ||
868 | |||
869 | /* maximum VDEV that could use BMISS offload */ | ||
870 | __le32 bmiss_offload_max_vdev; | ||
871 | |||
872 | /* maximum VDEV that could use offload roaming */ | ||
873 | __le32 roam_offload_max_vdev; | ||
874 | |||
875 | /* maximum AP profiles that would push to offload roaming */ | ||
876 | __le32 roam_offload_max_ap_profiles; | ||
877 | |||
878 | /* | ||
879 | * how many groups to use for mcast->ucast conversion | ||
880 | * | ||
881 | * The target's WAL maintains a table to hold information regarding | ||
882 | * which peers belong to a given multicast group, so that if | ||
883 | * multicast->unicast conversion is enabled, the target can convert | ||
884 | * multicast tx frames to a series of unicast tx frames, to each | ||
885 | * peer within the multicast group. | ||
886 | * This num_mcast_groups configuration parameter tells the target how | ||
887 | * many multicast groups to provide storage for within its multicast | ||
888 | * group membership table. | ||
889 | */ | ||
890 | __le32 num_mcast_groups; | ||
891 | |||
892 | /* | ||
893 | * size to alloc for the mcast membership table | ||
894 | * | ||
895 | * This num_mcast_table_elems configuration parameter tells the | ||
896 | * target how many peer elements it needs to provide storage for in | ||
897 | * its multicast group membership table. | ||
898 | * These multicast group membership table elements are shared by the | ||
899 | * multicast groups stored within the table. | ||
900 | */ | ||
901 | __le32 num_mcast_table_elems; | ||
902 | |||
903 | /* | ||
904 | * whether/how to do multicast->unicast conversion | ||
905 | * | ||
906 | * This configuration parameter specifies whether the target should | ||
907 | * perform multicast --> unicast conversion on transmit, and if so, | ||
908 | * what to do if it finds no entries in its multicast group | ||
909 | * membership table for the multicast IP address in the tx frame. | ||
910 | * Configuration value: | ||
911 | * 0 -> Do not perform multicast to unicast conversion. | ||
912 | * 1 -> Convert multicast frames to unicast, if the IP multicast | ||
913 | * address from the tx frame is found in the multicast group | ||
914 | * membership table. If the IP multicast address is not found, | ||
915 | * drop the frame. | ||
916 | * 2 -> Convert multicast frames to unicast, if the IP multicast | ||
917 | * address from the tx frame is found in the multicast group | ||
918 | * membership table. If the IP multicast address is not found, | ||
919 | * transmit the frame as multicast. | ||
920 | */ | ||
921 | __le32 mcast2ucast_mode; | ||
922 | |||
923 | /* | ||
924 | * how much memory to allocate for a tx PPDU dbg log | ||
925 | * | ||
926 | * This parameter controls how much memory the target will allocate | ||
927 | * to store a log of tx PPDU meta-information (how large the PPDU | ||
928 | * was, when it was sent, whether it was successful, etc.) | ||
929 | */ | ||
930 | __le32 tx_dbg_log_size; | ||
931 | |||
932 | /* how many AST entries to be allocated for WDS */ | ||
933 | __le32 num_wds_entries; | ||
934 | |||
935 | /* | ||
936 | * MAC DMA burst size. For example, for a PCI target the limit can be | ||
937 | * 0 - default, 1 - 256 bytes | ||
938 | */ | ||
939 | __le32 dma_burst_size; | ||
940 | |||
941 | /* | ||
942 | * Fixed delimiters to be inserted after every MPDU to | ||
943 | * account for interface latency to avoid underrun. | ||
944 | */ | ||
945 | __le32 mac_aggr_delim; | ||
946 | |||
947 | /* | ||
948 | * determines whether the target is responsible for detecting duplicate | ||
949 | * non-aggregate MPDUs and timing out stale fragments. | ||
950 | * | ||
951 | * A-MPDU reordering is always performed on the target. | ||
952 | * | ||
953 | * 0: target responsible for frag timeout and dup checking | ||
954 | * 1: host responsible for frag timeout and dup checking | ||
955 | */ | ||
956 | __le32 rx_skip_defrag_timeout_dup_detection_check; | ||
957 | |||
958 | /* | ||
959 | * Configuration for VoW: | ||
960 | * number of video nodes to be supported | ||
961 | * and max number of descriptors for each video link (node). | ||
962 | */ | ||
963 | __le32 vow_config; | ||
964 | |||
965 | /* maximum VDEV that could use GTK offload */ | ||
966 | __le32 gtk_offload_max_vdev; | ||
967 | |||
968 | /* Number of msdu descriptors target should use */ | ||
969 | __le32 num_msdu_desc; | ||
970 | |||
971 | /* | ||
972 | * Max. number of Tx fragments per MSDU | ||
973 | * This parameter controls the max number of Tx fragments per MSDU. | ||
974 | * This is sent by the target as part of the WMI_SERVICE_READY event | ||
975 | * and is overridden by the OS shim as required. | ||
976 | */ | ||
977 | __le32 max_frag_entries; | ||
978 | } __packed; | ||
979 | |||
980 | /* structure describing a host memory chunk. */ | ||
981 | struct host_memory_chunk { | ||
982 | /* id of the request that is passed up in service ready */ | ||
983 | __le32 req_id; | ||
984 | /* the physical address of the memory chunk */ | ||
985 | __le32 ptr; | ||
986 | /* size of the chunk */ | ||
987 | __le32 size; | ||
988 | } __packed; | ||
989 | |||
990 | struct wmi_init_cmd { | ||
991 | struct wmi_resource_config resource_config; | ||
992 | __le32 num_host_mem_chunks; | ||
993 | |||
994 | /* | ||
995 | * variable number of host memory chunks. | ||
996 | * This should be the last element in the structure | ||
997 | */ | ||
998 | struct host_memory_chunk host_mem_chunks[1]; | ||
999 | } __packed; | ||
1000 | |||
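| /* | ||
|  * Editor's illustrative sketch (not part of this patch): the init command | ||
|  * carries a variable number of host memory chunks, so the buffer to send is | ||
|  * the base structure plus any chunks beyond the single element already | ||
|  * declared in host_mem_chunks[1].  The helper name is hypothetical. | ||
|  */ | ||
| static inline size_t wmi_example_init_cmd_len(u32 num_chunks) | ||
| { | ||
|         size_t len = sizeof(struct wmi_init_cmd); | ||
| |||
|         if (num_chunks > 1) | ||
|                 len += (num_chunks - 1) * sizeof(struct host_memory_chunk); | ||
| |||
|         return len; | ||
| } | ||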
1001 | /* TLV for channel list */ | ||
1002 | struct wmi_chan_list { | ||
1003 | __le32 tag; /* WMI_CHAN_LIST_TAG */ | ||
1004 | __le32 num_chan; | ||
1005 | __le32 channel_list[0]; | ||
1006 | } __packed; | ||
1007 | |||
1008 | struct wmi_bssid_list { | ||
1009 | __le32 tag; /* WMI_BSSID_LIST_TAG */ | ||
1010 | __le32 num_bssid; | ||
1011 | struct wmi_mac_addr bssid_list[0]; | ||
1012 | } __packed; | ||
1013 | |||
1014 | struct wmi_ie_data { | ||
1015 | __le32 tag; /* WMI_IE_TAG */ | ||
1016 | __le32 ie_len; | ||
1017 | u8 ie_data[0]; | ||
1018 | } __packed; | ||
1019 | |||
1020 | struct wmi_ssid { | ||
1021 | __le32 ssid_len; | ||
1022 | u8 ssid[32]; | ||
1023 | } __packed; | ||
1024 | |||
1025 | struct wmi_ssid_list { | ||
1026 | __le32 tag; /* WMI_SSID_LIST_TAG */ | ||
1027 | __le32 num_ssids; | ||
1028 | struct wmi_ssid ssids[0]; | ||
1029 | } __packed; | ||
1030 | |||
1031 | /* prefix used by scan requestor ids on the host */ | ||
1032 | #define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000 | ||
1033 | |||
1034 | /* prefix used by scan request ids generated on the host */ | ||
1035 | /* host cycles through the lower 12 bits to generate ids */ | ||
1036 | #define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000 | ||
1037 | |||
1038 | #define WLAN_SCAN_PARAMS_MAX_SSID 16 | ||
1039 | #define WLAN_SCAN_PARAMS_MAX_BSSID 4 | ||
1040 | #define WLAN_SCAN_PARAMS_MAX_IE_LEN 256 | ||
1041 | |||
1042 | /* Scan priority numbers must be sequential, starting with 0 */ | ||
1043 | enum wmi_scan_priority { | ||
1044 | WMI_SCAN_PRIORITY_VERY_LOW = 0, | ||
1045 | WMI_SCAN_PRIORITY_LOW, | ||
1046 | WMI_SCAN_PRIORITY_MEDIUM, | ||
1047 | WMI_SCAN_PRIORITY_HIGH, | ||
1048 | WMI_SCAN_PRIORITY_VERY_HIGH, | ||
1049 | WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */ | ||
1050 | }; | ||
1051 | |||
1052 | struct wmi_start_scan_cmd { | ||
1053 | /* Scan ID */ | ||
1054 | __le32 scan_id; | ||
1055 | /* Scan requestor ID */ | ||
1056 | __le32 scan_req_id; | ||
1057 | /* VDEV id(interface) that is requesting scan */ | ||
1058 | __le32 vdev_id; | ||
1059 | /* Scan Priority, input to scan scheduler */ | ||
1060 | __le32 scan_priority; | ||
1061 | /* Scan events subscription */ | ||
1062 | __le32 notify_scan_events; | ||
1063 | /* dwell time in msec on active channels */ | ||
1064 | __le32 dwell_time_active; | ||
1065 | /* dwell time in msec on passive channels */ | ||
1066 | __le32 dwell_time_passive; | ||
1067 | /* | ||
1068 | * min time in msec on the BSS channel, only valid if at least one | ||
1069 | * VDEV is active | ||
1070 | */ | ||
1071 | __le32 min_rest_time; | ||
1072 | /* | ||
1073 | * max rest time in msec on the BSS channel, only valid if at least | ||
1074 | * one VDEV is active | ||
1075 | */ | ||
1076 | /* | ||
1077 | * The scanner will rest on the BSS channel for at least min_rest_time. | ||
1078 | * After min_rest_time the scanner will start checking for tx/rx | ||
1079 | * activity on all VDEVs. If there is no activity the scanner will | ||
1080 | * switch to off channel. If there is activity the scanner will keep | ||
1081 | * the radio on the BSS channel until max_rest_time expires. At | ||
1082 | * max_rest_time the scanner will switch to off channel irrespective | ||
1083 | * of activity. Activity is determined by the idle_time parameter. | ||
1084 | */ | ||
1085 | __le32 max_rest_time; | ||
1086 | /* | ||
1087 | * time before sending next set of probe requests. | ||
1088 | * The scanner keeps repeating probe requests transmission with | ||
1089 | * period specified by repeat_probe_time. | ||
1090 | * The number of probe requests specified depends on the ssid_list | ||
1091 | * and bssid_list | ||
1092 | */ | ||
1093 | __le32 repeat_probe_time; | ||
1094 | /* time in msec between 2 consecutive probe requests within a set. */ | ||
1095 | __le32 probe_spacing_time; | ||
1096 | /* | ||
1097 | * data inactivity time in msec on bss channel that will be used by | ||
1098 | * scanner for measuring the inactivity. | ||
1099 | */ | ||
1100 | __le32 idle_time; | ||
1101 | /* maximum time in msec allowed for scan */ | ||
1102 | __le32 max_scan_time; | ||
1103 | /* | ||
1104 | * delay in msec before sending first probe request after switching | ||
1105 | * to a channel | ||
1106 | */ | ||
1107 | __le32 probe_delay; | ||
1108 | /* Scan control flags */ | ||
1109 | __le32 scan_ctrl_flags; | ||
1110 | |||
1111 | /* Burst duration time in msecs */ | ||
1112 | __le32 burst_duration; | ||
1113 | /* | ||
1114 | * TLV (tag length value) parameters follow the scan_cmd structure. | ||
1115 | * The TLVs can contain a channel list, bssid list, ssid list and | ||
1116 | * IE data. The TLV tags are defined above. | ||
1117 | */ | ||
1118 | } __packed; | ||
1119 | |||
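| /* | ||
|  * Editor's illustrative sketch (not part of this patch): the start scan | ||
|  * command is followed by optional TLVs (channel list, bssid list, ssid | ||
|  * list, IE data).  This hypothetical helper only sizes a buffer for the | ||
|  * command plus a channel-list TLV and an IE TLV; padding the IE TLV to a | ||
|  * 4-byte boundary is an assumption, and the firmware-defined TLV order is | ||
|  * not shown. | ||
|  */ | ||
| static inline size_t wmi_example_start_scan_len(u32 n_channels, u32 ie_len) | ||
| { | ||
|         size_t len = sizeof(struct wmi_start_scan_cmd); | ||
| |||
|         if (n_channels) | ||
|                 len += sizeof(struct wmi_chan_list) + n_channels * sizeof(__le32); | ||
|         if (ie_len) | ||
|                 len += sizeof(struct wmi_ie_data) + roundup(ie_len, 4); | ||
| |||
|         return len; | ||
| } | ||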
1120 | struct wmi_ssid_arg { | ||
1121 | int len; | ||
1122 | const u8 *ssid; | ||
1123 | }; | ||
1124 | |||
1125 | struct wmi_bssid_arg { | ||
1126 | const u8 *bssid; | ||
1127 | }; | ||
1128 | |||
1129 | struct wmi_start_scan_arg { | ||
1130 | u32 scan_id; | ||
1131 | u32 scan_req_id; | ||
1132 | u32 vdev_id; | ||
1133 | u32 scan_priority; | ||
1134 | u32 notify_scan_events; | ||
1135 | u32 dwell_time_active; | ||
1136 | u32 dwell_time_passive; | ||
1137 | u32 min_rest_time; | ||
1138 | u32 max_rest_time; | ||
1139 | u32 repeat_probe_time; | ||
1140 | u32 probe_spacing_time; | ||
1141 | u32 idle_time; | ||
1142 | u32 max_scan_time; | ||
1143 | u32 probe_delay; | ||
1144 | u32 scan_ctrl_flags; | ||
1145 | |||
1146 | u32 ie_len; | ||
1147 | u32 n_channels; | ||
1148 | u32 n_ssids; | ||
1149 | u32 n_bssids; | ||
1150 | |||
1151 | u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN]; | ||
1152 | u32 channels[64]; | ||
1153 | struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID]; | ||
1154 | struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID]; | ||
1155 | }; | ||
1156 | |||
1157 | /* scan control flags */ | ||
1158 | |||
1159 | /* passively scan all channels including active channels */ | ||
1160 | #define WMI_SCAN_FLAG_PASSIVE 0x1 | ||
1161 | /* add a wildcard SSID probe request even though ssid_list is specified. */ | ||
1162 | #define WMI_SCAN_ADD_BCAST_PROBE_REQ 0x2 | ||
1163 | /* add cck rates to rates/xrate ie for the generated probe request */ | ||
1164 | #define WMI_SCAN_ADD_CCK_RATES 0x4 | ||
1165 | /* add ofdm rates to rates/xrate ie for the generated probe request */ | ||
1166 | #define WMI_SCAN_ADD_OFDM_RATES 0x8 | ||
1167 | /* To enable indication of Chan load and Noise floor to host */ | ||
1168 | #define WMI_SCAN_CHAN_STAT_EVENT 0x10 | ||
1169 | /* Filter Probe request frames */ | ||
1170 | #define WMI_SCAN_FILTER_PROBE_REQ 0x20 | ||
1171 | /* When set, DFS channels will not be scanned */ | ||
1172 | #define WMI_SCAN_BYPASS_DFS_CHN 0x40 | ||
1173 | /* The FW scan engine may choose to bail out on errors. | ||
1174 | * Allow the driver to have influence over that. */ | ||
1175 | #define WMI_SCAN_CONTINUE_ON_ERROR 0x80 | ||
1176 | |||
1177 | /* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */ | ||
1178 | #define WMI_SCAN_CLASS_MASK 0xFF000000 | ||
1179 | |||
1180 | |||
1181 | enum wmi_stop_scan_type { | ||
1182 | WMI_SCAN_STOP_ONE = 0x00000000, /* stop by scan_id */ | ||
1183 | WMI_SCAN_STOP_VDEV_ALL = 0x01000000, /* stop by vdev_id */ | ||
1184 | WMI_SCAN_STOP_ALL = 0x04000000, /* stop all scans */ | ||
1185 | }; | ||
1186 | |||
1187 | struct wmi_stop_scan_cmd { | ||
1188 | __le32 scan_req_id; | ||
1189 | __le32 scan_id; | ||
1190 | __le32 req_type; | ||
1191 | __le32 vdev_id; | ||
1192 | } __packed; | ||
1193 | |||
1194 | struct wmi_stop_scan_arg { | ||
1195 | u32 req_id; | ||
1196 | enum wmi_stop_scan_type req_type; | ||
1197 | union { | ||
1198 | u32 scan_id; | ||
1199 | u32 vdev_id; | ||
1200 | } u; | ||
1201 | }; | ||
1202 | |||
1203 | struct wmi_scan_chan_list_cmd { | ||
1204 | __le32 num_scan_chans; | ||
1205 | struct wmi_channel chan_info[0]; | ||
1206 | } __packed; | ||
1207 | |||
1208 | struct wmi_scan_chan_list_arg { | ||
1209 | u32 n_channels; | ||
1210 | struct wmi_channel_arg *channels; | ||
1211 | }; | ||
1212 | |||
1213 | enum wmi_bss_filter { | ||
1214 | WMI_BSS_FILTER_NONE = 0, /* no beacons forwarded */ | ||
1215 | WMI_BSS_FILTER_ALL, /* all beacons forwarded */ | ||
1216 | WMI_BSS_FILTER_PROFILE, /* only beacons matching profile */ | ||
1217 | WMI_BSS_FILTER_ALL_BUT_PROFILE, /* all but beacons matching profile */ | ||
1218 | WMI_BSS_FILTER_CURRENT_BSS, /* only beacons matching current BSS */ | ||
1219 | WMI_BSS_FILTER_ALL_BUT_BSS, /* all but beacons matching BSS */ | ||
1220 | WMI_BSS_FILTER_PROBED_SSID, /* beacons matching probed ssid */ | ||
1221 | WMI_BSS_FILTER_LAST_BSS, /* marker only */ | ||
1222 | }; | ||
1223 | |||
1224 | enum wmi_scan_event_type { | ||
1225 | WMI_SCAN_EVENT_STARTED = 0x1, | ||
1226 | WMI_SCAN_EVENT_COMPLETED = 0x2, | ||
1227 | WMI_SCAN_EVENT_BSS_CHANNEL = 0x4, | ||
1228 | WMI_SCAN_EVENT_FOREIGN_CHANNEL = 0x8, | ||
1229 | WMI_SCAN_EVENT_DEQUEUED = 0x10, | ||
1230 | WMI_SCAN_EVENT_PREEMPTED = 0x20, /* possibly by high-prio scan */ | ||
1231 | WMI_SCAN_EVENT_START_FAILED = 0x40, | ||
1232 | WMI_SCAN_EVENT_RESTARTED = 0x80, | ||
1233 | WMI_SCAN_EVENT_MAX = 0x8000 | ||
1234 | }; | ||
1235 | |||
1236 | enum wmi_scan_completion_reason { | ||
1237 | WMI_SCAN_REASON_COMPLETED, | ||
1238 | WMI_SCAN_REASON_CANCELLED, | ||
1239 | WMI_SCAN_REASON_PREEMPTED, | ||
1240 | WMI_SCAN_REASON_TIMEDOUT, | ||
1241 | WMI_SCAN_REASON_MAX, | ||
1242 | }; | ||
1243 | |||
1244 | struct wmi_scan_event { | ||
1245 | __le32 event_type; /* %WMI_SCAN_EVENT_ */ | ||
1246 | __le32 reason; /* %WMI_SCAN_REASON_ */ | ||
1247 | __le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */ | ||
1248 | __le32 scan_req_id; | ||
1249 | __le32 scan_id; | ||
1250 | __le32 vdev_id; | ||
1251 | } __packed; | ||
1252 | |||
1253 | /* | ||
1254 | * This defines how much headroom is kept in the | ||
1255 | * receive frame between the descriptor and the | ||
1256 | * payload, in order for the WMI PHY error and | ||
1257 | * management handler to insert header contents. | ||
1258 | * | ||
1259 | * This is in bytes. | ||
1260 | */ | ||
1261 | #define WMI_MGMT_RX_HDR_HEADROOM 52 | ||
1262 | |||
1263 | /* | ||
1264 | * This event will be used for sending scan results | ||
1265 | * as well as rx mgmt frames to the host. The rx buffer | ||
1266 | * will be sent as part of this WMI event. It would be a | ||
1267 | * good idea to pass all the fields in the RX status | ||
1268 | * descriptor up to the host. | ||
1269 | */ | ||
1270 | struct wmi_mgmt_rx_hdr { | ||
1271 | __le32 channel; | ||
1272 | __le32 snr; | ||
1273 | __le32 rate; | ||
1274 | __le32 phy_mode; | ||
1275 | __le32 buf_len; | ||
1276 | __le32 status; /* %WMI_RX_STATUS_ */ | ||
1277 | } __packed; | ||
1278 | |||
1279 | struct wmi_mgmt_rx_event { | ||
1280 | struct wmi_mgmt_rx_hdr hdr; | ||
1281 | u8 buf[0]; | ||
1282 | } __packed; | ||
1283 | |||
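| /* | ||
|  * Editor's illustrative sketch (not part of this patch): pulling the 802.11 | ||
|  * frame out of a management rx event.  Checking hdr.status against the | ||
|  * WMI_RX_STATUS_* flags below is left to the caller; the helper name is | ||
|  * hypothetical. | ||
|  */ | ||
| static inline const u8 *wmi_example_mgmt_rx_frame(const struct wmi_mgmt_rx_event *ev, | ||
|                                                   u32 *len) | ||
| { | ||
|         *len = __le32_to_cpu(ev->hdr.buf_len); | ||
|         return ev->buf; | ||
| } | ||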
1284 | #define WMI_RX_STATUS_OK 0x00 | ||
1285 | #define WMI_RX_STATUS_ERR_CRC 0x01 | ||
1286 | #define WMI_RX_STATUS_ERR_DECRYPT 0x08 | ||
1287 | #define WMI_RX_STATUS_ERR_MIC 0x10 | ||
1288 | #define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20 | ||
1289 | |||
1290 | struct wmi_single_phyerr_rx_hdr { | ||
1291 | /* TSF timestamp */ | ||
1292 | __le32 tsf_timestamp; | ||
1293 | |||
1294 | /* | ||
1295 | * Current freq1, freq2 | ||
1296 | * | ||
1297 | * [7:0]: freq1[lo] | ||
1298 | * [15:8] : freq1[hi] | ||
1299 | * [23:16]: freq2[lo] | ||
1300 | * [31:24]: freq2[hi] | ||
1301 | */ | ||
1302 | __le16 freq1; | ||
1303 | __le16 freq2; | ||
1304 | |||
1305 | /* | ||
1306 | * Combined RSSI over all chains and channel width for this PHY error | ||
1307 | * | ||
1308 | * [7:0]: RSSI combined | ||
1309 | * [15:8]: Channel width (MHz) | ||
1310 | * [23:16]: PHY error code | ||
1311 | * [31:24]: reserved (future use) | ||
1312 | */ | ||
1313 | u8 rssi_combined; | ||
1314 | u8 chan_width_mhz; | ||
1315 | u8 phy_err_code; | ||
1316 | u8 rsvd0; | ||
1317 | |||
1318 | /* | ||
1319 | * RSSI on chain 0 through 3 | ||
1320 | * | ||
1321 | * This is formatted the same as the PPDU_START RX descriptor | ||
1322 | * field: | ||
1323 | * | ||
1324 | * [7:0]: pri20 | ||
1325 | * [15:8]: sec20 | ||
1326 | * [23:16]: sec40 | ||
1327 | * [31:24]: sec80 | ||
1328 | */ | ||
1329 | |||
1330 | __le32 rssi_chain0; | ||
1331 | __le32 rssi_chain1; | ||
1332 | __le32 rssi_chain2; | ||
1333 | __le32 rssi_chain3; | ||
1334 | |||
1335 | /* | ||
1336 | * Last calibrated NF value for chain 0 through 3 | ||
1337 | * | ||
1338 | * nf_list_1: | ||
1339 | * | ||
1340 | * + [15:0] - chain 0 | ||
1341 | * + [31:16] - chain 1 | ||
1342 | * | ||
1343 | * nf_list_2: | ||
1344 | * | ||
1345 | * + [15:0] - chain 2 | ||
1346 | * + [31:16] - chain 3 | ||
1347 | */ | ||
1348 | __le32 nf_list_1; | ||
1349 | __le32 nf_list_2; | ||
1350 | |||
1351 | |||
1352 | /* Length of the frame */ | ||
1353 | __le32 buf_len; | ||
1354 | } __packed; | ||
1355 | |||
1356 | struct wmi_single_phyerr_rx_event { | ||
1357 | /* Phy error event header */ | ||
1358 | struct wmi_single_phyerr_rx_hdr hdr; | ||
1359 | /* frame buffer */ | ||
1360 | u8 bufp[0]; | ||
1361 | } __packed; | ||
1362 | |||
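| /* | ||
|  * Editor's illustrative sketch (not part of this patch): unpacking the | ||
|  * per-chain fields described in the comments above.  Segment 0 is pri20, | ||
|  * 1 is sec20, 2 is sec40, 3 is sec80; noise floor values are signed 16-bit | ||
|  * quantities packed two per word.  The helper names are hypothetical. | ||
|  */ | ||
| static inline u8 wmi_example_phyerr_rssi(__le32 rssi_chain, int segment) | ||
| { | ||
|         return (__le32_to_cpu(rssi_chain) >> (segment * 8)) & 0xff; | ||
| } | ||
| |||
| static inline s16 wmi_example_phyerr_nf(__le32 nf_list, int idx) | ||
| { | ||
|         return (s16)((__le32_to_cpu(nf_list) >> (idx * 16)) & 0xffff); | ||
| } | ||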
1363 | struct wmi_comb_phyerr_rx_hdr { | ||
1364 | /* Number of PHY error events */ | ||
1365 | __le32 num_phyerr_events; | ||
1366 | __le32 tsf_l32; | ||
1367 | __le32 tsf_u32; | ||
1368 | } __packed; | ||
1369 | |||
1370 | struct wmi_comb_phyerr_rx_event { | ||
1371 | /* Combined PHY error event header */ | ||
1372 | struct wmi_comb_phyerr_rx_hdr hdr; | ||
1373 | /* | ||
1374 | * frame buffer - contains multiple payloads in the order: | ||
1375 | * header - payload, header - payload... | ||
1376 | * (The header is of type: wmi_single_phyerr_rx_hdr) | ||
1377 | */ | ||
1378 | u8 bufp[0]; | ||
1379 | } __packed; | ||
1380 | |||
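| /* | ||
|  * Editor's illustrative sketch (not part of this patch): walking the | ||
|  * header/payload pairs packed into a combined PHY error event.  "left" is | ||
|  * the number of payload bytes following the combined header; real code | ||
|  * would also hand each sub-event to a handler instead of discarding it. | ||
|  * The helper name is hypothetical. | ||
|  */ | ||
| static inline void wmi_example_walk_phyerrs(const struct wmi_comb_phyerr_rx_event *ev, | ||
|                                             size_t left) | ||
| { | ||
|         const u8 *pos = ev->bufp; | ||
|         int i, n = __le32_to_cpu(ev->hdr.num_phyerr_events); | ||
| |||
|         for (i = 0; i < n; i++) { | ||
|                 const struct wmi_single_phyerr_rx_hdr *hdr; | ||
|                 size_t len; | ||
| |||
|                 if (left < sizeof(*hdr)) | ||
|                         break; | ||
|                 hdr = (const void *)pos; | ||
|                 len = sizeof(*hdr) + __le32_to_cpu(hdr->buf_len); | ||
|                 if (len > left) | ||
|                         break; | ||
|                 pos += len; | ||
|                 left -= len; | ||
|         } | ||
| } | ||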
1381 | struct wmi_mgmt_tx_hdr { | ||
1382 | __le32 vdev_id; | ||
1383 | struct wmi_mac_addr peer_macaddr; | ||
1384 | __le32 tx_rate; | ||
1385 | __le32 tx_power; | ||
1386 | __le32 buf_len; | ||
1387 | } __packed; | ||
1388 | |||
1389 | struct wmi_mgmt_tx_cmd { | ||
1390 | struct wmi_mgmt_tx_hdr hdr; | ||
1391 | u8 buf[0]; | ||
1392 | } __packed; | ||
1393 | |||
1394 | struct wmi_echo_event { | ||
1395 | __le32 value; | ||
1396 | } __packed; | ||
1397 | |||
1398 | struct wmi_echo_cmd { | ||
1399 | __le32 value; | ||
1400 | } __packed; | ||
1401 | |||
1402 | |||
1403 | struct wmi_pdev_set_regdomain_cmd { | ||
1404 | __le32 reg_domain; | ||
1405 | __le32 reg_domain_2G; | ||
1406 | __le32 reg_domain_5G; | ||
1407 | __le32 conformance_test_limit_2G; | ||
1408 | __le32 conformance_test_limit_5G; | ||
1409 | } __packed; | ||
1410 | |||
1411 | /* Command to set/unset chip in quiet mode */ | ||
1412 | struct wmi_pdev_set_quiet_cmd { | ||
1413 | /* period in TUs */ | ||
1414 | __le32 period; | ||
1415 | |||
1416 | /* duration in TUs */ | ||
1417 | __le32 duration; | ||
1418 | |||
1419 | /* offset in TUs */ | ||
1420 | __le32 next_start; | ||
1421 | |||
1422 | /* enable/disable */ | ||
1423 | __le32 enabled; | ||
1424 | } __packed; | ||
1425 | |||
1426 | |||
1427 | /* | ||
1428 | * 802.11g protection mode. | ||
1429 | */ | ||
1430 | enum ath10k_protmode { | ||
1431 | ATH10K_PROT_NONE = 0, /* no protection */ | ||
1432 | ATH10K_PROT_CTSONLY = 1, /* CTS to self */ | ||
1433 | ATH10K_PROT_RTSCTS = 2, /* RTS-CTS */ | ||
1434 | }; | ||
1435 | |||
1436 | enum wmi_beacon_gen_mode { | ||
1437 | WMI_BEACON_STAGGERED_MODE = 0, | ||
1438 | WMI_BEACON_BURST_MODE = 1 | ||
1439 | }; | ||
1440 | |||
1441 | enum wmi_csa_event_ies_present_flag { | ||
1442 | WMI_CSA_IE_PRESENT = 0x00000001, | ||
1443 | WMI_XCSA_IE_PRESENT = 0x00000002, | ||
1444 | WMI_WBW_IE_PRESENT = 0x00000004, | ||
1445 | WMI_CSWARP_IE_PRESENT = 0x00000008, | ||
1446 | }; | ||
1447 | |||
1448 | /* wmi CSA receive event from beacon frame */ | ||
1449 | struct wmi_csa_event { | ||
1450 | __le32 i_fc_dur; | ||
1451 | /* Bit 0-15: FC */ | ||
1452 | /* Bit 16-31: DUR */ | ||
1453 | struct wmi_mac_addr i_addr1; | ||
1454 | struct wmi_mac_addr i_addr2; | ||
1455 | __le32 csa_ie[2]; | ||
1456 | __le32 xcsa_ie[2]; | ||
1457 | __le32 wb_ie[2]; | ||
1458 | __le32 cswarp_ie; | ||
1459 | __le32 ies_present_flag; /* wmi_csa_event_ies_present_flag */ | ||
1460 | } __packed; | ||
1461 | |||
1462 | /* the definition of different PDEV parameters */ | ||
1463 | #define PDEV_DEFAULT_STATS_UPDATE_PERIOD 500 | ||
1464 | #define VDEV_DEFAULT_STATS_UPDATE_PERIOD 500 | ||
1465 | #define PEER_DEFAULT_STATS_UPDATE_PERIOD 500 | ||
1466 | |||
1467 | enum wmi_pdev_param { | ||
1468 | /* TX chain mask */ | ||
1469 | WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1, | ||
1470 | /* RX chain mask */ | ||
1471 | WMI_PDEV_PARAM_RX_CHAIN_MASK, | ||
1472 | /* TX power limit for 2G Radio */ | ||
1473 | WMI_PDEV_PARAM_TXPOWER_LIMIT2G, | ||
1474 | /* TX power limit for 5G Radio */ | ||
1475 | WMI_PDEV_PARAM_TXPOWER_LIMIT5G, | ||
1476 | /* TX power scale */ | ||
1477 | WMI_PDEV_PARAM_TXPOWER_SCALE, | ||
1478 | /* Beacon generation mode. 0: host, 1: target */ | ||
1479 | WMI_PDEV_PARAM_BEACON_GEN_MODE, | ||
1480 | /* Beacon TX mode. 0: staggered, 1: burst */ | ||
1481 | WMI_PDEV_PARAM_BEACON_TX_MODE, | ||
1482 | /* | ||
1483 | * Resource manager off-channel mode. | ||
1484 | * 0: turn off off-channel mode, 1: turn on off-channel mode | ||
1485 | */ | ||
1486 | WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE, | ||
1487 | /* | ||
1488 | * Protection mode: | ||
1489 | * 0: no protection 1:use CTS-to-self 2: use RTS/CTS | ||
1490 | */ | ||
1491 | WMI_PDEV_PARAM_PROTECTION_MODE, | ||
1492 | /* Dynamic bandwidth 0: disable 1: enable */ | ||
1493 | WMI_PDEV_PARAM_DYNAMIC_BW, | ||
1494 | /* Non-aggregate / 11g sw retry threshold. 0 - disable */ | ||
1495 | WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH, | ||
1496 | /* aggregate sw retry threshold. 0 - disable */ | ||
1497 | WMI_PDEV_PARAM_AGG_SW_RETRY_TH, | ||
1498 | /* Station kickout threshold (number of consecutive failures). 0 - disable */ | ||
1499 | WMI_PDEV_PARAM_STA_KICKOUT_TH, | ||
1500 | /* Aggregate size scaling configuration per AC */ | ||
1501 | WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING, | ||
1502 | /* LTR enable */ | ||
1503 | WMI_PDEV_PARAM_LTR_ENABLE, | ||
1504 | /* LTR latency for BE, in us */ | ||
1505 | WMI_PDEV_PARAM_LTR_AC_LATENCY_BE, | ||
1506 | /* LTR latency for BK, in us */ | ||
1507 | WMI_PDEV_PARAM_LTR_AC_LATENCY_BK, | ||
1508 | /* LTR latency for VI, in us */ | ||
1509 | WMI_PDEV_PARAM_LTR_AC_LATENCY_VI, | ||
1510 | /* LTR latency for VO, in us */ | ||
1511 | WMI_PDEV_PARAM_LTR_AC_LATENCY_VO, | ||
1512 | /* LTR AC latency timeout, in ms */ | ||
1513 | WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT, | ||
1514 | /* LTR platform latency override, in us */ | ||
1515 | WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE, | ||
1516 | /* LTR-RX override, in us */ | ||
1517 | WMI_PDEV_PARAM_LTR_RX_OVERRIDE, | ||
1518 | /* Tx activity timeout for LTR, in us */ | ||
1519 | WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT, | ||
1520 | /* L1SS state machine enable */ | ||
1521 | WMI_PDEV_PARAM_L1SS_ENABLE, | ||
1522 | /* Deep sleep state machine enable */ | ||
1523 | WMI_PDEV_PARAM_DSLEEP_ENABLE, | ||
1524 | /* RX buffering flush enable */ | ||
1525 | WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH, | ||
1526 | /* RX buffering watermark */ | ||
1527 | WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK, | ||
1528 | /* RX buffering timeout enable */ | ||
1529 | WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN, | ||
1530 | /* RX buffering timeout value */ | ||
1531 | WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE, | ||
1532 | /* pdev level stats update period in ms */ | ||
1533 | WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD, | ||
1534 | /* vdev level stats update period in ms */ | ||
1535 | WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD, | ||
1536 | /* peer level stats update period in ms */ | ||
1537 | WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD, | ||
1538 | /* beacon filter status update period */ | ||
1539 | WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, | ||
1540 | /* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */ | ||
1541 | WMI_PDEV_PARAM_PMF_QOS, | ||
1542 | /* Access category on which ARP frames are sent */ | ||
1543 | WMI_PDEV_PARAM_ARP_AC_OVERRIDE, | ||
1544 | /* DCS configuration */ | ||
1545 | WMI_PDEV_PARAM_DCS, | ||
1546 | /* Enable/Disable ANI on target */ | ||
1547 | WMI_PDEV_PARAM_ANI_ENABLE, | ||
1548 | /* configure the ANI polling period */ | ||
1549 | WMI_PDEV_PARAM_ANI_POLL_PERIOD, | ||
1550 | /* configure the ANI listening period */ | ||
1551 | WMI_PDEV_PARAM_ANI_LISTEN_PERIOD, | ||
1552 | /* configure OFDM immunity level */ | ||
1553 | WMI_PDEV_PARAM_ANI_OFDM_LEVEL, | ||
1554 | /* configure CCK immunity level */ | ||
1555 | WMI_PDEV_PARAM_ANI_CCK_LEVEL, | ||
1556 | /* Enable/Disable CDD for 1x1 STAs in rate control module */ | ||
1557 | WMI_PDEV_PARAM_DYNTXCHAIN, | ||
1558 | /* Enable/Disable proxy STA */ | ||
1559 | WMI_PDEV_PARAM_PROXY_STA, | ||
1560 | /* Enable/Disable low power state when all VDEVs are inactive/idle. */ | ||
1561 | WMI_PDEV_PARAM_IDLE_PS_CONFIG, | ||
1562 | /* Enable/Disable power gating sleep */ | ||
1563 | WMI_PDEV_PARAM_POWER_GATING_SLEEP, | ||
1564 | }; | ||
1565 | |||
1566 | struct wmi_pdev_set_param_cmd { | ||
1567 | __le32 param_id; | ||
1568 | __le32 param_value; | ||
1569 | } __packed; | ||
1570 | |||
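| /* | ||
|  * Editor's illustrative sketch (not part of this patch): filling a pdev | ||
|  * set-param command, here for the TX chain mask.  Allocating the command | ||
|  * buffer and handing it to the WMI transport is driver-specific and | ||
|  * omitted; the helper name is hypothetical. | ||
|  */ | ||
| static inline void wmi_example_fill_pdev_param(struct wmi_pdev_set_param_cmd *cmd, | ||
|                                                u32 tx_chain_mask) | ||
| { | ||
|         cmd->param_id = __cpu_to_le32(WMI_PDEV_PARAM_TX_CHAIN_MASK); | ||
|         cmd->param_value = __cpu_to_le32(tx_chain_mask); | ||
| } | ||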
1571 | struct wmi_pdev_get_tpc_config_cmd { | ||
1572 | /* parameter */ | ||
1573 | __le32 param; | ||
1574 | } __packed; | ||
1575 | |||
1576 | #define WMI_TPC_RATE_MAX 160 | ||
1577 | #define WMI_TPC_TX_N_CHAIN 4 | ||
1578 | |||
1579 | enum wmi_tpc_config_event_flag { | ||
1580 | WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD = 0x1, | ||
1581 | WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC = 0x2, | ||
1582 | WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF = 0x4, | ||
1583 | }; | ||
1584 | |||
1585 | struct wmi_pdev_tpc_config_event { | ||
1586 | __le32 reg_domain; | ||
1587 | __le32 chan_freq; | ||
1588 | __le32 phy_mode; | ||
1589 | __le32 twice_antenna_reduction; | ||
1590 | __le32 twice_max_rd_power; | ||
1591 | s32 twice_antenna_gain; | ||
1592 | __le32 power_limit; | ||
1593 | __le32 rate_max; | ||
1594 | __le32 num_tx_chain; | ||
1595 | __le32 ctl; | ||
1596 | __le32 flags; | ||
1597 | s8 max_reg_allow_pow[WMI_TPC_TX_N_CHAIN]; | ||
1598 | s8 max_reg_allow_pow_agcdd[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN]; | ||
1599 | s8 max_reg_allow_pow_agstbc[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN]; | ||
1600 | s8 max_reg_allow_pow_agtxbf[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN]; | ||
1601 | u8 rates_array[WMI_TPC_RATE_MAX]; | ||
1602 | } __packed; | ||
1603 | |||
1604 | /* Transmit power scale factor. */ | ||
1605 | enum wmi_tp_scale { | ||
1606 | WMI_TP_SCALE_MAX = 0, /* no scaling (default) */ | ||
1607 | WMI_TP_SCALE_50 = 1, /* 50% of max (-3 dBm) */ | ||
1608 | WMI_TP_SCALE_25 = 2, /* 25% of max (-6 dBm) */ | ||
1609 | WMI_TP_SCALE_12 = 3, /* 12% of max (-9 dBm) */ | ||
1610 | WMI_TP_SCALE_MIN = 4, /* min, but still on */ | ||
1611 | WMI_TP_SCALE_SIZE = 5, /* max num of enum */ | ||
1612 | }; | ||
1613 | |||
1614 | struct wmi_set_channel_cmd { | ||
1615 | /* channel (only frequency and mode info are used) */ | ||
1616 | struct wmi_channel chan; | ||
1617 | } __packed; | ||
1618 | |||
1619 | struct wmi_pdev_chanlist_update_event { | ||
1620 | /* number of channels */ | ||
1621 | __le32 num_chan; | ||
1622 | /* array of channels */ | ||
1623 | struct wmi_channel channel_list[1]; | ||
1624 | } __packed; | ||
1625 | |||
1626 | #define WMI_MAX_DEBUG_MESG (sizeof(u32) * 32) | ||
1627 | |||
1628 | struct wmi_debug_mesg_event { | ||
1629 | /* message buffer, NULL terminated */ | ||
1630 | char bufp[WMI_MAX_DEBUG_MESG]; | ||
1631 | } __packed; | ||
1632 | |||
1633 | enum { | ||
1634 | /* P2P device */ | ||
1635 | VDEV_SUBTYPE_P2PDEV = 0, | ||
1636 | /* P2P client */ | ||
1637 | VDEV_SUBTYPE_P2PCLI, | ||
1638 | /* P2P GO */ | ||
1639 | VDEV_SUBTYPE_P2PGO, | ||
1640 | /* BT3.0 HS */ | ||
1641 | VDEV_SUBTYPE_BT, | ||
1642 | }; | ||
1643 | |||
1644 | struct wmi_pdev_set_channel_cmd { | ||
1645 | /* ignore power, only use flags, mode and freq */ | ||
1646 | struct wmi_channel chan; | ||
1647 | } __packed; | ||
1648 | |||
1649 | /* Customize the DSCP (bit) to TID (0-7) mapping for QOS */ | ||
1650 | #define WMI_DSCP_MAP_MAX (64) | ||
1651 | struct wmi_pdev_set_dscp_tid_map_cmd { | ||
1652 | /* map indicating DSCP to TID conversion */ | ||
1653 | __le32 dscp_to_tid_map[WMI_DSCP_MAP_MAX]; | ||
1654 | } __packed; | ||
1655 | |||
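| /* | ||
|  * Editor's illustrative sketch (not part of this patch): a common default | ||
|  * DSCP-to-TID mapping simply uses the upper three DSCP bits as the TID | ||
|  * (dscp >> 3); whether that is the right policy is up to the driver.  The | ||
|  * helper name is hypothetical. | ||
|  */ | ||
| static inline void wmi_example_default_dscp_tid_map(struct wmi_pdev_set_dscp_tid_map_cmd *cmd) | ||
| { | ||
|         int dscp; | ||
| |||
|         for (dscp = 0; dscp < WMI_DSCP_MAP_MAX; dscp++) | ||
|                 cmd->dscp_to_tid_map[dscp] = __cpu_to_le32(dscp >> 3); | ||
| } | ||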
1656 | enum mcast_bcast_rate_id { | ||
1657 | WMI_SET_MCAST_RATE, | ||
1658 | WMI_SET_BCAST_RATE | ||
1659 | }; | ||
1660 | |||
1661 | struct mcast_bcast_rate { | ||
1662 | enum mcast_bcast_rate_id rate_id; | ||
1663 | __le32 rate; | ||
1664 | } __packed; | ||
1665 | |||
1666 | struct wmi_wmm_params { | ||
1667 | __le32 cwmin; | ||
1668 | __le32 cwmax; | ||
1669 | __le32 aifs; | ||
1670 | __le32 txop; | ||
1671 | __le32 acm; | ||
1672 | __le32 no_ack; | ||
1673 | } __packed; | ||
1674 | |||
1675 | struct wmi_pdev_set_wmm_params { | ||
1676 | struct wmi_wmm_params ac_be; | ||
1677 | struct wmi_wmm_params ac_bk; | ||
1678 | struct wmi_wmm_params ac_vi; | ||
1679 | struct wmi_wmm_params ac_vo; | ||
1680 | } __packed; | ||
1681 | |||
1682 | struct wmi_wmm_params_arg { | ||
1683 | u32 cwmin; | ||
1684 | u32 cwmax; | ||
1685 | u32 aifs; | ||
1686 | u32 txop; | ||
1687 | u32 acm; | ||
1688 | u32 no_ack; | ||
1689 | }; | ||
1690 | |||
1691 | struct wmi_pdev_set_wmm_params_arg { | ||
1692 | struct wmi_wmm_params_arg ac_be; | ||
1693 | struct wmi_wmm_params_arg ac_bk; | ||
1694 | struct wmi_wmm_params_arg ac_vi; | ||
1695 | struct wmi_wmm_params_arg ac_vo; | ||
1696 | }; | ||
1697 | |||
1698 | struct wal_dbg_tx_stats { | ||
1699 | /* Num HTT cookies queued to dispatch list */ | ||
1700 | __le32 comp_queued; | ||
1701 | |||
1702 | /* Num HTT cookies dispatched */ | ||
1703 | __le32 comp_delivered; | ||
1704 | |||
1705 | /* Num MSDU queued to WAL */ | ||
1706 | __le32 msdu_enqued; | ||
1707 | |||
1708 | /* Num MPDU queue to WAL */ | ||
1709 | __le32 mpdu_enqued; | ||
1710 | |||
1711 | /* Num MSDUs dropped by WMM limit */ | ||
1712 | __le32 wmm_drop; | ||
1713 | |||
1714 | /* Num Local frames queued */ | ||
1715 | __le32 local_enqued; | ||
1716 | |||
1717 | /* Num Local frames done */ | ||
1718 | __le32 local_freed; | ||
1719 | |||
1720 | /* Num queued to HW */ | ||
1721 | __le32 hw_queued; | ||
1722 | |||
1723 | /* Num PPDU reaped from HW */ | ||
1724 | __le32 hw_reaped; | ||
1725 | |||
1726 | /* Num underruns */ | ||
1727 | __le32 underrun; | ||
1728 | |||
1729 | /* Num PPDUs cleaned up in TX abort */ | ||
1730 | __le32 tx_abort; | ||
1731 | |||
1732 | /* Num MPDUs requeued by SW */ | ||
1733 | __le32 mpdus_requed; | ||
1734 | |||
1735 | /* excessive retries */ | ||
1736 | __le32 tx_ko; | ||
1737 | |||
1738 | /* data hw rate code */ | ||
1739 | __le32 data_rc; | ||
1740 | |||
1741 | /* Scheduler self triggers */ | ||
1742 | __le32 self_triggers; | ||
1743 | |||
1744 | /* frames dropped due to excessive sw retries */ | ||
1745 | __le32 sw_retry_failure; | ||
1746 | |||
1747 | /* illegal rate phy errors */ | ||
1748 | __le32 illgl_rate_phy_err; | ||
1749 | |||
1750 | /* wal pdev continuous xretry */ | ||
1751 | __le32 pdev_cont_xretry; | ||
1752 | |||
1753 | /* wal pdev tx timeouts */ | ||
1754 | __le32 pdev_tx_timeout; | ||
1755 | |||
1756 | /* wal pdev resets */ | ||
1757 | __le32 pdev_resets; | ||
1758 | |||
1759 | __le32 phy_underrun; | ||
1760 | |||
1761 | /* MPDU is more than txop limit */ | ||
1762 | __le32 txop_ovf; | ||
1763 | } __packed; | ||
1764 | |||
1765 | struct wal_dbg_rx_stats { | ||
1766 | /* Counts any change in ring routing mid-PPDU */ | ||
1767 | __le32 mid_ppdu_route_change; | ||
1768 | |||
1769 | /* Total number of statuses processed */ | ||
1770 | __le32 status_rcvd; | ||
1771 | |||
1772 | /* Extra frags on rings 0-3 */ | ||
1773 | __le32 r0_frags; | ||
1774 | __le32 r1_frags; | ||
1775 | __le32 r2_frags; | ||
1776 | __le32 r3_frags; | ||
1777 | |||
1778 | /* MSDUs / MPDUs delivered to HTT */ | ||
1779 | __le32 htt_msdus; | ||
1780 | __le32 htt_mpdus; | ||
1781 | |||
1782 | /* MSDUs / MPDUs delivered to local stack */ | ||
1783 | __le32 loc_msdus; | ||
1784 | __le32 loc_mpdus; | ||
1785 | |||
1786 | /* AMSDUs that have more MSDUs than the status ring size */ | ||
1787 | __le32 oversize_amsdu; | ||
1788 | |||
1789 | /* Number of PHY errors */ | ||
1790 | __le32 phy_errs; | ||
1791 | |||
1792 | /* Number of PHY errors drops */ | ||
1793 | __le32 phy_err_drop; | ||
1794 | |||
1795 | /* Number of mpdu errors - FCS, MIC, ENC etc. */ | ||
1796 | __le32 mpdu_errs; | ||
1797 | } __packed; | ||
1798 | |||
1799 | struct wal_dbg_peer_stats { | ||
1800 | /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */ | ||
1801 | __le32 dummy; | ||
1802 | } __packed; | ||
1803 | |||
1804 | struct wal_dbg_stats { | ||
1805 | struct wal_dbg_tx_stats tx; | ||
1806 | struct wal_dbg_rx_stats rx; | ||
1807 | struct wal_dbg_peer_stats peer; | ||
1808 | } __packed; | ||
1809 | |||
1810 | enum wmi_stats_id { | ||
1811 | WMI_REQUEST_PEER_STAT = 0x01, | ||
1812 | WMI_REQUEST_AP_STAT = 0x02 | ||
1813 | }; | ||
1814 | |||
1815 | struct wmi_request_stats_cmd { | ||
1816 | __le32 stats_id; | ||
1817 | |||
1818 | /* | ||
1819 | * Space to add parameters like | ||
1820 | * peer mac addr | ||
1821 | */ | ||
1822 | } __packed; | ||
1823 | |||
1824 | /* Suspend option */ | ||
1825 | enum { | ||
1826 | /* suspend */ | ||
1827 | WMI_PDEV_SUSPEND, | ||
1828 | |||
1829 | /* suspend and disable all interrupts */ | ||
1830 | WMI_PDEV_SUSPEND_AND_DISABLE_INTR, | ||
1831 | }; | ||
1832 | |||
1833 | struct wmi_pdev_suspend_cmd { | ||
1834 | /* suspend option sent to target */ | ||
1835 | __le32 suspend_opt; | ||
1836 | } __packed; | ||
1837 | |||
1838 | struct wmi_stats_event { | ||
1839 | __le32 stats_id; /* %WMI_REQUEST_ */ | ||
1840 | /* | ||
1841 | * number of pdev stats event structures | ||
1842 | * (wmi_pdev_stats) 0 or 1 | ||
1843 | */ | ||
1844 | __le32 num_pdev_stats; | ||
1845 | /* | ||
1846 | * number of vdev stats event structures | ||
1847 | * (wmi_vdev_stats) 0 or max vdevs | ||
1848 | */ | ||
1849 | __le32 num_vdev_stats; | ||
1850 | /* | ||
1851 | * number of peer stats event structures | ||
1852 | * (wmi_peer_stats) 0 or max peers | ||
1853 | */ | ||
1854 | __le32 num_peer_stats; | ||
1855 | __le32 num_bcnflt_stats; | ||
1856 | /* | ||
1857 | * followed by | ||
1858 | * num_pdev_stats * sizeof(struct wmi_pdev_stats) | ||
1859 | * num_vdev_stats * sizeof(struct wmi_vdev_stats) | ||
1860 | * num_peer_stats * sizeof(struct wmi_peer_stats) | ||
1861 | * | ||
1862 | * By having a zero sized array, the pointer to data area | ||
1863 | * becomes available without increasing the struct size | ||
1864 | */ | ||
1865 | u8 data[0]; | ||
1866 | } __packed; | ||
1867 | |||
1868 | /* | ||
1869 | * PDEV statistics | ||
1870 | * TODO: add all PDEV stats here | ||
1871 | */ | ||
1872 | struct wmi_pdev_stats { | ||
1873 | __le32 chan_nf; /* Channel noise floor */ | ||
1874 | __le32 tx_frame_count; /* TX frame count */ | ||
1875 | __le32 rx_frame_count; /* RX frame count */ | ||
1876 | __le32 rx_clear_count; /* rx clear count */ | ||
1877 | __le32 cycle_count; /* cycle count */ | ||
1878 | __le32 phy_err_count; /* Phy error count */ | ||
1879 | __le32 chan_tx_pwr; /* channel tx power */ | ||
1880 | struct wal_dbg_stats wal; /* WAL dbg stats */ | ||
1881 | } __packed; | ||
1882 | |||
1883 | /* | ||
1884 | * VDEV statistics | ||
1885 | * TODO: add all VDEV stats here | ||
1886 | */ | ||
1887 | struct wmi_vdev_stats { | ||
1888 | __le32 vdev_id; | ||
1889 | } __packed; | ||
1890 | |||
1891 | /* | ||
1892 | * peer statistics. | ||
1893 | * TODO: add more stats | ||
1894 | */ | ||
1895 | struct wmi_peer_stats { | ||
1896 | struct wmi_mac_addr peer_macaddr; | ||
1897 | __le32 peer_rssi; | ||
1898 | __le32 peer_tx_rate; | ||
1899 | } __packed; | ||
1900 | |||
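| /* | ||
|  * Editor's illustrative sketch (not part of this patch): the stats event | ||
|  * above packs pdev, vdev and peer records back to back in data[]; the | ||
|  * counts in the header tell the parser how far to advance.  Validation of | ||
|  * the received buffer length is omitted; the helper name is hypothetical. | ||
|  */ | ||
| static inline const u8 *wmi_example_stats_peer_records(const struct wmi_stats_event *ev) | ||
| { | ||
|         const u8 *pos = ev->data; | ||
| |||
|         pos += __le32_to_cpu(ev->num_pdev_stats) * sizeof(struct wmi_pdev_stats); | ||
|         pos += __le32_to_cpu(ev->num_vdev_stats) * sizeof(struct wmi_vdev_stats); | ||
|         /* pos now points at num_peer_stats struct wmi_peer_stats records */ | ||
|         return pos; | ||
| } | ||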
1901 | struct wmi_vdev_create_cmd { | ||
1902 | __le32 vdev_id; | ||
1903 | __le32 vdev_type; | ||
1904 | __le32 vdev_subtype; | ||
1905 | struct wmi_mac_addr vdev_macaddr; | ||
1906 | } __packed; | ||
1907 | |||
1908 | enum wmi_vdev_type { | ||
1909 | WMI_VDEV_TYPE_AP = 1, | ||
1910 | WMI_VDEV_TYPE_STA = 2, | ||
1911 | WMI_VDEV_TYPE_IBSS = 3, | ||
1912 | WMI_VDEV_TYPE_MONITOR = 4, | ||
1913 | }; | ||
1914 | |||
1915 | enum wmi_vdev_subtype { | ||
1916 | WMI_VDEV_SUBTYPE_NONE = 0, | ||
1917 | WMI_VDEV_SUBTYPE_P2P_DEVICE = 1, | ||
1918 | WMI_VDEV_SUBTYPE_P2P_CLIENT = 2, | ||
1919 | WMI_VDEV_SUBTYPE_P2P_GO = 3, | ||
1920 | }; | ||
1921 | |||
1922 | /* values for vdev_subtype */ | ||
1923 | |||
1924 | /* values for vdev_start_request flags */ | ||
1925 | /* | ||
1926 | * Indicates that the AP VDEV uses a hidden SSID. Only valid for | ||
1927 | * AP/GO */ | ||
1928 | #define WMI_VDEV_START_HIDDEN_SSID (1<<0) | ||
1929 | /* | ||
1930 | * Indicates if robust management frame/management frame | ||
1931 | * protection is enabled. For GO/AP vdevs, it indicates that | ||
1932 | * it may support station/client associations with RMF enabled. | ||
1933 | * For STA/client vdevs, it indicates that sta will | ||
1934 | * associate with AP with RMF enabled. */ | ||
1935 | #define WMI_VDEV_START_PMF_ENABLED (1<<1) | ||
1936 | |||
1937 | struct wmi_p2p_noa_descriptor { | ||
1938 | __le32 type_count; /* 255: continuous schedule, 0: reserved */ | ||
1939 | __le32 duration; /* Absent period duration in micro seconds */ | ||
1940 | __le32 interval; /* Absent period interval in micro seconds */ | ||
1941 | __le32 start_time; /* 32 bit tsf time when it starts */ | ||
1942 | } __packed; | ||
1943 | |||
1944 | struct wmi_vdev_start_request_cmd { | ||
1945 | /* WMI channel */ | ||
1946 | struct wmi_channel chan; | ||
1947 | /* unique id identifying the VDEV, generated by the caller */ | ||
1948 | __le32 vdev_id; | ||
1949 | /* requestor id identifying the caller module */ | ||
1950 | __le32 requestor_id; | ||
1951 | /* beacon interval from received beacon */ | ||
1952 | __le32 beacon_interval; | ||
1953 | /* DTIM Period from the received beacon */ | ||
1954 | __le32 dtim_period; | ||
1955 | /* Flags */ | ||
1956 | __le32 flags; | ||
1957 | /* ssid field. Only valid for AP/GO/IBSS/BTAmp VDEV type. */ | ||
1958 | struct wmi_ssid ssid; | ||
1959 | /* beacon/probe response xmit rate. Applicable for SoftAP. */ | ||
1960 | __le32 bcn_tx_rate; | ||
1961 | /* beacon/probe response xmit power. Applicable for SoftAP. */ | ||
1962 | __le32 bcn_tx_power; | ||
1963 | /* number of p2p NOA descriptor(s) from scan entry */ | ||
1964 | __le32 num_noa_descriptors; | ||
1965 | /* | ||
1966 | * Disable H/W ack. This is used by WMI_VDEV_RESTART_REQUEST_CMDID. | ||
1967 | * During CAC, our HW shouldn't ack detected frames | ||
1968 | */ | ||
1969 | __le32 disable_hw_ack; | ||
1970 | /* actual p2p NOA descriptor from scan entry */ | ||
1971 | struct wmi_p2p_noa_descriptor noa_descriptors[2]; | ||
1972 | } __packed; | ||
1973 | |||
1974 | struct wmi_vdev_restart_request_cmd { | ||
1975 | struct wmi_vdev_start_request_cmd vdev_start_request_cmd; | ||
1976 | } __packed; | ||
1977 | |||
1978 | struct wmi_vdev_start_request_arg { | ||
1979 | u32 vdev_id; | ||
1980 | struct wmi_channel_arg channel; | ||
1981 | u32 bcn_intval; | ||
1982 | u32 dtim_period; | ||
1983 | u8 *ssid; | ||
1984 | u32 ssid_len; | ||
1985 | u32 bcn_tx_rate; | ||
1986 | u32 bcn_tx_power; | ||
1987 | bool disable_hw_ack; | ||
1988 | bool hidden_ssid; | ||
1989 | bool pmf_enabled; | ||
1990 | }; | ||
1991 | |||
1992 | struct wmi_vdev_delete_cmd { | ||
1993 | /* unique id identifying the VDEV, generated by the caller */ | ||
1994 | __le32 vdev_id; | ||
1995 | } __packed; | ||
1996 | |||
1997 | struct wmi_vdev_up_cmd { | ||
1998 | __le32 vdev_id; | ||
1999 | __le32 vdev_assoc_id; | ||
2000 | struct wmi_mac_addr vdev_bssid; | ||
2001 | } __packed; | ||
2002 | |||
2003 | struct wmi_vdev_stop_cmd { | ||
2004 | __le32 vdev_id; | ||
2005 | } __packed; | ||
2006 | |||
2007 | struct wmi_vdev_down_cmd { | ||
2008 | __le32 vdev_id; | ||
2009 | } __packed; | ||
2010 | |||
2011 | struct wmi_vdev_standby_response_cmd { | ||
2012 | /* unique id identifying the VDEV, generated by the caller */ | ||
2013 | __le32 vdev_id; | ||
2014 | } __packed; | ||
2015 | |||
2016 | struct wmi_vdev_resume_response_cmd { | ||
2017 | /* unique id identifying the VDEV, generated by the caller */ | ||
2018 | __le32 vdev_id; | ||
2019 | } __packed; | ||
2020 | |||
2021 | struct wmi_vdev_set_param_cmd { | ||
2022 | __le32 vdev_id; | ||
2023 | __le32 param_id; | ||
2024 | __le32 param_value; | ||
2025 | } __packed; | ||
2026 | |||
2027 | #define WMI_MAX_KEY_INDEX 3 | ||
2028 | #define WMI_MAX_KEY_LEN 32 | ||
2029 | |||
2030 | #define WMI_KEY_PAIRWISE 0x00 | ||
2031 | #define WMI_KEY_GROUP 0x01 | ||
2032 | #define WMI_KEY_TX_USAGE 0x02 /* default tx key - static wep */ | ||
2033 | |||
2034 | struct wmi_key_seq_counter { | ||
2035 | __le32 key_seq_counter_l; | ||
2036 | __le32 key_seq_counter_h; | ||
2037 | } __packed; | ||
2038 | |||
2039 | #define WMI_CIPHER_NONE 0x0 /* clear key */ | ||
2040 | #define WMI_CIPHER_WEP 0x1 | ||
2041 | #define WMI_CIPHER_TKIP 0x2 | ||
2042 | #define WMI_CIPHER_AES_OCB 0x3 | ||
2043 | #define WMI_CIPHER_AES_CCM 0x4 | ||
2044 | #define WMI_CIPHER_WAPI 0x5 | ||
2045 | #define WMI_CIPHER_CKIP 0x6 | ||
2046 | #define WMI_CIPHER_AES_CMAC 0x7 | ||
2047 | |||
2048 | struct wmi_vdev_install_key_cmd { | ||
2049 | __le32 vdev_id; | ||
2050 | struct wmi_mac_addr peer_macaddr; | ||
2051 | __le32 key_idx; | ||
2052 | __le32 key_flags; | ||
2053 | __le32 key_cipher; /* %WMI_CIPHER_ */ | ||
2054 | struct wmi_key_seq_counter key_rsc_counter; | ||
2055 | struct wmi_key_seq_counter key_global_rsc_counter; | ||
2056 | struct wmi_key_seq_counter key_tsc_counter; | ||
2057 | u8 wpi_key_rsc_counter[16]; | ||
2058 | u8 wpi_key_tsc_counter[16]; | ||
2059 | __le32 key_len; | ||
2060 | __le32 key_txmic_len; | ||
2061 | __le32 key_rxmic_len; | ||
2062 | |||
2063 | /* contains key followed by tx mic followed by rx mic */ | ||
2064 | u8 key_data[0]; | ||
2065 | } __packed; | ||
2066 | |||
2067 | struct wmi_vdev_install_key_arg { | ||
2068 | u32 vdev_id; | ||
2069 | const u8 *macaddr; | ||
2070 | u32 key_idx; | ||
2071 | u32 key_flags; | ||
2072 | u32 key_cipher; | ||
2073 | u32 key_len; | ||
2074 | u32 key_txmic_len; | ||
2075 | u32 key_rxmic_len; | ||
2076 | const void *key_data; | ||
2077 | }; | ||
2078 | |||
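| /* | ||
|  * Editor's illustrative sketch (not part of this patch): key material, TX | ||
|  * MIC and RX MIC are carried back to back in key_data[], so the install-key | ||
|  * command length is the base structure plus those three lengths.  The | ||
|  * helper name is hypothetical. | ||
|  */ | ||
| static inline size_t wmi_example_install_key_len(const struct wmi_vdev_install_key_arg *arg) | ||
| { | ||
|         return sizeof(struct wmi_vdev_install_key_cmd) + | ||
|                arg->key_len + arg->key_txmic_len + arg->key_rxmic_len; | ||
| } | ||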
2079 | /* Preamble types to be used with VDEV fixed rate configuration */ | ||
2080 | enum wmi_rate_preamble { | ||
2081 | WMI_RATE_PREAMBLE_OFDM, | ||
2082 | WMI_RATE_PREAMBLE_CCK, | ||
2083 | WMI_RATE_PREAMBLE_HT, | ||
2084 | WMI_RATE_PREAMBLE_VHT, | ||
2085 | }; | ||
2086 | |||
2087 | /* Value to disable fixed rate setting */ | ||
2088 | #define WMI_FIXED_RATE_NONE (0xff) | ||
2089 | |||
2090 | /* the definition of different VDEV parameters */ | ||
2091 | enum wmi_vdev_param { | ||
2092 | /* RTS Threshold */ | ||
2093 | WMI_VDEV_PARAM_RTS_THRESHOLD = 0x1, | ||
2094 | /* Fragmentation threshold */ | ||
2095 | WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD, | ||
2096 | /* beacon interval in TUs */ | ||
2097 | WMI_VDEV_PARAM_BEACON_INTERVAL, | ||
2098 | /* Listen interval in TUs */ | ||
2099 | WMI_VDEV_PARAM_LISTEN_INTERVAL, | ||
2100 | /* multicast rate in Mbps */ | ||
2101 | WMI_VDEV_PARAM_MULTICAST_RATE, | ||
2102 | /* management frame rate in Mbps */ | ||
2103 | WMI_VDEV_PARAM_MGMT_TX_RATE, | ||
2104 | /* slot time (long vs short) */ | ||
2105 | WMI_VDEV_PARAM_SLOT_TIME, | ||
2106 | /* preamble (long vs short) */ | ||
2107 | WMI_VDEV_PARAM_PREAMBLE, | ||
2108 | /* SWBA time (time before tbtt in msec) */ | ||
2109 | WMI_VDEV_PARAM_SWBA_TIME, | ||
2110 | /* time period for updating VDEV stats */ | ||
2111 | WMI_VDEV_STATS_UPDATE_PERIOD, | ||
2112 | /* age out time in msec for frames queued for station in power save */ | ||
2113 | WMI_VDEV_PWRSAVE_AGEOUT_TIME, | ||
2114 | /* | ||
2115 | * Host SWBA interval (time in msec before tbtt for SWBA event | ||
2116 | * generation). | ||
2117 | */ | ||
2118 | WMI_VDEV_HOST_SWBA_INTERVAL, | ||
2119 | /* DTIM period (specified in units of num beacon intervals) */ | ||
2120 | WMI_VDEV_PARAM_DTIM_PERIOD, | ||
2121 | /* | ||
2122 | * scheduler air time limit for this VDEV. Used by the off-channel | ||
2123 | * scheduler. | ||
2124 | */ | ||
2125 | WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, | ||
2126 | /* enable/disable WDS for this VDEV */ | ||
2127 | WMI_VDEV_PARAM_WDS, | ||
2128 | /* ATIM Window */ | ||
2129 | WMI_VDEV_PARAM_ATIM_WINDOW, | ||
2130 | /* BMISS max */ | ||
2131 | WMI_VDEV_PARAM_BMISS_COUNT_MAX, | ||
2132 | /* BMISS first time */ | ||
2133 | WMI_VDEV_PARAM_BMISS_FIRST_BCNT, | ||
2134 | /* BMISS final time */ | ||
2135 | WMI_VDEV_PARAM_BMISS_FINAL_BCNT, | ||
2136 | /* WMM enable/disable */ | ||
2137 | WMI_VDEV_PARAM_FEATURE_WMM, | ||
2138 | /* Channel width */ | ||
2139 | WMI_VDEV_PARAM_CHWIDTH, | ||
2140 | /* Channel Offset */ | ||
2141 | WMI_VDEV_PARAM_CHEXTOFFSET, | ||
2142 | /* Disable HT Protection */ | ||
2143 | WMI_VDEV_PARAM_DISABLE_HTPROTECTION, | ||
2144 | /* Quick STA Kickout */ | ||
2145 | WMI_VDEV_PARAM_STA_QUICKKICKOUT, | ||
2146 | /* Rate to be used with Management frames */ | ||
2147 | WMI_VDEV_PARAM_MGMT_RATE, | ||
2148 | /* Protection Mode */ | ||
2149 | WMI_VDEV_PARAM_PROTECTION_MODE, | ||
2150 | /* Fixed rate setting */ | ||
2151 | WMI_VDEV_PARAM_FIXED_RATE, | ||
2152 | /* Short GI Enable/Disable */ | ||
2153 | WMI_VDEV_PARAM_SGI, | ||
2154 | /* Enable LDPC */ | ||
2155 | WMI_VDEV_PARAM_LDPC, | ||
2156 | /* Enable Tx STBC */ | ||
2157 | WMI_VDEV_PARAM_TX_STBC, | ||
2158 | /* Enable Rx STBC */ | ||
2159 | WMI_VDEV_PARAM_RX_STBC, | ||
2160 | /* Intra BSS forwarding */ | ||
2161 | WMI_VDEV_PARAM_INTRA_BSS_FWD, | ||
2162 | /* Setting Default xmit key for Vdev */ | ||
2163 | WMI_VDEV_PARAM_DEF_KEYID, | ||
2164 | /* Number of spatial streams (NSS) */ | ||
2165 | WMI_VDEV_PARAM_NSS, | ||
2166 | /* Set the custom rate for the broadcast data frames */ | ||
2167 | WMI_VDEV_PARAM_BCAST_DATA_RATE, | ||
2168 | /* Set the custom rate (rate-code) for multicast data frames */ | ||
2169 | WMI_VDEV_PARAM_MCAST_DATA_RATE, | ||
2170 | /* Tx multicast packet indicate Enable/Disable */ | ||
2171 | WMI_VDEV_PARAM_MCAST_INDICATE, | ||
2172 | /* Tx DHCP packet indicate Enable/Disable */ | ||
2173 | WMI_VDEV_PARAM_DHCP_INDICATE, | ||
2174 | /* Enable host inspection of Tx unicast packet to unknown destination */ | ||
2175 | WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE, | ||
2176 | |||
2177 | /* The minimum idle time after which the AP begins to consider a STA inactive */ | ||
2178 | WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, | ||
2179 | |||
2180 | /* | ||
2181 | * An associated STA is considered inactive when there is no recent | ||
2182 | * TX/RX activity and no downlink frames are buffered for it. Once a | ||
2183 | * STA exceeds the maximum idle inactive time, the AP will send an | ||
2184 | * 802.11 data-null as a keep alive to verify the STA is still | ||
2185 | * associated. If the STA does not ACK the data-null, or if the data-null | ||
2186 | * is buffered and the STA does not retrieve it, the STA will be | ||
2187 | * considered unresponsive | ||
2188 | * (see WMI_VDEV_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS). | ||
2189 | */ | ||
2190 | WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, | ||
2191 | |||
2192 | /* | ||
2193 | * An associated STA is considered unresponsive if there is no recent | ||
2194 | * TX/RX activity and downlink frames are buffered for it. Once a STA | ||
2195 | * exceeds the maximum unresponsive time, the AP will send a | ||
2196 | * WMI_STA_KICKOUT event to the host so the STA can be deleted. */ | ||
2197 | WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, | ||
2198 | |||
2199 | /* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */ | ||
2200 | WMI_VDEV_PARAM_AP_ENABLE_NAWDS, | ||
2201 | /* Enable/Disable RTS-CTS */ | ||
2202 | WMI_VDEV_PARAM_ENABLE_RTSCTS, | ||
2203 | /* Enable TX beamformee/beamformer (TxBF) */ | ||
2204 | WMI_VDEV_PARAM_TXBF, | ||
2205 | |||
2206 | /* Set packet power save */ | ||
2207 | WMI_VDEV_PARAM_PACKET_POWERSAVE, | ||
2208 | |||
2209 | /* | ||
2210 | * Drops unencrypted packets if received in an encrypted connection, | ||
2211 | * otherwise forwards them to the host. | ||
2212 | */ | ||
2213 | WMI_VDEV_PARAM_DROP_UNENCRY, | ||
2214 | |||
2215 | /* | ||
2216 | * Set the encapsulation type for frames. | ||
2217 | */ | ||
2218 | WMI_VDEV_PARAM_TX_ENCAP_TYPE, | ||
2219 | }; | ||
2220 | |||
2221 | /* slot time long */ | ||
2222 | #define WMI_VDEV_SLOT_TIME_LONG 0x1 | ||
2223 | /* slot time short */ | ||
2224 | #define WMI_VDEV_SLOT_TIME_SHORT 0x2 | ||
2225 | /* preamble long */ | ||
2226 | #define WMI_VDEV_PREAMBLE_LONG 0x1 | ||
2227 | /* preamble short */ | ||
2228 | #define WMI_VDEV_PREAMBLE_SHORT 0x2 | ||
2229 | |||
2230 | enum wmi_start_event_param { | ||
2231 | WMI_VDEV_RESP_START_EVENT = 0, | ||
2232 | WMI_VDEV_RESP_RESTART_EVENT, | ||
2233 | }; | ||
2234 | |||
2235 | struct wmi_vdev_start_response_event { | ||
2236 | __le32 vdev_id; | ||
2237 | __le32 req_id; | ||
2238 | __le32 resp_type; /* %WMI_VDEV_RESP_ */ | ||
2239 | __le32 status; | ||
2240 | } __packed; | ||
2241 | |||
2242 | struct wmi_vdev_standby_req_event { | ||
2243 | /* unique id identifying the VDEV, generated by the caller */ | ||
2244 | __le32 vdev_id; | ||
2245 | } __packed; | ||
2246 | |||
2247 | struct wmi_vdev_resume_req_event { | ||
2248 | /* unique id identifying the VDEV, generated by the caller */ | ||
2249 | __le32 vdev_id; | ||
2250 | } __packed; | ||
2251 | |||
2252 | struct wmi_vdev_stopped_event { | ||
2253 | /* unique id identifying the VDEV, generated by the caller */ | ||
2254 | __le32 vdev_id; | ||
2255 | } __packed; | ||
2256 | |||
2257 | /* | ||
2258 | * common structure used for simple events | ||
2259 | * (stopped, resume_req, standby response) | ||
2260 | */ | ||
2261 | struct wmi_vdev_simple_event { | ||
2262 | /* unique id identifying the VDEV, generated by the caller */ | ||
2263 | __le32 vdev_id; | ||
2264 | } __packed; | ||
2265 | |||
2266 | /* VDEV start response status codes */ | ||
2267 | /* VDEV successfully started */ | ||
2268 | #define WMI_INIFIED_VDEV_START_RESPONSE_STATUS_SUCCESS 0x0 | ||
2269 | |||
2270 | /* requested VDEV not found */ | ||
2271 | #define WMI_INIFIED_VDEV_START_RESPONSE_INVALID_VDEVID 0x1 | ||
2272 | |||
2273 | /* unsupported VDEV combination */ | ||
2274 | #define WMI_INIFIED_VDEV_START_RESPONSE_NOT_SUPPORTED 0x2 | ||
2275 | |||
2276 | /* Beacon processing related command and event structures */ | ||
2277 | struct wmi_bcn_tx_hdr { | ||
2278 | __le32 vdev_id; | ||
2279 | __le32 tx_rate; | ||
2280 | __le32 tx_power; | ||
2281 | __le32 bcn_len; | ||
2282 | } __packed; | ||
2283 | |||
2284 | struct wmi_bcn_tx_cmd { | ||
2285 | struct wmi_bcn_tx_hdr hdr; | ||
2286 | u8 bcn[0]; | ||
2287 | } __packed; | ||
2288 | |||
2289 | struct wmi_bcn_tx_arg { | ||
2290 | u32 vdev_id; | ||
2291 | u32 tx_rate; | ||
2292 | u32 tx_power; | ||
2293 | u32 bcn_len; | ||
2294 | const void *bcn; | ||
2295 | }; | ||
2296 | |||
2297 | /* Beacon filter */ | ||
2298 | #define WMI_BCN_FILTER_ALL 0 /* Filter all beacons */ | ||
2299 | #define WMI_BCN_FILTER_NONE 1 /* Pass all beacons */ | ||
2300 | #define WMI_BCN_FILTER_RSSI 2 /* Pass Beacons RSSI >= RSSI threshold */ | ||
2301 | #define WMI_BCN_FILTER_BSSID 3 /* Pass Beacons with matching BSSID */ | ||
2302 | #define WMI_BCN_FILTER_SSID 4 /* Pass Beacons with matching SSID */ | ||
2303 | |||
2304 | struct wmi_bcn_filter_rx_cmd { | ||
2305 | /* Filter ID */ | ||
2306 | __le32 bcn_filter_id; | ||
2307 | /* Filter type - wmi_bcn_filter */ | ||
2308 | __le32 bcn_filter; | ||
2309 | /* Buffer len */ | ||
2310 | __le32 bcn_filter_len; | ||
2311 | /* Filter info (threshold, BSSID, RSSI) */ | ||
2312 | u8 *bcn_filter_buf; | ||
2313 | } __packed; | ||
2314 | |||
2315 | /* Capabilities and IEs to be passed to firmware */ | ||
2316 | struct wmi_bcn_prb_info { | ||
2317 | /* Capabilities */ | ||
2318 | __le32 caps; | ||
2319 | /* ERP info */ | ||
2320 | __le32 erp; | ||
2321 | /* Advanced capabilities */ | ||
2322 | /* HT capabilities */ | ||
2323 | /* HT Info */ | ||
2324 | /* ibss_dfs */ | ||
2325 | /* wpa Info */ | ||
2326 | /* rsn Info */ | ||
2327 | /* rrm info */ | ||
2328 | /* ath_ext */ | ||
2329 | /* app IE */ | ||
2330 | } __packed; | ||
2331 | |||
2332 | struct wmi_bcn_tmpl_cmd { | ||
2333 | /* unique id identifying the VDEV, generated by the caller */ | ||
2334 | __le32 vdev_id; | ||
2335 | /* TIM IE offset from the beginning of the template. */ | ||
2336 | __le32 tim_ie_offset; | ||
2337 | /* beacon probe capabilities and IEs */ | ||
2338 | struct wmi_bcn_prb_info bcn_prb_info; | ||
2339 | /* beacon buffer length */ | ||
2340 | __le32 buf_len; | ||
2341 | /* variable length data */ | ||
2342 | u8 data[1]; | ||
2343 | } __packed; | ||
2344 | |||
2345 | struct wmi_prb_tmpl_cmd { | ||
2346 | /* unique id identifying the VDEV, generated by the caller */ | ||
2347 | __le32 vdev_id; | ||
2348 | /* beacon probe capabilities and IEs */ | ||
2349 | struct wmi_bcn_prb_info bcn_prb_info; | ||
2350 | /* beacon buffer length */ | ||
2351 | __le32 buf_len; | ||
2352 | /* Variable length data */ | ||
2353 | u8 data[1]; | ||
2354 | } __packed; | ||
2355 | |||
2356 | enum wmi_sta_ps_mode { | ||
2357 | /* disable power save for the given STA VDEV */ | ||
2358 | WMI_STA_PS_MODE_DISABLED = 0, | ||
2359 | /* enable power save for a given STA VDEV */ | ||
2360 | WMI_STA_PS_MODE_ENABLED = 1, | ||
2361 | }; | ||
2362 | |||
2363 | struct wmi_sta_powersave_mode_cmd { | ||
2364 | /* unique id identifying the VDEV, generated by the caller */ | ||
2365 | __le32 vdev_id; | ||
2366 | |||
2367 | /* | ||
2368 | * Power save mode | ||
2369 | * (see enum wmi_sta_ps_mode) | ||
2370 | */ | ||
2371 | __le32 sta_ps_mode; | ||
2372 | } __packed; | ||
2373 | |||
2374 | enum wmi_csa_offload_en { | ||
2375 | WMI_CSA_OFFLOAD_DISABLE = 0, | ||
2376 | WMI_CSA_OFFLOAD_ENABLE = 1, | ||
2377 | }; | ||
2378 | |||
2379 | struct wmi_csa_offload_enable_cmd { | ||
2380 | __le32 vdev_id; | ||
2381 | __le32 csa_offload_enable; | ||
2382 | } __packed; | ||
2383 | |||
2384 | struct wmi_csa_offload_chanswitch_cmd { | ||
2385 | __le32 vdev_id; | ||
2386 | struct wmi_channel chan; | ||
2387 | } __packed; | ||
2388 | |||
2389 | /* | ||
2390 | * This parameter controls the policy for retrieving frames from AP while the | ||
2391 | * STA is in sleep state. | ||
2392 | * | ||
2393 | * Only takes effect if sta_ps_mode is enabled. | ||
2394 | */ | ||
2395 | enum wmi_sta_ps_param_rx_wake_policy { | ||
2396 | /* | ||
2397 | * Wake up whenever there is RX activity on the VDEV. In this mode | ||
2398 | * the power save SM (state machine) will come out of sleep by sending | ||
2399 | * either a null frame or a data frame (with PS == 0) in response to the | ||
2400 | * TIM bit set in a beacon received from the AP. | ||
2401 | */ | ||
2402 | WMI_STA_PS_RX_WAKE_POLICY_WAKE = 0, | ||
2403 | |||
2404 | /* | ||
2405 | * Here the power save state machine will not wake up in response to the | ||
2406 | * TIM bit; instead it will send a PS-Poll or a UAPSD trigger based on the | ||
2407 | * UAPSD configuration set up by the WMISET_PS_SET_UAPSD WMI command. When | ||
2408 | * all access categories are delivery-enabled, the station will send a | ||
2409 | * UAPSD trigger frame, otherwise it will send a PS-Poll. | ||
2410 | */ | ||
2411 | WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1, | ||
2412 | }; | ||
2413 | |||
2414 | /* | ||
2415 | * Number of TX frames per beacon interval that cause the power save SM to | ||
2416 | * wake up. | ||
2417 | * Value 1 causes the SM to wake up for every TX. Value 0 has a special | ||
2418 | * meaning: it causes the SM to never wake up, which is useful for keeping | ||
2419 | * the system asleep all the time in some kind of test mode. The host can | ||
2420 | * change this parameter at any time; it takes effect at the next TX frame. | ||
2421 | */ | ||
2422 | enum wmi_sta_ps_param_tx_wake_threshold { | ||
2423 | WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER = 0, | ||
2424 | WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS = 1, | ||
2425 | |||
2426 | /* | ||
2427 | * Values greater than one indicate how many TX attempts per beacon | ||
2428 | * interval occur before the STA wakes up. | ||
2429 | */ | ||
2430 | }; | ||
2431 | |||
2432 | /* | ||
2433 | * The maximum number of PS-Poll frames the FW will send in response to | ||
2434 | * traffic advertised in TIM before waking up (by sending a null frame with PS | ||
2435 | * = 0). Value 0 has a special meaning: there is no maximum count and the FW | ||
2436 | * will send as many PS-Polls as necessary to retrieve the buffered BUs. This | ||
2437 | * parameter is used when the RX wake policy is | ||
2438 | * WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD and ignored when the RX wake | ||
2439 | * policy is WMI_STA_PS_RX_WAKE_POLICY_WAKE. | ||
2440 | */ | ||
2441 | enum wmi_sta_ps_param_pspoll_count { | ||
2442 | WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0, | ||
2443 | /* | ||
2444 | * Values greater than 0 indicate the maximum number of PS-Poll frames | ||
2445 | * the FW will send before waking up. | ||
2446 | */ | ||
2447 | }; | ||
2448 | |||
2449 | /* | ||
2450 | * This includes the delivery and trigger enabled state for every AC. It is | ||
2451 | * the state negotiated with the AP. The host MLME needs to set this based | ||
2452 | * on the AP capability and the state set in the association request by the | ||
2453 | * station MLME. The lower 8 bits of the value specify the UAPSD configuration. | ||
2454 | */ | ||
2455 | #define WMI_UAPSD_AC_TYPE_DELI 0 | ||
2456 | #define WMI_UAPSD_AC_TYPE_TRIG 1 | ||
2457 | |||
2458 | #define WMI_UAPSD_AC_BIT_MASK(ac, type) \ | ||
2459 | ((type == WMI_UAPSD_AC_TYPE_DELI) ? (1<<(ac<<1)) : (1<<((ac<<1)+1))) | ||
2460 | |||
2461 | enum wmi_sta_ps_param_uapsd { | ||
2462 | WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0), | ||
2463 | WMI_STA_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1), | ||
2464 | WMI_STA_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2), | ||
2465 | WMI_STA_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3), | ||
2466 | WMI_STA_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4), | ||
2467 | WMI_STA_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5), | ||
2468 | WMI_STA_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6), | ||
2469 | WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7), | ||
2470 | }; | ||
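For illustration, a minimal sketch (not part of the header) of how the WMI_UAPSD_AC_BIT_MASK() macro above lines up with the per-AC enum bits; the helper name is hypothetical and assumes a driver .c file that includes this header:

static inline u8 wmi_uapsd_all_acs_example(void)
{
	u8 uapsd = 0;
	int ac;

	/* For each AC, set both the delivery (even) and trigger (odd) bit. */
	for (ac = 0; ac < 4; ac++)
		uapsd |= WMI_UAPSD_AC_BIT_MASK(ac, WMI_UAPSD_AC_TYPE_DELI) |
			 WMI_UAPSD_AC_BIT_MASK(ac, WMI_UAPSD_AC_TYPE_TRIG);

	/* Result is 0xff, i.e. every WMI_STA_PS_UAPSD_AC*_*_EN bit set. */
	return uapsd;
}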
2471 | |||
2472 | enum wmi_sta_powersave_param { | ||
2473 | /* | ||
2474 | * Controls how frames are retrieved from the AP while the STA is sleeping. | ||
2475 | * | ||
2476 | * (see enum wmi_sta_ps_param_rx_wake_policy) | ||
2477 | */ | ||
2478 | WMI_STA_PS_PARAM_RX_WAKE_POLICY = 0, | ||
2479 | |||
2480 | /* | ||
2481 | * The STA will go active after this many TX frames. | ||
2482 | * | ||
2483 | * (see enum wmi_sta_ps_param_tx_wake_threshold) | ||
2484 | */ | ||
2485 | WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD = 1, | ||
2486 | |||
2487 | /* | ||
2488 | * Number of PS-Poll to send before STA wakes up | ||
2489 | * | ||
2490 | * (see enum wmi_sta_ps_param_pspoll_count) | ||
2491 | * | ||
2492 | */ | ||
2493 | WMI_STA_PS_PARAM_PSPOLL_COUNT = 2, | ||
2494 | |||
2495 | /* | ||
2496 | * TX/RX inactivity time in msec before going to sleep. | ||
2497 | * | ||
2498 | * The power save SM will monitor TX/RX activity on the VDEV; if there is | ||
2499 | * no activity for the specified number of msec, the power save SM will | ||
2500 | * go to sleep. | ||
2501 | */ | ||
2502 | WMI_STA_PS_PARAM_INACTIVITY_TIME = 3, | ||
2503 | |||
2504 | /* | ||
2505 | * Set uapsd configuration. | ||
2506 | * | ||
2507 | * (see enum wmi_sta_ps_param_uapsd) | ||
2508 | */ | ||
2509 | WMI_STA_PS_PARAM_UAPSD = 4, | ||
2510 | }; | ||
2511 | |||
2512 | struct wmi_sta_powersave_param_cmd { | ||
2513 | __le32 vdev_id; | ||
2514 | __le32 param_id; /* %WMI_STA_PS_PARAM_ */ | ||
2515 | __le32 param_value; | ||
2516 | } __packed; | ||
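As a usage illustration, a hedged sketch of programming these parameters through ath10k_wmi_set_sta_ps_param() (declared near the end of this header); the wrapper name and the choice of values are illustrative only, as it would appear in a driver .c file that includes this header:

static int example_sta_ps_setup(struct ath10k *ar, u32 vdev_id)
{
	int ret;

	/* Wake via PS-Poll/U-APSD trigger rather than on every TIM hit. */
	ret = ath10k_wmi_set_sta_ps_param(ar, vdev_id,
					  WMI_STA_PS_PARAM_RX_WAKE_POLICY,
					  WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD);
	if (ret)
		return ret;

	/* No upper bound on the number of PS-Polls sent per TIM. */
	return ath10k_wmi_set_sta_ps_param(ar, vdev_id,
					   WMI_STA_PS_PARAM_PSPOLL_COUNT,
					   WMI_STA_PS_PSPOLL_COUNT_NO_MAX);
}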
2517 | |||
2518 | /* No MIMO power save */ | ||
2519 | #define WMI_STA_MIMO_PS_MODE_DISABLE | ||
2520 | /* MIMO powersave mode static */ | ||
2521 | #define WMI_STA_MIMO_PS_MODE_STATIC | ||
2522 | /* MIMO powersave mode dynamic */ | ||
2523 | #define WMI_STA_MIMO_PS_MODE_DYNAMIC | ||
2524 | |||
2525 | struct wmi_sta_mimo_ps_mode_cmd { | ||
2526 | /* unique id identifying the VDEV, generated by the caller */ | ||
2527 | __le32 vdev_id; | ||
2528 | /* mimo powersave mode as defined above */ | ||
2529 | __le32 mimo_pwrsave_mode; | ||
2530 | } __packed; | ||
2531 | |||
2532 | /* U-APSD configuration of peer station from (re)assoc request and TSPECs */ | ||
2533 | enum wmi_ap_ps_param_uapsd { | ||
2534 | WMI_AP_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0), | ||
2535 | WMI_AP_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1), | ||
2536 | WMI_AP_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2), | ||
2537 | WMI_AP_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3), | ||
2538 | WMI_AP_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4), | ||
2539 | WMI_AP_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5), | ||
2540 | WMI_AP_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6), | ||
2541 | WMI_AP_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7), | ||
2542 | }; | ||
2543 | |||
2544 | /* U-APSD maximum service period of peer station */ | ||
2545 | enum wmi_ap_ps_peer_param_max_sp { | ||
2546 | WMI_AP_PS_PEER_PARAM_MAX_SP_UNLIMITED = 0, | ||
2547 | WMI_AP_PS_PEER_PARAM_MAX_SP_2 = 1, | ||
2548 | WMI_AP_PS_PEER_PARAM_MAX_SP_4 = 2, | ||
2549 | WMI_AP_PS_PEER_PARAM_MAX_SP_6 = 3, | ||
2550 | MAX_WMI_AP_PS_PEER_PARAM_MAX_SP, | ||
2551 | }; | ||
2552 | |||
2553 | /* | ||
2554 | * AP power save parameter | ||
2555 | * Set a power save specific parameter for a peer station | ||
2556 | */ | ||
2557 | enum wmi_ap_ps_peer_param { | ||
2558 | /* Set uapsd configuration for a given peer. | ||
2559 | * | ||
2560 | * Include the delivery and trigger enabled state for every AC. | ||
2561 | * The host MLME needs to set this based on the AP capability and the | ||
2562 | * station's request set in the association request received from the station. | ||
2563 | * | ||
2564 | * Lower 8 bits of the value specify the UAPSD configuration. | ||
2565 | * | ||
2566 | * (see enum wmi_ap_ps_param_uapsd) | ||
2567 | * The default value is 0. | ||
2568 | */ | ||
2569 | WMI_AP_PS_PEER_PARAM_UAPSD = 0, | ||
2570 | |||
2571 | /* | ||
2572 | * Set the service period for a UAPSD capable station | ||
2573 | * | ||
2574 | * The service period from the WME IE in the (re)assoc request frame. | ||
2575 | * | ||
2576 | * (see enum wmi_ap_ps_peer_param_max_sp) | ||
2577 | */ | ||
2578 | WMI_AP_PS_PEER_PARAM_MAX_SP = 1, | ||
2579 | |||
2580 | /* Time in seconds for aging out buffered frames for STA in PS */ | ||
2581 | WMI_AP_PS_PEER_PARAM_AGEOUT_TIME = 2, | ||
2582 | }; | ||
2583 | |||
2584 | struct wmi_ap_ps_peer_cmd { | ||
2585 | /* unique id identifying the VDEV, generated by the caller */ | ||
2586 | __le32 vdev_id; | ||
2587 | |||
2588 | /* peer MAC address */ | ||
2589 | struct wmi_mac_addr peer_macaddr; | ||
2590 | |||
2591 | /* AP powersave param (see enum wmi_ap_ps_peer_param) */ | ||
2592 | __le32 param_id; | ||
2593 | |||
2594 | /* AP powersave param value */ | ||
2595 | __le32 param_value; | ||
2596 | } __packed; | ||
2597 | |||
2598 | /* 128 clients = 4 words */ | ||
2599 | #define WMI_TIM_BITMAP_ARRAY_SIZE 4 | ||
2600 | |||
2601 | struct wmi_tim_info { | ||
2602 | __le32 tim_len; | ||
2603 | __le32 tim_mcast; | ||
2604 | __le32 tim_bitmap[WMI_TIM_BITMAP_ARRAY_SIZE]; | ||
2605 | __le32 tim_changed; | ||
2606 | __le32 tim_num_ps_pending; | ||
2607 | } __packed; | ||
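A minimal sketch of reading the 128-bit TIM bitmap above, assuming the conventional mapping of bit i to AID i; the helper name is hypothetical and it would live in a .c file that includes this header:

static bool example_tim_bit_is_set(const struct wmi_tim_info *tim, u16 aid)
{
	u32 word;

	if (aid >= WMI_TIM_BITMAP_ARRAY_SIZE * 32)
		return false;

	/* 32 AIDs per little-endian 32-bit word, LSB first. */
	word = __le32_to_cpu(tim->tim_bitmap[aid / 32]);
	return !!(word & BIT(aid % 32));
}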
2608 | |||
2609 | /* Maximum number of NOA Descriptors supported */ | ||
2610 | #define WMI_P2P_MAX_NOA_DESCRIPTORS 4 | ||
2611 | #define WMI_P2P_OPPPS_ENABLE_BIT BIT(0) | ||
2612 | #define WMI_P2P_OPPPS_CTWINDOW_OFFSET 1 | ||
2613 | #define WMI_P2P_NOA_CHANGED_BIT BIT(0) | ||
2614 | |||
2615 | struct wmi_p2p_noa_info { | ||
2616 | /* Bit 0 - Flag to indicate an update in NOA schedule | ||
2617 | Bits 7-1 - Reserved */ | ||
2618 | u8 changed; | ||
2619 | /* NOA index */ | ||
2620 | u8 index; | ||
2621 | /* Bit 0 - Opp PS state of the AP | ||
2622 | Bits 1-7 - Ctwindow in TUs */ | ||
2623 | u8 ctwindow_oppps; | ||
2624 | /* Number of NOA descriptors */ | ||
2625 | u8 num_descriptors; | ||
2626 | |||
2627 | struct wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS]; | ||
2628 | } __packed; | ||
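A short sketch of unpacking the ctwindow_oppps field with the bit/offset definitions above; the helper names are hypothetical:

static bool example_noa_oppps_enabled(const struct wmi_p2p_noa_info *noa)
{
	/* Bit 0 carries the opportunistic power save state of the AP. */
	return !!(noa->ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
}

static u8 example_noa_ctwindow(const struct wmi_p2p_noa_info *noa)
{
	/* Bits 1-7 carry the CT window in TUs. */
	return noa->ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
}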
2629 | |||
2630 | struct wmi_bcn_info { | ||
2631 | struct wmi_tim_info tim_info; | ||
2632 | struct wmi_p2p_noa_info p2p_noa_info; | ||
2633 | } __packed; | ||
2634 | |||
2635 | struct wmi_host_swba_event { | ||
2636 | __le32 vdev_map; | ||
2637 | struct wmi_bcn_info bcn_info[1]; | ||
2638 | } __packed; | ||
2639 | |||
2640 | #define WMI_MAX_AP_VDEV 16 | ||
2641 | |||
2642 | struct wmi_tbtt_offset_event { | ||
2643 | __le32 vdev_map; | ||
2644 | __le32 tbttoffset_list[WMI_MAX_AP_VDEV]; | ||
2645 | } __packed; | ||
2646 | |||
2647 | |||
2648 | struct wmi_peer_create_cmd { | ||
2649 | __le32 vdev_id; | ||
2650 | struct wmi_mac_addr peer_macaddr; | ||
2651 | } __packed; | ||
2652 | |||
2653 | struct wmi_peer_delete_cmd { | ||
2654 | __le32 vdev_id; | ||
2655 | struct wmi_mac_addr peer_macaddr; | ||
2656 | } __packed; | ||
2657 | |||
2658 | struct wmi_peer_flush_tids_cmd { | ||
2659 | __le32 vdev_id; | ||
2660 | struct wmi_mac_addr peer_macaddr; | ||
2661 | __le32 peer_tid_bitmap; | ||
2662 | } __packed; | ||
2663 | |||
2664 | struct wmi_fixed_rate { | ||
2665 | /* | ||
2666 | * rate mode. 0: disable fixed rate (auto rate) | ||
2667 | * 1: legacy (non 11n) rate specified as ieee rate 2*Mbps | ||
2668 | * 2: ht20 11n rate specified as mcs index | ||
2669 | * 3: ht40 11n rate specified as mcs index | ||
2670 | */ | ||
2671 | __le32 rate_mode; | ||
2672 | /* | ||
2673 | * 4 rate values for 4 rate series. series 0 is stored in byte 0 (LSB) | ||
2674 | * and series 3 is stored at byte 3 (MSB) | ||
2675 | */ | ||
2676 | __le32 rate_series; | ||
2677 | /* | ||
2678 | * 4 retry counts for 4 rate series. retry count for rate 0 is stored | ||
2679 | * in byte 0 (LSB) and retry count for rate 3 is stored at byte 3 | ||
2680 | * (MSB) | ||
2681 | */ | ||
2682 | __le32 rate_retries; | ||
2683 | } __packed; | ||
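A hedged sketch of filling the packed byte fields described above (series 0 in the LSB); the helper name and calling convention are illustrative only:

static void example_fill_fixed_rate(struct wmi_fixed_rate *fr, u32 rate_mode,
				    const u8 rates[4], const u8 retries[4])
{
	u32 series = 0, tries = 0;
	int i;

	for (i = 0; i < 4; i++) {
		series |= (u32)rates[i] << (8 * i);	/* series i in byte i */
		tries |= (u32)retries[i] << (8 * i);	/* retries i in byte i */
	}

	fr->rate_mode = __cpu_to_le32(rate_mode);
	fr->rate_series = __cpu_to_le32(series);
	fr->rate_retries = __cpu_to_le32(tries);
}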
2684 | |||
2685 | struct wmi_peer_fixed_rate_cmd { | ||
2686 | /* unique id identifying the VDEV, generated by the caller */ | ||
2687 | __le32 vdev_id; | ||
2688 | /* peer MAC address */ | ||
2689 | struct wmi_mac_addr peer_macaddr; | ||
2690 | /* fixed rate */ | ||
2691 | struct wmi_fixed_rate peer_fixed_rate; | ||
2692 | } __packed; | ||
2693 | |||
2694 | #define WMI_MGMT_TID 17 | ||
2695 | |||
2696 | struct wmi_addba_clear_resp_cmd { | ||
2697 | /* unique id identifying the VDEV, generated by the caller */ | ||
2698 | __le32 vdev_id; | ||
2699 | /* peer MAC address */ | ||
2700 | struct wmi_mac_addr peer_macaddr; | ||
2701 | } __packed; | ||
2702 | |||
2703 | struct wmi_addba_send_cmd { | ||
2704 | /* unique id identifying the VDEV, generated by the caller */ | ||
2705 | __le32 vdev_id; | ||
2706 | /* peer MAC address */ | ||
2707 | struct wmi_mac_addr peer_macaddr; | ||
2708 | /* Tid number */ | ||
2709 | __le32 tid; | ||
2710 | /* Buffer/Window size*/ | ||
2711 | __le32 buffersize; | ||
2712 | } __packed; | ||
2713 | |||
2714 | struct wmi_delba_send_cmd { | ||
2715 | /* unique id identifying the VDEV, generated by the caller */ | ||
2716 | __le32 vdev_id; | ||
2717 | /* peer MAC address */ | ||
2718 | struct wmi_mac_addr peer_macaddr; | ||
2719 | /* Tid number */ | ||
2720 | __le32 tid; | ||
2721 | /* Is Initiator */ | ||
2722 | __le32 initiator; | ||
2723 | /* Reason code */ | ||
2724 | __le32 reasoncode; | ||
2725 | } __packed; | ||
2726 | |||
2727 | struct wmi_addba_setresponse_cmd { | ||
2728 | /* unique id identifying the vdev, generated by the caller */ | ||
2729 | __le32 vdev_id; | ||
2730 | /* peer mac address */ | ||
2731 | struct wmi_mac_addr peer_macaddr; | ||
2732 | /* Tid number */ | ||
2733 | __le32 tid; | ||
2734 | /* status code */ | ||
2735 | __le32 statuscode; | ||
2736 | } __packed; | ||
2737 | |||
2738 | struct wmi_send_singleamsdu_cmd { | ||
2739 | /* unique id identifying the vdev, generated by the caller */ | ||
2740 | __le32 vdev_id; | ||
2741 | /* peer mac address */ | ||
2742 | struct wmi_mac_addr peer_macaddr; | ||
2743 | /* Tid number */ | ||
2744 | __le32 tid; | ||
2745 | } __packed; | ||
2746 | |||
2747 | enum wmi_peer_smps_state { | ||
2748 | WMI_PEER_SMPS_PS_NONE = 0x0, | ||
2749 | WMI_PEER_SMPS_STATIC = 0x1, | ||
2750 | WMI_PEER_SMPS_DYNAMIC = 0x2 | ||
2751 | }; | ||
2752 | |||
2753 | enum wmi_peer_param { | ||
2754 | WMI_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */ | ||
2755 | WMI_PEER_AMPDU = 0x2, | ||
2756 | WMI_PEER_AUTHORIZE = 0x3, | ||
2757 | WMI_PEER_CHAN_WIDTH = 0x4, | ||
2758 | WMI_PEER_NSS = 0x5, | ||
2759 | WMI_PEER_USE_4ADDR = 0x6 | ||
2760 | }; | ||
2761 | |||
2762 | struct wmi_peer_set_param_cmd { | ||
2763 | __le32 vdev_id; | ||
2764 | struct wmi_mac_addr peer_macaddr; | ||
2765 | __le32 param_id; | ||
2766 | __le32 param_value; | ||
2767 | } __packed; | ||
2768 | |||
2769 | #define MAX_SUPPORTED_RATES 128 | ||
2770 | |||
2771 | struct wmi_rate_set { | ||
2772 | /* total number of rates */ | ||
2773 | __le32 num_rates; | ||
2774 | /* | ||
2775 | * rates (each an 8-bit value) packed into 32-bit words. | ||
2776 | * The rates are filled from the least significant byte to the most | ||
2777 | * significant byte. | ||
2778 | */ | ||
2779 | __le32 rates[(MAX_SUPPORTED_RATES/4)+1]; | ||
2780 | } __packed; | ||
2781 | |||
2782 | struct wmi_rate_set_arg { | ||
2783 | unsigned int num_rates; | ||
2784 | u8 rates[MAX_SUPPORTED_RATES]; | ||
2785 | }; | ||
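A hedged sketch of converting the host-order wmi_rate_set_arg above into the packed wmi_rate_set wire format, filling bytes LSB-first as the comment describes; the helper name is illustrative only:

static void example_put_rate_set(struct wmi_rate_set *rs,
				 const struct wmi_rate_set_arg *arg)
{
	u32 word;
	unsigned int i;

	memset(rs, 0, sizeof(*rs));
	rs->num_rates = __cpu_to_le32(arg->num_rates);

	for (i = 0; i < arg->num_rates && i < MAX_SUPPORTED_RATES; i++) {
		/* Rate i lands in byte (i % 4) of 32-bit word (i / 4). */
		word = __le32_to_cpu(rs->rates[i / 4]);
		word |= (u32)arg->rates[i] << (8 * (i % 4));
		rs->rates[i / 4] = __cpu_to_le32(word);
	}
}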
2786 | |||
2787 | /* | ||
2788 | * NOTE: It would be a good idea to represent the Tx MCS | ||
2789 | * info in one word and Rx in another word. This is split | ||
2790 | * into multiple words for convenience | ||
2791 | */ | ||
2792 | struct wmi_vht_rate_set { | ||
2793 | __le32 rx_max_rate; /* Max Rx data rate */ | ||
2794 | __le32 rx_mcs_set; /* Negotiated RX VHT rates */ | ||
2795 | __le32 tx_max_rate; /* Max Tx data rate */ | ||
2796 | __le32 tx_mcs_set; /* Negotiated TX VHT rates */ | ||
2797 | } __packed; | ||
2798 | |||
2799 | struct wmi_vht_rate_set_arg { | ||
2800 | u32 rx_max_rate; | ||
2801 | u32 rx_mcs_set; | ||
2802 | u32 tx_max_rate; | ||
2803 | u32 tx_mcs_set; | ||
2804 | }; | ||
2805 | |||
2806 | struct wmi_peer_set_rates_cmd { | ||
2807 | /* peer MAC address */ | ||
2808 | struct wmi_mac_addr peer_macaddr; | ||
2809 | /* legacy rate set */ | ||
2810 | struct wmi_rate_set peer_legacy_rates; | ||
2811 | /* ht rate set */ | ||
2812 | struct wmi_rate_set peer_ht_rates; | ||
2813 | } __packed; | ||
2814 | |||
2815 | struct wmi_peer_set_q_empty_callback_cmd { | ||
2816 | /* unique id identifying the VDEV, generated by the caller */ | ||
2817 | __le32 vdev_id; | ||
2818 | /* peer MAC address */ | ||
2819 | struct wmi_mac_addr peer_macaddr; | ||
2820 | __le32 callback_enable; | ||
2821 | } __packed; | ||
2822 | |||
2823 | #define WMI_PEER_AUTH 0x00000001 | ||
2824 | #define WMI_PEER_QOS 0x00000002 | ||
2825 | #define WMI_PEER_NEED_PTK_4_WAY 0x00000004 | ||
2826 | #define WMI_PEER_NEED_GTK_2_WAY 0x00000010 | ||
2827 | #define WMI_PEER_APSD 0x00000800 | ||
2828 | #define WMI_PEER_HT 0x00001000 | ||
2829 | #define WMI_PEER_40MHZ 0x00002000 | ||
2830 | #define WMI_PEER_STBC 0x00008000 | ||
2831 | #define WMI_PEER_LDPC 0x00010000 | ||
2832 | #define WMI_PEER_DYN_MIMOPS 0x00020000 | ||
2833 | #define WMI_PEER_STATIC_MIMOPS 0x00040000 | ||
2834 | #define WMI_PEER_SPATIAL_MUX 0x00200000 | ||
2835 | #define WMI_PEER_VHT 0x02000000 | ||
2836 | #define WMI_PEER_80MHZ 0x04000000 | ||
2837 | #define WMI_PEER_PMF 0x08000000 | ||
2838 | |||
2839 | /* | ||
2840 | * Peer rate capabilities. | ||
2841 | * | ||
2842 | * This is of interest to the ratecontrol | ||
2843 | * module which resides in the firmware. The bit definitions are | ||
2844 | * consistent with those defined in if_athrate.c. | ||
2845 | */ | ||
2846 | #define WMI_RC_DS_FLAG 0x01 | ||
2847 | #define WMI_RC_CW40_FLAG 0x02 | ||
2848 | #define WMI_RC_SGI_FLAG 0x04 | ||
2849 | #define WMI_RC_HT_FLAG 0x08 | ||
2850 | #define WMI_RC_RTSCTS_FLAG 0x10 | ||
2851 | #define WMI_RC_TX_STBC_FLAG 0x20 | ||
2852 | #define WMI_RC_RX_STBC_FLAG 0xC0 | ||
2853 | #define WMI_RC_RX_STBC_FLAG_S 6 | ||
2854 | #define WMI_RC_WEP_TKIP_FLAG 0x100 | ||
2855 | #define WMI_RC_TS_FLAG 0x200 | ||
2856 | #define WMI_RC_UAPSD_FLAG 0x400 | ||
2857 | |||
2858 | /* Maximum listen interval supported by hw in units of beacon interval */ | ||
2859 | #define ATH10K_MAX_HW_LISTEN_INTERVAL 5 | ||
2860 | |||
2861 | struct wmi_peer_assoc_complete_cmd { | ||
2862 | struct wmi_mac_addr peer_macaddr; | ||
2863 | __le32 vdev_id; | ||
2864 | __le32 peer_new_assoc; /* 1=assoc, 0=reassoc */ | ||
2865 | __le32 peer_associd; /* 16 LSBs */ | ||
2866 | __le32 peer_flags; | ||
2867 | __le32 peer_caps; /* 16 LSBs */ | ||
2868 | __le32 peer_listen_intval; | ||
2869 | __le32 peer_ht_caps; | ||
2870 | __le32 peer_max_mpdu; | ||
2871 | __le32 peer_mpdu_density; /* 0..16 */ | ||
2872 | __le32 peer_rate_caps; | ||
2873 | struct wmi_rate_set peer_legacy_rates; | ||
2874 | struct wmi_rate_set peer_ht_rates; | ||
2875 | __le32 peer_nss; /* num of spatial streams */ | ||
2876 | __le32 peer_vht_caps; | ||
2877 | __le32 peer_phymode; | ||
2878 | struct wmi_vht_rate_set peer_vht_rates; | ||
2879 | /* HT Operation Element of the peer. Five bytes packed into a | ||
2880 | * 2-element INT32 array and filled from LSB to MSB. */ | ||
2881 | __le32 peer_ht_info[2]; | ||
2882 | } __packed; | ||
2883 | |||
2884 | struct wmi_peer_assoc_complete_arg { | ||
2885 | u8 addr[ETH_ALEN]; | ||
2886 | u32 vdev_id; | ||
2887 | bool peer_reassoc; | ||
2888 | u16 peer_aid; | ||
2889 | u32 peer_flags; /* see %WMI_PEER_ */ | ||
2890 | u16 peer_caps; | ||
2891 | u32 peer_listen_intval; | ||
2892 | u32 peer_ht_caps; | ||
2893 | u32 peer_max_mpdu; | ||
2894 | u32 peer_mpdu_density; /* 0..16 */ | ||
2895 | u32 peer_rate_caps; /* see %WMI_RC_ */ | ||
2896 | struct wmi_rate_set_arg peer_legacy_rates; | ||
2897 | struct wmi_rate_set_arg peer_ht_rates; | ||
2898 | u32 peer_num_spatial_streams; | ||
2899 | u32 peer_vht_caps; | ||
2900 | enum wmi_phy_mode peer_phymode; | ||
2901 | struct wmi_vht_rate_set_arg peer_vht_rates; | ||
2902 | }; | ||
2903 | |||
2904 | struct wmi_peer_add_wds_entry_cmd { | ||
2905 | /* peer MAC address */ | ||
2906 | struct wmi_mac_addr peer_macaddr; | ||
2907 | /* wds MAC addr */ | ||
2908 | struct wmi_mac_addr wds_macaddr; | ||
2909 | } __packed; | ||
2910 | |||
2911 | struct wmi_peer_remove_wds_entry_cmd { | ||
2912 | /* wds MAC addr */ | ||
2913 | struct wmi_mac_addr wds_macaddr; | ||
2914 | } __packed; | ||
2915 | |||
2916 | struct wmi_peer_q_empty_callback_event { | ||
2917 | /* peer MAC address */ | ||
2918 | struct wmi_mac_addr peer_macaddr; | ||
2919 | } __packed; | ||
2920 | |||
2921 | /* | ||
2922 | * Channel info WMI event | ||
2923 | */ | ||
2924 | struct wmi_chan_info_event { | ||
2925 | __le32 err_code; | ||
2926 | __le32 freq; | ||
2927 | __le32 cmd_flags; | ||
2928 | __le32 noise_floor; | ||
2929 | __le32 rx_clear_count; | ||
2930 | __le32 cycle_count; | ||
2931 | } __packed; | ||
2932 | |||
2933 | /* Beacon filter wmi command info */ | ||
2934 | #define BCN_FLT_MAX_SUPPORTED_IES 256 | ||
2935 | #define BCN_FLT_MAX_ELEMS_IE_LIST (BCN_FLT_MAX_SUPPORTED_IES / 32) | ||
2936 | |||
2937 | struct bss_bcn_stats { | ||
2938 | __le32 vdev_id; | ||
2939 | __le32 bss_bcnsdropped; | ||
2940 | __le32 bss_bcnsdelivered; | ||
2941 | } __packed; | ||
2942 | |||
2943 | struct bcn_filter_stats { | ||
2944 | __le32 bcns_dropped; | ||
2945 | __le32 bcns_delivered; | ||
2946 | __le32 activefilters; | ||
2947 | struct bss_bcn_stats bss_stats; | ||
2948 | } __packed; | ||
2949 | |||
2950 | struct wmi_add_bcn_filter_cmd { | ||
2951 | u32 vdev_id; | ||
2952 | u32 ie_map[BCN_FLT_MAX_ELEMS_IE_LIST]; | ||
2953 | } __packed; | ||
2954 | |||
2955 | enum wmi_sta_keepalive_method { | ||
2956 | WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1, | ||
2957 | WMI_STA_KEEPALIVE_METHOD_UNSOLICITATED_ARP_RESPONSE = 2, | ||
2958 | }; | ||
2959 | |||
2960 | /* note: ip4 addresses are in network byte order, i.e. big endian */ | ||
2961 | struct wmi_sta_keepalive_arp_resp { | ||
2962 | __be32 src_ip4_addr; | ||
2963 | __be32 dest_ip4_addr; | ||
2964 | struct wmi_mac_addr dest_mac_addr; | ||
2965 | } __packed; | ||
2966 | |||
2967 | struct wmi_sta_keepalive_cmd { | ||
2968 | __le32 vdev_id; | ||
2969 | __le32 enabled; | ||
2970 | __le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */ | ||
2971 | __le32 interval; /* in seconds */ | ||
2972 | struct wmi_sta_keepalive_arp_resp arp_resp; | ||
2973 | } __packed; | ||
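To make the mixed endianness explicit, a hedged sketch of filling this command for the ARP-response keep-alive method; the helper name and parameters are hypothetical, and the IPv4 addresses are left in network byte order as the note above requires:

static void example_fill_keepalive(struct wmi_sta_keepalive_cmd *cmd,
				   u32 vdev_id, u32 interval_secs,
				   __be32 src_ip, __be32 dst_ip,
				   const struct wmi_mac_addr *dst_mac)
{
	/* WMI fields are little-endian on the wire. */
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->enabled = __cpu_to_le32(1);
	cmd->method =
		__cpu_to_le32(WMI_STA_KEEPALIVE_METHOD_UNSOLICITATED_ARP_RESPONSE);
	cmd->interval = __cpu_to_le32(interval_secs);

	/* IPv4 addresses stay big-endian (network byte order). */
	cmd->arp_resp.src_ip4_addr = src_ip;
	cmd->arp_resp.dest_ip4_addr = dst_ip;
	cmd->arp_resp.dest_mac_addr = *dst_mac;
}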
2974 | |||
2975 | #define ATH10K_RTS_MAX 2347 | ||
2976 | #define ATH10K_FRAGMT_THRESHOLD_MIN 540 | ||
2977 | #define ATH10K_FRAGMT_THRESHOLD_MAX 2346 | ||
2978 | |||
2979 | #define WMI_MAX_EVENT 0x1000 | ||
2980 | /* Maximum number of pending TXed WMI packets */ | ||
2981 | #define WMI_MAX_PENDING_TX_COUNT 128 | ||
2982 | #define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr) | ||
2983 | |||
2984 | /* By default disable power save for IBSS */ | ||
2985 | #define ATH10K_DEFAULT_ATIM 0 | ||
2986 | |||
2987 | struct ath10k; | ||
2988 | struct ath10k_vif; | ||
2989 | |||
2990 | int ath10k_wmi_attach(struct ath10k *ar); | ||
2991 | void ath10k_wmi_detach(struct ath10k *ar); | ||
2992 | int ath10k_wmi_wait_for_service_ready(struct ath10k *ar); | ||
2993 | int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar); | ||
2994 | void ath10k_wmi_flush_tx(struct ath10k *ar); | ||
2995 | |||
2996 | int ath10k_wmi_connect_htc_service(struct ath10k *ar); | ||
2997 | int ath10k_wmi_pdev_set_channel(struct ath10k *ar, | ||
2998 | const struct wmi_channel_arg *); | ||
2999 | int ath10k_wmi_pdev_suspend_target(struct ath10k *ar); | ||
3000 | int ath10k_wmi_pdev_resume_target(struct ath10k *ar); | ||
3001 | int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, | ||
3002 | u16 rd5g, u16 ctl2g, u16 ctl5g); | ||
3003 | int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id, | ||
3004 | u32 value); | ||
3005 | int ath10k_wmi_cmd_init(struct ath10k *ar); | ||
3006 | int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *); | ||
3007 | void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *); | ||
3008 | int ath10k_wmi_stop_scan(struct ath10k *ar, | ||
3009 | const struct wmi_stop_scan_arg *arg); | ||
3010 | int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id, | ||
3011 | enum wmi_vdev_type type, | ||
3012 | enum wmi_vdev_subtype subtype, | ||
3013 | const u8 macaddr[ETH_ALEN]); | ||
3014 | int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id); | ||
3015 | int ath10k_wmi_vdev_start(struct ath10k *ar, | ||
3016 | const struct wmi_vdev_start_request_arg *); | ||
3017 | int ath10k_wmi_vdev_restart(struct ath10k *ar, | ||
3018 | const struct wmi_vdev_start_request_arg *); | ||
3019 | int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id); | ||
3020 | int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, | ||
3021 | const u8 *bssid); | ||
3022 | int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id); | ||
3023 | int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, | ||
3024 | enum wmi_vdev_param param_id, u32 param_value); | ||
3025 | int ath10k_wmi_vdev_install_key(struct ath10k *ar, | ||
3026 | const struct wmi_vdev_install_key_arg *arg); | ||
3027 | int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, | ||
3028 | const u8 peer_addr[ETH_ALEN]); | ||
3029 | int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, | ||
3030 | const u8 peer_addr[ETH_ALEN]); | ||
3031 | int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, | ||
3032 | const u8 peer_addr[ETH_ALEN], u32 tid_bitmap); | ||
3033 | int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, | ||
3034 | const u8 *peer_addr, | ||
3035 | enum wmi_peer_param param_id, u32 param_value); | ||
3036 | int ath10k_wmi_peer_assoc(struct ath10k *ar, | ||
3037 | const struct wmi_peer_assoc_complete_arg *arg); | ||
3038 | int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id, | ||
3039 | enum wmi_sta_ps_mode psmode); | ||
3040 | int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id, | ||
3041 | enum wmi_sta_powersave_param param_id, | ||
3042 | u32 value); | ||
3043 | int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac, | ||
3044 | enum wmi_ap_ps_peer_param param_id, u32 value); | ||
3045 | int ath10k_wmi_scan_chan_list(struct ath10k *ar, | ||
3046 | const struct wmi_scan_chan_list_arg *arg); | ||
3047 | int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg); | ||
3048 | int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, | ||
3049 | const struct wmi_pdev_set_wmm_params_arg *arg); | ||
3050 | int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id); | ||
3051 | |||
3052 | #endif /* _WMI_H_ */ | ||
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 7f702fe3ecc2..ce67ab791eae 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c | |||
@@ -60,6 +60,7 @@ | |||
60 | 60 | ||
61 | #include <asm/unaligned.h> | 61 | #include <asm/unaligned.h> |
62 | 62 | ||
63 | #include <net/mac80211.h> | ||
63 | #include "base.h" | 64 | #include "base.h" |
64 | #include "reg.h" | 65 | #include "reg.h" |
65 | #include "debug.h" | 66 | #include "debug.h" |
@@ -666,9 +667,46 @@ static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb) | |||
666 | return htype; | 667 | return htype; |
667 | } | 668 | } |
668 | 669 | ||
670 | static struct ieee80211_rate * | ||
671 | ath5k_get_rate(const struct ieee80211_hw *hw, | ||
672 | const struct ieee80211_tx_info *info, | ||
673 | struct ath5k_buf *bf, int idx) | ||
674 | { | ||
675 | /* | ||
676 | * convert a ieee80211_tx_rate RC-table entry to | ||
677 | * the respective ieee80211_rate struct | ||
678 | */ | ||
679 | if (bf->rates[idx].idx < 0) { | ||
680 | return NULL; | ||
681 | } | ||
682 | |||
683 | return &hw->wiphy->bands[info->band]->bitrates[ bf->rates[idx].idx ]; | ||
684 | } | ||
685 | |||
686 | static u16 | ||
687 | ath5k_get_rate_hw_value(const struct ieee80211_hw *hw, | ||
688 | const struct ieee80211_tx_info *info, | ||
689 | struct ath5k_buf *bf, int idx) | ||
690 | { | ||
691 | struct ieee80211_rate *rate; | ||
692 | u16 hw_rate; | ||
693 | u8 rc_flags; | ||
694 | |||
695 | rate = ath5k_get_rate(hw, info, bf, idx); | ||
696 | if (!rate) | ||
697 | return 0; | ||
698 | |||
699 | rc_flags = bf->rates[idx].flags; | ||
700 | hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ? | ||
701 | rate->hw_value_short : rate->hw_value; | ||
702 | |||
703 | return hw_rate; | ||
704 | } | ||
705 | |||
669 | static int | 706 | static int |
670 | ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf, | 707 | ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf, |
671 | struct ath5k_txq *txq, int padsize) | 708 | struct ath5k_txq *txq, int padsize, |
709 | struct ieee80211_tx_control *control) | ||
672 | { | 710 | { |
673 | struct ath5k_desc *ds = bf->desc; | 711 | struct ath5k_desc *ds = bf->desc; |
674 | struct sk_buff *skb = bf->skb; | 712 | struct sk_buff *skb = bf->skb; |
@@ -688,7 +726,11 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf, | |||
688 | bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len, | 726 | bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len, |
689 | DMA_TO_DEVICE); | 727 | DMA_TO_DEVICE); |
690 | 728 | ||
691 | rate = ieee80211_get_tx_rate(ah->hw, info); | 729 | ieee80211_get_tx_rates(info->control.vif, (control) ? control->sta : NULL, skb, bf->rates, |
730 | ARRAY_SIZE(bf->rates)); | ||
731 | |||
732 | rate = ath5k_get_rate(ah->hw, info, bf, 0); | ||
733 | |||
692 | if (!rate) { | 734 | if (!rate) { |
693 | ret = -EINVAL; | 735 | ret = -EINVAL; |
694 | goto err_unmap; | 736 | goto err_unmap; |
@@ -698,8 +740,8 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf, | |||
698 | flags |= AR5K_TXDESC_NOACK; | 740 | flags |= AR5K_TXDESC_NOACK; |
699 | 741 | ||
700 | rc_flags = info->control.rates[0].flags; | 742 | rc_flags = info->control.rates[0].flags; |
701 | hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ? | 743 | |
702 | rate->hw_value_short : rate->hw_value; | 744 | hw_rate = ath5k_get_rate_hw_value(ah->hw, info, bf, 0); |
703 | 745 | ||
704 | pktlen = skb->len; | 746 | pktlen = skb->len; |
705 | 747 | ||
@@ -722,12 +764,13 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf, | |||
722 | duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw, | 764 | duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw, |
723 | info->control.vif, pktlen, info)); | 765 | info->control.vif, pktlen, info)); |
724 | } | 766 | } |
767 | |||
725 | ret = ah->ah_setup_tx_desc(ah, ds, pktlen, | 768 | ret = ah->ah_setup_tx_desc(ah, ds, pktlen, |
726 | ieee80211_get_hdrlen_from_skb(skb), padsize, | 769 | ieee80211_get_hdrlen_from_skb(skb), padsize, |
727 | get_hw_packet_type(skb), | 770 | get_hw_packet_type(skb), |
728 | (ah->ah_txpower.txp_requested * 2), | 771 | (ah->ah_txpower.txp_requested * 2), |
729 | hw_rate, | 772 | hw_rate, |
730 | info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags, | 773 | bf->rates[0].count, keyidx, ah->ah_tx_ant, flags, |
731 | cts_rate, duration); | 774 | cts_rate, duration); |
732 | if (ret) | 775 | if (ret) |
733 | goto err_unmap; | 776 | goto err_unmap; |
@@ -736,13 +779,15 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf, | |||
736 | if (ah->ah_capabilities.cap_has_mrr_support) { | 779 | if (ah->ah_capabilities.cap_has_mrr_support) { |
737 | memset(mrr_rate, 0, sizeof(mrr_rate)); | 780 | memset(mrr_rate, 0, sizeof(mrr_rate)); |
738 | memset(mrr_tries, 0, sizeof(mrr_tries)); | 781 | memset(mrr_tries, 0, sizeof(mrr_tries)); |
782 | |||
739 | for (i = 0; i < 3; i++) { | 783 | for (i = 0; i < 3; i++) { |
740 | rate = ieee80211_get_alt_retry_rate(ah->hw, info, i); | 784 | |
785 | rate = ath5k_get_rate(ah->hw, info, bf, i); | ||
741 | if (!rate) | 786 | if (!rate) |
742 | break; | 787 | break; |
743 | 788 | ||
744 | mrr_rate[i] = rate->hw_value; | 789 | mrr_rate[i] = ath5k_get_rate_hw_value(ah->hw, info, bf, i); |
745 | mrr_tries[i] = info->control.rates[i + 1].count; | 790 | mrr_tries[i] = bf->rates[i].count; |
746 | } | 791 | } |
747 | 792 | ||
748 | ath5k_hw_setup_mrr_tx_desc(ah, ds, | 793 | ath5k_hw_setup_mrr_tx_desc(ah, ds, |
@@ -1515,7 +1560,7 @@ unlock: | |||
1515 | 1560 | ||
1516 | void | 1561 | void |
1517 | ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, | 1562 | ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, |
1518 | struct ath5k_txq *txq) | 1563 | struct ath5k_txq *txq, struct ieee80211_tx_control *control) |
1519 | { | 1564 | { |
1520 | struct ath5k_hw *ah = hw->priv; | 1565 | struct ath5k_hw *ah = hw->priv; |
1521 | struct ath5k_buf *bf; | 1566 | struct ath5k_buf *bf; |
@@ -1555,7 +1600,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
1555 | 1600 | ||
1556 | bf->skb = skb; | 1601 | bf->skb = skb; |
1557 | 1602 | ||
1558 | if (ath5k_txbuf_setup(ah, bf, txq, padsize)) { | 1603 | if (ath5k_txbuf_setup(ah, bf, txq, padsize, control)) { |
1559 | bf->skb = NULL; | 1604 | bf->skb = NULL; |
1560 | spin_lock_irqsave(&ah->txbuflock, flags); | 1605 | spin_lock_irqsave(&ah->txbuflock, flags); |
1561 | list_add_tail(&bf->list, &ah->txbuf); | 1606 | list_add_tail(&bf->list, &ah->txbuf); |
@@ -1571,11 +1616,13 @@ drop_packet: | |||
1571 | 1616 | ||
1572 | static void | 1617 | static void |
1573 | ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb, | 1618 | ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb, |
1574 | struct ath5k_txq *txq, struct ath5k_tx_status *ts) | 1619 | struct ath5k_txq *txq, struct ath5k_tx_status *ts, |
1620 | struct ath5k_buf *bf) | ||
1575 | { | 1621 | { |
1576 | struct ieee80211_tx_info *info; | 1622 | struct ieee80211_tx_info *info; |
1577 | u8 tries[3]; | 1623 | u8 tries[3]; |
1578 | int i; | 1624 | int i; |
1625 | int size = 0; | ||
1579 | 1626 | ||
1580 | ah->stats.tx_all_count++; | 1627 | ah->stats.tx_all_count++; |
1581 | ah->stats.tx_bytes_count += skb->len; | 1628 | ah->stats.tx_bytes_count += skb->len; |
@@ -1587,6 +1634,9 @@ ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb, | |||
1587 | 1634 | ||
1588 | ieee80211_tx_info_clear_status(info); | 1635 | ieee80211_tx_info_clear_status(info); |
1589 | 1636 | ||
1637 | size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates)); | ||
1638 | memcpy(info->status.rates, bf->rates, size); | ||
1639 | |||
1590 | for (i = 0; i < ts->ts_final_idx; i++) { | 1640 | for (i = 0; i < ts->ts_final_idx; i++) { |
1591 | struct ieee80211_tx_rate *r = | 1641 | struct ieee80211_tx_rate *r = |
1592 | &info->status.rates[i]; | 1642 | &info->status.rates[i]; |
@@ -1663,7 +1713,7 @@ ath5k_tx_processq(struct ath5k_hw *ah, struct ath5k_txq *txq) | |||
1663 | 1713 | ||
1664 | dma_unmap_single(ah->dev, bf->skbaddr, skb->len, | 1714 | dma_unmap_single(ah->dev, bf->skbaddr, skb->len, |
1665 | DMA_TO_DEVICE); | 1715 | DMA_TO_DEVICE); |
1666 | ath5k_tx_frame_completed(ah, skb, txq, &ts); | 1716 | ath5k_tx_frame_completed(ah, skb, txq, &ts, bf); |
1667 | } | 1717 | } |
1668 | 1718 | ||
1669 | /* | 1719 | /* |
@@ -1917,7 +1967,7 @@ ath5k_beacon_send(struct ath5k_hw *ah) | |||
1917 | 1967 | ||
1918 | skb = ieee80211_get_buffered_bc(ah->hw, vif); | 1968 | skb = ieee80211_get_buffered_bc(ah->hw, vif); |
1919 | while (skb) { | 1969 | while (skb) { |
1920 | ath5k_tx_queue(ah->hw, skb, ah->cabq); | 1970 | ath5k_tx_queue(ah->hw, skb, ah->cabq, NULL); |
1921 | 1971 | ||
1922 | if (ah->cabq->txq_len >= ah->cabq->txq_max) | 1972 | if (ah->cabq->txq_len >= ah->cabq->txq_max) |
1923 | break; | 1973 | break; |
@@ -2442,7 +2492,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops) | |||
2442 | IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | | 2492 | IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | |
2443 | IEEE80211_HW_SIGNAL_DBM | | 2493 | IEEE80211_HW_SIGNAL_DBM | |
2444 | IEEE80211_HW_MFP_CAPABLE | | 2494 | IEEE80211_HW_MFP_CAPABLE | |
2445 | IEEE80211_HW_REPORTS_TX_ACK_STATUS; | 2495 | IEEE80211_HW_REPORTS_TX_ACK_STATUS | |
2496 | IEEE80211_HW_SUPPORTS_RC_TABLE; | ||
2446 | 2497 | ||
2447 | hw->wiphy->interface_modes = | 2498 | hw->wiphy->interface_modes = |
2448 | BIT(NL80211_IFTYPE_AP) | | 2499 | BIT(NL80211_IFTYPE_AP) | |
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h index 6c94c7ff2350..ca9a83ceeee1 100644 --- a/drivers/net/wireless/ath/ath5k/base.h +++ b/drivers/net/wireless/ath/ath5k/base.h | |||
@@ -47,6 +47,7 @@ struct ath5k_hw; | |||
47 | struct ath5k_txq; | 47 | struct ath5k_txq; |
48 | struct ieee80211_channel; | 48 | struct ieee80211_channel; |
49 | struct ath_bus_ops; | 49 | struct ath_bus_ops; |
50 | struct ieee80211_tx_control; | ||
50 | enum nl80211_iftype; | 51 | enum nl80211_iftype; |
51 | 52 | ||
52 | enum ath5k_srev_type { | 53 | enum ath5k_srev_type { |
@@ -61,11 +62,12 @@ struct ath5k_srev_name { | |||
61 | }; | 62 | }; |
62 | 63 | ||
63 | struct ath5k_buf { | 64 | struct ath5k_buf { |
64 | struct list_head list; | 65 | struct list_head list; |
65 | struct ath5k_desc *desc; /* virtual addr of desc */ | 66 | struct ath5k_desc *desc; /* virtual addr of desc */ |
66 | dma_addr_t daddr; /* physical addr of desc */ | 67 | dma_addr_t daddr; /* physical addr of desc */ |
67 | struct sk_buff *skb; /* skbuff for buf */ | 68 | struct sk_buff *skb; /* skbuff for buf */ |
68 | dma_addr_t skbaddr;/* physical addr of skb data */ | 69 | dma_addr_t skbaddr; /* physical addr of skb data */ |
70 | struct ieee80211_tx_rate rates[4]; /* number of multi-rate stages */ | ||
69 | }; | 71 | }; |
70 | 72 | ||
71 | struct ath5k_vif { | 73 | struct ath5k_vif { |
@@ -103,7 +105,7 @@ int ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan); | |||
103 | void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf); | 105 | void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf); |
104 | void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf); | 106 | void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf); |
105 | void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, | 107 | void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, |
106 | struct ath5k_txq *txq); | 108 | struct ath5k_txq *txq, struct ieee80211_tx_control *control); |
107 | 109 | ||
108 | const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val); | 110 | const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val); |
109 | 111 | ||
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index 06f86f435711..81b686c6a376 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c | |||
@@ -66,7 +66,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, | |||
66 | return; | 66 | return; |
67 | } | 67 | } |
68 | 68 | ||
69 | ath5k_tx_queue(hw, skb, &ah->txqs[qnum]); | 69 | ath5k_tx_queue(hw, skb, &ah->txqs[qnum], control); |
70 | } | 70 | } |
71 | 71 | ||
72 | 72 | ||
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 5c9736a94e54..2437ad26949d 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c | |||
@@ -3175,10 +3175,21 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, | |||
3175 | { | 3175 | { |
3176 | struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev); | 3176 | struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev); |
3177 | struct ath6kl *ar = ath6kl_priv(vif->ndev); | 3177 | struct ath6kl *ar = ath6kl_priv(vif->ndev); |
3178 | u32 id; | 3178 | u32 id, freq; |
3179 | const struct ieee80211_mgmt *mgmt; | 3179 | const struct ieee80211_mgmt *mgmt; |
3180 | bool more_data, queued; | 3180 | bool more_data, queued; |
3181 | 3181 | ||
3182 | /* default to the current channel, but use the one specified as argument | ||
3183 | * if any | ||
3184 | */ | ||
3185 | freq = vif->ch_hint; | ||
3186 | if (chan) | ||
3187 | freq = chan->center_freq; | ||
3188 | |||
3189 | /* never send freq zero to the firmware */ | ||
3190 | if (WARN_ON(freq == 0)) | ||
3191 | return -EINVAL; | ||
3192 | |||
3182 | mgmt = (const struct ieee80211_mgmt *) buf; | 3193 | mgmt = (const struct ieee80211_mgmt *) buf; |
3183 | if (vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) && | 3194 | if (vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) && |
3184 | ieee80211_is_probe_resp(mgmt->frame_control) && | 3195 | ieee80211_is_probe_resp(mgmt->frame_control) && |
@@ -3188,8 +3199,7 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, | |||
3188 | * command to allow the target to fill in the generic IEs. | 3199 | * command to allow the target to fill in the generic IEs. |
3189 | */ | 3200 | */ |
3190 | *cookie = 0; /* TX status not supported */ | 3201 | *cookie = 0; /* TX status not supported */ |
3191 | return ath6kl_send_go_probe_resp(vif, buf, len, | 3202 | return ath6kl_send_go_probe_resp(vif, buf, len, freq); |
3192 | chan->center_freq); | ||
3193 | } | 3203 | } |
3194 | 3204 | ||
3195 | id = vif->send_action_id++; | 3205 | id = vif->send_action_id++; |
@@ -3205,17 +3215,14 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, | |||
3205 | 3215 | ||
3206 | /* AP mode Power saving processing */ | 3216 | /* AP mode Power saving processing */ |
3207 | if (vif->nw_type == AP_NETWORK) { | 3217 | if (vif->nw_type == AP_NETWORK) { |
3208 | queued = ath6kl_mgmt_powersave_ap(vif, | 3218 | queued = ath6kl_mgmt_powersave_ap(vif, id, freq, wait, buf, len, |
3209 | id, chan->center_freq, | 3219 | &more_data, no_cck); |
3210 | wait, buf, | ||
3211 | len, &more_data, no_cck); | ||
3212 | if (queued) | 3220 | if (queued) |
3213 | return 0; | 3221 | return 0; |
3214 | } | 3222 | } |
3215 | 3223 | ||
3216 | return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id, | 3224 | return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id, freq, |
3217 | chan->center_freq, wait, | 3225 | wait, buf, len, no_cck); |
3218 | buf, len, no_cck); | ||
3219 | } | 3226 | } |
3220 | 3227 | ||
3221 | static void ath6kl_mgmt_frame_register(struct wiphy *wiphy, | 3228 | static void ath6kl_mgmt_frame_register(struct wiphy *wiphy, |
@@ -3679,6 +3686,20 @@ err: | |||
3679 | return NULL; | 3686 | return NULL; |
3680 | } | 3687 | } |
3681 | 3688 | ||
3689 | #ifdef CONFIG_PM | ||
3690 | static const struct wiphy_wowlan_support ath6kl_wowlan_support = { | ||
3691 | .flags = WIPHY_WOWLAN_MAGIC_PKT | | ||
3692 | WIPHY_WOWLAN_DISCONNECT | | ||
3693 | WIPHY_WOWLAN_GTK_REKEY_FAILURE | | ||
3694 | WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | | ||
3695 | WIPHY_WOWLAN_EAP_IDENTITY_REQ | | ||
3696 | WIPHY_WOWLAN_4WAY_HANDSHAKE, | ||
3697 | .n_patterns = WOW_MAX_FILTERS_PER_LIST, | ||
3698 | .pattern_min_len = 1, | ||
3699 | .pattern_max_len = WOW_PATTERN_SIZE, | ||
3700 | }; | ||
3701 | #endif | ||
3702 | |||
3682 | int ath6kl_cfg80211_init(struct ath6kl *ar) | 3703 | int ath6kl_cfg80211_init(struct ath6kl *ar) |
3683 | { | 3704 | { |
3684 | struct wiphy *wiphy = ar->wiphy; | 3705 | struct wiphy *wiphy = ar->wiphy; |
@@ -3772,15 +3793,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar) | |||
3772 | wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); | 3793 | wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); |
3773 | 3794 | ||
3774 | #ifdef CONFIG_PM | 3795 | #ifdef CONFIG_PM |
3775 | wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | | 3796 | wiphy->wowlan = &ath6kl_wowlan_support; |
3776 | WIPHY_WOWLAN_DISCONNECT | | ||
3777 | WIPHY_WOWLAN_GTK_REKEY_FAILURE | | ||
3778 | WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | | ||
3779 | WIPHY_WOWLAN_EAP_IDENTITY_REQ | | ||
3780 | WIPHY_WOWLAN_4WAY_HANDSHAKE; | ||
3781 | wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST; | ||
3782 | wiphy->wowlan.pattern_min_len = 1; | ||
3783 | wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE; | ||
3784 | #endif | 3797 | #endif |
3785 | 3798 | ||
3786 | wiphy->max_sched_scan_ssids = MAX_PROBED_SSIDS; | 3799 | wiphy->max_sched_scan_ssids = MAX_PROBED_SSIDS; |
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c index fe38b836cb26..dbfd17d0a5fa 100644 --- a/drivers/net/wireless/ath/ath6kl/debug.c +++ b/drivers/net/wireless/ath/ath6kl/debug.c | |||
@@ -1240,20 +1240,14 @@ static ssize_t ath6kl_force_roam_write(struct file *file, | |||
1240 | char buf[20]; | 1240 | char buf[20]; |
1241 | size_t len; | 1241 | size_t len; |
1242 | u8 bssid[ETH_ALEN]; | 1242 | u8 bssid[ETH_ALEN]; |
1243 | int i; | ||
1244 | int addr[ETH_ALEN]; | ||
1245 | 1243 | ||
1246 | len = min(count, sizeof(buf) - 1); | 1244 | len = min(count, sizeof(buf) - 1); |
1247 | if (copy_from_user(buf, user_buf, len)) | 1245 | if (copy_from_user(buf, user_buf, len)) |
1248 | return -EFAULT; | 1246 | return -EFAULT; |
1249 | buf[len] = '\0'; | 1247 | buf[len] = '\0'; |
1250 | 1248 | ||
1251 | if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x", | 1249 | if (!mac_pton(buf, bssid)) |
1252 | &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) | ||
1253 | != ETH_ALEN) | ||
1254 | return -EINVAL; | 1250 | return -EINVAL; |
1255 | for (i = 0; i < ETH_ALEN; i++) | ||
1256 | bssid[i] = addr[i]; | ||
1257 | 1251 | ||
1258 | ret = ath6kl_wmi_force_roam_cmd(ar->wmi, bssid); | 1252 | ret = ath6kl_wmi_force_roam_cmd(ar->wmi, bssid); |
1259 | if (ret) | 1253 | if (ret) |
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c index 40ffee6184fd..6a67881f94d6 100644 --- a/drivers/net/wireless/ath/ath6kl/init.c +++ b/drivers/net/wireless/ath/ath6kl/init.c | |||
@@ -1696,10 +1696,16 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar) | |||
1696 | test_bit(WMI_READY, | 1696 | test_bit(WMI_READY, |
1697 | &ar->flag), | 1697 | &ar->flag), |
1698 | WMI_TIMEOUT); | 1698 | WMI_TIMEOUT); |
1699 | if (timeleft <= 0) { | ||
1700 | clear_bit(WMI_READY, &ar->flag); | ||
1701 | ath6kl_err("wmi is not ready or wait was interrupted: %ld\n", | ||
1702 | timeleft); | ||
1703 | ret = -EIO; | ||
1704 | goto err_htc_stop; | ||
1705 | } | ||
1699 | 1706 | ||
1700 | ath6kl_dbg(ATH6KL_DBG_BOOT, "firmware booted\n"); | 1707 | ath6kl_dbg(ATH6KL_DBG_BOOT, "firmware booted\n"); |
1701 | 1708 | ||
1702 | |||
1703 | if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) { | 1709 | if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) { |
1704 | ath6kl_info("%s %s fw %s api %d%s\n", | 1710 | ath6kl_info("%s %s fw %s api %d%s\n", |
1705 | ar->hw.name, | 1711 | ar->hw.name, |
@@ -1718,12 +1724,6 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar) | |||
1718 | goto err_htc_stop; | 1724 | goto err_htc_stop; |
1719 | } | 1725 | } |
1720 | 1726 | ||
1721 | if (!timeleft || signal_pending(current)) { | ||
1722 | ath6kl_err("wmi is not ready or wait was interrupted\n"); | ||
1723 | ret = -EIO; | ||
1724 | goto err_htc_stop; | ||
1725 | } | ||
1726 | |||
1727 | ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__); | 1727 | ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__); |
1728 | 1728 | ||
1729 | /* communicate the wmi protocol version to the target */ | 1729 | /* communicate the wmi protocol version to the target */ |
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c index fb141454c6d2..7126bdd4236c 100644 --- a/drivers/net/wireless/ath/ath6kl/sdio.c +++ b/drivers/net/wireless/ath/ath6kl/sdio.c | |||
@@ -345,17 +345,17 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio, | |||
345 | { | 345 | { |
346 | struct hif_scatter_req *s_req; | 346 | struct hif_scatter_req *s_req; |
347 | struct bus_request *bus_req; | 347 | struct bus_request *bus_req; |
348 | int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz; | 348 | int i, scat_req_sz, scat_list_sz, size; |
349 | u8 *virt_buf; | 349 | u8 *virt_buf; |
350 | 350 | ||
351 | scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item); | 351 | scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item); |
352 | scat_req_sz = sizeof(*s_req) + scat_list_sz; | 352 | scat_req_sz = sizeof(*s_req) + scat_list_sz; |
353 | 353 | ||
354 | if (!virt_scat) | 354 | if (!virt_scat) |
355 | sg_sz = sizeof(struct scatterlist) * n_scat_entry; | 355 | size = sizeof(struct scatterlist) * n_scat_entry; |
356 | else | 356 | else |
357 | buf_sz = 2 * L1_CACHE_BYTES + | 357 | size = 2 * L1_CACHE_BYTES + |
358 | ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; | 358 | ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; |
359 | 359 | ||
360 | for (i = 0; i < n_scat_req; i++) { | 360 | for (i = 0; i < n_scat_req; i++) { |
361 | /* allocate the scatter request */ | 361 | /* allocate the scatter request */ |
@@ -364,7 +364,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio, | |||
364 | return -ENOMEM; | 364 | return -ENOMEM; |
365 | 365 | ||
366 | if (virt_scat) { | 366 | if (virt_scat) { |
367 | virt_buf = kzalloc(buf_sz, GFP_KERNEL); | 367 | virt_buf = kzalloc(size, GFP_KERNEL); |
368 | if (!virt_buf) { | 368 | if (!virt_buf) { |
369 | kfree(s_req); | 369 | kfree(s_req); |
370 | return -ENOMEM; | 370 | return -ENOMEM; |
@@ -374,7 +374,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio, | |||
374 | (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf); | 374 | (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf); |
375 | } else { | 375 | } else { |
376 | /* allocate sglist */ | 376 | /* allocate sglist */ |
377 | s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL); | 377 | s_req->sgentries = kzalloc(size, GFP_KERNEL); |
378 | 378 | ||
379 | if (!s_req->sgentries) { | 379 | if (!s_req->sgentries) { |
380 | kfree(s_req); | 380 | kfree(s_req); |
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c index bed0d337712d..f38ff6a6255e 100644 --- a/drivers/net/wireless/ath/ath6kl/usb.c +++ b/drivers/net/wireless/ath/ath6kl/usb.c | |||
@@ -1061,6 +1061,22 @@ static void ath6kl_usb_cleanup_scatter(struct ath6kl *ar) | |||
1061 | return; | 1061 | return; |
1062 | } | 1062 | } |
1063 | 1063 | ||
1064 | static int ath6kl_usb_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) | ||
1065 | { | ||
1066 | /* | ||
1067 | * cfg80211 suspend/WOW currently not supported for USB. | ||
1068 | */ | ||
1069 | return 0; | ||
1070 | } | ||
1071 | |||
1072 | static int ath6kl_usb_resume(struct ath6kl *ar) | ||
1073 | { | ||
1074 | /* | ||
1075 | * cfg80211 resume currently not supported for USB. | ||
1076 | */ | ||
1077 | return 0; | ||
1078 | } | ||
1079 | |||
1064 | static const struct ath6kl_hif_ops ath6kl_usb_ops = { | 1080 | static const struct ath6kl_hif_ops ath6kl_usb_ops = { |
1065 | .diag_read32 = ath6kl_usb_diag_read32, | 1081 | .diag_read32 = ath6kl_usb_diag_read32, |
1066 | .diag_write32 = ath6kl_usb_diag_write32, | 1082 | .diag_write32 = ath6kl_usb_diag_write32, |
@@ -1074,6 +1090,8 @@ static const struct ath6kl_hif_ops ath6kl_usb_ops = { | |||
1074 | .pipe_map_service = ath6kl_usb_map_service_pipe, | 1090 | .pipe_map_service = ath6kl_usb_map_service_pipe, |
1075 | .pipe_get_free_queue_number = ath6kl_usb_get_free_queue_number, | 1091 | .pipe_get_free_queue_number = ath6kl_usb_get_free_queue_number, |
1076 | .cleanup_scatter = ath6kl_usb_cleanup_scatter, | 1092 | .cleanup_scatter = ath6kl_usb_cleanup_scatter, |
1093 | .suspend = ath6kl_usb_suspend, | ||
1094 | .resume = ath6kl_usb_resume, | ||
1077 | }; | 1095 | }; |
1078 | 1096 | ||
1079 | /* ath6kl usb driver registered functions */ | 1097 | /* ath6kl usb driver registered functions */ |
@@ -1152,7 +1170,7 @@ static void ath6kl_usb_remove(struct usb_interface *interface) | |||
1152 | 1170 | ||
1153 | #ifdef CONFIG_PM | 1171 | #ifdef CONFIG_PM |
1154 | 1172 | ||
1155 | static int ath6kl_usb_suspend(struct usb_interface *interface, | 1173 | static int ath6kl_usb_pm_suspend(struct usb_interface *interface, |
1156 | pm_message_t message) | 1174 | pm_message_t message) |
1157 | { | 1175 | { |
1158 | struct ath6kl_usb *device; | 1176 | struct ath6kl_usb *device; |
@@ -1162,7 +1180,7 @@ static int ath6kl_usb_suspend(struct usb_interface *interface, | |||
1162 | return 0; | 1180 | return 0; |
1163 | } | 1181 | } |
1164 | 1182 | ||
1165 | static int ath6kl_usb_resume(struct usb_interface *interface) | 1183 | static int ath6kl_usb_pm_resume(struct usb_interface *interface) |
1166 | { | 1184 | { |
1167 | struct ath6kl_usb *device; | 1185 | struct ath6kl_usb *device; |
1168 | device = usb_get_intfdata(interface); | 1186 | device = usb_get_intfdata(interface); |
@@ -1175,7 +1193,7 @@ static int ath6kl_usb_resume(struct usb_interface *interface) | |||
1175 | return 0; | 1193 | return 0; |
1176 | } | 1194 | } |
1177 | 1195 | ||
1178 | static int ath6kl_usb_reset_resume(struct usb_interface *intf) | 1196 | static int ath6kl_usb_pm_reset_resume(struct usb_interface *intf) |
1179 | { | 1197 | { |
1180 | if (usb_get_intfdata(intf)) | 1198 | if (usb_get_intfdata(intf)) |
1181 | ath6kl_usb_remove(intf); | 1199 | ath6kl_usb_remove(intf); |
@@ -1184,9 +1202,9 @@ static int ath6kl_usb_reset_resume(struct usb_interface *intf) | |||
1184 | 1202 | ||
1185 | #else | 1203 | #else |
1186 | 1204 | ||
1187 | #define ath6kl_usb_suspend NULL | 1205 | #define ath6kl_usb_pm_suspend NULL |
1188 | #define ath6kl_usb_resume NULL | 1206 | #define ath6kl_usb_pm_resume NULL |
1189 | #define ath6kl_usb_reset_resume NULL | 1207 | #define ath6kl_usb_pm_reset_resume NULL |
1190 | 1208 | ||
1191 | #endif | 1209 | #endif |
1192 | 1210 | ||
@@ -1201,9 +1219,9 @@ MODULE_DEVICE_TABLE(usb, ath6kl_usb_ids); | |||
1201 | static struct usb_driver ath6kl_usb_driver = { | 1219 | static struct usb_driver ath6kl_usb_driver = { |
1202 | .name = "ath6kl_usb", | 1220 | .name = "ath6kl_usb", |
1203 | .probe = ath6kl_usb_probe, | 1221 | .probe = ath6kl_usb_probe, |
1204 | .suspend = ath6kl_usb_suspend, | 1222 | .suspend = ath6kl_usb_pm_suspend, |
1205 | .resume = ath6kl_usb_resume, | 1223 | .resume = ath6kl_usb_pm_resume, |
1206 | .reset_resume = ath6kl_usb_reset_resume, | 1224 | .reset_resume = ath6kl_usb_pm_reset_resume, |
1207 | .disconnect = ath6kl_usb_remove, | 1225 | .disconnect = ath6kl_usb_remove, |
1208 | .id_table = ath6kl_usb_ids, | 1226 | .id_table = ath6kl_usb_ids, |
1209 | .supports_autosuspend = true, | 1227 | .supports_autosuspend = true, |
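The usb.c changes above add no-op suspend/resume handlers at the ath6kl HIF level (the path cfg80211 suspend/WOW goes through) and rename the usb_driver power-management callbacks with a _pm_ prefix so the two layers no longer share names. A hedged sketch of that split, with simplified types in place of the real ath6kl structures:

/* HIF-level hooks as seen by the ath6kl core; names and types here are
 * illustrative, not the driver's real ones. */
struct hif_pm_ops_sketch {
	int (*suspend)(void *ar, void *wow);
	int (*resume)(void *ar);
};

/* USB has no cfg80211 suspend/WOW support yet, so the hooks simply
 * report success rather than leaving the ops table empty. */
static int usb_hif_suspend_sketch(void *ar, void *wow) { return 0; }
static int usb_hif_resume_sketch(void *ar) { return 0; }

static const struct hif_pm_ops_sketch usb_hif_pm_sketch = {
	.suspend = usb_hif_suspend_sketch,
	.resume  = usb_hif_resume_sketch,
};

The usb_driver-level ath6kl_usb_pm_* callbacks stay separate and keep handling bus suspend/resume for the USB interface itself.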
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c index e91725bf401c..4994bea809eb 100644 --- a/drivers/net/wireless/ath/ath9k/ani.c +++ b/drivers/net/wireless/ath/ath9k/ani.c | |||
@@ -46,8 +46,8 @@ static const struct ani_ofdm_level_entry ofdm_level_table[] = { | |||
46 | { 5, 4, 1 }, /* lvl 5 */ | 46 | { 5, 4, 1 }, /* lvl 5 */ |
47 | { 6, 5, 1 }, /* lvl 6 */ | 47 | { 6, 5, 1 }, /* lvl 6 */ |
48 | { 7, 6, 1 }, /* lvl 7 */ | 48 | { 7, 6, 1 }, /* lvl 7 */ |
49 | { 7, 6, 0 }, /* lvl 8 */ | 49 | { 7, 7, 1 }, /* lvl 8 */ |
50 | { 7, 7, 0 } /* lvl 9 */ | 50 | { 7, 8, 0 } /* lvl 9 */ |
51 | }; | 51 | }; |
52 | #define ATH9K_ANI_OFDM_NUM_LEVEL \ | 52 | #define ATH9K_ANI_OFDM_NUM_LEVEL \ |
53 | ARRAY_SIZE(ofdm_level_table) | 53 | ARRAY_SIZE(ofdm_level_table) |
@@ -91,8 +91,8 @@ static const struct ani_cck_level_entry cck_level_table[] = { | |||
91 | { 4, 0 }, /* lvl 4 */ | 91 | { 4, 0 }, /* lvl 4 */ |
92 | { 5, 0 }, /* lvl 5 */ | 92 | { 5, 0 }, /* lvl 5 */ |
93 | { 6, 0 }, /* lvl 6 */ | 93 | { 6, 0 }, /* lvl 6 */ |
94 | { 6, 0 }, /* lvl 7 (only for high rssi) */ | 94 | { 7, 0 }, /* lvl 7 (only for high rssi) */ |
95 | { 7, 0 } /* lvl 8 (only for high rssi) */ | 95 | { 8, 0 } /* lvl 8 (only for high rssi) */ |
96 | }; | 96 | }; |
97 | 97 | ||
98 | #define ATH9K_ANI_CCK_NUM_LEVEL \ | 98 | #define ATH9K_ANI_CCK_NUM_LEVEL \ |
@@ -177,10 +177,15 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel, | |||
177 | BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_HIGH) | 177 | BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_HIGH) |
178 | weak_sig = true; | 178 | weak_sig = true; |
179 | 179 | ||
180 | if (aniState->ofdmWeakSigDetect != weak_sig) | 180 | /* |
181 | ath9k_hw_ani_control(ah, | 181 | * OFDM Weak signal detection is always enabled for AP mode. |
182 | ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, | 182 | */ |
183 | entry_ofdm->ofdm_weak_signal_on); | 183 | if (ah->opmode != NL80211_IFTYPE_AP && |
184 | aniState->ofdmWeakSigDetect != weak_sig) { | ||
185 | ath9k_hw_ani_control(ah, | ||
186 | ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, | ||
187 | entry_ofdm->ofdm_weak_signal_on); | ||
188 | } | ||
184 | 189 | ||
185 | if (aniState->ofdmNoiseImmunityLevel >= ATH9K_ANI_OFDM_DEF_LEVEL) { | 190 | if (aniState->ofdmNoiseImmunityLevel >= ATH9K_ANI_OFDM_DEF_LEVEL) { |
186 | ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH; | 191 | ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH; |
@@ -363,18 +368,7 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning) | |||
363 | ath9k_hw_set_ofdm_nil(ah, ofdm_nil, is_scanning); | 368 | ath9k_hw_set_ofdm_nil(ah, ofdm_nil, is_scanning); |
364 | ath9k_hw_set_cck_nil(ah, cck_nil, is_scanning); | 369 | ath9k_hw_set_cck_nil(ah, cck_nil, is_scanning); |
365 | 370 | ||
366 | /* | ||
367 | * enable phy counters if hw supports or if not, enable phy | ||
368 | * interrupts (so we can count each one) | ||
369 | */ | ||
370 | ath9k_ani_restart(ah); | 371 | ath9k_ani_restart(ah); |
371 | |||
372 | ENABLE_REGWRITE_BUFFER(ah); | ||
373 | |||
374 | REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); | ||
375 | REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); | ||
376 | |||
377 | REGWRITE_BUFFER_FLUSH(ah); | ||
378 | } | 372 | } |
379 | 373 | ||
380 | static bool ath9k_hw_ani_read_counters(struct ath_hw *ah) | 374 | static bool ath9k_hw_ani_read_counters(struct ath_hw *ah) |
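The ani.c hunk above adjusts the top OFDM and CCK immunity-level entries and, more importantly, stops toggling OFDM weak-signal detection when operating as an AP, where it is meant to stay enabled. A small sketch of that guard, with simplified types (the real code compares ah->opmode against NL80211_IFTYPE_AP and aniState->ofdmWeakSigDetect against the desired state):

enum opmode_sketch { OPMODE_STA_SKETCH, OPMODE_AP_SKETCH };

/* Reprogram weak-signal detection only for non-AP interfaces, and only
 * when the desired state differs from the current one. */
static int should_set_weak_sig(enum opmode_sketch opmode,
			       int cur_weak_sig, int want_weak_sig)
{
	if (opmode == OPMODE_AP_SKETCH)
		return 0;	/* leave it alone: always on for AP */

	return cur_weak_sig != want_weak_sig;
}

The reset path also drops the explicit AR_PHY_ERR_MASK_1/2 writes that used to follow ath9k_ani_restart().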
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h index 78b9fa9f6455..b54a3fb01883 100644 --- a/drivers/net/wireless/ath/ath9k/ani.h +++ b/drivers/net/wireless/ath/ath9k/ani.h | |||
@@ -20,20 +20,15 @@ | |||
20 | #define BEACON_RSSI(ahp) (ahp->stats.avgbrssi) | 20 | #define BEACON_RSSI(ahp) (ahp->stats.avgbrssi) |
21 | 21 | ||
22 | /* units are errors per second */ | 22 | /* units are errors per second */ |
23 | #define ATH9K_ANI_OFDM_TRIG_HIGH 3500 | 23 | #define ATH9K_ANI_OFDM_TRIG_HIGH 3500 |
24 | #define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000 | 24 | #define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000 |
25 | 25 | ||
26 | /* units are errors per second */ | ||
27 | #define ATH9K_ANI_OFDM_TRIG_LOW 400 | 26 | #define ATH9K_ANI_OFDM_TRIG_LOW 400 |
28 | #define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900 | 27 | #define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900 |
29 | 28 | ||
30 | /* units are errors per second */ | ||
31 | #define ATH9K_ANI_CCK_TRIG_HIGH 600 | 29 | #define ATH9K_ANI_CCK_TRIG_HIGH 600 |
32 | |||
33 | /* units are errors per second */ | ||
34 | #define ATH9K_ANI_CCK_TRIG_LOW 300 | 30 | #define ATH9K_ANI_CCK_TRIG_LOW 300 |
35 | 31 | ||
36 | #define ATH9K_ANI_NOISE_IMMUNE_LVL 4 | ||
37 | #define ATH9K_ANI_SPUR_IMMUNE_LVL 3 | 32 | #define ATH9K_ANI_SPUR_IMMUNE_LVL 3 |
38 | #define ATH9K_ANI_FIRSTEP_LVL 2 | 33 | #define ATH9K_ANI_FIRSTEP_LVL 2 |
39 | 34 | ||
@@ -45,10 +40,6 @@ | |||
45 | /* in ms */ | 40 | /* in ms */ |
46 | #define ATH9K_ANI_POLLINTERVAL 1000 | 41 | #define ATH9K_ANI_POLLINTERVAL 1000 |
47 | 42 | ||
48 | #define HAL_NOISE_IMMUNE_MAX 4 | ||
49 | #define HAL_SPUR_IMMUNE_MAX 7 | ||
50 | #define HAL_FIRST_STEP_MAX 2 | ||
51 | |||
52 | #define ATH9K_SIG_FIRSTEP_SETTING_MIN 0 | 43 | #define ATH9K_SIG_FIRSTEP_SETTING_MIN 0 |
53 | #define ATH9K_SIG_FIRSTEP_SETTING_MAX 20 | 44 | #define ATH9K_SIG_FIRSTEP_SETTING_MAX 20 |
54 | #define ATH9K_SIG_SPUR_IMM_SETTING_MIN 0 | 45 | #define ATH9K_SIG_SPUR_IMM_SETTING_MIN 0 |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index e6b92ff265fd..1e86977d3322 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | |||
@@ -3563,14 +3563,24 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz) | |||
3563 | { | 3563 | { |
3564 | struct ath9k_hw_capabilities *pCap = &ah->caps; | 3564 | struct ath9k_hw_capabilities *pCap = &ah->caps; |
3565 | int chain; | 3565 | int chain; |
3566 | u32 regval; | 3566 | u32 regval, value, gpio; |
3567 | static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = { | 3567 | static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = { |
3568 | AR_PHY_SWITCH_CHAIN_0, | 3568 | AR_PHY_SWITCH_CHAIN_0, |
3569 | AR_PHY_SWITCH_CHAIN_1, | 3569 | AR_PHY_SWITCH_CHAIN_1, |
3570 | AR_PHY_SWITCH_CHAIN_2, | 3570 | AR_PHY_SWITCH_CHAIN_2, |
3571 | }; | 3571 | }; |
3572 | 3572 | ||
3573 | u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz); | 3573 | if (AR_SREV_9485(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0)) { |
3574 | if (ah->config.xlna_gpio) | ||
3575 | gpio = ah->config.xlna_gpio; | ||
3576 | else | ||
3577 | gpio = AR9300_EXT_LNA_CTL_GPIO_AR9485; | ||
3578 | |||
3579 | ath9k_hw_cfg_output(ah, gpio, | ||
3580 | AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED); | ||
3581 | } | ||
3582 | |||
3583 | value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz); | ||
3574 | 3584 | ||
3575 | if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { | 3585 | if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { |
3576 | REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM, | 3586 | REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM, |
@@ -3796,7 +3806,13 @@ static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan) | |||
3796 | REG_RMW_FIELD(ah, ext_atten_reg[i], | 3806 | REG_RMW_FIELD(ah, ext_atten_reg[i], |
3797 | AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value); | 3807 | AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value); |
3798 | 3808 | ||
3799 | value = ar9003_hw_atten_chain_get_margin(ah, i, chan); | 3809 | if (AR_SREV_9485(ah) && |
3810 | (ar9003_hw_get_rx_gain_idx(ah) == 0) && | ||
3811 | ah->config.xatten_margin_cfg) | ||
3812 | value = 5; | ||
3813 | else | ||
3814 | value = ar9003_hw_atten_chain_get_margin(ah, i, chan); | ||
3815 | |||
3800 | REG_RMW_FIELD(ah, ext_atten_reg[i], | 3816 | REG_RMW_FIELD(ah, ext_atten_reg[i], |
3801 | AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN, | 3817 | AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN, |
3802 | value); | 3818 | value); |
@@ -4546,7 +4562,7 @@ static void ar9003_hw_get_target_power_eeprom(struct ath_hw *ah, | |||
4546 | is2GHz); | 4562 | is2GHz); |
4547 | 4563 | ||
4548 | for (i = 0; i < ar9300RateSize; i++) { | 4564 | for (i = 0; i < ar9300RateSize; i++) { |
4549 | ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n", | 4565 | ath_dbg(common, REGULATORY, "TPC[%02d] 0x%08x\n", |
4550 | i, targetPowerValT2[i]); | 4566 | i, targetPowerValT2[i]); |
4551 | } | 4567 | } |
4552 | } | 4568 | } |
@@ -5272,7 +5288,7 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, | |||
5272 | return; | 5288 | return; |
5273 | 5289 | ||
5274 | for (i = 0; i < ar9300RateSize; i++) { | 5290 | for (i = 0; i < ar9300RateSize; i++) { |
5275 | ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n", | 5291 | ath_dbg(common, REGULATORY, "TPC[%02d] 0x%08x\n", |
5276 | i, targetPowerValT2[i]); | 5292 | i, targetPowerValT2[i]); |
5277 | } | 5293 | } |
5278 | 5294 | ||
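The ar9003_eeprom.c changes above special-case AR9485 boards using rx gain index 0: the external-LNA control line is driven as a GPIO (a platform-supplied pin if one was configured, otherwise the chip default AR9300_EXT_LNA_CTL_GPIO_AR9485), and the XATTEN1 margin is forced to 5 when the platform flag asks for it. A simplified sketch of those two decisions; the function names are illustrative:

/* Pick the LNA-control GPIO: platform override first, chip default second. */
static unsigned int pick_xlna_gpio(unsigned int platform_gpio,
				   unsigned int chip_default_gpio)
{
	return platform_gpio ? platform_gpio : chip_default_gpio;
}

/* Force the attenuation margin to 5 on quirky boards, otherwise use the
 * value computed from the EEPROM for this chain and channel. */
static int pick_xatten_margin(int force_margin_quirk, int computed_margin)
{
	return force_margin_quirk ? 5 : computed_margin;
}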
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c index a3523c969a3a..671aaa7e7337 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c | |||
@@ -627,9 +627,26 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah) | |||
627 | 627 | ||
628 | static void ar9003_rx_gain_table_mode2(struct ath_hw *ah) | 628 | static void ar9003_rx_gain_table_mode2(struct ath_hw *ah) |
629 | { | 629 | { |
630 | if (AR_SREV_9462_20(ah)) | 630 | if (AR_SREV_9462_20(ah)) { |
631 | INIT_INI_ARRAY(&ah->iniModesRxGain, | 631 | INIT_INI_ARRAY(&ah->iniModesRxGain, |
632 | ar9462_common_mixed_rx_gain_table_2p0); | 632 | ar9462_common_mixed_rx_gain_table_2p0); |
633 | INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_core, | ||
634 | ar9462_2p0_baseband_core_mix_rxgain); | ||
635 | INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble, | ||
636 | ar9462_2p0_baseband_postamble_mix_rxgain); | ||
637 | INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna, | ||
638 | ar9462_2p0_baseband_postamble_5g_xlna); | ||
639 | } | ||
640 | } | ||
641 | |||
642 | static void ar9003_rx_gain_table_mode3(struct ath_hw *ah) | ||
643 | { | ||
644 | if (AR_SREV_9462_20(ah)) { | ||
645 | INIT_INI_ARRAY(&ah->iniModesRxGain, | ||
646 | ar9462_2p0_5g_xlna_only_rxgain); | ||
647 | INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna, | ||
648 | ar9462_2p0_baseband_postamble_5g_xlna); | ||
649 | } | ||
633 | } | 650 | } |
634 | 651 | ||
635 | static void ar9003_rx_gain_table_apply(struct ath_hw *ah) | 652 | static void ar9003_rx_gain_table_apply(struct ath_hw *ah) |
@@ -645,6 +662,9 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah) | |||
645 | case 2: | 662 | case 2: |
646 | ar9003_rx_gain_table_mode2(ah); | 663 | ar9003_rx_gain_table_mode2(ah); |
647 | break; | 664 | break; |
665 | case 3: | ||
666 | ar9003_rx_gain_table_mode3(ah); | ||
667 | break; | ||
648 | } | 668 | } |
649 | } | 669 | } |
650 | 670 | ||
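The ar9003_hw.c hunk above extends the AR9462 2.0 rx gain handling: index 2 (the mixed-LNA CUS217 configuration) now also registers its baseband core/postamble overrides, and the new index 3 registers the 5G-xLNA-only gain table. A sketch of the selection as a flag set; the flag names stand in for the INI arrays and are not real identifiers:

#define LOAD_MIXED_RXGAIN_SKETCH       0x1  /* common mixed rx gain table */
#define LOAD_MIXED_BB_OVERRIDE_SKETCH  0x2  /* bb core + postamble mix tables */
#define LOAD_5G_XLNA_RXGAIN_SKETCH     0x4  /* 5G xLNA-only rx gain table */
#define LOAD_5G_XLNA_POSTAMBLE_SKETCH  0x8  /* 5G xLNA baseband postamble */

static unsigned int rxgain_tables_for_index(int rx_gain_idx)
{
	switch (rx_gain_idx) {
	case 2:
		return LOAD_MIXED_RXGAIN_SKETCH |
		       LOAD_MIXED_BB_OVERRIDE_SKETCH |
		       LOAD_5G_XLNA_POSTAMBLE_SKETCH;
	case 3:
		return LOAD_5G_XLNA_RXGAIN_SKETCH |
		       LOAD_5G_XLNA_POSTAMBLE_SKETCH;
	default:
		return 0;	/* other indices keep their existing tables */
	}
}

The register writes for the override tables happen later, in ar9003_hw_process_ini() (see the ar9003_phy.c hunk below).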
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 83e03857c014..df84d20e1092 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c | |||
@@ -735,6 +735,9 @@ static int ar9003_hw_process_ini(struct ath_hw *ah, | |||
735 | return -EINVAL; | 735 | return -EINVAL; |
736 | } | 736 | } |
737 | 737 | ||
738 | /* | ||
739 | * SOC, MAC, BB, RADIO initvals. | ||
740 | */ | ||
738 | for (i = 0; i < ATH_INI_NUM_SPLIT; i++) { | 741 | for (i = 0; i < ATH_INI_NUM_SPLIT; i++) { |
739 | ar9003_hw_prog_ini(ah, &ah->iniSOC[i], modesIndex); | 742 | ar9003_hw_prog_ini(ah, &ah->iniSOC[i], modesIndex); |
740 | ar9003_hw_prog_ini(ah, &ah->iniMac[i], modesIndex); | 743 | ar9003_hw_prog_ini(ah, &ah->iniMac[i], modesIndex); |
@@ -746,11 +749,39 @@ static int ar9003_hw_process_ini(struct ath_hw *ah, | |||
746 | modesIndex); | 749 | modesIndex); |
747 | } | 750 | } |
748 | 751 | ||
752 | /* | ||
753 | * RXGAIN initvals. | ||
754 | */ | ||
749 | REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites); | 755 | REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites); |
756 | |||
757 | if (AR_SREV_9462_20(ah)) { | ||
758 | /* | ||
759 | * CUS217 mix LNA mode. | ||
760 | */ | ||
761 | if (ar9003_hw_get_rx_gain_idx(ah) == 2) { | ||
762 | REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_core, | ||
763 | 1, regWrites); | ||
764 | REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_postamble, | ||
765 | modesIndex, regWrites); | ||
766 | } | ||
767 | |||
768 | /* | ||
769 | * 5G-XLNA | ||
770 | */ | ||
771 | if ((ar9003_hw_get_rx_gain_idx(ah) == 2) || | ||
772 | (ar9003_hw_get_rx_gain_idx(ah) == 3)) { | ||
773 | REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna, | ||
774 | modesIndex, regWrites); | ||
775 | } | ||
776 | } | ||
777 | |||
750 | if (AR_SREV_9550(ah)) | 778 | if (AR_SREV_9550(ah)) |
751 | REG_WRITE_ARRAY(&ah->ini_modes_rx_gain_bounds, modesIndex, | 779 | REG_WRITE_ARRAY(&ah->ini_modes_rx_gain_bounds, modesIndex, |
752 | regWrites); | 780 | regWrites); |
753 | 781 | ||
782 | /* | ||
783 | * TXGAIN initvals. | ||
784 | */ | ||
754 | if (AR_SREV_9550(ah)) { | 785 | if (AR_SREV_9550(ah)) { |
755 | int modes_txgain_index; | 786 | int modes_txgain_index; |
756 | 787 | ||
@@ -772,8 +803,14 @@ static int ar9003_hw_process_ini(struct ath_hw *ah, | |||
772 | REG_WRITE_ARRAY(&ah->iniModesFastClock, | 803 | REG_WRITE_ARRAY(&ah->iniModesFastClock, |
773 | modesIndex, regWrites); | 804 | modesIndex, regWrites); |
774 | 805 | ||
806 | /* | ||
807 | * Clock frequency initvals. | ||
808 | */ | ||
775 | REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites); | 809 | REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites); |
776 | 810 | ||
811 | /* | ||
812 | * JAPAN regulatory. | ||
813 | */ | ||
777 | if (chan->channel == 2484) | 814 | if (chan->channel == 2484) |
778 | ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1); | 815 | ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1); |
779 | 816 | ||
@@ -906,6 +943,11 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, | |||
906 | struct ath_common *common = ath9k_hw_common(ah); | 943 | struct ath_common *common = ath9k_hw_common(ah); |
907 | struct ath9k_channel *chan = ah->curchan; | 944 | struct ath9k_channel *chan = ah->curchan; |
908 | struct ar5416AniState *aniState = &ah->ani; | 945 | struct ar5416AniState *aniState = &ah->ani; |
946 | int m1ThreshLow, m2ThreshLow; | ||
947 | int m1Thresh, m2Thresh; | ||
948 | int m2CountThr, m2CountThrLow; | ||
949 | int m1ThreshLowExt, m2ThreshLowExt; | ||
950 | int m1ThreshExt, m2ThreshExt; | ||
909 | s32 value, value2; | 951 | s32 value, value2; |
910 | 952 | ||
911 | switch (cmd & ah->ani_function) { | 953 | switch (cmd & ah->ani_function) { |
@@ -919,6 +961,61 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, | |||
919 | */ | 961 | */ |
920 | u32 on = param ? 1 : 0; | 962 | u32 on = param ? 1 : 0; |
921 | 963 | ||
964 | if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) | ||
965 | goto skip_ws_det; | ||
966 | |||
967 | m1ThreshLow = on ? | ||
968 | aniState->iniDef.m1ThreshLow : m1ThreshLow_off; | ||
969 | m2ThreshLow = on ? | ||
970 | aniState->iniDef.m2ThreshLow : m2ThreshLow_off; | ||
971 | m1Thresh = on ? | ||
972 | aniState->iniDef.m1Thresh : m1Thresh_off; | ||
973 | m2Thresh = on ? | ||
974 | aniState->iniDef.m2Thresh : m2Thresh_off; | ||
975 | m2CountThr = on ? | ||
976 | aniState->iniDef.m2CountThr : m2CountThr_off; | ||
977 | m2CountThrLow = on ? | ||
978 | aniState->iniDef.m2CountThrLow : m2CountThrLow_off; | ||
979 | m1ThreshLowExt = on ? | ||
980 | aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off; | ||
981 | m2ThreshLowExt = on ? | ||
982 | aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off; | ||
983 | m1ThreshExt = on ? | ||
984 | aniState->iniDef.m1ThreshExt : m1ThreshExt_off; | ||
985 | m2ThreshExt = on ? | ||
986 | aniState->iniDef.m2ThreshExt : m2ThreshExt_off; | ||
987 | |||
988 | REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, | ||
989 | AR_PHY_SFCORR_LOW_M1_THRESH_LOW, | ||
990 | m1ThreshLow); | ||
991 | REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, | ||
992 | AR_PHY_SFCORR_LOW_M2_THRESH_LOW, | ||
993 | m2ThreshLow); | ||
994 | REG_RMW_FIELD(ah, AR_PHY_SFCORR, | ||
995 | AR_PHY_SFCORR_M1_THRESH, | ||
996 | m1Thresh); | ||
997 | REG_RMW_FIELD(ah, AR_PHY_SFCORR, | ||
998 | AR_PHY_SFCORR_M2_THRESH, | ||
999 | m2Thresh); | ||
1000 | REG_RMW_FIELD(ah, AR_PHY_SFCORR, | ||
1001 | AR_PHY_SFCORR_M2COUNT_THR, | ||
1002 | m2CountThr); | ||
1003 | REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, | ||
1004 | AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW, | ||
1005 | m2CountThrLow); | ||
1006 | REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, | ||
1007 | AR_PHY_SFCORR_EXT_M1_THRESH_LOW, | ||
1008 | m1ThreshLowExt); | ||
1009 | REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, | ||
1010 | AR_PHY_SFCORR_EXT_M2_THRESH_LOW, | ||
1011 | m2ThreshLowExt); | ||
1012 | REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, | ||
1013 | AR_PHY_SFCORR_EXT_M1_THRESH, | ||
1014 | m1ThreshExt); | ||
1015 | REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, | ||
1016 | AR_PHY_SFCORR_EXT_M2_THRESH, | ||
1017 | m2ThreshExt); | ||
1018 | skip_ws_det: | ||
922 | if (on) | 1019 | if (on) |
923 | REG_SET_BIT(ah, AR_PHY_SFCORR_LOW, | 1020 | REG_SET_BIT(ah, AR_PHY_SFCORR_LOW, |
924 | AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); | 1021 | AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); |
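The ANI control hunk above restores a full set of M1/M2 self-correlation thresholds when OFDM weak-signal detection is toggled: switching it on puts back the INI-time defaults captured in aniState->iniDef, switching it off programs fixed *_off constants, and AR9462/AR9565 skip the whole block via the skip_ws_det label. The per-field pattern reduces to the sketch below (names simplified):

struct ws_thresh_sketch {
	int ini_default;	/* captured from the INI values at reset */
	int off_value;		/* fixed constant used when detection is off */
};

/* Same select-then-write pattern the driver applies to each of the ten
 * threshold fields before the REG_RMW_FIELD() calls. */
static int ws_thresh_value(const struct ws_thresh_sketch *t, int on)
{
	return on ? t->ini_default : t->off_value;
}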
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index e71774196c01..5013c731f9f6 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h | |||
@@ -351,6 +351,8 @@ | |||
351 | 351 | ||
352 | #define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118 | 352 | #define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118 |
353 | 353 | ||
354 | #define AR9300_EXT_LNA_CTL_GPIO_AR9485 9 | ||
355 | |||
354 | /* | 356 | /* |
355 | * AGC Field Definitions | 357 | * AGC Field Definitions |
356 | */ | 358 | */ |
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h index 999ab08c34e6..1d6b705ba110 100644 --- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h | |||
@@ -78,7 +78,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = { | |||
78 | {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150}, | 78 | {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150}, |
79 | {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, | 79 | {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, |
80 | {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, | 80 | {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, |
81 | {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18}, | 81 | {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, |
82 | {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982}, | 82 | {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982}, |
83 | {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, | 83 | {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, |
84 | {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, | 84 | {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, |
@@ -1449,4 +1449,284 @@ static const u32 ar9462_common_mixed_rx_gain_table_2p0[][2] = { | |||
1449 | {0x0000b1fc, 0x00000196}, | 1449 | {0x0000b1fc, 0x00000196}, |
1450 | }; | 1450 | }; |
1451 | 1451 | ||
1452 | static const u32 ar9462_2p0_baseband_postamble_5g_xlna[][5] = { | ||
1453 | /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ | ||
1454 | {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282}, | ||
1455 | }; | ||
1456 | |||
1457 | static const u32 ar9462_2p0_5g_xlna_only_rxgain[][2] = { | ||
1458 | /* Addr allmodes */ | ||
1459 | {0x0000a000, 0x00010000}, | ||
1460 | {0x0000a004, 0x00030002}, | ||
1461 | {0x0000a008, 0x00050004}, | ||
1462 | {0x0000a00c, 0x00810080}, | ||
1463 | {0x0000a010, 0x00830082}, | ||
1464 | {0x0000a014, 0x01810180}, | ||
1465 | {0x0000a018, 0x01830182}, | ||
1466 | {0x0000a01c, 0x01850184}, | ||
1467 | {0x0000a020, 0x01890188}, | ||
1468 | {0x0000a024, 0x018b018a}, | ||
1469 | {0x0000a028, 0x018d018c}, | ||
1470 | {0x0000a02c, 0x03820190}, | ||
1471 | {0x0000a030, 0x03840383}, | ||
1472 | {0x0000a034, 0x03880385}, | ||
1473 | {0x0000a038, 0x038a0389}, | ||
1474 | {0x0000a03c, 0x038c038b}, | ||
1475 | {0x0000a040, 0x0390038d}, | ||
1476 | {0x0000a044, 0x03920391}, | ||
1477 | {0x0000a048, 0x03940393}, | ||
1478 | {0x0000a04c, 0x03960395}, | ||
1479 | {0x0000a050, 0x00000000}, | ||
1480 | {0x0000a054, 0x00000000}, | ||
1481 | {0x0000a058, 0x00000000}, | ||
1482 | {0x0000a05c, 0x00000000}, | ||
1483 | {0x0000a060, 0x00000000}, | ||
1484 | {0x0000a064, 0x00000000}, | ||
1485 | {0x0000a068, 0x00000000}, | ||
1486 | {0x0000a06c, 0x00000000}, | ||
1487 | {0x0000a070, 0x00000000}, | ||
1488 | {0x0000a074, 0x00000000}, | ||
1489 | {0x0000a078, 0x00000000}, | ||
1490 | {0x0000a07c, 0x00000000}, | ||
1491 | {0x0000a080, 0x29292929}, | ||
1492 | {0x0000a084, 0x29292929}, | ||
1493 | {0x0000a088, 0x29292929}, | ||
1494 | {0x0000a08c, 0x29292929}, | ||
1495 | {0x0000a090, 0x22292929}, | ||
1496 | {0x0000a094, 0x1d1d2222}, | ||
1497 | {0x0000a098, 0x0c111117}, | ||
1498 | {0x0000a09c, 0x00030303}, | ||
1499 | {0x0000a0a0, 0x00000000}, | ||
1500 | {0x0000a0a4, 0x00000000}, | ||
1501 | {0x0000a0a8, 0x00000000}, | ||
1502 | {0x0000a0ac, 0x00000000}, | ||
1503 | {0x0000a0b0, 0x00000000}, | ||
1504 | {0x0000a0b4, 0x00000000}, | ||
1505 | {0x0000a0b8, 0x00000000}, | ||
1506 | {0x0000a0bc, 0x00000000}, | ||
1507 | {0x0000a0c0, 0x001f0000}, | ||
1508 | {0x0000a0c4, 0x01000101}, | ||
1509 | {0x0000a0c8, 0x011e011f}, | ||
1510 | {0x0000a0cc, 0x011c011d}, | ||
1511 | {0x0000a0d0, 0x02030204}, | ||
1512 | {0x0000a0d4, 0x02010202}, | ||
1513 | {0x0000a0d8, 0x021f0200}, | ||
1514 | {0x0000a0dc, 0x0302021e}, | ||
1515 | {0x0000a0e0, 0x03000301}, | ||
1516 | {0x0000a0e4, 0x031e031f}, | ||
1517 | {0x0000a0e8, 0x0402031d}, | ||
1518 | {0x0000a0ec, 0x04000401}, | ||
1519 | {0x0000a0f0, 0x041e041f}, | ||
1520 | {0x0000a0f4, 0x0502041d}, | ||
1521 | {0x0000a0f8, 0x05000501}, | ||
1522 | {0x0000a0fc, 0x051e051f}, | ||
1523 | {0x0000a100, 0x06010602}, | ||
1524 | {0x0000a104, 0x061f0600}, | ||
1525 | {0x0000a108, 0x061d061e}, | ||
1526 | {0x0000a10c, 0x07020703}, | ||
1527 | {0x0000a110, 0x07000701}, | ||
1528 | {0x0000a114, 0x00000000}, | ||
1529 | {0x0000a118, 0x00000000}, | ||
1530 | {0x0000a11c, 0x00000000}, | ||
1531 | {0x0000a120, 0x00000000}, | ||
1532 | {0x0000a124, 0x00000000}, | ||
1533 | {0x0000a128, 0x00000000}, | ||
1534 | {0x0000a12c, 0x00000000}, | ||
1535 | {0x0000a130, 0x00000000}, | ||
1536 | {0x0000a134, 0x00000000}, | ||
1537 | {0x0000a138, 0x00000000}, | ||
1538 | {0x0000a13c, 0x00000000}, | ||
1539 | {0x0000a140, 0x001f0000}, | ||
1540 | {0x0000a144, 0x01000101}, | ||
1541 | {0x0000a148, 0x011e011f}, | ||
1542 | {0x0000a14c, 0x011c011d}, | ||
1543 | {0x0000a150, 0x02030204}, | ||
1544 | {0x0000a154, 0x02010202}, | ||
1545 | {0x0000a158, 0x021f0200}, | ||
1546 | {0x0000a15c, 0x0302021e}, | ||
1547 | {0x0000a160, 0x03000301}, | ||
1548 | {0x0000a164, 0x031e031f}, | ||
1549 | {0x0000a168, 0x0402031d}, | ||
1550 | {0x0000a16c, 0x04000401}, | ||
1551 | {0x0000a170, 0x041e041f}, | ||
1552 | {0x0000a174, 0x0502041d}, | ||
1553 | {0x0000a178, 0x05000501}, | ||
1554 | {0x0000a17c, 0x051e051f}, | ||
1555 | {0x0000a180, 0x06010602}, | ||
1556 | {0x0000a184, 0x061f0600}, | ||
1557 | {0x0000a188, 0x061d061e}, | ||
1558 | {0x0000a18c, 0x07020703}, | ||
1559 | {0x0000a190, 0x07000701}, | ||
1560 | {0x0000a194, 0x00000000}, | ||
1561 | {0x0000a198, 0x00000000}, | ||
1562 | {0x0000a19c, 0x00000000}, | ||
1563 | {0x0000a1a0, 0x00000000}, | ||
1564 | {0x0000a1a4, 0x00000000}, | ||
1565 | {0x0000a1a8, 0x00000000}, | ||
1566 | {0x0000a1ac, 0x00000000}, | ||
1567 | {0x0000a1b0, 0x00000000}, | ||
1568 | {0x0000a1b4, 0x00000000}, | ||
1569 | {0x0000a1b8, 0x00000000}, | ||
1570 | {0x0000a1bc, 0x00000000}, | ||
1571 | {0x0000a1c0, 0x00000000}, | ||
1572 | {0x0000a1c4, 0x00000000}, | ||
1573 | {0x0000a1c8, 0x00000000}, | ||
1574 | {0x0000a1cc, 0x00000000}, | ||
1575 | {0x0000a1d0, 0x00000000}, | ||
1576 | {0x0000a1d4, 0x00000000}, | ||
1577 | {0x0000a1d8, 0x00000000}, | ||
1578 | {0x0000a1dc, 0x00000000}, | ||
1579 | {0x0000a1e0, 0x00000000}, | ||
1580 | {0x0000a1e4, 0x00000000}, | ||
1581 | {0x0000a1e8, 0x00000000}, | ||
1582 | {0x0000a1ec, 0x00000000}, | ||
1583 | {0x0000a1f0, 0x00000396}, | ||
1584 | {0x0000a1f4, 0x00000396}, | ||
1585 | {0x0000a1f8, 0x00000396}, | ||
1586 | {0x0000a1fc, 0x00000196}, | ||
1587 | {0x0000b000, 0x00010000}, | ||
1588 | {0x0000b004, 0x00030002}, | ||
1589 | {0x0000b008, 0x00050004}, | ||
1590 | {0x0000b00c, 0x00810080}, | ||
1591 | {0x0000b010, 0x00830082}, | ||
1592 | {0x0000b014, 0x01810180}, | ||
1593 | {0x0000b018, 0x01830182}, | ||
1594 | {0x0000b01c, 0x01850184}, | ||
1595 | {0x0000b020, 0x02810280}, | ||
1596 | {0x0000b024, 0x02830282}, | ||
1597 | {0x0000b028, 0x02850284}, | ||
1598 | {0x0000b02c, 0x02890288}, | ||
1599 | {0x0000b030, 0x028b028a}, | ||
1600 | {0x0000b034, 0x0388028c}, | ||
1601 | {0x0000b038, 0x038a0389}, | ||
1602 | {0x0000b03c, 0x038c038b}, | ||
1603 | {0x0000b040, 0x0390038d}, | ||
1604 | {0x0000b044, 0x03920391}, | ||
1605 | {0x0000b048, 0x03940393}, | ||
1606 | {0x0000b04c, 0x03960395}, | ||
1607 | {0x0000b050, 0x00000000}, | ||
1608 | {0x0000b054, 0x00000000}, | ||
1609 | {0x0000b058, 0x00000000}, | ||
1610 | {0x0000b05c, 0x00000000}, | ||
1611 | {0x0000b060, 0x00000000}, | ||
1612 | {0x0000b064, 0x00000000}, | ||
1613 | {0x0000b068, 0x00000000}, | ||
1614 | {0x0000b06c, 0x00000000}, | ||
1615 | {0x0000b070, 0x00000000}, | ||
1616 | {0x0000b074, 0x00000000}, | ||
1617 | {0x0000b078, 0x00000000}, | ||
1618 | {0x0000b07c, 0x00000000}, | ||
1619 | {0x0000b080, 0x2a2d2f32}, | ||
1620 | {0x0000b084, 0x21232328}, | ||
1621 | {0x0000b088, 0x19191c1e}, | ||
1622 | {0x0000b08c, 0x12141417}, | ||
1623 | {0x0000b090, 0x07070e0e}, | ||
1624 | {0x0000b094, 0x03030305}, | ||
1625 | {0x0000b098, 0x00000003}, | ||
1626 | {0x0000b09c, 0x00000000}, | ||
1627 | {0x0000b0a0, 0x00000000}, | ||
1628 | {0x0000b0a4, 0x00000000}, | ||
1629 | {0x0000b0a8, 0x00000000}, | ||
1630 | {0x0000b0ac, 0x00000000}, | ||
1631 | {0x0000b0b0, 0x00000000}, | ||
1632 | {0x0000b0b4, 0x00000000}, | ||
1633 | {0x0000b0b8, 0x00000000}, | ||
1634 | {0x0000b0bc, 0x00000000}, | ||
1635 | {0x0000b0c0, 0x003f0020}, | ||
1636 | {0x0000b0c4, 0x00400041}, | ||
1637 | {0x0000b0c8, 0x0140005f}, | ||
1638 | {0x0000b0cc, 0x0160015f}, | ||
1639 | {0x0000b0d0, 0x017e017f}, | ||
1640 | {0x0000b0d4, 0x02410242}, | ||
1641 | {0x0000b0d8, 0x025f0240}, | ||
1642 | {0x0000b0dc, 0x027f0260}, | ||
1643 | {0x0000b0e0, 0x0341027e}, | ||
1644 | {0x0000b0e4, 0x035f0340}, | ||
1645 | {0x0000b0e8, 0x037f0360}, | ||
1646 | {0x0000b0ec, 0x04400441}, | ||
1647 | {0x0000b0f0, 0x0460045f}, | ||
1648 | {0x0000b0f4, 0x0541047f}, | ||
1649 | {0x0000b0f8, 0x055f0540}, | ||
1650 | {0x0000b0fc, 0x057f0560}, | ||
1651 | {0x0000b100, 0x06400641}, | ||
1652 | {0x0000b104, 0x0660065f}, | ||
1653 | {0x0000b108, 0x067e067f}, | ||
1654 | {0x0000b10c, 0x07410742}, | ||
1655 | {0x0000b110, 0x075f0740}, | ||
1656 | {0x0000b114, 0x077f0760}, | ||
1657 | {0x0000b118, 0x07800781}, | ||
1658 | {0x0000b11c, 0x07a0079f}, | ||
1659 | {0x0000b120, 0x07c107bf}, | ||
1660 | {0x0000b124, 0x000007c0}, | ||
1661 | {0x0000b128, 0x00000000}, | ||
1662 | {0x0000b12c, 0x00000000}, | ||
1663 | {0x0000b130, 0x00000000}, | ||
1664 | {0x0000b134, 0x00000000}, | ||
1665 | {0x0000b138, 0x00000000}, | ||
1666 | {0x0000b13c, 0x00000000}, | ||
1667 | {0x0000b140, 0x003f0020}, | ||
1668 | {0x0000b144, 0x00400041}, | ||
1669 | {0x0000b148, 0x0140005f}, | ||
1670 | {0x0000b14c, 0x0160015f}, | ||
1671 | {0x0000b150, 0x017e017f}, | ||
1672 | {0x0000b154, 0x02410242}, | ||
1673 | {0x0000b158, 0x025f0240}, | ||
1674 | {0x0000b15c, 0x027f0260}, | ||
1675 | {0x0000b160, 0x0341027e}, | ||
1676 | {0x0000b164, 0x035f0340}, | ||
1677 | {0x0000b168, 0x037f0360}, | ||
1678 | {0x0000b16c, 0x04400441}, | ||
1679 | {0x0000b170, 0x0460045f}, | ||
1680 | {0x0000b174, 0x0541047f}, | ||
1681 | {0x0000b178, 0x055f0540}, | ||
1682 | {0x0000b17c, 0x057f0560}, | ||
1683 | {0x0000b180, 0x06400641}, | ||
1684 | {0x0000b184, 0x0660065f}, | ||
1685 | {0x0000b188, 0x067e067f}, | ||
1686 | {0x0000b18c, 0x07410742}, | ||
1687 | {0x0000b190, 0x075f0740}, | ||
1688 | {0x0000b194, 0x077f0760}, | ||
1689 | {0x0000b198, 0x07800781}, | ||
1690 | {0x0000b19c, 0x07a0079f}, | ||
1691 | {0x0000b1a0, 0x07c107bf}, | ||
1692 | {0x0000b1a4, 0x000007c0}, | ||
1693 | {0x0000b1a8, 0x00000000}, | ||
1694 | {0x0000b1ac, 0x00000000}, | ||
1695 | {0x0000b1b0, 0x00000000}, | ||
1696 | {0x0000b1b4, 0x00000000}, | ||
1697 | {0x0000b1b8, 0x00000000}, | ||
1698 | {0x0000b1bc, 0x00000000}, | ||
1699 | {0x0000b1c0, 0x00000000}, | ||
1700 | {0x0000b1c4, 0x00000000}, | ||
1701 | {0x0000b1c8, 0x00000000}, | ||
1702 | {0x0000b1cc, 0x00000000}, | ||
1703 | {0x0000b1d0, 0x00000000}, | ||
1704 | {0x0000b1d4, 0x00000000}, | ||
1705 | {0x0000b1d8, 0x00000000}, | ||
1706 | {0x0000b1dc, 0x00000000}, | ||
1707 | {0x0000b1e0, 0x00000000}, | ||
1708 | {0x0000b1e4, 0x00000000}, | ||
1709 | {0x0000b1e8, 0x00000000}, | ||
1710 | {0x0000b1ec, 0x00000000}, | ||
1711 | {0x0000b1f0, 0x00000396}, | ||
1712 | {0x0000b1f4, 0x00000396}, | ||
1713 | {0x0000b1f8, 0x00000396}, | ||
1714 | {0x0000b1fc, 0x00000196}, | ||
1715 | }; | ||
1716 | |||
1717 | static const u32 ar9462_2p0_baseband_core_mix_rxgain[][2] = { | ||
1718 | /* Addr allmodes */ | ||
1719 | {0x00009fd0, 0x0a2d6b93}, | ||
1720 | }; | ||
1721 | |||
1722 | static const u32 ar9462_2p0_baseband_postamble_mix_rxgain[][5] = { | ||
1723 | /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ | ||
1724 | {0x00009820, 0x206a022e, 0x206a022e, 0x206a01ae, 0x206a01ae}, | ||
1725 | {0x00009824, 0x63c640de, 0x5ac640d0, 0x63c640da, 0x63c640da}, | ||
1726 | {0x00009828, 0x0796be89, 0x0696b081, 0x0916be81, 0x0916be81}, | ||
1727 | {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000d8, 0x6c4000d8}, | ||
1728 | {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec86d2e, 0x7ec86d2e}, | ||
1729 | {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32395c5e}, | ||
1730 | }; | ||
1731 | |||
1452 | #endif /* INITVALS_9462_2P0_H */ | 1732 | #endif /* INITVALS_9462_2P0_H */ |
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 18fcee4e9d68..04b2d3ea728f 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h | |||
@@ -296,6 +296,7 @@ struct ath_tx { | |||
296 | struct ath_txq txq[ATH9K_NUM_TX_QUEUES]; | 296 | struct ath_txq txq[ATH9K_NUM_TX_QUEUES]; |
297 | struct ath_descdma txdma; | 297 | struct ath_descdma txdma; |
298 | struct ath_txq *txq_map[IEEE80211_NUM_ACS]; | 298 | struct ath_txq *txq_map[IEEE80211_NUM_ACS]; |
299 | struct ath_txq *uapsdq; | ||
299 | u32 txq_max_pending[IEEE80211_NUM_ACS]; | 300 | u32 txq_max_pending[IEEE80211_NUM_ACS]; |
300 | u16 max_aggr_framelen[IEEE80211_NUM_ACS][4][32]; | 301 | u16 max_aggr_framelen[IEEE80211_NUM_ACS][4][32]; |
301 | }; | 302 | }; |
@@ -343,6 +344,8 @@ int ath_txq_update(struct ath_softc *sc, int qnum, | |||
343 | void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop); | 344 | void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop); |
344 | int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, | 345 | int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, |
345 | struct ath_tx_control *txctl); | 346 | struct ath_tx_control *txctl); |
347 | void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | ||
348 | struct sk_buff *skb); | ||
346 | void ath_tx_tasklet(struct ath_softc *sc); | 349 | void ath_tx_tasklet(struct ath_softc *sc); |
347 | void ath_tx_edma_tasklet(struct ath_softc *sc); | 350 | void ath_tx_edma_tasklet(struct ath_softc *sc); |
348 | int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, | 351 | int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, |
@@ -353,6 +356,11 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid | |||
353 | void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an); | 356 | void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an); |
354 | void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, | 357 | void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, |
355 | struct ath_node *an); | 358 | struct ath_node *an); |
359 | void ath9k_release_buffered_frames(struct ieee80211_hw *hw, | ||
360 | struct ieee80211_sta *sta, | ||
361 | u16 tids, int nframes, | ||
362 | enum ieee80211_frame_release_type reason, | ||
363 | bool more_data); | ||
356 | 364 | ||
357 | /********/ | 365 | /********/ |
358 | /* VIFs */ | 366 | /* VIFs */ |
@@ -623,6 +631,10 @@ void ath_ant_comb_update(struct ath_softc *sc); | |||
623 | /* Main driver core */ | 631 | /* Main driver core */ |
624 | /********************/ | 632 | /********************/ |
625 | 633 | ||
634 | #define ATH9K_PCI_CUS198 0x0001 | ||
635 | #define ATH9K_PCI_CUS230 0x0002 | ||
636 | #define ATH9K_PCI_CUS217 0x0004 | ||
637 | |||
626 | /* | 638 | /* |
627 | * Default cache line size, in bytes. | 639 | * Default cache line size, in bytes. |
628 | * Used when PCI device not fully initialized by bootrom/BIOS | 640 | * Used when PCI device not fully initialized by bootrom/BIOS |
@@ -707,6 +719,7 @@ struct ath_softc { | |||
707 | 719 | ||
708 | unsigned int hw_busy_count; | 720 | unsigned int hw_busy_count; |
709 | unsigned long sc_flags; | 721 | unsigned long sc_flags; |
722 | unsigned long driver_data; | ||
710 | 723 | ||
711 | u32 intrstatus; | 724 | u32 intrstatus; |
712 | u16 ps_flags; /* PS_* */ | 725 | u16 ps_flags; /* PS_* */ |
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index fd1eebab8647..1a17732bb089 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c | |||
@@ -108,23 +108,6 @@ static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif, | |||
108 | ath9k_hw_set_txdesc(ah, bf->bf_desc, &info); | 108 | ath9k_hw_set_txdesc(ah, bf->bf_desc, &info); |
109 | } | 109 | } |
110 | 110 | ||
111 | static void ath9k_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb) | ||
112 | { | ||
113 | struct ath_softc *sc = hw->priv; | ||
114 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); | ||
115 | struct ath_tx_control txctl; | ||
116 | |||
117 | memset(&txctl, 0, sizeof(struct ath_tx_control)); | ||
118 | txctl.txq = sc->beacon.cabq; | ||
119 | |||
120 | ath_dbg(common, XMIT, "transmitting CABQ packet, skb: %p\n", skb); | ||
121 | |||
122 | if (ath_tx_start(hw, skb, &txctl) != 0) { | ||
123 | ath_dbg(common, XMIT, "CABQ TX failed\n"); | ||
124 | ieee80211_free_txskb(hw, skb); | ||
125 | } | ||
126 | } | ||
127 | |||
128 | static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw, | 111 | static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw, |
129 | struct ieee80211_vif *vif) | 112 | struct ieee80211_vif *vif) |
130 | { | 113 | { |
@@ -206,10 +189,8 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw, | |||
206 | 189 | ||
207 | ath9k_beacon_setup(sc, vif, bf, info->control.rates[0].idx); | 190 | ath9k_beacon_setup(sc, vif, bf, info->control.rates[0].idx); |
208 | 191 | ||
209 | while (skb) { | 192 | if (skb) |
210 | ath9k_tx_cabq(hw, skb); | 193 | ath_tx_cabq(hw, vif, skb); |
211 | skb = ieee80211_get_buffered_bc(hw, vif); | ||
212 | } | ||
213 | 194 | ||
214 | return bf; | 195 | return bf; |
215 | } | 196 | } |
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c index 7304e7585009..5e8219a91e25 100644 --- a/drivers/net/wireless/ath/ath9k/calib.c +++ b/drivers/net/wireless/ath/ath9k/calib.c | |||
@@ -387,7 +387,6 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan) | |||
387 | 387 | ||
388 | if (!caldata) { | 388 | if (!caldata) { |
389 | chan->noisefloor = nf; | 389 | chan->noisefloor = nf; |
390 | ah->noise = ath9k_hw_getchan_noise(ah, chan); | ||
391 | return false; | 390 | return false; |
392 | } | 391 | } |
393 | 392 | ||
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index 0085e643132f..69581031f2cd 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h | |||
@@ -142,6 +142,7 @@ struct ath9k_htc_target_aggr { | |||
142 | #define WLAN_RC_40_FLAG 0x02 | 142 | #define WLAN_RC_40_FLAG 0x02 |
143 | #define WLAN_RC_SGI_FLAG 0x04 | 143 | #define WLAN_RC_SGI_FLAG 0x04 |
144 | #define WLAN_RC_HT_FLAG 0x08 | 144 | #define WLAN_RC_HT_FLAG 0x08 |
145 | #define ATH_RC_TX_STBC_FLAG 0x20 | ||
145 | 146 | ||
146 | struct ath9k_htc_rateset { | 147 | struct ath9k_htc_rateset { |
147 | u8 rs_nrates; | 148 | u8 rs_nrates; |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 59f64367e8ca..bb0ba9e3e083 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c | |||
@@ -517,6 +517,9 @@ static void setup_ht_cap(struct ath9k_htc_priv *priv, | |||
517 | ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n", | 517 | ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n", |
518 | tx_streams, rx_streams); | 518 | tx_streams, rx_streams); |
519 | 519 | ||
520 | if (tx_streams >= 2) | ||
521 | ht_info->cap |= IEEE80211_HT_CAP_TX_STBC; | ||
522 | |||
520 | if (tx_streams != rx_streams) { | 523 | if (tx_streams != rx_streams) { |
521 | ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; | 524 | ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; |
522 | ht_info->mcs.tx_params |= ((tx_streams - 1) << | 525 | ht_info->mcs.tx_params |= ((tx_streams - 1) << |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 34869c2405aa..eaa94feb4333 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c | |||
@@ -627,6 +627,8 @@ static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv, | |||
627 | trate->rates.ht_rates.rs_nrates = j; | 627 | trate->rates.ht_rates.rs_nrates = j; |
628 | 628 | ||
629 | caps = WLAN_RC_HT_FLAG; | 629 | caps = WLAN_RC_HT_FLAG; |
630 | if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) | ||
631 | caps |= ATH_RC_TX_STBC_FLAG; | ||
630 | if (sta->ht_cap.mcs.rx_mask[1]) | 632 | if (sta->ht_cap.mcs.rx_mask[1]) |
631 | caps |= WLAN_RC_DS_FLAG; | 633 | caps |= WLAN_RC_DS_FLAG; |
632 | if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) && | 634 | if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) && |
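The two HTC hunks above wire up TX STBC: htc_drv_init.c advertises IEEE80211_HT_CAP_TX_STBC when at least two TX streams are available, and htc_drv_main.c sets the new ATH_RC_TX_STBC_FLAG in the rate-control caps whenever the peer advertises RX STBC support. A compressed sketch of the decision; the RX-STBC mask value is an assumption here, and folding both checks into one helper is a simplification of the two separate call sites:

#define ATH_RC_TX_STBC_FLAG_SKETCH  0x20     /* mirrors the htc.h define above */
#define HT_CAP_RX_STBC_MASK_SKETCH  0x0300   /* assumed mac80211 RX-STBC field */

static unsigned int stbc_rc_caps(int tx_streams, unsigned int peer_ht_cap)
{
	unsigned int caps = 0;

	/* Only send STBC if we have the chains and the peer can receive it. */
	if (tx_streams >= 2 && (peer_ht_cap & HT_CAP_RX_STBC_MASK_SKETCH))
		caps |= ATH_RC_TX_STBC_FLAG_SKETCH;

	return caps;
}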
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index d813ab8104d6..5324c3346af8 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c | |||
@@ -1870,7 +1870,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, | |||
1870 | 1870 | ||
1871 | ah->caldata = caldata; | 1871 | ah->caldata = caldata; |
1872 | if (caldata && (chan->channel != caldata->channel || | 1872 | if (caldata && (chan->channel != caldata->channel || |
1873 | chan->channelFlags != caldata->channelFlags)) { | 1873 | chan->channelFlags != caldata->channelFlags || |
1874 | chan->chanmode != caldata->chanmode)) { | ||
1874 | /* Operating channel changed, reset channel calibration data */ | 1875 | /* Operating channel changed, reset channel calibration data */ |
1875 | memset(caldata, 0, sizeof(*caldata)); | 1876 | memset(caldata, 0, sizeof(*caldata)); |
1876 | ath9k_init_nfcal_hist_buffer(ah, chan); | 1877 | ath9k_init_nfcal_hist_buffer(ah, chan); |
@@ -3041,7 +3042,7 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah, | |||
3041 | 3042 | ||
3042 | timer_next = tsf + trig_timeout; | 3043 | timer_next = tsf + trig_timeout; |
3043 | 3044 | ||
3044 | ath_dbg(ath9k_hw_common(ah), HWTIMER, | 3045 | ath_dbg(ath9k_hw_common(ah), BTCOEX, |
3045 | "current tsf %x period %x timer_next %x\n", | 3046 | "current tsf %x period %x timer_next %x\n", |
3046 | tsf, timer_period, timer_next); | 3047 | tsf, timer_period, timer_next); |
3047 | 3048 | ||
@@ -3140,7 +3141,7 @@ void ath_gen_timer_isr(struct ath_hw *ah) | |||
3140 | index = rightmost_index(timer_table, &thresh_mask); | 3141 | index = rightmost_index(timer_table, &thresh_mask); |
3141 | timer = timer_table->timers[index]; | 3142 | timer = timer_table->timers[index]; |
3142 | BUG_ON(!timer); | 3143 | BUG_ON(!timer); |
3143 | ath_dbg(common, HWTIMER, "TSF overflow for Gen timer %d\n", | 3144 | ath_dbg(common, BTCOEX, "TSF overflow for Gen timer %d\n", |
3144 | index); | 3145 | index); |
3145 | timer->overflow(timer->arg); | 3146 | timer->overflow(timer->arg); |
3146 | } | 3147 | } |
@@ -3149,7 +3150,7 @@ void ath_gen_timer_isr(struct ath_hw *ah) | |||
3149 | index = rightmost_index(timer_table, &trigger_mask); | 3150 | index = rightmost_index(timer_table, &trigger_mask); |
3150 | timer = timer_table->timers[index]; | 3151 | timer = timer_table->timers[index]; |
3151 | BUG_ON(!timer); | 3152 | BUG_ON(!timer); |
3152 | ath_dbg(common, HWTIMER, | 3153 | ath_dbg(common, BTCOEX, |
3153 | "Gen timer[%d] trigger\n", index); | 3154 | "Gen timer[%d] trigger\n", index); |
3154 | timer->trigger(timer->arg); | 3155 | timer->trigger(timer->arg); |
3155 | } | 3156 | } |
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 7d259b7dc254..cd74b3afef7d 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h | |||
@@ -307,6 +307,10 @@ struct ath9k_ops_config { | |||
307 | u16 spurchans[AR_EEPROM_MODAL_SPURS][2]; | 307 | u16 spurchans[AR_EEPROM_MODAL_SPURS][2]; |
308 | u8 max_txtrig_level; | 308 | u8 max_txtrig_level; |
309 | u16 ani_poll_interval; /* ANI poll interval in ms */ | 309 | u16 ani_poll_interval; /* ANI poll interval in ms */ |
310 | |||
311 | /* Platform specific config */ | ||
312 | u32 xlna_gpio; | ||
313 | bool xatten_margin_cfg; | ||
310 | }; | 314 | }; |
311 | 315 | ||
312 | enum ath9k_int { | 316 | enum ath9k_int { |
@@ -888,6 +892,9 @@ struct ath_hw { | |||
888 | struct ar5416IniArray iniCckfirJapan2484; | 892 | struct ar5416IniArray iniCckfirJapan2484; |
889 | struct ar5416IniArray iniModes_9271_ANI_reg; | 893 | struct ar5416IniArray iniModes_9271_ANI_reg; |
890 | struct ar5416IniArray ini_radio_post_sys2ant; | 894 | struct ar5416IniArray ini_radio_post_sys2ant; |
895 | struct ar5416IniArray ini_modes_rxgain_5g_xlna; | ||
896 | struct ar5416IniArray ini_modes_rxgain_bb_core; | ||
897 | struct ar5416IniArray ini_modes_rxgain_bb_postamble; | ||
891 | 898 | ||
892 | struct ar5416IniArray iniMac[ATH_INI_NUM_SPLIT]; | 899 | struct ar5416IniArray iniMac[ATH_INI_NUM_SPLIT]; |
893 | struct ar5416IniArray iniBB[ATH_INI_NUM_SPLIT]; | 900 | struct ar5416IniArray iniBB[ATH_INI_NUM_SPLIT]; |
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 389ee1b59976..1e555d899469 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c | |||
@@ -432,6 +432,8 @@ static int ath9k_init_queues(struct ath_softc *sc) | |||
432 | sc->config.cabqReadytime = ATH_CABQ_READY_TIME; | 432 | sc->config.cabqReadytime = ATH_CABQ_READY_TIME; |
433 | ath_cabq_update(sc); | 433 | ath_cabq_update(sc); |
434 | 434 | ||
435 | sc->tx.uapsdq = ath_txq_setup(sc, ATH9K_TX_QUEUE_UAPSD, 0); | ||
436 | |||
435 | for (i = 0; i < IEEE80211_NUM_ACS; i++) { | 437 | for (i = 0; i < IEEE80211_NUM_ACS; i++) { |
436 | sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i); | 438 | sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i); |
437 | sc->tx.txq_map[i]->mac80211_qnum = i; | 439 | sc->tx.txq_map[i]->mac80211_qnum = i; |
@@ -511,6 +513,27 @@ static void ath9k_init_misc(struct ath_softc *sc) | |||
511 | sc->spec_config.fft_period = 0xF; | 513 | sc->spec_config.fft_period = 0xF; |
512 | } | 514 | } |
513 | 515 | ||
516 | static void ath9k_init_platform(struct ath_softc *sc) | ||
517 | { | ||
518 | struct ath_hw *ah = sc->sc_ah; | ||
519 | struct ath_common *common = ath9k_hw_common(ah); | ||
520 | |||
521 | if (common->bus_ops->ath_bus_type != ATH_PCI) | ||
522 | return; | ||
523 | |||
524 | if (sc->driver_data & (ATH9K_PCI_CUS198 | | ||
525 | ATH9K_PCI_CUS230)) { | ||
526 | ah->config.xlna_gpio = 9; | ||
527 | ah->config.xatten_margin_cfg = true; | ||
528 | |||
529 | ath_info(common, "Set parameters for %s\n", | ||
530 | (sc->driver_data & ATH9K_PCI_CUS198) ? | ||
531 | "CUS198" : "CUS230"); | ||
532 | } else if (sc->driver_data & ATH9K_PCI_CUS217) { | ||
533 | ath_info(common, "CUS217 card detected\n"); | ||
534 | } | ||
535 | } | ||
536 | |||
514 | static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob, | 537 | static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob, |
515 | void *ctx) | 538 | void *ctx) |
516 | { | 539 | { |
@@ -603,6 +626,11 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, | |||
603 | common->disable_ani = false; | 626 | common->disable_ani = false; |
604 | 627 | ||
605 | /* | 628 | /* |
629 | * Platform quirks. | ||
630 | */ | ||
631 | ath9k_init_platform(sc); | ||
632 | |||
633 | /* | ||
606 | * Enable Antenna diversity only when BTCOEX is disabled | 634 | * Enable Antenna diversity only when BTCOEX is disabled |
607 | * and the user manually requests the feature. | 635 | * and the user manually requests the feature. |
608 | */ | 636 | */ |
@@ -753,6 +781,15 @@ static const struct ieee80211_iface_combination if_comb[] = { | |||
753 | } | 781 | } |
754 | }; | 782 | }; |
755 | 783 | ||
784 | #ifdef CONFIG_PM | ||
785 | static const struct wiphy_wowlan_support ath9k_wowlan_support = { | ||
786 | .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT, | ||
787 | .n_patterns = MAX_NUM_USER_PATTERN, | ||
788 | .pattern_min_len = 1, | ||
789 | .pattern_max_len = MAX_PATTERN_SIZE, | ||
790 | }; | ||
791 | #endif | ||
792 | |||
756 | void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) | 793 | void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) |
757 | { | 794 | { |
758 | struct ath_hw *ah = sc->sc_ah; | 795 | struct ath_hw *ah = sc->sc_ah; |
@@ -800,13 +837,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) | |||
800 | 837 | ||
801 | #ifdef CONFIG_PM_SLEEP | 838 | #ifdef CONFIG_PM_SLEEP |
802 | if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) && | 839 | if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) && |
803 | device_can_wakeup(sc->dev)) { | 840 | device_can_wakeup(sc->dev)) |
804 | hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | | 841 | hw->wiphy->wowlan = &ath9k_wowlan_support; |
805 | WIPHY_WOWLAN_DISCONNECT; | ||
806 | hw->wiphy->wowlan.n_patterns = MAX_NUM_USER_PATTERN; | ||
807 | hw->wiphy->wowlan.pattern_min_len = 1; | ||
808 | hw->wiphy->wowlan.pattern_max_len = MAX_PATTERN_SIZE; | ||
809 | } | ||
810 | 842 | ||
811 | atomic_set(&sc->wow_sleep_proc_intr, -1); | 843 | atomic_set(&sc->wow_sleep_proc_intr, -1); |
812 | atomic_set(&sc->wow_got_bmiss_intr, -1); | 844 | atomic_set(&sc->wow_got_bmiss_intr, -1); |
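The init.c hunk above adds ath9k_init_platform(), which turns the PCI .driver_data quirk bits recorded at probe time into per-card configuration before hardware setup: CUS198/CUS230 boards get xlna_gpio = 9 and xatten_margin_cfg = true, while CUS217 is only logged here since its effect is the mixed rx gain handling elsewhere. A reduced sketch of that mapping, with a simplified config struct in place of ath9k_ops_config:

#define PCI_QUIRK_CUS198_SKETCH  0x0001
#define PCI_QUIRK_CUS230_SKETCH  0x0002
#define PCI_QUIRK_CUS217_SKETCH  0x0004

struct platform_cfg_sketch {
	unsigned int xlna_gpio;
	int xatten_margin_cfg;
};

static void apply_pci_quirks(unsigned long driver_data,
			     struct platform_cfg_sketch *cfg)
{
	if (driver_data & (PCI_QUIRK_CUS198_SKETCH | PCI_QUIRK_CUS230_SKETCH)) {
		cfg->xlna_gpio = 9;		/* external LNA control pin */
		cfg->xatten_margin_cfg = 1;	/* force the XATTEN1 margin */
	}
	/* CUS217 needs no config override here; it selects the mixed-LNA
	 * rx gain initvals via the rx gain index instead. */
}

The same hunk also moves the WoW capability description into a static wiphy_wowlan_support structure, matching the newer cfg80211 interface that takes a pointer rather than inline wiphy fields.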
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index e5b186b04b29..1737a3e33685 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -1210,13 +1210,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) | |||
1210 | ath_update_survey_stats(sc); | 1210 | ath_update_survey_stats(sc); |
1211 | spin_unlock_irqrestore(&common->cc_lock, flags); | 1211 | spin_unlock_irqrestore(&common->cc_lock, flags); |
1212 | 1212 | ||
1213 | /* | ||
1214 | * Preserve the current channel values, before updating | ||
1215 | * the same channel | ||
1216 | */ | ||
1217 | if (ah->curchan && (old_pos == pos)) | ||
1218 | ath9k_hw_getnf(ah, ah->curchan); | ||
1219 | |||
1220 | ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos], | 1213 | ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos], |
1221 | curchan, channel_type); | 1214 | curchan, channel_type); |
1222 | 1215 | ||
@@ -2347,6 +2340,7 @@ struct ieee80211_ops ath9k_ops = { | |||
2347 | .flush = ath9k_flush, | 2340 | .flush = ath9k_flush, |
2348 | .tx_frames_pending = ath9k_tx_frames_pending, | 2341 | .tx_frames_pending = ath9k_tx_frames_pending, |
2349 | .tx_last_beacon = ath9k_tx_last_beacon, | 2342 | .tx_last_beacon = ath9k_tx_last_beacon, |
2343 | .release_buffered_frames = ath9k_release_buffered_frames, | ||
2350 | .get_stats = ath9k_get_stats, | 2344 | .get_stats = ath9k_get_stats, |
2351 | .set_antenna = ath9k_set_antenna, | 2345 | .set_antenna = ath9k_set_antenna, |
2352 | .get_antenna = ath9k_get_antenna, | 2346 | .get_antenna = ath9k_get_antenna, |
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 0e0d39583837..b096bb2c28c8 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c | |||
@@ -34,8 +34,51 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { | |||
34 | { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */ | 34 | { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */ |
35 | { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */ | 35 | { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */ |
36 | { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */ | 36 | { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */ |
37 | |||
38 | /* PCI-E CUS198 */ | ||
39 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
40 | 0x0032, | ||
41 | PCI_VENDOR_ID_AZWAVE, | ||
42 | 0x2086), | ||
43 | .driver_data = ATH9K_PCI_CUS198 }, | ||
44 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
45 | 0x0032, | ||
46 | PCI_VENDOR_ID_AZWAVE, | ||
47 | 0x1237), | ||
48 | .driver_data = ATH9K_PCI_CUS198 }, | ||
49 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
50 | 0x0032, | ||
51 | PCI_VENDOR_ID_AZWAVE, | ||
52 | 0x2126), | ||
53 | .driver_data = ATH9K_PCI_CUS198 }, | ||
54 | |||
55 | /* PCI-E CUS230 */ | ||
56 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
57 | 0x0032, | ||
58 | PCI_VENDOR_ID_AZWAVE, | ||
59 | 0x2152), | ||
60 | .driver_data = ATH9K_PCI_CUS230 }, | ||
61 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
62 | 0x0032, | ||
63 | PCI_VENDOR_ID_FOXCONN, | ||
64 | 0xE075), | ||
65 | .driver_data = ATH9K_PCI_CUS230 }, | ||
66 | |||
37 | { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */ | 67 | { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */ |
38 | { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ | 68 | { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ |
69 | |||
70 | /* PCI-E CUS217 */ | ||
71 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
72 | 0x0034, | ||
73 | PCI_VENDOR_ID_AZWAVE, | ||
74 | 0x2116), | ||
75 | .driver_data = ATH9K_PCI_CUS217 }, | ||
76 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, | ||
77 | 0x0034, | ||
78 | 0x11AD, /* LITEON */ | ||
79 | 0x6661), | ||
80 | .driver_data = ATH9K_PCI_CUS217 }, | ||
81 | |||
39 | { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */ | 82 | { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */ |
40 | { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */ | 83 | { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */ |
41 | { PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E AR9565 */ | 84 | { PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E AR9565 */ |
@@ -221,6 +264,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
221 | sc->hw = hw; | 264 | sc->hw = hw; |
222 | sc->dev = &pdev->dev; | 265 | sc->dev = &pdev->dev; |
223 | sc->mem = pcim_iomap_table(pdev)[0]; | 266 | sc->mem = pcim_iomap_table(pdev)[0]; |
267 | sc->driver_data = id->driver_data; | ||
224 | 268 | ||
225 | /* Will be cleared in ath9k_start() */ | 269 | /* Will be cleared in ath9k_start() */ |
226 | set_bit(SC_OP_INVALID, &sc->sc_flags); | 270 | set_bit(SC_OP_INVALID, &sc->sc_flags); |
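The pci.c hunk above places subsystem-specific entries ahead of the generic device-ID matches; because the PCI core returns the first table entry that matches, the PCI_DEVICE_SUB() lines must stay above the plain PCI_VDEVICE() catch-alls for the same device ID. The probe routine then just copies id->driver_data into the softc for ath9k_init_platform() to consume. A hedged, minimal sketch of the same pattern (the driver name, vendor/device/subsystem numbers, and quirk flag below are illustrative):

#include <linux/pci.h>

#define EXAMPLE_QUIRK_A	0x0001

static const struct pci_device_id example_ids[] = {
	/* Specific subsystem match first, carrying a quirk bitmask. */
	{ PCI_DEVICE_SUB(0x168c, 0x0032, 0x1a3b, 0x2086),
	  .driver_data = EXAMPLE_QUIRK_A },
	/* Generic catch-all for the same device ID, no quirks. */
	{ PCI_VDEVICE(ATHEROS, 0x0032) },
	{ }
};

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	unsigned long quirks = id->driver_data;	/* 0 for the catch-all entry */

	/* ... stash 'quirks' in the driver's private state (sc->driver_data
	 * in the hunk above) and continue with normal probing ... */
	return 0;
}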
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 1c9b1bac8b0d..7e19d9b5214e 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
@@ -518,6 +518,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, | |||
518 | ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, | 518 | ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, |
519 | !txfail); | 519 | !txfail); |
520 | } else { | 520 | } else { |
521 | if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) { | ||
522 | tx_info->flags &= ~IEEE80211_TX_STATUS_EOSP; | ||
523 | ieee80211_sta_eosp(sta); | ||
524 | } | ||
521 | /* retry the un-acked ones */ | 525 | /* retry the un-acked ones */ |
522 | if (bf->bf_next == NULL && bf_last->bf_stale) { | 526 | if (bf->bf_next == NULL && bf_last->bf_stale) { |
523 | struct ath_buf *tbf; | 527 | struct ath_buf *tbf; |
@@ -786,25 +790,20 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid, | |||
786 | return ndelim; | 790 | return ndelim; |
787 | } | 791 | } |
788 | 792 | ||
789 | static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, | 793 | static struct ath_buf * |
790 | struct ath_txq *txq, | 794 | ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, |
791 | struct ath_atx_tid *tid, | 795 | struct ath_atx_tid *tid) |
792 | struct list_head *bf_q, | ||
793 | int *aggr_len) | ||
794 | { | 796 | { |
795 | #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) | ||
796 | struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL; | ||
797 | int rl = 0, nframes = 0, ndelim, prev_al = 0; | ||
798 | u16 aggr_limit = 0, al = 0, bpad = 0, | ||
799 | al_delta, h_baw = tid->baw_size / 2; | ||
800 | enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; | ||
801 | struct ieee80211_tx_info *tx_info; | ||
802 | struct ath_frame_info *fi; | 797 | struct ath_frame_info *fi; |
803 | struct sk_buff *skb; | 798 | struct sk_buff *skb; |
799 | struct ath_buf *bf; | ||
804 | u16 seqno; | 800 | u16 seqno; |
805 | 801 | ||
806 | do { | 802 | while (1) { |
807 | skb = skb_peek(&tid->buf_q); | 803 | skb = skb_peek(&tid->buf_q); |
804 | if (!skb) | ||
805 | break; | ||
806 | |||
808 | fi = get_frame_info(skb); | 807 | fi = get_frame_info(skb); |
809 | bf = fi->bf; | 808 | bf = fi->bf; |
810 | if (!fi->bf) | 809 | if (!fi->bf) |
@@ -820,10 +819,8 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, | |||
820 | seqno = bf->bf_state.seqno; | 819 | seqno = bf->bf_state.seqno; |
821 | 820 | ||
822 | /* do not step over block-ack window */ | 821 | /* do not step over block-ack window */ |
823 | if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) { | 822 | if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) |
824 | status = ATH_AGGR_BAW_CLOSED; | ||
825 | break; | 823 | break; |
826 | } | ||
827 | 824 | ||
828 | if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) { | 825 | if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) { |
829 | struct ath_tx_status ts = {}; | 826 | struct ath_tx_status ts = {}; |
@@ -837,6 +834,40 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, | |||
837 | continue; | 834 | continue; |
838 | } | 835 | } |
839 | 836 | ||
837 | bf->bf_next = NULL; | ||
838 | bf->bf_lastbf = bf; | ||
839 | return bf; | ||
840 | } | ||
841 | |||
842 | return NULL; | ||
843 | } | ||
844 | |||
845 | static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, | ||
846 | struct ath_txq *txq, | ||
847 | struct ath_atx_tid *tid, | ||
848 | struct list_head *bf_q, | ||
849 | int *aggr_len) | ||
850 | { | ||
851 | #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) | ||
852 | struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL; | ||
853 | int rl = 0, nframes = 0, ndelim, prev_al = 0; | ||
854 | u16 aggr_limit = 0, al = 0, bpad = 0, | ||
855 | al_delta, h_baw = tid->baw_size / 2; | ||
856 | enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; | ||
857 | struct ieee80211_tx_info *tx_info; | ||
858 | struct ath_frame_info *fi; | ||
859 | struct sk_buff *skb; | ||
860 | |||
861 | do { | ||
862 | bf = ath_tx_get_tid_subframe(sc, txq, tid); | ||
863 | if (!bf) { | ||
864 | status = ATH_AGGR_BAW_CLOSED; | ||
865 | break; | ||
866 | } | ||
867 | |||
868 | skb = bf->bf_mpdu; | ||
869 | fi = get_frame_info(skb); | ||
870 | |||
840 | if (!bf_first) | 871 | if (!bf_first) |
841 | bf_first = bf; | 872 | bf_first = bf; |
842 | 873 | ||
@@ -882,7 +913,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, | |||
882 | 913 | ||
883 | /* link buffers of this frame to the aggregate */ | 914 | /* link buffers of this frame to the aggregate */ |
884 | if (!fi->retries) | 915 | if (!fi->retries) |
885 | ath_tx_addto_baw(sc, tid, seqno); | 916 | ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); |
886 | bf->bf_state.ndelim = ndelim; | 917 | bf->bf_state.ndelim = ndelim; |
887 | 918 | ||
888 | __skb_unlink(skb, &tid->buf_q); | 919 | __skb_unlink(skb, &tid->buf_q); |
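The hunk above factors the per-TID lookup of the next eligible frame into ath_tx_get_tid_subframe(), so the aggregation path and the new U-APSD release path added further down share the same block-ack-window check. As a minimal, self-contained illustration of that check, the sketch below mirrors the BAW_WITHIN test over the 12-bit 802.11 sequence space; it is plain C written for this note, not code from the patch.

#include <stdbool.h>
#include <stdint.h>

#define SEQ_MASK 4095   /* 802.11 sequence numbers wrap at 4096 */

static bool baw_within(uint16_t baw_start, uint16_t baw_size, uint16_t seqno)
{
        /* distance from the window start, modulo the sequence space */
        return ((seqno - baw_start) & SEQ_MASK) < baw_size;
}

/* Example: baw_within(4090, 64, 5) is true, because 5 lies 11 steps past
 * 4090 once the sequence counter wraps at 4096. */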
@@ -1090,10 +1121,8 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, | |||
1090 | struct ath_txq *txq, int len) | 1121 | struct ath_txq *txq, int len) |
1091 | { | 1122 | { |
1092 | struct ath_hw *ah = sc->sc_ah; | 1123 | struct ath_hw *ah = sc->sc_ah; |
1093 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); | 1124 | struct ath_buf *bf_first = NULL; |
1094 | struct ath_buf *bf_first = bf; | ||
1095 | struct ath_tx_info info; | 1125 | struct ath_tx_info info; |
1096 | bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR); | ||
1097 | 1126 | ||
1098 | memset(&info, 0, sizeof(info)); | 1127 | memset(&info, 0, sizeof(info)); |
1099 | info.is_first = true; | 1128 | info.is_first = true; |
@@ -1101,24 +1130,11 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, | |||
1101 | info.txpower = MAX_RATE_POWER; | 1130 | info.txpower = MAX_RATE_POWER; |
1102 | info.qcu = txq->axq_qnum; | 1131 | info.qcu = txq->axq_qnum; |
1103 | 1132 | ||
1104 | info.flags = ATH9K_TXDESC_INTREQ; | ||
1105 | if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) | ||
1106 | info.flags |= ATH9K_TXDESC_NOACK; | ||
1107 | if (tx_info->flags & IEEE80211_TX_CTL_LDPC) | ||
1108 | info.flags |= ATH9K_TXDESC_LDPC; | ||
1109 | |||
1110 | ath_buf_set_rate(sc, bf, &info, len); | ||
1111 | |||
1112 | if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) | ||
1113 | info.flags |= ATH9K_TXDESC_CLRDMASK; | ||
1114 | |||
1115 | if (bf->bf_state.bfs_paprd) | ||
1116 | info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S; | ||
1117 | |||
1118 | |||
1119 | while (bf) { | 1133 | while (bf) { |
1120 | struct sk_buff *skb = bf->bf_mpdu; | 1134 | struct sk_buff *skb = bf->bf_mpdu; |
1135 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | ||
1121 | struct ath_frame_info *fi = get_frame_info(skb); | 1136 | struct ath_frame_info *fi = get_frame_info(skb); |
1137 | bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR); | ||
1122 | 1138 | ||
1123 | info.type = get_hw_packet_type(skb); | 1139 | info.type = get_hw_packet_type(skb); |
1124 | if (bf->bf_next) | 1140 | if (bf->bf_next) |
@@ -1126,6 +1142,26 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, | |||
1126 | else | 1142 | else |
1127 | info.link = 0; | 1143 | info.link = 0; |
1128 | 1144 | ||
1145 | if (!bf_first) { | ||
1146 | bf_first = bf; | ||
1147 | |||
1148 | info.flags = ATH9K_TXDESC_INTREQ; | ||
1149 | if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) || | ||
1150 | txq == sc->tx.uapsdq) | ||
1151 | info.flags |= ATH9K_TXDESC_CLRDMASK; | ||
1152 | |||
1153 | if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) | ||
1154 | info.flags |= ATH9K_TXDESC_NOACK; | ||
1155 | if (tx_info->flags & IEEE80211_TX_CTL_LDPC) | ||
1156 | info.flags |= ATH9K_TXDESC_LDPC; | ||
1157 | |||
1158 | if (bf->bf_state.bfs_paprd) | ||
1159 | info.flags |= (u32) bf->bf_state.bfs_paprd << | ||
1160 | ATH9K_TXDESC_PAPRD_S; | ||
1161 | |||
1162 | ath_buf_set_rate(sc, bf, &info, len); | ||
1163 | } | ||
1164 | |||
1129 | info.buf_addr[0] = bf->bf_buf_addr; | 1165 | info.buf_addr[0] = bf->bf_buf_addr; |
1130 | info.buf_len[0] = skb->len; | 1166 | info.buf_len[0] = skb->len; |
1131 | info.pkt_len = fi->framelen; | 1167 | info.pkt_len = fi->framelen; |
@@ -1135,7 +1171,7 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, | |||
1135 | if (aggr) { | 1171 | if (aggr) { |
1136 | if (bf == bf_first) | 1172 | if (bf == bf_first) |
1137 | info.aggr = AGGR_BUF_FIRST; | 1173 | info.aggr = AGGR_BUF_FIRST; |
1138 | else if (!bf->bf_next) | 1174 | else if (bf == bf_first->bf_lastbf) |
1139 | info.aggr = AGGR_BUF_LAST; | 1175 | info.aggr = AGGR_BUF_LAST; |
1140 | else | 1176 | else |
1141 | info.aggr = AGGR_BUF_MIDDLE; | 1177 | info.aggr = AGGR_BUF_MIDDLE; |
@@ -1144,6 +1180,9 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, | |||
1144 | info.aggr_len = len; | 1180 | info.aggr_len = len; |
1145 | } | 1181 | } |
1146 | 1182 | ||
1183 | if (bf == bf_first->bf_lastbf) | ||
1184 | bf_first = NULL; | ||
1185 | |||
1147 | ath9k_hw_set_txdesc(ah, bf->bf_desc, &info); | 1186 | ath9k_hw_set_txdesc(ah, bf->bf_desc, &info); |
1148 | bf = bf->bf_next; | 1187 | bf = bf->bf_next; |
1149 | } | 1188 | } |
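With the first-descriptor flag setup moved inside the loop, ath_tx_fill_desc() now tracks bf_first per aggregate and clears it once it reaches bf_first->bf_lastbf, so several aggregates chained on one descriptor list each get their own FIRST/MIDDLE/LAST markers. The plain-C sketch below shows only that classification pattern with invented names; it is illustrative and not taken from the driver.

struct node {
        struct node *next;
        struct node *group_last;        /* last member of this node's group */
};

enum position { POS_FIRST, POS_MIDDLE, POS_LAST };

static void classify_chain(struct node *n, enum position *out)
{
        struct node *first = NULL;
        int i = 0;

        for (; n; n = n->next, i++) {
                if (!first)
                        first = n;

                if (n == first)
                        out[i] = POS_FIRST;
                else if (n == first->group_last)
                        out[i] = POS_LAST;
                else
                        out[i] = POS_MIDDLE;

                /* the node after a group's last member opens a new group */
                if (n == first->group_last)
                        first = NULL;
        }
}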
@@ -1328,6 +1367,70 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, | |||
1328 | ath_txq_unlock_complete(sc, txq); | 1367 | ath_txq_unlock_complete(sc, txq); |
1329 | } | 1368 | } |
1330 | 1369 | ||
1370 | void ath9k_release_buffered_frames(struct ieee80211_hw *hw, | ||
1371 | struct ieee80211_sta *sta, | ||
1372 | u16 tids, int nframes, | ||
1373 | enum ieee80211_frame_release_type reason, | ||
1374 | bool more_data) | ||
1375 | { | ||
1376 | struct ath_softc *sc = hw->priv; | ||
1377 | struct ath_node *an = (struct ath_node *)sta->drv_priv; | ||
1378 | struct ath_txq *txq = sc->tx.uapsdq; | ||
1379 | struct ieee80211_tx_info *info; | ||
1380 | struct list_head bf_q; | ||
1381 | struct ath_buf *bf_tail = NULL, *bf; | ||
1382 | int sent = 0; | ||
1383 | int i; | ||
1384 | |||
1385 | INIT_LIST_HEAD(&bf_q); | ||
1386 | for (i = 0; tids && nframes; i++, tids >>= 1) { | ||
1387 | struct ath_atx_tid *tid; | ||
1388 | |||
1389 | if (!(tids & 1)) | ||
1390 | continue; | ||
1391 | |||
1392 | tid = ATH_AN_2_TID(an, i); | ||
1393 | if (tid->paused) | ||
1394 | continue; | ||
1395 | |||
1396 | ath_txq_lock(sc, tid->ac->txq); | ||
1397 | while (!skb_queue_empty(&tid->buf_q) && nframes > 0) { | ||
1398 | bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid); | ||
1399 | if (!bf) | ||
1400 | break; | ||
1401 | |||
1402 | __skb_unlink(bf->bf_mpdu, &tid->buf_q); | ||
1403 | list_add_tail(&bf->list, &bf_q); | ||
1404 | ath_set_rates(tid->an->vif, tid->an->sta, bf); | ||
1405 | ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); | ||
1406 | bf->bf_state.bf_type &= ~BUF_AGGR; | ||
1407 | if (bf_tail) | ||
1408 | bf_tail->bf_next = bf; | ||
1409 | |||
1410 | bf_tail = bf; | ||
1411 | nframes--; | ||
1412 | sent++; | ||
1413 | TX_STAT_INC(txq->axq_qnum, a_queued_hw); | ||
1414 | |||
1415 | if (skb_queue_empty(&tid->buf_q)) | ||
1416 | ieee80211_sta_set_buffered(an->sta, i, false); | ||
1417 | } | ||
1418 | ath_txq_unlock_complete(sc, tid->ac->txq); | ||
1419 | } | ||
1420 | |||
1421 | if (list_empty(&bf_q)) | ||
1422 | return; | ||
1423 | |||
1424 | info = IEEE80211_SKB_CB(bf_tail->bf_mpdu); | ||
1425 | info->flags |= IEEE80211_TX_STATUS_EOSP; | ||
1426 | |||
1427 | bf = list_first_entry(&bf_q, struct ath_buf, list); | ||
1428 | ath_txq_lock(sc, txq); | ||
1429 | ath_tx_fill_desc(sc, bf, txq, 0); | ||
1430 | ath_tx_txqaddbuf(sc, txq, &bf_q, false); | ||
1431 | ath_txq_unlock(sc, txq); | ||
1432 | } | ||
1433 | |||
1331 | /********************/ | 1434 | /********************/ |
1332 | /* Queue Management */ | 1435 | /* Queue Management */ |
1333 | /********************/ | 1436 | /********************/ |
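ath9k_release_buffered_frames() above implements the mac80211 release_buffered_frames hook used for U-APSD delivery: bit i of the tids mask selects TID i, the nframes budget is shared across all selected TIDs, and the last frame moved to the U-APSD queue is tagged with IEEE80211_TX_STATUS_EOSP so the completion path (see the EOSP handling added to ath_tx_complete_aggr earlier in this patch) can call ieee80211_sta_eosp(). The loop structure is easy to misread, so here is a minimal plain-C rendering of just the bitmask walk; the queued[] array and function name are invented.

static void walk_tids(unsigned int tids, int nframes, int queued[16])
{
        int i;

        for (i = 0; tids && nframes > 0; i++, tids >>= 1) {
                if (!(tids & 1))
                        continue;       /* TID i was not requested */

                /* drain TID i until it is empty or the shared budget runs out */
                while (queued[i] > 0 && nframes > 0) {
                        queued[i]--;
                        nframes--;
                }
        }
}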
@@ -1681,8 +1784,9 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, | |||
1681 | } | 1784 | } |
1682 | } | 1785 | } |
1683 | 1786 | ||
1684 | static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, | 1787 | static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq, |
1685 | struct sk_buff *skb, struct ath_tx_control *txctl) | 1788 | struct ath_atx_tid *tid, struct sk_buff *skb, |
1789 | struct ath_tx_control *txctl) | ||
1686 | { | 1790 | { |
1687 | struct ath_frame_info *fi = get_frame_info(skb); | 1791 | struct ath_frame_info *fi = get_frame_info(skb); |
1688 | struct list_head bf_head; | 1792 | struct list_head bf_head; |
@@ -1695,21 +1799,22 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, | |||
1695 | * - seqno is not within block-ack window | 1799 | * - seqno is not within block-ack window |
1696 | * - h/w queue depth exceeds low water mark | 1800 | * - h/w queue depth exceeds low water mark |
1697 | */ | 1801 | */ |
1698 | if (!skb_queue_empty(&tid->buf_q) || tid->paused || | 1802 | if ((!skb_queue_empty(&tid->buf_q) || tid->paused || |
1699 | !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) || | 1803 | !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) || |
1700 | txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) { | 1804 | txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) && |
1805 | txq != sc->tx.uapsdq) { | ||
1701 | /* | 1806 | /* |
1702 | * Add this frame to software queue for scheduling later | 1807 | * Add this frame to software queue for scheduling later |
1703 | * for aggregation. | 1808 | * for aggregation. |
1704 | */ | 1809 | */ |
1705 | TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw); | 1810 | TX_STAT_INC(txq->axq_qnum, a_queued_sw); |
1706 | __skb_queue_tail(&tid->buf_q, skb); | 1811 | __skb_queue_tail(&tid->buf_q, skb); |
1707 | if (!txctl->an || !txctl->an->sleeping) | 1812 | if (!txctl->an || !txctl->an->sleeping) |
1708 | ath_tx_queue_tid(txctl->txq, tid); | 1813 | ath_tx_queue_tid(txq, tid); |
1709 | return; | 1814 | return; |
1710 | } | 1815 | } |
1711 | 1816 | ||
1712 | bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); | 1817 | bf = ath_tx_setup_buffer(sc, txq, tid, skb); |
1713 | if (!bf) { | 1818 | if (!bf) { |
1714 | ieee80211_free_txskb(sc->hw, skb); | 1819 | ieee80211_free_txskb(sc->hw, skb); |
1715 | return; | 1820 | return; |
@@ -1724,10 +1829,10 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, | |||
1724 | ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); | 1829 | ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); |
1725 | 1830 | ||
1726 | /* Queue to h/w without aggregation */ | 1831 | /* Queue to h/w without aggregation */ |
1727 | TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw); | 1832 | TX_STAT_INC(txq->axq_qnum, a_queued_hw); |
1728 | bf->bf_lastbf = bf; | 1833 | bf->bf_lastbf = bf; |
1729 | ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen); | 1834 | ath_tx_fill_desc(sc, bf, txq, fi->framelen); |
1730 | ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false); | 1835 | ath_tx_txqaddbuf(sc, txq, &bf_head, false); |
1731 | } | 1836 | } |
1732 | 1837 | ||
1733 | static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, | 1838 | static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, |
@@ -1865,22 +1970,16 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, | |||
1865 | return bf; | 1970 | return bf; |
1866 | } | 1971 | } |
1867 | 1972 | ||
1868 | /* Upon failure caller should free skb */ | 1973 | static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb, |
1869 | int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, | 1974 | struct ath_tx_control *txctl) |
1870 | struct ath_tx_control *txctl) | ||
1871 | { | 1975 | { |
1872 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 1976 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
1873 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 1977 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1874 | struct ieee80211_sta *sta = txctl->sta; | 1978 | struct ieee80211_sta *sta = txctl->sta; |
1875 | struct ieee80211_vif *vif = info->control.vif; | 1979 | struct ieee80211_vif *vif = info->control.vif; |
1876 | struct ath_softc *sc = hw->priv; | 1980 | struct ath_softc *sc = hw->priv; |
1877 | struct ath_txq *txq = txctl->txq; | ||
1878 | struct ath_atx_tid *tid = NULL; | ||
1879 | struct ath_buf *bf; | ||
1880 | int padpos, padsize; | ||
1881 | int frmlen = skb->len + FCS_LEN; | 1981 | int frmlen = skb->len + FCS_LEN; |
1882 | u8 tidno; | 1982 | int padpos, padsize; |
1883 | int q; | ||
1884 | 1983 | ||
1885 | /* NOTE: sta can be NULL according to net/mac80211.h */ | 1984 | /* NOTE: sta can be NULL according to net/mac80211.h */ |
1886 | if (sta) | 1985 | if (sta) |
@@ -1901,6 +2000,11 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
1901 | hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no); | 2000 | hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no); |
1902 | } | 2001 | } |
1903 | 2002 | ||
2003 | if ((vif && vif->type != NL80211_IFTYPE_AP && | ||
2004 | vif->type != NL80211_IFTYPE_AP_VLAN) || | ||
2005 | !ieee80211_is_data(hdr->frame_control)) | ||
2006 | info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; | ||
2007 | |||
1904 | /* Add the padding after the header if this is not already done */ | 2008 | /* Add the padding after the header if this is not already done */ |
1905 | padpos = ieee80211_hdrlen(hdr->frame_control); | 2009 | padpos = ieee80211_hdrlen(hdr->frame_control); |
1906 | padsize = padpos & 3; | 2010 | padsize = padpos & 3; |
@@ -1910,16 +2014,34 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
1910 | 2014 | ||
1911 | skb_push(skb, padsize); | 2015 | skb_push(skb, padsize); |
1912 | memmove(skb->data, skb->data + padsize, padpos); | 2016 | memmove(skb->data, skb->data + padsize, padpos); |
1913 | hdr = (struct ieee80211_hdr *) skb->data; | ||
1914 | } | 2017 | } |
1915 | 2018 | ||
1916 | if ((vif && vif->type != NL80211_IFTYPE_AP && | ||
1917 | vif->type != NL80211_IFTYPE_AP_VLAN) || | ||
1918 | !ieee80211_is_data(hdr->frame_control)) | ||
1919 | info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; | ||
1920 | |||
1921 | setup_frame_info(hw, sta, skb, frmlen); | 2019 | setup_frame_info(hw, sta, skb, frmlen); |
2020 | return 0; | ||
2021 | } | ||
1922 | 2022 | ||
2023 | |||
2024 | /* Upon failure caller should free skb */ | ||
2025 | int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, | ||
2026 | struct ath_tx_control *txctl) | ||
2027 | { | ||
2028 | struct ieee80211_hdr *hdr; | ||
2029 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
2030 | struct ieee80211_sta *sta = txctl->sta; | ||
2031 | struct ieee80211_vif *vif = info->control.vif; | ||
2032 | struct ath_softc *sc = hw->priv; | ||
2033 | struct ath_txq *txq = txctl->txq; | ||
2034 | struct ath_atx_tid *tid = NULL; | ||
2035 | struct ath_buf *bf; | ||
2036 | u8 tidno; | ||
2037 | int q; | ||
2038 | int ret; | ||
2039 | |||
2040 | ret = ath_tx_prepare(hw, skb, txctl); | ||
2041 | if (ret) | ||
2042 | return ret; | ||
2043 | |||
2044 | hdr = (struct ieee80211_hdr *) skb->data; | ||
1923 | /* | 2045 | /* |
1924 | * At this point, the vif, hw_key and sta pointers in the tx control | 2046 | * At this point, the vif, hw_key and sta pointers in the tx control |
1925 | * info are no longer valid (overwritten by the ath_frame_info data. | 2047 | * info are no longer valid (overwritten by the ath_frame_info data. |
@@ -1935,6 +2057,12 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
1935 | txq->stopped = true; | 2057 | txq->stopped = true; |
1936 | } | 2058 | } |
1937 | 2059 | ||
2060 | if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) { | ||
2061 | ath_txq_unlock(sc, txq); | ||
2062 | txq = sc->tx.uapsdq; | ||
2063 | ath_txq_lock(sc, txq); | ||
2064 | } | ||
2065 | |||
1938 | if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) { | 2066 | if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) { |
1939 | tidno = ieee80211_get_qos_ctl(hdr)[0] & | 2067 | tidno = ieee80211_get_qos_ctl(hdr)[0] & |
1940 | IEEE80211_QOS_CTL_TID_MASK; | 2068 | IEEE80211_QOS_CTL_TID_MASK; |
@@ -1948,11 +2076,11 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
1948 | * Try aggregation if it's a unicast data frame | 2076 | * Try aggregation if it's a unicast data frame |
1949 | * and the destination is HT capable. | 2077 | * and the destination is HT capable. |
1950 | */ | 2078 | */ |
1951 | ath_tx_send_ampdu(sc, tid, skb, txctl); | 2079 | ath_tx_send_ampdu(sc, txq, tid, skb, txctl); |
1952 | goto out; | 2080 | goto out; |
1953 | } | 2081 | } |
1954 | 2082 | ||
1955 | bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); | 2083 | bf = ath_tx_setup_buffer(sc, txq, tid, skb); |
1956 | if (!bf) { | 2084 | if (!bf) { |
1957 | if (txctl->paprd) | 2085 | if (txctl->paprd) |
1958 | dev_kfree_skb_any(skb); | 2086 | dev_kfree_skb_any(skb); |
@@ -1967,7 +2095,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
1967 | bf->bf_state.bfs_paprd_timestamp = jiffies; | 2095 | bf->bf_state.bfs_paprd_timestamp = jiffies; |
1968 | 2096 | ||
1969 | ath_set_rates(vif, sta, bf); | 2097 | ath_set_rates(vif, sta, bf); |
1970 | ath_tx_send_normal(sc, txctl->txq, tid, skb); | 2098 | ath_tx_send_normal(sc, txq, tid, skb); |
1971 | 2099 | ||
1972 | out: | 2100 | out: |
1973 | ath_txq_unlock(sc, txq); | 2101 | ath_txq_unlock(sc, txq); |
@@ -1975,6 +2103,74 @@ out: | |||
1975 | return 0; | 2103 | return 0; |
1976 | } | 2104 | } |
1977 | 2105 | ||
2106 | void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | ||
2107 | struct sk_buff *skb) | ||
2108 | { | ||
2109 | struct ath_softc *sc = hw->priv; | ||
2110 | struct ath_tx_control txctl = { | ||
2111 | .txq = sc->beacon.cabq | ||
2112 | }; | ||
2113 | struct ath_tx_info info = {}; | ||
2114 | struct ieee80211_hdr *hdr; | ||
2115 | struct ath_buf *bf_tail = NULL; | ||
2116 | struct ath_buf *bf; | ||
2117 | LIST_HEAD(bf_q); | ||
2118 | int duration = 0; | ||
2119 | int max_duration; | ||
2120 | |||
2121 | max_duration = | ||
2122 | sc->cur_beacon_conf.beacon_interval * 1000 * | ||
2123 | sc->cur_beacon_conf.dtim_period / ATH_BCBUF; | ||
2124 | |||
2125 | do { | ||
2126 | struct ath_frame_info *fi = get_frame_info(skb); | ||
2127 | |||
2128 | if (ath_tx_prepare(hw, skb, &txctl)) | ||
2129 | break; | ||
2130 | |||
2131 | bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb); | ||
2132 | if (!bf) | ||
2133 | break; | ||
2134 | |||
2135 | bf->bf_lastbf = bf; | ||
2136 | ath_set_rates(vif, NULL, bf); | ||
2137 | ath_buf_set_rate(sc, bf, &info, fi->framelen); | ||
2138 | duration += info.rates[0].PktDuration; | ||
2139 | if (bf_tail) | ||
2140 | bf_tail->bf_next = bf; | ||
2141 | |||
2142 | list_add_tail(&bf->list, &bf_q); | ||
2143 | bf_tail = bf; | ||
2144 | skb = NULL; | ||
2145 | |||
2146 | if (duration > max_duration) | ||
2147 | break; | ||
2148 | |||
2149 | skb = ieee80211_get_buffered_bc(hw, vif); | ||
2150 | } while (skb); | ||

2151 | |||
2152 | if (skb) | ||
2153 | ieee80211_free_txskb(hw, skb); | ||
2154 | |||
2155 | if (list_empty(&bf_q)) | ||
2156 | return; | ||
2157 | |||
2158 | bf = list_first_entry(&bf_q, struct ath_buf, list); | ||
2159 | hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data; | ||
2160 | |||
2161 | if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) { | ||
2162 | hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA; | ||
2163 | dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, | ||
2164 | sizeof(*hdr), DMA_TO_DEVICE); | ||
2165 | } | ||
2166 | |||
2167 | ath_txq_lock(sc, txctl.txq); | ||
2168 | ath_tx_fill_desc(sc, bf, txctl.txq, 0); | ||
2169 | ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false); | ||
2170 | TX_STAT_INC(txctl.txq->axq_qnum, queued); | ||
2171 | ath_txq_unlock(sc, txctl.txq); | ||
2172 | } | ||
2173 | |||
1978 | /*****************/ | 2174 | /*****************/ |
1979 | /* TX Completion */ | 2175 | /* TX Completion */ |
1980 | /*****************/ | 2176 | /*****************/ |
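In ath_tx_cabq() above, the burst of buffered multicast frames sent after a DTIM beacon is bounded by max_duration = beacon_interval * 1000 * dtim_period / ATH_BCBUF, with each frame's info.rates[0].PktDuration accumulated against that bound. As a worked example under assumed values (beacon interval 100 TU, DTIM period 1, and ATH_BCBUF taken to be 8 beacon slots), max_duration = 100 * 1000 * 1 / 8 = 12500, so the loop stops chaining CAB frames once their cumulative estimated airtime passes roughly 12.5 ms; the factor of 1000 appears to serve as a rough TU-to-microsecond conversion.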
@@ -2020,7 +2216,12 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, | |||
2020 | } | 2216 | } |
2021 | spin_unlock_irqrestore(&sc->sc_pm_lock, flags); | 2217 | spin_unlock_irqrestore(&sc->sc_pm_lock, flags); |
2022 | 2218 | ||
2219 | __skb_queue_tail(&txq->complete_q, skb); | ||
2220 | |||
2023 | q = skb_get_queue_mapping(skb); | 2221 | q = skb_get_queue_mapping(skb); |
2222 | if (txq == sc->tx.uapsdq) | ||
2223 | txq = sc->tx.txq_map[q]; | ||
2224 | |||
2024 | if (txq == sc->tx.txq_map[q]) { | 2225 | if (txq == sc->tx.txq_map[q]) { |
2025 | if (WARN_ON(--txq->pending_frames < 0)) | 2226 | if (WARN_ON(--txq->pending_frames < 0)) |
2026 | txq->pending_frames = 0; | 2227 | txq->pending_frames = 0; |
@@ -2031,8 +2232,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, | |||
2031 | txq->stopped = false; | 2232 | txq->stopped = false; |
2032 | } | 2233 | } |
2033 | } | 2234 | } |
2034 | |||
2035 | __skb_queue_tail(&txq->complete_q, skb); | ||
2036 | } | 2235 | } |
2037 | 2236 | ||
2038 | static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, | 2237 | static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, |
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h index 9dce106cd6d4..8596aba34f96 100644 --- a/drivers/net/wireless/ath/carl9170/carl9170.h +++ b/drivers/net/wireless/ath/carl9170/carl9170.h | |||
@@ -133,6 +133,9 @@ struct carl9170_sta_tid { | |||
133 | 133 | ||
134 | /* Preaggregation reorder queue */ | 134 | /* Preaggregation reorder queue */ |
135 | struct sk_buff_head queue; | 135 | struct sk_buff_head queue; |
136 | |||
137 | struct ieee80211_sta *sta; | ||
138 | struct ieee80211_vif *vif; | ||
136 | }; | 139 | }; |
137 | 140 | ||
138 | #define CARL9170_QUEUE_TIMEOUT 256 | 141 | #define CARL9170_QUEUE_TIMEOUT 256 |
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index e9010a481dfd..4a33c6e39ca2 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c | |||
@@ -1448,6 +1448,8 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw, | |||
1448 | tid_info->state = CARL9170_TID_STATE_PROGRESS; | 1448 | tid_info->state = CARL9170_TID_STATE_PROGRESS; |
1449 | tid_info->tid = tid; | 1449 | tid_info->tid = tid; |
1450 | tid_info->max = sta_info->ampdu_max_len; | 1450 | tid_info->max = sta_info->ampdu_max_len; |
1451 | tid_info->sta = sta; | ||
1452 | tid_info->vif = vif; | ||
1451 | 1453 | ||
1452 | INIT_LIST_HEAD(&tid_info->list); | 1454 | INIT_LIST_HEAD(&tid_info->list); |
1453 | INIT_LIST_HEAD(&tid_info->tmp_list); | 1455 | INIT_LIST_HEAD(&tid_info->tmp_list); |
@@ -1857,6 +1859,7 @@ void *carl9170_alloc(size_t priv_size) | |||
1857 | IEEE80211_HW_SUPPORTS_PS | | 1859 | IEEE80211_HW_SUPPORTS_PS | |
1858 | IEEE80211_HW_PS_NULLFUNC_STACK | | 1860 | IEEE80211_HW_PS_NULLFUNC_STACK | |
1859 | IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | | 1861 | IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | |
1862 | IEEE80211_HW_SUPPORTS_RC_TABLE | | ||
1860 | IEEE80211_HW_SIGNAL_DBM; | 1863 | IEEE80211_HW_SIGNAL_DBM; |
1861 | 1864 | ||
1862 | if (!modparam_noht) { | 1865 | if (!modparam_noht) { |
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index c61cafa2665b..e3f696ee4d23 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c | |||
@@ -625,7 +625,7 @@ static void carl9170_tx_ampdu_timeout(struct ar9170 *ar) | |||
625 | msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT))) | 625 | msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT))) |
626 | goto unlock; | 626 | goto unlock; |
627 | 627 | ||
628 | sta = __carl9170_get_tx_sta(ar, skb); | 628 | sta = iter->sta; |
629 | if (WARN_ON(!sta)) | 629 | if (WARN_ON(!sta)) |
630 | goto unlock; | 630 | goto unlock; |
631 | 631 | ||
@@ -866,6 +866,93 @@ static bool carl9170_tx_cts_check(struct ar9170 *ar, | |||
866 | return false; | 866 | return false; |
867 | } | 867 | } |
868 | 868 | ||
869 | static void carl9170_tx_get_rates(struct ar9170 *ar, | ||
870 | struct ieee80211_vif *vif, | ||
871 | struct ieee80211_sta *sta, | ||
872 | struct sk_buff *skb) | ||
873 | { | ||
874 | struct ieee80211_tx_info *info; | ||
875 | |||
876 | BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES); | ||
877 | BUILD_BUG_ON(IEEE80211_TX_MAX_RATES > IEEE80211_TX_RATE_TABLE_SIZE); | ||
878 | |||
879 | info = IEEE80211_SKB_CB(skb); | ||
880 | |||
881 | ieee80211_get_tx_rates(vif, sta, skb, | ||
882 | info->control.rates, | ||
883 | IEEE80211_TX_MAX_RATES); | ||
884 | } | ||
885 | |||
886 | static void carl9170_tx_apply_rateset(struct ar9170 *ar, | ||
887 | struct ieee80211_tx_info *sinfo, | ||
888 | struct sk_buff *skb) | ||
889 | { | ||
890 | struct ieee80211_tx_rate *txrate; | ||
891 | struct ieee80211_tx_info *info; | ||
892 | struct _carl9170_tx_superframe *txc = (void *) skb->data; | ||
893 | int i; | ||
894 | bool ampdu; | ||
895 | bool no_ack; | ||
896 | |||
897 | info = IEEE80211_SKB_CB(skb); | ||
898 | ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU); | ||
899 | no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK); | ||
900 | |||
901 | /* Set the rate control probe flag for all (sub-) frames. | ||
902 | * This is because the TX_STATS_AMPDU flag is only set on | ||
903 | * the last frame, so it has to be inherited. | ||
904 | */ | ||
905 | info->flags |= (sinfo->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); | ||
906 | |||
907 | /* NOTE: For the first rate, the ERP & AMPDU flags are directly | ||
908 | * taken from mac_control. For all fallback rates, the firmware | ||
909 | * updates the mac_control flags from the rate info field. | ||
910 | */ | ||
911 | for (i = 0; i < CARL9170_TX_MAX_RATES; i++) { | ||
912 | __le32 phy_set; | ||
913 | |||
914 | txrate = &sinfo->control.rates[i]; | ||
915 | if (txrate->idx < 0) | ||
916 | break; | ||
917 | |||
918 | phy_set = carl9170_tx_physet(ar, info, txrate); | ||
919 | if (i == 0) { | ||
920 | __le16 mac_tmp = cpu_to_le16(0); | ||
921 | |||
922 | /* first rate - part of the hw's frame header */ | ||
923 | txc->f.phy_control = phy_set; | ||
924 | |||
925 | if (ampdu && txrate->flags & IEEE80211_TX_RC_MCS) | ||
926 | mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR); | ||
927 | |||
928 | if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack)) | ||
929 | mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS); | ||
930 | else if (carl9170_tx_cts_check(ar, txrate)) | ||
931 | mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS); | ||
932 | |||
933 | txc->f.mac_control |= mac_tmp; | ||
934 | } else { | ||
935 | /* fallback rates are stored in the firmware's | ||
936 | * retry rate set array. | ||
937 | */ | ||
938 | txc->s.rr[i - 1] = phy_set; | ||
939 | } | ||
940 | |||
941 | SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i], | ||
942 | txrate->count); | ||
943 | |||
944 | if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack)) | ||
945 | txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS << | ||
946 | CARL9170_TX_SUPER_RI_ERP_PROT_S); | ||
947 | else if (carl9170_tx_cts_check(ar, txrate)) | ||
948 | txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS << | ||
949 | CARL9170_TX_SUPER_RI_ERP_PROT_S); | ||
950 | |||
951 | if (ampdu && (txrate->flags & IEEE80211_TX_RC_MCS)) | ||
952 | txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU; | ||
953 | } | ||
954 | } | ||
955 | |||
869 | static int carl9170_tx_prepare(struct ar9170 *ar, | 956 | static int carl9170_tx_prepare(struct ar9170 *ar, |
870 | struct ieee80211_sta *sta, | 957 | struct ieee80211_sta *sta, |
871 | struct sk_buff *skb) | 958 | struct sk_buff *skb) |
@@ -874,13 +961,10 @@ static int carl9170_tx_prepare(struct ar9170 *ar, | |||
874 | struct _carl9170_tx_superframe *txc; | 961 | struct _carl9170_tx_superframe *txc; |
875 | struct carl9170_vif_info *cvif; | 962 | struct carl9170_vif_info *cvif; |
876 | struct ieee80211_tx_info *info; | 963 | struct ieee80211_tx_info *info; |
877 | struct ieee80211_tx_rate *txrate; | ||
878 | struct carl9170_tx_info *arinfo; | 964 | struct carl9170_tx_info *arinfo; |
879 | unsigned int hw_queue; | 965 | unsigned int hw_queue; |
880 | int i; | ||
881 | __le16 mac_tmp; | 966 | __le16 mac_tmp; |
882 | u16 len; | 967 | u16 len; |
883 | bool ampdu, no_ack; | ||
884 | 968 | ||
885 | BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data)); | 969 | BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data)); |
886 | BUILD_BUG_ON(sizeof(struct _carl9170_tx_superdesc) != | 970 | BUILD_BUG_ON(sizeof(struct _carl9170_tx_superdesc) != |
@@ -889,8 +973,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar, | |||
889 | BUILD_BUG_ON(sizeof(struct _ar9170_tx_hwdesc) != | 973 | BUILD_BUG_ON(sizeof(struct _ar9170_tx_hwdesc) != |
890 | AR9170_TX_HWDESC_LEN); | 974 | AR9170_TX_HWDESC_LEN); |
891 | 975 | ||
892 | BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES); | ||
893 | |||
894 | BUILD_BUG_ON(AR9170_MAX_VIRTUAL_MAC > | 976 | BUILD_BUG_ON(AR9170_MAX_VIRTUAL_MAC > |
895 | ((CARL9170_TX_SUPER_MISC_VIF_ID >> | 977 | ((CARL9170_TX_SUPER_MISC_VIF_ID >> |
896 | CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1)); | 978 | CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1)); |
@@ -932,8 +1014,7 @@ static int carl9170_tx_prepare(struct ar9170 *ar, | |||
932 | mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) & | 1014 | mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) & |
933 | AR9170_TX_MAC_QOS); | 1015 | AR9170_TX_MAC_QOS); |
934 | 1016 | ||
935 | no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK); | 1017 | if (unlikely(info->flags & IEEE80211_TX_CTL_NO_ACK)) |
936 | if (unlikely(no_ack)) | ||
937 | mac_tmp |= cpu_to_le16(AR9170_TX_MAC_NO_ACK); | 1018 | mac_tmp |= cpu_to_le16(AR9170_TX_MAC_NO_ACK); |
938 | 1019 | ||
939 | if (info->control.hw_key) { | 1020 | if (info->control.hw_key) { |
@@ -954,8 +1035,7 @@ static int carl9170_tx_prepare(struct ar9170 *ar, | |||
954 | } | 1035 | } |
955 | } | 1036 | } |
956 | 1037 | ||
957 | ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU); | 1038 | if (info->flags & IEEE80211_TX_CTL_AMPDU) { |
958 | if (ampdu) { | ||
959 | unsigned int density, factor; | 1039 | unsigned int density, factor; |
960 | 1040 | ||
961 | if (unlikely(!sta || !cvif)) | 1041 | if (unlikely(!sta || !cvif)) |
@@ -982,50 +1062,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar, | |||
982 | txc->s.ampdu_settings, factor); | 1062 | txc->s.ampdu_settings, factor); |
983 | } | 1063 | } |
984 | 1064 | ||
985 | /* | ||
986 | * NOTE: For the first rate, the ERP & AMPDU flags are directly | ||
987 | * taken from mac_control. For all fallback rate, the firmware | ||
988 | * updates the mac_control flags from the rate info field. | ||
989 | */ | ||
990 | for (i = 0; i < CARL9170_TX_MAX_RATES; i++) { | ||
991 | __le32 phy_set; | ||
992 | txrate = &info->control.rates[i]; | ||
993 | if (txrate->idx < 0) | ||
994 | break; | ||
995 | |||
996 | phy_set = carl9170_tx_physet(ar, info, txrate); | ||
997 | if (i == 0) { | ||
998 | /* first rate - part of the hw's frame header */ | ||
999 | txc->f.phy_control = phy_set; | ||
1000 | |||
1001 | if (ampdu && txrate->flags & IEEE80211_TX_RC_MCS) | ||
1002 | mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR); | ||
1003 | if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack)) | ||
1004 | mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS); | ||
1005 | else if (carl9170_tx_cts_check(ar, txrate)) | ||
1006 | mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS); | ||
1007 | |||
1008 | } else { | ||
1009 | /* fallback rates are stored in the firmware's | ||
1010 | * retry rate set array. | ||
1011 | */ | ||
1012 | txc->s.rr[i - 1] = phy_set; | ||
1013 | } | ||
1014 | |||
1015 | SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i], | ||
1016 | txrate->count); | ||
1017 | |||
1018 | if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack)) | ||
1019 | txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS << | ||
1020 | CARL9170_TX_SUPER_RI_ERP_PROT_S); | ||
1021 | else if (carl9170_tx_cts_check(ar, txrate)) | ||
1022 | txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS << | ||
1023 | CARL9170_TX_SUPER_RI_ERP_PROT_S); | ||
1024 | |||
1025 | if (ampdu && (txrate->flags & IEEE80211_TX_RC_MCS)) | ||
1026 | txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU; | ||
1027 | } | ||
1028 | |||
1029 | txc->s.len = cpu_to_le16(skb->len); | 1065 | txc->s.len = cpu_to_le16(skb->len); |
1030 | txc->f.length = cpu_to_le16(len + FCS_LEN); | 1066 | txc->f.length = cpu_to_le16(len + FCS_LEN); |
1031 | txc->f.mac_control = mac_tmp; | 1067 | txc->f.mac_control = mac_tmp; |
@@ -1086,31 +1122,12 @@ static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb) | |||
1086 | } | 1122 | } |
1087 | } | 1123 | } |
1088 | 1124 | ||
1089 | static bool carl9170_tx_rate_check(struct ar9170 *ar, struct sk_buff *_dest, | ||
1090 | struct sk_buff *_src) | ||
1091 | { | ||
1092 | struct _carl9170_tx_superframe *dest, *src; | ||
1093 | |||
1094 | dest = (void *) _dest->data; | ||
1095 | src = (void *) _src->data; | ||
1096 | |||
1097 | /* | ||
1098 | * The mac80211 rate control algorithm expects that all MPDUs in | ||
1099 | * an AMPDU share the same tx vectors. | ||
1100 | * This is not really obvious right now, because the hardware | ||
1101 | * does the AMPDU setup according to its own rulebook. | ||
1102 | * Our nicely assembled, strictly monotonic increasing mpdu | ||
1103 | * chains will be broken up, mashed back together... | ||
1104 | */ | ||
1105 | |||
1106 | return (dest->f.phy_control == src->f.phy_control); | ||
1107 | } | ||
1108 | |||
1109 | static void carl9170_tx_ampdu(struct ar9170 *ar) | 1125 | static void carl9170_tx_ampdu(struct ar9170 *ar) |
1110 | { | 1126 | { |
1111 | struct sk_buff_head agg; | 1127 | struct sk_buff_head agg; |
1112 | struct carl9170_sta_tid *tid_info; | 1128 | struct carl9170_sta_tid *tid_info; |
1113 | struct sk_buff *skb, *first; | 1129 | struct sk_buff *skb, *first; |
1130 | struct ieee80211_tx_info *tx_info_first; | ||
1114 | unsigned int i = 0, done_ampdus = 0; | 1131 | unsigned int i = 0, done_ampdus = 0; |
1115 | u16 seq, queue, tmpssn; | 1132 | u16 seq, queue, tmpssn; |
1116 | 1133 | ||
@@ -1156,6 +1173,7 @@ retry: | |||
1156 | goto processed; | 1173 | goto processed; |
1157 | } | 1174 | } |
1158 | 1175 | ||
1176 | tx_info_first = NULL; | ||
1159 | while ((skb = skb_peek(&tid_info->queue))) { | 1177 | while ((skb = skb_peek(&tid_info->queue))) { |
1160 | /* strict 0, 1, ..., n - 1, n frame sequence order */ | 1178 | /* strict 0, 1, ..., n - 1, n frame sequence order */ |
1161 | if (unlikely(carl9170_get_seq(skb) != seq)) | 1179 | if (unlikely(carl9170_get_seq(skb) != seq)) |
@@ -1166,8 +1184,13 @@ retry: | |||
1166 | (tid_info->max - 1))) | 1184 | (tid_info->max - 1))) |
1167 | break; | 1185 | break; |
1168 | 1186 | ||
1169 | if (!carl9170_tx_rate_check(ar, skb, first)) | 1187 | if (!tx_info_first) { |
1170 | break; | 1188 | carl9170_tx_get_rates(ar, tid_info->vif, |
1189 | tid_info->sta, first); | ||
1190 | tx_info_first = IEEE80211_SKB_CB(first); | ||
1191 | } | ||
1192 | |||
1193 | carl9170_tx_apply_rateset(ar, tx_info_first, skb); | ||
1171 | 1194 | ||
1172 | atomic_inc(&ar->tx_ampdu_upload); | 1195 | atomic_inc(&ar->tx_ampdu_upload); |
1173 | tid_info->snx = seq = SEQ_NEXT(seq); | 1196 | tid_info->snx = seq = SEQ_NEXT(seq); |
@@ -1182,8 +1205,7 @@ retry: | |||
1182 | if (skb_queue_empty(&tid_info->queue) || | 1205 | if (skb_queue_empty(&tid_info->queue) || |
1183 | carl9170_get_seq(skb_peek(&tid_info->queue)) != | 1206 | carl9170_get_seq(skb_peek(&tid_info->queue)) != |
1184 | tid_info->snx) { | 1207 | tid_info->snx) { |
1185 | /* | 1208 | /* stop TID, if A-MPDU frames are still missing, |
1186 | * stop TID, if A-MPDU frames are still missing, | ||
1187 | * or whenever the queue is empty. | 1209 | * or whenever the queue is empty. |
1188 | */ | 1210 | */ |
1189 | 1211 | ||
@@ -1450,12 +1472,14 @@ void carl9170_op_tx(struct ieee80211_hw *hw, | |||
1450 | struct ar9170 *ar = hw->priv; | 1472 | struct ar9170 *ar = hw->priv; |
1451 | struct ieee80211_tx_info *info; | 1473 | struct ieee80211_tx_info *info; |
1452 | struct ieee80211_sta *sta = control->sta; | 1474 | struct ieee80211_sta *sta = control->sta; |
1475 | struct ieee80211_vif *vif; | ||
1453 | bool run; | 1476 | bool run; |
1454 | 1477 | ||
1455 | if (unlikely(!IS_STARTED(ar))) | 1478 | if (unlikely(!IS_STARTED(ar))) |
1456 | goto err_free; | 1479 | goto err_free; |
1457 | 1480 | ||
1458 | info = IEEE80211_SKB_CB(skb); | 1481 | info = IEEE80211_SKB_CB(skb); |
1482 | vif = info->control.vif; | ||
1459 | 1483 | ||
1460 | if (unlikely(carl9170_tx_prepare(ar, sta, skb))) | 1484 | if (unlikely(carl9170_tx_prepare(ar, sta, skb))) |
1461 | goto err_free; | 1485 | goto err_free; |
@@ -1486,6 +1510,8 @@ void carl9170_op_tx(struct ieee80211_hw *hw, | |||
1486 | } else { | 1510 | } else { |
1487 | unsigned int queue = skb_get_queue_mapping(skb); | 1511 | unsigned int queue = skb_get_queue_mapping(skb); |
1488 | 1512 | ||
1513 | carl9170_tx_get_rates(ar, vif, sta, skb); | ||
1514 | carl9170_tx_apply_rateset(ar, info, skb); | ||
1489 | skb_queue_tail(&ar->tx_pending[queue], skb); | 1515 | skb_queue_tail(&ar->tx_pending[queue], skb); |
1490 | } | 1516 | } |
1491 | 1517 | ||
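Taken together with the IEEE80211_HW_SUPPORTS_RC_TABLE flag added in main.c and the sta/vif pointers cached in carl9170_sta_tid above, the tx.c changes move rate selection to transmit time: carl9170_tx_get_rates() asks mac80211 for the rateset of the aggregate's first frame and carl9170_tx_apply_rateset() stamps that same rateset onto every subframe, replacing the old per-skb phy_control comparison. The plain-C sketch below shows only the resolve-once, apply-to-all shape of that change; the types and names are invented.

struct rateset {
        int idx[4];
        int tries[4];
};

struct frame {
        struct frame *next;
        struct rateset rates;
};

/* every MPDU of an A-MPDU has to share the same tx vector, so the rateset
 * resolved for the aggregate's first frame is copied onto the whole chain */
static void apply_rateset_to_aggregate(struct frame *head,
                                       const struct rateset *rs)
{
        struct frame *f;

        for (f = head; f; f = f->next)
                f->rates = *rs;
}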
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index ccc4c718f124..7d077c752dd5 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c | |||
@@ -42,11 +42,11 @@ static int __ath_regd_init(struct ath_regulatory *reg); | |||
42 | NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_OFDM) | 42 | NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_OFDM) |
43 | 43 | ||
44 | /* We allow IBSS on these on a case by case basis by regulatory domain */ | 44 | /* We allow IBSS on these on a case by case basis by regulatory domain */ |
45 | #define ATH9K_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 40, 0, 30,\ | 45 | #define ATH9K_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 80, 0, 30,\ |
46 | NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS) | 46 | NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS) |
47 | #define ATH9K_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 40, 0, 30,\ | 47 | #define ATH9K_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 80, 0, 30,\ |
48 | NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS) | 48 | NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS) |
49 | #define ATH9K_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 40, 0, 30,\ | 49 | #define ATH9K_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 80, 0, 30,\ |
50 | NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS) | 50 | NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS) |
51 | 51 | ||
52 | #define ATH9K_2GHZ_ALL ATH9K_2GHZ_CH01_11, \ | 52 | #define ATH9K_2GHZ_ALL ATH9K_2GHZ_CH01_11, \ |
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig index 5644ac54facc..ce8c0381825e 100644 --- a/drivers/net/wireless/ath/wil6210/Kconfig +++ b/drivers/net/wireless/ath/wil6210/Kconfig | |||
@@ -28,7 +28,7 @@ config WIL6210_ISR_COR | |||
28 | such monitoring impossible. | 28 | such monitoring impossible. |
29 | Say y unless you debug interrupts | 29 | Say y unless you debug interrupts |
30 | 30 | ||
31 | config ATH6KL_TRACING | 31 | config WIL6210_TRACING |
32 | bool "wil6210 tracing support" | 32 | bool "wil6210 tracing support" |
33 | depends on WIL6210 | 33 | depends on WIL6210 |
34 | depends on EVENT_TRACING | 34 | depends on EVENT_TRACING |
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 4eb05d0818c3..61c302a6bdea 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c | |||
@@ -402,6 +402,30 @@ static int wil_cfg80211_set_default_key(struct wiphy *wiphy, | |||
402 | return 0; | 402 | return 0; |
403 | } | 403 | } |
404 | 404 | ||
405 | static int wil_fix_bcon(struct wil6210_priv *wil, | ||
406 | struct cfg80211_beacon_data *bcon) | ||
407 | { | ||
408 | struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp; | ||
409 | size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); | ||
410 | int rc = 0; | ||
411 | |||
412 | if (bcon->probe_resp_len <= hlen) | ||
413 | return 0; | ||
414 | |||
415 | if (!bcon->proberesp_ies) { | ||
416 | bcon->proberesp_ies = f->u.probe_resp.variable; | ||
417 | bcon->proberesp_ies_len = bcon->probe_resp_len - hlen; | ||
418 | rc = 1; | ||
419 | } | ||
420 | if (!bcon->assocresp_ies) { | ||
421 | bcon->assocresp_ies = f->u.probe_resp.variable; | ||
422 | bcon->assocresp_ies_len = bcon->probe_resp_len - hlen; | ||
423 | rc = 1; | ||
424 | } | ||
425 | |||
426 | return rc; | ||
427 | } | ||
428 | |||
405 | static int wil_cfg80211_start_ap(struct wiphy *wiphy, | 429 | static int wil_cfg80211_start_ap(struct wiphy *wiphy, |
406 | struct net_device *ndev, | 430 | struct net_device *ndev, |
407 | struct cfg80211_ap_settings *info) | 431 | struct cfg80211_ap_settings *info) |
@@ -423,10 +447,18 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, | |||
423 | print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET, | 447 | print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET, |
424 | info->ssid, info->ssid_len); | 448 | info->ssid, info->ssid_len); |
425 | 449 | ||
450 | if (wil_fix_bcon(wil, bcon)) | ||
451 | wil_dbg_misc(wil, "Fixed bcon\n"); | ||
452 | |||
426 | rc = wil_reset(wil); | 453 | rc = wil_reset(wil); |
427 | if (rc) | 454 | if (rc) |
428 | return rc; | 455 | return rc; |
429 | 456 | ||
457 | /* Rx VRING. */ | ||
458 | rc = wil_rx_init(wil); | ||
459 | if (rc) | ||
460 | return rc; | ||
461 | |||
430 | rc = wmi_set_ssid(wil, info->ssid_len, info->ssid); | 462 | rc = wmi_set_ssid(wil, info->ssid_len, info->ssid); |
431 | if (rc) | 463 | if (rc) |
432 | return rc; | 464 | return rc; |
@@ -455,8 +487,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, | |||
455 | if (rc) | 487 | if (rc) |
456 | return rc; | 488 | return rc; |
457 | 489 | ||
458 | /* Rx VRING. After MAC and beacon */ | ||
459 | rc = wil_rx_init(wil); | ||
460 | 490 | ||
461 | netif_carrier_on(ndev); | 491 | netif_carrier_on(ndev); |
462 | 492 | ||
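wil_fix_bcon() above covers the case where user space hands down only a full probe-response template: everything past the fixed part of the management frame, located with offsetof(struct ieee80211_mgmt, u.probe_resp.variable), is reused as the missing proberesp_ies/assocresp_ies blob. The plain-C sketch below shows that slicing with a simplified frame layout; it is illustrative only and does not reproduce the real struct ieee80211_mgmt.

#include <stddef.h>
#include <stdint.h>

struct fake_probe_resp {
        uint8_t mac_header[24];         /* 802.11 management MAC header */
        uint8_t timestamp[8];
        uint16_t beacon_int;
        uint16_t capab_info;
        uint8_t variable[];             /* information elements start here */
};

struct ie_slice {
        const uint8_t *ies;
        size_t len;
};

/* returns 1 if IEs could be derived from the template, 0 if nothing follows
 * the fixed header (roughly the return convention of wil_fix_bcon()) */
static int slice_ies(const uint8_t *frame, size_t frame_len,
                     struct ie_slice *out)
{
        size_t hlen = offsetof(struct fake_probe_resp, variable);

        if (frame_len <= hlen)
                return 0;

        out->ies = frame + hlen;
        out->len = frame_len - hlen;
        return 1;
}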
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index c97b864667c5..0a2844c48a60 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c | |||
@@ -286,41 +286,36 @@ static int __wil_up(struct wil6210_priv *wil) | |||
286 | { | 286 | { |
287 | struct net_device *ndev = wil_to_ndev(wil); | 287 | struct net_device *ndev = wil_to_ndev(wil); |
288 | struct wireless_dev *wdev = wil->wdev; | 288 | struct wireless_dev *wdev = wil->wdev; |
289 | struct ieee80211_channel *channel = wdev->preset_chandef.chan; | ||
290 | int rc; | 289 | int rc; |
291 | int bi; | ||
292 | u16 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype); | ||
293 | 290 | ||
294 | rc = wil_reset(wil); | 291 | rc = wil_reset(wil); |
295 | if (rc) | 292 | if (rc) |
296 | return rc; | 293 | return rc; |
297 | 294 | ||
298 | /* FIXME Firmware works now in PBSS mode(ToDS=0, FromDS=0) */ | 295 | /* Rx VRING. After MAC and beacon */ |
299 | wmi_nettype = wil_iftype_nl2wmi(NL80211_IFTYPE_ADHOC); | 296 | rc = wil_rx_init(wil); |
297 | if (rc) | ||
298 | return rc; | ||
299 | |||
300 | switch (wdev->iftype) { | 300 | switch (wdev->iftype) { |
301 | case NL80211_IFTYPE_STATION: | 301 | case NL80211_IFTYPE_STATION: |
302 | wil_dbg_misc(wil, "type: STATION\n"); | 302 | wil_dbg_misc(wil, "type: STATION\n"); |
303 | bi = 0; | ||
304 | ndev->type = ARPHRD_ETHER; | 303 | ndev->type = ARPHRD_ETHER; |
305 | break; | 304 | break; |
306 | case NL80211_IFTYPE_AP: | 305 | case NL80211_IFTYPE_AP: |
307 | wil_dbg_misc(wil, "type: AP\n"); | 306 | wil_dbg_misc(wil, "type: AP\n"); |
308 | bi = 100; | ||
309 | ndev->type = ARPHRD_ETHER; | 307 | ndev->type = ARPHRD_ETHER; |
310 | break; | 308 | break; |
311 | case NL80211_IFTYPE_P2P_CLIENT: | 309 | case NL80211_IFTYPE_P2P_CLIENT: |
312 | wil_dbg_misc(wil, "type: P2P_CLIENT\n"); | 310 | wil_dbg_misc(wil, "type: P2P_CLIENT\n"); |
313 | bi = 0; | ||
314 | ndev->type = ARPHRD_ETHER; | 311 | ndev->type = ARPHRD_ETHER; |
315 | break; | 312 | break; |
316 | case NL80211_IFTYPE_P2P_GO: | 313 | case NL80211_IFTYPE_P2P_GO: |
317 | wil_dbg_misc(wil, "type: P2P_GO\n"); | 314 | wil_dbg_misc(wil, "type: P2P_GO\n"); |
318 | bi = 100; | ||
319 | ndev->type = ARPHRD_ETHER; | 315 | ndev->type = ARPHRD_ETHER; |
320 | break; | 316 | break; |
321 | case NL80211_IFTYPE_MONITOR: | 317 | case NL80211_IFTYPE_MONITOR: |
322 | wil_dbg_misc(wil, "type: Monitor\n"); | 318 | wil_dbg_misc(wil, "type: Monitor\n"); |
323 | bi = 0; | ||
324 | ndev->type = ARPHRD_IEEE80211_RADIOTAP; | 319 | ndev->type = ARPHRD_IEEE80211_RADIOTAP; |
325 | /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */ | 320 | /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */ |
326 | break; | 321 | break; |
@@ -328,36 +323,9 @@ static int __wil_up(struct wil6210_priv *wil) | |||
328 | return -EOPNOTSUPP; | 323 | return -EOPNOTSUPP; |
329 | } | 324 | } |
330 | 325 | ||
331 | /* Apply profile in the following order: */ | ||
332 | /* SSID and channel for the AP */ | ||
333 | switch (wdev->iftype) { | ||
334 | case NL80211_IFTYPE_AP: | ||
335 | case NL80211_IFTYPE_P2P_GO: | ||
336 | if (wdev->ssid_len == 0) { | ||
337 | wil_err(wil, "SSID not set\n"); | ||
338 | return -EINVAL; | ||
339 | } | ||
340 | rc = wmi_set_ssid(wil, wdev->ssid_len, wdev->ssid); | ||
341 | if (rc) | ||
342 | return rc; | ||
343 | break; | ||
344 | default: | ||
345 | break; | ||
346 | } | ||
347 | |||
348 | /* MAC address - pre-requisite for other commands */ | 326 | /* MAC address - pre-requisite for other commands */ |
349 | wmi_set_mac_address(wil, ndev->dev_addr); | 327 | wmi_set_mac_address(wil, ndev->dev_addr); |
350 | 328 | ||
351 | /* Set up beaconing if required. */ | ||
352 | if (bi > 0) { | ||
353 | rc = wmi_pcp_start(wil, bi, wmi_nettype, | ||
354 | (channel ? channel->hw_value : 0)); | ||
355 | if (rc) | ||
356 | return rc; | ||
357 | } | ||
358 | |||
359 | /* Rx VRING. After MAC and beacon */ | ||
360 | wil_rx_init(wil); | ||
361 | 329 | ||
362 | napi_enable(&wil->napi_rx); | 330 | napi_enable(&wil->napi_rx); |
363 | napi_enable(&wil->napi_tx); | 331 | napi_enable(&wil->napi_tx); |
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 00dffeda983e..e1c492b9dfef 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c | |||
@@ -768,18 +768,16 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
768 | wil_err(wil, "Xmit in monitor mode not supported\n"); | 768 | wil_err(wil, "Xmit in monitor mode not supported\n"); |
769 | goto drop; | 769 | goto drop; |
770 | } | 770 | } |
771 | if (skb->protocol == cpu_to_be16(ETH_P_PAE)) { | 771 | |
772 | rc = wmi_tx_eapol(wil, skb); | 772 | /* find vring */ |
773 | } else { | 773 | vring = wil_find_tx_vring(wil, skb); |
774 | /* find vring */ | 774 | if (!vring) { |
775 | vring = wil_find_tx_vring(wil, skb); | 775 | wil_err(wil, "No Tx VRING available\n"); |
776 | if (!vring) { | 776 | goto drop; |
777 | wil_err(wil, "No Tx VRING available\n"); | ||
778 | goto drop; | ||
779 | } | ||
780 | /* set up vring entry */ | ||
781 | rc = wil_tx_vring(wil, vring, skb); | ||
782 | } | 777 | } |
778 | /* set up vring entry */ | ||
779 | rc = wil_tx_vring(wil, vring, skb); | ||
780 | |||
783 | switch (rc) { | 781 | switch (rc) { |
784 | case 0: | 782 | case 0: |
785 | /* statistics will be updated on the tx_complete */ | 783 | /* statistics will be updated on the tx_complete */ |
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 373cf656f5b0..44fdab51de7e 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h | |||
@@ -329,7 +329,6 @@ int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid); | |||
329 | int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid); | 329 | int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid); |
330 | int wmi_set_channel(struct wil6210_priv *wil, int channel); | 330 | int wmi_set_channel(struct wil6210_priv *wil, int channel); |
331 | int wmi_get_channel(struct wil6210_priv *wil, int *channel); | 331 | int wmi_get_channel(struct wil6210_priv *wil, int *channel); |
332 | int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb); | ||
333 | int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, | 332 | int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, |
334 | const void *mac_addr); | 333 | const void *mac_addr); |
335 | int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index, | 334 | int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index, |
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 527ffb543821..dc8059ad4bab 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c | |||
@@ -75,10 +75,11 @@ static const struct { | |||
75 | {0x800000, 0x808000, 0x900000}, /* FW data RAM 32k */ | 75 | {0x800000, 0x808000, 0x900000}, /* FW data RAM 32k */ |
76 | {0x840000, 0x860000, 0x908000}, /* peripheral data RAM 128k/96k used */ | 76 | {0x840000, 0x860000, 0x908000}, /* peripheral data RAM 128k/96k used */ |
77 | {0x880000, 0x88a000, 0x880000}, /* various RGF */ | 77 | {0x880000, 0x88a000, 0x880000}, /* various RGF */ |
78 | {0x8c0000, 0x932000, 0x8c0000}, /* trivial mapping for upper area */ | 78 | {0x8c0000, 0x949000, 0x8c0000}, /* trivial mapping for upper area */ |
79 | /* | 79 | /* |
80 | * 920000..930000 ucode code RAM | 80 | * 920000..930000 ucode code RAM |
81 | * 930000..932000 ucode data RAM | 81 | * 930000..932000 ucode data RAM |
82 | * 932000..949000 back-door debug data | ||
82 | */ | 83 | */ |
83 | }; | 84 | }; |
84 | 85 | ||
@@ -314,8 +315,8 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) | |||
314 | 315 | ||
315 | wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d\n", | 316 | wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d\n", |
316 | data->info.channel, data->info.mcs, data->info.snr); | 317 | data->info.channel, data->info.mcs, data->info.snr); |
317 | wil_dbg_wmi(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len, | 318 | wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len, |
318 | le16_to_cpu(data->info.stype)); | 319 | le16_to_cpu(fc)); |
319 | wil_dbg_wmi(wil, "qid %d mid %d cid %d\n", | 320 | wil_dbg_wmi(wil, "qid %d mid %d cid %d\n", |
320 | data->info.qid, data->info.mid, data->info.cid); | 321 | data->info.qid, data->info.mid, data->info.cid); |
321 | 322 | ||
@@ -739,8 +740,12 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan) | |||
739 | if (!wil->secure_pcp) | 740 | if (!wil->secure_pcp) |
740 | cmd.disable_sec = 1; | 741 | cmd.disable_sec = 1; |
741 | 742 | ||
743 | /* | ||
744 | * Processing time may be long; in the case of a secure AP it takes | ||
745 | * about 3500 ms for the FW to start the AP. | ||
746 | */ | ||
742 | rc = wmi_call(wil, WMI_PCP_START_CMDID, &cmd, sizeof(cmd), | 747 | rc = wmi_call(wil, WMI_PCP_START_CMDID, &cmd, sizeof(cmd), |
743 | WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 100); | 748 | WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 5000); |
744 | if (rc) | 749 | if (rc) |
745 | return rc; | 750 | return rc; |
746 | 751 | ||
@@ -834,40 +839,6 @@ int wmi_p2p_cfg(struct wil6210_priv *wil, int channel) | |||
834 | return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd)); | 839 | return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd)); |
835 | } | 840 | } |
836 | 841 | ||
837 | int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb) | ||
838 | { | ||
839 | struct wmi_eapol_tx_cmd *cmd; | ||
840 | struct ethhdr *eth; | ||
841 | u16 eapol_len = skb->len - ETH_HLEN; | ||
842 | void *eapol = skb->data + ETH_HLEN; | ||
843 | uint i; | ||
844 | int rc; | ||
845 | |||
846 | skb_set_mac_header(skb, 0); | ||
847 | eth = eth_hdr(skb); | ||
848 | wil_dbg_wmi(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest); | ||
849 | for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) { | ||
850 | if (memcmp(wil->dst_addr[i], eth->h_dest, ETH_ALEN) == 0) | ||
851 | goto found_dest; | ||
852 | } | ||
853 | |||
854 | return -EINVAL; | ||
855 | |||
856 | found_dest: | ||
857 | /* find out eapol data & len */ | ||
858 | cmd = kzalloc(sizeof(*cmd) + eapol_len, GFP_KERNEL); | ||
859 | if (!cmd) | ||
860 | return -EINVAL; | ||
861 | |||
862 | memcpy(cmd->dst_mac, eth->h_dest, ETH_ALEN); | ||
863 | cmd->eapol_len = cpu_to_le16(eapol_len); | ||
864 | memcpy(cmd->eapol, eapol, eapol_len); | ||
865 | rc = wmi_send(wil, WMI_EAPOL_TX_CMDID, cmd, sizeof(*cmd) + eapol_len); | ||
866 | kfree(cmd); | ||
867 | |||
868 | return rc; | ||
869 | } | ||
870 | |||
871 | int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, | 842 | int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, |
872 | const void *mac_addr) | 843 | const void *mac_addr) |
873 | { | 844 | { |
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig index 078e6f3477a9..3f21e0ba39ba 100644 --- a/drivers/net/wireless/b43/Kconfig +++ b/drivers/net/wireless/b43/Kconfig | |||
@@ -28,7 +28,7 @@ config B43 | |||
28 | 28 | ||
29 | config B43_BCMA | 29 | config B43_BCMA |
30 | bool "Support for BCMA bus" | 30 | bool "Support for BCMA bus" |
31 | depends on B43 && BCMA | 31 | depends on B43 && (BCMA = y || BCMA = B43) |
32 | default y | 32 | default y |
33 | 33 | ||
34 | config B43_BCMA_EXTRA | 34 | config B43_BCMA_EXTRA |
@@ -39,7 +39,7 @@ config B43_BCMA_EXTRA | |||
39 | 39 | ||
40 | config B43_SSB | 40 | config B43_SSB |
41 | bool | 41 | bool |
42 | depends on B43 && SSB | 42 | depends on B43 && (SSB = y || SSB = B43) |
43 | default y | 43 | default y |
44 | 44 | ||
45 | # Auto-select SSB PCI-HOST support, if possible | 45 | # Auto-select SSB PCI-HOST support, if possible |
@@ -111,6 +111,7 @@ config B43_PIO | |||
111 | config B43_PHY_N | 111 | config B43_PHY_N |
112 | bool "Support for 802.11n (N-PHY) devices" | 112 | bool "Support for 802.11n (N-PHY) devices" |
113 | depends on B43 | 113 | depends on B43 |
114 | default y | ||
114 | ---help--- | 115 | ---help--- |
115 | Support for the N-PHY. | 116 | Support for the N-PHY. |
116 | 117 | ||
@@ -132,6 +133,7 @@ config B43_PHY_LP | |||
132 | config B43_PHY_HT | 133 | config B43_PHY_HT |
133 | bool "Support for HT-PHY (high throughput) devices" | 134 | bool "Support for HT-PHY (high throughput) devices" |
134 | depends on B43 && B43_BCMA | 135 | depends on B43 && B43_BCMA |
136 | default y | ||
135 | ---help--- | 137 | ---help--- |
136 | Support for the HT-PHY. | 138 | Support for the HT-PHY. |
137 | 139 | ||
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c index 4891e3df2058..3f8e69c29146 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c | |||
@@ -22,9 +22,11 @@ | |||
22 | #include <linux/pci_ids.h> | 22 | #include <linux/pci_ids.h> |
23 | #include <linux/sched.h> | 23 | #include <linux/sched.h> |
24 | #include <linux/completion.h> | 24 | #include <linux/completion.h> |
25 | #include <linux/scatterlist.h> | ||
25 | #include <linux/mmc/sdio.h> | 26 | #include <linux/mmc/sdio.h> |
26 | #include <linux/mmc/sdio_func.h> | 27 | #include <linux/mmc/sdio_func.h> |
27 | #include <linux/mmc/card.h> | 28 | #include <linux/mmc/card.h> |
29 | #include <linux/mmc/host.h> | ||
28 | #include <linux/platform_data/brcmfmac-sdio.h> | 30 | #include <linux/platform_data/brcmfmac-sdio.h> |
29 | 31 | ||
30 | #include <defs.h> | 32 | #include <defs.h> |
@@ -303,6 +305,153 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, | |||
303 | *ret = retval; | 305 | *ret = retval; |
304 | } | 306 | } |
305 | 307 | ||
308 | /** | ||
309 | * brcmf_sdio_buffrw - SDIO interface function for block data access | ||
310 | * @sdiodev: brcmfmac sdio device | ||
311 | * @fn: SDIO function number | ||
312 | * @write: direction flag | ||
313 | * @addr: dongle memory address as source/destination | ||
314 | * @pktlist: list of skbs to transfer to/from the dongle | ||
315 | * | ||
316 | * This function serves as the interface function to the MMC | ||
317 | * stack for block data access. It assumes that the skbs passed down by the | ||
318 | * caller have already been padded and aligned. | ||
319 | */ | ||
320 | static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn, | ||
321 | bool write, u32 addr, struct sk_buff_head *pktlist) | ||
322 | { | ||
323 | unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset; | ||
324 | unsigned int max_blks, max_req_sz; | ||
325 | unsigned short max_seg_sz, seg_sz; | ||
326 | unsigned char *pkt_data; | ||
327 | struct sk_buff *pkt_next = NULL; | ||
328 | struct mmc_request mmc_req; | ||
329 | struct mmc_command mmc_cmd; | ||
330 | struct mmc_data mmc_dat; | ||
331 | struct sg_table st; | ||
332 | struct scatterlist *sgl; | ||
333 | struct mmc_host *host; | ||
334 | int ret = 0; | ||
335 | |||
336 | if (!pktlist->qlen) | ||
337 | return -EINVAL; | ||
338 | |||
339 | brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait); | ||
340 | if (brcmf_pm_resume_error(sdiodev)) | ||
341 | return -EIO; | ||
342 | |||
343 | /* A single skb uses the standard mmc interface */ | ||
344 | if (pktlist->qlen == 1) { | ||
345 | pkt_next = pktlist->next; | ||
346 | req_sz = pkt_next->len + 3; | ||
347 | req_sz &= (uint)~3; | ||
348 | |||
349 | if (write) | ||
350 | return sdio_memcpy_toio(sdiodev->func[fn], addr, | ||
351 | ((u8 *)(pkt_next->data)), | ||
352 | req_sz); | ||
353 | else if (fn == 1) | ||
354 | return sdio_memcpy_fromio(sdiodev->func[fn], | ||
355 | ((u8 *)(pkt_next->data)), | ||
356 | addr, req_sz); | ||
357 | else | ||
358 | /* function 2 read is FIFO operation */ | ||
359 | return sdio_readsb(sdiodev->func[fn], | ||
360 | ((u8 *)(pkt_next->data)), addr, | ||
361 | req_sz); | ||
362 | } | ||
363 | |||
364 | host = sdiodev->func[fn]->card->host; | ||
365 | func_blk_sz = sdiodev->func[fn]->cur_blksize; | ||
366 | /* Blocks per command are limited by the host's max block count, its | ||
367 | * max transfer size and the IO_RW_EXTENDED limit of 511 blocks. | ||
368 | */ | ||
369 | max_blks = min_t(unsigned int, host->max_blk_count, 511u); | ||
370 | max_req_sz = min_t(unsigned int, host->max_req_size, | ||
371 | max_blks * func_blk_sz); | ||
372 | max_seg_sz = min_t(unsigned short, host->max_segs, SG_MAX_SINGLE_ALLOC); | ||
373 | max_seg_sz = min_t(unsigned short, max_seg_sz, pktlist->qlen); | ||
374 | seg_sz = pktlist->qlen; | ||
375 | pkt_offset = 0; | ||
376 | pkt_next = pktlist->next; | ||
377 | |||
378 | if (sg_alloc_table(&st, max_seg_sz, GFP_KERNEL)) | ||
379 | return -ENOMEM; | ||
380 | |||
381 | while (seg_sz) { | ||
382 | req_sz = 0; | ||
383 | sg_cnt = 0; | ||
384 | memset(&mmc_req, 0, sizeof(struct mmc_request)); | ||
385 | memset(&mmc_cmd, 0, sizeof(struct mmc_command)); | ||
386 | memset(&mmc_dat, 0, sizeof(struct mmc_data)); | ||
387 | sgl = st.sgl; | ||
388 | /* prep sg table */ | ||
389 | while (pkt_next != (struct sk_buff *)pktlist) { | ||
390 | pkt_data = pkt_next->data + pkt_offset; | ||
391 | sg_data_sz = pkt_next->len - pkt_offset; | ||
392 | if (sg_data_sz > host->max_seg_size) | ||
393 | sg_data_sz = host->max_seg_size; | ||
394 | if (sg_data_sz > max_req_sz - req_sz) | ||
395 | sg_data_sz = max_req_sz - req_sz; | ||
396 | |||
397 | sg_set_buf(sgl, pkt_data, sg_data_sz); | ||
398 | |||
399 | sg_cnt++; | ||
400 | sgl = sg_next(sgl); | ||
401 | req_sz += sg_data_sz; | ||
402 | pkt_offset += sg_data_sz; | ||
403 | if (pkt_offset == pkt_next->len) { | ||
404 | pkt_offset = 0; | ||
405 | pkt_next = pkt_next->next; | ||
406 | } | ||
407 | |||
408 | if (req_sz >= max_req_sz || sg_cnt >= max_seg_sz) | ||
409 | break; | ||
410 | } | ||
411 | seg_sz -= sg_cnt; | ||
412 | |||
413 | if (req_sz % func_blk_sz != 0) { | ||
414 | brcmf_err("sg request length %u is not %u aligned\n", | ||
415 | req_sz, func_blk_sz); | ||
416 | sg_free_table(&st); | ||
417 | return -ENOTBLK; | ||
418 | } | ||
419 | mmc_dat.sg = st.sgl; | ||
420 | mmc_dat.sg_len = sg_cnt; | ||
421 | mmc_dat.blksz = func_blk_sz; | ||
422 | mmc_dat.blocks = req_sz / func_blk_sz; | ||
423 | mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; | ||
424 | mmc_cmd.opcode = SD_IO_RW_EXTENDED; | ||
425 | mmc_cmd.arg = write ? 1<<31 : 0; /* write flag */ | ||
426 | mmc_cmd.arg |= (fn & 0x7) << 28; /* SDIO func num */ | ||
427 | mmc_cmd.arg |= 1<<27; /* block mode */ | ||
428 | /* incrementing addr for function 1 */ | ||
429 | mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0; | ||
430 | mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* address */ | ||
431 | mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */ | ||
432 | mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC; | ||
433 | mmc_req.cmd = &mmc_cmd; | ||
434 | mmc_req.data = &mmc_dat; | ||
435 | if (fn == 1) | ||
436 | addr += req_sz; | ||
437 | |||
438 | mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card); | ||
439 | mmc_wait_for_req(host, &mmc_req); | ||
440 | |||
441 | ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error; | ||
442 | if (ret != 0) { | ||
443 | brcmf_err("CMD53 sg block %s failed %d\n", | ||
444 | write ? "write" : "read", ret); | ||
445 | ret = -EIO; | ||
446 | break; | ||
447 | } | ||
448 | } | ||
449 | |||
450 | sg_free_table(&st); | ||
451 | |||
452 | return ret; | ||
453 | } | ||
454 | |||
306 | static int brcmf_sdcard_recv_prepare(struct brcmf_sdio_dev *sdiodev, uint fn, | 455 | static int brcmf_sdcard_recv_prepare(struct brcmf_sdio_dev *sdiodev, uint fn, |
307 | uint flags, uint width, u32 *addr) | 456 | uint flags, uint width, u32 *addr) |
308 | { | 457 | { |
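The hunks that follow show how callers hand single packets to brcmf_sdio_buffrw() through a one-element sk_buff_head. A minimal sketch of that calling convention, using only functions visible in this patch (the wrapper name brcmf_sdio_xfer_one is hypothetical and not part of the patch):

static int brcmf_sdio_xfer_one(struct brcmf_sdio_dev *sdiodev, uint fn,
                               bool write, u32 addr, struct sk_buff *pkt)
{
        struct sk_buff_head pkt_list;   /* queue head lives on the stack */
        int err;

        skb_queue_head_init(&pkt_list);
        skb_queue_tail(&pkt_list, pkt); /* queue the single skb */
        err = brcmf_sdio_buffrw(sdiodev, fn, write, addr, &pkt_list);
        skb_dequeue_tail(&pkt_list);    /* caller keeps ownership of the skb */

        return err;
}

With one packet queued, brcmf_sdio_buffrw() takes its qlen == 1 fast path and uses sdio_memcpy_toio()/sdio_memcpy_fromio() (or sdio_readsb() for function-2 reads) directly; only chains of two or more packets go through the scatter-gather CMD53 path.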
@@ -355,9 +504,9 @@ int | |||
355 | brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, | 504 | brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, |
356 | uint flags, struct sk_buff *pkt) | 505 | uint flags, struct sk_buff *pkt) |
357 | { | 506 | { |
358 | uint incr_fix; | ||
359 | uint width; | 507 | uint width; |
360 | int err = 0; | 508 | int err = 0; |
509 | struct sk_buff_head pkt_list; | ||
361 | 510 | ||
362 | brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n", | 511 | brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n", |
363 | fn, addr, pkt->len); | 512 | fn, addr, pkt->len); |
@@ -367,9 +516,10 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, | |||
367 | if (err) | 516 | if (err) |
368 | goto done; | 517 | goto done; |
369 | 518 | ||
370 | incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; | 519 | skb_queue_head_init(&pkt_list); |
371 | err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ, | 520 | skb_queue_tail(&pkt_list, pkt); |
372 | fn, addr, pkt); | 521 | err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, &pkt_list); |
522 | skb_dequeue_tail(&pkt_list); | ||
373 | 523 | ||
374 | done: | 524 | done: |
375 | return err; | 525 | return err; |
@@ -391,8 +541,7 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, | |||
391 | goto done; | 541 | goto done; |
392 | 542 | ||
393 | incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; | 543 | incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; |
394 | err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr, | 544 | err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq); |
395 | pktq); | ||
396 | 545 | ||
397 | done: | 546 | done: |
398 | return err; | 547 | return err; |
@@ -424,10 +573,10 @@ int | |||
424 | brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, | 573 | brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, |
425 | uint flags, struct sk_buff *pkt) | 574 | uint flags, struct sk_buff *pkt) |
426 | { | 575 | { |
427 | uint incr_fix; | ||
428 | uint width; | 576 | uint width; |
429 | uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK; | 577 | uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK; |
430 | int err = 0; | 578 | int err = 0; |
579 | struct sk_buff_head pkt_list; | ||
431 | 580 | ||
432 | brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n", | 581 | brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n", |
433 | fn, addr, pkt->len); | 582 | fn, addr, pkt->len); |
@@ -446,13 +595,14 @@ brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, | |||
446 | 595 | ||
447 | addr &= SBSDIO_SB_OFT_ADDR_MASK; | 596 | addr &= SBSDIO_SB_OFT_ADDR_MASK; |
448 | 597 | ||
449 | incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; | ||
450 | width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; | 598 | width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; |
451 | if (width == 4) | 599 | if (width == 4) |
452 | addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; | 600 | addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; |
453 | 601 | ||
454 | err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn, | 602 | skb_queue_head_init(&pkt_list); |
455 | addr, pkt); | 603 | skb_queue_tail(&pkt_list, pkt); |
604 | err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, &pkt_list); | ||
605 | skb_dequeue_tail(&pkt_list); | ||
456 | 606 | ||
457 | done: | 607 | done: |
458 | return err; | 608 | return err; |
@@ -466,6 +616,7 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address, | |||
466 | struct sk_buff *pkt; | 616 | struct sk_buff *pkt; |
467 | u32 sdaddr; | 617 | u32 sdaddr; |
468 | uint dsize; | 618 | uint dsize; |
619 | struct sk_buff_head pkt_list; | ||
469 | 620 | ||
470 | dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size); | 621 | dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size); |
471 | pkt = dev_alloc_skb(dsize); | 622 | pkt = dev_alloc_skb(dsize); |
@@ -474,6 +625,7 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address, | |||
474 | return -EIO; | 625 | return -EIO; |
475 | } | 626 | } |
476 | pkt->priority = 0; | 627 | pkt->priority = 0; |
628 | skb_queue_head_init(&pkt_list); | ||
477 | 629 | ||
478 | /* Determine initial transfer parameters */ | 630 | /* Determine initial transfer parameters */ |
479 | sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK; | 631 | sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK; |
@@ -501,9 +653,10 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address, | |||
501 | skb_put(pkt, dsize); | 653 | skb_put(pkt, dsize); |
502 | if (write) | 654 | if (write) |
503 | memcpy(pkt->data, data, dsize); | 655 | memcpy(pkt->data, data, dsize); |
504 | bcmerror = brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC, | 656 | skb_queue_tail(&pkt_list, pkt); |
505 | write, SDIO_FUNC_1, | 657 | bcmerror = brcmf_sdio_buffrw(sdiodev, SDIO_FUNC_1, write, |
506 | sdaddr, pkt); | 658 | sdaddr, &pkt_list); |
659 | skb_dequeue_tail(&pkt_list); | ||
507 | if (bcmerror) { | 660 | if (bcmerror) { |
508 | brcmf_err("membytes transfer failed\n"); | 661 | brcmf_err("membytes transfer failed\n"); |
509 | break; | 662 | break; |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c index 11400b39cf0b..289e386f01f6 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c | |||
@@ -66,7 +66,7 @@ MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids); | |||
66 | static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata; | 66 | static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata; |
67 | 67 | ||
68 | 68 | ||
69 | static bool | 69 | bool |
70 | brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev) | 70 | brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev) |
71 | { | 71 | { |
72 | bool is_err = false; | 72 | bool is_err = false; |
@@ -76,7 +76,7 @@ brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev) | |||
76 | return is_err; | 76 | return is_err; |
77 | } | 77 | } |
78 | 78 | ||
79 | static void | 79 | void |
80 | brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev, wait_queue_head_t *wq) | 80 | brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev, wait_queue_head_t *wq) |
81 | { | 81 | { |
82 | #ifdef CONFIG_PM_SLEEP | 82 | #ifdef CONFIG_PM_SLEEP |
@@ -211,115 +211,6 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev, | |||
211 | return err_ret; | 211 | return err_ret; |
212 | } | 212 | } |
213 | 213 | ||
214 | /* precondition: host controller is claimed */ | ||
215 | static int | ||
216 | brcmf_sdioh_request_data(struct brcmf_sdio_dev *sdiodev, uint write, bool fifo, | ||
217 | uint func, uint addr, struct sk_buff *pkt, uint pktlen) | ||
218 | { | ||
219 | int err_ret = 0; | ||
220 | |||
221 | if ((write) && (!fifo)) { | ||
222 | err_ret = sdio_memcpy_toio(sdiodev->func[func], addr, | ||
223 | ((u8 *) (pkt->data)), pktlen); | ||
224 | } else if (write) { | ||
225 | err_ret = sdio_memcpy_toio(sdiodev->func[func], addr, | ||
226 | ((u8 *) (pkt->data)), pktlen); | ||
227 | } else if (fifo) { | ||
228 | err_ret = sdio_readsb(sdiodev->func[func], | ||
229 | ((u8 *) (pkt->data)), addr, pktlen); | ||
230 | } else { | ||
231 | err_ret = sdio_memcpy_fromio(sdiodev->func[func], | ||
232 | ((u8 *) (pkt->data)), | ||
233 | addr, pktlen); | ||
234 | } | ||
235 | |||
236 | return err_ret; | ||
237 | } | ||
238 | |||
239 | /* | ||
240 | * This function takes a queue of packets. The packets on the queue | ||
241 | * are assumed to be properly aligned by the caller. | ||
242 | */ | ||
243 | int | ||
244 | brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc, | ||
245 | uint write, uint func, uint addr, | ||
246 | struct sk_buff_head *pktq) | ||
247 | { | ||
248 | bool fifo = (fix_inc == SDIOH_DATA_FIX); | ||
249 | u32 SGCount = 0; | ||
250 | int err_ret = 0; | ||
251 | |||
252 | struct sk_buff *pkt; | ||
253 | |||
254 | brcmf_dbg(SDIO, "Enter\n"); | ||
255 | |||
256 | brcmf_pm_resume_wait(sdiodev, &sdiodev->request_chain_wait); | ||
257 | if (brcmf_pm_resume_error(sdiodev)) | ||
258 | return -EIO; | ||
259 | |||
260 | skb_queue_walk(pktq, pkt) { | ||
261 | uint pkt_len = pkt->len; | ||
262 | pkt_len += 3; | ||
263 | pkt_len &= 0xFFFFFFFC; | ||
264 | |||
265 | err_ret = brcmf_sdioh_request_data(sdiodev, write, fifo, func, | ||
266 | addr, pkt, pkt_len); | ||
267 | if (err_ret) { | ||
268 | brcmf_err("%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n", | ||
269 | write ? "TX" : "RX", pkt, SGCount, addr, | ||
270 | pkt_len, err_ret); | ||
271 | } else { | ||
272 | brcmf_dbg(SDIO, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n", | ||
273 | write ? "TX" : "RX", pkt, SGCount, addr, | ||
274 | pkt_len); | ||
275 | } | ||
276 | if (!fifo) | ||
277 | addr += pkt_len; | ||
278 | |||
279 | SGCount++; | ||
280 | } | ||
281 | |||
282 | brcmf_dbg(SDIO, "Exit\n"); | ||
283 | return err_ret; | ||
284 | } | ||
285 | |||
286 | /* | ||
287 | * This function takes a single DMA-able packet. | ||
288 | */ | ||
289 | int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev, | ||
290 | uint fix_inc, uint write, uint func, uint addr, | ||
291 | struct sk_buff *pkt) | ||
292 | { | ||
293 | int status; | ||
294 | uint pkt_len; | ||
295 | bool fifo = (fix_inc == SDIOH_DATA_FIX); | ||
296 | |||
297 | brcmf_dbg(SDIO, "Enter\n"); | ||
298 | |||
299 | if (pkt == NULL) | ||
300 | return -EINVAL; | ||
301 | pkt_len = pkt->len; | ||
302 | |||
303 | brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait); | ||
304 | if (brcmf_pm_resume_error(sdiodev)) | ||
305 | return -EIO; | ||
306 | |||
307 | pkt_len += 3; | ||
308 | pkt_len &= (uint)~3; | ||
309 | |||
310 | status = brcmf_sdioh_request_data(sdiodev, write, fifo, func, | ||
311 | addr, pkt, pkt_len); | ||
312 | if (status) { | ||
313 | brcmf_err("%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n", | ||
314 | write ? "TX" : "RX", pkt, addr, pkt_len, status); | ||
315 | } else { | ||
316 | brcmf_dbg(SDIO, "%s xfr'd %p, addr=0x%05x, len=%d\n", | ||
317 | write ? "TX" : "RX", pkt, addr, pkt_len); | ||
318 | } | ||
319 | |||
320 | return status; | ||
321 | } | ||
322 | |||
323 | static int brcmf_sdioh_get_cisaddr(struct brcmf_sdio_dev *sdiodev, u32 regaddr) | 214 | static int brcmf_sdioh_get_cisaddr(struct brcmf_sdio_dev *sdiodev, u32 regaddr) |
324 | { | 215 | { |
325 | /* read 24 bits and return valid 17 bit addr */ | 216 | /* read 24 bits and return valid 17 bit addr */ |
@@ -468,7 +359,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, | |||
468 | atomic_set(&sdiodev->suspend, false); | 359 | atomic_set(&sdiodev->suspend, false); |
469 | init_waitqueue_head(&sdiodev->request_byte_wait); | 360 | init_waitqueue_head(&sdiodev->request_byte_wait); |
470 | init_waitqueue_head(&sdiodev->request_word_wait); | 361 | init_waitqueue_head(&sdiodev->request_word_wait); |
471 | init_waitqueue_head(&sdiodev->request_chain_wait); | ||
472 | init_waitqueue_head(&sdiodev->request_buffer_wait); | 362 | init_waitqueue_head(&sdiodev->request_buffer_wait); |
473 | 363 | ||
474 | brcmf_dbg(SDIO, "F2 found, calling brcmf_sdio_probe...\n"); | 364 | brcmf_dbg(SDIO, "F2 found, calling brcmf_sdio_probe...\n"); |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h index 28db9cf39672..86cbfe2c7c6c 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h | |||
@@ -583,6 +583,7 @@ enum brcmf_netif_stop_reason { | |||
583 | * @bssidx: index of bss associated with this interface. | 583 | * @bssidx: index of bss associated with this interface. |
584 | * @mac_addr: assigned mac address. | 584 | * @mac_addr: assigned mac address. |
585 | * @netif_stop: bitmap indicates reason why netif queues are stopped. | 585 | * @netif_stop: bitmap indicates reason why netif queues are stopped. |
586 | * @netif_stop_lock: spinlock protecting netif_stop updates from multiple sources. | ||
586 | * @pend_8021x_cnt: tracks outstanding number of 802.1x frames. | 587 | * @pend_8021x_cnt: tracks outstanding number of 802.1x frames. |
587 | * @pend_8021x_wait: used for signalling change in count. | 588 | * @pend_8021x_wait: used for signalling change in count. |
588 | */ | 589 | */ |
@@ -598,6 +599,7 @@ struct brcmf_if { | |||
598 | s32 bssidx; | 599 | s32 bssidx; |
599 | u8 mac_addr[ETH_ALEN]; | 600 | u8 mac_addr[ETH_ALEN]; |
600 | u8 netif_stop; | 601 | u8 netif_stop; |
602 | spinlock_t netif_stop_lock; | ||
601 | atomic_t pend_8021x_cnt; | 603 | atomic_t pend_8021x_cnt; |
602 | wait_queue_head_t pend_8021x_wait; | 604 | wait_queue_head_t pend_8021x_wait; |
603 | }; | 605 | }; |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c index 59c77aa3b959..dd85401063cb 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include "dhd_bus.h" | 30 | #include "dhd_bus.h" |
31 | #include "fwsignal.h" | 31 | #include "fwsignal.h" |
32 | #include "dhd_dbg.h" | 32 | #include "dhd_dbg.h" |
33 | #include "tracepoint.h" | ||
33 | 34 | ||
34 | struct brcmf_proto_cdc_dcmd { | 35 | struct brcmf_proto_cdc_dcmd { |
35 | __le32 cmd; /* dongle command value */ | 36 | __le32 cmd; /* dongle command value */ |
@@ -292,6 +293,7 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx, u8 offset, | |||
292 | h->flags2 = 0; | 293 | h->flags2 = 0; |
293 | h->data_offset = offset; | 294 | h->data_offset = offset; |
294 | BDC_SET_IF_IDX(h, ifidx); | 295 | BDC_SET_IF_IDX(h, ifidx); |
296 | trace_brcmf_bdchdr(pktbuf->data); | ||
295 | } | 297 | } |
296 | 298 | ||
297 | int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx, | 299 | int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx, |
@@ -309,6 +311,7 @@ int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx, | |||
309 | return -EBADE; | 311 | return -EBADE; |
310 | } | 312 | } |
311 | 313 | ||
314 | trace_brcmf_bdchdr(pktbuf->data); | ||
312 | h = (struct brcmf_proto_bdc_header *)(pktbuf->data); | 315 | h = (struct brcmf_proto_bdc_header *)(pktbuf->data); |
313 | 316 | ||
314 | *ifidx = BDC_GET_IF_IDX(h); | 317 | *ifidx = BDC_GET_IF_IDX(h); |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c index 202869cd0932..c37b9d68e458 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c | |||
@@ -156,8 +156,11 @@ ssize_t brcmf_debugfs_fws_stats_read(struct file *f, char __user *data, | |||
156 | "txs_suppr_core: %u\n" | 156 | "txs_suppr_core: %u\n" |
157 | "txs_suppr_ps: %u\n" | 157 | "txs_suppr_ps: %u\n" |
158 | "txs_tossed: %u\n" | 158 | "txs_tossed: %u\n" |
159 | "txs_host_tossed: %u\n" | ||
160 | "bus_flow_block: %u\n" | ||
161 | "fws_flow_block: %u\n" | ||
159 | "send_pkts: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n" | 162 | "send_pkts: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n" |
160 | "fifo_credits_sent: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n", | 163 | "requested_sent: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n", |
161 | fwstats->header_pulls, | 164 | fwstats->header_pulls, |
162 | fwstats->header_only_pkt, | 165 | fwstats->header_only_pkt, |
163 | fwstats->tlv_parse_failed, | 166 | fwstats->tlv_parse_failed, |
@@ -176,14 +179,17 @@ ssize_t brcmf_debugfs_fws_stats_read(struct file *f, char __user *data, | |||
176 | fwstats->txs_supp_core, | 179 | fwstats->txs_supp_core, |
177 | fwstats->txs_supp_ps, | 180 | fwstats->txs_supp_ps, |
178 | fwstats->txs_tossed, | 181 | fwstats->txs_tossed, |
182 | fwstats->txs_host_tossed, | ||
183 | fwstats->bus_flow_block, | ||
184 | fwstats->fws_flow_block, | ||
179 | fwstats->send_pkts[0], fwstats->send_pkts[1], | 185 | fwstats->send_pkts[0], fwstats->send_pkts[1], |
180 | fwstats->send_pkts[2], fwstats->send_pkts[3], | 186 | fwstats->send_pkts[2], fwstats->send_pkts[3], |
181 | fwstats->send_pkts[4], | 187 | fwstats->send_pkts[4], |
182 | fwstats->fifo_credits_sent[0], | 188 | fwstats->requested_sent[0], |
183 | fwstats->fifo_credits_sent[1], | 189 | fwstats->requested_sent[1], |
184 | fwstats->fifo_credits_sent[2], | 190 | fwstats->requested_sent[2], |
185 | fwstats->fifo_credits_sent[3], | 191 | fwstats->requested_sent[3], |
186 | fwstats->fifo_credits_sent[4]); | 192 | fwstats->requested_sent[4]); |
187 | 193 | ||
188 | return simple_read_from_buffer(data, count, ppos, buf, res); | 194 | return simple_read_from_buffer(data, count, ppos, buf, res); |
189 | } | 195 | } |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h index 009c87bfd9ae..0af1f5dc583a 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h | |||
@@ -141,8 +141,7 @@ struct brcmf_fws_stats { | |||
141 | u32 header_pulls; | 141 | u32 header_pulls; |
142 | u32 pkt2bus; | 142 | u32 pkt2bus; |
143 | u32 send_pkts[5]; | 143 | u32 send_pkts[5]; |
144 | u32 fifo_credits_sent[5]; | 144 | u32 requested_sent[5]; |
145 | u32 fifo_credits_back[6]; | ||
146 | u32 generic_error; | 145 | u32 generic_error; |
147 | u32 mac_update_failed; | 146 | u32 mac_update_failed; |
148 | u32 mac_ps_update_failed; | 147 | u32 mac_ps_update_failed; |
@@ -158,6 +157,9 @@ struct brcmf_fws_stats { | |||
158 | u32 txs_supp_core; | 157 | u32 txs_supp_core; |
159 | u32 txs_supp_ps; | 158 | u32 txs_supp_ps; |
160 | u32 txs_tossed; | 159 | u32 txs_tossed; |
160 | u32 txs_host_tossed; | ||
161 | u32 bus_flow_block; | ||
162 | u32 fws_flow_block; | ||
161 | }; | 163 | }; |
162 | 164 | ||
163 | struct brcmf_pub; | 165 | struct brcmf_pub; |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index 2c593570497c..8c402e7b97eb 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c | |||
@@ -179,7 +179,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, | |||
179 | struct brcmf_pub *drvr = ifp->drvr; | 179 | struct brcmf_pub *drvr = ifp->drvr; |
180 | struct ethhdr *eh; | 180 | struct ethhdr *eh; |
181 | 181 | ||
182 | brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx); | 182 | brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx); |
183 | 183 | ||
184 | /* Can the device send data? */ | 184 | /* Can the device send data? */ |
185 | if (drvr->bus_if->state != BRCMF_BUS_DATA) { | 185 | if (drvr->bus_if->state != BRCMF_BUS_DATA) { |
@@ -240,11 +240,15 @@ done: | |||
240 | void brcmf_txflowblock_if(struct brcmf_if *ifp, | 240 | void brcmf_txflowblock_if(struct brcmf_if *ifp, |
241 | enum brcmf_netif_stop_reason reason, bool state) | 241 | enum brcmf_netif_stop_reason reason, bool state) |
242 | { | 242 | { |
243 | unsigned long flags; | ||
244 | |||
243 | if (!ifp) | 245 | if (!ifp) |
244 | return; | 246 | return; |
245 | 247 | ||
246 | brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n", | 248 | brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n", |
247 | ifp->bssidx, ifp->netif_stop, reason, state); | 249 | ifp->bssidx, ifp->netif_stop, reason, state); |
250 | |||
251 | spin_lock_irqsave(&ifp->netif_stop_lock, flags); | ||
248 | if (state) { | 252 | if (state) { |
249 | if (!ifp->netif_stop) | 253 | if (!ifp->netif_stop) |
250 | netif_stop_queue(ifp->ndev); | 254 | netif_stop_queue(ifp->ndev); |
@@ -254,6 +258,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp, | |||
254 | if (!ifp->netif_stop) | 258 | if (!ifp->netif_stop) |
255 | netif_wake_queue(ifp->ndev); | 259 | netif_wake_queue(ifp->ndev); |
256 | } | 260 | } |
261 | spin_unlock_irqrestore(&ifp->netif_stop_lock, flags); | ||
257 | } | 262 | } |
258 | 263 | ||
259 | void brcmf_txflowblock(struct device *dev, bool state) | 264 | void brcmf_txflowblock(struct device *dev, bool state) |
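brcmf_txflowblock_if() now updates the per-interface netif_stop bitmap under netif_stop_lock, so concurrent callers (bus flow control and firmware signalling) cannot race the stop/wake decision. A condensed sketch of the resulting logic; the two bitmap statements in the middle fall between the hunks shown above and are an assumption here:

        /* sketch only - mirrors brcmf_txflowblock_if() after this patch */
        spin_lock_irqsave(&ifp->netif_stop_lock, flags);
        if (state) {
                if (!ifp->netif_stop)           /* first blocker: stop the queue */
                        netif_stop_queue(ifp->ndev);
                ifp->netif_stop |= reason;      /* assumed: record this reason */
        } else {
                ifp->netif_stop &= ~reason;     /* assumed: clear this reason */
                if (!ifp->netif_stop)           /* last blocker gone: wake queue */
                        netif_wake_queue(ifp->ndev);
        }
        spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);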
@@ -264,9 +269,14 @@ void brcmf_txflowblock(struct device *dev, bool state) | |||
264 | 269 | ||
265 | brcmf_dbg(TRACE, "Enter\n"); | 270 | brcmf_dbg(TRACE, "Enter\n"); |
266 | 271 | ||
267 | for (i = 0; i < BRCMF_MAX_IFS; i++) | 272 | if (brcmf_fws_fc_active(drvr->fws)) { |
268 | brcmf_txflowblock_if(drvr->iflist[i], | 273 | brcmf_fws_bus_blocked(drvr, state); |
269 | BRCMF_NETIF_STOP_REASON_BLOCK_BUS, state); | 274 | } else { |
275 | for (i = 0; i < BRCMF_MAX_IFS; i++) | ||
276 | brcmf_txflowblock_if(drvr->iflist[i], | ||
277 | BRCMF_NETIF_STOP_REASON_BLOCK_BUS, | ||
278 | state); | ||
279 | } | ||
270 | } | 280 | } |
271 | 281 | ||
272 | void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list) | 282 | void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list) |
@@ -280,7 +290,7 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list) | |||
280 | u8 ifidx; | 290 | u8 ifidx; |
281 | int ret; | 291 | int ret; |
282 | 292 | ||
283 | brcmf_dbg(TRACE, "Enter\n"); | 293 | brcmf_dbg(DATA, "Enter\n"); |
284 | 294 | ||
285 | skb_queue_walk_safe(skb_list, skb, pnext) { | 295 | skb_queue_walk_safe(skb_list, skb, pnext) { |
286 | skb_unlink(skb, skb_list); | 296 | skb_unlink(skb, skb_list); |
@@ -630,7 +640,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked) | |||
630 | /* set appropriate operations */ | 640 | /* set appropriate operations */ |
631 | ndev->netdev_ops = &brcmf_netdev_ops_pri; | 641 | ndev->netdev_ops = &brcmf_netdev_ops_pri; |
632 | 642 | ||
633 | ndev->hard_header_len = ETH_HLEN + drvr->hdrlen; | 643 | ndev->hard_header_len += drvr->hdrlen; |
634 | ndev->ethtool_ops = &brcmf_ethtool_ops; | 644 | ndev->ethtool_ops = &brcmf_ethtool_ops; |
635 | 645 | ||
636 | drvr->rxsz = ndev->mtu + ndev->hard_header_len + | 646 | drvr->rxsz = ndev->mtu + ndev->hard_header_len + |
@@ -779,6 +789,7 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx, | |||
779 | ifp->bssidx = bssidx; | 789 | ifp->bssidx = bssidx; |
780 | 790 | ||
781 | init_waitqueue_head(&ifp->pend_8021x_wait); | 791 | init_waitqueue_head(&ifp->pend_8021x_wait); |
792 | spin_lock_init(&ifp->netif_stop_lock); | ||
782 | 793 | ||
783 | if (mac_addr != NULL) | 794 | if (mac_addr != NULL) |
784 | memcpy(ifp->mac_addr, mac_addr, ETH_ALEN); | 795 | memcpy(ifp->mac_addr, mac_addr, ETH_ALEN); |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index d2487518bd2a..264111968320 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | |||
@@ -448,8 +448,6 @@ struct brcmf_sdio { | |||
448 | uint rxblen; /* Allocated length of rxbuf */ | 448 | uint rxblen; /* Allocated length of rxbuf */ |
449 | u8 *rxctl; /* Aligned pointer into rxbuf */ | 449 | u8 *rxctl; /* Aligned pointer into rxbuf */ |
450 | u8 *rxctl_orig; /* pointer for freeing rxctl */ | 450 | u8 *rxctl_orig; /* pointer for freeing rxctl */ |
451 | u8 *databuf; /* Buffer for receiving big glom packet */ | ||
452 | u8 *dataptr; /* Aligned pointer into databuf */ | ||
453 | uint rxlen; /* Length of valid data in buffer */ | 451 | uint rxlen; /* Length of valid data in buffer */ |
454 | spinlock_t rxctl_lock; /* protection lock for ctrl frame resources */ | 452 | spinlock_t rxctl_lock; /* protection lock for ctrl frame resources */ |
455 | 453 | ||
@@ -473,8 +471,6 @@ struct brcmf_sdio { | |||
473 | s32 idletime; /* Control for activity timeout */ | 471 | s32 idletime; /* Control for activity timeout */ |
474 | s32 idlecount; /* Activity timeout counter */ | 472 | s32 idlecount; /* Activity timeout counter */ |
475 | s32 idleclock; /* How to set bus driver when idle */ | 473 | s32 idleclock; /* How to set bus driver when idle */ |
476 | s32 sd_rxchain; | ||
477 | bool use_rxchain; /* If brcmf should use PKT chains */ | ||
478 | bool rxflow_mode; /* Rx flow control mode */ | 474 | bool rxflow_mode; /* Rx flow control mode */ |
479 | bool rxflow; /* Is rx flow control on */ | 475 | bool rxflow; /* Is rx flow control on */ |
480 | bool alp_only; /* Don't use HT clock (ALP only) */ | 476 | bool alp_only; /* Don't use HT clock (ALP only) */ |
@@ -495,8 +491,7 @@ struct brcmf_sdio { | |||
495 | 491 | ||
496 | struct workqueue_struct *brcmf_wq; | 492 | struct workqueue_struct *brcmf_wq; |
497 | struct work_struct datawork; | 493 | struct work_struct datawork; |
498 | struct list_head dpc_tsklst; | 494 | atomic_t dpc_tskcnt; |
499 | spinlock_t dpc_tl_lock; | ||
500 | 495 | ||
501 | const struct firmware *firmware; | 496 | const struct firmware *firmware; |
502 | u32 fw_ptr; | 497 | u32 fw_ptr; |
@@ -1026,29 +1021,6 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx) | |||
1026 | bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; | 1021 | bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; |
1027 | } | 1022 | } |
1028 | 1023 | ||
1029 | /* copy a buffer into a pkt buffer chain */ | ||
1030 | static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_sdio *bus, uint len) | ||
1031 | { | ||
1032 | uint n, ret = 0; | ||
1033 | struct sk_buff *p; | ||
1034 | u8 *buf; | ||
1035 | |||
1036 | buf = bus->dataptr; | ||
1037 | |||
1038 | /* copy the data */ | ||
1039 | skb_queue_walk(&bus->glom, p) { | ||
1040 | n = min_t(uint, p->len, len); | ||
1041 | memcpy(p->data, buf, n); | ||
1042 | buf += n; | ||
1043 | len -= n; | ||
1044 | ret += n; | ||
1045 | if (!len) | ||
1046 | break; | ||
1047 | } | ||
1048 | |||
1049 | return ret; | ||
1050 | } | ||
1051 | |||
1052 | /* return total length of buffer chain */ | 1024 | /* return total length of buffer chain */ |
1053 | static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus) | 1025 | static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus) |
1054 | { | 1026 | { |
@@ -1202,8 +1174,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) | |||
1202 | int errcode; | 1174 | int errcode; |
1203 | u8 doff, sfdoff; | 1175 | u8 doff, sfdoff; |
1204 | 1176 | ||
1205 | bool usechain = bus->use_rxchain; | ||
1206 | |||
1207 | struct brcmf_sdio_read rd_new; | 1177 | struct brcmf_sdio_read rd_new; |
1208 | 1178 | ||
1209 | /* If packets, issue read(s) and send up packet chain */ | 1179 | /* If packets, issue read(s) and send up packet chain */ |
@@ -1238,7 +1208,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) | |||
1238 | if (sublen % BRCMF_SDALIGN) { | 1208 | if (sublen % BRCMF_SDALIGN) { |
1239 | brcmf_err("sublen %d not multiple of %d\n", | 1209 | brcmf_err("sublen %d not multiple of %d\n", |
1240 | sublen, BRCMF_SDALIGN); | 1210 | sublen, BRCMF_SDALIGN); |
1241 | usechain = false; | ||
1242 | } | 1211 | } |
1243 | totlen += sublen; | 1212 | totlen += sublen; |
1244 | 1213 | ||
@@ -1305,27 +1274,9 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) | |||
1305 | * packet and copy into the chain. | 1274 | * packet and copy into the chain. |
1306 | */ | 1275 | */ |
1307 | sdio_claim_host(bus->sdiodev->func[1]); | 1276 | sdio_claim_host(bus->sdiodev->func[1]); |
1308 | if (usechain) { | 1277 | errcode = brcmf_sdcard_recv_chain(bus->sdiodev, |
1309 | errcode = brcmf_sdcard_recv_chain(bus->sdiodev, | 1278 | bus->sdiodev->sbwad, |
1310 | bus->sdiodev->sbwad, | 1279 | SDIO_FUNC_2, F2SYNC, &bus->glom); |
1311 | SDIO_FUNC_2, F2SYNC, &bus->glom); | ||
1312 | } else if (bus->dataptr) { | ||
1313 | errcode = brcmf_sdcard_recv_buf(bus->sdiodev, | ||
1314 | bus->sdiodev->sbwad, | ||
1315 | SDIO_FUNC_2, F2SYNC, | ||
1316 | bus->dataptr, dlen); | ||
1317 | sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen); | ||
1318 | if (sublen != dlen) { | ||
1319 | brcmf_err("FAILED TO COPY, dlen %d sublen %d\n", | ||
1320 | dlen, sublen); | ||
1321 | errcode = -1; | ||
1322 | } | ||
1323 | pnext = NULL; | ||
1324 | } else { | ||
1325 | brcmf_err("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n", | ||
1326 | dlen); | ||
1327 | errcode = -1; | ||
1328 | } | ||
1329 | sdio_release_host(bus->sdiodev->func[1]); | 1280 | sdio_release_host(bus->sdiodev->func[1]); |
1330 | bus->sdcnt.f2rxdata++; | 1281 | bus->sdcnt.f2rxdata++; |
1331 | 1282 | ||
@@ -2061,23 +2012,6 @@ static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus) | |||
2061 | } | 2012 | } |
2062 | } | 2013 | } |
2063 | 2014 | ||
2064 | static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus) | ||
2065 | { | ||
2066 | struct list_head *new_hd; | ||
2067 | unsigned long flags; | ||
2068 | |||
2069 | if (in_interrupt()) | ||
2070 | new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC); | ||
2071 | else | ||
2072 | new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL); | ||
2073 | if (new_hd == NULL) | ||
2074 | return; | ||
2075 | |||
2076 | spin_lock_irqsave(&bus->dpc_tl_lock, flags); | ||
2077 | list_add_tail(new_hd, &bus->dpc_tsklst); | ||
2078 | spin_unlock_irqrestore(&bus->dpc_tl_lock, flags); | ||
2079 | } | ||
2080 | |||
2081 | static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus) | 2015 | static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus) |
2082 | { | 2016 | { |
2083 | u8 idx; | 2017 | u8 idx; |
@@ -2312,7 +2246,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) | |||
2312 | (!atomic_read(&bus->fcstate) && | 2246 | (!atomic_read(&bus->fcstate) && |
2313 | brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && | 2247 | brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && |
2314 | data_ok(bus)) || PKT_AVAILABLE()) { | 2248 | data_ok(bus)) || PKT_AVAILABLE()) { |
2315 | brcmf_sdbrcm_adddpctsk(bus); | 2249 | atomic_inc(&bus->dpc_tskcnt); |
2316 | } | 2250 | } |
2317 | 2251 | ||
2318 | /* If we're done for now, turn off clock request. */ | 2252 | /* If we're done for now, turn off clock request. */ |
@@ -2342,7 +2276,6 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt) | |||
2342 | struct brcmf_bus *bus_if = dev_get_drvdata(dev); | 2276 | struct brcmf_bus *bus_if = dev_get_drvdata(dev); |
2343 | struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; | 2277 | struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; |
2344 | struct brcmf_sdio *bus = sdiodev->bus; | 2278 | struct brcmf_sdio *bus = sdiodev->bus; |
2345 | unsigned long flags; | ||
2346 | 2279 | ||
2347 | brcmf_dbg(TRACE, "Enter\n"); | 2280 | brcmf_dbg(TRACE, "Enter\n"); |
2348 | 2281 | ||
@@ -2369,26 +2302,21 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt) | |||
2369 | } else { | 2302 | } else { |
2370 | ret = 0; | 2303 | ret = 0; |
2371 | } | 2304 | } |
2372 | spin_unlock_bh(&bus->txqlock); | ||
2373 | 2305 | ||
2374 | if (pktq_len(&bus->txq) >= TXHI) { | 2306 | if (pktq_len(&bus->txq) >= TXHI) { |
2375 | bus->txoff = true; | 2307 | bus->txoff = true; |
2376 | brcmf_txflowblock(bus->sdiodev->dev, true); | 2308 | brcmf_txflowblock(bus->sdiodev->dev, true); |
2377 | } | 2309 | } |
2310 | spin_unlock_bh(&bus->txqlock); | ||
2378 | 2311 | ||
2379 | #ifdef DEBUG | 2312 | #ifdef DEBUG |
2380 | if (pktq_plen(&bus->txq, prec) > qcount[prec]) | 2313 | if (pktq_plen(&bus->txq, prec) > qcount[prec]) |
2381 | qcount[prec] = pktq_plen(&bus->txq, prec); | 2314 | qcount[prec] = pktq_plen(&bus->txq, prec); |
2382 | #endif | 2315 | #endif |
2383 | 2316 | ||
2384 | spin_lock_irqsave(&bus->dpc_tl_lock, flags); | 2317 | if (atomic_read(&bus->dpc_tskcnt) == 0) { |
2385 | if (list_empty(&bus->dpc_tsklst)) { | 2318 | atomic_inc(&bus->dpc_tskcnt); |
2386 | spin_unlock_irqrestore(&bus->dpc_tl_lock, flags); | ||
2387 | |||
2388 | brcmf_sdbrcm_adddpctsk(bus); | ||
2389 | queue_work(bus->brcmf_wq, &bus->datawork); | 2319 | queue_work(bus->brcmf_wq, &bus->datawork); |
2390 | } else { | ||
2391 | spin_unlock_irqrestore(&bus->dpc_tl_lock, flags); | ||
2392 | } | 2320 | } |
2393 | 2321 | ||
2394 | return ret; | 2322 | return ret; |
@@ -2525,7 +2453,6 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen) | |||
2525 | struct brcmf_bus *bus_if = dev_get_drvdata(dev); | 2453 | struct brcmf_bus *bus_if = dev_get_drvdata(dev); |
2526 | struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; | 2454 | struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; |
2527 | struct brcmf_sdio *bus = sdiodev->bus; | 2455 | struct brcmf_sdio *bus = sdiodev->bus; |
2528 | unsigned long flags; | ||
2529 | 2456 | ||
2530 | brcmf_dbg(TRACE, "Enter\n"); | 2457 | brcmf_dbg(TRACE, "Enter\n"); |
2531 | 2458 | ||
@@ -2612,18 +2539,13 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen) | |||
2612 | } while (ret < 0 && retries++ < TXRETRIES); | 2539 | } while (ret < 0 && retries++ < TXRETRIES); |
2613 | } | 2540 | } |
2614 | 2541 | ||
2615 | spin_lock_irqsave(&bus->dpc_tl_lock, flags); | ||
2616 | if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) && | 2542 | if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) && |
2617 | list_empty(&bus->dpc_tsklst)) { | 2543 | atomic_read(&bus->dpc_tskcnt) == 0) { |
2618 | spin_unlock_irqrestore(&bus->dpc_tl_lock, flags); | ||
2619 | |||
2620 | bus->activity = false; | 2544 | bus->activity = false; |
2621 | sdio_claim_host(bus->sdiodev->func[1]); | 2545 | sdio_claim_host(bus->sdiodev->func[1]); |
2622 | brcmf_dbg(INFO, "idle\n"); | 2546 | brcmf_dbg(INFO, "idle\n"); |
2623 | brcmf_sdbrcm_clkctl(bus, CLK_NONE, true); | 2547 | brcmf_sdbrcm_clkctl(bus, CLK_NONE, true); |
2624 | sdio_release_host(bus->sdiodev->func[1]); | 2548 | sdio_release_host(bus->sdiodev->func[1]); |
2625 | } else { | ||
2626 | spin_unlock_irqrestore(&bus->dpc_tl_lock, flags); | ||
2627 | } | 2549 | } |
2628 | 2550 | ||
2629 | if (ret) | 2551 | if (ret) |
@@ -3451,7 +3373,7 @@ void brcmf_sdbrcm_isr(void *arg) | |||
3451 | if (!bus->intr) | 3373 | if (!bus->intr) |
3452 | brcmf_err("isr w/o interrupt configured!\n"); | 3374 | brcmf_err("isr w/o interrupt configured!\n"); |
3453 | 3375 | ||
3454 | brcmf_sdbrcm_adddpctsk(bus); | 3376 | atomic_inc(&bus->dpc_tskcnt); |
3455 | queue_work(bus->brcmf_wq, &bus->datawork); | 3377 | queue_work(bus->brcmf_wq, &bus->datawork); |
3456 | } | 3378 | } |
3457 | 3379 | ||
@@ -3460,7 +3382,6 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus) | |||
3460 | #ifdef DEBUG | 3382 | #ifdef DEBUG |
3461 | struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev); | 3383 | struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev); |
3462 | #endif /* DEBUG */ | 3384 | #endif /* DEBUG */ |
3463 | unsigned long flags; | ||
3464 | 3385 | ||
3465 | brcmf_dbg(TIMER, "Enter\n"); | 3386 | brcmf_dbg(TIMER, "Enter\n"); |
3466 | 3387 | ||
@@ -3476,11 +3397,9 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus) | |||
3476 | if (!bus->intr || | 3397 | if (!bus->intr || |
3477 | (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) { | 3398 | (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) { |
3478 | 3399 | ||
3479 | spin_lock_irqsave(&bus->dpc_tl_lock, flags); | 3400 | if (atomic_read(&bus->dpc_tskcnt) == 0) { |
3480 | if (list_empty(&bus->dpc_tsklst)) { | ||
3481 | u8 devpend; | 3401 | u8 devpend; |
3482 | spin_unlock_irqrestore(&bus->dpc_tl_lock, | 3402 | |
3483 | flags); | ||
3484 | sdio_claim_host(bus->sdiodev->func[1]); | 3403 | sdio_claim_host(bus->sdiodev->func[1]); |
3485 | devpend = brcmf_sdio_regrb(bus->sdiodev, | 3404 | devpend = brcmf_sdio_regrb(bus->sdiodev, |
3486 | SDIO_CCCR_INTx, | 3405 | SDIO_CCCR_INTx, |
@@ -3489,9 +3408,6 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus) | |||
3489 | intstatus = | 3408 | intstatus = |
3490 | devpend & (INTR_STATUS_FUNC1 | | 3409 | devpend & (INTR_STATUS_FUNC1 | |
3491 | INTR_STATUS_FUNC2); | 3410 | INTR_STATUS_FUNC2); |
3492 | } else { | ||
3493 | spin_unlock_irqrestore(&bus->dpc_tl_lock, | ||
3494 | flags); | ||
3495 | } | 3411 | } |
3496 | 3412 | ||
3497 | /* If there is something, make like the ISR and | 3413 | /* If there is something, make like the ISR and |
@@ -3500,7 +3416,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus) | |||
3500 | bus->sdcnt.pollcnt++; | 3416 | bus->sdcnt.pollcnt++; |
3501 | atomic_set(&bus->ipend, 1); | 3417 | atomic_set(&bus->ipend, 1); |
3502 | 3418 | ||
3503 | brcmf_sdbrcm_adddpctsk(bus); | 3419 | atomic_inc(&bus->dpc_tskcnt); |
3504 | queue_work(bus->brcmf_wq, &bus->datawork); | 3420 | queue_work(bus->brcmf_wq, &bus->datawork); |
3505 | } | 3421 | } |
3506 | } | 3422 | } |
@@ -3545,41 +3461,15 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus) | |||
3545 | return (atomic_read(&bus->ipend) > 0); | 3461 | return (atomic_read(&bus->ipend) > 0); |
3546 | } | 3462 | } |
3547 | 3463 | ||
3548 | static bool brcmf_sdbrcm_chipmatch(u16 chipid) | ||
3549 | { | ||
3550 | if (chipid == BCM43143_CHIP_ID) | ||
3551 | return true; | ||
3552 | if (chipid == BCM43241_CHIP_ID) | ||
3553 | return true; | ||
3554 | if (chipid == BCM4329_CHIP_ID) | ||
3555 | return true; | ||
3556 | if (chipid == BCM4330_CHIP_ID) | ||
3557 | return true; | ||
3558 | if (chipid == BCM4334_CHIP_ID) | ||
3559 | return true; | ||
3560 | if (chipid == BCM4335_CHIP_ID) | ||
3561 | return true; | ||
3562 | return false; | ||
3563 | } | ||
3564 | |||
3565 | static void brcmf_sdio_dataworker(struct work_struct *work) | 3464 | static void brcmf_sdio_dataworker(struct work_struct *work) |
3566 | { | 3465 | { |
3567 | struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio, | 3466 | struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio, |
3568 | datawork); | 3467 | datawork); |
3569 | struct list_head *cur_hd, *tmp_hd; | ||
3570 | unsigned long flags; | ||
3571 | |||
3572 | spin_lock_irqsave(&bus->dpc_tl_lock, flags); | ||
3573 | list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) { | ||
3574 | spin_unlock_irqrestore(&bus->dpc_tl_lock, flags); | ||
3575 | 3468 | ||
3469 | while (atomic_read(&bus->dpc_tskcnt)) { | ||
3576 | brcmf_sdbrcm_dpc(bus); | 3470 | brcmf_sdbrcm_dpc(bus); |
3577 | 3471 | atomic_dec(&bus->dpc_tskcnt); | |
3578 | spin_lock_irqsave(&bus->dpc_tl_lock, flags); | ||
3579 | list_del(cur_hd); | ||
3580 | kfree(cur_hd); | ||
3581 | } | 3472 | } |
3582 | spin_unlock_irqrestore(&bus->dpc_tl_lock, flags); | ||
3583 | } | 3473 | } |
3584 | 3474 | ||
3585 | static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus) | 3475 | static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus) |
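The DPC bookkeeping above replaces the kzalloc'd task list and its dpc_tl_lock with a plain atomic counter: every event source bumps dpc_tskcnt and kicks the workqueue, and the worker runs the DPC once per pending count. The pattern, with names taken from this patch:

        /* producer side (ISR, watchdog poll) */
        atomic_inc(&bus->dpc_tskcnt);
        queue_work(bus->brcmf_wq, &bus->datawork);

        /* consumer side (brcmf_sdio_dataworker) */
        while (atomic_read(&bus->dpc_tskcnt)) {
                brcmf_sdbrcm_dpc(bus);
                atomic_dec(&bus->dpc_tskcnt);
        }

This avoids allocating a list node per interrupt; the txdata path only increments and queues new work when the count is already zero, so back-to-back transmit requests collapse into a single pass of the worker.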
@@ -3589,9 +3479,6 @@ static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus) | |||
3589 | kfree(bus->rxbuf); | 3479 | kfree(bus->rxbuf); |
3590 | bus->rxctl = bus->rxbuf = NULL; | 3480 | bus->rxctl = bus->rxbuf = NULL; |
3591 | bus->rxlen = 0; | 3481 | bus->rxlen = 0; |
3592 | |||
3593 | kfree(bus->databuf); | ||
3594 | bus->databuf = NULL; | ||
3595 | } | 3482 | } |
3596 | 3483 | ||
3597 | static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus) | 3484 | static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus) |
@@ -3604,29 +3491,10 @@ static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus) | |||
3604 | ALIGNMENT) + BRCMF_SDALIGN; | 3491 | ALIGNMENT) + BRCMF_SDALIGN; |
3605 | bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC); | 3492 | bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC); |
3606 | if (!(bus->rxbuf)) | 3493 | if (!(bus->rxbuf)) |
3607 | goto fail; | 3494 | return false; |
3608 | } | ||
3609 | |||
3610 | /* Allocate buffer to receive glomed packet */ | ||
3611 | bus->databuf = kmalloc(MAX_DATA_BUF, GFP_ATOMIC); | ||
3612 | if (!(bus->databuf)) { | ||
3613 | /* release rxbuf which was already located as above */ | ||
3614 | if (!bus->rxblen) | ||
3615 | kfree(bus->rxbuf); | ||
3616 | goto fail; | ||
3617 | } | 3495 | } |
3618 | 3496 | ||
3619 | /* Align the buffer */ | ||
3620 | if ((unsigned long)bus->databuf % BRCMF_SDALIGN) | ||
3621 | bus->dataptr = bus->databuf + (BRCMF_SDALIGN - | ||
3622 | ((unsigned long)bus->databuf % BRCMF_SDALIGN)); | ||
3623 | else | ||
3624 | bus->dataptr = bus->databuf; | ||
3625 | |||
3626 | return true; | 3497 | return true; |
3627 | |||
3628 | fail: | ||
3629 | return false; | ||
3630 | } | 3498 | } |
3631 | 3499 | ||
3632 | static bool | 3500 | static bool |
@@ -3667,11 +3535,6 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva) | |||
3667 | goto fail; | 3535 | goto fail; |
3668 | } | 3536 | } |
3669 | 3537 | ||
3670 | if (!brcmf_sdbrcm_chipmatch((u16) bus->ci->chip)) { | ||
3671 | brcmf_err("unsupported chip: 0x%04x\n", bus->ci->chip); | ||
3672 | goto fail; | ||
3673 | } | ||
3674 | |||
3675 | if (brcmf_sdbrcm_kso_init(bus)) { | 3538 | if (brcmf_sdbrcm_kso_init(bus)) { |
3676 | brcmf_err("error enabling KSO\n"); | 3539 | brcmf_err("error enabling KSO\n"); |
3677 | goto fail; | 3540 | goto fail; |
@@ -3770,10 +3633,6 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus) | |||
3770 | bus->blocksize = bus->sdiodev->func[2]->cur_blksize; | 3633 | bus->blocksize = bus->sdiodev->func[2]->cur_blksize; |
3771 | bus->roundup = min(max_roundup, bus->blocksize); | 3634 | bus->roundup = min(max_roundup, bus->blocksize); |
3772 | 3635 | ||
3773 | /* bus module does not support packet chaining */ | ||
3774 | bus->use_rxchain = false; | ||
3775 | bus->sd_rxchain = false; | ||
3776 | |||
3777 | /* SR state */ | 3636 | /* SR state */ |
3778 | bus->sleeping = false; | 3637 | bus->sleeping = false; |
3779 | bus->sr_enabled = false; | 3638 | bus->sr_enabled = false; |
@@ -3927,8 +3786,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev) | |||
3927 | bus->watchdog_tsk = NULL; | 3786 | bus->watchdog_tsk = NULL; |
3928 | } | 3787 | } |
3929 | /* Initialize DPC thread */ | 3788 | /* Initialize DPC thread */ |
3930 | INIT_LIST_HEAD(&bus->dpc_tsklst); | 3789 | atomic_set(&bus->dpc_tskcnt, 0); |
3931 | spin_lock_init(&bus->dpc_tl_lock); | ||
3932 | 3790 | ||
3933 | /* Assign bus interface call back */ | 3791 | /* Assign bus interface call back */ |
3934 | bus->sdiodev->bus_if->dev = bus->sdiodev->dev; | 3792 | bus->sdiodev->bus_if->dev = bus->sdiodev->dev; |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h index 6ec5db9c60a5..e679214b3c98 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h | |||
@@ -101,7 +101,8 @@ struct brcmf_event; | |||
101 | BRCMF_ENUM_DEF(P2P_PROBEREQ_MSG, 72) \ | 101 | BRCMF_ENUM_DEF(P2P_PROBEREQ_MSG, 72) \ |
102 | BRCMF_ENUM_DEF(DCS_REQUEST, 73) \ | 102 | BRCMF_ENUM_DEF(DCS_REQUEST, 73) \ |
103 | BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \ | 103 | BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \ |
104 | BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) | 104 | BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \ |
105 | BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) | ||
105 | 106 | ||
106 | #define BRCMF_ENUM_DEF(id, val) \ | 107 | #define BRCMF_ENUM_DEF(id, val) \ |
107 | BRCMF_E_##id = (val), | 108 | BRCMF_E_##id = (val), |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c index 5352dc1fdf3c..13e75c4b1a6b 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/etherdevice.h> | 22 | #include <linux/etherdevice.h> |
23 | #include <linux/err.h> | 23 | #include <linux/err.h> |
24 | #include <linux/jiffies.h> | 24 | #include <linux/jiffies.h> |
25 | #include <uapi/linux/nl80211.h> | ||
26 | #include <net/cfg80211.h> | 25 | #include <net/cfg80211.h> |
27 | 26 | ||
28 | #include <brcmu_utils.h> | 27 | #include <brcmu_utils.h> |
@@ -142,7 +141,7 @@ static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id) | |||
142 | #define BRCMF_FWS_FLOWCONTROL_HIWATER 128 | 141 | #define BRCMF_FWS_FLOWCONTROL_HIWATER 128 |
143 | #define BRCMF_FWS_FLOWCONTROL_LOWATER 64 | 142 | #define BRCMF_FWS_FLOWCONTROL_LOWATER 64 |
144 | 143 | ||
145 | #define BRCMF_FWS_PSQ_PREC_COUNT ((NL80211_NUM_ACS + 1) * 2) | 144 | #define BRCMF_FWS_PSQ_PREC_COUNT ((BRCMF_FWS_FIFO_COUNT + 1) * 2) |
146 | #define BRCMF_FWS_PSQ_LEN 256 | 145 | #define BRCMF_FWS_PSQ_LEN 256 |
147 | 146 | ||
148 | #define BRCMF_FWS_HTOD_FLAG_PKTFROMHOST 0x01 | 147 | #define BRCMF_FWS_HTOD_FLAG_PKTFROMHOST 0x01 |
@@ -157,11 +156,13 @@ static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id) | |||
157 | * @BRCMF_FWS_SKBSTATE_NEW: sk_buff is newly arrived in the driver. | 156 | * @BRCMF_FWS_SKBSTATE_NEW: sk_buff is newly arrived in the driver. |
158 | * @BRCMF_FWS_SKBSTATE_DELAYED: sk_buff had to wait on queue. | 157 | * @BRCMF_FWS_SKBSTATE_DELAYED: sk_buff had to wait on queue. |
159 | * @BRCMF_FWS_SKBSTATE_SUPPRESSED: sk_buff has been suppressed by firmware. | 158 | * @BRCMF_FWS_SKBSTATE_SUPPRESSED: sk_buff has been suppressed by firmware. |
159 | * @BRCMF_FWS_SKBSTATE_TIM: allocated for TIM update info. | ||
160 | */ | 160 | */ |
161 | enum brcmf_fws_skb_state { | 161 | enum brcmf_fws_skb_state { |
162 | BRCMF_FWS_SKBSTATE_NEW, | 162 | BRCMF_FWS_SKBSTATE_NEW, |
163 | BRCMF_FWS_SKBSTATE_DELAYED, | 163 | BRCMF_FWS_SKBSTATE_DELAYED, |
164 | BRCMF_FWS_SKBSTATE_SUPPRESSED | 164 | BRCMF_FWS_SKBSTATE_SUPPRESSED, |
165 | BRCMF_FWS_SKBSTATE_TIM | ||
165 | }; | 166 | }; |
166 | 167 | ||
167 | /** | 168 | /** |
@@ -193,9 +194,8 @@ struct brcmf_skbuff_cb { | |||
193 | * b[11] - packet sent upon firmware request. | 194 | * b[11] - packet sent upon firmware request. |
194 | * b[10] - packet only contains signalling data. | 195 | * b[10] - packet only contains signalling data. |
195 | * b[9] - packet is a tx packet. | 196 | * b[9] - packet is a tx packet. |
196 | * b[8] - packet uses FIFO credit (non-pspoll). | 197 | * b[8] - packet used a requested credit. |
197 | * b[7] - interface in AP mode. | 198 | * b[7] - interface in AP mode. |
198 | * b[6:4] - AC FIFO number. | ||
199 | * b[3:0] - interface index. | 199 | * b[3:0] - interface index. |
200 | */ | 200 | */ |
201 | #define BRCMF_SKB_IF_FLAGS_REQUESTED_MASK 0x0800 | 201 | #define BRCMF_SKB_IF_FLAGS_REQUESTED_MASK 0x0800 |
@@ -204,12 +204,10 @@ struct brcmf_skbuff_cb { | |||
204 | #define BRCMF_SKB_IF_FLAGS_SIGNAL_ONLY_SHIFT 10 | 204 | #define BRCMF_SKB_IF_FLAGS_SIGNAL_ONLY_SHIFT 10 |
205 | #define BRCMF_SKB_IF_FLAGS_TRANSMIT_MASK 0x0200 | 205 | #define BRCMF_SKB_IF_FLAGS_TRANSMIT_MASK 0x0200 |
206 | #define BRCMF_SKB_IF_FLAGS_TRANSMIT_SHIFT 9 | 206 | #define BRCMF_SKB_IF_FLAGS_TRANSMIT_SHIFT 9 |
207 | #define BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK 0x0100 | 207 | #define BRCMF_SKB_IF_FLAGS_REQ_CREDIT_MASK 0x0100 |
208 | #define BRCMF_SKB_IF_FLAGS_CREDITCHECK_SHIFT 8 | 208 | #define BRCMF_SKB_IF_FLAGS_REQ_CREDIT_SHIFT 8 |
209 | #define BRCMF_SKB_IF_FLAGS_IF_AP_MASK 0x0080 | 209 | #define BRCMF_SKB_IF_FLAGS_IF_AP_MASK 0x0080 |
210 | #define BRCMF_SKB_IF_FLAGS_IF_AP_SHIFT 7 | 210 | #define BRCMF_SKB_IF_FLAGS_IF_AP_SHIFT 7 |
211 | #define BRCMF_SKB_IF_FLAGS_FIFO_MASK 0x0070 | ||
212 | #define BRCMF_SKB_IF_FLAGS_FIFO_SHIFT 4 | ||
213 | #define BRCMF_SKB_IF_FLAGS_INDEX_MASK 0x000f | 211 | #define BRCMF_SKB_IF_FLAGS_INDEX_MASK 0x000f |
214 | #define BRCMF_SKB_IF_FLAGS_INDEX_SHIFT 0 | 212 | #define BRCMF_SKB_IF_FLAGS_INDEX_SHIFT 0 |
215 | 213 | ||
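The if_flags masks and shifts above pack several per-packet attributes into one 16-bit word kept in the skb control block. A hand-written illustration of tagging a transmit packet on interface ifidx (ifidx is a stand-in variable; the driver wraps these operations in set/get helper macros not shown in this hunk):

        u16 if_flags = 0;

        /* mark as a tx packet: b[9] */
        if_flags |= (1 << BRCMF_SKB_IF_FLAGS_TRANSMIT_SHIFT) &
                    BRCMF_SKB_IF_FLAGS_TRANSMIT_MASK;
        /* record the interface index: b[3:0] */
        if_flags |= (ifidx << BRCMF_SKB_IF_FLAGS_INDEX_SHIFT) &
                    BRCMF_SKB_IF_FLAGS_INDEX_MASK;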
@@ -246,7 +244,7 @@ struct brcmf_skbuff_cb { | |||
246 | #define BRCMF_SKB_HTOD_TAG_HSLOT_MASK 0x00ffff00 | 244 | #define BRCMF_SKB_HTOD_TAG_HSLOT_MASK 0x00ffff00 |
247 | #define BRCMF_SKB_HTOD_TAG_HSLOT_SHIFT 8 | 245 | #define BRCMF_SKB_HTOD_TAG_HSLOT_SHIFT 8 |
248 | #define BRCMF_SKB_HTOD_TAG_FREERUN_MASK 0x000000ff | 246 | #define BRCMF_SKB_HTOD_TAG_FREERUN_MASK 0x000000ff |
249 | #define BRCMF_SKB_HTOD_TAG_FREERUN_SHIFT 0 | 247 | #define BRCMF_SKB_HTOD_TAG_FREERUN_SHIFT 0 |
250 | 248 | ||
251 | #define brcmf_skb_htod_tag_set_field(skb, field, value) \ | 249 | #define brcmf_skb_htod_tag_set_field(skb, field, value) \ |
252 | brcmu_maskset32(&(brcmf_skbcb(skb)->htod), \ | 250 | brcmu_maskset32(&(brcmf_skbcb(skb)->htod), \ |
@@ -278,6 +276,7 @@ struct brcmf_skbuff_cb { | |||
278 | /** | 276 | /** |
279 | * enum brcmf_fws_fifo - fifo indices used by dongle firmware. | 277 | * enum brcmf_fws_fifo - fifo indices used by dongle firmware. |
280 | * | 278 | * |
279 | * @BRCMF_FWS_FIFO_FIRST: first fifo, i.e. background. | ||
281 | * @BRCMF_FWS_FIFO_AC_BK: fifo for background traffic. | 280 | * @BRCMF_FWS_FIFO_AC_BK: fifo for background traffic. |
282 | * @BRCMF_FWS_FIFO_AC_BE: fifo for best-effort traffic. | 281 | * @BRCMF_FWS_FIFO_AC_BE: fifo for best-effort traffic. |
283 | * @BRCMF_FWS_FIFO_AC_VI: fifo for video traffic. | 282 | * @BRCMF_FWS_FIFO_AC_VI: fifo for video traffic. |
@@ -287,7 +286,8 @@ struct brcmf_skbuff_cb { | |||
287 | * @BRCMF_FWS_FIFO_COUNT: number of fifos. | 286 | * @BRCMF_FWS_FIFO_COUNT: number of fifos. |
288 | */ | 287 | */ |
289 | enum brcmf_fws_fifo { | 288 | enum brcmf_fws_fifo { |
290 | BRCMF_FWS_FIFO_AC_BK, | 289 | BRCMF_FWS_FIFO_FIRST, |
290 | BRCMF_FWS_FIFO_AC_BK = BRCMF_FWS_FIFO_FIRST, | ||
291 | BRCMF_FWS_FIFO_AC_BE, | 291 | BRCMF_FWS_FIFO_AC_BE, |
292 | BRCMF_FWS_FIFO_AC_VI, | 292 | BRCMF_FWS_FIFO_AC_VI, |
293 | BRCMF_FWS_FIFO_AC_VO, | 293 | BRCMF_FWS_FIFO_AC_VO, |
@@ -307,12 +307,15 @@ enum brcmf_fws_fifo { | |||
307 | * firmware suppress the packet as device is already in PS mode. | 307 | * firmware suppress the packet as device is already in PS mode. |
308 | * @BRCMF_FWS_TXSTATUS_FW_TOSSED: | 308 | * @BRCMF_FWS_TXSTATUS_FW_TOSSED: |
309 | * firmware tossed the packet. | 309 | * firmware tossed the packet. |
310 | * @BRCMF_FWS_TXSTATUS_HOST_TOSSED: | ||
311 | * host tossed the packet. | ||
310 | */ | 312 | */ |
311 | enum brcmf_fws_txstatus { | 313 | enum brcmf_fws_txstatus { |
312 | BRCMF_FWS_TXSTATUS_DISCARD, | 314 | BRCMF_FWS_TXSTATUS_DISCARD, |
313 | BRCMF_FWS_TXSTATUS_CORE_SUPPRESS, | 315 | BRCMF_FWS_TXSTATUS_CORE_SUPPRESS, |
314 | BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS, | 316 | BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS, |
315 | BRCMF_FWS_TXSTATUS_FW_TOSSED | 317 | BRCMF_FWS_TXSTATUS_FW_TOSSED, |
318 | BRCMF_FWS_TXSTATUS_HOST_TOSSED | ||
316 | }; | 319 | }; |
317 | 320 | ||
318 | enum brcmf_fws_fcmode { | 321 | enum brcmf_fws_fcmode { |
@@ -343,6 +346,7 @@ enum brcmf_fws_mac_desc_state { | |||
343 | * @transit_count: packet in transit to firmware. | 346 | * @transit_count: packet in transit to firmware. |
344 | */ | 347 | */ |
345 | struct brcmf_fws_mac_descriptor { | 348 | struct brcmf_fws_mac_descriptor { |
349 | char name[16]; | ||
346 | u8 occupied; | 350 | u8 occupied; |
347 | u8 mac_handle; | 351 | u8 mac_handle; |
348 | u8 interface_id; | 352 | u8 interface_id; |
@@ -356,7 +360,6 @@ struct brcmf_fws_mac_descriptor { | |||
356 | u8 seq[BRCMF_FWS_FIFO_COUNT]; | 360 | u8 seq[BRCMF_FWS_FIFO_COUNT]; |
357 | struct pktq psq; | 361 | struct pktq psq; |
358 | int transit_count; | 362 | int transit_count; |
359 | int suppress_count; | ||
360 | int suppr_transit_count; | 363 | int suppr_transit_count; |
361 | bool send_tim_signal; | 364 | bool send_tim_signal; |
362 | u8 traffic_pending_bmp; | 365 | u8 traffic_pending_bmp; |
@@ -383,12 +386,10 @@ enum brcmf_fws_hanger_item_state { | |||
383 | * struct brcmf_fws_hanger_item - single entry for tx pending packet. | 386 | * struct brcmf_fws_hanger_item - single entry for tx pending packet. |
384 | * | 387 | * |
385 | * @state: entry is either free or occupied. | 388 | * @state: entry is either free or occupied. |
386 | * @gen: generation. | ||
387 | * @pkt: packet itself. | 389 | * @pkt: packet itself. |
388 | */ | 390 | */ |
389 | struct brcmf_fws_hanger_item { | 391 | struct brcmf_fws_hanger_item { |
390 | enum brcmf_fws_hanger_item_state state; | 392 | enum brcmf_fws_hanger_item_state state; |
391 | u8 gen; | ||
392 | struct sk_buff *pkt; | 393 | struct sk_buff *pkt; |
393 | }; | 394 | }; |
394 | 395 | ||
@@ -424,6 +425,7 @@ struct brcmf_fws_info { | |||
424 | struct brcmf_fws_stats stats; | 425 | struct brcmf_fws_stats stats; |
425 | struct brcmf_fws_hanger hanger; | 426 | struct brcmf_fws_hanger hanger; |
426 | enum brcmf_fws_fcmode fcmode; | 427 | enum brcmf_fws_fcmode fcmode; |
428 | bool bcmc_credit_check; | ||
427 | struct brcmf_fws_macdesc_table desc; | 429 | struct brcmf_fws_macdesc_table desc; |
428 | struct workqueue_struct *fws_wq; | 430 | struct workqueue_struct *fws_wq; |
429 | struct work_struct fws_dequeue_work; | 431 | struct work_struct fws_dequeue_work; |
@@ -434,6 +436,8 @@ struct brcmf_fws_info { | |||
434 | u32 fifo_credit_map; | 436 | u32 fifo_credit_map; |
435 | u32 fifo_delay_map; | 437 | u32 fifo_delay_map; |
436 | unsigned long borrow_defer_timestamp; | 438 | unsigned long borrow_defer_timestamp; |
439 | bool bus_flow_blocked; | ||
440 | bool creditmap_received; | ||
437 | }; | 441 | }; |
438 | 442 | ||
439 | /* | 443 | /* |
@@ -507,7 +511,6 @@ static void brcmf_fws_hanger_init(struct brcmf_fws_hanger *hanger) | |||
507 | { | 511 | { |
508 | int i; | 512 | int i; |
509 | 513 | ||
510 | brcmf_dbg(TRACE, "enter\n"); | ||
511 | memset(hanger, 0, sizeof(*hanger)); | 514 | memset(hanger, 0, sizeof(*hanger)); |
512 | for (i = 0; i < ARRAY_SIZE(hanger->items); i++) | 515 | for (i = 0; i < ARRAY_SIZE(hanger->items); i++) |
513 | hanger->items[i].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE; | 516 | hanger->items[i].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE; |
@@ -517,7 +520,6 @@ static u32 brcmf_fws_hanger_get_free_slot(struct brcmf_fws_hanger *h) | |||
517 | { | 520 | { |
518 | u32 i; | 521 | u32 i; |
519 | 522 | ||
520 | brcmf_dbg(TRACE, "enter\n"); | ||
521 | i = (h->slot_pos + 1) % BRCMF_FWS_HANGER_MAXITEMS; | 523 | i = (h->slot_pos + 1) % BRCMF_FWS_HANGER_MAXITEMS; |
522 | 524 | ||
523 | while (i != h->slot_pos) { | 525 | while (i != h->slot_pos) { |
@@ -533,14 +535,12 @@ static u32 brcmf_fws_hanger_get_free_slot(struct brcmf_fws_hanger *h) | |||
533 | h->failed_slotfind++; | 535 | h->failed_slotfind++; |
534 | i = BRCMF_FWS_HANGER_MAXITEMS; | 536 | i = BRCMF_FWS_HANGER_MAXITEMS; |
535 | done: | 537 | done: |
536 | brcmf_dbg(TRACE, "exit: %d\n", i); | ||
537 | return i; | 538 | return i; |
538 | } | 539 | } |
539 | 540 | ||
540 | static int brcmf_fws_hanger_pushpkt(struct brcmf_fws_hanger *h, | 541 | static int brcmf_fws_hanger_pushpkt(struct brcmf_fws_hanger *h, |
541 | struct sk_buff *pkt, u32 slot_id) | 542 | struct sk_buff *pkt, u32 slot_id) |
542 | { | 543 | { |
543 | brcmf_dbg(TRACE, "enter\n"); | ||
544 | if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS) | 544 | if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS) |
545 | return -ENOENT; | 545 | return -ENOENT; |
546 | 546 | ||
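For context, the hanger whose debug traces are trimmed here is a fixed-size slot table for tx packets pending in firmware; brcmf_fws_hanger_get_free_slot() scans it circularly starting just past the previously used position. A standalone sketch of that search, with an invented table size and field names:

#include <stdio.h>

#define EX_MAXITEMS 8

enum ex_slot_state { EX_SLOT_FREE, EX_SLOT_INUSE };

struct ex_hanger {
	unsigned int slot_pos;			/* last slot handed out */
	enum ex_slot_state state[EX_MAXITEMS];
};

/* return a free slot index, or EX_MAXITEMS when the table is full */
static unsigned int ex_get_free_slot(struct ex_hanger *h)
{
	unsigned int i = (h->slot_pos + 1) % EX_MAXITEMS;

	while (i != h->slot_pos) {
		if (h->state[i] == EX_SLOT_FREE) {
			h->slot_pos = i;	/* next search resumes after this slot */
			return i;
		}
		i = (i + 1) % EX_MAXITEMS;
	}
	return EX_MAXITEMS;
}

int main(void)
{
	struct ex_hanger h = { .slot_pos = 0 };

	h.state[1] = EX_SLOT_INUSE;
	printf("free slot: %u\n", ex_get_free_slot(&h));	/* prints 2 */
	return 0;
}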
@@ -560,7 +560,6 @@ static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h, | |||
560 | u32 slot_id, struct sk_buff **pktout, | 560 | u32 slot_id, struct sk_buff **pktout, |
561 | bool remove_item) | 561 | bool remove_item) |
562 | { | 562 | { |
563 | brcmf_dbg(TRACE, "enter\n"); | ||
564 | if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS) | 563 | if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS) |
565 | return -ENOENT; | 564 | return -ENOENT; |
566 | 565 | ||
@@ -574,23 +573,18 @@ static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h, | |||
574 | if (remove_item) { | 573 | if (remove_item) { |
575 | h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE; | 574 | h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE; |
576 | h->items[slot_id].pkt = NULL; | 575 | h->items[slot_id].pkt = NULL; |
577 | h->items[slot_id].gen = 0xff; | ||
578 | h->popped++; | 576 | h->popped++; |
579 | } | 577 | } |
580 | return 0; | 578 | return 0; |
581 | } | 579 | } |
582 | 580 | ||
583 | static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h, | 581 | static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h, |
584 | u32 slot_id, u8 gen) | 582 | u32 slot_id) |
585 | { | 583 | { |
586 | brcmf_dbg(TRACE, "enter\n"); | ||
587 | |||
588 | if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS) | 584 | if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS) |
589 | return -ENOENT; | 585 | return -ENOENT; |
590 | 586 | ||
591 | h->items[slot_id].gen = gen; | 587 | if (h->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) { |
592 | |||
593 | if (h->items[slot_id].state != BRCMF_FWS_HANGER_ITEM_STATE_INUSE) { | ||
594 | brcmf_err("entry not in use\n"); | 588 | brcmf_err("entry not in use\n"); |
595 | return -EINVAL; | 589 | return -EINVAL; |
596 | } | 590 | } |
@@ -599,25 +593,6 @@ static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h, | |||
599 | return 0; | 593 | return 0; |
600 | } | 594 | } |
601 | 595 | ||
602 | static int brcmf_fws_hanger_get_genbit(struct brcmf_fws_hanger *hanger, | ||
603 | struct sk_buff *pkt, u32 slot_id, | ||
604 | int *gen) | ||
605 | { | ||
606 | brcmf_dbg(TRACE, "enter\n"); | ||
607 | *gen = 0xff; | ||
608 | |||
609 | if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS) | ||
610 | return -ENOENT; | ||
611 | |||
612 | if (hanger->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) { | ||
613 | brcmf_err("slot not in use\n"); | ||
614 | return -EINVAL; | ||
615 | } | ||
616 | |||
617 | *gen = hanger->items[slot_id].gen; | ||
618 | return 0; | ||
619 | } | ||
620 | |||
621 | static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws, | 596 | static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws, |
622 | bool (*fn)(struct sk_buff *, void *), | 597 | bool (*fn)(struct sk_buff *, void *), |
623 | int ifidx) | 598 | int ifidx) |
@@ -627,7 +602,6 @@ static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws, | |||
627 | int i; | 602 | int i; |
628 | enum brcmf_fws_hanger_item_state s; | 603 | enum brcmf_fws_hanger_item_state s; |
629 | 604 | ||
630 | brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx); | ||
631 | for (i = 0; i < ARRAY_SIZE(h->items); i++) { | 605 | for (i = 0; i < ARRAY_SIZE(h->items); i++) { |
632 | s = h->items[i].state; | 606 | s = h->items[i].state; |
633 | if (s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE || | 607 | if (s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE || |
@@ -644,14 +618,28 @@ static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws, | |||
644 | } | 618 | } |
645 | } | 619 | } |
646 | 620 | ||
647 | static void brcmf_fws_init_mac_descriptor(struct brcmf_fws_mac_descriptor *desc, | 621 | static void brcmf_fws_macdesc_set_name(struct brcmf_fws_info *fws, |
648 | u8 *addr, u8 ifidx) | 622 | struct brcmf_fws_mac_descriptor *desc) |
623 | { | ||
624 | if (desc == &fws->desc.other) | ||
625 | strlcpy(desc->name, "MAC-OTHER", sizeof(desc->name)); | ||
626 | else if (desc->mac_handle) | ||
627 | scnprintf(desc->name, sizeof(desc->name), "MAC-%d:%d", | ||
628 | desc->mac_handle, desc->interface_id); | ||
629 | else | ||
630 | scnprintf(desc->name, sizeof(desc->name), "MACIF:%d", | ||
631 | desc->interface_id); | ||
632 | } | ||
633 | |||
634 | static void brcmf_fws_macdesc_init(struct brcmf_fws_mac_descriptor *desc, | ||
635 | u8 *addr, u8 ifidx) | ||
649 | { | 636 | { |
650 | brcmf_dbg(TRACE, | 637 | brcmf_dbg(TRACE, |
651 | "enter: desc %p ea=%pM, ifidx=%u\n", desc, addr, ifidx); | 638 | "enter: desc %p ea=%pM, ifidx=%u\n", desc, addr, ifidx); |
652 | desc->occupied = 1; | 639 | desc->occupied = 1; |
653 | desc->state = BRCMF_FWS_STATE_OPEN; | 640 | desc->state = BRCMF_FWS_STATE_OPEN; |
654 | desc->requested_credit = 0; | 641 | desc->requested_credit = 0; |
642 | desc->requested_packet = 0; | ||
655 | /* depending on use may need ifp->bssidx instead */ | 643 | /* depending on use may need ifp->bssidx instead */ |
656 | desc->interface_id = ifidx; | 644 | desc->interface_id = ifidx; |
657 | desc->ac_bitmap = 0xff; /* update this when handling APSD */ | 645 | desc->ac_bitmap = 0xff; /* update this when handling APSD */ |
@@ -660,22 +648,22 @@ static void brcmf_fws_init_mac_descriptor(struct brcmf_fws_mac_descriptor *desc, | |||
660 | } | 648 | } |
661 | 649 | ||
662 | static | 650 | static |
663 | void brcmf_fws_clear_mac_descriptor(struct brcmf_fws_mac_descriptor *desc) | 651 | void brcmf_fws_macdesc_deinit(struct brcmf_fws_mac_descriptor *desc) |
664 | { | 652 | { |
665 | brcmf_dbg(TRACE, | 653 | brcmf_dbg(TRACE, |
666 | "enter: ea=%pM, ifidx=%u\n", desc->ea, desc->interface_id); | 654 | "enter: ea=%pM, ifidx=%u\n", desc->ea, desc->interface_id); |
667 | desc->occupied = 0; | 655 | desc->occupied = 0; |
668 | desc->state = BRCMF_FWS_STATE_CLOSE; | 656 | desc->state = BRCMF_FWS_STATE_CLOSE; |
669 | desc->requested_credit = 0; | 657 | desc->requested_credit = 0; |
658 | desc->requested_packet = 0; | ||
670 | } | 659 | } |
671 | 660 | ||
672 | static struct brcmf_fws_mac_descriptor * | 661 | static struct brcmf_fws_mac_descriptor * |
673 | brcmf_fws_mac_descriptor_lookup(struct brcmf_fws_info *fws, u8 *ea) | 662 | brcmf_fws_macdesc_lookup(struct brcmf_fws_info *fws, u8 *ea) |
674 | { | 663 | { |
675 | struct brcmf_fws_mac_descriptor *entry; | 664 | struct brcmf_fws_mac_descriptor *entry; |
676 | int i; | 665 | int i; |
677 | 666 | ||
678 | brcmf_dbg(TRACE, "enter: ea=%pM\n", ea); | ||
679 | if (ea == NULL) | 667 | if (ea == NULL) |
680 | return ERR_PTR(-EINVAL); | 668 | return ERR_PTR(-EINVAL); |
681 | 669 | ||
@@ -690,42 +678,33 @@ brcmf_fws_mac_descriptor_lookup(struct brcmf_fws_info *fws, u8 *ea) | |||
690 | } | 678 | } |
691 | 679 | ||
692 | static struct brcmf_fws_mac_descriptor* | 680 | static struct brcmf_fws_mac_descriptor* |
693 | brcmf_fws_find_mac_desc(struct brcmf_fws_info *fws, struct brcmf_if *ifp, | 681 | brcmf_fws_macdesc_find(struct brcmf_fws_info *fws, struct brcmf_if *ifp, u8 *da) |
694 | u8 *da) | ||
695 | { | 682 | { |
696 | struct brcmf_fws_mac_descriptor *entry = &fws->desc.other; | 683 | struct brcmf_fws_mac_descriptor *entry = &fws->desc.other; |
697 | bool multicast; | 684 | bool multicast; |
698 | enum nl80211_iftype iftype; | ||
699 | |||
700 | brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx); | ||
701 | 685 | ||
702 | multicast = is_multicast_ether_addr(da); | 686 | multicast = is_multicast_ether_addr(da); |
703 | iftype = brcmf_cfg80211_get_iftype(ifp); | ||
704 | 687 | ||
705 | /* Multicast destination and P2P clients get the interface entry. | 688 | /* Multicast destination, STA and P2P clients get the interface entry. |
706 |  * STA gets the interface entry if there is no exact match. For | 689 |  * STA/GC gets the MAC entry for TDLS destinations; TDLS destinations |
707 | * example, TDLS destinations have their own entry. | 690 | * have their own entry. |
708 | */ | 691 | */ |
709 | entry = NULL; | 692 | if (multicast && ifp->fws_desc) { |
710 | if ((multicast || iftype == NL80211_IFTYPE_STATION || | ||
711 | iftype == NL80211_IFTYPE_P2P_CLIENT) && ifp->fws_desc) | ||
712 | entry = ifp->fws_desc; | 693 | entry = ifp->fws_desc; |
713 | |||
714 | if (entry != NULL && iftype != NL80211_IFTYPE_STATION) | ||
715 | goto done; | 694 | goto done; |
695 | } | ||
716 | 696 | ||
717 | entry = brcmf_fws_mac_descriptor_lookup(fws, da); | 697 | entry = brcmf_fws_macdesc_lookup(fws, da); |
718 | if (IS_ERR(entry)) | 698 | if (IS_ERR(entry)) |
719 | entry = &fws->desc.other; | 699 | entry = ifp->fws_desc; |
720 | 700 | ||
721 | done: | 701 | done: |
722 | brcmf_dbg(TRACE, "exit: entry=%p\n", entry); | ||
723 | return entry; | 702 | return entry; |
724 | } | 703 | } |
725 | 704 | ||
726 | static bool brcmf_fws_mac_desc_closed(struct brcmf_fws_info *fws, | 705 | static bool brcmf_fws_macdesc_closed(struct brcmf_fws_info *fws, |
727 | struct brcmf_fws_mac_descriptor *entry, | 706 | struct brcmf_fws_mac_descriptor *entry, |
728 | int fifo) | 707 | int fifo) |
729 | { | 708 | { |
730 | struct brcmf_fws_mac_descriptor *if_entry; | 709 | struct brcmf_fws_mac_descriptor *if_entry; |
731 | bool closed; | 710 | bool closed; |
@@ -748,15 +727,11 @@ static bool brcmf_fws_mac_desc_closed(struct brcmf_fws_info *fws, | |||
748 | return closed || !(entry->ac_bitmap & BIT(fifo)); | 727 | return closed || !(entry->ac_bitmap & BIT(fifo)); |
749 | } | 728 | } |
750 | 729 | ||
751 | static void brcmf_fws_mac_desc_cleanup(struct brcmf_fws_info *fws, | 730 | static void brcmf_fws_macdesc_cleanup(struct brcmf_fws_info *fws, |
752 | struct brcmf_fws_mac_descriptor *entry, | 731 | struct brcmf_fws_mac_descriptor *entry, |
753 | int ifidx) | 732 | int ifidx) |
754 | { | 733 | { |
755 | brcmf_dbg(TRACE, "enter: entry=(ea=%pM, ifid=%d), ifidx=%d\n", | ||
756 | entry->ea, entry->interface_id, ifidx); | ||
757 | if (entry->occupied && (ifidx == -1 || ifidx == entry->interface_id)) { | 734 | if (entry->occupied && (ifidx == -1 || ifidx == entry->interface_id)) { |
758 | brcmf_dbg(TRACE, "flush psq: ifidx=%d, qlen=%d\n", | ||
759 | ifidx, entry->psq.len); | ||
760 | brcmf_fws_psq_flush(fws, &entry->psq, ifidx); | 735 | brcmf_fws_psq_flush(fws, &entry->psq, ifidx); |
761 | entry->occupied = !!(entry->psq.len); | 736 | entry->occupied = !!(entry->psq.len); |
762 | } | 737 | } |
@@ -772,7 +747,6 @@ static void brcmf_fws_bus_txq_cleanup(struct brcmf_fws_info *fws, | |||
772 | int prec; | 747 | int prec; |
773 | u32 hslot; | 748 | u32 hslot; |
774 | 749 | ||
775 | brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx); | ||
776 | txq = brcmf_bus_gettxq(fws->drvr->bus_if); | 750 | txq = brcmf_bus_gettxq(fws->drvr->bus_if); |
777 | if (IS_ERR(txq)) { | 751 | if (IS_ERR(txq)) { |
778 | brcmf_dbg(TRACE, "no txq to clean up\n"); | 752 | brcmf_dbg(TRACE, "no txq to clean up\n"); |
@@ -798,7 +772,6 @@ static void brcmf_fws_cleanup(struct brcmf_fws_info *fws, int ifidx) | |||
798 | struct brcmf_fws_mac_descriptor *table; | 772 | struct brcmf_fws_mac_descriptor *table; |
799 | bool (*matchfn)(struct sk_buff *, void *) = NULL; | 773 | bool (*matchfn)(struct sk_buff *, void *) = NULL; |
800 | 774 | ||
801 | brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx); | ||
802 | if (fws == NULL) | 775 | if (fws == NULL) |
803 | return; | 776 | return; |
804 | 777 | ||
@@ -808,51 +781,122 @@ static void brcmf_fws_cleanup(struct brcmf_fws_info *fws, int ifidx) | |||
808 | /* cleanup individual nodes */ | 781 | /* cleanup individual nodes */ |
809 | table = &fws->desc.nodes[0]; | 782 | table = &fws->desc.nodes[0]; |
810 | for (i = 0; i < ARRAY_SIZE(fws->desc.nodes); i++) | 783 | for (i = 0; i < ARRAY_SIZE(fws->desc.nodes); i++) |
811 | brcmf_fws_mac_desc_cleanup(fws, &table[i], ifidx); | 784 | brcmf_fws_macdesc_cleanup(fws, &table[i], ifidx); |
812 | 785 | ||
813 | brcmf_fws_mac_desc_cleanup(fws, &fws->desc.other, ifidx); | 786 | brcmf_fws_macdesc_cleanup(fws, &fws->desc.other, ifidx); |
814 | brcmf_fws_bus_txq_cleanup(fws, matchfn, ifidx); | 787 | brcmf_fws_bus_txq_cleanup(fws, matchfn, ifidx); |
815 | brcmf_fws_hanger_cleanup(fws, matchfn, ifidx); | 788 | brcmf_fws_hanger_cleanup(fws, matchfn, ifidx); |
816 | } | 789 | } |
817 | 790 | ||
818 | static void brcmf_fws_tim_update(struct brcmf_fws_info *ctx, | 791 | static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb) |
819 | struct brcmf_fws_mac_descriptor *entry, | ||
820 | int prec) | ||
821 | { | 792 | { |
822 | brcmf_dbg(TRACE, "enter: ea=%pM\n", entry->ea); | 793 | struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; |
823 | if (entry->state == BRCMF_FWS_STATE_CLOSE) { | 794 | u8 *wlh; |
824 | /* check delayedQ and suppressQ in one call using bitmap */ | 795 | u16 data_offset = 0; |
825 | if (brcmu_pktq_mlen(&entry->psq, 3 << (prec * 2)) == 0) | 796 | u8 fillers; |
826 | entry->traffic_pending_bmp = | 797 | __le32 pkttag = cpu_to_le32(brcmf_skbcb(skb)->htod); |
827 | entry->traffic_pending_bmp & ~NBITVAL(prec); | 798 | |
828 | else | 799 | brcmf_dbg(TRACE, "enter: ea=%pM, ifidx=%u (%u), pkttag=0x%08X, hslot=%d\n", |
829 | entry->traffic_pending_bmp = | 800 | entry->ea, entry->interface_id, |
830 | entry->traffic_pending_bmp | NBITVAL(prec); | 801 | brcmf_skb_if_flags_get_field(skb, INDEX), |
802 | le32_to_cpu(pkttag), (le32_to_cpu(pkttag) >> 8) & 0xffff); | ||
803 | if (entry->send_tim_signal) | ||
804 | data_offset += 2 + BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN; | ||
805 | |||
806 | /* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */ | ||
807 | data_offset += 2 + BRCMF_FWS_TYPE_PKTTAG_LEN; | ||
808 | fillers = round_up(data_offset, 4) - data_offset; | ||
809 | data_offset += fillers; | ||
810 | |||
811 | skb_push(skb, data_offset); | ||
812 | wlh = skb->data; | ||
813 | |||
814 | wlh[0] = BRCMF_FWS_TYPE_PKTTAG; | ||
815 | wlh[1] = BRCMF_FWS_TYPE_PKTTAG_LEN; | ||
816 | memcpy(&wlh[2], &pkttag, sizeof(pkttag)); | ||
817 | wlh += BRCMF_FWS_TYPE_PKTTAG_LEN + 2; | ||
818 | |||
819 | if (entry->send_tim_signal) { | ||
820 | entry->send_tim_signal = 0; | ||
821 | wlh[0] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP; | ||
822 | wlh[1] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN; | ||
823 | wlh[2] = entry->mac_handle; | ||
824 | wlh[3] = entry->traffic_pending_bmp; | ||
825 | brcmf_dbg(TRACE, "adding TIM info: %02X:%02X:%02X:%02X\n", | ||
826 | wlh[0], wlh[1], wlh[2], wlh[3]); | ||
827 | wlh += BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2; | ||
828 | entry->traffic_lastreported_bmp = entry->traffic_pending_bmp; | ||
831 | } | 829 | } |
832 | /* request a TIM update to firmware at the next piggyback opportunity */ | 830 | if (fillers) |
831 | memset(wlh, BRCMF_FWS_TYPE_FILLER, fillers); | ||
832 | |||
833 | brcmf_proto_hdrpush(fws->drvr, brcmf_skb_if_flags_get_field(skb, INDEX), | ||
834 | data_offset >> 2, skb); | ||
835 | return 0; | ||
836 | } | ||
837 | |||
838 | static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws, | ||
839 | struct brcmf_fws_mac_descriptor *entry, | ||
840 | int fifo, bool send_immediately) | ||
841 | { | ||
842 | struct sk_buff *skb; | ||
843 | struct brcmf_bus *bus; | ||
844 | struct brcmf_skbuff_cb *skcb; | ||
845 | s32 err; | ||
846 | u32 len; | ||
847 | |||
848 | /* check delayedQ and suppressQ in one call using bitmap */ | ||
849 | if (brcmu_pktq_mlen(&entry->psq, 3 << (fifo * 2)) == 0) | ||
850 | entry->traffic_pending_bmp &= ~NBITVAL(fifo); | ||
851 | else | ||
852 | entry->traffic_pending_bmp |= NBITVAL(fifo); | ||
853 | |||
854 | entry->send_tim_signal = false; | ||
833 | if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) | 855 | if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) |
834 | entry->send_tim_signal = true; | 856 | entry->send_tim_signal = true; |
857 | if (send_immediately && entry->send_tim_signal && | ||
858 | entry->state == BRCMF_FWS_STATE_CLOSE) { | ||
859 | 		/* create a dummy packet and send that. The traffic */ | ||
860 | /* bitmap info will automatically be attached to that packet */ | ||
861 | len = BRCMF_FWS_TYPE_PKTTAG_LEN + 2 + | ||
862 | BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2 + | ||
863 | 4 + fws->drvr->hdrlen; | ||
864 | skb = brcmu_pkt_buf_get_skb(len); | ||
865 | if (skb == NULL) | ||
866 | return false; | ||
867 | skb_pull(skb, len); | ||
868 | skcb = brcmf_skbcb(skb); | ||
869 | skcb->mac = entry; | ||
870 | skcb->state = BRCMF_FWS_SKBSTATE_TIM; | ||
871 | bus = fws->drvr->bus_if; | ||
872 | err = brcmf_fws_hdrpush(fws, skb); | ||
873 | if (err == 0) | ||
874 | err = brcmf_bus_txdata(bus, skb); | ||
875 | if (err) | ||
876 | brcmu_pkt_buf_free_skb(skb); | ||
877 | return true; | ||
878 | } | ||
879 | return false; | ||
835 | } | 880 | } |
836 | 881 | ||
837 | static void | 882 | static void |
838 | brcmf_fws_flow_control_check(struct brcmf_fws_info *fws, struct pktq *pq, | 883 | brcmf_fws_flow_control_check(struct brcmf_fws_info *fws, struct pktq *pq, |
839 | u8 if_id) | 884 | u8 if_id) |
840 | { | 885 | { |
841 | struct brcmf_if *ifp = fws->drvr->iflist[if_id]; | 886 | struct brcmf_if *ifp = fws->drvr->iflist[!if_id ? 0 : if_id + 1]; |
842 | 887 | ||
843 | if (WARN_ON(!ifp)) | 888 | if (WARN_ON(!ifp)) |
844 | return; | 889 | return; |
845 | 890 | ||
846 | brcmf_dbg(TRACE, | ||
847 | "enter: bssidx=%d, ifidx=%d\n", ifp->bssidx, ifp->ifidx); | ||
848 | |||
849 | if ((ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) && | 891 | if ((ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) && |
850 | pq->len <= BRCMF_FWS_FLOWCONTROL_LOWATER) | 892 | pq->len <= BRCMF_FWS_FLOWCONTROL_LOWATER) |
851 | brcmf_txflowblock_if(ifp, | 893 | brcmf_txflowblock_if(ifp, |
852 | BRCMF_NETIF_STOP_REASON_FWS_FC, false); | 894 | BRCMF_NETIF_STOP_REASON_FWS_FC, false); |
853 | if (!(ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) && | 895 | if (!(ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) && |
854 | pq->len >= BRCMF_FWS_FLOWCONTROL_HIWATER) | 896 | pq->len >= BRCMF_FWS_FLOWCONTROL_HIWATER) { |
897 | fws->stats.fws_flow_block++; | ||
855 | brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FWS_FC, true); | 898 | brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FWS_FC, true); |
899 | } | ||
856 | return; | 900 | return; |
857 | } | 901 | } |
858 | 902 | ||
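The brcmf_fws_hdrpush() added above prepends a small TLV block to each frame: a packet-tag TLV carrying the 32-bit host-to-device tag, an optional pending-traffic-bitmap TLV (mac handle plus bitmap) when a TIM update is piggybacked, and filler bytes padding the header to a 4-byte multiple before data_offset >> 2 is handed to brcmf_proto_hdrpush(). The standalone sketch below just reproduces that layout; the EX_* type codes are placeholders, not the driver's BRCMF_FWS_TYPE_* values, and byte order is left as host order.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EX_TYPE_PKTTAG	1	/* placeholder TLV type codes */
#define EX_TYPE_TIM_BMP	2
#define EX_TYPE_FILLER	0xff
#define EX_PKTTAG_LEN	4	/* 32-bit host-to-device tag */
#define EX_TIM_BMP_LEN	2	/* mac handle + traffic bitmap */

static unsigned int ex_round_up4(unsigned int x)
{
	return (x + 3) & ~3u;
}

static unsigned int ex_build_hdr(uint8_t *buf, uint32_t pkttag, int send_tim,
				 uint8_t mac_handle, uint8_t traffic_bmp)
{
	unsigned int off = 2 + EX_PKTTAG_LEN;	/* type + len + value */
	unsigned int fillers;
	uint8_t *wlh = buf;

	if (send_tim)
		off += 2 + EX_TIM_BMP_LEN;
	fillers = ex_round_up4(off) - off;
	off += fillers;

	wlh[0] = EX_TYPE_PKTTAG;
	wlh[1] = EX_PKTTAG_LEN;
	memcpy(&wlh[2], &pkttag, sizeof(pkttag));	/* driver stores this little-endian */
	wlh += 2 + EX_PKTTAG_LEN;

	if (send_tim) {
		wlh[0] = EX_TYPE_TIM_BMP;
		wlh[1] = EX_TIM_BMP_LEN;
		wlh[2] = mac_handle;
		wlh[3] = traffic_bmp;
		wlh += 2 + EX_TIM_BMP_LEN;
	}
	if (fillers)
		memset(wlh, EX_TYPE_FILLER, fillers);
	return off;
}

int main(void)
{
	uint8_t hdr[16];

	printf("tag only : %u bytes\n", ex_build_hdr(hdr, 0x1234, 0, 0, 0));
	printf("tag + TIM: %u bytes\n", ex_build_hdr(hdr, 0x1234, 1, 3, 0x5));
	return 0;
}

With these lengths the pushed header is 8 bytes without the TIM TLV and 12 bytes with it, filler included.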
@@ -876,34 +920,38 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data) | |||
876 | 920 | ||
877 | entry = &fws->desc.nodes[mac_handle & 0x1F]; | 921 | entry = &fws->desc.nodes[mac_handle & 0x1F]; |
878 | if (type == BRCMF_FWS_TYPE_MACDESC_DEL) { | 922 | if (type == BRCMF_FWS_TYPE_MACDESC_DEL) { |
879 | brcmf_dbg(TRACE, "deleting mac %pM idx %d\n", addr, ifidx); | ||
880 | if (entry->occupied) { | 923 | if (entry->occupied) { |
881 | brcmf_fws_mac_desc_cleanup(fws, entry, -1); | 924 | brcmf_dbg(TRACE, "deleting %s mac %pM\n", |
882 | brcmf_fws_clear_mac_descriptor(entry); | 925 | entry->name, addr); |
926 | brcmf_fws_macdesc_cleanup(fws, entry, -1); | ||
927 | brcmf_fws_macdesc_deinit(entry); | ||
883 | } else | 928 | } else |
884 | fws->stats.mac_update_failed++; | 929 | fws->stats.mac_update_failed++; |
885 | return 0; | 930 | return 0; |
886 | } | 931 | } |
887 | 932 | ||
888 | brcmf_dbg(TRACE, | 933 | existing = brcmf_fws_macdesc_lookup(fws, addr); |
889 | "add mac %pM handle %u idx %d\n", addr, mac_handle, ifidx); | ||
890 | existing = brcmf_fws_mac_descriptor_lookup(fws, addr); | ||
891 | if (IS_ERR(existing)) { | 934 | if (IS_ERR(existing)) { |
892 | if (!entry->occupied) { | 935 | if (!entry->occupied) { |
893 | entry->mac_handle = mac_handle; | 936 | entry->mac_handle = mac_handle; |
894 | brcmf_fws_init_mac_descriptor(entry, addr, ifidx); | 937 | brcmf_fws_macdesc_init(entry, addr, ifidx); |
938 | brcmf_fws_macdesc_set_name(fws, entry); | ||
895 | brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT, | 939 | brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT, |
896 | BRCMF_FWS_PSQ_LEN); | 940 | BRCMF_FWS_PSQ_LEN); |
941 | brcmf_dbg(TRACE, "add %s mac %pM\n", entry->name, addr); | ||
897 | } else { | 942 | } else { |
898 | fws->stats.mac_update_failed++; | 943 | fws->stats.mac_update_failed++; |
899 | } | 944 | } |
900 | } else { | 945 | } else { |
901 | if (entry != existing) { | 946 | if (entry != existing) { |
902 | brcmf_dbg(TRACE, "relocate mac\n"); | 947 | brcmf_dbg(TRACE, "copy mac %s\n", existing->name); |
903 | memcpy(entry, existing, | 948 | memcpy(entry, existing, |
904 | offsetof(struct brcmf_fws_mac_descriptor, psq)); | 949 | offsetof(struct brcmf_fws_mac_descriptor, psq)); |
905 | entry->mac_handle = mac_handle; | 950 | entry->mac_handle = mac_handle; |
906 | brcmf_fws_clear_mac_descriptor(existing); | 951 | brcmf_fws_macdesc_deinit(existing); |
952 | brcmf_fws_macdesc_set_name(fws, entry); | ||
953 | brcmf_dbg(TRACE, "relocate %s mac %pM\n", entry->name, | ||
954 | addr); | ||
907 | } else { | 955 | } else { |
908 | brcmf_dbg(TRACE, "use existing\n"); | 956 | brcmf_dbg(TRACE, "use existing\n"); |
909 | WARN_ON(entry->mac_handle != mac_handle); | 957 | WARN_ON(entry->mac_handle != mac_handle); |
@@ -918,7 +966,6 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws, | |||
918 | { | 966 | { |
919 | struct brcmf_fws_mac_descriptor *entry; | 967 | struct brcmf_fws_mac_descriptor *entry; |
920 | u8 mac_handle; | 968 | u8 mac_handle; |
921 | int i; | ||
922 | 969 | ||
923 | mac_handle = data[0]; | 970 | mac_handle = data[0]; |
924 | entry = &fws->desc.nodes[mac_handle & 0x1F]; | 971 | entry = &fws->desc.nodes[mac_handle & 0x1F]; |
@@ -926,16 +973,18 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws, | |||
926 | fws->stats.mac_ps_update_failed++; | 973 | fws->stats.mac_ps_update_failed++; |
927 | return -ESRCH; | 974 | return -ESRCH; |
928 | } | 975 | } |
929 | 976 | /* a state update should wipe old credits */ | |
930 | /* a state update should wipe old credits? */ | ||
931 | entry->requested_credit = 0; | 977 | entry->requested_credit = 0; |
978 | entry->requested_packet = 0; | ||
932 | if (type == BRCMF_FWS_TYPE_MAC_OPEN) { | 979 | if (type == BRCMF_FWS_TYPE_MAC_OPEN) { |
933 | entry->state = BRCMF_FWS_STATE_OPEN; | 980 | entry->state = BRCMF_FWS_STATE_OPEN; |
934 | return BRCMF_FWS_RET_OK_SCHEDULE; | 981 | return BRCMF_FWS_RET_OK_SCHEDULE; |
935 | } else { | 982 | } else { |
936 | entry->state = BRCMF_FWS_STATE_CLOSE; | 983 | entry->state = BRCMF_FWS_STATE_CLOSE; |
937 | for (i = BRCMF_FWS_FIFO_AC_BE; i < NL80211_NUM_ACS; i++) | 984 | brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BK, false); |
938 | brcmf_fws_tim_update(fws, entry, i); | 985 | brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BE, false); |
986 | brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VI, false); | ||
987 | brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VO, true); | ||
939 | } | 988 | } |
940 | return BRCMF_FWS_RET_OK_NOSCHEDULE; | 989 | return BRCMF_FWS_RET_OK_NOSCHEDULE; |
941 | } | 990 | } |
@@ -949,7 +998,6 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws, | |||
949 | 998 | ||
950 | ifidx = data[0]; | 999 | ifidx = data[0]; |
951 | 1000 | ||
952 | brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx); | ||
953 | if (ifidx >= BRCMF_MAX_IFS) { | 1001 | if (ifidx >= BRCMF_MAX_IFS) { |
954 | ret = -ERANGE; | 1002 | ret = -ERANGE; |
955 | goto fail; | 1003 | goto fail; |
@@ -961,6 +1009,8 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws, | |||
961 | goto fail; | 1009 | goto fail; |
962 | } | 1010 | } |
963 | 1011 | ||
1012 | brcmf_dbg(TRACE, "%s (%d): %s\n", brcmf_fws_get_tlv_name(type), type, | ||
1013 | entry->name); | ||
964 | switch (type) { | 1014 | switch (type) { |
965 | case BRCMF_FWS_TYPE_INTERFACE_OPEN: | 1015 | case BRCMF_FWS_TYPE_INTERFACE_OPEN: |
966 | entry->state = BRCMF_FWS_STATE_OPEN; | 1016 | entry->state = BRCMF_FWS_STATE_OPEN; |
@@ -991,6 +1041,9 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type, | |||
991 | return -ESRCH; | 1041 | return -ESRCH; |
992 | } | 1042 | } |
993 | 1043 | ||
1044 | brcmf_dbg(TRACE, "%s (%d): %s cnt %d bmp %d\n", | ||
1045 | brcmf_fws_get_tlv_name(type), type, entry->name, | ||
1046 | data[0], data[2]); | ||
994 | if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT) | 1047 | if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT) |
995 | entry->requested_credit = data[0]; | 1048 | entry->requested_credit = data[0]; |
996 | else | 1049 | else |
@@ -1000,6 +1053,37 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type, | |||
1000 | return BRCMF_FWS_RET_OK_SCHEDULE; | 1053 | return BRCMF_FWS_RET_OK_SCHEDULE; |
1001 | } | 1054 | } |
1002 | 1055 | ||
1056 | static void | ||
1057 | brcmf_fws_macdesc_use_req_credit(struct brcmf_fws_mac_descriptor *entry, | ||
1058 | struct sk_buff *skb) | ||
1059 | { | ||
1060 | if (entry->requested_credit > 0) { | ||
1061 | entry->requested_credit--; | ||
1062 | brcmf_skb_if_flags_set_field(skb, REQUESTED, 1); | ||
1063 | brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 1); | ||
1064 | if (entry->state != BRCMF_FWS_STATE_CLOSE) | ||
1065 | brcmf_err("requested credit set while mac not closed!\n"); | ||
1066 | } else if (entry->requested_packet > 0) { | ||
1067 | entry->requested_packet--; | ||
1068 | brcmf_skb_if_flags_set_field(skb, REQUESTED, 1); | ||
1069 | brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 0); | ||
1070 | if (entry->state != BRCMF_FWS_STATE_CLOSE) | ||
1071 | brcmf_err("requested packet set while mac not closed!\n"); | ||
1072 | } else { | ||
1073 | brcmf_skb_if_flags_set_field(skb, REQUESTED, 0); | ||
1074 | brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 0); | ||
1075 | } | ||
1076 | } | ||
1077 | |||
1078 | static void brcmf_fws_macdesc_return_req_credit(struct sk_buff *skb) | ||
1079 | { | ||
1080 | struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; | ||
1081 | |||
1082 | if ((brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) && | ||
1083 | (entry->state == BRCMF_FWS_STATE_CLOSE)) | ||
1084 | entry->requested_credit++; | ||
1085 | } | ||
1086 | |||
1003 | static void brcmf_fws_return_credits(struct brcmf_fws_info *fws, | 1087 | static void brcmf_fws_return_credits(struct brcmf_fws_info *fws, |
1004 | u8 fifo, u8 credits) | 1088 | u8 fifo, u8 credits) |
1005 | { | 1089 | { |
@@ -1010,6 +1094,8 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws, | |||
1010 | if (!credits) | 1094 | if (!credits) |
1011 | return; | 1095 | return; |
1012 | 1096 | ||
1097 | fws->fifo_credit_map |= 1 << fifo; | ||
1098 | |||
1013 | if ((fifo == BRCMF_FWS_FIFO_AC_BE) && | 1099 | if ((fifo == BRCMF_FWS_FIFO_AC_BE) && |
1014 | (fws->credits_borrowed[0])) { | 1100 | (fws->credits_borrowed[0])) { |
1015 | for (lender_ac = BRCMF_FWS_FIFO_AC_VO; lender_ac >= 0; | 1101 | for (lender_ac = BRCMF_FWS_FIFO_AC_VO; lender_ac >= 0; |
@@ -1031,7 +1117,6 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws, | |||
1031 | } | 1117 | } |
1032 | } | 1118 | } |
1033 | 1119 | ||
1034 | fws->fifo_credit_map |= 1 << fifo; | ||
1035 | fws->fifo_credit[fifo] += credits; | 1120 | fws->fifo_credit[fifo] += credits; |
1036 | } | 1121 | } |
1037 | 1122 | ||
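The reordered brcmf_fws_return_credits() above repays any credits that the best-effort fifo borrowed from the other access categories (walking lenders from AC_VO downwards) before topping up its own pool; brcmf_fws_borrow_credit() further below is where the borrowing happens. A simplified standalone model of that bookkeeping follows; the pool sizes and the order in which lenders are tried are illustrative only.

#include <stdio.h>

enum { EX_AC_BK, EX_AC_BE, EX_AC_VI, EX_AC_VO, EX_AC_COUNT };

static int fifo_credit[EX_AC_COUNT] = { 1, 0, 2, 2 };
static int credits_borrowed[EX_AC_COUNT];	/* outstanding loans per lender */

/* borrow one credit for EX_AC_BE from the first lender that has one */
static int ex_borrow_credit(void)
{
	int lender;

	for (lender = EX_AC_VO; lender >= 0; lender--) {
		if (lender != EX_AC_BE && fifo_credit[lender]) {
			credits_borrowed[lender]++;
			fifo_credit[lender]--;
			return 0;
		}
	}
	return -1;	/* nothing left to borrow */
}

/* credits coming back for EX_AC_BE repay the lenders first */
static void ex_return_be_credits(int credits)
{
	int lender;

	for (lender = EX_AC_VO; lender >= 0 && credits; lender--) {
		while (credits_borrowed[lender] && credits) {
			credits_borrowed[lender]--;
			fifo_credit[lender]++;
			credits--;
		}
	}
	fifo_credit[EX_AC_BE] += credits;	/* whatever is left is BE's own */
}

int main(void)
{
	ex_borrow_credit();		/* takes one AC_VO credit */
	ex_return_be_credits(2);	/* repays AC_VO, keeps one for BE */
	printf("BE=%d VO=%d borrowed=%d\n", fifo_credit[EX_AC_BE],
	       fifo_credit[EX_AC_VO], credits_borrowed[EX_AC_VO]);
	return 0;
}

This prints BE=1 VO=2 borrowed=0: the VO loan is settled before best-effort keeps anything for itself.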
@@ -1042,27 +1127,6 @@ static void brcmf_fws_schedule_deq(struct brcmf_fws_info *fws) | |||
1042 | queue_work(fws->fws_wq, &fws->fws_dequeue_work); | 1127 | queue_work(fws->fws_wq, &fws->fws_dequeue_work); |
1043 | } | 1128 | } |
1044 | 1129 | ||
1045 | static void brcmf_skb_pick_up_credit(struct brcmf_fws_info *fws, int fifo, | ||
1046 | struct sk_buff *p) | ||
1047 | { | ||
1048 | struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(p)->mac; | ||
1049 | |||
1050 | if (brcmf_skbcb(p)->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) { | ||
1051 | if (fws->fcmode != BRCMF_FWS_FCMODE_IMPLIED_CREDIT) | ||
1052 | return; | ||
1053 | brcmf_fws_return_credits(fws, fifo, 1); | ||
1054 | } else { | ||
1055 | /* | ||
1056 | * if this packet did not count against FIFO credit, it | ||
1057 | * must have taken a requested_credit from the destination | ||
1058 | * entry (for pspoll etc.) | ||
1059 | */ | ||
1060 | if (!brcmf_skb_if_flags_get_field(p, REQUESTED)) | ||
1061 | entry->requested_credit++; | ||
1062 | } | ||
1063 | brcmf_fws_schedule_deq(fws); | ||
1064 | } | ||
1065 | |||
1066 | static int brcmf_fws_enq(struct brcmf_fws_info *fws, | 1130 | static int brcmf_fws_enq(struct brcmf_fws_info *fws, |
1067 | enum brcmf_fws_skb_state state, int fifo, | 1131 | enum brcmf_fws_skb_state state, int fifo, |
1068 | struct sk_buff *p) | 1132 | struct sk_buff *p) |
@@ -1078,7 +1142,7 @@ static int brcmf_fws_enq(struct brcmf_fws_info *fws, | |||
1078 | return -ENOENT; | 1142 | return -ENOENT; |
1079 | } | 1143 | } |
1080 | 1144 | ||
1081 | brcmf_dbg(TRACE, "enter: ea=%pM, qlen=%d\n", entry->ea, entry->psq.len); | 1145 | brcmf_dbg(DATA, "enter: fifo %d skb %p\n", fifo, p); |
1082 | if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) { | 1146 | if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) { |
1083 | prec += 1; | 1147 | prec += 1; |
1084 | qfull_stat = &fws->stats.supprq_full_error; | 1148 | qfull_stat = &fws->stats.supprq_full_error; |
@@ -1095,14 +1159,12 @@ static int brcmf_fws_enq(struct brcmf_fws_info *fws, | |||
1095 | 1159 | ||
1096 | /* update the sk_buff state */ | 1160 | /* update the sk_buff state */ |
1097 | brcmf_skbcb(p)->state = state; | 1161 | brcmf_skbcb(p)->state = state; |
1098 | if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) | ||
1099 | entry->suppress_count++; | ||
1100 | 1162 | ||
1101 | /* | 1163 | /* |
1102 | * A packet has been pushed so update traffic | 1164 | * A packet has been pushed so update traffic |
1103 | * availability bitmap, if applicable | 1165 | * availability bitmap, if applicable |
1104 | */ | 1166 | */ |
1105 | brcmf_fws_tim_update(fws, entry, fifo); | 1167 | brcmf_fws_tim_update(fws, entry, fifo, true); |
1106 | brcmf_fws_flow_control_check(fws, &entry->psq, | 1168 | brcmf_fws_flow_control_check(fws, &entry->psq, |
1107 | brcmf_skb_if_flags_get_field(p, INDEX)); | 1169 | brcmf_skb_if_flags_get_field(p, INDEX)); |
1108 | return 0; | 1170 | return 0; |
@@ -1113,7 +1175,6 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo) | |||
1113 | struct brcmf_fws_mac_descriptor *table; | 1175 | struct brcmf_fws_mac_descriptor *table; |
1114 | struct brcmf_fws_mac_descriptor *entry; | 1176 | struct brcmf_fws_mac_descriptor *entry; |
1115 | struct sk_buff *p; | 1177 | struct sk_buff *p; |
1116 | int use_credit = 1; | ||
1117 | int num_nodes; | 1178 | int num_nodes; |
1118 | int node_pos; | 1179 | int node_pos; |
1119 | int prec_out; | 1180 | int prec_out; |
@@ -1127,7 +1188,7 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo) | |||
1127 | for (i = 0; i < num_nodes; i++) { | 1188 | for (i = 0; i < num_nodes; i++) { |
1128 | entry = &table[(node_pos + i) % num_nodes]; | 1189 | entry = &table[(node_pos + i) % num_nodes]; |
1129 | if (!entry->occupied || | 1190 | if (!entry->occupied || |
1130 | brcmf_fws_mac_desc_closed(fws, entry, fifo)) | 1191 | brcmf_fws_macdesc_closed(fws, entry, fifo)) |
1131 | continue; | 1192 | continue; |
1132 | 1193 | ||
1133 | if (entry->suppressed) | 1194 | if (entry->suppressed) |
@@ -1137,9 +1198,8 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo) | |||
1137 | p = brcmu_pktq_mdeq(&entry->psq, pmsk << (fifo * 2), &prec_out); | 1198 | p = brcmu_pktq_mdeq(&entry->psq, pmsk << (fifo * 2), &prec_out); |
1138 | if (p == NULL) { | 1199 | if (p == NULL) { |
1139 | if (entry->suppressed) { | 1200 | if (entry->suppressed) { |
1140 | if (entry->suppr_transit_count > | 1201 | if (entry->suppr_transit_count) |
1141 | entry->suppress_count) | 1202 | continue; |
1142 | return NULL; | ||
1143 | entry->suppressed = false; | 1203 | entry->suppressed = false; |
1144 | p = brcmu_pktq_mdeq(&entry->psq, | 1204 | p = brcmu_pktq_mdeq(&entry->psq, |
1145 | 1 << (fifo * 2), &prec_out); | 1205 | 1 << (fifo * 2), &prec_out); |
@@ -1148,26 +1208,7 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo) | |||
1148 | if (p == NULL) | 1208 | if (p == NULL) |
1149 | continue; | 1209 | continue; |
1150 | 1210 | ||
1151 | /* did the packet come from suppress sub-queue? */ | 1211 | brcmf_fws_macdesc_use_req_credit(entry, p); |
1152 | if (entry->requested_credit > 0) { | ||
1153 | entry->requested_credit--; | ||
1154 | /* | ||
1155 | * if the packet was pulled out while destination is in | ||
1156 | * closed state but had a non-zero packets requested, | ||
1157 | * then this should not count against the FIFO credit. | ||
1158 | * That is due to the fact that the firmware will | ||
1159 | * most likely hold onto this packet until a suitable | ||
1160 | * time later to push it to the appropriate AC FIFO. | ||
1161 | */ | ||
1162 | if (entry->state == BRCMF_FWS_STATE_CLOSE) | ||
1163 | use_credit = 0; | ||
1164 | } else if (entry->requested_packet > 0) { | ||
1165 | entry->requested_packet--; | ||
1166 | brcmf_skb_if_flags_set_field(p, REQUESTED, 1); | ||
1167 | if (entry->state == BRCMF_FWS_STATE_CLOSE) | ||
1168 | use_credit = 0; | ||
1169 | } | ||
1170 | brcmf_skb_if_flags_set_field(p, CREDITCHECK, use_credit); | ||
1171 | 1212 | ||
1172 | /* move dequeue position to ensure fair round-robin */ | 1213 | /* move dequeue position to ensure fair round-robin */ |
1173 | fws->deq_node_pos[fifo] = (node_pos + i + 1) % num_nodes; | 1214 | fws->deq_node_pos[fifo] = (node_pos + i + 1) % num_nodes; |
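The deq_node_pos update on the line above is what keeps the dequeue fair: once a node delivers a packet, the remembered position moves one past it, so the next pass starts with its neighbour instead of revisiting the same busy node. A small standalone model of that round-robin scan, with invented per-node queue counts:

#include <stdio.h>

#define EX_NODES 4

static int pending[EX_NODES] = { 2, 0, 1, 3 };	/* queued packets per node */
static int deq_pos;				/* where the next scan starts */

/* dequeue one packet round-robin; returns the node it came from, or -1 */
static int ex_deq_one(void)
{
	int i, node;

	for (i = 0; i < EX_NODES; i++) {
		node = (deq_pos + i) % EX_NODES;
		if (!pending[node])
			continue;
		pending[node]--;
		deq_pos = (node + 1) % EX_NODES;	/* resume after this node */
		return node;
	}
	return -1;
}

int main(void)
{
	int node;

	while ((node = ex_deq_one()) >= 0)
		printf("dequeued from node %d\n", node);
	return 0;
}

Running it drains the nodes in the order 0, 2, 3, 0, 3, 3 rather than emptying node 0 first.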
@@ -1179,7 +1220,7 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo) | |||
1179 | * A packet has been picked up, update traffic | 1220 | * A packet has been picked up, update traffic |
1180 | * availability bitmap, if applicable | 1221 | * availability bitmap, if applicable |
1181 | */ | 1222 | */ |
1182 | brcmf_fws_tim_update(fws, entry, fifo); | 1223 | brcmf_fws_tim_update(fws, entry, fifo, false); |
1183 | 1224 | ||
1184 | /* | 1225 | /* |
1185 | * decrement total enqueued fifo packets and | 1226 | * decrement total enqueued fifo packets and |
@@ -1192,7 +1233,7 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo) | |||
1192 | } | 1233 | } |
1193 | p = NULL; | 1234 | p = NULL; |
1194 | done: | 1235 | done: |
1195 | brcmf_dbg(TRACE, "exit: fifo %d skb %p\n", fifo, p); | 1236 | brcmf_dbg(DATA, "exit: fifo %d skb %p\n", fifo, p); |
1196 | return p; | 1237 | return p; |
1197 | } | 1238 | } |
1198 | 1239 | ||
@@ -1202,22 +1243,26 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo, | |||
1202 | struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; | 1243 | struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; |
1203 | u32 hslot; | 1244 | u32 hslot; |
1204 | int ret; | 1245 | int ret; |
1246 | u8 ifidx; | ||
1205 | 1247 | ||
1206 | hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); | 1248 | hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); |
1207 | 1249 | ||
1208 | /* this packet was suppressed */ | 1250 | /* this packet was suppressed */ |
1209 | if (!entry->suppressed || entry->generation != genbit) { | 1251 | if (!entry->suppressed) { |
1210 | entry->suppressed = true; | 1252 | entry->suppressed = true; |
1211 | entry->suppress_count = brcmu_pktq_mlen(&entry->psq, | ||
1212 | 1 << (fifo * 2 + 1)); | ||
1213 | entry->suppr_transit_count = entry->transit_count; | 1253 | entry->suppr_transit_count = entry->transit_count; |
1254 | brcmf_dbg(DATA, "suppress %s: transit %d\n", | ||
1255 | entry->name, entry->transit_count); | ||
1214 | } | 1256 | } |
1215 | 1257 | ||
1216 | entry->generation = genbit; | 1258 | entry->generation = genbit; |
1217 | 1259 | ||
1218 | ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo, skb); | 1260 | ret = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb); |
1261 | if (ret == 0) | ||
1262 | ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo, | ||
1263 | skb); | ||
1219 | if (ret != 0) { | 1264 | if (ret != 0) { |
1220 | /* suppress q is full, drop this packet */ | 1265 | /* suppress q is full or hdrpull failed, drop this packet */ |
1221 | brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, | 1266 | brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, |
1222 | true); | 1267 | true); |
1223 | } else { | 1268 | } else { |
@@ -1225,26 +1270,24 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo, | |||
1225 | * Mark suppressed to avoid a double free during | 1270 | * Mark suppressed to avoid a double free during |
1226 | * wlfc cleanup | 1271 | * wlfc cleanup |
1227 | */ | 1272 | */ |
1228 | brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot, | 1273 | brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot); |
1229 | genbit); | ||
1230 | entry->suppress_count++; | ||
1231 | } | 1274 | } |
1232 | 1275 | ||
1233 | return ret; | 1276 | return ret; |
1234 | } | 1277 | } |
1235 | 1278 | ||
1236 | static int | 1279 | static int |
1237 | brcmf_fws_txstatus_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot, | 1280 | brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot, |
1238 | u32 genbit) | 1281 | u32 genbit) |
1239 | { | 1282 | { |
1240 | u32 fifo; | 1283 | u32 fifo; |
1241 | int ret; | 1284 | int ret; |
1242 | bool remove_from_hanger = true; | 1285 | bool remove_from_hanger = true; |
1243 | struct sk_buff *skb; | 1286 | struct sk_buff *skb; |
1287 | struct brcmf_skbuff_cb *skcb; | ||
1244 | struct brcmf_fws_mac_descriptor *entry = NULL; | 1288 | struct brcmf_fws_mac_descriptor *entry = NULL; |
1245 | 1289 | ||
1246 | brcmf_dbg(TRACE, "status: flags=0x%X, hslot=%d\n", | 1290 | brcmf_dbg(DATA, "flags %d\n", flags); |
1247 | flags, hslot); | ||
1248 | 1291 | ||
1249 | if (flags == BRCMF_FWS_TXSTATUS_DISCARD) | 1292 | if (flags == BRCMF_FWS_TXSTATUS_DISCARD) |
1250 | fws->stats.txs_discard++; | 1293 | fws->stats.txs_discard++; |
@@ -1256,6 +1299,8 @@ brcmf_fws_txstatus_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot, | |||
1256 | remove_from_hanger = false; | 1299 | remove_from_hanger = false; |
1257 | } else if (flags == BRCMF_FWS_TXSTATUS_FW_TOSSED) | 1300 | } else if (flags == BRCMF_FWS_TXSTATUS_FW_TOSSED) |
1258 | fws->stats.txs_tossed++; | 1301 | fws->stats.txs_tossed++; |
1302 | else if (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED) | ||
1303 | fws->stats.txs_host_tossed++; | ||
1259 | else | 1304 | else |
1260 | brcmf_err("unexpected txstatus\n"); | 1305 | brcmf_err("unexpected txstatus\n"); |
1261 | 1306 | ||
@@ -1266,26 +1311,35 @@ brcmf_fws_txstatus_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot, | |||
1266 | return ret; | 1311 | return ret; |
1267 | } | 1312 | } |
1268 | 1313 | ||
1269 | entry = brcmf_skbcb(skb)->mac; | 1314 | skcb = brcmf_skbcb(skb); |
1315 | entry = skcb->mac; | ||
1270 | if (WARN_ON(!entry)) { | 1316 | if (WARN_ON(!entry)) { |
1271 | brcmu_pkt_buf_free_skb(skb); | 1317 | brcmu_pkt_buf_free_skb(skb); |
1272 | return -EINVAL; | 1318 | return -EINVAL; |
1273 | } | 1319 | } |
1320 | entry->transit_count--; | ||
1321 | if (entry->suppressed && entry->suppr_transit_count) | ||
1322 | entry->suppr_transit_count--; | ||
1323 | |||
1324 | brcmf_dbg(DATA, "%s flags %X htod %X\n", entry->name, skcb->if_flags, | ||
1325 | skcb->htod); | ||
1274 | 1326 | ||
1275 | /* pick up the implicit credit from this packet */ | 1327 | /* pick up the implicit credit from this packet */ |
1276 | fifo = brcmf_skb_htod_tag_get_field(skb, FIFO); | 1328 | fifo = brcmf_skb_htod_tag_get_field(skb, FIFO); |
1277 | brcmf_skb_pick_up_credit(fws, fifo, skb); | 1329 | if ((fws->fcmode == BRCMF_FWS_FCMODE_IMPLIED_CREDIT) || |
1330 | (brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) || | ||
1331 | (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED)) { | ||
1332 | brcmf_fws_return_credits(fws, fifo, 1); | ||
1333 | brcmf_fws_schedule_deq(fws); | ||
1334 | } | ||
1335 | brcmf_fws_macdesc_return_req_credit(skb); | ||
1278 | 1336 | ||
1279 | if (!remove_from_hanger) | 1337 | if (!remove_from_hanger) |
1280 | ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit); | 1338 | ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit); |
1281 | 1339 | ||
1282 | if (remove_from_hanger || ret) { | 1340 | if (remove_from_hanger || ret) |
1283 | entry->transit_count--; | ||
1284 | if (entry->suppressed) | ||
1285 | entry->suppr_transit_count--; | ||
1286 | |||
1287 | brcmf_txfinalize(fws->drvr, skb, true); | 1341 | brcmf_txfinalize(fws->drvr, skb, true); |
1288 | } | 1342 | |
1289 | return 0; | 1343 | return 0; |
1290 | } | 1344 | } |
1291 | 1345 | ||
@@ -1299,11 +1353,11 @@ static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws, | |||
1299 | return BRCMF_FWS_RET_OK_NOSCHEDULE; | 1353 | return BRCMF_FWS_RET_OK_NOSCHEDULE; |
1300 | } | 1354 | } |
1301 | 1355 | ||
1302 | brcmf_dbg(TRACE, "enter: data %pM\n", data); | 1356 | brcmf_dbg(DATA, "enter: data %pM\n", data); |
1303 | for (i = 0; i < BRCMF_FWS_FIFO_COUNT; i++) | 1357 | for (i = 0; i < BRCMF_FWS_FIFO_COUNT; i++) |
1304 | brcmf_fws_return_credits(fws, i, data[i]); | 1358 | brcmf_fws_return_credits(fws, i, data[i]); |
1305 | 1359 | ||
1306 | brcmf_dbg(INFO, "map: credit %x delay %x\n", fws->fifo_credit_map, | 1360 | brcmf_dbg(DATA, "map: credit %x delay %x\n", fws->fifo_credit_map, |
1307 | fws->fifo_delay_map); | 1361 | fws->fifo_delay_map); |
1308 | return BRCMF_FWS_RET_OK_SCHEDULE; | 1362 | return BRCMF_FWS_RET_OK_SCHEDULE; |
1309 | } | 1363 | } |
@@ -1323,7 +1377,7 @@ static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data) | |||
1323 | hslot = brcmf_txstatus_get_field(status, HSLOT); | 1377 | hslot = brcmf_txstatus_get_field(status, HSLOT); |
1324 | genbit = brcmf_txstatus_get_field(status, GENERATION); | 1378 | genbit = brcmf_txstatus_get_field(status, GENERATION); |
1325 | 1379 | ||
1326 | return brcmf_fws_txstatus_process(fws, flags, hslot, genbit); | 1380 | return brcmf_fws_txs_process(fws, flags, hslot, genbit); |
1327 | } | 1381 | } |
1328 | 1382 | ||
1329 | static int brcmf_fws_dbg_seqnum_check(struct brcmf_fws_info *fws, u8 *data) | 1383 | static int brcmf_fws_dbg_seqnum_check(struct brcmf_fws_info *fws, u8 *data) |
@@ -1331,7 +1385,7 @@ static int brcmf_fws_dbg_seqnum_check(struct brcmf_fws_info *fws, u8 *data) | |||
1331 | __le32 timestamp; | 1385 | __le32 timestamp; |
1332 | 1386 | ||
1333 | memcpy(×tamp, &data[2], sizeof(timestamp)); | 1387 | memcpy(×tamp, &data[2], sizeof(timestamp)); |
1334 | brcmf_dbg(INFO, "received: seq %d, timestamp %d\n", data[1], | 1388 | brcmf_dbg(CTL, "received: seq %d, timestamp %d\n", data[1], |
1335 | le32_to_cpu(timestamp)); | 1389 | le32_to_cpu(timestamp)); |
1336 | return 0; | 1390 | return 0; |
1337 | } | 1391 | } |
@@ -1364,6 +1418,10 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp, | |||
1364 | brcmf_err("event payload too small (%d)\n", e->datalen); | 1418 | brcmf_err("event payload too small (%d)\n", e->datalen); |
1365 | return -EINVAL; | 1419 | return -EINVAL; |
1366 | } | 1420 | } |
1421 | if (fws->creditmap_received) | ||
1422 | return 0; | ||
1423 | |||
1424 | fws->creditmap_received = true; | ||
1367 | 1425 | ||
1368 | brcmf_dbg(TRACE, "enter: credits %pM\n", credits); | 1426 | brcmf_dbg(TRACE, "enter: credits %pM\n", credits); |
1369 | brcmf_fws_lock(ifp->drvr, flags); | 1427 | brcmf_fws_lock(ifp->drvr, flags); |
@@ -1379,6 +1437,20 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp, | |||
1379 | return 0; | 1437 | return 0; |
1380 | } | 1438 | } |
1381 | 1439 | ||
1440 | static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp, | ||
1441 | const struct brcmf_event_msg *e, | ||
1442 | void *data) | ||
1443 | { | ||
1444 | struct brcmf_fws_info *fws = ifp->drvr->fws; | ||
1445 | ulong flags; | ||
1446 | |||
1447 | brcmf_fws_lock(ifp->drvr, flags); | ||
1448 | if (fws) | ||
1449 | fws->bcmc_credit_check = true; | ||
1450 | brcmf_fws_unlock(ifp->drvr, flags); | ||
1451 | return 0; | ||
1452 | } | ||
1453 | |||
1382 | int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len, | 1454 | int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len, |
1383 | struct sk_buff *skb) | 1455 | struct sk_buff *skb) |
1384 | { | 1456 | { |
@@ -1392,7 +1464,7 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len, | |||
1392 | s32 status; | 1464 | s32 status; |
1393 | s32 err; | 1465 | s32 err; |
1394 | 1466 | ||
1395 | brcmf_dbg(TRACE, "enter: ifidx %d, skblen %u, sig %d\n", | 1467 | brcmf_dbg(HDRS, "enter: ifidx %d, skblen %u, sig %d\n", |
1396 | ifidx, skb->len, signal_len); | 1468 | ifidx, skb->len, signal_len); |
1397 | 1469 | ||
1398 | WARN_ON(signal_len > skb->len); | 1470 | WARN_ON(signal_len > skb->len); |
@@ -1426,14 +1498,15 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len, | |||
1426 | len = signal_data[1]; | 1498 | len = signal_data[1]; |
1427 | data = signal_data + 2; | 1499 | data = signal_data + 2; |
1428 | 1500 | ||
1429 | brcmf_dbg(INFO, "tlv type=%d (%s), len=%d, data[0]=%d\n", type, | 1501 | brcmf_dbg(HDRS, "tlv type=%s (%d), len=%d (%d)\n", |
1430 | brcmf_fws_get_tlv_name(type), len, *data); | 1502 | brcmf_fws_get_tlv_name(type), type, len, |
1503 | brcmf_fws_get_tlv_len(fws, type)); | ||
1431 | 1504 | ||
1432 | /* abort parsing when length invalid */ | 1505 | /* abort parsing when length invalid */ |
1433 | if (data_len < len + 2) | 1506 | if (data_len < len + 2) |
1434 | break; | 1507 | break; |
1435 | 1508 | ||
1436 | if (len != brcmf_fws_get_tlv_len(fws, type)) | 1509 | if (len < brcmf_fws_get_tlv_len(fws, type)) |
1437 | break; | 1510 | break; |
1438 | 1511 | ||
1439 | err = BRCMF_FWS_RET_OK_NOSCHEDULE; | 1512 | err = BRCMF_FWS_RET_OK_NOSCHEDULE; |
@@ -1502,64 +1575,32 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len, | |||
1502 | return 0; | 1575 | return 0; |
1503 | } | 1576 | } |
1504 | 1577 | ||
1505 | static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb) | ||
1506 | { | ||
1507 | struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; | ||
1508 | u8 *wlh; | ||
1509 | u16 data_offset = 0; | ||
1510 | u8 fillers; | ||
1511 | __le32 pkttag = cpu_to_le32(brcmf_skbcb(skb)->htod); | ||
1512 | |||
1513 | brcmf_dbg(TRACE, "enter: ea=%pM, ifidx=%u, pkttag=0x%08X\n", | ||
1514 | entry->ea, entry->interface_id, le32_to_cpu(pkttag)); | ||
1515 | if (entry->send_tim_signal) | ||
1516 | data_offset += 2 + BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN; | ||
1517 | |||
1518 | /* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */ | ||
1519 | data_offset += 2 + BRCMF_FWS_TYPE_PKTTAG_LEN; | ||
1520 | fillers = round_up(data_offset, 4) - data_offset; | ||
1521 | data_offset += fillers; | ||
1522 | |||
1523 | skb_push(skb, data_offset); | ||
1524 | wlh = skb->data; | ||
1525 | |||
1526 | wlh[0] = BRCMF_FWS_TYPE_PKTTAG; | ||
1527 | wlh[1] = BRCMF_FWS_TYPE_PKTTAG_LEN; | ||
1528 | memcpy(&wlh[2], &pkttag, sizeof(pkttag)); | ||
1529 | wlh += BRCMF_FWS_TYPE_PKTTAG_LEN + 2; | ||
1530 | |||
1531 | if (entry->send_tim_signal) { | ||
1532 | entry->send_tim_signal = 0; | ||
1533 | wlh[0] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP; | ||
1534 | wlh[1] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN; | ||
1535 | wlh[2] = entry->mac_handle; | ||
1536 | wlh[3] = entry->traffic_pending_bmp; | ||
1537 | wlh += BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2; | ||
1538 | entry->traffic_lastreported_bmp = entry->traffic_pending_bmp; | ||
1539 | } | ||
1540 | if (fillers) | ||
1541 | memset(wlh, BRCMF_FWS_TYPE_FILLER, fillers); | ||
1542 | |||
1543 | brcmf_proto_hdrpush(fws->drvr, brcmf_skb_if_flags_get_field(skb, INDEX), | ||
1544 | data_offset >> 2, skb); | ||
1545 | return 0; | ||
1546 | } | ||
1547 | |||
1548 | static int brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo, | 1578 | static int brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo, |
1549 | struct sk_buff *p) | 1579 | struct sk_buff *p) |
1550 | { | 1580 | { |
1551 | struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p); | 1581 | struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p); |
1552 | struct brcmf_fws_mac_descriptor *entry = skcb->mac; | 1582 | struct brcmf_fws_mac_descriptor *entry = skcb->mac; |
1553 | int rc = 0; | 1583 | int rc = 0; |
1554 | bool header_needed; | 1584 | bool first_time; |
1555 | int hslot = BRCMF_FWS_HANGER_MAXITEMS; | 1585 | int hslot = BRCMF_FWS_HANGER_MAXITEMS; |
1556 | u8 free_ctr; | 1586 | u8 free_ctr; |
1557 | u8 ifidx; | ||
1558 | u8 flags; | 1587 | u8 flags; |
1559 | 1588 | ||
1560 | header_needed = skcb->state != BRCMF_FWS_SKBSTATE_SUPPRESSED; | 1589 | first_time = skcb->state != BRCMF_FWS_SKBSTATE_SUPPRESSED; |
1561 | 1590 | ||
1562 | if (header_needed) { | 1591 | brcmf_skb_if_flags_set_field(p, TRANSMIT, 1); |
1592 | brcmf_skb_htod_tag_set_field(p, FIFO, fifo); | ||
1593 | brcmf_skb_htod_tag_set_field(p, GENERATION, entry->generation); | ||
1594 | flags = BRCMF_FWS_HTOD_FLAG_PKTFROMHOST; | ||
1595 | if (brcmf_skb_if_flags_get_field(p, REQUESTED)) { | ||
1596 | /* | ||
1597 | * Indicate that this packet is being sent in response to an | ||
1598 | * explicit request from the firmware side. | ||
1599 | */ | ||
1600 | flags |= BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED; | ||
1601 | } | ||
1602 | brcmf_skb_htod_tag_set_field(p, FLAGS, flags); | ||
1603 | if (first_time) { | ||
1563 | /* obtaining free slot may fail, but that will be caught | 1604 | /* obtaining free slot may fail, but that will be caught |
1564 | * by the hanger push. This assures the packet has a BDC | 1605 | * by the hanger push. This assures the packet has a BDC |
1565 | * header upon return. | 1606 | * header upon return. |
@@ -1568,47 +1609,20 @@ static int brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo, | |||
1568 | free_ctr = entry->seq[fifo]; | 1609 | free_ctr = entry->seq[fifo]; |
1569 | brcmf_skb_htod_tag_set_field(p, HSLOT, hslot); | 1610 | brcmf_skb_htod_tag_set_field(p, HSLOT, hslot); |
1570 | brcmf_skb_htod_tag_set_field(p, FREERUN, free_ctr); | 1611 | brcmf_skb_htod_tag_set_field(p, FREERUN, free_ctr); |
1571 | brcmf_skb_htod_tag_set_field(p, GENERATION, 1); | ||
1572 | entry->transit_count++; | ||
1573 | } | ||
1574 | brcmf_skb_if_flags_set_field(p, TRANSMIT, 1); | ||
1575 | brcmf_skb_htod_tag_set_field(p, FIFO, fifo); | ||
1576 | |||
1577 | flags = BRCMF_FWS_HTOD_FLAG_PKTFROMHOST; | ||
1578 | if (!(skcb->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK)) { | ||
1579 | /* | ||
1580 | Indicate that this packet is being sent in response to an | ||
1581 | explicit request from the firmware side. | ||
1582 | */ | ||
1583 | flags |= BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED; | ||
1584 | } | ||
1585 | brcmf_skb_htod_tag_set_field(p, FLAGS, flags); | ||
1586 | if (header_needed) { | ||
1587 | brcmf_fws_hdrpush(fws, p); | ||
1588 | rc = brcmf_fws_hanger_pushpkt(&fws->hanger, p, hslot); | 1612 | rc = brcmf_fws_hanger_pushpkt(&fws->hanger, p, hslot); |
1589 | if (rc) | 1613 | if (rc) |
1590 | brcmf_err("hanger push failed: rc=%d\n", rc); | 1614 | brcmf_err("hanger push failed: rc=%d\n", rc); |
1591 | } else { | ||
1592 | int gen; | ||
1593 | |||
1594 | /* remove old header */ | ||
1595 | rc = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, p); | ||
1596 | if (rc == 0) { | ||
1597 | hslot = brcmf_skb_htod_tag_get_field(p, HSLOT); | ||
1598 | brcmf_fws_hanger_get_genbit(&fws->hanger, p, | ||
1599 | hslot, &gen); | ||
1600 | brcmf_skb_htod_tag_set_field(p, GENERATION, gen); | ||
1601 | |||
1602 | /* push new header */ | ||
1603 | brcmf_fws_hdrpush(fws, p); | ||
1604 | } | ||
1605 | } | 1615 | } |
1606 | 1616 | ||
1617 | if (rc == 0) | ||
1618 | brcmf_fws_hdrpush(fws, p); | ||
1619 | |||
1607 | return rc; | 1620 | return rc; |
1608 | } | 1621 | } |
1609 | 1622 | ||
1610 | static void | 1623 | static void |
1611 | brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, struct sk_buff *skb) | 1624 | brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, |
1625 | struct sk_buff *skb, int fifo) | ||
1612 | { | 1626 | { |
1613 | /* | 1627 | /* |
1614 | put the packet back to the head of queue | 1628 | put the packet back to the head of queue |
@@ -1622,13 +1636,11 @@ brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, struct sk_buff *skb) | |||
1622 | enum brcmf_fws_skb_state state; | 1636 | enum brcmf_fws_skb_state state; |
1623 | struct sk_buff *pktout; | 1637 | struct sk_buff *pktout; |
1624 | int rc = 0; | 1638 | int rc = 0; |
1625 | int fifo; | ||
1626 | int hslot; | 1639 | int hslot; |
1627 | u8 ifidx; | ||
1628 | 1640 | ||
1629 | fifo = brcmf_skb_if_flags_get_field(skb, FIFO); | ||
1630 | state = brcmf_skbcb(skb)->state; | 1641 | state = brcmf_skbcb(skb)->state; |
1631 | entry = brcmf_skbcb(skb)->mac; | 1642 | entry = brcmf_skbcb(skb)->mac; |
1643 | hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); | ||
1632 | 1644 | ||
1633 | if (entry != NULL) { | 1645 | if (entry != NULL) { |
1634 | if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) { | 1646 | if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) { |
@@ -1640,19 +1652,6 @@ brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, struct sk_buff *skb) | |||
1640 | rc = -ENOSPC; | 1652 | rc = -ENOSPC; |
1641 | } | 1653 | } |
1642 | } else { | 1654 | } else { |
1643 | hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); | ||
1644 | |||
1645 | /* remove header first */ | ||
1646 | rc = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb); | ||
1647 | if (rc) { | ||
1648 | brcmf_err("header removal failed\n"); | ||
1649 | /* free the hanger slot */ | ||
1650 | brcmf_fws_hanger_poppkt(&fws->hanger, hslot, | ||
1651 | &pktout, true); | ||
1652 | rc = -EINVAL; | ||
1653 | goto fail; | ||
1654 | } | ||
1655 | |||
1656 | /* delay-q packets are going to delay-q */ | 1655 | /* delay-q packets are going to delay-q */ |
1657 | pktout = brcmu_pktq_penq_head(&entry->psq, | 1656 | pktout = brcmu_pktq_penq_head(&entry->psq, |
1658 | 2 * fifo, skb); | 1657 | 2 * fifo, skb); |
@@ -1668,33 +1667,30 @@ brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, struct sk_buff *skb) | |||
1668 | /* decrement sequence count */ | 1667 | /* decrement sequence count */ |
1669 | entry->seq[fifo]--; | 1668 | entry->seq[fifo]--; |
1670 | } | 1669 | } |
1671 | /* | ||
1672 | if this packet did not count against FIFO credit, it must have | ||
1673 | taken a requested_credit from the firmware (for pspoll etc.) | ||
1674 | */ | ||
1675 | if (!(brcmf_skbcb(skb)->if_flags & | ||
1676 | BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK)) | ||
1677 | entry->requested_credit++; | ||
1678 | } else { | 1670 | } else { |
1679 | brcmf_err("no mac entry linked\n"); | 1671 | brcmf_err("no mac entry linked\n"); |
1680 | rc = -ENOENT; | 1672 | rc = -ENOENT; |
1681 | } | 1673 | } |
1682 | 1674 | ||
1683 | |||
1684 | fail: | ||
1685 | if (rc) { | 1675 | if (rc) { |
1686 | brcmf_txfinalize(fws->drvr, skb, false); | ||
1687 | fws->stats.rollback_failed++; | 1676 | fws->stats.rollback_failed++; |
1688 | } else | 1677 | brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, |
1678 | hslot, 0); | ||
1679 | } else { | ||
1689 | fws->stats.rollback_success++; | 1680 | fws->stats.rollback_success++; |
1681 | brcmf_fws_return_credits(fws, fifo, 1); | ||
1682 | brcmf_fws_macdesc_return_req_credit(skb); | ||
1683 | } | ||
1690 | } | 1684 | } |
1691 | 1685 | ||
1692 | static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws) | 1686 | static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws) |
1693 | { | 1687 | { |
1694 | int lender_ac; | 1688 | int lender_ac; |
1695 | 1689 | ||
1696 | if (time_after(fws->borrow_defer_timestamp, jiffies)) | 1690 | if (time_after(fws->borrow_defer_timestamp, jiffies)) { |
1691 | fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE); | ||
1697 | return -ENAVAIL; | 1692 | return -ENAVAIL; |
1693 | } | ||
1698 | 1694 | ||
1699 | for (lender_ac = 0; lender_ac <= BRCMF_FWS_FIFO_AC_VO; lender_ac++) { | 1695 | for (lender_ac = 0; lender_ac <= BRCMF_FWS_FIFO_AC_VO; lender_ac++) { |
1700 | if (fws->fifo_credit[lender_ac]) { | 1696 | if (fws->fifo_credit[lender_ac]) { |
@@ -1702,10 +1698,12 @@ static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws) | |||
1702 | fws->fifo_credit[lender_ac]--; | 1698 | fws->fifo_credit[lender_ac]--; |
1703 | if (fws->fifo_credit[lender_ac] == 0) | 1699 | if (fws->fifo_credit[lender_ac] == 0) |
1704 | fws->fifo_credit_map &= ~(1 << lender_ac); | 1700 | fws->fifo_credit_map &= ~(1 << lender_ac); |
1705 | brcmf_dbg(TRACE, "borrow credit from: %d\n", lender_ac); | 1701 | fws->fifo_credit_map |= (1 << BRCMF_FWS_FIFO_AC_BE); |
1702 | brcmf_dbg(DATA, "borrow credit from: %d\n", lender_ac); | ||
1706 | return 0; | 1703 | return 0; |
1707 | } | 1704 | } |
1708 | } | 1705 | } |
1706 | fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE); | ||
1709 | return -ENAVAIL; | 1707 | return -ENAVAIL; |
1710 | } | 1708 | } |
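brcmf_fws_borrow_credit above walks the other access categories, takes a single credit so a best-effort frame can still be sent, and keeps fifo_credit_map in sync as pools empty. The userspace model below illustrates that borrow loop; the AC ordering, initial credit values and bitmap handling are simplified assumptions, not the driver's exact behaviour.

#include <stdio.h>

enum { AC_BK, AC_BE, AC_VI, AC_VO, AC_COUNT };

static int fifo_credit[AC_COUNT] = { 0, 0, 3, 1 };
static unsigned int fifo_credit_map = (1u << AC_VI) | (1u << AC_VO);

/* Take one credit from any AC up to VO so a BE frame can still go out. */
static int borrow_credit(void)
{
	int lender;

	for (lender = 0; lender <= AC_VO; lender++) {
		if (fifo_credit[lender] > 0) {
			fifo_credit[lender]--;
			if (fifo_credit[lender] == 0)
				fifo_credit_map &= ~(1u << lender);
			/* mark BE as serviceable while the loan is outstanding */
			fifo_credit_map |= 1u << AC_BE;
			return lender;
		}
	}
	fifo_credit_map &= ~(1u << AC_BE);
	return -1;
}

int main(void)
{
	int lender = borrow_credit();

	if (lender >= 0)
		printf("borrowed one credit from AC %d, map=0x%x\n",
		       lender, fifo_credit_map);
	else
		printf("all credit pools depleted\n");
	return 0;
}

Note how the patch also sets or clears the AC_BE bit in fifo_credit_map depending on the outcome of the borrow, so later checks can see whether best-effort traffic can still be serviced.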
1711 | 1709 | ||
@@ -1714,33 +1712,6 @@ static int brcmf_fws_consume_credit(struct brcmf_fws_info *fws, int fifo, | |||
1714 | { | 1712 | { |
1715 | struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; | 1713 | struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; |
1716 | int *credit = &fws->fifo_credit[fifo]; | 1714 | int *credit = &fws->fifo_credit[fifo]; |
1717 | int use_credit = 1; | ||
1718 | |||
1719 | brcmf_dbg(TRACE, "enter: ac=%d, credits=%d\n", fifo, *credit); | ||
1720 | |||
1721 | if (entry->requested_credit > 0) { | ||
1722 | /* | ||
1723 | * if the packet was pulled out while destination is in | ||
1724 | * closed state but had a non-zero packets requested, | ||
1725 | * then this should not count against the FIFO credit. | ||
1726 | * That is due to the fact that the firmware will | ||
1727 | * most likely hold onto this packet until a suitable | ||
1728 | * time later to push it to the appropriate AC FIFO. | ||
1729 | */ | ||
1730 | entry->requested_credit--; | ||
1731 | if (entry->state == BRCMF_FWS_STATE_CLOSE) | ||
1732 | use_credit = 0; | ||
1733 | } else if (entry->requested_packet > 0) { | ||
1734 | entry->requested_packet--; | ||
1735 | brcmf_skb_if_flags_set_field(skb, REQUESTED, 1); | ||
1736 | if (entry->state == BRCMF_FWS_STATE_CLOSE) | ||
1737 | use_credit = 0; | ||
1738 | } | ||
1739 | brcmf_skb_if_flags_set_field(skb, CREDITCHECK, use_credit); | ||
1740 | if (!use_credit) { | ||
1741 | brcmf_dbg(TRACE, "exit: no creditcheck set\n"); | ||
1742 | return 0; | ||
1743 | } | ||
1744 | 1715 | ||
1745 | if (fifo != BRCMF_FWS_FIFO_AC_BE) | 1716 | if (fifo != BRCMF_FWS_FIFO_AC_BE) |
1746 | fws->borrow_defer_timestamp = jiffies + | 1717 | fws->borrow_defer_timestamp = jiffies + |
@@ -1748,17 +1719,22 @@ static int brcmf_fws_consume_credit(struct brcmf_fws_info *fws, int fifo, | |||
1748 | 1719 | ||
1749 | if (!(*credit)) { | 1720 | if (!(*credit)) { |
1750 | /* Try to borrow a credit from other queue */ | 1721 | /* Try to borrow a credit from other queue */ |
1751 | if (fifo == BRCMF_FWS_FIFO_AC_BE && | 1722 | if (fifo != BRCMF_FWS_FIFO_AC_BE || |
1752 | brcmf_fws_borrow_credit(fws) == 0) | 1723 | (brcmf_fws_borrow_credit(fws) != 0)) { |
1753 | return 0; | 1724 | brcmf_dbg(DATA, "ac=%d, credits depleted\n", fifo); |
1754 | 1725 | return -ENAVAIL; | |
1755 | brcmf_dbg(TRACE, "exit: ac=%d, credits depleted\n", fifo); | 1726 | } |
1756 | return -ENAVAIL; | 1727 | } else { |
1728 | (*credit)--; | ||
1729 | if (!(*credit)) | ||
1730 | fws->fifo_credit_map &= ~(1 << fifo); | ||
1757 | } | 1731 | } |
1758 | (*credit)--; | 1732 | |
1759 | if (!(*credit)) | 1733 | brcmf_fws_macdesc_use_req_credit(entry, skb); |
1760 | fws->fifo_credit_map &= ~(1 << fifo); | 1734 | |
1761 | brcmf_dbg(TRACE, "exit: ac=%d, credits=%d\n", fifo, *credit); | 1735 | brcmf_dbg(DATA, "ac=%d, credits=%02d:%02d:%02d:%02d\n", fifo, |
1736 | fws->fifo_credit[0], fws->fifo_credit[1], | ||
1737 | fws->fifo_credit[2], fws->fifo_credit[3]); | ||
1762 | return 0; | 1738 | return 0; |
1763 | } | 1739 | } |
1764 | 1740 | ||
@@ -1769,6 +1745,7 @@ static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo, | |||
1769 | struct brcmf_fws_mac_descriptor *entry; | 1745 | struct brcmf_fws_mac_descriptor *entry; |
1770 | struct brcmf_bus *bus = fws->drvr->bus_if; | 1746 | struct brcmf_bus *bus = fws->drvr->bus_if; |
1771 | int rc; | 1747 | int rc; |
1748 | u8 ifidx; | ||
1772 | 1749 | ||
1773 | entry = skcb->mac; | 1750 | entry = skcb->mac; |
1774 | if (IS_ERR(entry)) | 1751 | if (IS_ERR(entry)) |
@@ -1780,21 +1757,27 @@ static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo, | |||
1780 | goto rollback; | 1757 | goto rollback; |
1781 | } | 1758 | } |
1782 | 1759 | ||
1760 | brcmf_dbg(DATA, "%s flags %X htod %X\n", entry->name, skcb->if_flags, | ||
1761 | skcb->htod); | ||
1783 | rc = brcmf_bus_txdata(bus, skb); | 1762 | rc = brcmf_bus_txdata(bus, skb); |
1784 | if (rc < 0) | 1763 | if (rc < 0) { |
1764 | brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb); | ||
1785 | goto rollback; | 1765 | goto rollback; |
1766 | } | ||
1786 | 1767 | ||
1768 | entry->transit_count++; | ||
1769 | if (entry->suppressed) | ||
1770 | entry->suppr_transit_count++; | ||
1787 | entry->seq[fifo]++; | 1771 | entry->seq[fifo]++; |
1788 | fws->stats.pkt2bus++; | 1772 | fws->stats.pkt2bus++; |
1789 | if (brcmf_skbcb(skb)->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) { | 1773 | fws->stats.send_pkts[fifo]++; |
1790 | fws->stats.send_pkts[fifo]++; | 1774 | if (brcmf_skb_if_flags_get_field(skb, REQUESTED)) |
1791 | fws->stats.fifo_credits_sent[fifo]++; | 1775 | fws->stats.requested_sent[fifo]++; |
1792 | } | ||
1793 | 1776 | ||
1794 | return rc; | 1777 | return rc; |
1795 | 1778 | ||
1796 | rollback: | 1779 | rollback: |
1797 | brcmf_fws_rollback_toq(fws, skb); | 1780 | brcmf_fws_rollback_toq(fws, skb, fifo); |
1798 | return rc; | 1781 | return rc; |
1799 | } | 1782 | } |
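brcmf_fws_commit_skb now pulls the protocol header back off and rolls the packet back to its queue when the bus rejects it, so neither a credit nor a sequence number is leaked. The condensed model below shows that try-then-undo flow; the queue, the failing bus and the credit bookkeeping are stand-ins, not driver APIs.

#include <stdio.h>
#include <string.h>

#define QLEN 8

struct pktq {
	int items[QLEN];
	int count;
};

static int q_push_head(struct pktq *q, int pkt)
{
	if (q->count == QLEN)
		return -1;
	memmove(&q->items[1], &q->items[0], q->count * sizeof(int));
	q->items[0] = pkt;
	q->count++;
	return 0;
}

/* Pretend bus: fails for odd packet ids to exercise the rollback path. */
static int bus_txdata(int pkt)
{
	return (pkt & 1) ? -1 : 0;
}

static int commit_pkt(struct pktq *q, int pkt, int *credits, int *seq)
{
	if (bus_txdata(pkt) < 0) {
		/* rollback: requeue at the head, give the credit back */
		q_push_head(q, pkt);
		(*credits)++;
		return -1;
	}
	(*seq)++;	/* only advance the sequence on success */
	return 0;
}

int main(void)
{
	struct pktq q = { {0}, 0 };
	int credits = 1, seq = 0;

	credits--;	/* a credit is consumed before the attempt */
	if (commit_pkt(&q, 7, &credits, &seq))
		printf("tx failed: requeued, credits=%d seq=%d qlen=%d\n",
		       credits, seq, q.count);
	return 0;
}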
1800 | 1783 | ||
@@ -1826,19 +1809,25 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) | |||
1826 | 1809 | ||
1827 | /* set control buffer information */ | 1810 | /* set control buffer information */ |
1828 | skcb->if_flags = 0; | 1811 | skcb->if_flags = 0; |
1829 | skcb->mac = brcmf_fws_find_mac_desc(fws, ifp, eh->h_dest); | 1812 | skcb->mac = brcmf_fws_macdesc_find(fws, ifp, eh->h_dest); |
1830 | skcb->state = BRCMF_FWS_SKBSTATE_NEW; | 1813 | skcb->state = BRCMF_FWS_SKBSTATE_NEW; |
1831 | brcmf_skb_if_flags_set_field(skb, INDEX, ifp->ifidx); | 1814 | brcmf_skb_if_flags_set_field(skb, INDEX, ifp->ifidx); |
1832 | if (!multicast) | 1815 | if (!multicast) |
1833 | fifo = brcmf_fws_prio2fifo[skb->priority]; | 1816 | fifo = brcmf_fws_prio2fifo[skb->priority]; |
1834 | brcmf_skb_if_flags_set_field(skb, FIFO, fifo); | ||
1835 | 1817 | ||
1836 | brcmf_dbg(TRACE, "ea=%pM, multi=%d, fifo=%d\n", eh->h_dest, | 1818 | brcmf_dbg(DATA, "%s mac %pM multi %d fifo %d\n", skcb->mac->name, |
1837 | multicast, fifo); | 1819 | eh->h_dest, multicast, fifo); |
1838 | 1820 | ||
1839 | brcmf_fws_lock(drvr, flags); | 1821 | brcmf_fws_lock(drvr, flags); |
1822 | /* multicast credit support is conditional; set the flag | ||
1823 | * to false to ensure a credit is consumed below. | ||
1824 | */ | ||
1825 | if (fws->bcmc_credit_check) | ||
1826 | multicast = false; | ||
1827 | |||
1840 | if (skcb->mac->suppressed || | 1828 | if (skcb->mac->suppressed || |
1841 | brcmf_fws_mac_desc_closed(fws, skcb->mac, fifo) || | 1829 | fws->bus_flow_blocked || |
1830 | brcmf_fws_macdesc_closed(fws, skcb->mac, fifo) || | ||
1842 | brcmu_pktq_mlen(&skcb->mac->psq, 3 << (fifo * 2)) || | 1831 | brcmu_pktq_mlen(&skcb->mac->psq, 3 << (fifo * 2)) || |
1843 | (!multicast && | 1832 | (!multicast && |
1844 | brcmf_fws_consume_credit(fws, fifo, skb) < 0)) { | 1833 | brcmf_fws_consume_credit(fws, fifo, skb) < 0)) { |
@@ -1846,9 +1835,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) | |||
1846 | drvr->fws->fifo_delay_map |= 1 << fifo; | 1835 | drvr->fws->fifo_delay_map |= 1 << fifo; |
1847 | brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_DELAYED, fifo, skb); | 1836 | brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_DELAYED, fifo, skb); |
1848 | } else { | 1837 | } else { |
1849 | if (brcmf_fws_commit_skb(fws, fifo, skb)) | 1838 | brcmf_fws_commit_skb(fws, fifo, skb); |
1850 | if (!multicast) | ||
1851 | brcmf_skb_pick_up_credit(fws, fifo, skb); | ||
1852 | } | 1839 | } |
1853 | brcmf_fws_unlock(drvr, flags); | 1840 | brcmf_fws_unlock(drvr, flags); |
1854 | return 0; | 1841 | return 0; |
@@ -1862,7 +1849,7 @@ void brcmf_fws_reset_interface(struct brcmf_if *ifp) | |||
1862 | if (!entry) | 1849 | if (!entry) |
1863 | return; | 1850 | return; |
1864 | 1851 | ||
1865 | brcmf_fws_init_mac_descriptor(entry, ifp->mac_addr, ifp->ifidx); | 1852 | brcmf_fws_macdesc_init(entry, ifp->mac_addr, ifp->ifidx); |
1866 | } | 1853 | } |
1867 | 1854 | ||
1868 | void brcmf_fws_add_interface(struct brcmf_if *ifp) | 1855 | void brcmf_fws_add_interface(struct brcmf_if *ifp) |
@@ -1870,16 +1857,16 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp) | |||
1870 | struct brcmf_fws_info *fws = ifp->drvr->fws; | 1857 | struct brcmf_fws_info *fws = ifp->drvr->fws; |
1871 | struct brcmf_fws_mac_descriptor *entry; | 1858 | struct brcmf_fws_mac_descriptor *entry; |
1872 | 1859 | ||
1873 | brcmf_dbg(TRACE, "enter: idx=%d, mac=%pM\n", | ||
1874 | ifp->bssidx, ifp->mac_addr); | ||
1875 | if (!ifp->ndev || !ifp->drvr->fw_signals) | 1860 | if (!ifp->ndev || !ifp->drvr->fw_signals) |
1876 | return; | 1861 | return; |
1877 | 1862 | ||
1878 | entry = &fws->desc.iface[ifp->ifidx]; | 1863 | entry = &fws->desc.iface[ifp->ifidx]; |
1879 | ifp->fws_desc = entry; | 1864 | ifp->fws_desc = entry; |
1880 | brcmf_fws_init_mac_descriptor(entry, ifp->mac_addr, ifp->ifidx); | 1865 | brcmf_fws_macdesc_init(entry, ifp->mac_addr, ifp->ifidx); |
1866 | brcmf_fws_macdesc_set_name(fws, entry); | ||
1881 | brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT, | 1867 | brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT, |
1882 | BRCMF_FWS_PSQ_LEN); | 1868 | BRCMF_FWS_PSQ_LEN); |
1869 | brcmf_dbg(TRACE, "added %s\n", entry->name); | ||
1883 | } | 1870 | } |
1884 | 1871 | ||
1885 | void brcmf_fws_del_interface(struct brcmf_if *ifp) | 1872 | void brcmf_fws_del_interface(struct brcmf_if *ifp) |
@@ -1887,13 +1874,13 @@ void brcmf_fws_del_interface(struct brcmf_if *ifp) | |||
1887 | struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc; | 1874 | struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc; |
1888 | ulong flags; | 1875 | ulong flags; |
1889 | 1876 | ||
1890 | brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx); | ||
1891 | if (!entry) | 1877 | if (!entry) |
1892 | return; | 1878 | return; |
1893 | 1879 | ||
1894 | brcmf_fws_lock(ifp->drvr, flags); | 1880 | brcmf_fws_lock(ifp->drvr, flags); |
1895 | ifp->fws_desc = NULL; | 1881 | ifp->fws_desc = NULL; |
1896 | brcmf_fws_clear_mac_descriptor(entry); | 1882 | brcmf_dbg(TRACE, "deleting %s\n", entry->name); |
1883 | brcmf_fws_macdesc_deinit(entry); | ||
1897 | brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx); | 1884 | brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx); |
1898 | brcmf_fws_unlock(ifp->drvr, flags); | 1885 | brcmf_fws_unlock(ifp->drvr, flags); |
1899 | } | 1886 | } |
@@ -1904,39 +1891,37 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker) | |||
1904 | struct sk_buff *skb; | 1891 | struct sk_buff *skb; |
1905 | ulong flags; | 1892 | ulong flags; |
1906 | int fifo; | 1893 | int fifo; |
1907 | int credit; | ||
1908 | 1894 | ||
1909 | fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work); | 1895 | fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work); |
1910 | 1896 | ||
1911 | brcmf_dbg(TRACE, "enter: fws=%p\n", fws); | ||
1912 | brcmf_fws_lock(fws->drvr, flags); | 1897 | brcmf_fws_lock(fws->drvr, flags); |
1913 | for (fifo = NL80211_NUM_ACS; fifo >= 0; fifo--) { | 1898 | for (fifo = NL80211_NUM_ACS; fifo >= 0 && !fws->bus_flow_blocked; |
1914 | brcmf_dbg(TRACE, "fifo %d credit %d\n", fifo, | 1899 | fifo--) { |
1915 | fws->fifo_credit[fifo]); | 1900 | while ((fws->fifo_credit[fifo]) || ((!fws->bcmc_credit_check) && |
1916 | for (credit = 0; credit < fws->fifo_credit[fifo]; /* nop */) { | 1901 | (fifo == BRCMF_FWS_FIFO_BCMC))) { |
1917 | skb = brcmf_fws_deq(fws, fifo); | 1902 | skb = brcmf_fws_deq(fws, fifo); |
1918 | if (!skb || brcmf_fws_commit_skb(fws, fifo, skb)) | 1903 | if (!skb) |
1904 | break; | ||
1905 | fws->fifo_credit[fifo]--; | ||
1906 | if (brcmf_fws_commit_skb(fws, fifo, skb)) | ||
1907 | break; | ||
1908 | if (fws->bus_flow_blocked) | ||
1919 | break; | 1909 | break; |
1920 | if (brcmf_skbcb(skb)->if_flags & | ||
1921 | BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) | ||
1922 | credit++; | ||
1923 | } | 1910 | } |
1924 | if ((fifo == BRCMF_FWS_FIFO_AC_BE) && | 1911 | if ((fifo == BRCMF_FWS_FIFO_AC_BE) && |
1925 | (credit == fws->fifo_credit[fifo])) { | 1912 | (fws->fifo_credit[fifo] == 0) && |
1926 | fws->fifo_credit[fifo] -= credit; | 1913 | (!fws->bus_flow_blocked)) { |
1927 | while (brcmf_fws_borrow_credit(fws) == 0) { | 1914 | while (brcmf_fws_borrow_credit(fws) == 0) { |
1928 | skb = brcmf_fws_deq(fws, fifo); | 1915 | skb = brcmf_fws_deq(fws, fifo); |
1929 | if (!skb) { | 1916 | if (!skb) { |
1930 | brcmf_fws_return_credits(fws, fifo, 1); | 1917 | brcmf_fws_return_credits(fws, fifo, 1); |
1931 | break; | 1918 | break; |
1932 | } | 1919 | } |
1933 | if (brcmf_fws_commit_skb(fws, fifo, skb)) { | 1920 | if (brcmf_fws_commit_skb(fws, fifo, skb)) |
1934 | brcmf_fws_return_credits(fws, fifo, 1); | 1921 | break; |
1922 | if (fws->bus_flow_blocked) | ||
1935 | break; | 1923 | break; |
1936 | } | ||
1937 | } | 1924 | } |
1938 | } else { | ||
1939 | fws->fifo_credit[fifo] -= credit; | ||
1940 | } | 1925 | } |
1941 | } | 1926 | } |
1942 | brcmf_fws_unlock(fws->drvr, flags); | 1927 | brcmf_fws_unlock(fws->drvr, flags); |
@@ -1982,6 +1967,13 @@ int brcmf_fws_init(struct brcmf_pub *drvr) | |||
1982 | brcmf_err("register credit map handler failed\n"); | 1967 | brcmf_err("register credit map handler failed\n"); |
1983 | goto fail; | 1968 | goto fail; |
1984 | } | 1969 | } |
1970 | rc = brcmf_fweh_register(drvr, BRCMF_E_BCMC_CREDIT_SUPPORT, | ||
1971 | brcmf_fws_notify_bcmc_credit_support); | ||
1972 | if (rc < 0) { | ||
1973 | brcmf_err("register bcmc credit handler failed\n"); | ||
1974 | brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP); | ||
1975 | goto fail; | ||
1976 | } | ||
1985 | 1977 | ||
1986 | /* setting the iovar may fail if feature is unsupported | 1978 | /* setting the iovar may fail if feature is unsupported |
1987 | * so leave the rc as is so driver initialization can | 1979 | * so leave the rc as is so driver initialization can |
@@ -1993,19 +1985,20 @@ int brcmf_fws_init(struct brcmf_pub *drvr) | |||
1993 | } | 1985 | } |
1994 | 1986 | ||
1995 | brcmf_fws_hanger_init(&drvr->fws->hanger); | 1987 | brcmf_fws_hanger_init(&drvr->fws->hanger); |
1996 | brcmf_fws_init_mac_descriptor(&drvr->fws->desc.other, NULL, 0); | 1988 | brcmf_fws_macdesc_init(&drvr->fws->desc.other, NULL, 0); |
1989 | brcmf_fws_macdesc_set_name(drvr->fws, &drvr->fws->desc.other); | ||
1997 | brcmu_pktq_init(&drvr->fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT, | 1990 | brcmu_pktq_init(&drvr->fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT, |
1998 | BRCMF_FWS_PSQ_LEN); | 1991 | BRCMF_FWS_PSQ_LEN); |
1999 | 1992 | ||
2000 | /* create debugfs file for statistics */ | 1993 | /* create debugfs file for statistics */ |
2001 | brcmf_debugfs_create_fws_stats(drvr, &drvr->fws->stats); | 1994 | brcmf_debugfs_create_fws_stats(drvr, &drvr->fws->stats); |
2002 | 1995 | ||
2003 | /* TODO: remove upon feature delivery */ | 1996 | brcmf_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n", |
2004 | brcmf_err("%s bdcv2 tlv signaling [%x]\n", | ||
2005 | drvr->fw_signals ? "enabled" : "disabled", tlv); | 1997 | drvr->fw_signals ? "enabled" : "disabled", tlv); |
2006 | return 0; | 1998 | return 0; |
2007 | 1999 | ||
2008 | fail_event: | 2000 | fail_event: |
2001 | brcmf_fweh_unregister(drvr, BRCMF_E_BCMC_CREDIT_SUPPORT); | ||
2009 | brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP); | 2002 | brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP); |
2010 | fail: | 2003 | fail: |
2011 | brcmf_fws_deinit(drvr); | 2004 | brcmf_fws_deinit(drvr); |
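The init path now registers a second firmware event handler (BRCMF_E_BCMC_CREDIT_SUPPORT) and unregisters the first one if that registration fails, so a half-initialised state is never left behind. A generic sketch of that unwind-on-error pattern follows; the toy registry functions are not the fweh API.

#include <stdio.h>

static int register_handler(const char *name, int fail)
{
	if (fail) {
		printf("register %s: failed\n", name);
		return -1;
	}
	printf("register %s: ok\n", name);
	return 0;
}

static void unregister_handler(const char *name)
{
	printf("unregister %s\n", name);
}

static int fws_init(void)
{
	if (register_handler("FIFO_CREDIT_MAP", 0) < 0)
		return -1;

	if (register_handler("BCMC_CREDIT_SUPPORT", 1) < 0) {
		/* undo the first registration before failing */
		unregister_handler("FIFO_CREDIT_MAP");
		return -1;
	}
	return 0;
}

int main(void)
{
	return fws_init() ? 1 : 0;
}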
@@ -2043,25 +2036,31 @@ bool brcmf_fws_fc_active(struct brcmf_fws_info *fws) | |||
2043 | if (!fws) | 2036 | if (!fws) |
2044 | return false; | 2037 | return false; |
2045 | 2038 | ||
2046 | brcmf_dbg(TRACE, "enter: mode=%d\n", fws->fcmode); | ||
2047 | return fws->fcmode != BRCMF_FWS_FCMODE_NONE; | 2039 | return fws->fcmode != BRCMF_FWS_FCMODE_NONE; |
2048 | } | 2040 | } |
2049 | 2041 | ||
2050 | void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb) | 2042 | void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb) |
2051 | { | 2043 | { |
2052 | ulong flags; | 2044 | ulong flags; |
2045 | u32 hslot; | ||
2053 | 2046 | ||
2054 | brcmf_fws_lock(fws->drvr, flags); | 2047 | if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_TIM) { |
2055 | brcmf_fws_txstatus_process(fws, BRCMF_FWS_TXSTATUS_FW_TOSSED, | 2048 | brcmu_pkt_buf_free_skb(skb); |
2056 | brcmf_skb_htod_tag_get_field(skb, HSLOT), 0); | 2049 | return; |
2057 | /* the packet never reached firmware so reclaim credit */ | ||
2058 | if (fws->fcmode == BRCMF_FWS_FCMODE_EXPLICIT_CREDIT && | ||
2059 | brcmf_skbcb(skb)->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) { | ||
2060 | brcmf_fws_return_credits(fws, | ||
2061 | brcmf_skb_htod_tag_get_field(skb, | ||
2062 | FIFO), | ||
2063 | 1); | ||
2064 | brcmf_fws_schedule_deq(fws); | ||
2065 | } | 2050 | } |
2051 | brcmf_fws_lock(fws->drvr, flags); | ||
2052 | hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); | ||
2053 | brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0); | ||
2066 | brcmf_fws_unlock(fws->drvr, flags); | 2054 | brcmf_fws_unlock(fws->drvr, flags); |
2067 | } | 2055 | } |
2056 | |||
2057 | void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked) | ||
2058 | { | ||
2059 | struct brcmf_fws_info *fws = drvr->fws; | ||
2060 | |||
2061 | fws->bus_flow_blocked = flow_blocked; | ||
2062 | if (!flow_blocked) | ||
2063 | brcmf_fws_schedule_deq(fws); | ||
2064 | else | ||
2065 | fws->stats.bus_flow_block++; | ||
2066 | } | ||
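brcmf_fws_bus_blocked is the new hook the bus layer calls when its transmit path saturates or drains again: it latches the flag checked by the dequeue worker, reschedules dequeuing once the block clears, and counts block events for the statistics. A tiny model of that latch-and-kick pattern, with the work scheduling reduced to a printf:

#include <stdbool.h>
#include <stdio.h>

static bool bus_flow_blocked;
static unsigned int bus_flow_block_count;

static void schedule_dequeue(void)
{
	printf("dequeue worker scheduled\n");
}

static void fws_bus_blocked(bool flow_blocked)
{
	bus_flow_blocked = flow_blocked;
	if (!flow_blocked)
		schedule_dequeue();	/* restart draining the queued frames */
	else
		bus_flow_block_count++;
}

int main(void)
{
	fws_bus_blocked(true);	/* bus congested: worker stops at its next check */
	fws_bus_blocked(false);	/* bus drained: kick the worker again */
	printf("blocked %u time(s)\n", bus_flow_block_count);
	return 0;
}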
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h index fbe483d23752..9fc860910bd8 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h | |||
@@ -29,5 +29,6 @@ void brcmf_fws_reset_interface(struct brcmf_if *ifp); | |||
29 | void brcmf_fws_add_interface(struct brcmf_if *ifp); | 29 | void brcmf_fws_add_interface(struct brcmf_if *ifp); |
30 | void brcmf_fws_del_interface(struct brcmf_if *ifp); | 30 | void brcmf_fws_del_interface(struct brcmf_if *ifp); |
31 | void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb); | 31 | void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb); |
32 | void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked); | ||
32 | 33 | ||
33 | #endif /* FWSIGNAL_H_ */ | 34 | #endif /* FWSIGNAL_H_ */ |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h index 7c1b6332747e..793df66fe0bf 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h | |||
@@ -170,7 +170,6 @@ struct brcmf_sdio_dev { | |||
170 | atomic_t suspend; /* suspend flag */ | 170 | atomic_t suspend; /* suspend flag */ |
171 | wait_queue_head_t request_byte_wait; | 171 | wait_queue_head_t request_byte_wait; |
172 | wait_queue_head_t request_word_wait; | 172 | wait_queue_head_t request_word_wait; |
173 | wait_queue_head_t request_chain_wait; | ||
174 | wait_queue_head_t request_buffer_wait; | 173 | wait_queue_head_t request_buffer_wait; |
175 | struct device *dev; | 174 | struct device *dev; |
176 | struct brcmf_bus *bus_if; | 175 | struct brcmf_bus *bus_if; |
@@ -272,16 +271,6 @@ brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev, | |||
272 | uint rw, uint fnc, uint addr, | 271 | uint rw, uint fnc, uint addr, |
273 | u32 *word, uint nbyte); | 272 | u32 *word, uint nbyte); |
274 | 273 | ||
275 | /* read or write any buffer using cmd53 */ | ||
276 | extern int | ||
277 | brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev, | ||
278 | uint fix_inc, uint rw, uint fnc_num, u32 addr, | ||
279 | struct sk_buff *pkt); | ||
280 | extern int | ||
281 | brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc, | ||
282 | uint write, uint func, uint addr, | ||
283 | struct sk_buff_head *pktq); | ||
284 | |||
285 | /* Watchdog timer interface for pm ops */ | 274 | /* Watchdog timer interface for pm ops */ |
286 | extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, | 275 | extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, |
287 | bool enable); | 276 | bool enable); |
@@ -291,4 +280,8 @@ extern void brcmf_sdbrcm_disconnect(void *ptr); | |||
291 | extern void brcmf_sdbrcm_isr(void *arg); | 280 | extern void brcmf_sdbrcm_isr(void *arg); |
292 | 281 | ||
293 | extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick); | 282 | extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick); |
283 | |||
284 | extern void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev, | ||
285 | wait_queue_head_t *wq); | ||
286 | extern bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev); | ||
294 | #endif /* _BRCM_SDH_H_ */ | 287 | #endif /* _BRCM_SDH_H_ */ |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h index 9df1f7a681e0..bc2917112899 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h | |||
@@ -87,6 +87,27 @@ TRACE_EVENT(brcmf_hexdump, | |||
87 | TP_printk("hexdump [length=%lu]", __entry->len) | 87 | TP_printk("hexdump [length=%lu]", __entry->len) |
88 | ); | 88 | ); |
89 | 89 | ||
90 | TRACE_EVENT(brcmf_bdchdr, | ||
91 | TP_PROTO(void *data), | ||
92 | TP_ARGS(data), | ||
93 | TP_STRUCT__entry( | ||
94 | __field(u8, flags) | ||
95 | __field(u8, prio) | ||
96 | __field(u8, flags2) | ||
97 | __field(u32, siglen) | ||
98 | __dynamic_array(u8, signal, *((u8 *)data + 3) * 4) | ||
99 | ), | ||
100 | TP_fast_assign( | ||
101 | __entry->flags = *(u8 *)data; | ||
102 | __entry->prio = *((u8 *)data + 1); | ||
103 | __entry->flags2 = *((u8 *)data + 2); | ||
104 | __entry->siglen = *((u8 *)data + 3) * 4; | ||
105 | memcpy(__get_dynamic_array(signal), | ||
106 | (u8 *)data + 4, __entry->siglen); | ||
107 | ), | ||
108 | TP_printk("bdc: prio=%d siglen=%d", __entry->prio, __entry->siglen) | ||
109 | ); | ||
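The new brcmf_bdchdr tracepoint records the four-byte BDC header plus the trailing signal TLVs: byte 0 carries the flags, byte 1 the priority, byte 2 extended flags, and byte 3 the signal length in 4-byte words, with the signal data starting at offset 4. A standalone parser following that same layout (the sample frame bytes are made up):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct bdc_hdr {
	uint8_t flags;
	uint8_t prio;
	uint8_t flags2;
	uint32_t siglen;	/* signal TLV length in bytes */
	const uint8_t *signal;	/* points into the original buffer */
};

static int bdc_parse(const uint8_t *data, size_t len, struct bdc_hdr *out)
{
	if (len < 4)
		return -1;
	out->flags = data[0];
	out->prio = data[1];
	out->flags2 = data[2];
	out->siglen = (uint32_t)data[3] * 4;	/* stored in 4-byte units */
	if (len < 4 + out->siglen)
		return -1;
	out->signal = data + 4;
	return 0;
}

int main(void)
{
	/* 4-byte header followed by one 4-byte signal word */
	const uint8_t frame[] = { 0x20, 0x05, 0x00, 0x01, 0xde, 0xad, 0xbe, 0xef };
	struct bdc_hdr hdr;

	if (bdc_parse(frame, sizeof(frame), &hdr) == 0)
		printf("bdc: prio=%u siglen=%u\n", hdr.prio, hdr.siglen);
	return 0;
}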
110 | |||
90 | #ifdef CONFIG_BRCM_TRACING | 111 | #ifdef CONFIG_BRCM_TRACING |
91 | 112 | ||
92 | #undef TRACE_INCLUDE_PATH | 113 | #undef TRACE_INCLUDE_PATH |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c index 01aed7ad6bec..322cadc51ded 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c | |||
@@ -82,6 +82,7 @@ struct brcmf_usbdev_info { | |||
82 | int tx_high_watermark; | 82 | int tx_high_watermark; |
83 | int tx_freecount; | 83 | int tx_freecount; |
84 | bool tx_flowblock; | 84 | bool tx_flowblock; |
85 | spinlock_t tx_flowblock_lock; | ||
85 | 86 | ||
86 | struct brcmf_usbreq *tx_reqs; | 87 | struct brcmf_usbreq *tx_reqs; |
87 | struct brcmf_usbreq *rx_reqs; | 88 | struct brcmf_usbreq *rx_reqs; |
@@ -411,6 +412,7 @@ static void brcmf_usb_tx_complete(struct urb *urb) | |||
411 | { | 412 | { |
412 | struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context; | 413 | struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context; |
413 | struct brcmf_usbdev_info *devinfo = req->devinfo; | 414 | struct brcmf_usbdev_info *devinfo = req->devinfo; |
415 | unsigned long flags; | ||
414 | 416 | ||
415 | brcmf_dbg(USB, "Enter, urb->status=%d, skb=%p\n", urb->status, | 417 | brcmf_dbg(USB, "Enter, urb->status=%d, skb=%p\n", urb->status, |
416 | req->skb); | 418 | req->skb); |
@@ -419,11 +421,13 @@ static void brcmf_usb_tx_complete(struct urb *urb) | |||
419 | brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0); | 421 | brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0); |
420 | req->skb = NULL; | 422 | req->skb = NULL; |
421 | brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount); | 423 | brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount); |
424 | spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags); | ||
422 | if (devinfo->tx_freecount > devinfo->tx_high_watermark && | 425 | if (devinfo->tx_freecount > devinfo->tx_high_watermark && |
423 | devinfo->tx_flowblock) { | 426 | devinfo->tx_flowblock) { |
424 | brcmf_txflowblock(devinfo->dev, false); | 427 | brcmf_txflowblock(devinfo->dev, false); |
425 | devinfo->tx_flowblock = false; | 428 | devinfo->tx_flowblock = false; |
426 | } | 429 | } |
430 | spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags); | ||
427 | } | 431 | } |
428 | 432 | ||
429 | static void brcmf_usb_rx_complete(struct urb *urb) | 433 | static void brcmf_usb_rx_complete(struct urb *urb) |
@@ -568,6 +572,7 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb) | |||
568 | struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); | 572 | struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); |
569 | struct brcmf_usbreq *req; | 573 | struct brcmf_usbreq *req; |
570 | int ret; | 574 | int ret; |
575 | unsigned long flags; | ||
571 | 576 | ||
572 | brcmf_dbg(USB, "Enter, skb=%p\n", skb); | 577 | brcmf_dbg(USB, "Enter, skb=%p\n", skb); |
573 | if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) { | 578 | if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) { |
@@ -599,11 +604,13 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb) | |||
599 | goto fail; | 604 | goto fail; |
600 | } | 605 | } |
601 | 606 | ||
607 | spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags); | ||
602 | if (devinfo->tx_freecount < devinfo->tx_low_watermark && | 608 | if (devinfo->tx_freecount < devinfo->tx_low_watermark && |
603 | !devinfo->tx_flowblock) { | 609 | !devinfo->tx_flowblock) { |
604 | brcmf_txflowblock(dev, true); | 610 | brcmf_txflowblock(dev, true); |
605 | devinfo->tx_flowblock = true; | 611 | devinfo->tx_flowblock = true; |
606 | } | 612 | } |
613 | spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags); | ||
607 | return 0; | 614 | return 0; |
608 | 615 | ||
609 | fail: | 616 | fail: |
@@ -1164,6 +1171,7 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo, | |||
1164 | 1171 | ||
1165 | /* Initialize the spinlocks */ | 1172 | /* Initialize the spinlocks */ |
1166 | spin_lock_init(&devinfo->qlock); | 1173 | spin_lock_init(&devinfo->qlock); |
1174 | spin_lock_init(&devinfo->tx_flowblock_lock); | ||
1167 | 1175 | ||
1168 | INIT_LIST_HEAD(&devinfo->rx_freeq); | 1176 | INIT_LIST_HEAD(&devinfo->rx_freeq); |
1169 | INIT_LIST_HEAD(&devinfo->rx_postq); | 1177 | INIT_LIST_HEAD(&devinfo->rx_postq); |
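The usb.c change puts the tx_freecount/tx_flowblock watermark checks under a dedicated spinlock so the submit path and the URB completion callback cannot race on the flow-control state. The userspace analogue below uses a pthread mutex for the same low/high-watermark scheme; the watermark values and pool size are arbitrary.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define TX_LOW_WATERMARK   4
#define TX_HIGH_WATERMARK 12

static pthread_mutex_t flow_lock = PTHREAD_MUTEX_INITIALIZER;
static int tx_freecount = 16;
static bool tx_flowblock;

static void set_flow(bool block)
{
	printf("flow control: %s upper layer\n", block ? "block" : "unblock");
}

/* Called when a transmit request is taken from the free pool. */
static void on_tx_submit(void)
{
	pthread_mutex_lock(&flow_lock);
	tx_freecount--;
	if (tx_freecount < TX_LOW_WATERMARK && !tx_flowblock) {
		set_flow(true);
		tx_flowblock = true;
	}
	pthread_mutex_unlock(&flow_lock);
}

/* Called from the completion path when a request is returned. */
static void on_tx_complete(void)
{
	pthread_mutex_lock(&flow_lock);
	tx_freecount++;
	if (tx_freecount > TX_HIGH_WATERMARK && tx_flowblock) {
		set_flow(false);
		tx_flowblock = false;
	}
	pthread_mutex_unlock(&flow_lock);
}

int main(void)
{
	int i;

	for (i = 0; i < 14; i++)
		on_tx_submit();		/* drops below the low watermark */
	for (i = 0; i < 14; i++)
		on_tx_complete();	/* climbs back above the high watermark */
	return 0;
}

Keeping the counter and the flag under one lock is what the added tx_flowblock_lock provides in the driver; without it the block and unblock decisions could interleave and leave flow control stuck.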
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 301e572e8923..277b37ae7126 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | |||
@@ -3982,6 +3982,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, | |||
3982 | struct brcmf_fil_af_params_le *af_params; | 3982 | struct brcmf_fil_af_params_le *af_params; |
3983 | bool ack; | 3983 | bool ack; |
3984 | s32 chan_nr; | 3984 | s32 chan_nr; |
3985 | u32 freq; | ||
3985 | 3986 | ||
3986 | brcmf_dbg(TRACE, "Enter\n"); | 3987 | brcmf_dbg(TRACE, "Enter\n"); |
3987 | 3988 | ||
@@ -3994,6 +3995,8 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, | |||
3994 | return -EPERM; | 3995 | return -EPERM; |
3995 | } | 3996 | } |
3996 | 3997 | ||
3998 | vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); | ||
3999 | |||
3997 | if (ieee80211_is_probe_resp(mgmt->frame_control)) { | 4000 | if (ieee80211_is_probe_resp(mgmt->frame_control)) { |
3998 | /* Right now the only reason to get a probe response */ | 4001 | /* Right now the only reason to get a probe response */ |
3999 | /* is for p2p listen response or for p2p GO from */ | 4002 | /* is for p2p listen response or for p2p GO from */ |
@@ -4009,7 +4012,6 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, | |||
4009 | ie_offset = DOT11_MGMT_HDR_LEN + | 4012 | ie_offset = DOT11_MGMT_HDR_LEN + |
4010 | DOT11_BCN_PRB_FIXED_LEN; | 4013 | DOT11_BCN_PRB_FIXED_LEN; |
4011 | ie_len = len - ie_offset; | 4014 | ie_len = len - ie_offset; |
4012 | vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); | ||
4013 | if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif) | 4015 | if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif) |
4014 | vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif; | 4016 | vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif; |
4015 | err = brcmf_vif_set_mgmt_ie(vif, | 4017 | err = brcmf_vif_set_mgmt_ie(vif, |
@@ -4033,16 +4035,22 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, | |||
4033 | memcpy(&af_params->bssid[0], &mgmt->bssid[0], ETH_ALEN); | 4035 | memcpy(&af_params->bssid[0], &mgmt->bssid[0], ETH_ALEN); |
4034 | /* Add the length, excluding the 802.11 header */ | 4036 | /* Add the length, excluding the 802.11 header */ |
4035 | action_frame->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN); | 4037 | action_frame->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN); |
4036 | /* Add the channel */ | 4038 | /* Add the channel. Use the one specified as parameter if any or |
4037 | chan_nr = ieee80211_frequency_to_channel(chan->center_freq); | 4039 | * the current one (got from the firmware) otherwise |
4040 | */ | ||
4041 | if (chan) | ||
4042 | freq = chan->center_freq; | ||
4043 | else | ||
4044 | brcmf_fil_cmd_int_get(vif->ifp, BRCMF_C_GET_CHANNEL, | ||
4045 | &freq); | ||
4046 | chan_nr = ieee80211_frequency_to_channel(freq); | ||
4038 | af_params->channel = cpu_to_le32(chan_nr); | 4047 | af_params->channel = cpu_to_le32(chan_nr); |
4039 | 4048 | ||
4040 | memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], | 4049 | memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], |
4041 | le16_to_cpu(action_frame->len)); | 4050 | le16_to_cpu(action_frame->len)); |
4042 | 4051 | ||
4043 | brcmf_dbg(TRACE, "Action frame, cookie=%lld, len=%d, freq=%d\n", | 4052 | brcmf_dbg(TRACE, "Action frame, cookie=%lld, len=%d, freq=%d\n", |
4044 | *cookie, le16_to_cpu(action_frame->len), | 4053 | *cookie, le16_to_cpu(action_frame->len), freq); |
4045 | chan->center_freq); | ||
4046 | 4054 | ||
4047 | ack = brcmf_p2p_send_action_frame(cfg, cfg_to_ndev(cfg), | 4055 | ack = brcmf_p2p_send_action_frame(cfg, cfg_to_ndev(cfg), |
4048 | af_params); | 4056 | af_params); |
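The mgmt_tx change allows the channel argument to be NULL: when a channel is supplied its center frequency is used, otherwise the current channel is queried from the firmware, and the resulting frequency is converted to a channel number. The sketch below mirrors that fallback together with a simplified 2.4/5 GHz frequency-to-channel conversion; the kernel's ieee80211_frequency_to_channel() handles more bands and edge cases.

#include <stdio.h>

/* Simplified conversion: 2.4 GHz band plus the common 5 GHz range only. */
static int freq_to_channel(int freq_mhz)
{
	if (freq_mhz == 2484)
		return 14;
	if (freq_mhz >= 2412 && freq_mhz < 2484)
		return (freq_mhz - 2407) / 5;
	if (freq_mhz >= 5000 && freq_mhz < 5900)
		return (freq_mhz - 5000) / 5;
	return 0;	/* unknown */
}

/* Stand-in for asking the firmware which channel it is currently on. */
static int firmware_get_current_freq(void)
{
	return 5180;	/* pretend the firmware reports channel 36 */
}

static int pick_tx_channel(const int *chan_center_freq)
{
	int freq;

	if (chan_center_freq)		/* caller supplied a channel */
		freq = *chan_center_freq;
	else				/* fall back to the current one */
		freq = firmware_get_current_freq();
	return freq_to_channel(freq);
}

int main(void)
{
	int freq = 2437;

	printf("explicit: channel %d\n", pick_tx_channel(&freq));
	printf("fallback: channel %d\n", pick_tx_channel(NULL));
	return 0;
}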
diff --git a/drivers/net/wireless/cw1200/main.c b/drivers/net/wireless/cw1200/main.c index 9f9adb4fbfb8..da885036ca5f 100644 --- a/drivers/net/wireless/cw1200/main.c +++ b/drivers/net/wireless/cw1200/main.c | |||
@@ -245,6 +245,14 @@ module_param(cw1200_ba_tx_tids, int, 0644); | |||
245 | MODULE_PARM_DESC(cw1200_ba_rx_tids, "Block ACK RX TIDs"); | 245 | MODULE_PARM_DESC(cw1200_ba_rx_tids, "Block ACK RX TIDs"); |
246 | MODULE_PARM_DESC(cw1200_ba_tx_tids, "Block ACK TX TIDs"); | 246 | MODULE_PARM_DESC(cw1200_ba_tx_tids, "Block ACK TX TIDs"); |
247 | 247 | ||
248 | #ifdef CONFIG_PM | ||
249 | static const struct wiphy_wowlan_support cw1200_wowlan_support = { | ||
250 | /* Support only for limited wowlan functionalities */ | ||
251 | .flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT, | ||
252 | }; | ||
253 | #endif | ||
254 | |||
255 | |||
248 | static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr, | 256 | static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr, |
249 | const bool have_5ghz) | 257 | const bool have_5ghz) |
250 | { | 258 | { |
@@ -289,10 +297,7 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr, | |||
289 | BIT(NL80211_IFTYPE_P2P_GO); | 297 | BIT(NL80211_IFTYPE_P2P_GO); |
290 | 298 | ||
291 | #ifdef CONFIG_PM | 299 | #ifdef CONFIG_PM |
292 | /* Support only for limited wowlan functionalities */ | 300 | hw->wiphy->wowlan = &cw1200_wowlan_support; |
293 | hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY | | ||
294 | WIPHY_WOWLAN_DISCONNECT; | ||
295 | hw->wiphy->wowlan.n_patterns = 0; | ||
296 | #endif | 301 | #endif |
297 | 302 | ||
298 | hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; | 303 | hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; |
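cw1200 now points hw->wiphy->wowlan at one static const capability structure instead of filling in per-wiphy fields, matching the cfg80211 interface change this series depends on. A minimal illustration of that shared read-only descriptor pattern, with generic names rather than the real cfg80211 types:

#include <stdio.h>

struct wowlan_support {
	unsigned int flags;
	int n_patterns;
};

struct wiphy {
	const struct wowlan_support *wowlan;	/* shared, read-only */
};

#define WOWLAN_ANY		0x1
#define WOWLAN_DISCONNECT	0x2

/* One descriptor shared by every device instance of this driver. */
static const struct wowlan_support drv_wowlan_support = {
	.flags = WOWLAN_ANY | WOWLAN_DISCONNECT,
	.n_patterns = 0,
};

int main(void)
{
	struct wiphy w;

	w.wowlan = &drv_wowlan_support;
	printf("wowlan flags=0x%x patterns=%d\n",
	       w.wowlan->flags, w.wowlan->n_patterns);
	return 0;
}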
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c index dce5e8f030b2..9581d07a4242 100644 --- a/drivers/net/wireless/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/iwlegacy/3945-mac.c | |||
@@ -3727,7 +3727,8 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3727 | * 5. Setup HW Constants | 3727 | * 5. Setup HW Constants |
3728 | * ********************/ | 3728 | * ********************/ |
3729 | /* Device-specific setup */ | 3729 | /* Device-specific setup */ |
3730 | if (il3945_hw_set_hw_params(il)) { | 3730 | err = il3945_hw_set_hw_params(il); |
3731 | if (err) { | ||
3731 | IL_ERR("failed to set hw settings\n"); | 3732 | IL_ERR("failed to set hw settings\n"); |
3732 | goto out_eeprom_free; | 3733 | goto out_eeprom_free; |
3733 | } | 3734 | } |
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c index dc1e6da9976a..c092033945cc 100644 --- a/drivers/net/wireless/iwlegacy/3945.c +++ b/drivers/net/wireless/iwlegacy/3945.c | |||
@@ -331,6 +331,19 @@ il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb) | |||
331 | return; | 331 | return; |
332 | } | 332 | } |
333 | 333 | ||
334 | /* | ||
335 | * The firmware will not transmit a frame on a passive channel until it | ||
336 | * has received at least one valid frame on that channel. When this error | ||
337 | * occurs we have to wait until the firmware unblocks itself, i.e. when a | ||
338 | * beacon or other frame is received. We unblock the queues in | ||
339 | * il3945_pass_packet_to_mac80211 or in il_mac_bss_info_changed. | ||
340 | */ | ||
341 | if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) && | ||
342 | il->iw_mode == NL80211_IFTYPE_STATION) { | ||
343 | il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE); | ||
344 | D_INFO("Stopped queues - RX waiting on passive channel\n"); | ||
345 | } | ||
346 | |||
334 | txq->time_stamp = jiffies; | 347 | txq->time_stamp = jiffies; |
335 | info = IEEE80211_SKB_CB(txq->skbs[txq->q.read_ptr]); | 348 | info = IEEE80211_SKB_CB(txq->skbs[txq->q.read_ptr]); |
336 | ieee80211_tx_info_clear_status(info); | 349 | ieee80211_tx_info_clear_status(info); |
@@ -488,6 +501,11 @@ il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb, | |||
488 | return; | 501 | return; |
489 | } | 502 | } |
490 | 503 | ||
504 | if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) { | ||
505 | il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE); | ||
506 | D_INFO("Woke queues - frame received on passive channel\n"); | ||
507 | } | ||
508 | |||
491 | skb = dev_alloc_skb(128); | 509 | skb = dev_alloc_skb(128); |
492 | if (!skb) { | 510 | if (!skb) { |
493 | IL_ERR("dev_alloc_skb failed\n"); | 511 | IL_ERR("dev_alloc_skb failed\n"); |
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c index 3c4899b7c1ab..b9b2bb51e605 100644 --- a/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/iwlegacy/4965-mac.c | |||
@@ -588,6 +588,11 @@ il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr, | |||
588 | return; | 588 | return; |
589 | } | 589 | } |
590 | 590 | ||
591 | if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) { | ||
592 | il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE); | ||
593 | D_INFO("Woke queues - frame received on passive channel\n"); | ||
594 | } | ||
595 | |||
591 | /* In case of HW accelerated crypto and bad decryption, drop */ | 596 | /* In case of HW accelerated crypto and bad decryption, drop */ |
592 | if (!il->cfg->mod_params->sw_crypto && | 597 | if (!il->cfg->mod_params->sw_crypto && |
593 | il_set_decrypted_flag(il, hdr, ampdu_status, stats)) | 598 | il_set_decrypted_flag(il, hdr, ampdu_status, stats)) |
@@ -2806,6 +2811,19 @@ il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb) | |||
2806 | return; | 2811 | return; |
2807 | } | 2812 | } |
2808 | 2813 | ||
2814 | /* | ||
2815 | * The firmware will not transmit a frame on a passive channel until it | ||
2816 | * has received at least one valid frame on that channel. When this error | ||
2817 | * occurs we have to wait until the firmware unblocks itself, i.e. when a | ||
2818 | * beacon or other frame is received. We unblock the queues in | ||
2819 | * il4965_pass_packet_to_mac80211 or in il_mac_bss_info_changed. | ||
2820 | */ | ||
2821 | if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) && | ||
2822 | il->iw_mode == NL80211_IFTYPE_STATION) { | ||
2823 | il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE); | ||
2824 | D_INFO("Stopped queues - RX waiting on passive channel\n"); | ||
2825 | } | ||
2826 | |||
2809 | spin_lock_irqsave(&il->sta_lock, flags); | 2827 | spin_lock_irqsave(&il->sta_lock, flags); |
2810 | if (txq->sched_retry) { | 2828 | if (txq->sched_retry) { |
2811 | const u32 scd_ssn = il4965_get_scd_ssn(tx_resp); | 2829 | const u32 scd_ssn = il4965_get_scd_ssn(tx_resp); |
@@ -5741,7 +5759,8 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length) | |||
5741 | hw->flags = | 5759 | hw->flags = |
5742 | IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION | | 5760 | IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION | |
5743 | IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT | | 5761 | IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT | |
5744 | IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS; | 5762 | IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS | |
5763 | IEEE80211_HW_SUPPORTS_DYNAMIC_PS; | ||
5745 | if (il->cfg->sku & IL_SKU_N) | 5764 | if (il->cfg->sku & IL_SKU_N) |
5746 | hw->flags |= | 5765 | hw->flags |= |
5747 | IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | | 5766 | IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | |
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c index e9a3cbc409ae..3195aad440dd 100644 --- a/drivers/net/wireless/iwlegacy/common.c +++ b/drivers/net/wireless/iwlegacy/common.c | |||
@@ -5307,6 +5307,17 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | |||
5307 | D_MAC80211("BSSID %pM\n", bss_conf->bssid); | 5307 | D_MAC80211("BSSID %pM\n", bss_conf->bssid); |
5308 | 5308 | ||
5309 | /* | 5309 | /* |
5310 | * On a passive channel we wait with the queues blocked to see if | ||
5311 | * there is any traffic on that channel. If no frame is received | ||
5312 | * (very unlikely, since the scan already detected an AP on that | ||
5313 | * channel, but theoretically possible), the mac80211 association | ||
5314 | * procedure will time out and mac80211 will call us with a NULL | ||
5315 | * bssid. We have to unblock the queues in that case. | ||
5316 | */ | ||
5317 | if (is_zero_ether_addr(bss_conf->bssid)) | ||
5318 | il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE); | ||
5319 | |||
5320 | /* | ||
5310 | * If there is currently a HW scan going on in the background, | 5321 | * If there is currently a HW scan going on in the background, |
5311 | * then we need to cancel it, otherwise sometimes we are not | 5322 | * then we need to cancel it, otherwise sometimes we are not |
5312 | * able to authenticate (FIXME: why ?) | 5323 | * able to authenticate (FIXME: why ?) |
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h index 4caaf52986a4..83f8ed8a5528 100644 --- a/drivers/net/wireless/iwlegacy/common.h +++ b/drivers/net/wireless/iwlegacy/common.h | |||
@@ -1299,6 +1299,8 @@ struct il_priv { | |||
1299 | /* queue refcounts */ | 1299 | /* queue refcounts */ |
1300 | #define IL_MAX_HW_QUEUES 32 | 1300 | #define IL_MAX_HW_QUEUES 32 |
1301 | unsigned long queue_stopped[BITS_TO_LONGS(IL_MAX_HW_QUEUES)]; | 1301 | unsigned long queue_stopped[BITS_TO_LONGS(IL_MAX_HW_QUEUES)]; |
1302 | #define IL_STOP_REASON_PASSIVE 0 | ||
1303 | unsigned long stop_reason; | ||
1302 | /* for each AC */ | 1304 | /* for each AC */ |
1303 | atomic_t queue_stop_count[4]; | 1305 | atomic_t queue_stop_count[4]; |
1304 | 1306 | ||
@@ -2257,6 +2259,19 @@ il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq) | |||
2257 | } | 2259 | } |
2258 | 2260 | ||
2259 | static inline void | 2261 | static inline void |
2262 | _il_wake_queue(struct il_priv *il, u8 ac) | ||
2263 | { | ||
2264 | if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0) | ||
2265 | ieee80211_wake_queue(il->hw, ac); | ||
2266 | } | ||
2267 | |||
2268 | static inline void | ||
2269 | _il_stop_queue(struct il_priv *il, u8 ac) | ||
2270 | { | ||
2271 | if (atomic_inc_return(&il->queue_stop_count[ac]) > 0) | ||
2272 | ieee80211_stop_queue(il->hw, ac); | ||
2273 | } | ||
2274 | static inline void | ||
2260 | il_wake_queue(struct il_priv *il, struct il_tx_queue *txq) | 2275 | il_wake_queue(struct il_priv *il, struct il_tx_queue *txq) |
2261 | { | 2276 | { |
2262 | u8 queue = txq->swq_id; | 2277 | u8 queue = txq->swq_id; |
@@ -2264,8 +2279,7 @@ il_wake_queue(struct il_priv *il, struct il_tx_queue *txq) | |||
2264 | u8 hwq = (queue >> 2) & 0x1f; | 2279 | u8 hwq = (queue >> 2) & 0x1f; |
2265 | 2280 | ||
2266 | if (test_and_clear_bit(hwq, il->queue_stopped)) | 2281 | if (test_and_clear_bit(hwq, il->queue_stopped)) |
2267 | if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0) | 2282 | _il_wake_queue(il, ac); |
2268 | ieee80211_wake_queue(il->hw, ac); | ||
2269 | } | 2283 | } |
2270 | 2284 | ||
2271 | static inline void | 2285 | static inline void |
@@ -2276,8 +2290,27 @@ il_stop_queue(struct il_priv *il, struct il_tx_queue *txq) | |||
2276 | u8 hwq = (queue >> 2) & 0x1f; | 2290 | u8 hwq = (queue >> 2) & 0x1f; |
2277 | 2291 | ||
2278 | if (!test_and_set_bit(hwq, il->queue_stopped)) | 2292 | if (!test_and_set_bit(hwq, il->queue_stopped)) |
2279 | if (atomic_inc_return(&il->queue_stop_count[ac]) > 0) | 2293 | _il_stop_queue(il, ac); |
2280 | ieee80211_stop_queue(il->hw, ac); | 2294 | } |
2295 | |||
2296 | static inline void | ||
2297 | il_wake_queues_by_reason(struct il_priv *il, int reason) | ||
2298 | { | ||
2299 | u8 ac; | ||
2300 | |||
2301 | if (test_and_clear_bit(reason, &il->stop_reason)) | ||
2302 | for (ac = 0; ac < 4; ac++) | ||
2303 | _il_wake_queue(il, ac); | ||
2304 | } | ||
2305 | |||
2306 | static inline void | ||
2307 | il_stop_queues_by_reason(struct il_priv *il, int reason) | ||
2308 | { | ||
2309 | u8 ac; | ||
2310 | |||
2311 | if (!test_and_set_bit(reason, &il->stop_reason)) | ||
2312 | for (ac = 0; ac < 4; ac++) | ||
2313 | _il_stop_queue(il, ac); | ||
2281 | } | 2314 | } |
2282 | 2315 | ||
2283 | #ifdef ieee80211_stop_queue | 2316 | #ifdef ieee80211_stop_queue |
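The new common.h helpers gate the per-AC stop/wake calls on a shared stop_reason bitmap, so a passive-channel block stops all four ACs exactly once and wakes them exactly once when the reason clears, independently of the per-queue refcounts. The userspace model below captures that reason-bit plus refcount scheme; the hardware queue calls are stubs and no locking is shown.

#include <stdio.h>

#define NUM_ACS 4
#define STOP_REASON_PASSIVE 0

static int queue_stop_count[NUM_ACS];
static unsigned long stop_reason;

static void hw_stop_queue(int ac)  { printf("stop ac %d\n", ac); }
static void hw_wake_queue(int ac)  { printf("wake ac %d\n", ac); }

static void stop_queue(int ac)
{
	if (++queue_stop_count[ac] > 0)
		hw_stop_queue(ac);
}

static void wake_queue(int ac)
{
	if (--queue_stop_count[ac] <= 0)
		hw_wake_queue(ac);
}

static void stop_queues_by_reason(int reason)
{
	unsigned long bit = 1UL << reason;
	int ac;

	if (!(stop_reason & bit)) {		/* first time only */
		stop_reason |= bit;
		for (ac = 0; ac < NUM_ACS; ac++)
			stop_queue(ac);
	}
}

static void wake_queues_by_reason(int reason)
{
	unsigned long bit = 1UL << reason;
	int ac;

	if (stop_reason & bit) {		/* only if currently set */
		stop_reason &= ~bit;
		for (ac = 0; ac < NUM_ACS; ac++)
			wake_queue(ac);
	}
}

int main(void)
{
	stop_queues_by_reason(STOP_REASON_PASSIVE);	/* TX failed: passive, no RX yet */
	stop_queues_by_reason(STOP_REASON_PASSIVE);	/* second call is a no-op */
	wake_queues_by_reason(STOP_REASON_PASSIVE);	/* a frame arrived: unblock */
	return 0;
}

In the driver the same effect is achieved with test_and_set_bit()/test_and_clear_bit() on stop_reason and atomic per-AC counters, so the helpers stay safe without an extra lock.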
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile index 3b5613ea458b..f55a758b87f6 100644 --- a/drivers/net/wireless/iwlwifi/Makefile +++ b/drivers/net/wireless/iwlwifi/Makefile | |||
@@ -7,14 +7,16 @@ iwlwifi-objs += iwl-notif-wait.o | |||
7 | iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o | 7 | iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o |
8 | iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o | 8 | iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o |
9 | iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o | 9 | iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o |
10 | iwlwifi-objs += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o iwl-7000.o | 10 | iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o |
11 | iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o | ||
12 | |||
13 | iwlwifi-objs += $(iwlwifi-m) | ||
11 | 14 | ||
12 | iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o | 15 | iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o |
13 | iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o | 16 | iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o |
14 | 17 | ||
15 | ccflags-y += -D__CHECK_ENDIAN__ -I$(src) | 18 | ccflags-y += -D__CHECK_ENDIAN__ -I$(src) |
16 | 19 | ||
17 | |||
18 | obj-$(CONFIG_IWLDVM) += dvm/ | 20 | obj-$(CONFIG_IWLDVM) += dvm/ |
19 | obj-$(CONFIG_IWLMVM) += mvm/ | 21 | obj-$(CONFIG_IWLMVM) += mvm/ |
20 | 22 | ||
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h index f1b8df16dbba..5cd87f949266 100644 --- a/drivers/net/wireless/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/iwlwifi/dvm/dev.h | |||
@@ -915,6 +915,9 @@ struct iwl_priv { | |||
915 | __le64 replay_ctr; | 915 | __le64 replay_ctr; |
916 | __le16 last_seq_ctl; | 916 | __le16 last_seq_ctl; |
917 | bool have_rekey_data; | 917 | bool have_rekey_data; |
918 | #ifdef CONFIG_PM_SLEEP | ||
919 | struct wiphy_wowlan_support wowlan_support; | ||
920 | #endif | ||
918 | 921 | ||
919 | /* device_pointers: pointers to ucode event tables */ | 922 | /* device_pointers: pointers to ucode event tables */ |
920 | struct { | 923 | struct { |
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index c0039a992909..eef64bb854f7 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c | |||
@@ -208,20 +208,21 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, | |||
208 | priv->trans->ops->d3_suspend && | 208 | priv->trans->ops->d3_suspend && |
209 | priv->trans->ops->d3_resume && | 209 | priv->trans->ops->d3_resume && |
210 | device_can_wakeup(priv->trans->dev)) { | 210 | device_can_wakeup(priv->trans->dev)) { |
211 | hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | | 211 | priv->wowlan_support.flags = WIPHY_WOWLAN_MAGIC_PKT | |
212 | WIPHY_WOWLAN_DISCONNECT | | 212 | WIPHY_WOWLAN_DISCONNECT | |
213 | WIPHY_WOWLAN_EAP_IDENTITY_REQ | | 213 | WIPHY_WOWLAN_EAP_IDENTITY_REQ | |
214 | WIPHY_WOWLAN_RFKILL_RELEASE; | 214 | WIPHY_WOWLAN_RFKILL_RELEASE; |
215 | if (!iwlwifi_mod_params.sw_crypto) | 215 | if (!iwlwifi_mod_params.sw_crypto) |
216 | hw->wiphy->wowlan.flags |= | 216 | priv->wowlan_support.flags |= |
217 | WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | | 217 | WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | |
218 | WIPHY_WOWLAN_GTK_REKEY_FAILURE; | 218 | WIPHY_WOWLAN_GTK_REKEY_FAILURE; |
219 | 219 | ||
220 | hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS; | 220 | priv->wowlan_support.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS; |
221 | hw->wiphy->wowlan.pattern_min_len = | 221 | priv->wowlan_support.pattern_min_len = |
222 | IWLAGN_WOWLAN_MIN_PATTERN_LEN; | 222 | IWLAGN_WOWLAN_MIN_PATTERN_LEN; |
223 | hw->wiphy->wowlan.pattern_max_len = | 223 | priv->wowlan_support.pattern_max_len = |
224 | IWLAGN_WOWLAN_MAX_PATTERN_LEN; | 224 | IWLAGN_WOWLAN_MAX_PATTERN_LEN; |
225 | hw->wiphy->wowlan = &priv->wowlan_support; | ||
225 | } | 226 | } |
226 | #endif | 227 | #endif |
227 | 228 | ||
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c index 68f754659570..7aa9c8dc33ef 100644 --- a/drivers/net/wireless/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/iwlwifi/dvm/main.c | |||
@@ -1859,14 +1859,9 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, | |||
1859 | return pos; | 1859 | return pos; |
1860 | } | 1860 | } |
1861 | 1861 | ||
1862 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1863 | if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log) | 1862 | if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log) |
1864 | size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) | 1863 | size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) |
1865 | ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; | 1864 | ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; |
1866 | #else | ||
1867 | size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) | ||
1868 | ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; | ||
1869 | #endif | ||
1870 | IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n", | 1865 | IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n", |
1871 | size); | 1866 | size); |
1872 | 1867 | ||
@@ -1910,10 +1905,8 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand) | |||
1910 | unsigned int reload_msec; | 1905 | unsigned int reload_msec; |
1911 | unsigned long reload_jiffies; | 1906 | unsigned long reload_jiffies; |
1912 | 1907 | ||
1913 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1914 | if (iwl_have_debug_level(IWL_DL_FW_ERRORS)) | 1908 | if (iwl_have_debug_level(IWL_DL_FW_ERRORS)) |
1915 | iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS); | 1909 | iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS); |
1916 | #endif | ||
1917 | 1910 | ||
1918 | /* uCode is no longer loaded. */ | 1911 | /* uCode is no longer loaded. */ |
1919 | priv->ucode_loaded = false; | 1912 | priv->ucode_loaded = false; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h index a193832fc790..0189b9050f22 100644 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/iwlwifi/iwl-config.h | |||
@@ -237,6 +237,7 @@ struct iwl_cfg { | |||
237 | /* | 237 | /* |
238 | * This list declares the config structures for all devices. | 238 | * This list declares the config structures for all devices. |
239 | */ | 239 | */ |
240 | #if IS_ENABLED(CONFIG_IWLDVM) | ||
240 | extern const struct iwl_cfg iwl5300_agn_cfg; | 241 | extern const struct iwl_cfg iwl5300_agn_cfg; |
241 | extern const struct iwl_cfg iwl5100_agn_cfg; | 242 | extern const struct iwl_cfg iwl5100_agn_cfg; |
242 | extern const struct iwl_cfg iwl5350_agn_cfg; | 243 | extern const struct iwl_cfg iwl5350_agn_cfg; |
@@ -278,11 +279,14 @@ extern const struct iwl_cfg iwl6035_2agn_cfg; | |||
278 | extern const struct iwl_cfg iwl105_bgn_cfg; | 279 | extern const struct iwl_cfg iwl105_bgn_cfg; |
279 | extern const struct iwl_cfg iwl105_bgn_d_cfg; | 280 | extern const struct iwl_cfg iwl105_bgn_d_cfg; |
280 | extern const struct iwl_cfg iwl135_bgn_cfg; | 281 | extern const struct iwl_cfg iwl135_bgn_cfg; |
282 | #endif /* CONFIG_IWLDVM */ | ||
283 | #if IS_ENABLED(CONFIG_IWLMVM) | ||
281 | extern const struct iwl_cfg iwl7260_2ac_cfg; | 284 | extern const struct iwl_cfg iwl7260_2ac_cfg; |
282 | extern const struct iwl_cfg iwl7260_2n_cfg; | 285 | extern const struct iwl_cfg iwl7260_2n_cfg; |
283 | extern const struct iwl_cfg iwl7260_n_cfg; | 286 | extern const struct iwl_cfg iwl7260_n_cfg; |
284 | extern const struct iwl_cfg iwl3160_2ac_cfg; | 287 | extern const struct iwl_cfg iwl3160_2ac_cfg; |
285 | extern const struct iwl_cfg iwl3160_2n_cfg; | 288 | extern const struct iwl_cfg iwl3160_2n_cfg; |
286 | extern const struct iwl_cfg iwl3160_n_cfg; | 289 | extern const struct iwl_cfg iwl3160_n_cfg; |
290 | #endif /* CONFIG_IWLMVM */ | ||
287 | 291 | ||
288 | #endif /* __IWL_CONFIG_H__ */ | 292 | #endif /* __IWL_CONFIG_H__ */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h index 8cf5db7fb5c9..7edb8519c8a4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h | |||
@@ -34,7 +34,11 @@ | |||
34 | 34 | ||
35 | static inline bool iwl_have_debug_level(u32 level) | 35 | static inline bool iwl_have_debug_level(u32 level) |
36 | { | 36 | { |
37 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
37 | return iwlwifi_mod_params.debug_level & level; | 38 | return iwlwifi_mod_params.debug_level & level; |
39 | #else | ||
40 | return false; | ||
41 | #endif | ||
38 | } | 42 | } |
39 | 43 | ||
40 | void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace, | 44 | void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace, |
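iwl_have_debug_level now compiles to a constant false when CONFIG_IWLWIFI_DEBUG is not set, which is what allows the callers in dvm/main.c above to drop their own #ifdef wrappers: the compiler simply eliminates the guarded branches. A small standalone illustration of that pattern (only the config macro name is taken from the driver):

#include <stdbool.h>
#include <stdio.h>

/* #define CONFIG_IWLWIFI_DEBUG 1  -- uncomment to enable debug output */

unsigned int debug_level = 0x2;		/* module parameter in the driver */

static inline bool have_debug_level(unsigned int level)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	return debug_level & level;
#else
	(void)level;
	return false;	/* constant: the compiler removes guarded code */
#endif
}

int main(void)
{
	/* No #ifdef needed at the call site anymore. */
	if (have_debug_level(0x2))
		printf("debug: dumping the full event log\n");
	else
		printf("debug output compiled out or disabled\n");
	return 0;
}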
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h index 7d1450916308..429337a2b9a1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/iwlwifi/iwl-drv.h | |||
@@ -62,8 +62,7 @@ | |||
62 | 62 | ||
63 | #ifndef __iwl_drv_h__ | 63 | #ifndef __iwl_drv_h__ |
64 | #define __iwl_drv_h__ | 64 | #define __iwl_drv_h__ |
65 | 65 | #include <linux/export.h> | |
66 | #include <linux/module.h> | ||
67 | 66 | ||
68 | /* for all modules */ | 67 | /* for all modules */ |
69 | #define DRV_NAME "iwlwifi" | 68 | #define DRV_NAME "iwlwifi" |
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h index 36dfe0919f6b..d4ad505b0a4b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h | |||
@@ -115,7 +115,9 @@ struct iwl_mod_params { | |||
115 | int led_mode; | 115 | int led_mode; |
116 | bool power_save; | 116 | bool power_save; |
117 | int power_level; | 117 | int power_level; |
118 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
118 | u32 debug_level; | 119 | u32 debug_level; |
120 | #endif | ||
119 | int ant_coupling; | 121 | int ant_coupling; |
120 | bool bt_ch_announce; | 122 | bool bt_ch_announce; |
121 | bool auto_agg; | 123 | bool auto_agg; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c index 25745daa0d5d..1a405ae6a9c5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c +++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c | |||
@@ -92,20 +92,16 @@ struct iwl_phy_db_entry { | |||
92 | struct iwl_phy_db { | 92 | struct iwl_phy_db { |
93 | struct iwl_phy_db_entry cfg; | 93 | struct iwl_phy_db_entry cfg; |
94 | struct iwl_phy_db_entry calib_nch; | 94 | struct iwl_phy_db_entry calib_nch; |
95 | struct iwl_phy_db_entry calib_ch; | ||
96 | struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS]; | 95 | struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS]; |
97 | struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS]; | 96 | struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS]; |
98 | 97 | ||
99 | u32 channel_num; | ||
100 | u32 channel_size; | ||
101 | |||
102 | struct iwl_trans *trans; | 98 | struct iwl_trans *trans; |
103 | }; | 99 | }; |
104 | 100 | ||
105 | enum iwl_phy_db_section_type { | 101 | enum iwl_phy_db_section_type { |
106 | IWL_PHY_DB_CFG = 1, | 102 | IWL_PHY_DB_CFG = 1, |
107 | IWL_PHY_DB_CALIB_NCH, | 103 | IWL_PHY_DB_CALIB_NCH, |
108 | IWL_PHY_DB_CALIB_CH, | 104 | IWL_PHY_DB_UNUSED, |
109 | IWL_PHY_DB_CALIB_CHG_PAPD, | 105 | IWL_PHY_DB_CALIB_CHG_PAPD, |
110 | IWL_PHY_DB_CALIB_CHG_TXP, | 106 | IWL_PHY_DB_CALIB_CHG_TXP, |
111 | IWL_PHY_DB_MAX | 107 | IWL_PHY_DB_MAX |
@@ -169,8 +165,6 @@ iwl_phy_db_get_section(struct iwl_phy_db *phy_db, | |||
169 | return &phy_db->cfg; | 165 | return &phy_db->cfg; |
170 | case IWL_PHY_DB_CALIB_NCH: | 166 | case IWL_PHY_DB_CALIB_NCH: |
171 | return &phy_db->calib_nch; | 167 | return &phy_db->calib_nch; |
172 | case IWL_PHY_DB_CALIB_CH: | ||
173 | return &phy_db->calib_ch; | ||
174 | case IWL_PHY_DB_CALIB_CHG_PAPD: | 168 | case IWL_PHY_DB_CALIB_CHG_PAPD: |
175 | if (chg_id >= IWL_NUM_PAPD_CH_GROUPS) | 169 | if (chg_id >= IWL_NUM_PAPD_CH_GROUPS) |
176 | return NULL; | 170 | return NULL; |
@@ -208,7 +202,6 @@ void iwl_phy_db_free(struct iwl_phy_db *phy_db) | |||
208 | 202 | ||
209 | iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0); | 203 | iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0); |
210 | iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0); | 204 | iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0); |
211 | iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0); | ||
212 | for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++) | 205 | for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++) |
213 | iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i); | 206 | iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i); |
214 | for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) | 207 | for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) |
@@ -248,13 +241,6 @@ int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt, | |||
248 | 241 | ||
249 | entry->size = size; | 242 | entry->size = size; |
250 | 243 | ||
251 | if (type == IWL_PHY_DB_CALIB_CH) { | ||
252 | phy_db->channel_num = | ||
253 | le32_to_cpup((__le32 *)phy_db_notif->data); | ||
254 | phy_db->channel_size = | ||
255 | (size - CHANNEL_NUM_SIZE) / phy_db->channel_num; | ||
256 | } | ||
257 | |||
258 | IWL_DEBUG_INFO(phy_db->trans, | 244 | IWL_DEBUG_INFO(phy_db->trans, |
259 | "%s(%d): [PHYDB]SET: Type %d , Size: %d\n", | 245 | "%s(%d): [PHYDB]SET: Type %d , Size: %d\n", |
260 | __func__, __LINE__, type, size); | 246 | __func__, __LINE__, type, size); |
@@ -328,10 +314,7 @@ int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db, | |||
328 | u32 type, u8 **data, u16 *size, u16 ch_id) | 314 | u32 type, u8 **data, u16 *size, u16 ch_id) |
329 | { | 315 | { |
330 | struct iwl_phy_db_entry *entry; | 316 | struct iwl_phy_db_entry *entry; |
331 | u32 channel_num; | ||
332 | u32 channel_size; | ||
333 | u16 ch_group_id = 0; | 317 | u16 ch_group_id = 0; |
334 | u16 index; | ||
335 | 318 | ||
336 | if (!phy_db) | 319 | if (!phy_db) |
337 | return -EINVAL; | 320 | return -EINVAL; |
@@ -346,21 +329,8 @@ int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db, | |||
346 | if (!entry) | 329 | if (!entry) |
347 | return -EINVAL; | 330 | return -EINVAL; |
348 | 331 | ||
349 | if (type == IWL_PHY_DB_CALIB_CH) { | 332 | *data = entry->data; |
350 | index = ch_id_to_ch_index(ch_id); | 333 | *size = entry->size; |
351 | channel_num = phy_db->channel_num; | ||
352 | channel_size = phy_db->channel_size; | ||
353 | if (index >= channel_num) { | ||
354 | IWL_ERR(phy_db->trans, "Wrong channel number %d\n", | ||
355 | ch_id); | ||
356 | return -EINVAL; | ||
357 | } | ||
358 | *data = entry->data + CHANNEL_NUM_SIZE + index * channel_size; | ||
359 | *size = channel_size; | ||
360 | } else { | ||
361 | *data = entry->data; | ||
362 | *size = entry->size; | ||
363 | } | ||
364 | 334 | ||
365 | IWL_DEBUG_INFO(phy_db->trans, | 335 | IWL_DEBUG_INFO(phy_db->trans, |
366 | "%s(%d): [PHYDB] GET: Type %d , Size: %d\n", | 336 | "%s(%d): [PHYDB] GET: Type %d , Size: %d\n", |
@@ -413,6 +383,9 @@ static int iwl_phy_db_send_all_channel_groups( | |||
413 | if (!entry) | 383 | if (!entry) |
414 | return -EINVAL; | 384 | return -EINVAL; |
415 | 385 | ||
386 | if (WARN_ON_ONCE(!entry->size)) | ||
387 | continue; | ||
388 | |||
416 | /* Send the requested PHY DB section */ | 389 | /* Send the requested PHY DB section */ |
417 | err = iwl_send_phy_db_cmd(phy_db, | 390 | err = iwl_send_phy_db_cmd(phy_db, |
418 | type, | 391 | type, |
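With the per-channel calibration section gone, the send path above only guards against empty entries: WARN_ON_ONCE() complains the first time and the loop skips the section instead of sending a zero-length command. A plain-C analogue of that warn-once-and-skip shape, with all names invented for the example:

```c
#include <stdio.h>
#include <stddef.h>

struct entry { const void *data; size_t size; };

/* Userspace analogue of the WARN_ON_ONCE(!entry->size) guard added above:
 * complain once about empty sections, then skip them instead of sending a
 * zero-length command. */
static int warn_once_on_empty(const struct entry *e)
{
	static int warned;

	if (e->size)
		return 0;
	if (!warned) {
		fprintf(stderr, "warning: empty phy-db section, skipping\n");
		warned = 1;
	}
	return 1;
}

int main(void)
{
	struct entry sections[] = { { "cfg", 3 }, { NULL, 0 }, { "txp", 3 } };
	size_t i;

	for (i = 0; i < sizeof(sections) / sizeof(sections[0]); i++) {
		if (warn_once_on_empty(&sections[i]))
			continue;
		printf("sending section %zu (%zu bytes)\n", i, sections[i].size);
	}
	return 0;
}
```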
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 7a2ef3f013fd..8c49db02c9c1 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c | |||
@@ -420,8 +420,7 @@ static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr) | |||
420 | return cpu_to_le16(be16_to_cpu((__force __be16)check)); | 420 | return cpu_to_le16(be16_to_cpu((__force __be16)check)); |
421 | } | 421 | } |
422 | 422 | ||
423 | static void iwl_mvm_build_tcp_packet(struct iwl_mvm *mvm, | 423 | static void iwl_mvm_build_tcp_packet(struct ieee80211_vif *vif, |
424 | struct ieee80211_vif *vif, | ||
425 | struct cfg80211_wowlan_tcp *tcp, | 424 | struct cfg80211_wowlan_tcp *tcp, |
426 | void *_pkt, u8 *mask, | 425 | void *_pkt, u8 *mask, |
427 | __le16 *pseudo_hdr_csum, | 426 | __le16 *pseudo_hdr_csum, |
@@ -567,21 +566,21 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm, | |||
567 | 566 | ||
568 | /* SYN (TX) */ | 567 | /* SYN (TX) */ |
569 | iwl_mvm_build_tcp_packet( | 568 | iwl_mvm_build_tcp_packet( |
570 | mvm, vif, tcp, cfg->syn_tx.data, NULL, | 569 | vif, tcp, cfg->syn_tx.data, NULL, |
571 | &cfg->syn_tx.info.tcp_pseudo_header_checksum, | 570 | &cfg->syn_tx.info.tcp_pseudo_header_checksum, |
572 | MVM_TCP_TX_SYN); | 571 | MVM_TCP_TX_SYN); |
573 | cfg->syn_tx.info.tcp_payload_length = 0; | 572 | cfg->syn_tx.info.tcp_payload_length = 0; |
574 | 573 | ||
575 | /* SYN/ACK (RX) */ | 574 | /* SYN/ACK (RX) */ |
576 | iwl_mvm_build_tcp_packet( | 575 | iwl_mvm_build_tcp_packet( |
577 | mvm, vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask, | 576 | vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask, |
578 | &cfg->synack_rx.info.tcp_pseudo_header_checksum, | 577 | &cfg->synack_rx.info.tcp_pseudo_header_checksum, |
579 | MVM_TCP_RX_SYNACK); | 578 | MVM_TCP_RX_SYNACK); |
580 | cfg->synack_rx.info.tcp_payload_length = 0; | 579 | cfg->synack_rx.info.tcp_payload_length = 0; |
581 | 580 | ||
582 | /* KEEPALIVE/ACK (TX) */ | 581 | /* KEEPALIVE/ACK (TX) */ |
583 | iwl_mvm_build_tcp_packet( | 582 | iwl_mvm_build_tcp_packet( |
584 | mvm, vif, tcp, cfg->keepalive_tx.data, NULL, | 583 | vif, tcp, cfg->keepalive_tx.data, NULL, |
585 | &cfg->keepalive_tx.info.tcp_pseudo_header_checksum, | 584 | &cfg->keepalive_tx.info.tcp_pseudo_header_checksum, |
586 | MVM_TCP_TX_DATA); | 585 | MVM_TCP_TX_DATA); |
587 | cfg->keepalive_tx.info.tcp_payload_length = | 586 | cfg->keepalive_tx.info.tcp_payload_length = |
@@ -605,7 +604,7 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm, | |||
605 | 604 | ||
606 | /* ACK (RX) */ | 605 | /* ACK (RX) */ |
607 | iwl_mvm_build_tcp_packet( | 606 | iwl_mvm_build_tcp_packet( |
608 | mvm, vif, tcp, cfg->keepalive_ack_rx.data, | 607 | vif, tcp, cfg->keepalive_ack_rx.data, |
609 | cfg->keepalive_ack_rx.rx_mask, | 608 | cfg->keepalive_ack_rx.rx_mask, |
610 | &cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum, | 609 | &cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum, |
611 | MVM_TCP_RX_ACK); | 610 | MVM_TCP_RX_ACK); |
@@ -613,7 +612,7 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm, | |||
613 | 612 | ||
614 | /* WAKEUP (RX) */ | 613 | /* WAKEUP (RX) */ |
615 | iwl_mvm_build_tcp_packet( | 614 | iwl_mvm_build_tcp_packet( |
616 | mvm, vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask, | 615 | vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask, |
617 | &cfg->wake_rx.info.tcp_pseudo_header_checksum, | 616 | &cfg->wake_rx.info.tcp_pseudo_header_checksum, |
618 | MVM_TCP_RX_WAKE); | 617 | MVM_TCP_RX_WAKE); |
619 | cfg->wake_rx.info.tcp_payload_length = | 618 | cfg->wake_rx.info.tcp_payload_length = |
@@ -621,7 +620,7 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm, | |||
621 | 620 | ||
622 | /* FIN */ | 621 | /* FIN */ |
623 | iwl_mvm_build_tcp_packet( | 622 | iwl_mvm_build_tcp_packet( |
624 | mvm, vif, tcp, cfg->fin_tx.data, NULL, | 623 | vif, tcp, cfg->fin_tx.data, NULL, |
625 | &cfg->fin_tx.info.tcp_pseudo_header_checksum, | 624 | &cfg->fin_tx.info.tcp_pseudo_header_checksum, |
626 | MVM_TCP_TX_FIN); | 625 | MVM_TCP_TX_FIN); |
627 | cfg->fin_tx.info.tcp_payload_length = 0; | 626 | cfg->fin_tx.info.tcp_payload_length = 0; |
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h index 6994232f5726..700cce731770 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h | |||
@@ -228,10 +228,11 @@ struct iwl_tx_cmd { | |||
228 | __le16 len; | 228 | __le16 len; |
229 | __le16 next_frame_len; | 229 | __le16 next_frame_len; |
230 | __le32 tx_flags; | 230 | __le32 tx_flags; |
231 | /* DRAM_SCRATCH_API_U_VER_1 */ | 231 | struct { |
232 | u8 try_cnt; | 232 | u8 try_cnt; |
233 | u8 btkill_cnt; | 233 | u8 btkill_cnt; |
234 | __le16 reserved; | 234 | __le16 reserved; |
235 | } scratch; /* DRAM_SCRATCH_API_U_VER_1 */ | ||
235 | __le32 rate_n_flags; | 236 | __le32 rate_n_flags; |
236 | u8 sta_id; | 237 | u8 sta_id; |
237 | u8 sec_ctl; | 238 | u8 sec_ctl; |
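Wrapping try_cnt/btkill_cnt/reserved in a named scratch sub-struct documents the DRAM scratch area without disturbing the command's wire layout, since the member sizes and order are unchanged. A small sketch that checks this equivalence with assertions; the trimmed-down structs below are illustrative, not the real iwl_tx_cmd:

```c
#include <assert.h>
#include <stdint.h>
#include <stddef.h>

/* Regrouping fields into a named sub-struct keeps the on-the-wire layout
 * identical as long as member sizes and order do not change. */
struct tx_cmd_old {
	uint32_t tx_flags;
	uint8_t  try_cnt;
	uint8_t  btkill_cnt;
	uint16_t reserved;
	uint32_t rate_n_flags;
} __attribute__((packed));

struct tx_cmd_new {
	uint32_t tx_flags;
	struct {
		uint8_t  try_cnt;
		uint8_t  btkill_cnt;
		uint16_t reserved;
	} scratch;
	uint32_t rate_n_flags;
} __attribute__((packed));

int main(void)
{
	/* Both layouts occupy the same bytes. */
	assert(sizeof(struct tx_cmd_old) == sizeof(struct tx_cmd_new));
	assert(offsetof(struct tx_cmd_old, rate_n_flags) ==
	       offsetof(struct tx_cmd_new, rate_n_flags));
	return 0;
}
```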
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c index 46c7c0507c25..273b0cc197ab 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c | |||
@@ -193,14 +193,11 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, | |||
193 | u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm, | 193 | u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm, |
194 | struct ieee80211_vif *vif) | 194 | struct ieee80211_vif *vif) |
195 | { | 195 | { |
196 | u32 qmask, ac; | 196 | u32 qmask = 0, ac; |
197 | 197 | ||
198 | if (vif->type == NL80211_IFTYPE_P2P_DEVICE) | 198 | if (vif->type == NL80211_IFTYPE_P2P_DEVICE) |
199 | return BIT(IWL_MVM_OFFCHANNEL_QUEUE); | 199 | return BIT(IWL_MVM_OFFCHANNEL_QUEUE); |
200 | 200 | ||
201 | qmask = (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) ? | ||
202 | BIT(vif->cab_queue) : 0; | ||
203 | |||
204 | for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) | 201 | for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) |
205 | if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE) | 202 | if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE) |
206 | qmask |= BIT(vif->hw_queue[ac]); | 203 | qmask |= BIT(vif->hw_queue[ac]); |
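The queue mask is now built only from the valid per-AC hardware queues, starting from zero, with the multicast (cab) queue no longer OR'd in; the sta.c hunk below drops it from the station's TFD queue mask as well. A compact sketch of the bitmask construction, with the queue numbers and constants made up:

```c
#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))
#define NUM_ACS 4
#define INVAL_HW_QUEUE 0xff

/* Builds a per-interface queue bitmask the way the hunk above does: start
 * from 0 and OR in only the valid per-AC hardware queues. */
static uint32_t get_queues_mask(const uint8_t hw_queue[NUM_ACS])
{
	uint32_t qmask = 0;
	int ac;

	for (ac = 0; ac < NUM_ACS; ac++)
		if (hw_queue[ac] != INVAL_HW_QUEUE)
			qmask |= BIT(hw_queue[ac]);

	return qmask;
}

int main(void)
{
	uint8_t queues[NUM_ACS] = { 0, 1, 2, INVAL_HW_QUEUE };

	printf("qmask = 0x%x\n", get_queues_mask(queues)); /* prints 0x7 */
	return 0;
}
```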
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 2ed296caeb28..e08683b20531 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c | |||
@@ -81,12 +81,12 @@ | |||
81 | static const struct ieee80211_iface_limit iwl_mvm_limits[] = { | 81 | static const struct ieee80211_iface_limit iwl_mvm_limits[] = { |
82 | { | 82 | { |
83 | .max = 1, | 83 | .max = 1, |
84 | .types = BIT(NL80211_IFTYPE_STATION) | | 84 | .types = BIT(NL80211_IFTYPE_STATION), |
85 | BIT(NL80211_IFTYPE_AP), | ||
86 | }, | 85 | }, |
87 | { | 86 | { |
88 | .max = 1, | 87 | .max = 1, |
89 | .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | | 88 | .types = BIT(NL80211_IFTYPE_AP) | |
89 | BIT(NL80211_IFTYPE_P2P_CLIENT) | | ||
90 | BIT(NL80211_IFTYPE_P2P_GO), | 90 | BIT(NL80211_IFTYPE_P2P_GO), |
91 | }, | 91 | }, |
92 | { | 92 | { |
@@ -236,20 +236,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) | |||
236 | mvm->trans->ops->d3_suspend && | 236 | mvm->trans->ops->d3_suspend && |
237 | mvm->trans->ops->d3_resume && | 237 | mvm->trans->ops->d3_resume && |
238 | device_can_wakeup(mvm->trans->dev)) { | 238 | device_can_wakeup(mvm->trans->dev)) { |
239 | hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | | 239 | mvm->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | |
240 | WIPHY_WOWLAN_DISCONNECT | | 240 | WIPHY_WOWLAN_DISCONNECT | |
241 | WIPHY_WOWLAN_EAP_IDENTITY_REQ | | 241 | WIPHY_WOWLAN_EAP_IDENTITY_REQ | |
242 | WIPHY_WOWLAN_RFKILL_RELEASE; | 242 | WIPHY_WOWLAN_RFKILL_RELEASE; |
243 | if (!iwlwifi_mod_params.sw_crypto) | 243 | if (!iwlwifi_mod_params.sw_crypto) |
244 | hw->wiphy->wowlan.flags |= | 244 | mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | |
245 | WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | | 245 | WIPHY_WOWLAN_GTK_REKEY_FAILURE | |
246 | WIPHY_WOWLAN_GTK_REKEY_FAILURE | | 246 | WIPHY_WOWLAN_4WAY_HANDSHAKE; |
247 | WIPHY_WOWLAN_4WAY_HANDSHAKE; | 247 | |
248 | 248 | mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; | |
249 | hw->wiphy->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; | 249 | mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; |
250 | hw->wiphy->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; | 250 | mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; |
251 | hw->wiphy->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; | 251 | mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support; |
252 | hw->wiphy->wowlan.tcp = &iwl_mvm_wowlan_tcp_support; | 252 | hw->wiphy->wowlan = &mvm->wowlan; |
253 | } | 253 | } |
254 | #endif | 254 | #endif |
255 | 255 | ||
@@ -651,8 +651,7 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, | |||
651 | * By now, all the AC queues are empty. The AGG queues are | 651 | * By now, all the AC queues are empty. The AGG queues are |
652 | * empty too. We already got all the Tx responses for all the | 652 | * empty too. We already got all the Tx responses for all the |
653 | * packets in the queues. The drain work can have been | 653 | * packets in the queues. The drain work can have been |
654 | * triggered. Flush it. This work item takes the mutex, so kill | 654 | * triggered. Flush it. |
655 | * it before we take it. | ||
656 | */ | 655 | */ |
657 | flush_work(&mvm->sta_drained_wk); | 656 | flush_work(&mvm->sta_drained_wk); |
658 | } | 657 | } |
@@ -778,7 +777,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, | |||
778 | ret = iwl_mvm_power_update_mode(mvm, vif); | 777 | ret = iwl_mvm_power_update_mode(mvm, vif); |
779 | if (ret) | 778 | if (ret) |
780 | IWL_ERR(mvm, "failed to update power mode\n"); | 779 | IWL_ERR(mvm, "failed to update power mode\n"); |
781 | } else if (changes & BSS_CHANGED_DTIM_PERIOD) { | 780 | } else if (changes & BSS_CHANGED_BEACON_INFO) { |
782 | /* | 781 | /* |
783 | * We received a beacon _after_ association so | 782 | * We received a beacon _after_ association so |
784 | * remove the session protection. | 783 | * remove the session protection. |
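The WoWLAN hunk adapts to cfg80211 taking a pointer to the driver's support structure rather than filling an embedded copy, which is why the data moves into struct iwl_mvm (see the mvm.h hunk below) and hw->wiphy->wowlan is pointed at it; the mwifiex change later in this series does the same with a static const structure. A userspace sketch of the ownership pattern, with all type and field names as local stand-ins:

```c
#include <stdio.h>

/* Local stand-ins for the cfg80211 types: the wiphy only holds a pointer,
 * so the driver must keep its support structure alive for the lifetime of
 * the registration. */
struct wowlan_support {
	unsigned int flags;
	int n_patterns;
};

struct wiphy {
	const struct wowlan_support *wowlan;	/* was: embedded struct */
};

struct drv_priv {
	struct wiphy *wiphy;
	struct wowlan_support wowlan;		/* driver-owned storage */
};

static void setup_wowlan(struct drv_priv *drv)
{
	drv->wowlan.flags = 0x1;		/* e.g. magic-packet wake */
	drv->wowlan.n_patterns = 20;
	drv->wiphy->wowlan = &drv->wowlan;
}

int main(void)
{
	struct wiphy w = { 0 };
	struct drv_priv drv = { .wiphy = &w };

	setup_wowlan(&drv);
	printf("n_patterns = %d\n", w.wowlan->n_patterns);
	return 0;
}
```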
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 4e10aae71038..c7409f159a36 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h | |||
@@ -458,6 +458,7 @@ struct iwl_mvm { | |||
458 | struct ieee80211_vif *p2p_device_vif; | 458 | struct ieee80211_vif *p2p_device_vif; |
459 | 459 | ||
460 | #ifdef CONFIG_PM_SLEEP | 460 | #ifdef CONFIG_PM_SLEEP |
461 | struct wiphy_wowlan_support wowlan; | ||
461 | int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen; | 462 | int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen; |
462 | #ifdef CONFIG_IWLWIFI_DEBUGFS | 463 | #ifdef CONFIG_IWLWIFI_DEBUGFS |
463 | bool d3_test_active; | 464 | bool d3_test_active; |
@@ -689,16 +690,11 @@ void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif); | |||
689 | void | 690 | void |
690 | iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, | 691 | iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, |
691 | struct iwl_beacon_filter_cmd *cmd); | 692 | struct iwl_beacon_filter_cmd *cmd); |
692 | int iwl_mvm_dbgfs_set_fw_dbg_log(struct iwl_mvm *mvm); | ||
693 | #else | 693 | #else |
694 | static inline void | 694 | static inline void |
695 | iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, | 695 | iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, |
696 | struct iwl_beacon_filter_cmd *cmd) | 696 | struct iwl_beacon_filter_cmd *cmd) |
697 | {} | 697 | {} |
698 | static inline int iwl_mvm_dbgfs_set_fw_dbg_log(struct iwl_mvm *mvm) | ||
699 | { | ||
700 | return 0; | ||
701 | } | ||
702 | #endif | 698 | #endif |
703 | int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, | 699 | int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, |
704 | struct ieee80211_vif *vif); | 700 | struct ieee80211_vif *vif); |
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 2278858d5658..62fe5209093b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c | |||
@@ -229,9 +229,6 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, | |||
229 | if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) | 229 | if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) |
230 | mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]); | 230 | mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]); |
231 | 231 | ||
232 | if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) | ||
233 | mvm_sta->tfd_queue_msk |= BIT(vif->cab_queue); | ||
234 | |||
235 | /* for HW restart - need to reset the seq_number etc... */ | 232 | /* for HW restart - need to reset the seq_number etc... */ |
236 | memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data)); | 233 | memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data)); |
237 | 234 | ||
@@ -1292,17 +1289,11 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, | |||
1292 | struct iwl_mvm_add_sta_cmd cmd = { | 1289 | struct iwl_mvm_add_sta_cmd cmd = { |
1293 | .add_modify = STA_MODE_MODIFY, | 1290 | .add_modify = STA_MODE_MODIFY, |
1294 | .sta_id = mvmsta->sta_id, | 1291 | .sta_id = mvmsta->sta_id, |
1295 | .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT, | 1292 | .station_flags_msk = cpu_to_le32(STA_FLG_PS), |
1296 | .sleep_state_flags = cpu_to_le16(STA_SLEEP_STATE_AWAKE), | ||
1297 | .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), | 1293 | .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), |
1298 | }; | 1294 | }; |
1299 | int ret; | 1295 | int ret; |
1300 | 1296 | ||
1301 | /* | ||
1302 | * Same modify mask for sleep_tx_count and sleep_state_flags but this | ||
1303 | * should be fine since if we set the STA as "awake", then | ||
1304 | * sleep_tx_count is not relevant. | ||
1305 | */ | ||
1306 | ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); | 1297 | ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); |
1307 | if (ret) | 1298 | if (ret) |
1308 | IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); | 1299 | IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); |
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h index 3efa0a0cc987..94b265eb32b8 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/iwlwifi/mvm/sta.h | |||
@@ -250,7 +250,6 @@ enum iwl_mvm_agg_state { | |||
250 | * the first packet to be sent in legacy HW queue in Tx AGG stop flow. | 250 | * the first packet to be sent in legacy HW queue in Tx AGG stop flow. |
251 | * Basically when next_reclaimed reaches ssn, we can tell mac80211 that | 251 | * Basically when next_reclaimed reaches ssn, we can tell mac80211 that |
252 | * we are ready to finish the Tx AGG stop / start flow. | 252 | * we are ready to finish the Tx AGG stop / start flow. |
253 | * @wait_for_ba: Expect block-ack before next Tx reply | ||
254 | */ | 253 | */ |
255 | struct iwl_mvm_tid_data { | 254 | struct iwl_mvm_tid_data { |
256 | u16 seq_number; | 255 | u16 seq_number; |
@@ -260,7 +259,6 @@ struct iwl_mvm_tid_data { | |||
260 | enum iwl_mvm_agg_state state; | 259 | enum iwl_mvm_agg_state state; |
261 | u16 txq_id; | 260 | u16 txq_id; |
262 | u16 ssn; | 261 | u16 ssn; |
263 | bool wait_for_ba; | ||
264 | }; | 262 | }; |
265 | 263 | ||
266 | /** | 264 | /** |
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index b9ba4e71ea4a..f0e96a927407 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c | |||
@@ -408,7 +408,6 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, | |||
408 | IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id, | 408 | IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id, |
409 | tid, txq_id, seq_number); | 409 | tid, txq_id, seq_number); |
410 | 410 | ||
411 | /* NOTE: aggregation will need changes here (for txq id) */ | ||
412 | if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) | 411 | if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) |
413 | goto drop_unlock_sta; | 412 | goto drop_unlock_sta; |
414 | 413 | ||
@@ -610,8 +609,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, | |||
610 | !(info->flags & IEEE80211_TX_STAT_ACK)) | 609 | !(info->flags & IEEE80211_TX_STAT_ACK)) |
611 | info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; | 610 | info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; |
612 | 611 | ||
613 | /* W/A FW bug: seq_ctl is wrong when the queue is flushed */ | 612 | /* W/A FW bug: seq_ctl is wrong when the status isn't success */ |
614 | if (status == TX_STATUS_FAIL_FIFO_FLUSHED) { | 613 | if (status != TX_STATUS_SUCCESS) { |
615 | struct ieee80211_hdr *hdr = (void *)skb->data; | 614 | struct ieee80211_hdr *hdr = (void *)skb->data; |
616 | seq_ctl = le16_to_cpu(hdr->seq_ctrl); | 615 | seq_ctl = le16_to_cpu(hdr->seq_ctrl); |
617 | } | 616 | } |

diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index db7bdd35a9c5..81f3ea5b09a4 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c | |||
@@ -78,6 +78,7 @@ | |||
78 | 78 | ||
79 | /* Hardware specific file defines the PCI IDs table for that hardware module */ | 79 | /* Hardware specific file defines the PCI IDs table for that hardware module */ |
80 | static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { | 80 | static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { |
81 | #if IS_ENABLED(CONFIG_IWLDVM) | ||
81 | {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ | 82 | {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ |
82 | {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ | 83 | {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ |
83 | {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ | 84 | {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ |
@@ -253,7 +254,9 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { | |||
253 | {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)}, | 254 | {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)}, |
254 | {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)}, | 255 | {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)}, |
255 | {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)}, | 256 | {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)}, |
257 | #endif /* CONFIG_IWLDVM */ | ||
256 | 258 | ||
259 | #if IS_ENABLED(CONFIG_IWLMVM) | ||
257 | /* 7000 Series */ | 260 | /* 7000 Series */ |
258 | {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, | 261 | {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, |
259 | {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)}, | 262 | {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)}, |
@@ -304,6 +307,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { | |||
304 | {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, | 307 | {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, |
305 | {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, | 308 | {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, |
306 | {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, | 309 | {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, |
310 | #endif /* CONFIG_IWLMVM */ | ||
307 | 311 | ||
308 | {0} | 312 | {0} |
309 | }; | 313 | }; |
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index 148843e7f34f..b654dcdd048a 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h | |||
@@ -217,6 +217,7 @@ struct iwl_pcie_txq_scratch_buf { | |||
217 | * @trans_pcie: pointer back to transport (for timer) | 217 | * @trans_pcie: pointer back to transport (for timer) |
218 | * @need_update: indicates need to update read/write index | 218 | * @need_update: indicates need to update read/write index |
219 | * @active: stores if queue is active | 219 | * @active: stores if queue is active |
220 | * @ampdu: true if this queue is an ampdu queue for a specific RA/TID | ||
220 | * | 221 | * |
221 | * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame | 222 | * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame |
222 | * descriptors) and required locking structures. | 223 | * descriptors) and required locking structures. |
@@ -232,6 +233,7 @@ struct iwl_txq { | |||
232 | struct iwl_trans_pcie *trans_pcie; | 233 | struct iwl_trans_pcie *trans_pcie; |
233 | u8 need_update; | 234 | u8 need_update; |
234 | u8 active; | 235 | u8 active; |
236 | bool ampdu; | ||
235 | }; | 237 | }; |
236 | 238 | ||
237 | static inline dma_addr_t | 239 | static inline dma_addr_t |
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index 567e67ad1f61..3688dc5ba1ac 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c | |||
@@ -802,9 +802,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) | |||
802 | u32 handled = 0; | 802 | u32 handled = 0; |
803 | unsigned long flags; | 803 | unsigned long flags; |
804 | u32 i; | 804 | u32 i; |
805 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
806 | u32 inta_mask; | ||
807 | #endif | ||
808 | 805 | ||
809 | lock_map_acquire(&trans->sync_cmd_lockdep_map); | 806 | lock_map_acquire(&trans->sync_cmd_lockdep_map); |
810 | 807 | ||
@@ -826,14 +823,9 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) | |||
826 | 823 | ||
827 | inta = trans_pcie->inta; | 824 | inta = trans_pcie->inta; |
828 | 825 | ||
829 | #ifdef CONFIG_IWLWIFI_DEBUG | 826 | if (iwl_have_debug_level(IWL_DL_ISR)) |
830 | if (iwl_have_debug_level(IWL_DL_ISR)) { | ||
831 | /* just for debug */ | ||
832 | inta_mask = iwl_read32(trans, CSR_INT_MASK); | ||
833 | IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", | 827 | IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", |
834 | inta, inta_mask); | 828 | inta, iwl_read32(trans, CSR_INT_MASK)); |
835 | } | ||
836 | #endif | ||
837 | 829 | ||
838 | /* saved interrupt in inta variable now we can reset trans_pcie->inta */ | 830 | /* saved interrupt in inta variable now we can reset trans_pcie->inta */ |
839 | trans_pcie->inta = 0; | 831 | trans_pcie->inta = 0; |
@@ -855,12 +847,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) | |||
855 | goto out; | 847 | goto out; |
856 | } | 848 | } |
857 | 849 | ||
858 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
859 | if (iwl_have_debug_level(IWL_DL_ISR)) { | 850 | if (iwl_have_debug_level(IWL_DL_ISR)) { |
860 | /* NIC fires this, but we don't use it, redundant with WAKEUP */ | 851 | /* NIC fires this, but we don't use it, redundant with WAKEUP */ |
861 | if (inta & CSR_INT_BIT_SCD) { | 852 | if (inta & CSR_INT_BIT_SCD) { |
862 | IWL_DEBUG_ISR(trans, "Scheduler finished to transmit " | 853 | IWL_DEBUG_ISR(trans, |
863 | "the frame/frames.\n"); | 854 | "Scheduler finished to transmit the frame/frames.\n"); |
864 | isr_stats->sch++; | 855 | isr_stats->sch++; |
865 | } | 856 | } |
866 | 857 | ||
@@ -870,7 +861,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) | |||
870 | isr_stats->alive++; | 861 | isr_stats->alive++; |
871 | } | 862 | } |
872 | } | 863 | } |
873 | #endif | 864 | |
874 | /* Safely ignore these bits for debug checks below */ | 865 | /* Safely ignore these bits for debug checks below */ |
875 | inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); | 866 | inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); |
876 | 867 | ||
@@ -1118,9 +1109,6 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data) | |||
1118 | struct iwl_trans *trans = data; | 1109 | struct iwl_trans *trans = data; |
1119 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1110 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1120 | u32 inta, inta_mask; | 1111 | u32 inta, inta_mask; |
1121 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1122 | u32 inta_fh; | ||
1123 | #endif | ||
1124 | 1112 | ||
1125 | lockdep_assert_held(&trans_pcie->irq_lock); | 1113 | lockdep_assert_held(&trans_pcie->irq_lock); |
1126 | 1114 | ||
@@ -1159,13 +1147,11 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data) | |||
1159 | return IRQ_HANDLED; | 1147 | return IRQ_HANDLED; |
1160 | } | 1148 | } |
1161 | 1149 | ||
1162 | #ifdef CONFIG_IWLWIFI_DEBUG | 1150 | if (iwl_have_debug_level(IWL_DL_ISR)) |
1163 | if (iwl_have_debug_level(IWL_DL_ISR)) { | 1151 | IWL_DEBUG_ISR(trans, |
1164 | inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS); | 1152 | "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", |
1165 | IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, " | 1153 | inta, inta_mask, |
1166 | "fh 0x%08x\n", inta, inta_mask, inta_fh); | 1154 | iwl_read32(trans, CSR_FH_INT_STATUS)); |
1167 | } | ||
1168 | #endif | ||
1169 | 1155 | ||
1170 | trans_pcie->inta |= inta; | 1156 | trans_pcie->inta |= inta; |
1171 | /* the thread will service interrupts and re-enable them */ | 1157 | /* the thread will service interrupts and re-enable them */ |
@@ -1198,7 +1184,7 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data) | |||
1198 | { | 1184 | { |
1199 | struct iwl_trans *trans = data; | 1185 | struct iwl_trans *trans = data; |
1200 | struct iwl_trans_pcie *trans_pcie; | 1186 | struct iwl_trans_pcie *trans_pcie; |
1201 | u32 inta, inta_mask; | 1187 | u32 inta; |
1202 | u32 val = 0; | 1188 | u32 val = 0; |
1203 | u32 read; | 1189 | u32 read; |
1204 | unsigned long flags; | 1190 | unsigned long flags; |
@@ -1226,7 +1212,6 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data) | |||
1226 | * If we have something to service, the tasklet will re-enable ints. | 1212 | * If we have something to service, the tasklet will re-enable ints. |
1227 | * If we *don't* have something, we'll re-enable before leaving here. | 1213 | * If we *don't* have something, we'll re-enable before leaving here. |
1228 | */ | 1214 | */ |
1229 | inta_mask = iwl_read32(trans, CSR_INT_MASK); | ||
1230 | iwl_write32(trans, CSR_INT_MASK, 0x00000000); | 1215 | iwl_write32(trans, CSR_INT_MASK, 0x00000000); |
1231 | 1216 | ||
1232 | /* Ignore interrupt if there's nothing in NIC to service. | 1217 | /* Ignore interrupt if there's nothing in NIC to service. |
@@ -1271,8 +1256,11 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data) | |||
1271 | val |= 0x8000; | 1256 | val |= 0x8000; |
1272 | 1257 | ||
1273 | inta = (0xff & val) | ((0xff00 & val) << 16); | 1258 | inta = (0xff & val) | ((0xff00 & val) << 16); |
1274 | IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n", | 1259 | IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled(sw) 0x%08x ict 0x%08x\n", |
1275 | inta, inta_mask, val); | 1260 | inta, trans_pcie->inta_mask, val); |
1261 | if (iwl_have_debug_level(IWL_DL_ISR)) | ||
1262 | IWL_DEBUG_ISR(trans, "enabled(hw) 0x%08x\n", | ||
1263 | iwl_read32(trans, CSR_INT_MASK)); | ||
1276 | 1264 | ||
1277 | inta &= trans_pcie->inta_mask; | 1265 | inta &= trans_pcie->inta_mask; |
1278 | trans_pcie->inta |= inta; | 1266 | trans_pcie->inta |= inta; |
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index f65da1984d91..c47c92165aba 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c | |||
@@ -576,10 +576,16 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) | |||
576 | 576 | ||
577 | spin_lock_bh(&txq->lock); | 577 | spin_lock_bh(&txq->lock); |
578 | while (q->write_ptr != q->read_ptr) { | 578 | while (q->write_ptr != q->read_ptr) { |
579 | IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", | ||
580 | txq_id, q->read_ptr); | ||
579 | iwl_pcie_txq_free_tfd(trans, txq); | 581 | iwl_pcie_txq_free_tfd(trans, txq); |
580 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); | 582 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); |
581 | } | 583 | } |
584 | txq->active = false; | ||
582 | spin_unlock_bh(&txq->lock); | 585 | spin_unlock_bh(&txq->lock); |
586 | |||
587 | /* just in case - this queue may have been stopped */ | ||
588 | iwl_wake_queue(trans, txq); | ||
583 | } | 589 | } |
584 | 590 | ||
585 | /* | 591 | /* |
@@ -927,6 +933,12 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, | |||
927 | 933 | ||
928 | spin_lock_bh(&txq->lock); | 934 | spin_lock_bh(&txq->lock); |
929 | 935 | ||
936 | if (!txq->active) { | ||
937 | IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", | ||
938 | txq_id, ssn); | ||
939 | goto out; | ||
940 | } | ||
941 | |||
930 | if (txq->q.read_ptr == tfd_num) | 942 | if (txq->q.read_ptr == tfd_num) |
931 | goto out; | 943 | goto out; |
932 | 944 | ||
@@ -1073,6 +1085,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, | |||
1073 | 1085 | ||
1074 | /* enable aggregations for the queue */ | 1086 | /* enable aggregations for the queue */ |
1075 | iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); | 1087 | iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); |
1088 | trans_pcie->txq[txq_id].ampdu = true; | ||
1076 | } else { | 1089 | } else { |
1077 | /* | 1090 | /* |
1078 | * disable aggregations for the queue, this will also make the | 1091 | * disable aggregations for the queue, this will also make the |
@@ -1107,6 +1120,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, | |||
1107 | (fifo << SCD_QUEUE_STTS_REG_POS_TXF) | | 1120 | (fifo << SCD_QUEUE_STTS_REG_POS_TXF) | |
1108 | (1 << SCD_QUEUE_STTS_REG_POS_WSL) | | 1121 | (1 << SCD_QUEUE_STTS_REG_POS_WSL) | |
1109 | SCD_QUEUE_STTS_REG_MSK); | 1122 | SCD_QUEUE_STTS_REG_MSK); |
1123 | trans_pcie->txq[txq_id].active = true; | ||
1110 | IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n", | 1124 | IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n", |
1111 | txq_id, fifo, ssn & 0xff); | 1125 | txq_id, fifo, ssn & 0xff); |
1112 | } | 1126 | } |
@@ -1129,6 +1143,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) | |||
1129 | ARRAY_SIZE(zero_val)); | 1143 | ARRAY_SIZE(zero_val)); |
1130 | 1144 | ||
1131 | iwl_pcie_txq_unmap(trans, txq_id); | 1145 | iwl_pcie_txq_unmap(trans, txq_id); |
1146 | trans_pcie->txq[txq_id].ampdu = false; | ||
1132 | 1147 | ||
1133 | IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); | 1148 | IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); |
1134 | } | 1149 | } |
@@ -1599,7 +1614,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, | |||
1599 | u8 wait_write_ptr = 0; | 1614 | u8 wait_write_ptr = 0; |
1600 | __le16 fc = hdr->frame_control; | 1615 | __le16 fc = hdr->frame_control; |
1601 | u8 hdr_len = ieee80211_hdrlen(fc); | 1616 | u8 hdr_len = ieee80211_hdrlen(fc); |
1602 | u16 __maybe_unused wifi_seq; | 1617 | u16 wifi_seq; |
1603 | 1618 | ||
1604 | txq = &trans_pcie->txq[txq_id]; | 1619 | txq = &trans_pcie->txq[txq_id]; |
1605 | q = &txq->q; | 1620 | q = &txq->q; |
@@ -1616,13 +1631,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, | |||
1616 | * the BA. | 1631 | * the BA. |
1617 | * Check here that the packets are in the right place on the ring. | 1632 | * Check here that the packets are in the right place on the ring. |
1618 | */ | 1633 | */ |
1619 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1620 | wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); | 1634 | wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); |
1621 | WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) && | 1635 | WARN_ONCE(trans_pcie->txq[txq_id].ampdu && |
1622 | ((wifi_seq & 0xff) != q->write_ptr), | 1636 | (wifi_seq & 0xff) != q->write_ptr, |
1623 | "Q: %d WiFi Seq %d tfdNum %d", | 1637 | "Q: %d WiFi Seq %d tfdNum %d", |
1624 | txq_id, wifi_seq, q->write_ptr); | 1638 | txq_id, wifi_seq, q->write_ptr); |
1625 | #endif | ||
1626 | 1639 | ||
1627 | /* Set up driver data for this TFD */ | 1640 | /* Set up driver data for this TFD */ |
1628 | txq->entries[q->write_ptr].skb = skb; | 1641 | txq->entries[q->write_ptr].skb = skb; |
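The new txq->active flag closes a race: unmapping a queue clears the flag, and a reclaim that arrives for an already-disabled queue is logged and ignored instead of walking freed entries. A stripped-down model of that guard; the ring handling is reduced to counters and the names are illustrative:

```c
#include <stdbool.h>
#include <stdio.h>

/* Minimal model of the txq->active guard added above. */
struct txq {
	bool active;
	int read_ptr, write_ptr;
};

static void txq_unmap(struct txq *q)
{
	q->read_ptr = q->write_ptr;	/* free everything still queued */
	q->active = false;
}

static void reclaim(struct txq *q, int id, int upto)
{
	if (!q->active) {
		printf("Q %d inactive - ignoring reclaim up to %d\n", id, upto);
		return;
	}
	while (q->read_ptr != upto)
		q->read_ptr++;		/* hand frames back to the stack */
}

int main(void)
{
	struct txq q = { .active = true, .read_ptr = 0, .write_ptr = 5 };

	txq_unmap(&q);
	reclaim(&q, 14, 3);		/* late completion after disable */
	return 0;
}
```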
diff --git a/drivers/net/wireless/mwifiex/11h.c b/drivers/net/wireless/mwifiex/11h.c new file mode 100644 index 000000000000..8d683070bdb3 --- /dev/null +++ b/drivers/net/wireless/mwifiex/11h.c | |||
@@ -0,0 +1,101 @@ | |||
1 | /* | ||
2 | * Marvell Wireless LAN device driver: 802.11h | ||
3 | * | ||
4 | * Copyright (C) 2013, Marvell International Ltd. | ||
5 | * | ||
6 | * This software file (the "File") is distributed by Marvell International | ||
7 | * Ltd. under the terms of the GNU General Public License Version 2, June 1991 | ||
8 | * (the "License"). You may use, redistribute and/or modify this File in | ||
9 | * accordance with the terms and conditions of the License, a copy of which | ||
10 | * is available by writing to the Free Software Foundation, Inc., | ||
11 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the | ||
12 | * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. | ||
13 | * | ||
14 | * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE | ||
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE | ||
16 | * ARE EXPRESSLY DISCLAIMED. The License provides additional details about | ||
17 | * this warranty disclaimer. | ||
18 | */ | ||
19 | |||
20 | #include "main.h" | ||
21 | #include "fw.h" | ||
22 | |||
23 | |||
24 | /* This function appends 11h info to a buffer while joining an | ||
25 | * infrastructure BSS | ||
26 | */ | ||
27 | static void | ||
28 | mwifiex_11h_process_infra_join(struct mwifiex_private *priv, u8 **buffer, | ||
29 | struct mwifiex_bssdescriptor *bss_desc) | ||
30 | { | ||
31 | struct mwifiex_ie_types_header *ie_header; | ||
32 | struct mwifiex_ie_types_pwr_capability *cap; | ||
33 | struct mwifiex_ie_types_local_pwr_constraint *constraint; | ||
34 | struct ieee80211_supported_band *sband; | ||
35 | u8 radio_type; | ||
36 | int i; | ||
37 | |||
38 | if (!buffer || !(*buffer)) | ||
39 | return; | ||
40 | |||
41 | radio_type = mwifiex_band_to_radio_type((u8) bss_desc->bss_band); | ||
42 | sband = priv->wdev->wiphy->bands[radio_type]; | ||
43 | |||
44 | cap = (struct mwifiex_ie_types_pwr_capability *)*buffer; | ||
45 | cap->header.type = cpu_to_le16(WLAN_EID_PWR_CAPABILITY); | ||
46 | cap->header.len = cpu_to_le16(2); | ||
47 | cap->min_pwr = 0; | ||
48 | cap->max_pwr = 0; | ||
49 | *buffer += sizeof(*cap); | ||
50 | |||
51 | constraint = (struct mwifiex_ie_types_local_pwr_constraint *)*buffer; | ||
52 | constraint->header.type = cpu_to_le16(WLAN_EID_PWR_CONSTRAINT); | ||
53 | constraint->header.len = cpu_to_le16(2); | ||
54 | constraint->chan = bss_desc->channel; | ||
55 | constraint->constraint = bss_desc->local_constraint; | ||
56 | *buffer += sizeof(*constraint); | ||
57 | |||
58 | ie_header = (struct mwifiex_ie_types_header *)*buffer; | ||
59 | ie_header->type = cpu_to_le16(TLV_TYPE_PASSTHROUGH); | ||
60 | ie_header->len = cpu_to_le16(2 * sband->n_channels + 2); | ||
61 | *buffer += sizeof(*ie_header); | ||
62 | *(*buffer)++ = WLAN_EID_SUPPORTED_CHANNELS; | ||
63 | *(*buffer)++ = 2 * sband->n_channels; | ||
64 | for (i = 0; i < sband->n_channels; i++) { | ||
65 | *(*buffer)++ = ieee80211_frequency_to_channel( | ||
66 | sband->channels[i].center_freq); | ||
67 | *(*buffer)++ = 1; /* one channel in the subband */ | ||
68 | } | ||
69 | } | ||
70 | |||
71 | /* Enable or disable the 11h extensions in the firmware */ | ||
72 | static int mwifiex_11h_activate(struct mwifiex_private *priv, bool flag) | ||
73 | { | ||
74 | u32 enable = flag; | ||
75 | |||
76 | return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB, | ||
77 | HostCmd_ACT_GEN_SET, DOT11H_I, &enable); | ||
78 | } | ||
79 | |||
80 | /* This function processes the TLV buffer for a pending BSS Join command. | ||
81 | * | ||
82 | * Activate 11h functionality in the firmware if the spectrum management | ||
83 | * capability bit is found in the network we are joining. Also, necessary | ||
84 | * TLVs are set based on requested network's 11h capability. | ||
85 | */ | ||
86 | void mwifiex_11h_process_join(struct mwifiex_private *priv, u8 **buffer, | ||
87 | struct mwifiex_bssdescriptor *bss_desc) | ||
88 | { | ||
89 | if (bss_desc->sensed_11h) { | ||
90 | /* Activate 11h functions in firmware, turns on capability | ||
91 | * bit | ||
92 | */ | ||
93 | mwifiex_11h_activate(priv, true); | ||
94 | bss_desc->cap_info_bitmap |= WLAN_CAPABILITY_SPECTRUM_MGMT; | ||
95 | mwifiex_11h_process_infra_join(priv, buffer, bss_desc); | ||
96 | } else { | ||
97 | /* Deactivate 11h functions in the firmware */ | ||
98 | mwifiex_11h_activate(priv, false); | ||
99 | bss_desc->cap_info_bitmap &= ~WLAN_CAPABILITY_SPECTRUM_MGMT; | ||
100 | } | ||
101 | } | ||
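mwifiex_11h_process_infra_join() builds its power-capability, power-constraint and supported-channels TLVs by writing through a u8 ** cursor that each append advances, so the join command assembly in join.c can simply chain helpers. A userspace sketch of that cursor style; the TLV header layout is simplified (no endian conversion) and the element IDs appear only as numeric comments:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified TLV header; the real mwifiex headers use little-endian
 * fields and dedicated per-TLV structures. */
struct tlv_header {
	uint16_t type;
	uint16_t len;
} __attribute__((packed));

/* Write one TLV at the cursor and advance it, mirroring the u8 **buffer
 * style used in the new 11h.c. */
static void append_tlv(uint8_t **cursor, uint16_t type,
		       const void *payload, uint16_t len)
{
	struct tlv_header hdr = { .type = type, .len = len };

	memcpy(*cursor, &hdr, sizeof(hdr));
	*cursor += sizeof(hdr);
	memcpy(*cursor, payload, len);
	*cursor += len;
}

int main(void)
{
	uint8_t frame[64], *pos = frame;
	uint8_t pwr_cap[2] = { 0, 0 };		/* min_pwr, max_pwr */
	uint8_t constraint[2] = { 11, 3 };	/* channel, constraint */

	append_tlv(&pos, 33 /* power capability */, pwr_cap, 2);
	append_tlv(&pos, 32 /* power constraint */, constraint, 2);
	printf("built %ld bytes of TLVs\n", (long)(pos - frame));
	return 0;
}
```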
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile index ecf28464367f..a42a506fd32b 100644 --- a/drivers/net/wireless/mwifiex/Makefile +++ b/drivers/net/wireless/mwifiex/Makefile | |||
@@ -40,6 +40,7 @@ mwifiex-y += sta_rx.o | |||
40 | mwifiex-y += uap_txrx.o | 40 | mwifiex-y += uap_txrx.o |
41 | mwifiex-y += cfg80211.o | 41 | mwifiex-y += cfg80211.o |
42 | mwifiex-y += ethtool.o | 42 | mwifiex-y += ethtool.o |
43 | mwifiex-y += 11h.o | ||
43 | mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o | 44 | mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o |
44 | obj-$(CONFIG_MWIFIEX) += mwifiex.o | 45 | obj-$(CONFIG_MWIFIEX) += mwifiex.o |
45 | 46 | ||
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index 00a82817eb6b..ef5fa890a286 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c | |||
@@ -20,6 +20,9 @@ | |||
20 | #include "cfg80211.h" | 20 | #include "cfg80211.h" |
21 | #include "main.h" | 21 | #include "main.h" |
22 | 22 | ||
23 | static char *reg_alpha2; | ||
24 | module_param(reg_alpha2, charp, 0); | ||
25 | |||
23 | static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = { | 26 | static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = { |
24 | { | 27 | { |
25 | .max = 2, .types = BIT(NL80211_IFTYPE_STATION), | 28 | .max = 2, .types = BIT(NL80211_IFTYPE_STATION), |
@@ -2475,6 +2478,27 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { | |||
2475 | #endif | 2478 | #endif |
2476 | }; | 2479 | }; |
2477 | 2480 | ||
2481 | #ifdef CONFIG_PM | ||
2482 | static const struct wiphy_wowlan_support mwifiex_wowlan_support = { | ||
2483 | .flags = WIPHY_WOWLAN_MAGIC_PKT, | ||
2484 | .n_patterns = MWIFIEX_MAX_FILTERS, | ||
2485 | .pattern_min_len = 1, | ||
2486 | .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN, | ||
2487 | .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN, | ||
2488 | }; | ||
2489 | #endif | ||
2490 | |||
2491 | static bool mwifiex_is_valid_alpha2(const char *alpha2) | ||
2492 | { | ||
2493 | if (!alpha2 || strlen(alpha2) != 2) | ||
2494 | return false; | ||
2495 | |||
2496 | if (isalpha(alpha2[0]) && isalpha(alpha2[1])) | ||
2497 | return true; | ||
2498 | |||
2499 | return false; | ||
2500 | } | ||
2501 | |||
2478 | /* | 2502 | /* |
2479 | * This function registers the device with CFG802.11 subsystem. | 2503 | * This function registers the device with CFG802.11 subsystem. |
2480 | * | 2504 | * |
@@ -2527,16 +2551,13 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) | |||
2527 | WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | | 2551 | WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | |
2528 | WIPHY_FLAG_AP_UAPSD | | 2552 | WIPHY_FLAG_AP_UAPSD | |
2529 | WIPHY_FLAG_CUSTOM_REGULATORY | | 2553 | WIPHY_FLAG_CUSTOM_REGULATORY | |
2554 | WIPHY_FLAG_STRICT_REGULATORY | | ||
2530 | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; | 2555 | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; |
2531 | 2556 | ||
2532 | wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom); | 2557 | wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom); |
2533 | 2558 | ||
2534 | #ifdef CONFIG_PM | 2559 | #ifdef CONFIG_PM |
2535 | wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT; | 2560 | wiphy->wowlan = &mwifiex_wowlan_support; |
2536 | wiphy->wowlan.n_patterns = MWIFIEX_MAX_FILTERS; | ||
2537 | wiphy->wowlan.pattern_min_len = 1; | ||
2538 | wiphy->wowlan.pattern_max_len = MWIFIEX_MAX_PATTERN_LEN; | ||
2539 | wiphy->wowlan.max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN; | ||
2540 | #endif | 2561 | #endif |
2541 | 2562 | ||
2542 | wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | | 2563 | wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | |
@@ -2568,10 +2589,16 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) | |||
2568 | wiphy_free(wiphy); | 2589 | wiphy_free(wiphy); |
2569 | return ret; | 2590 | return ret; |
2570 | } | 2591 | } |
2571 | country_code = mwifiex_11d_code_2_region(priv->adapter->region_code); | 2592 | |
2572 | if (country_code) | 2593 | if (reg_alpha2 && mwifiex_is_valid_alpha2(reg_alpha2)) { |
2573 | dev_info(adapter->dev, | 2594 | wiphy_info(wiphy, "driver hint alpha2: %2.2s\n", reg_alpha2); |
2574 | "ignoring F/W country code %2.2s\n", country_code); | 2595 | regulatory_hint(wiphy, reg_alpha2); |
2596 | } else { | ||
2597 | country_code = mwifiex_11d_code_2_region(adapter->region_code); | ||
2598 | if (country_code) | ||
2599 | wiphy_info(wiphy, "ignoring F/W country code %2.2s\n", | ||
2600 | country_code); | ||
2601 | } | ||
2575 | 2602 | ||
2576 | adapter->wiphy = wiphy; | 2603 | adapter->wiphy = wiphy; |
2577 | return ret; | 2604 | return ret; |
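Besides switching to the shared wowlan support structure, the cfg80211.c hunk adds a reg_alpha2 module parameter that is forwarded to regulatory_hint() only after a basic sanity check that it looks like a two-letter country code; otherwise the firmware region code is reported and ignored as before. A standalone sketch of that validation:

```c
#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the reg_alpha2 check added above: accept only a two-letter,
 * alphabetic country code. */
static bool is_valid_alpha2(const char *alpha2)
{
	if (!alpha2 || strlen(alpha2) != 2)
		return false;
	return isalpha((unsigned char)alpha2[0]) &&
	       isalpha((unsigned char)alpha2[1]);
}

int main(void)
{
	const char *samples[] = { "US", "gb", "U1", "USA", "" };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%-4s -> %s\n", samples[i],
		       is_valid_alpha2(samples[i]) ? "hint" : "ignore");
	return 0;
}
```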
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index d6ada7354c14..1b45aa533300 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h | |||
@@ -245,6 +245,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { | |||
245 | #define HT_BW_20 0 | 245 | #define HT_BW_20 0 |
246 | #define HT_BW_40 1 | 246 | #define HT_BW_40 1 |
247 | 247 | ||
248 | #define DFS_CHAN_MOVE_TIME 10000 | ||
249 | |||
248 | #define HostCmd_CMD_GET_HW_SPEC 0x0003 | 250 | #define HostCmd_CMD_GET_HW_SPEC 0x0003 |
249 | #define HostCmd_CMD_802_11_SCAN 0x0006 | 251 | #define HostCmd_CMD_802_11_SCAN 0x0006 |
250 | #define HostCmd_CMD_802_11_GET_LOG 0x000b | 252 | #define HostCmd_CMD_802_11_GET_LOG 0x000b |
@@ -438,6 +440,7 @@ enum P2P_MODES { | |||
438 | #define EVENT_BW_CHANGE 0x00000048 | 440 | #define EVENT_BW_CHANGE 0x00000048 |
439 | #define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c | 441 | #define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c |
440 | #define EVENT_HOSTWAKE_STAIE 0x0000004d | 442 | #define EVENT_HOSTWAKE_STAIE 0x0000004d |
443 | #define EVENT_CHANNEL_SWITCH_ANN 0x00000050 | ||
441 | #define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f | 444 | #define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f |
442 | 445 | ||
443 | #define EVENT_ID_MASK 0xffff | 446 | #define EVENT_ID_MASK 0xffff |
@@ -975,6 +978,7 @@ enum SNMP_MIB_INDEX { | |||
975 | LONG_RETRY_LIM_I = 7, | 978 | LONG_RETRY_LIM_I = 7, |
976 | FRAG_THRESH_I = 8, | 979 | FRAG_THRESH_I = 8, |
977 | DOT11D_I = 9, | 980 | DOT11D_I = 9, |
981 | DOT11H_I = 10, | ||
978 | }; | 982 | }; |
979 | 983 | ||
980 | #define MAX_SNMP_BUF_SIZE 128 | 984 | #define MAX_SNMP_BUF_SIZE 128 |
@@ -1206,6 +1210,18 @@ struct host_cmd_ds_sta_deauth { | |||
1206 | __le16 reason; | 1210 | __le16 reason; |
1207 | } __packed; | 1211 | } __packed; |
1208 | 1212 | ||
1213 | struct mwifiex_ie_types_pwr_capability { | ||
1214 | struct mwifiex_ie_types_header header; | ||
1215 | s8 min_pwr; | ||
1216 | s8 max_pwr; | ||
1217 | }; | ||
1218 | |||
1219 | struct mwifiex_ie_types_local_pwr_constraint { | ||
1220 | struct mwifiex_ie_types_header header; | ||
1221 | u8 chan; | ||
1222 | u8 constraint; | ||
1223 | }; | ||
1224 | |||
1209 | struct mwifiex_ie_types_wmm_param_set { | 1225 | struct mwifiex_ie_types_wmm_param_set { |
1210 | struct mwifiex_ie_types_header header; | 1226 | struct mwifiex_ie_types_header header; |
1211 | u8 wmm_ie[1]; | 1227 | u8 wmm_ie[1]; |
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index c7f11c0c3bb7..caaf4bd56b30 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c | |||
@@ -52,84 +52,6 @@ static int mwifiex_add_bss_prio_tbl(struct mwifiex_private *priv) | |||
52 | return 0; | 52 | return 0; |
53 | } | 53 | } |
54 | 54 | ||
55 | static void scan_delay_timer_fn(unsigned long data) | ||
56 | { | ||
57 | struct mwifiex_private *priv = (struct mwifiex_private *)data; | ||
58 | struct mwifiex_adapter *adapter = priv->adapter; | ||
59 | struct cmd_ctrl_node *cmd_node, *tmp_node; | ||
60 | unsigned long flags; | ||
61 | |||
62 | if (adapter->surprise_removed) | ||
63 | return; | ||
64 | |||
65 | if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) { | ||
66 | /* | ||
67 | * Abort scan operation by cancelling all pending scan | ||
68 | * commands | ||
69 | */ | ||
70 | spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); | ||
71 | list_for_each_entry_safe(cmd_node, tmp_node, | ||
72 | &adapter->scan_pending_q, list) { | ||
73 | list_del(&cmd_node->list); | ||
74 | mwifiex_insert_cmd_to_free_q(adapter, cmd_node); | ||
75 | } | ||
76 | spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); | ||
77 | |||
78 | spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); | ||
79 | adapter->scan_processing = false; | ||
80 | adapter->scan_delay_cnt = 0; | ||
81 | adapter->empty_tx_q_cnt = 0; | ||
82 | spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); | ||
83 | |||
84 | if (priv->scan_request) { | ||
85 | dev_dbg(adapter->dev, "info: aborting scan\n"); | ||
86 | cfg80211_scan_done(priv->scan_request, 1); | ||
87 | priv->scan_request = NULL; | ||
88 | } else { | ||
89 | priv->scan_aborting = false; | ||
90 | dev_dbg(adapter->dev, "info: scan already aborted\n"); | ||
91 | } | ||
92 | goto done; | ||
93 | } | ||
94 | |||
95 | if (!atomic_read(&priv->adapter->is_tx_received)) { | ||
96 | adapter->empty_tx_q_cnt++; | ||
97 | if (adapter->empty_tx_q_cnt == MWIFIEX_MAX_EMPTY_TX_Q_CNT) { | ||
98 | /* | ||
99 | * No Tx traffic for 200msec. Get scan command from | ||
100 | * scan pending queue and put to cmd pending queue to | ||
101 | * resume scan operation | ||
102 | */ | ||
103 | adapter->scan_delay_cnt = 0; | ||
104 | adapter->empty_tx_q_cnt = 0; | ||
105 | spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); | ||
106 | cmd_node = list_first_entry(&adapter->scan_pending_q, | ||
107 | struct cmd_ctrl_node, list); | ||
108 | list_del(&cmd_node->list); | ||
109 | spin_unlock_irqrestore(&adapter->scan_pending_q_lock, | ||
110 | flags); | ||
111 | |||
112 | mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, | ||
113 | true); | ||
114 | queue_work(adapter->workqueue, &adapter->main_work); | ||
115 | goto done; | ||
116 | } | ||
117 | } else { | ||
118 | adapter->empty_tx_q_cnt = 0; | ||
119 | } | ||
120 | |||
121 | /* Delay scan operation further by 20msec */ | ||
122 | mod_timer(&priv->scan_delay_timer, jiffies + | ||
123 | msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC)); | ||
124 | adapter->scan_delay_cnt++; | ||
125 | |||
126 | done: | ||
127 | if (atomic_read(&priv->adapter->is_tx_received)) | ||
128 | atomic_set(&priv->adapter->is_tx_received, false); | ||
129 | |||
130 | return; | ||
131 | } | ||
132 | |||
133 | /* | 55 | /* |
134 | * This function initializes the private structure and sets default | 56 | * This function initializes the private structure and sets default |
135 | * values to the members. | 57 | * values to the members. |
@@ -211,8 +133,8 @@ int mwifiex_init_priv(struct mwifiex_private *priv) | |||
211 | 133 | ||
212 | priv->scan_block = false; | 134 | priv->scan_block = false; |
213 | 135 | ||
214 | setup_timer(&priv->scan_delay_timer, scan_delay_timer_fn, | 136 | priv->csa_chan = 0; |
215 | (unsigned long)priv); | 137 | priv->csa_expire_time = 0; |
216 | 138 | ||
217 | return mwifiex_add_bss_prio_tbl(priv); | 139 | return mwifiex_add_bss_prio_tbl(priv); |
218 | } | 140 | } |
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index 122175af18c6..1c8a771e8e81 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c | |||
@@ -534,6 +534,8 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv, | |||
534 | 534 | ||
535 | mwifiex_cmd_append_tsf_tlv(priv, &pos, bss_desc); | 535 | mwifiex_cmd_append_tsf_tlv(priv, &pos, bss_desc); |
536 | 536 | ||
537 | mwifiex_11h_process_join(priv, &pos, bss_desc); | ||
538 | |||
537 | cmd->size = cpu_to_le16((u16) (pos - (u8 *) assoc) + S_DS_GEN); | 539 | cmd->size = cpu_to_le16((u16) (pos - (u8 *) assoc) + S_DS_GEN); |
538 | 540 | ||
539 | /* Set the Capability info at last */ | 541 | /* Set the Capability info at last */ |
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 5bc7ef8d04d6..e15ab72fb03d 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c | |||
@@ -28,6 +28,84 @@ const char driver_version[] = "mwifiex " VERSION " (%s) "; | |||
28 | static char *cal_data_cfg; | 28 | static char *cal_data_cfg; |
29 | module_param(cal_data_cfg, charp, 0); | 29 | module_param(cal_data_cfg, charp, 0); |
30 | 30 | ||
31 | static void scan_delay_timer_fn(unsigned long data) | ||
32 | { | ||
33 | struct mwifiex_private *priv = (struct mwifiex_private *)data; | ||
34 | struct mwifiex_adapter *adapter = priv->adapter; | ||
35 | struct cmd_ctrl_node *cmd_node, *tmp_node; | ||
36 | unsigned long flags; | ||
37 | |||
38 | if (adapter->surprise_removed) | ||
39 | return; | ||
40 | |||
41 | if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) { | ||
42 | /* | ||
43 | * Abort scan operation by cancelling all pending scan | ||
44 | * commands | ||
45 | */ | ||
46 | spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); | ||
47 | list_for_each_entry_safe(cmd_node, tmp_node, | ||
48 | &adapter->scan_pending_q, list) { | ||
49 | list_del(&cmd_node->list); | ||
50 | mwifiex_insert_cmd_to_free_q(adapter, cmd_node); | ||
51 | } | ||
52 | spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); | ||
53 | |||
54 | spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); | ||
55 | adapter->scan_processing = false; | ||
56 | adapter->scan_delay_cnt = 0; | ||
57 | adapter->empty_tx_q_cnt = 0; | ||
58 | spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); | ||
59 | |||
60 | if (priv->scan_request) { | ||
61 | dev_dbg(adapter->dev, "info: aborting scan\n"); | ||
62 | cfg80211_scan_done(priv->scan_request, 1); | ||
63 | priv->scan_request = NULL; | ||
64 | } else { | ||
65 | priv->scan_aborting = false; | ||
66 | dev_dbg(adapter->dev, "info: scan already aborted\n"); | ||
67 | } | ||
68 | goto done; | ||
69 | } | ||
70 | |||
71 | if (!atomic_read(&priv->adapter->is_tx_received)) { | ||
72 | adapter->empty_tx_q_cnt++; | ||
73 | if (adapter->empty_tx_q_cnt == MWIFIEX_MAX_EMPTY_TX_Q_CNT) { | ||
74 | /* | ||
75 | * No Tx traffic for 200msec. Get scan command from | ||
76 | * scan pending queue and put to cmd pending queue to | ||
77 | * resume scan operation | ||
78 | */ | ||
79 | adapter->scan_delay_cnt = 0; | ||
80 | adapter->empty_tx_q_cnt = 0; | ||
81 | spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); | ||
82 | cmd_node = list_first_entry(&adapter->scan_pending_q, | ||
83 | struct cmd_ctrl_node, list); | ||
84 | list_del(&cmd_node->list); | ||
85 | spin_unlock_irqrestore(&adapter->scan_pending_q_lock, | ||
86 | flags); | ||
87 | |||
88 | mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, | ||
89 | true); | ||
90 | queue_work(adapter->workqueue, &adapter->main_work); | ||
91 | goto done; | ||
92 | } | ||
93 | } else { | ||
94 | adapter->empty_tx_q_cnt = 0; | ||
95 | } | ||
96 | |||
97 | /* Delay scan operation further by 20msec */ | ||
98 | mod_timer(&priv->scan_delay_timer, jiffies + | ||
99 | msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC)); | ||
100 | adapter->scan_delay_cnt++; | ||
101 | |||
102 | done: | ||
103 | if (atomic_read(&priv->adapter->is_tx_received)) | ||
104 | atomic_set(&priv->adapter->is_tx_received, false); | ||
105 | |||
106 | return; | ||
107 | } | ||
108 | |||
31 | /* | 109 | /* |
32 | * This function registers the device and performs all the necessary | 110 | * This function registers the device and performs all the necessary |
33 | * initializations. | 111 | * initializations. |
@@ -75,6 +153,10 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops, | |||
75 | 153 | ||
76 | adapter->priv[i]->adapter = adapter; | 154 | adapter->priv[i]->adapter = adapter; |
77 | adapter->priv_num++; | 155 | adapter->priv_num++; |
156 | |||
157 | setup_timer(&adapter->priv[i]->scan_delay_timer, | ||
158 | scan_delay_timer_fn, | ||
159 | (unsigned long)adapter->priv[i]); | ||
78 | } | 160 | } |
79 | mwifiex_init_lock_list(adapter); | 161 | mwifiex_init_lock_list(adapter); |
80 | 162 | ||
@@ -587,9 +669,8 @@ static void mwifiex_set_multicast_list(struct net_device *dev) | |||
587 | mcast_list.mode = MWIFIEX_ALL_MULTI_MODE; | 669 | mcast_list.mode = MWIFIEX_ALL_MULTI_MODE; |
588 | } else { | 670 | } else { |
589 | mcast_list.mode = MWIFIEX_MULTICAST_MODE; | 671 | mcast_list.mode = MWIFIEX_MULTICAST_MODE; |
590 | if (netdev_mc_count(dev)) | 672 | mcast_list.num_multicast_addr = |
591 | mcast_list.num_multicast_addr = | 673 | mwifiex_copy_mcast_addr(&mcast_list, dev); |
592 | mwifiex_copy_mcast_addr(&mcast_list, dev); | ||
593 | } | 674 | } |
594 | mwifiex_request_set_multicast_list(priv, &mcast_list); | 675 | mwifiex_request_set_multicast_list(priv, &mcast_list); |
595 | } | 676 | } |
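The main.c hunk above moves scan_delay_timer_fn out of init.c and arms one timer per interface at registration time. A minimal userspace sketch of the deferral policy it implements follows, assuming invented constants MAX_DELAY and MAX_EMPTY_TXQ in place of MWIFIEX_MAX_SCAN_DELAY_CNT and MWIFIEX_MAX_EMPTY_TX_Q_CNT: keep postponing the queued scan while TX traffic is seen, resume it once the queue has been idle for several consecutive ticks, and abort outright after too many deferrals.

/* Userspace sketch (not driver code) of the deferral policy above. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_DELAY      50   /* stand-in for MWIFIEX_MAX_SCAN_DELAY_CNT */
#define MAX_EMPTY_TXQ  10   /* stand-in for MWIFIEX_MAX_EMPTY_TX_Q_CNT */

enum scan_action { SCAN_WAIT, SCAN_RESUME, SCAN_ABORT };

static enum scan_action scan_delay_tick(bool tx_seen, int *delay_cnt, int *empty_cnt)
{
	if (*delay_cnt == MAX_DELAY)
		return SCAN_ABORT;            /* give up, flush pending scan cmds */

	if (!tx_seen) {
		if (++(*empty_cnt) == MAX_EMPTY_TXQ)
			return SCAN_RESUME;   /* queue idle long enough, resume scan */
	} else {
		*empty_cnt = 0;               /* traffic again, restart idle window */
	}

	++(*delay_cnt);
	return SCAN_WAIT;                     /* re-arm the timer for another tick */
}

int main(void)
{
	int delay = 0, empty = 0;

	/* pretend TX was busy for the first 5 ticks, then went idle */
	for (int tick = 0; tick < 40; tick++) {
		enum scan_action a = scan_delay_tick(tick < 5, &delay, &empty);

		if (a != SCAN_WAIT) {
			printf("tick %d: %s\n", tick,
			       a == SCAN_RESUME ? "resume" : "abort");
			break;
		}
	}
	return 0;
}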
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index 0832c2437daf..3da73d36acdf 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h | |||
@@ -309,6 +309,9 @@ struct mwifiex_bssdescriptor { | |||
309 | u16 wapi_offset; | 309 | u16 wapi_offset; |
310 | u8 *beacon_buf; | 310 | u8 *beacon_buf; |
311 | u32 beacon_buf_size; | 311 | u32 beacon_buf_size; |
312 | u8 sensed_11h; | ||
313 | u8 local_constraint; | ||
314 | u8 chan_sw_ie_present; | ||
312 | }; | 315 | }; |
313 | 316 | ||
314 | struct mwifiex_current_bss_params { | 317 | struct mwifiex_current_bss_params { |
@@ -510,6 +513,8 @@ struct mwifiex_private { | |||
510 | u32 mgmt_frame_mask; | 513 | u32 mgmt_frame_mask; |
511 | struct mwifiex_roc_cfg roc_cfg; | 514 | struct mwifiex_roc_cfg roc_cfg; |
512 | bool scan_aborting; | 515 | bool scan_aborting; |
516 | u8 csa_chan; | ||
517 | unsigned long csa_expire_time; | ||
513 | }; | 518 | }; |
514 | 519 | ||
515 | enum mwifiex_ba_status { | 520 | enum mwifiex_ba_status { |
@@ -1018,6 +1023,24 @@ static inline bool mwifiex_is_skb_mgmt_frame(struct sk_buff *skb) | |||
1018 | return (*(u32 *)skb->data == PKT_TYPE_MGMT); | 1023 | return (*(u32 *)skb->data == PKT_TYPE_MGMT); |
1019 | } | 1024 | } |
1020 | 1025 | ||
1026 | /* This function retrieves channel closed for operation by Channel | ||
1027 | * Switch Announcement. | ||
1028 | */ | ||
1029 | static inline u8 | ||
1030 | mwifiex_11h_get_csa_closed_channel(struct mwifiex_private *priv) | ||
1031 | { | ||
1032 | if (!priv->csa_chan) | ||
1033 | return 0; | ||
1034 | |||
1035 | /* Clear csa channel, if DFS channel move time has passed */ | ||
1036 | if (jiffies > priv->csa_expire_time) { | ||
1037 | priv->csa_chan = 0; | ||
1038 | priv->csa_expire_time = 0; | ||
1039 | } | ||
1040 | |||
1041 | return priv->csa_chan; | ||
1042 | } | ||
1043 | |||
1021 | int mwifiex_init_shutdown_fw(struct mwifiex_private *priv, | 1044 | int mwifiex_init_shutdown_fw(struct mwifiex_private *priv, |
1022 | u32 func_init_shutdown); | 1045 | u32 func_init_shutdown); |
1023 | int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8); | 1046 | int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8); |
@@ -1119,6 +1142,10 @@ u8 *mwifiex_11d_code_2_region(u8 code); | |||
1119 | void mwifiex_uap_del_sta_data(struct mwifiex_private *priv, | 1142 | void mwifiex_uap_del_sta_data(struct mwifiex_private *priv, |
1120 | struct mwifiex_sta_node *node); | 1143 | struct mwifiex_sta_node *node); |
1121 | 1144 | ||
1145 | void mwifiex_11h_process_join(struct mwifiex_private *priv, u8 **buffer, | ||
1146 | struct mwifiex_bssdescriptor *bss_desc); | ||
1147 | int mwifiex_11h_handle_event_chanswann(struct mwifiex_private *priv); | ||
1148 | |||
1122 | extern const struct ethtool_ops mwifiex_ethtool_ops; | 1149 | extern const struct ethtool_ops mwifiex_ethtool_ops; |
1123 | 1150 | ||
1124 | #ifdef CONFIG_DEBUG_FS | 1151 | #ifdef CONFIG_DEBUG_FS |
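mwifiex_11h_get_csa_closed_channel(), added above, is a self-clearing query: the CSA-blocked channel keeps being reported until its deadline passes, at which point the state resets on the next call. A hedged userspace sketch of the same idea, with time(NULL) standing in for jiffies and field names mirroring the new mwifiex_private members:

#include <stdio.h>
#include <time.h>

struct csa_state {
	unsigned char csa_chan;        /* 0 means no channel is blocked */
	time_t csa_expire_time;
};

static unsigned char csa_closed_channel(struct csa_state *s)
{
	if (!s->csa_chan)
		return 0;

	if (time(NULL) > s->csa_expire_time) {  /* deadline passed, unblock */
		s->csa_chan = 0;
		s->csa_expire_time = 0;
	}
	return s->csa_chan;
}

int main(void)
{
	struct csa_state s = { .csa_chan = 11, .csa_expire_time = time(NULL) + 2 };

	printf("closed now: %u\n", csa_closed_channel(&s));  /* prints 11 */
	/* once the deadline passes, the same call returns 0 and clears the state */
	return 0;
}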
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index 801b6b728379..c447d9bd1aa9 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c | |||
@@ -391,6 +391,12 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv, | |||
391 | return 0; | 391 | return 0; |
392 | } | 392 | } |
393 | 393 | ||
394 | if (bss_desc->chan_sw_ie_present) { | ||
395 | dev_err(adapter->dev, | ||
396 | "Don't connect to AP with WLAN_EID_CHANNEL_SWITCH\n"); | ||
397 | return -1; | ||
398 | } | ||
399 | |||
394 | if (mwifiex_is_bss_wapi(priv, bss_desc)) { | 400 | if (mwifiex_is_bss_wapi(priv, bss_desc)) { |
395 | dev_dbg(adapter->dev, "info: return success for WAPI AP\n"); | 401 | dev_dbg(adapter->dev, "info: return success for WAPI AP\n"); |
396 | return 0; | 402 | return 0; |
@@ -569,6 +575,9 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, | |||
569 | return -1; | 575 | return -1; |
570 | } | 576 | } |
571 | 577 | ||
578 | /* Check csa channel expiry before preparing scan list */ | ||
579 | mwifiex_11h_get_csa_closed_channel(priv); | ||
580 | |||
572 | chan_tlv_out->header.type = cpu_to_le16(TLV_TYPE_CHANLIST); | 581 | chan_tlv_out->header.type = cpu_to_le16(TLV_TYPE_CHANLIST); |
573 | 582 | ||
574 | /* Set the temp channel struct pointer to the start of the desired | 583 | /* Set the temp channel struct pointer to the start of the desired |
@@ -598,6 +607,11 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, | |||
598 | while (tlv_idx < max_chan_per_scan && | 607 | while (tlv_idx < max_chan_per_scan && |
599 | tmp_chan_list->chan_number && !done_early) { | 608 | tmp_chan_list->chan_number && !done_early) { |
600 | 609 | ||
610 | if (tmp_chan_list->chan_number == priv->csa_chan) { | ||
611 | tmp_chan_list++; | ||
612 | continue; | ||
613 | } | ||
614 | |||
601 | dev_dbg(priv->adapter->dev, | 615 | dev_dbg(priv->adapter->dev, |
602 | "info: Scan: Chan(%3d), Radio(%d)," | 616 | "info: Scan: Chan(%3d), Radio(%d)," |
603 | " Mode(%d, %d), Dur(%d)\n", | 617 | " Mode(%d, %d), Dur(%d)\n", |
@@ -1169,6 +1183,19 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, | |||
1169 | bss_entry->erp_flags = *(current_ptr + 2); | 1183 | bss_entry->erp_flags = *(current_ptr + 2); |
1170 | break; | 1184 | break; |
1171 | 1185 | ||
1186 | case WLAN_EID_PWR_CONSTRAINT: | ||
1187 | bss_entry->local_constraint = *(current_ptr + 2); | ||
1188 | bss_entry->sensed_11h = true; | ||
1189 | break; | ||
1190 | |||
1191 | case WLAN_EID_CHANNEL_SWITCH: | ||
1192 | bss_entry->chan_sw_ie_present = true; | ||
1193 | case WLAN_EID_PWR_CAPABILITY: | ||
1194 | case WLAN_EID_TPC_REPORT: | ||
1195 | case WLAN_EID_QUIET: | ||
1196 | bss_entry->sensed_11h = true; | ||
1197 | break; | ||
1198 | |||
1172 | case WLAN_EID_EXT_SUPP_RATES: | 1199 | case WLAN_EID_EXT_SUPP_RATES: |
1173 | /* | 1200 | /* |
1174 | * Only process extended supported rate | 1201 | * Only process extended supported rate |
@@ -1575,6 +1602,9 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, | |||
1575 | goto check_next_scan; | 1602 | goto check_next_scan; |
1576 | } | 1603 | } |
1577 | 1604 | ||
1605 | /* Check csa channel expiry before parsing scan response */ | ||
1606 | mwifiex_11h_get_csa_closed_channel(priv); | ||
1607 | |||
1578 | bytes_left = le16_to_cpu(scan_rsp->bss_descript_size); | 1608 | bytes_left = le16_to_cpu(scan_rsp->bss_descript_size); |
1579 | dev_dbg(adapter->dev, "info: SCAN_RESP: bss_descript_size %d\n", | 1609 | dev_dbg(adapter->dev, "info: SCAN_RESP: bss_descript_size %d\n", |
1580 | bytes_left); | 1610 | bytes_left); |
@@ -1727,6 +1757,13 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, | |||
1727 | struct ieee80211_channel *chan; | 1757 | struct ieee80211_channel *chan; |
1728 | u8 band; | 1758 | u8 band; |
1729 | 1759 | ||
1760 | /* Skip entry if on csa closed channel */ | ||
1761 | if (channel == priv->csa_chan) { | ||
1762 | dev_dbg(adapter->dev, | ||
1763 | "Dropping entry on csa closed channel\n"); | ||
1764 | continue; | ||
1765 | } | ||
1766 | |||
1730 | band = BAND_G; | 1767 | band = BAND_G; |
1731 | if (chan_band_tlv) { | 1768 | if (chan_band_tlv) { |
1732 | chan_band = | 1769 | chan_band = |
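The scan.c changes both consult the CSA state (skipping the blocked channel when building the channel list and when parsing scan responses) and extend the IE parser, where WLAN_EID_CHANNEL_SWITCH deliberately falls through so that a CSA IE also marks the BSS as 11h-aware. A small standalone sketch of that fallthrough, using the standard 802.11 element ID values but otherwise invented names:

#include <stdbool.h>
#include <stdio.h>

enum { EID_PWR_CONSTRAINT = 32, EID_PWR_CAPABILITY = 33,
       EID_TPC_REPORT = 35, EID_CHANNEL_SWITCH = 37, EID_QUIET = 40 };

struct bss_flags {
	bool sensed_11h;
	bool chan_sw_ie_present;
	unsigned char local_constraint;
};

static void parse_ie(struct bss_flags *b, unsigned char eid, unsigned char val)
{
	switch (eid) {
	case EID_PWR_CONSTRAINT:
		b->local_constraint = val;
		b->sensed_11h = true;
		break;

	case EID_CHANNEL_SWITCH:
		b->chan_sw_ie_present = true;
		/* fall through: a CSA IE also implies 11h support */
	case EID_PWR_CAPABILITY:
	case EID_TPC_REPORT:
	case EID_QUIET:
		b->sensed_11h = true;
		break;
	}
}

int main(void)
{
	struct bss_flags b = { 0 };

	parse_ie(&b, EID_CHANNEL_SWITCH, 0);
	printf("csa=%d 11h=%d\n", b.chan_sw_ie_present, b.sensed_11h); /* 1 1 */
	return 0;
}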
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c index 41aafc7454ed..ea265ec0e522 100644 --- a/drivers/net/wireless/mwifiex/sta_event.c +++ b/drivers/net/wireless/mwifiex/sta_event.c | |||
@@ -427,6 +427,17 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) | |||
427 | 427 | ||
428 | break; | 428 | break; |
429 | 429 | ||
430 | case EVENT_CHANNEL_SWITCH_ANN: | ||
431 | dev_dbg(adapter->dev, "event: Channel Switch Announcement\n"); | ||
432 | priv->csa_expire_time = | ||
433 | jiffies + msecs_to_jiffies(DFS_CHAN_MOVE_TIME); | ||
434 | priv->csa_chan = priv->curr_bss_params.bss_descriptor.channel; | ||
435 | ret = mwifiex_send_cmd_async(priv, | ||
436 | HostCmd_CMD_802_11_DEAUTHENTICATE, | ||
437 | HostCmd_ACT_GEN_SET, 0, | ||
438 | priv->curr_bss_params.bss_descriptor.mac_address); | ||
439 | break; | ||
440 | |||
430 | default: | 441 | default: |
431 | dev_dbg(adapter->dev, "event: unknown event id: %#x\n", | 442 | dev_dbg(adapter->dev, "event: unknown event id: %#x\n", |
432 | eventcause); | 443 | eventcause); |
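This event handler is the arming side of the CSA block queried elsewhere: record the current channel, set a deadline one DFS channel-move time ahead, then deauthenticate. A rough userspace sketch, assuming seconds via time(NULL) in place of jiffies and DFS_MOVE_SECS as a stand-in for DFS_CHAN_MOVE_TIME:

#include <stdio.h>
#include <time.h>

#define DFS_MOVE_SECS 10   /* illustrative; the real constant is in msecs/jiffies */

struct csa_state {
	unsigned char csa_chan;
	time_t csa_expire_time;
};

static void handle_channel_switch(struct csa_state *s, unsigned char cur_chan)
{
	s->csa_expire_time = time(NULL) + DFS_MOVE_SECS;
	s->csa_chan = cur_chan;
	/* the real driver then queues HostCmd_CMD_802_11_DEAUTHENTICATE here */
}

int main(void)
{
	struct csa_state s = { 0 };

	handle_channel_switch(&s, 52);
	printf("blocked chan %u until %ld\n", s.csa_chan, (long)s.csa_expire_time);
	return 0;
}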
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index 1a8a19dbd635..206c3e038072 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c | |||
@@ -104,16 +104,14 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, | |||
104 | } else { | 104 | } else { |
105 | priv->curr_pkt_filter &= | 105 | priv->curr_pkt_filter &= |
106 | ~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE; | 106 | ~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE; |
107 | if (mcast_list->num_multicast_addr) { | 107 | dev_dbg(priv->adapter->dev, |
108 | dev_dbg(priv->adapter->dev, | 108 | "info: Set multicast list=%d\n", |
109 | "info: Set multicast list=%d\n", | 109 | mcast_list->num_multicast_addr); |
110 | mcast_list->num_multicast_addr); | 110 | /* Send multicast addresses to firmware */ |
111 | /* Send multicast addresses to firmware */ | 111 | ret = mwifiex_send_cmd_async(priv, |
112 | ret = mwifiex_send_cmd_async(priv, | 112 | HostCmd_CMD_MAC_MULTICAST_ADR, |
113 | HostCmd_CMD_MAC_MULTICAST_ADR, | 113 | HostCmd_ACT_GEN_SET, 0, |
114 | HostCmd_ACT_GEN_SET, 0, | 114 | mcast_list); |
115 | mcast_list); | ||
116 | } | ||
117 | } | 115 | } |
118 | } | 116 | } |
119 | dev_dbg(priv->adapter->dev, | 117 | dev_dbg(priv->adapter->dev, |
@@ -180,6 +178,9 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, | |||
180 | */ | 178 | */ |
181 | bss_desc->disable_11ac = true; | 179 | bss_desc->disable_11ac = true; |
182 | 180 | ||
181 | if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_SPECTRUM_MGMT) | ||
182 | bss_desc->sensed_11h = true; | ||
183 | |||
183 | return mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc); | 184 | return mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc); |
184 | } | 185 | } |
185 | 186 | ||
@@ -257,30 +258,37 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, | |||
257 | } | 258 | } |
258 | 259 | ||
259 | if (priv->bss_mode == NL80211_IFTYPE_STATION) { | 260 | if (priv->bss_mode == NL80211_IFTYPE_STATION) { |
261 | u8 config_bands; | ||
262 | |||
260 | /* Infra mode */ | 263 | /* Infra mode */ |
261 | ret = mwifiex_deauthenticate(priv, NULL); | 264 | ret = mwifiex_deauthenticate(priv, NULL); |
262 | if (ret) | 265 | if (ret) |
263 | goto done; | 266 | goto done; |
264 | 267 | ||
265 | if (bss_desc) { | 268 | if (!bss_desc) |
266 | u8 config_bands = 0; | 269 | return -1; |
267 | 270 | ||
268 | if (mwifiex_band_to_radio_type((u8) bss_desc->bss_band) | 271 | if (mwifiex_band_to_radio_type(bss_desc->bss_band) == |
269 | == HostCmd_SCAN_RADIO_TYPE_BG) | 272 | HostCmd_SCAN_RADIO_TYPE_BG) |
270 | config_bands = BAND_B | BAND_G | BAND_GN | | 273 | config_bands = BAND_B | BAND_G | BAND_GN | BAND_GAC; |
271 | BAND_GAC; | 274 | else |
272 | else | 275 | config_bands = BAND_A | BAND_AN | BAND_AAC; |
273 | config_bands = BAND_A | BAND_AN | BAND_AAC; | ||
274 | 276 | ||
275 | if (!((config_bands | adapter->fw_bands) & | 277 | if (!((config_bands | adapter->fw_bands) & ~adapter->fw_bands)) |
276 | ~adapter->fw_bands)) | 278 | adapter->config_bands = config_bands; |
277 | adapter->config_bands = config_bands; | ||
278 | } | ||
279 | 279 | ||
280 | ret = mwifiex_check_network_compatibility(priv, bss_desc); | 280 | ret = mwifiex_check_network_compatibility(priv, bss_desc); |
281 | if (ret) | 281 | if (ret) |
282 | goto done; | 282 | goto done; |
283 | 283 | ||
284 | if (mwifiex_11h_get_csa_closed_channel(priv) == | ||
285 | (u8)bss_desc->channel) { | ||
286 | dev_err(adapter->dev, | ||
287 | "Attempt to reconnect on csa closed chan(%d)\n", | ||
288 | bss_desc->channel); | ||
289 | goto done; | ||
290 | } | ||
291 | |||
284 | dev_dbg(adapter->dev, "info: SSID found in scan list ... " | 292 | dev_dbg(adapter->dev, "info: SSID found in scan list ... " |
285 | "associating...\n"); | 293 | "associating...\n"); |
286 | 294 | ||
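The band check retained in mwifiex_bss_start() relies on a bitwise subset test: (config | fw) & ~fw is non-zero exactly when config carries a band bit the firmware does not advertise, so config_bands is only adopted when it is a subset of the firmware-supported bands. A small illustration with made-up band bit values:

#include <stdbool.h>
#include <stdio.h>

#define BAND_B   0x01
#define BAND_G   0x02
#define BAND_A   0x04
#define BAND_GN  0x08

static bool bands_supported(unsigned int config_bands, unsigned int fw_bands)
{
	/* true iff every bit in config_bands is also set in fw_bands */
	return !((config_bands | fw_bands) & ~fw_bands);
}

int main(void)
{
	unsigned int fw = BAND_B | BAND_G | BAND_GN;

	printf("%d\n", bands_supported(BAND_B | BAND_G, fw)); /* 1: subset of fw */
	printf("%d\n", bands_supported(BAND_A, fw));          /* 0: 5 GHz not in fw */
	return 0;
}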
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 4be3d33ceae8..944e8846f6fc 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c | |||
@@ -37,6 +37,9 @@ | |||
37 | /* Offset for TOS field in the IP header */ | 37 | /* Offset for TOS field in the IP header */ |
38 | #define IPTOS_OFFSET 5 | 38 | #define IPTOS_OFFSET 5 |
39 | 39 | ||
40 | static bool enable_tx_amsdu; | ||
41 | module_param(enable_tx_amsdu, bool, 0644); | ||
42 | |||
40 | /* WMM information IE */ | 43 | /* WMM information IE */ |
41 | static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07, | 44 | static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07, |
42 | 0x00, 0x50, 0xf2, 0x02, | 45 | 0x00, 0x50, 0xf2, 0x02, |
@@ -1233,7 +1236,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter) | |||
1233 | mwifiex_send_delba(priv, tid_del, ra, 1); | 1236 | mwifiex_send_delba(priv, tid_del, ra, 1); |
1234 | } | 1237 | } |
1235 | } | 1238 | } |
1236 | if (mwifiex_is_amsdu_allowed(priv, tid) && | 1239 | if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) && |
1237 | mwifiex_is_11n_aggragation_possible(priv, ptr, | 1240 | mwifiex_is_11n_aggragation_possible(priv, ptr, |
1238 | adapter->tx_buf_size)) | 1241 | adapter->tx_buf_size)) |
1239 | mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN, | 1242 | mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN, |
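enable_tx_amsdu above is a writable boolean module parameter that gates TX A-MSDU aggregation in the dequeue path. For reference, a minimal out-of-tree module skeleton using the same idiom; the module name and parameter description are illustrative, not taken from mwifiex:

#include <linux/module.h>
#include <linux/kernel.h>

/* off by default; 0644 exposes it under /sys/module/<name>/parameters/ */
static bool enable_tx_amsdu;
module_param(enable_tx_amsdu, bool, 0644);
MODULE_PARM_DESC(enable_tx_amsdu, "Enable TX A-MSDU aggregation (default: off)");

static int __init amsdu_demo_init(void)
{
	pr_info("enable_tx_amsdu=%d\n", enable_tx_amsdu);
	return 0;
}

static void __exit amsdu_demo_exit(void)
{
}

module_init(amsdu_demo_init);
module_exit(amsdu_demo_exit);
MODULE_LICENSE("GPL");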
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c index 1f9cb55c3360..bdfe637953f4 100644 --- a/drivers/net/wireless/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/orinoco/orinoco_usb.c | |||
@@ -881,7 +881,8 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv, | |||
881 | 881 | ||
882 | if (!upriv->udev) { | 882 | if (!upriv->udev) { |
883 | dbg("Device disconnected"); | 883 | dbg("Device disconnected"); |
884 | return -ENODEV; | 884 | retval = -ENODEV; |
885 | goto exit; | ||
885 | } | 886 | } |
886 | 887 | ||
887 | if (upriv->read_urb->status != -EINPROGRESS) | 888 | if (upriv->read_urb->status != -EINPROGRESS) |
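The orinoco_usb change restores the single-exit error path: instead of returning -ENODEV directly and skipping the cleanup behind the exit label, the error code is stored and control jumps to the shared exit. A generic userspace sketch of the idiom, with a heap buffer standing in for whatever the real exit label releases:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int do_transfer(int device_present)
{
	int retval = 0;
	char *buf = malloc(64);

	if (!buf)
		return -ENOMEM;

	if (!device_present) {
		retval = -ENODEV;   /* was "return -ENODEV", which skipped free() */
		goto exit;
	}

	/* ... use buf for the transfer ... */

exit:
	free(buf);                  /* cleanup shared by success and error paths */
	return retval;
}

int main(void)
{
	printf("%d\n", do_transfer(0));  /* prints the negative ENODEV value */
	return 0;
}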
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c index f7143733d7e9..3d53a09da5a1 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/rt2x00/rt2400pci.c | |||
@@ -1767,33 +1767,45 @@ static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = { | |||
1767 | .config = rt2400pci_config, | 1767 | .config = rt2400pci_config, |
1768 | }; | 1768 | }; |
1769 | 1769 | ||
1770 | static const struct data_queue_desc rt2400pci_queue_rx = { | 1770 | static void rt2400pci_queue_init(struct data_queue *queue) |
1771 | .entry_num = 24, | 1771 | { |
1772 | .data_size = DATA_FRAME_SIZE, | 1772 | switch (queue->qid) { |
1773 | .desc_size = RXD_DESC_SIZE, | 1773 | case QID_RX: |
1774 | .priv_size = sizeof(struct queue_entry_priv_mmio), | 1774 | queue->limit = 24; |
1775 | }; | 1775 | queue->data_size = DATA_FRAME_SIZE; |
1776 | queue->desc_size = RXD_DESC_SIZE; | ||
1777 | queue->priv_size = sizeof(struct queue_entry_priv_mmio); | ||
1778 | break; | ||
1776 | 1779 | ||
1777 | static const struct data_queue_desc rt2400pci_queue_tx = { | 1780 | case QID_AC_VO: |
1778 | .entry_num = 24, | 1781 | case QID_AC_VI: |
1779 | .data_size = DATA_FRAME_SIZE, | 1782 | case QID_AC_BE: |
1780 | .desc_size = TXD_DESC_SIZE, | 1783 | case QID_AC_BK: |
1781 | .priv_size = sizeof(struct queue_entry_priv_mmio), | 1784 | queue->limit = 24; |
1782 | }; | 1785 | queue->data_size = DATA_FRAME_SIZE; |
1786 | queue->desc_size = TXD_DESC_SIZE; | ||
1787 | queue->priv_size = sizeof(struct queue_entry_priv_mmio); | ||
1788 | break; | ||
1783 | 1789 | ||
1784 | static const struct data_queue_desc rt2400pci_queue_bcn = { | 1790 | case QID_BEACON: |
1785 | .entry_num = 1, | 1791 | queue->limit = 1; |
1786 | .data_size = MGMT_FRAME_SIZE, | 1792 | queue->data_size = MGMT_FRAME_SIZE; |
1787 | .desc_size = TXD_DESC_SIZE, | 1793 | queue->desc_size = TXD_DESC_SIZE; |
1788 | .priv_size = sizeof(struct queue_entry_priv_mmio), | 1794 | queue->priv_size = sizeof(struct queue_entry_priv_mmio); |
1789 | }; | 1795 | break; |
1790 | 1796 | ||
1791 | static const struct data_queue_desc rt2400pci_queue_atim = { | 1797 | case QID_ATIM: |
1792 | .entry_num = 8, | 1798 | queue->limit = 8; |
1793 | .data_size = DATA_FRAME_SIZE, | 1799 | queue->data_size = DATA_FRAME_SIZE; |
1794 | .desc_size = TXD_DESC_SIZE, | 1800 | queue->desc_size = TXD_DESC_SIZE; |
1795 | .priv_size = sizeof(struct queue_entry_priv_mmio), | 1801 | queue->priv_size = sizeof(struct queue_entry_priv_mmio); |
1796 | }; | 1802 | break; |
1803 | |||
1804 | default: | ||
1805 | BUG(); | ||
1806 | break; | ||
1807 | } | ||
1808 | } | ||
1797 | 1809 | ||
1798 | static const struct rt2x00_ops rt2400pci_ops = { | 1810 | static const struct rt2x00_ops rt2400pci_ops = { |
1799 | .name = KBUILD_MODNAME, | 1811 | .name = KBUILD_MODNAME, |
@@ -1801,11 +1813,7 @@ static const struct rt2x00_ops rt2400pci_ops = { | |||
1801 | .eeprom_size = EEPROM_SIZE, | 1813 | .eeprom_size = EEPROM_SIZE, |
1802 | .rf_size = RF_SIZE, | 1814 | .rf_size = RF_SIZE, |
1803 | .tx_queues = NUM_TX_QUEUES, | 1815 | .tx_queues = NUM_TX_QUEUES, |
1804 | .extra_tx_headroom = 0, | 1816 | .queue_init = rt2400pci_queue_init, |
1805 | .rx = &rt2400pci_queue_rx, | ||
1806 | .tx = &rt2400pci_queue_tx, | ||
1807 | .bcn = &rt2400pci_queue_bcn, | ||
1808 | .atim = &rt2400pci_queue_atim, | ||
1809 | .lib = &rt2400pci_rt2x00_ops, | 1817 | .lib = &rt2400pci_rt2x00_ops, |
1810 | .hw = &rt2400pci_mac80211_ops, | 1818 | .hw = &rt2400pci_mac80211_ops, |
1811 | #ifdef CONFIG_RT2X00_LIB_DEBUGFS | 1819 | #ifdef CONFIG_RT2X00_LIB_DEBUGFS |
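This is the first instance of the rt2x00-wide refactor repeated in the drivers below: the static data_queue_desc tables are dropped in favour of a queue_init callback that fills each queue's fields from its qid, which lets a driver vary sizes per chip (as rt2800usb now does for RT5592) without cloning whole ops structures. A compilable sketch of the callback pattern, with placeholder sizes rather than the real defines:

#include <stdio.h>

enum qid { QID_RX, QID_AC_VO, QID_AC_VI, QID_AC_BE, QID_AC_BK, QID_BEACON };

struct data_queue {
	enum qid qid;
	unsigned short limit;
	unsigned short data_size;
	unsigned char desc_size;
};

/* per-driver callback: core code only knows the qid, the driver fills the rest */
static void demo_queue_init(struct data_queue *q)
{
	switch (q->qid) {
	case QID_RX:
		q->limit = 24; q->data_size = 2432; q->desc_size = 32;
		break;
	case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK:
		q->limit = 24; q->data_size = 2432; q->desc_size = 48;
		break;
	case QID_BEACON:
		q->limit = 1;  q->data_size = 256;  q->desc_size = 48;
		break;
	}
}

int main(void)
{
	struct data_queue q = { .qid = QID_BEACON };

	demo_queue_init(&q);          /* core code calls ops->queue_init(queue) */
	printf("beacon limit=%u desc=%u\n", q.limit, q.desc_size);
	return 0;
}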
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c index 77e45b223d15..0ac5c589ddce 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c | |||
@@ -2056,33 +2056,45 @@ static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = { | |||
2056 | .config = rt2500pci_config, | 2056 | .config = rt2500pci_config, |
2057 | }; | 2057 | }; |
2058 | 2058 | ||
2059 | static const struct data_queue_desc rt2500pci_queue_rx = { | 2059 | static void rt2500pci_queue_init(struct data_queue *queue) |
2060 | .entry_num = 32, | 2060 | { |
2061 | .data_size = DATA_FRAME_SIZE, | 2061 | switch (queue->qid) { |
2062 | .desc_size = RXD_DESC_SIZE, | 2062 | case QID_RX: |
2063 | .priv_size = sizeof(struct queue_entry_priv_mmio), | 2063 | queue->limit = 32; |
2064 | }; | 2064 | queue->data_size = DATA_FRAME_SIZE; |
2065 | queue->desc_size = RXD_DESC_SIZE; | ||
2066 | queue->priv_size = sizeof(struct queue_entry_priv_mmio); | ||
2067 | break; | ||
2065 | 2068 | ||
2066 | static const struct data_queue_desc rt2500pci_queue_tx = { | 2069 | case QID_AC_VO: |
2067 | .entry_num = 32, | 2070 | case QID_AC_VI: |
2068 | .data_size = DATA_FRAME_SIZE, | 2071 | case QID_AC_BE: |
2069 | .desc_size = TXD_DESC_SIZE, | 2072 | case QID_AC_BK: |
2070 | .priv_size = sizeof(struct queue_entry_priv_mmio), | 2073 | queue->limit = 32; |
2071 | }; | 2074 | queue->data_size = DATA_FRAME_SIZE; |
2075 | queue->desc_size = TXD_DESC_SIZE; | ||
2076 | queue->priv_size = sizeof(struct queue_entry_priv_mmio); | ||
2077 | break; | ||
2072 | 2078 | ||
2073 | static const struct data_queue_desc rt2500pci_queue_bcn = { | 2079 | case QID_BEACON: |
2074 | .entry_num = 1, | 2080 | queue->limit = 1; |
2075 | .data_size = MGMT_FRAME_SIZE, | 2081 | queue->data_size = MGMT_FRAME_SIZE; |
2076 | .desc_size = TXD_DESC_SIZE, | 2082 | queue->desc_size = TXD_DESC_SIZE; |
2077 | .priv_size = sizeof(struct queue_entry_priv_mmio), | 2083 | queue->priv_size = sizeof(struct queue_entry_priv_mmio); |
2078 | }; | 2084 | break; |
2079 | 2085 | ||
2080 | static const struct data_queue_desc rt2500pci_queue_atim = { | 2086 | case QID_ATIM: |
2081 | .entry_num = 8, | 2087 | queue->limit = 8; |
2082 | .data_size = DATA_FRAME_SIZE, | 2088 | queue->data_size = DATA_FRAME_SIZE; |
2083 | .desc_size = TXD_DESC_SIZE, | 2089 | queue->desc_size = TXD_DESC_SIZE; |
2084 | .priv_size = sizeof(struct queue_entry_priv_mmio), | 2090 | queue->priv_size = sizeof(struct queue_entry_priv_mmio); |
2085 | }; | 2091 | break; |
2092 | |||
2093 | default: | ||
2094 | BUG(); | ||
2095 | break; | ||
2096 | } | ||
2097 | } | ||
2086 | 2098 | ||
2087 | static const struct rt2x00_ops rt2500pci_ops = { | 2099 | static const struct rt2x00_ops rt2500pci_ops = { |
2088 | .name = KBUILD_MODNAME, | 2100 | .name = KBUILD_MODNAME, |
@@ -2090,11 +2102,7 @@ static const struct rt2x00_ops rt2500pci_ops = { | |||
2090 | .eeprom_size = EEPROM_SIZE, | 2102 | .eeprom_size = EEPROM_SIZE, |
2091 | .rf_size = RF_SIZE, | 2103 | .rf_size = RF_SIZE, |
2092 | .tx_queues = NUM_TX_QUEUES, | 2104 | .tx_queues = NUM_TX_QUEUES, |
2093 | .extra_tx_headroom = 0, | 2105 | .queue_init = rt2500pci_queue_init, |
2094 | .rx = &rt2500pci_queue_rx, | ||
2095 | .tx = &rt2500pci_queue_tx, | ||
2096 | .bcn = &rt2500pci_queue_bcn, | ||
2097 | .atim = &rt2500pci_queue_atim, | ||
2098 | .lib = &rt2500pci_rt2x00_ops, | 2106 | .lib = &rt2500pci_rt2x00_ops, |
2099 | .hw = &rt2500pci_mac80211_ops, | 2107 | .hw = &rt2500pci_mac80211_ops, |
2100 | #ifdef CONFIG_RT2X00_LIB_DEBUGFS | 2108 | #ifdef CONFIG_RT2X00_LIB_DEBUGFS |
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c index a7f7b365eff4..85acc79f68b8 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/rt2x00/rt2500usb.c | |||
@@ -1867,33 +1867,45 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = { | |||
1867 | .config = rt2500usb_config, | 1867 | .config = rt2500usb_config, |
1868 | }; | 1868 | }; |
1869 | 1869 | ||
1870 | static const struct data_queue_desc rt2500usb_queue_rx = { | 1870 | static void rt2500usb_queue_init(struct data_queue *queue) |
1871 | .entry_num = 32, | 1871 | { |
1872 | .data_size = DATA_FRAME_SIZE, | 1872 | switch (queue->qid) { |
1873 | .desc_size = RXD_DESC_SIZE, | 1873 | case QID_RX: |
1874 | .priv_size = sizeof(struct queue_entry_priv_usb), | 1874 | queue->limit = 32; |
1875 | }; | 1875 | queue->data_size = DATA_FRAME_SIZE; |
1876 | queue->desc_size = RXD_DESC_SIZE; | ||
1877 | queue->priv_size = sizeof(struct queue_entry_priv_usb); | ||
1878 | break; | ||
1876 | 1879 | ||
1877 | static const struct data_queue_desc rt2500usb_queue_tx = { | 1880 | case QID_AC_VO: |
1878 | .entry_num = 32, | 1881 | case QID_AC_VI: |
1879 | .data_size = DATA_FRAME_SIZE, | 1882 | case QID_AC_BE: |
1880 | .desc_size = TXD_DESC_SIZE, | 1883 | case QID_AC_BK: |
1881 | .priv_size = sizeof(struct queue_entry_priv_usb), | 1884 | queue->limit = 32; |
1882 | }; | 1885 | queue->data_size = DATA_FRAME_SIZE; |
1886 | queue->desc_size = TXD_DESC_SIZE; | ||
1887 | queue->priv_size = sizeof(struct queue_entry_priv_usb); | ||
1888 | break; | ||
1883 | 1889 | ||
1884 | static const struct data_queue_desc rt2500usb_queue_bcn = { | 1890 | case QID_BEACON: |
1885 | .entry_num = 1, | 1891 | queue->limit = 1; |
1886 | .data_size = MGMT_FRAME_SIZE, | 1892 | queue->data_size = MGMT_FRAME_SIZE; |
1887 | .desc_size = TXD_DESC_SIZE, | 1893 | queue->desc_size = TXD_DESC_SIZE; |
1888 | .priv_size = sizeof(struct queue_entry_priv_usb_bcn), | 1894 | queue->priv_size = sizeof(struct queue_entry_priv_usb_bcn); |
1889 | }; | 1895 | break; |
1890 | 1896 | ||
1891 | static const struct data_queue_desc rt2500usb_queue_atim = { | 1897 | case QID_ATIM: |
1892 | .entry_num = 8, | 1898 | queue->limit = 8; |
1893 | .data_size = DATA_FRAME_SIZE, | 1899 | queue->data_size = DATA_FRAME_SIZE; |
1894 | .desc_size = TXD_DESC_SIZE, | 1900 | queue->desc_size = TXD_DESC_SIZE; |
1895 | .priv_size = sizeof(struct queue_entry_priv_usb), | 1901 | queue->priv_size = sizeof(struct queue_entry_priv_usb); |
1896 | }; | 1902 | break; |
1903 | |||
1904 | default: | ||
1905 | BUG(); | ||
1906 | break; | ||
1907 | } | ||
1908 | } | ||
1897 | 1909 | ||
1898 | static const struct rt2x00_ops rt2500usb_ops = { | 1910 | static const struct rt2x00_ops rt2500usb_ops = { |
1899 | .name = KBUILD_MODNAME, | 1911 | .name = KBUILD_MODNAME, |
@@ -1901,11 +1913,7 @@ static const struct rt2x00_ops rt2500usb_ops = { | |||
1901 | .eeprom_size = EEPROM_SIZE, | 1913 | .eeprom_size = EEPROM_SIZE, |
1902 | .rf_size = RF_SIZE, | 1914 | .rf_size = RF_SIZE, |
1903 | .tx_queues = NUM_TX_QUEUES, | 1915 | .tx_queues = NUM_TX_QUEUES, |
1904 | .extra_tx_headroom = TXD_DESC_SIZE, | 1916 | .queue_init = rt2500usb_queue_init, |
1905 | .rx = &rt2500usb_queue_rx, | ||
1906 | .tx = &rt2500usb_queue_tx, | ||
1907 | .bcn = &rt2500usb_queue_bcn, | ||
1908 | .atim = &rt2500usb_queue_atim, | ||
1909 | .lib = &rt2500usb_rt2x00_ops, | 1917 | .lib = &rt2500usb_rt2x00_ops, |
1910 | .hw = &rt2500usb_mac80211_ops, | 1918 | .hw = &rt2500usb_mac80211_ops, |
1911 | #ifdef CONFIG_RT2X00_LIB_DEBUGFS | 1919 | #ifdef CONFIG_RT2X00_LIB_DEBUGFS |
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index 330f1d25726d..7c7478219bbc 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c | |||
@@ -1186,29 +1186,43 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = { | |||
1186 | .sta_remove = rt2800_sta_remove, | 1186 | .sta_remove = rt2800_sta_remove, |
1187 | }; | 1187 | }; |
1188 | 1188 | ||
1189 | static const struct data_queue_desc rt2800pci_queue_rx = { | 1189 | static void rt2800pci_queue_init(struct data_queue *queue) |
1190 | .entry_num = 128, | 1190 | { |
1191 | .data_size = AGGREGATION_SIZE, | 1191 | switch (queue->qid) { |
1192 | .desc_size = RXD_DESC_SIZE, | 1192 | case QID_RX: |
1193 | .winfo_size = RXWI_DESC_SIZE, | 1193 | queue->limit = 128; |
1194 | .priv_size = sizeof(struct queue_entry_priv_mmio), | 1194 | queue->data_size = AGGREGATION_SIZE; |
1195 | }; | 1195 | queue->desc_size = RXD_DESC_SIZE; |
1196 | queue->winfo_size = RXWI_DESC_SIZE; | ||
1197 | queue->priv_size = sizeof(struct queue_entry_priv_mmio); | ||
1198 | break; | ||
1196 | 1199 | ||
1197 | static const struct data_queue_desc rt2800pci_queue_tx = { | 1200 | case QID_AC_VO: |
1198 | .entry_num = 64, | 1201 | case QID_AC_VI: |
1199 | .data_size = AGGREGATION_SIZE, | 1202 | case QID_AC_BE: |
1200 | .desc_size = TXD_DESC_SIZE, | 1203 | case QID_AC_BK: |
1201 | .winfo_size = TXWI_DESC_SIZE, | 1204 | queue->limit = 64; |
1202 | .priv_size = sizeof(struct queue_entry_priv_mmio), | 1205 | queue->data_size = AGGREGATION_SIZE; |
1203 | }; | 1206 | queue->desc_size = TXD_DESC_SIZE; |
1207 | queue->winfo_size = TXWI_DESC_SIZE; | ||
1208 | queue->priv_size = sizeof(struct queue_entry_priv_mmio); | ||
1209 | break; | ||
1204 | 1210 | ||
1205 | static const struct data_queue_desc rt2800pci_queue_bcn = { | 1211 | case QID_BEACON: |
1206 | .entry_num = 8, | 1212 | queue->limit = 8; |
1207 | .data_size = 0, /* No DMA required for beacons */ | 1213 | queue->data_size = 0; /* No DMA required for beacons */ |
1208 | .desc_size = TXD_DESC_SIZE, | 1214 | queue->desc_size = TXD_DESC_SIZE; |
1209 | .winfo_size = TXWI_DESC_SIZE, | 1215 | queue->winfo_size = TXWI_DESC_SIZE; |
1210 | .priv_size = sizeof(struct queue_entry_priv_mmio), | 1216 | queue->priv_size = sizeof(struct queue_entry_priv_mmio); |
1211 | }; | 1217 | break; |
1218 | |||
1219 | case QID_ATIM: | ||
1220 | /* fallthrough */ | ||
1221 | default: | ||
1222 | BUG(); | ||
1223 | break; | ||
1224 | } | ||
1225 | } | ||
1212 | 1226 | ||
1213 | static const struct rt2x00_ops rt2800pci_ops = { | 1227 | static const struct rt2x00_ops rt2800pci_ops = { |
1214 | .name = KBUILD_MODNAME, | 1228 | .name = KBUILD_MODNAME, |
@@ -1217,10 +1231,7 @@ static const struct rt2x00_ops rt2800pci_ops = { | |||
1217 | .eeprom_size = EEPROM_SIZE, | 1231 | .eeprom_size = EEPROM_SIZE, |
1218 | .rf_size = RF_SIZE, | 1232 | .rf_size = RF_SIZE, |
1219 | .tx_queues = NUM_TX_QUEUES, | 1233 | .tx_queues = NUM_TX_QUEUES, |
1220 | .extra_tx_headroom = TXWI_DESC_SIZE, | 1234 | .queue_init = rt2800pci_queue_init, |
1221 | .rx = &rt2800pci_queue_rx, | ||
1222 | .tx = &rt2800pci_queue_tx, | ||
1223 | .bcn = &rt2800pci_queue_bcn, | ||
1224 | .lib = &rt2800pci_rt2x00_ops, | 1235 | .lib = &rt2800pci_rt2x00_ops, |
1225 | .drv = &rt2800pci_rt2800_ops, | 1236 | .drv = &rt2800pci_rt2800_ops, |
1226 | .hw = &rt2800pci_mac80211_ops, | 1237 | .hw = &rt2800pci_mac80211_ops, |
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index c71a48da9a31..7edd903dd749 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c | |||
@@ -849,85 +849,63 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = { | |||
849 | .sta_remove = rt2800_sta_remove, | 849 | .sta_remove = rt2800_sta_remove, |
850 | }; | 850 | }; |
851 | 851 | ||
852 | static const struct data_queue_desc rt2800usb_queue_rx = { | 852 | static void rt2800usb_queue_init(struct data_queue *queue) |
853 | .entry_num = 128, | 853 | { |
854 | .data_size = AGGREGATION_SIZE, | 854 | struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; |
855 | .desc_size = RXINFO_DESC_SIZE, | 855 | unsigned short txwi_size, rxwi_size; |
856 | .winfo_size = RXWI_DESC_SIZE, | ||
857 | .priv_size = sizeof(struct queue_entry_priv_usb), | ||
858 | }; | ||
859 | |||
860 | static const struct data_queue_desc rt2800usb_queue_tx = { | ||
861 | .entry_num = 16, | ||
862 | .data_size = AGGREGATION_SIZE, | ||
863 | .desc_size = TXINFO_DESC_SIZE, | ||
864 | .winfo_size = TXWI_DESC_SIZE, | ||
865 | .priv_size = sizeof(struct queue_entry_priv_usb), | ||
866 | }; | ||
867 | |||
868 | static const struct data_queue_desc rt2800usb_queue_bcn = { | ||
869 | .entry_num = 8, | ||
870 | .data_size = MGMT_FRAME_SIZE, | ||
871 | .desc_size = TXINFO_DESC_SIZE, | ||
872 | .winfo_size = TXWI_DESC_SIZE, | ||
873 | .priv_size = sizeof(struct queue_entry_priv_usb), | ||
874 | }; | ||
875 | 856 | ||
876 | static const struct rt2x00_ops rt2800usb_ops = { | 857 | if (rt2x00_rt(rt2x00dev, RT5592)) { |
877 | .name = KBUILD_MODNAME, | 858 | txwi_size = TXWI_DESC_SIZE_5592; |
878 | .drv_data_size = sizeof(struct rt2800_drv_data), | 859 | rxwi_size = RXWI_DESC_SIZE_5592; |
879 | .max_ap_intf = 8, | 860 | } else { |
880 | .eeprom_size = EEPROM_SIZE, | 861 | txwi_size = TXWI_DESC_SIZE; |
881 | .rf_size = RF_SIZE, | 862 | rxwi_size = RXWI_DESC_SIZE; |
882 | .tx_queues = NUM_TX_QUEUES, | 863 | } |
883 | .extra_tx_headroom = TXINFO_DESC_SIZE + TXWI_DESC_SIZE, | ||
884 | .rx = &rt2800usb_queue_rx, | ||
885 | .tx = &rt2800usb_queue_tx, | ||
886 | .bcn = &rt2800usb_queue_bcn, | ||
887 | .lib = &rt2800usb_rt2x00_ops, | ||
888 | .drv = &rt2800usb_rt2800_ops, | ||
889 | .hw = &rt2800usb_mac80211_ops, | ||
890 | #ifdef CONFIG_RT2X00_LIB_DEBUGFS | ||
891 | .debugfs = &rt2800_rt2x00debug, | ||
892 | #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ | ||
893 | }; | ||
894 | 864 | ||
895 | static const struct data_queue_desc rt2800usb_queue_rx_5592 = { | 865 | switch (queue->qid) { |
896 | .entry_num = 128, | 866 | case QID_RX: |
897 | .data_size = AGGREGATION_SIZE, | 867 | queue->limit = 128; |
898 | .desc_size = RXINFO_DESC_SIZE, | 868 | queue->data_size = AGGREGATION_SIZE; |
899 | .winfo_size = RXWI_DESC_SIZE_5592, | 869 | queue->desc_size = RXINFO_DESC_SIZE; |
900 | .priv_size = sizeof(struct queue_entry_priv_usb), | 870 | queue->winfo_size = rxwi_size; |
901 | }; | 871 | queue->priv_size = sizeof(struct queue_entry_priv_usb); |
872 | break; | ||
902 | 873 | ||
903 | static const struct data_queue_desc rt2800usb_queue_tx_5592 = { | 874 | case QID_AC_VO: |
904 | .entry_num = 16, | 875 | case QID_AC_VI: |
905 | .data_size = AGGREGATION_SIZE, | 876 | case QID_AC_BE: |
906 | .desc_size = TXINFO_DESC_SIZE, | 877 | case QID_AC_BK: |
907 | .winfo_size = TXWI_DESC_SIZE_5592, | 878 | queue->limit = 16; |
908 | .priv_size = sizeof(struct queue_entry_priv_usb), | 879 | queue->data_size = AGGREGATION_SIZE; |
909 | }; | 880 | queue->desc_size = TXINFO_DESC_SIZE; |
881 | queue->winfo_size = txwi_size; | ||
882 | queue->priv_size = sizeof(struct queue_entry_priv_usb); | ||
883 | break; | ||
910 | 884 | ||
911 | static const struct data_queue_desc rt2800usb_queue_bcn_5592 = { | 885 | case QID_BEACON: |
912 | .entry_num = 8, | 886 | queue->limit = 8; |
913 | .data_size = MGMT_FRAME_SIZE, | 887 | queue->data_size = MGMT_FRAME_SIZE; |
914 | .desc_size = TXINFO_DESC_SIZE, | 888 | queue->desc_size = TXINFO_DESC_SIZE; |
915 | .winfo_size = TXWI_DESC_SIZE_5592, | 889 | queue->winfo_size = txwi_size; |
916 | .priv_size = sizeof(struct queue_entry_priv_usb), | 890 | queue->priv_size = sizeof(struct queue_entry_priv_usb); |
917 | }; | 891 | break; |
918 | 892 | ||
893 | case QID_ATIM: | ||
894 | /* fallthrough */ | ||
895 | default: | ||
896 | BUG(); | ||
897 | break; | ||
898 | } | ||
899 | } | ||
919 | 900 | ||
920 | static const struct rt2x00_ops rt2800usb_ops_5592 = { | 901 | static const struct rt2x00_ops rt2800usb_ops = { |
921 | .name = KBUILD_MODNAME, | 902 | .name = KBUILD_MODNAME, |
922 | .drv_data_size = sizeof(struct rt2800_drv_data), | 903 | .drv_data_size = sizeof(struct rt2800_drv_data), |
923 | .max_ap_intf = 8, | 904 | .max_ap_intf = 8, |
924 | .eeprom_size = EEPROM_SIZE, | 905 | .eeprom_size = EEPROM_SIZE, |
925 | .rf_size = RF_SIZE, | 906 | .rf_size = RF_SIZE, |
926 | .tx_queues = NUM_TX_QUEUES, | 907 | .tx_queues = NUM_TX_QUEUES, |
927 | .extra_tx_headroom = TXINFO_DESC_SIZE + TXWI_DESC_SIZE_5592, | 908 | .queue_init = rt2800usb_queue_init, |
928 | .rx = &rt2800usb_queue_rx_5592, | ||
929 | .tx = &rt2800usb_queue_tx_5592, | ||
930 | .bcn = &rt2800usb_queue_bcn_5592, | ||
931 | .lib = &rt2800usb_rt2x00_ops, | 909 | .lib = &rt2800usb_rt2x00_ops, |
932 | .drv = &rt2800usb_rt2800_ops, | 910 | .drv = &rt2800usb_rt2800_ops, |
933 | .hw = &rt2800usb_mac80211_ops, | 911 | .hw = &rt2800usb_mac80211_ops, |
@@ -1248,15 +1226,15 @@ static struct usb_device_id rt2800usb_device_table[] = { | |||
1248 | #endif | 1226 | #endif |
1249 | #ifdef CONFIG_RT2800USB_RT55XX | 1227 | #ifdef CONFIG_RT2800USB_RT55XX |
1250 | /* Arcadyan */ | 1228 | /* Arcadyan */ |
1251 | { USB_DEVICE(0x043e, 0x7a32), .driver_info = 5592 }, | 1229 | { USB_DEVICE(0x043e, 0x7a32) }, |
1252 | /* AVM GmbH */ | 1230 | /* AVM GmbH */ |
1253 | { USB_DEVICE(0x057c, 0x8501), .driver_info = 5592 }, | 1231 | { USB_DEVICE(0x057c, 0x8501) }, |
1254 | /* D-Link DWA-160-B2 */ | 1232 | /* D-Link DWA-160-B2 */ |
1255 | { USB_DEVICE(0x2001, 0x3c1a), .driver_info = 5592 }, | 1233 | { USB_DEVICE(0x2001, 0x3c1a) }, |
1256 | /* Proware */ | 1234 | /* Proware */ |
1257 | { USB_DEVICE(0x043e, 0x7a13), .driver_info = 5592 }, | 1235 | { USB_DEVICE(0x043e, 0x7a13) }, |
1258 | /* Ralink */ | 1236 | /* Ralink */ |
1259 | { USB_DEVICE(0x148f, 0x5572), .driver_info = 5592 }, | 1237 | { USB_DEVICE(0x148f, 0x5572) }, |
1260 | #endif | 1238 | #endif |
1261 | #ifdef CONFIG_RT2800USB_UNKNOWN | 1239 | #ifdef CONFIG_RT2800USB_UNKNOWN |
1262 | /* | 1240 | /* |
@@ -1361,9 +1339,6 @@ MODULE_LICENSE("GPL"); | |||
1361 | static int rt2800usb_probe(struct usb_interface *usb_intf, | 1339 | static int rt2800usb_probe(struct usb_interface *usb_intf, |
1362 | const struct usb_device_id *id) | 1340 | const struct usb_device_id *id) |
1363 | { | 1341 | { |
1364 | if (id->driver_info == 5592) | ||
1365 | return rt2x00usb_probe(usb_intf, &rt2800usb_ops_5592); | ||
1366 | |||
1367 | return rt2x00usb_probe(usb_intf, &rt2800usb_ops); | 1342 | return rt2x00usb_probe(usb_intf, &rt2800usb_ops); |
1368 | } | 1343 | } |
1369 | 1344 | ||
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index 7510723a8c37..ee3fc570b11d 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h | |||
@@ -648,11 +648,7 @@ struct rt2x00_ops { | |||
648 | const unsigned int eeprom_size; | 648 | const unsigned int eeprom_size; |
649 | const unsigned int rf_size; | 649 | const unsigned int rf_size; |
650 | const unsigned int tx_queues; | 650 | const unsigned int tx_queues; |
651 | const unsigned int extra_tx_headroom; | 651 | void (*queue_init)(struct data_queue *queue); |
652 | const struct data_queue_desc *rx; | ||
653 | const struct data_queue_desc *tx; | ||
654 | const struct data_queue_desc *bcn; | ||
655 | const struct data_queue_desc *atim; | ||
656 | const struct rt2x00lib_ops *lib; | 652 | const struct rt2x00lib_ops *lib; |
657 | const void *drv; | 653 | const void *drv; |
658 | const struct ieee80211_ops *hw; | 654 | const struct ieee80211_ops *hw; |
@@ -1010,6 +1006,9 @@ struct rt2x00_dev { | |||
1010 | */ | 1006 | */ |
1011 | struct list_head bar_list; | 1007 | struct list_head bar_list; |
1012 | spinlock_t bar_list_lock; | 1008 | spinlock_t bar_list_lock; |
1009 | |||
1010 | /* Extra TX headroom required for alignment purposes. */ | ||
1011 | unsigned int extra_tx_headroom; | ||
1013 | }; | 1012 | }; |
1014 | 1013 | ||
1015 | struct rt2x00_bar_list_entry { | 1014 | struct rt2x00_bar_list_entry { |
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index 6a201725bc50..f03e3bba51c3 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c | |||
@@ -334,7 +334,7 @@ void rt2x00lib_txdone(struct queue_entry *entry, | |||
334 | /* | 334 | /* |
335 | * Remove the extra tx headroom from the skb. | 335 | * Remove the extra tx headroom from the skb. |
336 | */ | 336 | */ |
337 | skb_pull(entry->skb, rt2x00dev->ops->extra_tx_headroom); | 337 | skb_pull(entry->skb, rt2x00dev->extra_tx_headroom); |
338 | 338 | ||
339 | /* | 339 | /* |
340 | * Signal that the TX descriptor is no longer in the skb. | 340 | * Signal that the TX descriptor is no longer in the skb. |
@@ -1049,7 +1049,7 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev) | |||
1049 | */ | 1049 | */ |
1050 | rt2x00dev->hw->extra_tx_headroom = | 1050 | rt2x00dev->hw->extra_tx_headroom = |
1051 | max_t(unsigned int, IEEE80211_TX_STATUS_HEADROOM, | 1051 | max_t(unsigned int, IEEE80211_TX_STATUS_HEADROOM, |
1052 | rt2x00dev->ops->extra_tx_headroom); | 1052 | rt2x00dev->extra_tx_headroom); |
1053 | 1053 | ||
1054 | /* | 1054 | /* |
1055 | * Take TX headroom required for alignment into account. | 1055 | * Take TX headroom required for alignment into account. |
@@ -1256,6 +1256,17 @@ static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev) | |||
1256 | rt2x00dev->hw->wiphy->n_iface_combinations = 1; | 1256 | rt2x00dev->hw->wiphy->n_iface_combinations = 1; |
1257 | } | 1257 | } |
1258 | 1258 | ||
1259 | static unsigned int rt2x00dev_extra_tx_headroom(struct rt2x00_dev *rt2x00dev) | ||
1260 | { | ||
1261 | if (WARN_ON(!rt2x00dev->tx)) | ||
1262 | return 0; | ||
1263 | |||
1264 | if (rt2x00_is_usb(rt2x00dev)) | ||
1265 | return rt2x00dev->tx[0].winfo_size + rt2x00dev->tx[0].desc_size; | ||
1266 | |||
1267 | return rt2x00dev->tx[0].winfo_size; | ||
1268 | } | ||
1269 | |||
1259 | /* | 1270 | /* |
1260 | * driver allocation handlers. | 1271 | * driver allocation handlers. |
1261 | */ | 1272 | */ |
@@ -1330,13 +1341,16 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) | |||
1330 | if (retval) | 1341 | if (retval) |
1331 | goto exit; | 1342 | goto exit; |
1332 | 1343 | ||
1344 | /* Cache TX headroom value */ | ||
1345 | rt2x00dev->extra_tx_headroom = rt2x00dev_extra_tx_headroom(rt2x00dev); | ||
1346 | |||
1333 | /* | 1347 | /* |
1334 | * Determine which operating modes are supported, all modes | 1348 | * Determine which operating modes are supported, all modes |
1335 | * which require beaconing, depend on the availability of | 1349 | * which require beaconing, depend on the availability of |
1336 | * beacon entries. | 1350 | * beacon entries. |
1337 | */ | 1351 | */ |
1338 | rt2x00dev->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); | 1352 | rt2x00dev->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); |
1339 | if (rt2x00dev->ops->bcn->entry_num > 0) | 1353 | if (rt2x00dev->bcn->limit > 0) |
1340 | rt2x00dev->hw->wiphy->interface_modes |= | 1354 | rt2x00dev->hw->wiphy->interface_modes |= |
1341 | BIT(NL80211_IFTYPE_ADHOC) | | 1355 | BIT(NL80211_IFTYPE_ADHOC) | |
1342 | BIT(NL80211_IFTYPE_AP) | | 1356 | BIT(NL80211_IFTYPE_AP) | |
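With extra_tx_headroom gone from rt2x00_ops, it is derived once from the first TX queue after the queues are set up and cached on the device, so the TX/RX hot paths keep a cheap field read. A sketch of that derivation, assuming illustrative sizes and an explicit is_usb flag in place of rt2x00_is_usb():

#include <stdbool.h>
#include <stdio.h>

struct tx_queue { unsigned char winfo_size, desc_size; };

static unsigned int extra_tx_headroom(const struct tx_queue *txq, bool is_usb)
{
	/* USB devices prepend both the TX info block and the descriptor */
	if (is_usb)
		return txq->winfo_size + txq->desc_size;
	return txq->winfo_size;
}

int main(void)
{
	struct tx_queue txq = { .winfo_size = 20, .desc_size = 4 };

	/* computed once at probe time, then stored in the device structure */
	printf("pci: %u, usb: %u\n",
	       extra_tx_headroom(&txq, false), extra_tx_headroom(&txq, true));
	return 0;
}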
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index 5efbbbdca701..6c0a91ff963c 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c | |||
@@ -542,8 +542,8 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry, | |||
542 | /* | 542 | /* |
543 | * Add the requested extra tx headroom in front of the skb. | 543 | * Add the requested extra tx headroom in front of the skb. |
544 | */ | 544 | */ |
545 | skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom); | 545 | skb_push(entry->skb, rt2x00dev->extra_tx_headroom); |
546 | memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom); | 546 | memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom); |
547 | 547 | ||
548 | /* | 548 | /* |
549 | * Call the driver's write_tx_data function, if it exists. | 549 | * Call the driver's write_tx_data function, if it exists. |
@@ -596,7 +596,7 @@ static void rt2x00queue_bar_check(struct queue_entry *entry) | |||
596 | { | 596 | { |
597 | struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; | 597 | struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; |
598 | struct ieee80211_bar *bar = (void *) (entry->skb->data + | 598 | struct ieee80211_bar *bar = (void *) (entry->skb->data + |
599 | rt2x00dev->ops->extra_tx_headroom); | 599 | rt2x00dev->extra_tx_headroom); |
600 | struct rt2x00_bar_list_entry *bar_entry; | 600 | struct rt2x00_bar_list_entry *bar_entry; |
601 | 601 | ||
602 | if (likely(!ieee80211_is_back_req(bar->frame_control))) | 602 | if (likely(!ieee80211_is_back_req(bar->frame_control))) |
@@ -1161,8 +1161,7 @@ void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev) | |||
1161 | } | 1161 | } |
1162 | } | 1162 | } |
1163 | 1163 | ||
1164 | static int rt2x00queue_alloc_entries(struct data_queue *queue, | 1164 | static int rt2x00queue_alloc_entries(struct data_queue *queue) |
1165 | const struct data_queue_desc *qdesc) | ||
1166 | { | 1165 | { |
1167 | struct queue_entry *entries; | 1166 | struct queue_entry *entries; |
1168 | unsigned int entry_size; | 1167 | unsigned int entry_size; |
@@ -1173,7 +1172,7 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue, | |||
1173 | /* | 1172 | /* |
1174 | * Allocate all queue entries. | 1173 | * Allocate all queue entries. |
1175 | */ | 1174 | */ |
1176 | entry_size = sizeof(*entries) + qdesc->priv_size; | 1175 | entry_size = sizeof(*entries) + queue->priv_size; |
1177 | entries = kcalloc(queue->limit, entry_size, GFP_KERNEL); | 1176 | entries = kcalloc(queue->limit, entry_size, GFP_KERNEL); |
1178 | if (!entries) | 1177 | if (!entries) |
1179 | return -ENOMEM; | 1178 | return -ENOMEM; |
@@ -1189,7 +1188,7 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue, | |||
1189 | entries[i].entry_idx = i; | 1188 | entries[i].entry_idx = i; |
1190 | entries[i].priv_data = | 1189 | entries[i].priv_data = |
1191 | QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit, | 1190 | QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit, |
1192 | sizeof(*entries), qdesc->priv_size); | 1191 | sizeof(*entries), queue->priv_size); |
1193 | } | 1192 | } |
1194 | 1193 | ||
1195 | #undef QUEUE_ENTRY_PRIV_OFFSET | 1194 | #undef QUEUE_ENTRY_PRIV_OFFSET |
@@ -1231,23 +1230,22 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev) | |||
1231 | struct data_queue *queue; | 1230 | struct data_queue *queue; |
1232 | int status; | 1231 | int status; |
1233 | 1232 | ||
1234 | status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx); | 1233 | status = rt2x00queue_alloc_entries(rt2x00dev->rx); |
1235 | if (status) | 1234 | if (status) |
1236 | goto exit; | 1235 | goto exit; |
1237 | 1236 | ||
1238 | tx_queue_for_each(rt2x00dev, queue) { | 1237 | tx_queue_for_each(rt2x00dev, queue) { |
1239 | status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx); | 1238 | status = rt2x00queue_alloc_entries(queue); |
1240 | if (status) | 1239 | if (status) |
1241 | goto exit; | 1240 | goto exit; |
1242 | } | 1241 | } |
1243 | 1242 | ||
1244 | status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn); | 1243 | status = rt2x00queue_alloc_entries(rt2x00dev->bcn); |
1245 | if (status) | 1244 | if (status) |
1246 | goto exit; | 1245 | goto exit; |
1247 | 1246 | ||
1248 | if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) { | 1247 | if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) { |
1249 | status = rt2x00queue_alloc_entries(rt2x00dev->atim, | 1248 | status = rt2x00queue_alloc_entries(rt2x00dev->atim); |
1250 | rt2x00dev->ops->atim); | ||
1251 | if (status) | 1249 | if (status) |
1252 | goto exit; | 1250 | goto exit; |
1253 | } | 1251 | } |
@@ -1278,38 +1276,9 @@ void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev) | |||
1278 | } | 1276 | } |
1279 | } | 1277 | } |
1280 | 1278 | ||
1281 | static const struct data_queue_desc * | ||
1282 | rt2x00queue_get_qdesc_by_qid(struct rt2x00_dev *rt2x00dev, | ||
1283 | enum data_queue_qid qid) | ||
1284 | { | ||
1285 | switch (qid) { | ||
1286 | case QID_RX: | ||
1287 | return rt2x00dev->ops->rx; | ||
1288 | |||
1289 | case QID_AC_BE: | ||
1290 | case QID_AC_BK: | ||
1291 | case QID_AC_VO: | ||
1292 | case QID_AC_VI: | ||
1293 | return rt2x00dev->ops->tx; | ||
1294 | |||
1295 | case QID_BEACON: | ||
1296 | return rt2x00dev->ops->bcn; | ||
1297 | |||
1298 | case QID_ATIM: | ||
1299 | return rt2x00dev->ops->atim; | ||
1300 | |||
1301 | default: | ||
1302 | break; | ||
1303 | } | ||
1304 | |||
1305 | return NULL; | ||
1306 | } | ||
1307 | |||
1308 | static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev, | 1279 | static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev, |
1309 | struct data_queue *queue, enum data_queue_qid qid) | 1280 | struct data_queue *queue, enum data_queue_qid qid) |
1310 | { | 1281 | { |
1311 | const struct data_queue_desc *qdesc; | ||
1312 | |||
1313 | mutex_init(&queue->status_lock); | 1282 | mutex_init(&queue->status_lock); |
1314 | spin_lock_init(&queue->tx_lock); | 1283 | spin_lock_init(&queue->tx_lock); |
1315 | spin_lock_init(&queue->index_lock); | 1284 | spin_lock_init(&queue->index_lock); |
@@ -1321,14 +1290,9 @@ static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev, | |||
1321 | queue->cw_min = 5; | 1290 | queue->cw_min = 5; |
1322 | queue->cw_max = 10; | 1291 | queue->cw_max = 10; |
1323 | 1292 | ||
1324 | qdesc = rt2x00queue_get_qdesc_by_qid(rt2x00dev, qid); | 1293 | rt2x00dev->ops->queue_init(queue); |
1325 | BUG_ON(!qdesc); | ||
1326 | 1294 | ||
1327 | queue->limit = qdesc->entry_num; | 1295 | queue->threshold = DIV_ROUND_UP(queue->limit, 10); |
1328 | queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10); | ||
1329 | queue->data_size = qdesc->data_size; | ||
1330 | queue->desc_size = qdesc->desc_size; | ||
1331 | queue->winfo_size = qdesc->winfo_size; | ||
1332 | } | 1296 | } |
1333 | 1297 | ||
1334 | int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev) | 1298 | int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev) |
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h index 4a7b34e9261b..ebe117224979 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.h +++ b/drivers/net/wireless/rt2x00/rt2x00queue.h | |||
@@ -453,6 +453,7 @@ enum data_queue_flags { | |||
453 | * @cw_max: The cw max value for outgoing frames (field ignored in RX queue). | 453 | * @cw_max: The cw max value for outgoing frames (field ignored in RX queue). |
454 | * @data_size: Maximum data size for the frames in this queue. | 454 | * @data_size: Maximum data size for the frames in this queue. |
455 | * @desc_size: Hardware descriptor size for the data in this queue. | 455 | * @desc_size: Hardware descriptor size for the data in this queue. |
456 | * @priv_size: Size of per-queue_entry private data. | ||
456 | * @usb_endpoint: Device endpoint used for communication (USB only) | 457 | * @usb_endpoint: Device endpoint used for communication (USB only) |
457 | * @usb_maxpacket: Max packet size for given endpoint (USB only) | 458 | * @usb_maxpacket: Max packet size for given endpoint (USB only) |
458 | */ | 459 | */ |
@@ -481,31 +482,13 @@ struct data_queue { | |||
481 | unsigned short data_size; | 482 | unsigned short data_size; |
482 | unsigned char desc_size; | 483 | unsigned char desc_size; |
483 | unsigned char winfo_size; | 484 | unsigned char winfo_size; |
485 | unsigned short priv_size; | ||
484 | 486 | ||
485 | unsigned short usb_endpoint; | 487 | unsigned short usb_endpoint; |
486 | unsigned short usb_maxpacket; | 488 | unsigned short usb_maxpacket; |
487 | }; | 489 | }; |
488 | 490 | ||
489 | /** | 491 | /** |
490 | * struct data_queue_desc: Data queue description | ||
491 | * | ||
492 | * The information in this structure is used by drivers | ||
493 | * to inform rt2x00lib about the creation of the data queue. | ||
494 | * | ||
495 | * @entry_num: Maximum number of entries for a queue. | ||
496 | * @data_size: Maximum data size for the frames in this queue. | ||
497 | * @desc_size: Hardware descriptor size for the data in this queue. | ||
498 | * @priv_size: Size of per-queue_entry private data. | ||
499 | */ | ||
500 | struct data_queue_desc { | ||
501 | unsigned short entry_num; | ||
502 | unsigned short data_size; | ||
503 | unsigned char desc_size; | ||
504 | unsigned char winfo_size; | ||
505 | unsigned short priv_size; | ||
506 | }; | ||
507 | |||
508 | /** | ||
509 | * queue_end - Return pointer to the last queue (HELPER MACRO). | 492 | * queue_end - Return pointer to the last queue (HELPER MACRO). |
510 | * @__dev: Pointer to &struct rt2x00_dev | 493 | * @__dev: Pointer to &struct rt2x00_dev |
511 | * | 494 | * |
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index 7e1759b3e49a..53754bc66d05 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c | |||
@@ -3025,26 +3025,40 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = { | |||
3025 | .config = rt61pci_config, | 3025 | .config = rt61pci_config, |
3026 | }; | 3026 | }; |
3027 | 3027 | ||
3028 | static const struct data_queue_desc rt61pci_queue_rx = { | 3028 | static void rt61pci_queue_init(struct data_queue *queue) |
3029 | .entry_num = 32, | 3029 | { |
3030 | .data_size = DATA_FRAME_SIZE, | 3030 | switch (queue->qid) { |
3031 | .desc_size = RXD_DESC_SIZE, | 3031 | case QID_RX: |
3032 | .priv_size = sizeof(struct queue_entry_priv_mmio), | 3032 | queue->limit = 32; |
3033 | }; | 3033 | queue->data_size = DATA_FRAME_SIZE; |
3034 | queue->desc_size = RXD_DESC_SIZE; | ||
3035 | queue->priv_size = sizeof(struct queue_entry_priv_mmio); | ||
3036 | break; | ||
3034 | 3037 | ||
3035 | static const struct data_queue_desc rt61pci_queue_tx = { | 3038 | case QID_AC_VO: |
3036 | .entry_num = 32, | 3039 | case QID_AC_VI: |
3037 | .data_size = DATA_FRAME_SIZE, | 3040 | case QID_AC_BE: |
3038 | .desc_size = TXD_DESC_SIZE, | 3041 | case QID_AC_BK: |
3039 | .priv_size = sizeof(struct queue_entry_priv_mmio), | 3042 | queue->limit = 32; |
3040 | }; | 3043 | queue->data_size = DATA_FRAME_SIZE; |
3044 | queue->desc_size = TXD_DESC_SIZE; | ||
3045 | queue->priv_size = sizeof(struct queue_entry_priv_mmio); | ||
3046 | break; | ||
3041 | 3047 | ||
3042 | static const struct data_queue_desc rt61pci_queue_bcn = { | 3048 | case QID_BEACON: |
3043 | .entry_num = 4, | 3049 | queue->limit = 4; |
3044 | .data_size = 0, /* No DMA required for beacons */ | 3050 | queue->data_size = 0; /* No DMA required for beacons */ |
3045 | .desc_size = TXINFO_SIZE, | 3051 | queue->desc_size = TXINFO_SIZE; |
3046 | .priv_size = sizeof(struct queue_entry_priv_mmio), | 3052 | queue->priv_size = sizeof(struct queue_entry_priv_mmio); |
3047 | }; | 3053 | break; |
3054 | |||
3055 | case QID_ATIM: | ||
3056 | /* fallthrough */ | ||
3057 | default: | ||
3058 | BUG(); | ||
3059 | break; | ||
3060 | } | ||
3061 | } | ||
3048 | 3062 | ||
3049 | static const struct rt2x00_ops rt61pci_ops = { | 3063 | static const struct rt2x00_ops rt61pci_ops = { |
3050 | .name = KBUILD_MODNAME, | 3064 | .name = KBUILD_MODNAME, |
@@ -3052,10 +3066,7 @@ static const struct rt2x00_ops rt61pci_ops = { | |||
3052 | .eeprom_size = EEPROM_SIZE, | 3066 | .eeprom_size = EEPROM_SIZE, |
3053 | .rf_size = RF_SIZE, | 3067 | .rf_size = RF_SIZE, |
3054 | .tx_queues = NUM_TX_QUEUES, | 3068 | .tx_queues = NUM_TX_QUEUES, |
3055 | .extra_tx_headroom = 0, | 3069 | .queue_init = rt61pci_queue_init, |
3056 | .rx = &rt61pci_queue_rx, | ||
3057 | .tx = &rt61pci_queue_tx, | ||
3058 | .bcn = &rt61pci_queue_bcn, | ||
3059 | .lib = &rt61pci_rt2x00_ops, | 3070 | .lib = &rt61pci_rt2x00_ops, |
3060 | .hw = &rt61pci_mac80211_ops, | 3071 | .hw = &rt61pci_mac80211_ops, |
3061 | #ifdef CONFIG_RT2X00_LIB_DEBUGFS | 3072 | #ifdef CONFIG_RT2X00_LIB_DEBUGFS |
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index 377e09bb0b81..1616ed484ceb 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c | |||
@@ -2359,26 +2359,40 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = { | |||
2359 | .config = rt73usb_config, | 2359 | .config = rt73usb_config, |
2360 | }; | 2360 | }; |
2361 | 2361 | ||
2362 | static const struct data_queue_desc rt73usb_queue_rx = { | 2362 | static void rt73usb_queue_init(struct data_queue *queue) |
2363 | .entry_num = 32, | 2363 | { |
2364 | .data_size = DATA_FRAME_SIZE, | 2364 | switch (queue->qid) { |
2365 | .desc_size = RXD_DESC_SIZE, | 2365 | case QID_RX: |
2366 | .priv_size = sizeof(struct queue_entry_priv_usb), | 2366 | queue->limit = 32; |
2367 | }; | 2367 | queue->data_size = DATA_FRAME_SIZE; |
2368 | queue->desc_size = RXD_DESC_SIZE; | ||
2369 | queue->priv_size = sizeof(struct queue_entry_priv_usb); | ||
2370 | break; | ||
2368 | 2371 | ||
2369 | static const struct data_queue_desc rt73usb_queue_tx = { | 2372 | case QID_AC_VO: |
2370 | .entry_num = 32, | 2373 | case QID_AC_VI: |
2371 | .data_size = DATA_FRAME_SIZE, | 2374 | case QID_AC_BE: |
2372 | .desc_size = TXD_DESC_SIZE, | 2375 | case QID_AC_BK: |
2373 | .priv_size = sizeof(struct queue_entry_priv_usb), | 2376 | queue->limit = 32; |
2374 | }; | 2377 | queue->data_size = DATA_FRAME_SIZE; |
2378 | queue->desc_size = TXD_DESC_SIZE; | ||
2379 | queue->priv_size = sizeof(struct queue_entry_priv_usb); | ||
2380 | break; | ||
2375 | 2381 | ||
2376 | static const struct data_queue_desc rt73usb_queue_bcn = { | 2382 | case QID_BEACON: |
2377 | .entry_num = 4, | 2383 | queue->limit = 4; |
2378 | .data_size = MGMT_FRAME_SIZE, | 2384 | queue->data_size = MGMT_FRAME_SIZE; |
2379 | .desc_size = TXINFO_SIZE, | 2385 | queue->desc_size = TXINFO_SIZE; |
2380 | .priv_size = sizeof(struct queue_entry_priv_usb), | 2386 | queue->priv_size = sizeof(struct queue_entry_priv_usb); |
2381 | }; | 2387 | break; |
2388 | |||
2389 | case QID_ATIM: | ||
2390 | /* fallthrough */ | ||
2391 | default: | ||
2392 | BUG(); | ||
2393 | break; | ||
2394 | } | ||
2395 | } | ||
2382 | 2396 | ||
2383 | static const struct rt2x00_ops rt73usb_ops = { | 2397 | static const struct rt2x00_ops rt73usb_ops = { |
2384 | .name = KBUILD_MODNAME, | 2398 | .name = KBUILD_MODNAME, |
@@ -2386,10 +2400,7 @@ static const struct rt2x00_ops rt73usb_ops = { | |||
2386 | .eeprom_size = EEPROM_SIZE, | 2400 | .eeprom_size = EEPROM_SIZE, |
2387 | .rf_size = RF_SIZE, | 2401 | .rf_size = RF_SIZE, |
2388 | .tx_queues = NUM_TX_QUEUES, | 2402 | .tx_queues = NUM_TX_QUEUES, |
2389 | .extra_tx_headroom = TXD_DESC_SIZE, | 2403 | .queue_init = rt73usb_queue_init, |
2390 | .rx = &rt73usb_queue_rx, | ||
2391 | .tx = &rt73usb_queue_tx, | ||
2392 | .bcn = &rt73usb_queue_bcn, | ||
2393 | .lib = &rt73usb_rt2x00_ops, | 2404 | .lib = &rt73usb_rt2x00_ops, |
2394 | .hw = &rt73usb_mac80211_ops, | 2405 | .hw = &rt73usb_mac80211_ops, |
2395 | #ifdef CONFIG_RT2X00_LIB_DEBUGFS | 2406 | #ifdef CONFIG_RT2X00_LIB_DEBUGFS |
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c index 953f1a0f8532..2119313a737b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c | |||
@@ -104,7 +104,7 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, | |||
104 | tx_agc[RF90_PATH_A] = 0x10101010; | 104 | tx_agc[RF90_PATH_A] = 0x10101010; |
105 | tx_agc[RF90_PATH_B] = 0x10101010; | 105 | tx_agc[RF90_PATH_B] = 0x10101010; |
106 | } else if (rtlpriv->dm.dynamic_txhighpower_lvl == | 106 | } else if (rtlpriv->dm.dynamic_txhighpower_lvl == |
107 | TXHIGHPWRLEVEL_LEVEL1) { | 107 | TXHIGHPWRLEVEL_LEVEL2) { |
108 | tx_agc[RF90_PATH_A] = 0x00000000; | 108 | tx_agc[RF90_PATH_A] = 0x00000000; |
109 | tx_agc[RF90_PATH_B] = 0x00000000; | 109 | tx_agc[RF90_PATH_B] = 0x00000000; |
110 | } else{ | 110 | } else{ |
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 826f085c29dd..2bd598526217 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | |||
@@ -359,6 +359,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { | |||
359 | {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ | 359 | {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ |
360 | {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/ | 360 | {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/ |
361 | {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/ | 361 | {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/ |
362 | {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/ | ||
362 | {RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/ | 363 | {RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/ |
363 | {} | 364 | {} |
364 | }; | 365 | }; |
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c index 4c67c2f9ea71..c7dc6feab2ff 100644 --- a/drivers/net/wireless/ti/wl1251/spi.c +++ b/drivers/net/wireless/ti/wl1251/spi.c | |||
@@ -93,8 +93,7 @@ static void wl1251_spi_wake(struct wl1251 *wl) | |||
93 | memset(&t, 0, sizeof(t)); | 93 | memset(&t, 0, sizeof(t)); |
94 | spi_message_init(&m); | 94 | spi_message_init(&m); |
95 | 95 | ||
96 | /* | 96 | /* Set WSPI_INIT_COMMAND |
97 | * Set WSPI_INIT_COMMAND | ||
98 | * the data is sent from the MSB to the LSB | 97 | * the data is sent from the MSB to the LSB |
99 | */ | 98 | */ |
100 | cmd[2] = 0xff; | 99 | cmd[2] = 0xff; |
@@ -262,7 +261,8 @@ static int wl1251_spi_probe(struct spi_device *spi) | |||
262 | wl->if_ops = &wl1251_spi_ops; | 261 | wl->if_ops = &wl1251_spi_ops; |
263 | 262 | ||
264 | /* This is the only SPI value that we need to set here, the rest | 263 | /* This is the only SPI value that we need to set here, the rest |
265 | * comes from the board-peripherals file */ | 264 | * comes from the board-peripherals file |
265 | */ | ||
266 | spi->bits_per_word = 32; | 266 | spi->bits_per_word = 32; |
267 | 267 | ||
268 | ret = spi_setup(spi); | 268 | ret = spi_setup(spi); |
@@ -329,29 +329,7 @@ static struct spi_driver wl1251_spi_driver = { | |||
329 | .remove = wl1251_spi_remove, | 329 | .remove = wl1251_spi_remove, |
330 | }; | 330 | }; |
331 | 331 | ||
332 | static int __init wl1251_spi_init(void) | 332 | module_spi_driver(wl1251_spi_driver); |
333 | { | ||
334 | int ret; | ||
335 | |||
336 | ret = spi_register_driver(&wl1251_spi_driver); | ||
337 | if (ret < 0) { | ||
338 | wl1251_error("failed to register spi driver: %d", ret); | ||
339 | goto out; | ||
340 | } | ||
341 | |||
342 | out: | ||
343 | return ret; | ||
344 | } | ||
345 | |||
346 | static void __exit wl1251_spi_exit(void) | ||
347 | { | ||
348 | spi_unregister_driver(&wl1251_spi_driver); | ||
349 | |||
350 | wl1251_notice("unloaded"); | ||
351 | } | ||
352 | |||
353 | module_init(wl1251_spi_init); | ||
354 | module_exit(wl1251_spi_exit); | ||
355 | 333 | ||
356 | MODULE_LICENSE("GPL"); | 334 | MODULE_LICENSE("GPL"); |
357 | MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>"); | 335 | MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>"); |
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c index 9fa692d11025..7aa0eb848c5a 100644 --- a/drivers/net/wireless/ti/wl18xx/main.c +++ b/drivers/net/wireless/ti/wl18xx/main.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/platform_device.h> | 23 | #include <linux/platform_device.h> |
24 | #include <linux/ip.h> | 24 | #include <linux/ip.h> |
25 | #include <linux/firmware.h> | 25 | #include <linux/firmware.h> |
26 | #include <linux/etherdevice.h> | ||
26 | 27 | ||
27 | #include "../wlcore/wlcore.h" | 28 | #include "../wlcore/wlcore.h" |
28 | #include "../wlcore/debug.h" | 29 | #include "../wlcore/debug.h" |
@@ -594,8 +595,8 @@ static const struct wlcore_partition_set wl18xx_ptable[PART_TABLE_LEN] = { | |||
594 | .mem3 = { .start = 0x00000000, .size = 0x00000000 }, | 595 | .mem3 = { .start = 0x00000000, .size = 0x00000000 }, |
595 | }, | 596 | }, |
596 | [PART_PHY_INIT] = { | 597 | [PART_PHY_INIT] = { |
597 | .mem = { .start = 0x80926000, | 598 | .mem = { .start = WL18XX_PHY_INIT_MEM_ADDR, |
598 | .size = sizeof(struct wl18xx_mac_and_phy_params) }, | 599 | .size = WL18XX_PHY_INIT_MEM_SIZE }, |
599 | .reg = { .start = 0x00000000, .size = 0x00000000 }, | 600 | .reg = { .start = 0x00000000, .size = 0x00000000 }, |
600 | .mem2 = { .start = 0x00000000, .size = 0x00000000 }, | 601 | .mem2 = { .start = 0x00000000, .size = 0x00000000 }, |
601 | .mem3 = { .start = 0x00000000, .size = 0x00000000 }, | 602 | .mem3 = { .start = 0x00000000, .size = 0x00000000 }, |
@@ -799,6 +800,9 @@ static int wl18xx_pre_upload(struct wl1271 *wl) | |||
799 | u32 tmp; | 800 | u32 tmp; |
800 | int ret; | 801 | int ret; |
801 | 802 | ||
803 | BUILD_BUG_ON(sizeof(struct wl18xx_mac_and_phy_params) > | ||
804 | WL18XX_PHY_INIT_MEM_SIZE); | ||
805 | |||
802 | ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]); | 806 | ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]); |
803 | if (ret < 0) | 807 | if (ret < 0) |
804 | goto out; | 808 | goto out; |
@@ -815,6 +819,35 @@ static int wl18xx_pre_upload(struct wl1271 *wl) | |||
815 | wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp); | 819 | wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp); |
816 | 820 | ||
817 | ret = wlcore_read32(wl, WL18XX_SCR_PAD2, &tmp); | 821 | ret = wlcore_read32(wl, WL18XX_SCR_PAD2, &tmp); |
822 | if (ret < 0) | ||
823 | goto out; | ||
824 | |||
825 | /* | ||
826 | * Workaround for FDSP code RAM corruption (needed for PG2.1 | ||
827 | * and newer; for older chips it's a NOP). Change FDSP clock | ||
828 | * settings so that it's muxed to the ATGP clock instead of | ||
829 | * its own clock. | ||
830 | */ | ||
831 | |||
832 | ret = wlcore_set_partition(wl, &wl->ptable[PART_PHY_INIT]); | ||
833 | if (ret < 0) | ||
834 | goto out; | ||
835 | |||
836 | /* disable FDSP clock */ | ||
837 | ret = wlcore_write32(wl, WL18XX_PHY_FPGA_SPARE_1, | ||
838 | MEM_FDSP_CLK_120_DISABLE); | ||
839 | if (ret < 0) | ||
840 | goto out; | ||
841 | |||
842 | /* set ATPG clock toward FDSP Code RAM rather than its own clock */ | ||
843 | ret = wlcore_write32(wl, WL18XX_PHY_FPGA_SPARE_1, | ||
844 | MEM_FDSP_CODERAM_FUNC_CLK_SEL); | ||
845 | if (ret < 0) | ||
846 | goto out; | ||
847 | |||
848 | /* re-enable FDSP clock */ | ||
849 | ret = wlcore_write32(wl, WL18XX_PHY_FPGA_SPARE_1, | ||
850 | MEM_FDSP_CLK_120_ENABLE); | ||
818 | 851 | ||
819 | out: | 852 | out: |
820 | return ret; | 853 | return ret; |
@@ -1286,6 +1319,16 @@ static int wl18xx_get_mac(struct wl1271 *wl) | |||
1286 | ((mac1 & 0xff000000) >> 24); | 1319 | ((mac1 & 0xff000000) >> 24); |
1287 | wl->fuse_nic_addr = (mac1 & 0xffffff); | 1320 | wl->fuse_nic_addr = (mac1 & 0xffffff); |
1288 | 1321 | ||
1322 | if (!wl->fuse_oui_addr && !wl->fuse_nic_addr) { | ||
1323 | u8 mac[ETH_ALEN]; | ||
1324 | |||
1325 | eth_random_addr(mac); | ||
1326 | |||
1327 | wl->fuse_oui_addr = (mac[0] << 16) + (mac[1] << 8) + mac[2]; | ||
1328 | wl->fuse_nic_addr = (mac[3] << 16) + (mac[4] << 8) + mac[5]; | ||
1329 | wl1271_warning("MAC address from fuse not available, using random locally administered addresses."); | ||
1330 | } | ||
1331 | |||
1289 | ret = wlcore_set_partition(wl, &wl->ptable[PART_DOWN]); | 1332 | ret = wlcore_set_partition(wl, &wl->ptable[PART_DOWN]); |
1290 | 1333 | ||
1291 | out: | 1334 | out: |
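The wl18xx_get_mac() hunk adds a fallback for parts whose MAC fuses read back as zero: eth_random_addr() fills a six-byte buffer with random data, clears the multicast bit and sets the locally-administered bit, and the driver then packs the bytes into the 24-bit fuse_oui_addr/fuse_nic_addr pair that wlcore later expands into per-interface addresses. A small self-contained sketch of that packing and unpacking (plain C, outside the kernel, purely illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Example random address: 0x02 in byte 0 marks it locally
             * administered, the multicast bit (0x01) is clear. */
            uint8_t mac[6] = { 0x02, 0x12, 0x34, 0x56, 0x78, 0x9a };

            /* Pack as in wl18xx_get_mac(). */
            uint32_t oui = (mac[0] << 16) | (mac[1] << 8) | mac[2];
            uint32_t nic = (mac[3] << 16) | (mac[4] << 8) | mac[5];

            /* Unpack again, as wlcore does when deriving addresses. */
            uint8_t out[6] = {
                    (uint8_t)(oui >> 16), (uint8_t)(oui >> 8), (uint8_t)oui,
                    (uint8_t)(nic >> 16), (uint8_t)(nic >> 8), (uint8_t)nic,
            };

            printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                   out[0], out[1], out[2], out[3], out[4], out[5]);
            return 0;
    }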
diff --git a/drivers/net/wireless/ti/wl18xx/reg.h b/drivers/net/wireless/ti/wl18xx/reg.h index 6306e04cd258..05dd8bad2746 100644 --- a/drivers/net/wireless/ti/wl18xx/reg.h +++ b/drivers/net/wireless/ti/wl18xx/reg.h | |||
@@ -38,6 +38,9 @@ | |||
38 | #define WL18XX_REG_BOOT_PART_SIZE 0x00014578 | 38 | #define WL18XX_REG_BOOT_PART_SIZE 0x00014578 |
39 | 39 | ||
40 | #define WL18XX_PHY_INIT_MEM_ADDR 0x80926000 | 40 | #define WL18XX_PHY_INIT_MEM_ADDR 0x80926000 |
41 | #define WL18XX_PHY_END_MEM_ADDR 0x8093CA44 | ||
42 | #define WL18XX_PHY_INIT_MEM_SIZE \ | ||
43 | (WL18XX_PHY_END_MEM_ADDR - WL18XX_PHY_INIT_MEM_ADDR) | ||
41 | 44 | ||
42 | #define WL18XX_SDIO_WSPI_BASE (WL18XX_REGISTERS_BASE) | 45 | #define WL18XX_SDIO_WSPI_BASE (WL18XX_REGISTERS_BASE) |
43 | #define WL18XX_REG_CONFIG_BASE (WL18XX_REGISTERS_BASE + 0x02000) | 46 | #define WL18XX_REG_CONFIG_BASE (WL18XX_REGISTERS_BASE + 0x02000) |
@@ -217,4 +220,16 @@ static const char * const rdl_names[] = { | |||
217 | [RDL_4_SP] = "1897 MIMO", | 220 | [RDL_4_SP] = "1897 MIMO", |
218 | }; | 221 | }; |
219 | 222 | ||
223 | /* FPGA_SPARE_1 register - used to change the PHY ATPG clock at boot time */ | ||
224 | #define WL18XX_PHY_FPGA_SPARE_1 0x8093CA40 | ||
225 | |||
226 | /* command to disable FDSP clock */ | ||
227 | #define MEM_FDSP_CLK_120_DISABLE 0x80000000 | ||
228 | |||
229 | /* command to set ATPG clock toward FDSP Code RAM rather than its own clock */ | ||
230 | #define MEM_FDSP_CODERAM_FUNC_CLK_SEL 0xC0000000 | ||
231 | |||
232 | /* command to re-enable FDSP clock */ | ||
233 | #define MEM_FDSP_CLK_120_ENABLE 0x40000000 | ||
234 | |||
220 | #endif /* __REG_H__ */ | 235 | #endif /* __REG_H__ */ |
diff --git a/drivers/net/wireless/ti/wlcore/Makefile b/drivers/net/wireless/ti/wlcore/Makefile index b21398f6c3ec..4f23931d7bd5 100644 --- a/drivers/net/wireless/ti/wlcore/Makefile +++ b/drivers/net/wireless/ti/wlcore/Makefile | |||
@@ -1,5 +1,5 @@ | |||
1 | wlcore-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \ | 1 | wlcore-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \ |
2 | boot.o init.o debugfs.o scan.o | 2 | boot.o init.o debugfs.o scan.o sysfs.o |
3 | 3 | ||
4 | wlcore_spi-objs = spi.o | 4 | wlcore_spi-objs = spi.o |
5 | wlcore_sdio-objs = sdio.o | 5 | wlcore_sdio-objs = sdio.o |
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 953111a502ee..b8db55c868c7 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c | |||
@@ -1,10 +1,9 @@ | |||
1 | 1 | ||
2 | /* | 2 | /* |
3 | * This file is part of wl1271 | 3 | * This file is part of wlcore |
4 | * | 4 | * |
5 | * Copyright (C) 2008-2010 Nokia Corporation | 5 | * Copyright (C) 2008-2010 Nokia Corporation |
6 | * | 6 | * Copyright (C) 2011-2013 Texas Instruments Inc. |
7 | * Contact: Luciano Coelho <luciano.coelho@nokia.com> | ||
8 | * | 7 | * |
9 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 9 | * modify it under the terms of the GNU General Public License |
@@ -24,34 +23,23 @@ | |||
24 | 23 | ||
25 | #include <linux/module.h> | 24 | #include <linux/module.h> |
26 | #include <linux/firmware.h> | 25 | #include <linux/firmware.h> |
27 | #include <linux/delay.h> | ||
28 | #include <linux/spi/spi.h> | ||
29 | #include <linux/crc32.h> | ||
30 | #include <linux/etherdevice.h> | 26 | #include <linux/etherdevice.h> |
31 | #include <linux/vmalloc.h> | 27 | #include <linux/vmalloc.h> |
32 | #include <linux/platform_device.h> | ||
33 | #include <linux/slab.h> | ||
34 | #include <linux/wl12xx.h> | 28 | #include <linux/wl12xx.h> |
35 | #include <linux/sched.h> | ||
36 | #include <linux/interrupt.h> | 29 | #include <linux/interrupt.h> |
37 | 30 | ||
38 | #include "wlcore.h" | 31 | #include "wlcore.h" |
39 | #include "debug.h" | 32 | #include "debug.h" |
40 | #include "wl12xx_80211.h" | 33 | #include "wl12xx_80211.h" |
41 | #include "io.h" | 34 | #include "io.h" |
42 | #include "event.h" | ||
43 | #include "tx.h" | 35 | #include "tx.h" |
44 | #include "rx.h" | ||
45 | #include "ps.h" | 36 | #include "ps.h" |
46 | #include "init.h" | 37 | #include "init.h" |
47 | #include "debugfs.h" | 38 | #include "debugfs.h" |
48 | #include "cmd.h" | ||
49 | #include "boot.h" | ||
50 | #include "testmode.h" | 39 | #include "testmode.h" |
51 | #include "scan.h" | 40 | #include "scan.h" |
52 | #include "hw_ops.h" | 41 | #include "hw_ops.h" |
53 | 42 | #include "sysfs.h" | |
54 | #define WL1271_BOOT_RETRIES 3 | ||
55 | 43 | ||
56 | #define WL1271_BOOT_RETRIES 3 | 44 | #define WL1271_BOOT_RETRIES 3 |
57 | 45 | ||
@@ -65,8 +53,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl, | |||
65 | static void wlcore_op_stop_locked(struct wl1271 *wl); | 53 | static void wlcore_op_stop_locked(struct wl1271 *wl); |
66 | static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif); | 54 | static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif); |
67 | 55 | ||
68 | static int wl12xx_set_authorized(struct wl1271 *wl, | 56 | static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif) |
69 | struct wl12xx_vif *wlvif) | ||
70 | { | 57 | { |
71 | int ret; | 58 | int ret; |
72 | 59 | ||
@@ -983,7 +970,7 @@ static int wlcore_fw_wakeup(struct wl1271 *wl) | |||
983 | 970 | ||
984 | static int wl1271_setup(struct wl1271 *wl) | 971 | static int wl1271_setup(struct wl1271 *wl) |
985 | { | 972 | { |
986 | wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) + | 973 | wl->fw_status_1 = kzalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) + |
987 | sizeof(*wl->fw_status_2) + | 974 | sizeof(*wl->fw_status_2) + |
988 | wl->fw_status_priv_len, GFP_KERNEL); | 975 | wl->fw_status_priv_len, GFP_KERNEL); |
989 | if (!wl->fw_status_1) | 976 | if (!wl->fw_status_1) |
@@ -993,7 +980,7 @@ static int wl1271_setup(struct wl1271 *wl) | |||
993 | (((u8 *) wl->fw_status_1) + | 980 | (((u8 *) wl->fw_status_1) + |
994 | WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc)); | 981 | WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc)); |
995 | 982 | ||
996 | wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL); | 983 | wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL); |
997 | if (!wl->tx_res_if) { | 984 | if (!wl->tx_res_if) { |
998 | kfree(wl->fw_status_1); | 985 | kfree(wl->fw_status_1); |
999 | return -ENOMEM; | 986 | return -ENOMEM; |
@@ -1668,8 +1655,7 @@ static int wl1271_configure_suspend(struct wl1271 *wl, | |||
1668 | return 0; | 1655 | return 0; |
1669 | } | 1656 | } |
1670 | 1657 | ||
1671 | static void wl1271_configure_resume(struct wl1271 *wl, | 1658 | static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif) |
1672 | struct wl12xx_vif *wlvif) | ||
1673 | { | 1659 | { |
1674 | int ret = 0; | 1660 | int ret = 0; |
1675 | bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS; | 1661 | bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS; |
@@ -2603,6 +2589,7 @@ unlock: | |||
2603 | cancel_work_sync(&wlvif->rx_streaming_enable_work); | 2589 | cancel_work_sync(&wlvif->rx_streaming_enable_work); |
2604 | cancel_work_sync(&wlvif->rx_streaming_disable_work); | 2590 | cancel_work_sync(&wlvif->rx_streaming_disable_work); |
2605 | cancel_delayed_work_sync(&wlvif->connection_loss_work); | 2591 | cancel_delayed_work_sync(&wlvif->connection_loss_work); |
2592 | cancel_delayed_work_sync(&wlvif->channel_switch_work); | ||
2606 | 2593 | ||
2607 | mutex_lock(&wl->mutex); | 2594 | mutex_lock(&wl->mutex); |
2608 | } | 2595 | } |
@@ -3210,14 +3197,6 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, | |||
3210 | if (ret < 0) | 3197 | if (ret < 0) |
3211 | return ret; | 3198 | return ret; |
3212 | 3199 | ||
3213 | /* the default WEP key needs to be configured at least once */ | ||
3214 | if (key_type == KEY_WEP) { | ||
3215 | ret = wl12xx_cmd_set_default_wep_key(wl, | ||
3216 | wlvif->default_key, | ||
3217 | wlvif->sta.hlid); | ||
3218 | if (ret < 0) | ||
3219 | return ret; | ||
3220 | } | ||
3221 | } | 3200 | } |
3222 | 3201 | ||
3223 | return 0; | 3202 | return 0; |
@@ -3374,6 +3353,46 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd, | |||
3374 | } | 3353 | } |
3375 | EXPORT_SYMBOL_GPL(wlcore_set_key); | 3354 | EXPORT_SYMBOL_GPL(wlcore_set_key); |
3376 | 3355 | ||
3356 | static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw, | ||
3357 | struct ieee80211_vif *vif, | ||
3358 | int key_idx) | ||
3359 | { | ||
3360 | struct wl1271 *wl = hw->priv; | ||
3361 | struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); | ||
3362 | int ret; | ||
3363 | |||
3364 | wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d", | ||
3365 | key_idx); | ||
3366 | |||
3367 | mutex_lock(&wl->mutex); | ||
3368 | |||
3369 | if (unlikely(wl->state != WLCORE_STATE_ON)) { | ||
3370 | ret = -EAGAIN; | ||
3371 | goto out_unlock; | ||
3372 | } | ||
3373 | |||
3374 | ret = wl1271_ps_elp_wakeup(wl); | ||
3375 | if (ret < 0) | ||
3376 | goto out_unlock; | ||
3377 | |||
3378 | wlvif->default_key = key_idx; | ||
3379 | |||
3380 | /* the default WEP key needs to be configured at least once */ | ||
3381 | if (wlvif->encryption_type == KEY_WEP) { | ||
3382 | ret = wl12xx_cmd_set_default_wep_key(wl, | ||
3383 | key_idx, | ||
3384 | wlvif->sta.hlid); | ||
3385 | if (ret < 0) | ||
3386 | goto out_sleep; | ||
3387 | } | ||
3388 | |||
3389 | out_sleep: | ||
3390 | wl1271_ps_elp_sleep(wl); | ||
3391 | |||
3392 | out_unlock: | ||
3393 | mutex_unlock(&wl->mutex); | ||
3394 | } | ||
3395 | |||
3377 | void wlcore_regdomain_config(struct wl1271 *wl) | 3396 | void wlcore_regdomain_config(struct wl1271 *wl) |
3378 | { | 3397 | { |
3379 | int ret; | 3398 | int ret; |
@@ -3782,8 +3801,7 @@ static int wlcore_set_beacon_template(struct wl1271 *wl, | |||
3782 | struct ieee80211_hdr *hdr; | 3801 | struct ieee80211_hdr *hdr; |
3783 | u32 min_rate; | 3802 | u32 min_rate; |
3784 | int ret; | 3803 | int ret; |
3785 | int ieoffset = offsetof(struct ieee80211_mgmt, | 3804 | int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable); |
3786 | u.beacon.variable); | ||
3787 | struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif); | 3805 | struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif); |
3788 | u16 tmpl_id; | 3806 | u16 tmpl_id; |
3789 | 3807 | ||
@@ -4230,8 +4248,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, | |||
4230 | } | 4248 | } |
4231 | 4249 | ||
4232 | /* Handle new association with HT. Do this after join. */ | 4250 | /* Handle new association with HT. Do this after join. */ |
4233 | if (sta_exists && | 4251 | if (sta_exists) { |
4234 | (changed & BSS_CHANGED_HT)) { | ||
4235 | bool enabled = | 4252 | bool enabled = |
4236 | bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT; | 4253 | bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT; |
4237 | 4254 | ||
@@ -5368,6 +5385,7 @@ static const struct ieee80211_ops wl1271_ops = { | |||
5368 | .ampdu_action = wl1271_op_ampdu_action, | 5385 | .ampdu_action = wl1271_op_ampdu_action, |
5369 | .tx_frames_pending = wl1271_tx_frames_pending, | 5386 | .tx_frames_pending = wl1271_tx_frames_pending, |
5370 | .set_bitrate_mask = wl12xx_set_bitrate_mask, | 5387 | .set_bitrate_mask = wl12xx_set_bitrate_mask, |
5388 | .set_default_unicast_key = wl1271_op_set_default_key_idx, | ||
5371 | .channel_switch = wl12xx_op_channel_switch, | 5389 | .channel_switch = wl12xx_op_channel_switch, |
5372 | .flush = wlcore_op_flush, | 5390 | .flush = wlcore_op_flush, |
5373 | .remain_on_channel = wlcore_op_remain_on_channel, | 5391 | .remain_on_channel = wlcore_op_remain_on_channel, |
@@ -5403,151 +5421,6 @@ u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band) | |||
5403 | return idx; | 5421 | return idx; |
5404 | } | 5422 | } |
5405 | 5423 | ||
5406 | static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev, | ||
5407 | struct device_attribute *attr, | ||
5408 | char *buf) | ||
5409 | { | ||
5410 | struct wl1271 *wl = dev_get_drvdata(dev); | ||
5411 | ssize_t len; | ||
5412 | |||
5413 | len = PAGE_SIZE; | ||
5414 | |||
5415 | mutex_lock(&wl->mutex); | ||
5416 | len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n", | ||
5417 | wl->sg_enabled); | ||
5418 | mutex_unlock(&wl->mutex); | ||
5419 | |||
5420 | return len; | ||
5421 | |||
5422 | } | ||
5423 | |||
5424 | static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev, | ||
5425 | struct device_attribute *attr, | ||
5426 | const char *buf, size_t count) | ||
5427 | { | ||
5428 | struct wl1271 *wl = dev_get_drvdata(dev); | ||
5429 | unsigned long res; | ||
5430 | int ret; | ||
5431 | |||
5432 | ret = kstrtoul(buf, 10, &res); | ||
5433 | if (ret < 0) { | ||
5434 | wl1271_warning("incorrect value written to bt_coex_mode"); | ||
5435 | return count; | ||
5436 | } | ||
5437 | |||
5438 | mutex_lock(&wl->mutex); | ||
5439 | |||
5440 | res = !!res; | ||
5441 | |||
5442 | if (res == wl->sg_enabled) | ||
5443 | goto out; | ||
5444 | |||
5445 | wl->sg_enabled = res; | ||
5446 | |||
5447 | if (unlikely(wl->state != WLCORE_STATE_ON)) | ||
5448 | goto out; | ||
5449 | |||
5450 | ret = wl1271_ps_elp_wakeup(wl); | ||
5451 | if (ret < 0) | ||
5452 | goto out; | ||
5453 | |||
5454 | wl1271_acx_sg_enable(wl, wl->sg_enabled); | ||
5455 | wl1271_ps_elp_sleep(wl); | ||
5456 | |||
5457 | out: | ||
5458 | mutex_unlock(&wl->mutex); | ||
5459 | return count; | ||
5460 | } | ||
5461 | |||
5462 | static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR, | ||
5463 | wl1271_sysfs_show_bt_coex_state, | ||
5464 | wl1271_sysfs_store_bt_coex_state); | ||
5465 | |||
5466 | static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev, | ||
5467 | struct device_attribute *attr, | ||
5468 | char *buf) | ||
5469 | { | ||
5470 | struct wl1271 *wl = dev_get_drvdata(dev); | ||
5471 | ssize_t len; | ||
5472 | |||
5473 | len = PAGE_SIZE; | ||
5474 | |||
5475 | mutex_lock(&wl->mutex); | ||
5476 | if (wl->hw_pg_ver >= 0) | ||
5477 | len = snprintf(buf, len, "%d\n", wl->hw_pg_ver); | ||
5478 | else | ||
5479 | len = snprintf(buf, len, "n/a\n"); | ||
5480 | mutex_unlock(&wl->mutex); | ||
5481 | |||
5482 | return len; | ||
5483 | } | ||
5484 | |||
5485 | static DEVICE_ATTR(hw_pg_ver, S_IRUGO, | ||
5486 | wl1271_sysfs_show_hw_pg_ver, NULL); | ||
5487 | |||
5488 | static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj, | ||
5489 | struct bin_attribute *bin_attr, | ||
5490 | char *buffer, loff_t pos, size_t count) | ||
5491 | { | ||
5492 | struct device *dev = container_of(kobj, struct device, kobj); | ||
5493 | struct wl1271 *wl = dev_get_drvdata(dev); | ||
5494 | ssize_t len; | ||
5495 | int ret; | ||
5496 | |||
5497 | ret = mutex_lock_interruptible(&wl->mutex); | ||
5498 | if (ret < 0) | ||
5499 | return -ERESTARTSYS; | ||
5500 | |||
5501 | /* Let only one thread read the log at a time, blocking others */ | ||
5502 | while (wl->fwlog_size == 0) { | ||
5503 | DEFINE_WAIT(wait); | ||
5504 | |||
5505 | prepare_to_wait_exclusive(&wl->fwlog_waitq, | ||
5506 | &wait, | ||
5507 | TASK_INTERRUPTIBLE); | ||
5508 | |||
5509 | if (wl->fwlog_size != 0) { | ||
5510 | finish_wait(&wl->fwlog_waitq, &wait); | ||
5511 | break; | ||
5512 | } | ||
5513 | |||
5514 | mutex_unlock(&wl->mutex); | ||
5515 | |||
5516 | schedule(); | ||
5517 | finish_wait(&wl->fwlog_waitq, &wait); | ||
5518 | |||
5519 | if (signal_pending(current)) | ||
5520 | return -ERESTARTSYS; | ||
5521 | |||
5522 | ret = mutex_lock_interruptible(&wl->mutex); | ||
5523 | if (ret < 0) | ||
5524 | return -ERESTARTSYS; | ||
5525 | } | ||
5526 | |||
5527 | /* Check if the fwlog is still valid */ | ||
5528 | if (wl->fwlog_size < 0) { | ||
5529 | mutex_unlock(&wl->mutex); | ||
5530 | return 0; | ||
5531 | } | ||
5532 | |||
5533 | /* Seeking is not supported - old logs are not kept. Disregard pos. */ | ||
5534 | len = min(count, (size_t)wl->fwlog_size); | ||
5535 | wl->fwlog_size -= len; | ||
5536 | memcpy(buffer, wl->fwlog, len); | ||
5537 | |||
5538 | /* Make room for new messages */ | ||
5539 | memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size); | ||
5540 | |||
5541 | mutex_unlock(&wl->mutex); | ||
5542 | |||
5543 | return len; | ||
5544 | } | ||
5545 | |||
5546 | static struct bin_attribute fwlog_attr = { | ||
5547 | .attr = {.name = "fwlog", .mode = S_IRUSR}, | ||
5548 | .read = wl1271_sysfs_read_fwlog, | ||
5549 | }; | ||
5550 | |||
5551 | static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic) | 5424 | static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic) |
5552 | { | 5425 | { |
5553 | int i; | 5426 | int i; |
@@ -5827,8 +5700,6 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) | |||
5827 | return 0; | 5700 | return 0; |
5828 | } | 5701 | } |
5829 | 5702 | ||
5830 | #define WL1271_DEFAULT_CHANNEL 0 | ||
5831 | |||
5832 | struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size, | 5703 | struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size, |
5833 | u32 mbox_size) | 5704 | u32 mbox_size) |
5834 | { | 5705 | { |
@@ -5881,7 +5752,7 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size, | |||
5881 | goto err_hw; | 5752 | goto err_hw; |
5882 | } | 5753 | } |
5883 | 5754 | ||
5884 | wl->channel = WL1271_DEFAULT_CHANNEL; | 5755 | wl->channel = 0; |
5885 | wl->rx_counter = 0; | 5756 | wl->rx_counter = 0; |
5886 | wl->power_level = WL1271_DEFAULT_POWER_LEVEL; | 5757 | wl->power_level = WL1271_DEFAULT_POWER_LEVEL; |
5887 | wl->band = IEEE80211_BAND_2GHZ; | 5758 | wl->band = IEEE80211_BAND_2GHZ; |
@@ -5988,11 +5859,8 @@ int wlcore_free_hw(struct wl1271 *wl) | |||
5988 | wake_up_interruptible_all(&wl->fwlog_waitq); | 5859 | wake_up_interruptible_all(&wl->fwlog_waitq); |
5989 | mutex_unlock(&wl->mutex); | 5860 | mutex_unlock(&wl->mutex); |
5990 | 5861 | ||
5991 | device_remove_bin_file(wl->dev, &fwlog_attr); | 5862 | wlcore_sysfs_free(wl); |
5992 | |||
5993 | device_remove_file(wl->dev, &dev_attr_hw_pg_ver); | ||
5994 | 5863 | ||
5995 | device_remove_file(wl->dev, &dev_attr_bt_coex_state); | ||
5996 | kfree(wl->buffer_32); | 5864 | kfree(wl->buffer_32); |
5997 | kfree(wl->mbox); | 5865 | kfree(wl->mbox); |
5998 | free_page((unsigned long)wl->fwlog); | 5866 | free_page((unsigned long)wl->fwlog); |
@@ -6018,6 +5886,15 @@ int wlcore_free_hw(struct wl1271 *wl) | |||
6018 | } | 5886 | } |
6019 | EXPORT_SYMBOL_GPL(wlcore_free_hw); | 5887 | EXPORT_SYMBOL_GPL(wlcore_free_hw); |
6020 | 5888 | ||
5889 | #ifdef CONFIG_PM | ||
5890 | static const struct wiphy_wowlan_support wlcore_wowlan_support = { | ||
5891 | .flags = WIPHY_WOWLAN_ANY, | ||
5892 | .n_patterns = WL1271_MAX_RX_FILTERS, | ||
5893 | .pattern_min_len = 1, | ||
5894 | .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE, | ||
5895 | }; | ||
5896 | #endif | ||
5897 | |||
6021 | static void wlcore_nvs_cb(const struct firmware *fw, void *context) | 5898 | static void wlcore_nvs_cb(const struct firmware *fw, void *context) |
6022 | { | 5899 | { |
6023 | struct wl1271 *wl = context; | 5900 | struct wl1271 *wl = context; |
@@ -6071,14 +5948,8 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context) | |||
6071 | if (!ret) { | 5948 | if (!ret) { |
6072 | wl->irq_wake_enabled = true; | 5949 | wl->irq_wake_enabled = true; |
6073 | device_init_wakeup(wl->dev, 1); | 5950 | device_init_wakeup(wl->dev, 1); |
6074 | if (pdata->pwr_in_suspend) { | 5951 | if (pdata->pwr_in_suspend) |
6075 | wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY; | 5952 | wl->hw->wiphy->wowlan = &wlcore_wowlan_support; |
6076 | wl->hw->wiphy->wowlan.n_patterns = | ||
6077 | WL1271_MAX_RX_FILTERS; | ||
6078 | wl->hw->wiphy->wowlan.pattern_min_len = 1; | ||
6079 | wl->hw->wiphy->wowlan.pattern_max_len = | ||
6080 | WL1271_RX_FILTER_MAX_PATTERN_SIZE; | ||
6081 | } | ||
6082 | } | 5953 | } |
6083 | #endif | 5954 | #endif |
6084 | disable_irq(wl->irq); | 5955 | disable_irq(wl->irq); |
@@ -6101,36 +5972,13 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context) | |||
6101 | if (ret) | 5972 | if (ret) |
6102 | goto out_irq; | 5973 | goto out_irq; |
6103 | 5974 | ||
6104 | /* Create sysfs file to control bt coex state */ | 5975 | ret = wlcore_sysfs_init(wl); |
6105 | ret = device_create_file(wl->dev, &dev_attr_bt_coex_state); | 5976 | if (ret) |
6106 | if (ret < 0) { | ||
6107 | wl1271_error("failed to create sysfs file bt_coex_state"); | ||
6108 | goto out_unreg; | 5977 | goto out_unreg; |
6109 | } | ||
6110 | |||
6111 | /* Create sysfs file to get HW PG version */ | ||
6112 | ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver); | ||
6113 | if (ret < 0) { | ||
6114 | wl1271_error("failed to create sysfs file hw_pg_ver"); | ||
6115 | goto out_bt_coex_state; | ||
6116 | } | ||
6117 | |||
6118 | /* Create sysfs file for the FW log */ | ||
6119 | ret = device_create_bin_file(wl->dev, &fwlog_attr); | ||
6120 | if (ret < 0) { | ||
6121 | wl1271_error("failed to create sysfs file fwlog"); | ||
6122 | goto out_hw_pg_ver; | ||
6123 | } | ||
6124 | 5978 | ||
6125 | wl->initialized = true; | 5979 | wl->initialized = true; |
6126 | goto out; | 5980 | goto out; |
6127 | 5981 | ||
6128 | out_hw_pg_ver: | ||
6129 | device_remove_file(wl->dev, &dev_attr_hw_pg_ver); | ||
6130 | |||
6131 | out_bt_coex_state: | ||
6132 | device_remove_file(wl->dev, &dev_attr_bt_coex_state); | ||
6133 | |||
6134 | out_unreg: | 5982 | out_unreg: |
6135 | wl1271_unregister_hw(wl); | 5983 | wl1271_unregister_hw(wl); |
6136 | 5984 | ||
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c index 9654577efd01..98066d40c2ad 100644 --- a/drivers/net/wireless/ti/wlcore/ps.c +++ b/drivers/net/wireless/ti/wlcore/ps.c | |||
@@ -110,7 +110,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl) | |||
110 | DECLARE_COMPLETION_ONSTACK(compl); | 110 | DECLARE_COMPLETION_ONSTACK(compl); |
111 | unsigned long flags; | 111 | unsigned long flags; |
112 | int ret; | 112 | int ret; |
113 | u32 start_time = jiffies; | 113 | unsigned long start_time = jiffies; |
114 | bool pending = false; | 114 | bool pending = false; |
115 | 115 | ||
116 | /* | 116 | /* |
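The single-line ps.c change matters on 64-bit builds: jiffies is an unsigned long, so storing it in a u32 truncates the counter and skews the elapsed-time calculation made later in the same function. The idiomatic pattern, sketched below, keeps timestamps as unsigned long and uses the wrap-safe helpers from <linux/jiffies.h> for comparisons.

    #include <linux/jiffies.h>
    #include <linux/types.h>

    /* Illustrative only: wrap-safe check that an operation exceeded a budget. */
    static bool took_longer_than(unsigned long start_time, unsigned int ms)
    {
            return time_after(jiffies, start_time + msecs_to_jiffies(ms));
    }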
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index e26447832683..1b0cd98e35f1 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c | |||
@@ -434,19 +434,7 @@ static struct spi_driver wl1271_spi_driver = { | |||
434 | .remove = wl1271_remove, | 434 | .remove = wl1271_remove, |
435 | }; | 435 | }; |
436 | 436 | ||
437 | static int __init wl1271_init(void) | 437 | module_spi_driver(wl1271_spi_driver); |
438 | { | ||
439 | return spi_register_driver(&wl1271_spi_driver); | ||
440 | } | ||
441 | |||
442 | static void __exit wl1271_exit(void) | ||
443 | { | ||
444 | spi_unregister_driver(&wl1271_spi_driver); | ||
445 | } | ||
446 | |||
447 | module_init(wl1271_init); | ||
448 | module_exit(wl1271_exit); | ||
449 | |||
450 | MODULE_LICENSE("GPL"); | 438 | MODULE_LICENSE("GPL"); |
451 | MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); | 439 | MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); |
452 | MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); | 440 | MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); |
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c new file mode 100644 index 000000000000..8e583497940d --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/sysfs.c | |||
@@ -0,0 +1,216 @@ | |||
1 | /* | ||
2 | * This file is part of wlcore | ||
3 | * | ||
4 | * Copyright (C) 2013 Texas Instruments Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * version 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
13 | * General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
18 | * 02110-1301 USA | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include "wlcore.h" | ||
23 | #include "debug.h" | ||
24 | #include "ps.h" | ||
25 | #include "sysfs.h" | ||
26 | |||
27 | static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev, | ||
28 | struct device_attribute *attr, | ||
29 | char *buf) | ||
30 | { | ||
31 | struct wl1271 *wl = dev_get_drvdata(dev); | ||
32 | ssize_t len; | ||
33 | |||
34 | len = PAGE_SIZE; | ||
35 | |||
36 | mutex_lock(&wl->mutex); | ||
37 | len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n", | ||
38 | wl->sg_enabled); | ||
39 | mutex_unlock(&wl->mutex); | ||
40 | |||
41 | return len; | ||
42 | |||
43 | } | ||
44 | |||
45 | static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev, | ||
46 | struct device_attribute *attr, | ||
47 | const char *buf, size_t count) | ||
48 | { | ||
49 | struct wl1271 *wl = dev_get_drvdata(dev); | ||
50 | unsigned long res; | ||
51 | int ret; | ||
52 | |||
53 | ret = kstrtoul(buf, 10, &res); | ||
54 | if (ret < 0) { | ||
55 | wl1271_warning("incorrect value written to bt_coex_mode"); | ||
56 | return count; | ||
57 | } | ||
58 | |||
59 | mutex_lock(&wl->mutex); | ||
60 | |||
61 | res = !!res; | ||
62 | |||
63 | if (res == wl->sg_enabled) | ||
64 | goto out; | ||
65 | |||
66 | wl->sg_enabled = res; | ||
67 | |||
68 | if (unlikely(wl->state != WLCORE_STATE_ON)) | ||
69 | goto out; | ||
70 | |||
71 | ret = wl1271_ps_elp_wakeup(wl); | ||
72 | if (ret < 0) | ||
73 | goto out; | ||
74 | |||
75 | wl1271_acx_sg_enable(wl, wl->sg_enabled); | ||
76 | wl1271_ps_elp_sleep(wl); | ||
77 | |||
78 | out: | ||
79 | mutex_unlock(&wl->mutex); | ||
80 | return count; | ||
81 | } | ||
82 | |||
83 | static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR, | ||
84 | wl1271_sysfs_show_bt_coex_state, | ||
85 | wl1271_sysfs_store_bt_coex_state); | ||
86 | |||
87 | static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev, | ||
88 | struct device_attribute *attr, | ||
89 | char *buf) | ||
90 | { | ||
91 | struct wl1271 *wl = dev_get_drvdata(dev); | ||
92 | ssize_t len; | ||
93 | |||
94 | len = PAGE_SIZE; | ||
95 | |||
96 | mutex_lock(&wl->mutex); | ||
97 | if (wl->hw_pg_ver >= 0) | ||
98 | len = snprintf(buf, len, "%d\n", wl->hw_pg_ver); | ||
99 | else | ||
100 | len = snprintf(buf, len, "n/a\n"); | ||
101 | mutex_unlock(&wl->mutex); | ||
102 | |||
103 | return len; | ||
104 | } | ||
105 | |||
106 | static DEVICE_ATTR(hw_pg_ver, S_IRUGO, | ||
107 | wl1271_sysfs_show_hw_pg_ver, NULL); | ||
108 | |||
109 | static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj, | ||
110 | struct bin_attribute *bin_attr, | ||
111 | char *buffer, loff_t pos, size_t count) | ||
112 | { | ||
113 | struct device *dev = container_of(kobj, struct device, kobj); | ||
114 | struct wl1271 *wl = dev_get_drvdata(dev); | ||
115 | ssize_t len; | ||
116 | int ret; | ||
117 | |||
118 | ret = mutex_lock_interruptible(&wl->mutex); | ||
119 | if (ret < 0) | ||
120 | return -ERESTARTSYS; | ||
121 | |||
122 | /* Let only one thread read the log at a time, blocking others */ | ||
123 | while (wl->fwlog_size == 0) { | ||
124 | DEFINE_WAIT(wait); | ||
125 | |||
126 | prepare_to_wait_exclusive(&wl->fwlog_waitq, | ||
127 | &wait, | ||
128 | TASK_INTERRUPTIBLE); | ||
129 | |||
130 | if (wl->fwlog_size != 0) { | ||
131 | finish_wait(&wl->fwlog_waitq, &wait); | ||
132 | break; | ||
133 | } | ||
134 | |||
135 | mutex_unlock(&wl->mutex); | ||
136 | |||
137 | schedule(); | ||
138 | finish_wait(&wl->fwlog_waitq, &wait); | ||
139 | |||
140 | if (signal_pending(current)) | ||
141 | return -ERESTARTSYS; | ||
142 | |||
143 | ret = mutex_lock_interruptible(&wl->mutex); | ||
144 | if (ret < 0) | ||
145 | return -ERESTARTSYS; | ||
146 | } | ||
147 | |||
148 | /* Check if the fwlog is still valid */ | ||
149 | if (wl->fwlog_size < 0) { | ||
150 | mutex_unlock(&wl->mutex); | ||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | /* Seeking is not supported - old logs are not kept. Disregard pos. */ | ||
155 | len = min(count, (size_t)wl->fwlog_size); | ||
156 | wl->fwlog_size -= len; | ||
157 | memcpy(buffer, wl->fwlog, len); | ||
158 | |||
159 | /* Make room for new messages */ | ||
160 | memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size); | ||
161 | |||
162 | mutex_unlock(&wl->mutex); | ||
163 | |||
164 | return len; | ||
165 | } | ||
166 | |||
167 | static struct bin_attribute fwlog_attr = { | ||
168 | .attr = {.name = "fwlog", .mode = S_IRUSR}, | ||
169 | .read = wl1271_sysfs_read_fwlog, | ||
170 | }; | ||
171 | |||
172 | int wlcore_sysfs_init(struct wl1271 *wl) | ||
173 | { | ||
174 | int ret; | ||
175 | |||
176 | /* Create sysfs file to control bt coex state */ | ||
177 | ret = device_create_file(wl->dev, &dev_attr_bt_coex_state); | ||
178 | if (ret < 0) { | ||
179 | wl1271_error("failed to create sysfs file bt_coex_state"); | ||
180 | goto out; | ||
181 | } | ||
182 | |||
183 | /* Create sysfs file to get HW PG version */ | ||
184 | ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver); | ||
185 | if (ret < 0) { | ||
186 | wl1271_error("failed to create sysfs file hw_pg_ver"); | ||
187 | goto out_bt_coex_state; | ||
188 | } | ||
189 | |||
190 | /* Create sysfs file for the FW log */ | ||
191 | ret = device_create_bin_file(wl->dev, &fwlog_attr); | ||
192 | if (ret < 0) { | ||
193 | wl1271_error("failed to create sysfs file fwlog"); | ||
194 | goto out_hw_pg_ver; | ||
195 | } | ||
196 | |||
197 | goto out; | ||
198 | |||
199 | out_hw_pg_ver: | ||
200 | device_remove_file(wl->dev, &dev_attr_hw_pg_ver); | ||
201 | |||
202 | out_bt_coex_state: | ||
203 | device_remove_file(wl->dev, &dev_attr_bt_coex_state); | ||
204 | |||
205 | out: | ||
206 | return ret; | ||
207 | } | ||
208 | |||
209 | void wlcore_sysfs_free(struct wl1271 *wl) | ||
210 | { | ||
211 | device_remove_bin_file(wl->dev, &fwlog_attr); | ||
212 | |||
213 | device_remove_file(wl->dev, &dev_attr_hw_pg_ver); | ||
214 | |||
215 | device_remove_file(wl->dev, &dev_attr_bt_coex_state); | ||
216 | } | ||
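With the sysfs handling now concentrated here, the bt_coex_state and hw_pg_ver attributes plus the binary fwlog file appear under the wlcore device's sysfs directory once wlcore_sysfs_init() has run. As a hedged illustration of how userspace might consume the blocking fwlog attribute, the snippet below opens it and copies whatever the driver hands back to stdout; the device path is hypothetical and depends on which bus the chip is attached to.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Hypothetical path; the real one depends on the SDIO/SPI device. */
            const char *path = "/sys/bus/sdio/devices/mmc1:0001:2/fwlog";
            char buf[4096];
            ssize_t n;
            int fd;

            fd = open(path, O_RDONLY);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* Each read blocks in wl1271_sysfs_read_fwlog() until the
             * firmware log has data, then returns the newest chunk. */
            while ((n = read(fd, buf, sizeof(buf))) > 0)
                    fwrite(buf, 1, (size_t)n, stdout);

            close(fd);
            return 0;
    }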
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.h b/drivers/net/wireless/ti/wlcore/sysfs.h new file mode 100644 index 000000000000..c1488921839d --- /dev/null +++ b/drivers/net/wireless/ti/wlcore/sysfs.h | |||
@@ -0,0 +1,28 @@ | |||
1 | /* | ||
2 | * This file is part of wlcore | ||
3 | * | ||
4 | * Copyright (C) 2013 Texas Instruments Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * version 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
13 | * General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
18 | * 02110-1301 USA | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #ifndef __SYSFS_H__ | ||
23 | #define __SYSFS_H__ | ||
24 | |||
25 | int wlcore_sysfs_init(struct wl1271 *wl); | ||
26 | void wlcore_sysfs_free(struct wl1271 *wl); | ||
27 | |||
28 | #endif | ||
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c index 004d02e71f01..7e93fe63a2c7 100644 --- a/drivers/net/wireless/ti/wlcore/tx.c +++ b/drivers/net/wireless/ti/wlcore/tx.c | |||
@@ -386,7 +386,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif, | |||
386 | is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) || | 386 | is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) || |
387 | (cipher == WLAN_CIPHER_SUITE_WEP104); | 387 | (cipher == WLAN_CIPHER_SUITE_WEP104); |
388 | 388 | ||
389 | if (unlikely(is_wep && wlvif->default_key != idx)) { | 389 | if (WARN_ON(is_wep && wlvif->default_key != idx)) { |
390 | ret = wl1271_set_default_wep_key(wl, wlvif, idx); | 390 | ret = wl1271_set_default_wep_key(wl, wlvif, idx); |
391 | if (ret < 0) | 391 | if (ret < 0) |
392 | return ret; | 392 | return ret; |
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig index 74a852e4e41f..b0b64ccb7d7d 100644 --- a/drivers/nfc/Kconfig +++ b/drivers/nfc/Kconfig | |||
@@ -36,6 +36,16 @@ config NFC_MEI_PHY | |||
36 | 36 | ||
37 | If unsure, say N. | 37 | If unsure, say N. |
38 | 38 | ||
39 | config NFC_SIM | ||
40 | tristate "NFC hardware simulator driver" | ||
41 | help | ||
42 | This driver declares two virtual NFC devices supporting NFC-DEP | ||
43 | protocol. An LLCP connection can be established between them and | ||
44 | all packets sent from one device is sent back to the other, acting as | ||
45 | loopback devices. | ||
46 | |||
47 | If unsure, say N. | ||
48 | |||
39 | source "drivers/nfc/pn544/Kconfig" | 49 | source "drivers/nfc/pn544/Kconfig" |
40 | source "drivers/nfc/microread/Kconfig" | 50 | source "drivers/nfc/microread/Kconfig" |
41 | 51 | ||
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile index aa6bd657ef40..be7636abcb3f 100644 --- a/drivers/nfc/Makefile +++ b/drivers/nfc/Makefile | |||
@@ -7,5 +7,6 @@ obj-$(CONFIG_NFC_MICROREAD) += microread/ | |||
7 | obj-$(CONFIG_NFC_PN533) += pn533.o | 7 | obj-$(CONFIG_NFC_PN533) += pn533.o |
8 | obj-$(CONFIG_NFC_WILINK) += nfcwilink.o | 8 | obj-$(CONFIG_NFC_WILINK) += nfcwilink.o |
9 | obj-$(CONFIG_NFC_MEI_PHY) += mei_phy.o | 9 | obj-$(CONFIG_NFC_MEI_PHY) += mei_phy.o |
10 | obj-$(CONFIG_NFC_SIM) += nfcsim.o | ||
10 | 11 | ||
11 | ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG | 12 | ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG |
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c index 1201bdbfb791..606bf55e76ec 100644 --- a/drivers/nfc/mei_phy.c +++ b/drivers/nfc/mei_phy.c | |||
@@ -30,7 +30,7 @@ struct mei_nfc_hdr { | |||
30 | u16 req_id; | 30 | u16 req_id; |
31 | u32 reserved; | 31 | u32 reserved; |
32 | u16 data_size; | 32 | u16 data_size; |
33 | } __attribute__((packed)); | 33 | } __packed; |
34 | 34 | ||
35 | #define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD) | 35 | #define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD) |
36 | 36 | ||
@@ -60,8 +60,8 @@ int nfc_mei_phy_enable(void *phy_id) | |||
60 | 60 | ||
61 | r = mei_cl_enable_device(phy->device); | 61 | r = mei_cl_enable_device(phy->device); |
62 | if (r < 0) { | 62 | if (r < 0) { |
63 | pr_err("MEI_PHY: Could not enable device\n"); | 63 | pr_err("MEI_PHY: Could not enable device\n"); |
64 | return r; | 64 | return r; |
65 | } | 65 | } |
66 | 66 | ||
67 | r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy); | 67 | r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy); |
diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c index 3420d833db17..cdb9f6de132a 100644 --- a/drivers/nfc/microread/microread.c +++ b/drivers/nfc/microread/microread.c | |||
@@ -650,7 +650,7 @@ int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name, | |||
650 | { | 650 | { |
651 | struct microread_info *info; | 651 | struct microread_info *info; |
652 | unsigned long quirks = 0; | 652 | unsigned long quirks = 0; |
653 | u32 protocols, se; | 653 | u32 protocols; |
654 | struct nfc_hci_init_data init_data; | 654 | struct nfc_hci_init_data init_data; |
655 | int r; | 655 | int r; |
656 | 656 | ||
@@ -678,10 +678,8 @@ int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name, | |||
678 | NFC_PROTO_ISO14443_B_MASK | | 678 | NFC_PROTO_ISO14443_B_MASK | |
679 | NFC_PROTO_NFC_DEP_MASK; | 679 | NFC_PROTO_NFC_DEP_MASK; |
680 | 680 | ||
681 | se = NFC_SE_UICC | NFC_SE_EMBEDDED; | ||
682 | |||
683 | info->hdev = nfc_hci_allocate_device(µread_hci_ops, &init_data, | 681 | info->hdev = nfc_hci_allocate_device(µread_hci_ops, &init_data, |
684 | quirks, protocols, se, llc_name, | 682 | quirks, protocols, llc_name, |
685 | phy_headroom + | 683 | phy_headroom + |
686 | MICROREAD_CMDS_HEADROOM, | 684 | MICROREAD_CMDS_HEADROOM, |
687 | phy_tailroom + | 685 | phy_tailroom + |
diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c new file mode 100644 index 000000000000..c5c30fb1d7bf --- /dev/null +++ b/drivers/nfc/nfcsim.c | |||
@@ -0,0 +1,541 @@ | |||
1 | /* | ||
2 | * NFC hardware simulation driver | ||
3 | * Copyright (c) 2013, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include <linux/device.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/nfc.h> | ||
20 | #include <net/nfc/nfc.h> | ||
21 | |||
22 | #define DEV_ERR(_dev, fmt, args...) nfc_dev_err(&_dev->nfc_dev->dev, \ | ||
23 | "%s: " fmt, __func__, ## args) | ||
24 | |||
25 | #define DEV_DBG(_dev, fmt, args...) nfc_dev_dbg(&_dev->nfc_dev->dev, \ | ||
26 | "%s: " fmt, __func__, ## args) | ||
27 | |||
28 | #define NFCSIM_VERSION "0.1" | ||
29 | |||
30 | #define NFCSIM_POLL_NONE 0 | ||
31 | #define NFCSIM_POLL_INITIATOR 1 | ||
32 | #define NFCSIM_POLL_TARGET 2 | ||
33 | #define NFCSIM_POLL_DUAL (NFCSIM_POLL_INITIATOR | NFCSIM_POLL_TARGET) | ||
34 | |||
35 | struct nfcsim { | ||
36 | struct nfc_dev *nfc_dev; | ||
37 | |||
38 | struct mutex lock; | ||
39 | |||
40 | struct delayed_work recv_work; | ||
41 | |||
42 | struct sk_buff *clone_skb; | ||
43 | |||
44 | struct delayed_work poll_work; | ||
45 | u8 polling_mode; | ||
46 | u8 curr_polling_mode; | ||
47 | |||
48 | u8 shutting_down; | ||
49 | |||
50 | u8 up; | ||
51 | |||
52 | u8 initiator; | ||
53 | |||
54 | data_exchange_cb_t cb; | ||
55 | void *cb_context; | ||
56 | |||
57 | struct nfcsim *peer_dev; | ||
58 | }; | ||
59 | |||
60 | static struct nfcsim *dev0; | ||
61 | static struct nfcsim *dev1; | ||
62 | |||
63 | struct workqueue_struct *wq; | ||
64 | |||
65 | static void nfcsim_cleanup_dev(struct nfcsim *dev, u8 shutdown) | ||
66 | { | ||
67 | DEV_DBG(dev, "shutdown=%d", shutdown); | ||
68 | |||
69 | mutex_lock(&dev->lock); | ||
70 | |||
71 | dev->polling_mode = NFCSIM_POLL_NONE; | ||
72 | dev->shutting_down = shutdown; | ||
73 | dev->cb = NULL; | ||
74 | dev_kfree_skb(dev->clone_skb); | ||
75 | dev->clone_skb = NULL; | ||
76 | |||
77 | mutex_unlock(&dev->lock); | ||
78 | |||
79 | cancel_delayed_work_sync(&dev->poll_work); | ||
80 | cancel_delayed_work_sync(&dev->recv_work); | ||
81 | } | ||
82 | |||
83 | static int nfcsim_target_found(struct nfcsim *dev) | ||
84 | { | ||
85 | struct nfc_target nfc_tgt; | ||
86 | |||
87 | DEV_DBG(dev, ""); | ||
88 | |||
89 | memset(&nfc_tgt, 0, sizeof(struct nfc_target)); | ||
90 | |||
91 | nfc_tgt.supported_protocols = NFC_PROTO_NFC_DEP_MASK; | ||
92 | nfc_targets_found(dev->nfc_dev, &nfc_tgt, 1); | ||
93 | |||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | static int nfcsim_dev_up(struct nfc_dev *nfc_dev) | ||
98 | { | ||
99 | struct nfcsim *dev = nfc_get_drvdata(nfc_dev); | ||
100 | |||
101 | DEV_DBG(dev, ""); | ||
102 | |||
103 | mutex_lock(&dev->lock); | ||
104 | |||
105 | dev->up = 1; | ||
106 | |||
107 | mutex_unlock(&dev->lock); | ||
108 | |||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | static int nfcsim_dev_down(struct nfc_dev *nfc_dev) | ||
113 | { | ||
114 | struct nfcsim *dev = nfc_get_drvdata(nfc_dev); | ||
115 | |||
116 | DEV_DBG(dev, ""); | ||
117 | |||
118 | mutex_lock(&dev->lock); | ||
119 | |||
120 | dev->up = 0; | ||
121 | |||
122 | mutex_unlock(&dev->lock); | ||
123 | |||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | static int nfcsim_dep_link_up(struct nfc_dev *nfc_dev, | ||
128 | struct nfc_target *target, | ||
129 | u8 comm_mode, u8 *gb, size_t gb_len) | ||
130 | { | ||
131 | int rc; | ||
132 | struct nfcsim *dev = nfc_get_drvdata(nfc_dev); | ||
133 | struct nfcsim *peer = dev->peer_dev; | ||
134 | u8 *remote_gb; | ||
135 | size_t remote_gb_len; | ||
136 | |||
137 | DEV_DBG(dev, "target_idx: %d, comm_mode: %d\n", target->idx, comm_mode); | ||
138 | |||
139 | mutex_lock(&peer->lock); | ||
140 | |||
141 | nfc_tm_activated(peer->nfc_dev, NFC_PROTO_NFC_DEP_MASK, | ||
142 | NFC_COMM_ACTIVE, gb, gb_len); | ||
143 | |||
144 | remote_gb = nfc_get_local_general_bytes(peer->nfc_dev, &remote_gb_len); | ||
145 | if (!remote_gb) { | ||
146 | DEV_ERR(peer, "Can't get remote general bytes"); | ||
147 | |||
148 | mutex_unlock(&peer->lock); | ||
149 | return -EINVAL; | ||
150 | } | ||
151 | |||
152 | mutex_unlock(&peer->lock); | ||
153 | |||
154 | mutex_lock(&dev->lock); | ||
155 | |||
156 | rc = nfc_set_remote_general_bytes(nfc_dev, remote_gb, remote_gb_len); | ||
157 | if (rc) { | ||
158 | DEV_ERR(dev, "Can't set remote general bytes"); | ||
159 | mutex_unlock(&dev->lock); | ||
160 | return rc; | ||
161 | } | ||
162 | |||
163 | rc = nfc_dep_link_is_up(nfc_dev, target->idx, NFC_COMM_ACTIVE, | ||
164 | NFC_RF_INITIATOR); | ||
165 | |||
166 | mutex_unlock(&dev->lock); | ||
167 | |||
168 | return rc; | ||
169 | } | ||
170 | |||
171 | static int nfcsim_dep_link_down(struct nfc_dev *nfc_dev) | ||
172 | { | ||
173 | struct nfcsim *dev = nfc_get_drvdata(nfc_dev); | ||
174 | |||
175 | DEV_DBG(dev, ""); | ||
176 | |||
177 | nfcsim_cleanup_dev(dev, 0); | ||
178 | |||
179 | return 0; | ||
180 | } | ||
181 | |||
182 | static int nfcsim_start_poll(struct nfc_dev *nfc_dev, | ||
183 | u32 im_protocols, u32 tm_protocols) | ||
184 | { | ||
185 | struct nfcsim *dev = nfc_get_drvdata(nfc_dev); | ||
186 | int rc; | ||
187 | |||
188 | mutex_lock(&dev->lock); | ||
189 | |||
190 | if (dev->polling_mode != NFCSIM_POLL_NONE) { | ||
191 | DEV_ERR(dev, "Already in polling mode"); | ||
192 | rc = -EBUSY; | ||
193 | goto exit; | ||
194 | } | ||
195 | |||
196 | if (im_protocols & NFC_PROTO_NFC_DEP_MASK) | ||
197 | dev->polling_mode |= NFCSIM_POLL_INITIATOR; | ||
198 | |||
199 | if (tm_protocols & NFC_PROTO_NFC_DEP_MASK) | ||
200 | dev->polling_mode |= NFCSIM_POLL_TARGET; | ||
201 | |||
202 | if (dev->polling_mode == NFCSIM_POLL_NONE) { | ||
203 | DEV_ERR(dev, "Unsupported polling mode"); | ||
204 | rc = -EINVAL; | ||
205 | goto exit; | ||
206 | } | ||
207 | |||
208 | dev->initiator = 0; | ||
209 | dev->curr_polling_mode = NFCSIM_POLL_NONE; | ||
210 | |||
211 | queue_delayed_work(wq, &dev->poll_work, 0); | ||
212 | |||
213 | DEV_DBG(dev, "Start polling: im: 0x%X, tm: 0x%X", im_protocols, | ||
214 | tm_protocols); | ||
215 | |||
216 | rc = 0; | ||
217 | exit: | ||
218 | mutex_unlock(&dev->lock); | ||
219 | |||
220 | return rc; | ||
221 | } | ||
222 | |||
223 | static void nfcsim_stop_poll(struct nfc_dev *nfc_dev) | ||
224 | { | ||
225 | struct nfcsim *dev = nfc_get_drvdata(nfc_dev); | ||
226 | |||
227 | DEV_DBG(dev, "Stop poll"); | ||
228 | |||
229 | mutex_lock(&dev->lock); | ||
230 | |||
231 | dev->polling_mode = NFCSIM_POLL_NONE; | ||
232 | |||
233 | mutex_unlock(&dev->lock); | ||
234 | |||
235 | cancel_delayed_work_sync(&dev->poll_work); | ||
236 | } | ||
237 | |||
238 | static int nfcsim_activate_target(struct nfc_dev *nfc_dev, | ||
239 | struct nfc_target *target, u32 protocol) | ||
240 | { | ||
241 | struct nfcsim *dev = nfc_get_drvdata(nfc_dev); | ||
242 | |||
243 | DEV_DBG(dev, ""); | ||
244 | |||
245 | return -ENOTSUPP; | ||
246 | } | ||
247 | |||
248 | static void nfcsim_deactivate_target(struct nfc_dev *nfc_dev, | ||
249 | struct nfc_target *target) | ||
250 | { | ||
251 | struct nfcsim *dev = nfc_get_drvdata(nfc_dev); | ||
252 | |||
253 | DEV_DBG(dev, ""); | ||
254 | } | ||
255 | |||
256 | static void nfcsim_wq_recv(struct work_struct *work) | ||
257 | { | ||
258 | struct nfcsim *dev = container_of(work, struct nfcsim, | ||
259 | recv_work.work); | ||
260 | |||
261 | mutex_lock(&dev->lock); | ||
262 | |||
263 | if (dev->shutting_down || !dev->up || !dev->clone_skb) { | ||
264 | dev_kfree_skb(dev->clone_skb); | ||
265 | goto exit; | ||
266 | } | ||
267 | |||
268 | if (dev->initiator) { | ||
269 | if (!dev->cb) { | ||
270 | DEV_ERR(dev, "Null recv callback"); | ||
271 | dev_kfree_skb(dev->clone_skb); | ||
272 | goto exit; | ||
273 | } | ||
274 | |||
275 | dev->cb(dev->cb_context, dev->clone_skb, 0); | ||
276 | dev->cb = NULL; | ||
277 | } else { | ||
278 | nfc_tm_data_received(dev->nfc_dev, dev->clone_skb); | ||
279 | } | ||
280 | |||
281 | exit: | ||
282 | dev->clone_skb = NULL; | ||
283 | |||
284 | mutex_unlock(&dev->lock); | ||
285 | } | ||
286 | |||
287 | static int nfcsim_tx(struct nfc_dev *nfc_dev, struct nfc_target *target, | ||
288 | struct sk_buff *skb, data_exchange_cb_t cb, | ||
289 | void *cb_context) | ||
290 | { | ||
291 | struct nfcsim *dev = nfc_get_drvdata(nfc_dev); | ||
292 | struct nfcsim *peer = dev->peer_dev; | ||
293 | int err; | ||
294 | |||
295 | mutex_lock(&dev->lock); | ||
296 | |||
297 | if (dev->shutting_down || !dev->up) { | ||
298 | mutex_unlock(&dev->lock); | ||
299 | err = -ENODEV; | ||
300 | goto exit; | ||
301 | } | ||
302 | |||
303 | dev->cb = cb; | ||
304 | dev->cb_context = cb_context; | ||
305 | |||
306 | mutex_unlock(&dev->lock); | ||
307 | |||
308 | mutex_lock(&peer->lock); | ||
309 | |||
310 | peer->clone_skb = skb_clone(skb, GFP_KERNEL); | ||
311 | |||
312 | if (!peer->clone_skb) { | ||
313 | DEV_ERR(dev, "skb_clone failed"); | ||
314 | mutex_unlock(&peer->lock); | ||
315 | err = -ENOMEM; | ||
316 | goto exit; | ||
317 | } | ||
318 | |||
319 | /* This simulates an arbitrary transmission delay between the 2 devices. | ||
320 | * If packet transmission occurs immediately between them, we have a | ||
321 | * non-stop flow of several tens of thousands of SYMM packets per second | ||
322 | * and a burning CPU. | ||
323 | * | ||
324 | * TODO: Add support for a sysfs entry to control this delay. | ||
325 | */ | ||
326 | queue_delayed_work(wq, &peer->recv_work, msecs_to_jiffies(5)); | ||
327 | |||
328 | mutex_unlock(&peer->lock); | ||
329 | |||
330 | err = 0; | ||
331 | exit: | ||
332 | dev_kfree_skb(skb); | ||
333 | |||
334 | return err; | ||
335 | } | ||
336 | |||
337 | static int nfcsim_im_transceive(struct nfc_dev *nfc_dev, | ||
338 | struct nfc_target *target, struct sk_buff *skb, | ||
339 | data_exchange_cb_t cb, void *cb_context) | ||
340 | { | ||
341 | return nfcsim_tx(nfc_dev, target, skb, cb, cb_context); | ||
342 | } | ||
343 | |||
344 | static int nfcsim_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb) | ||
345 | { | ||
346 | return nfcsim_tx(nfc_dev, NULL, skb, NULL, NULL); | ||
347 | } | ||
348 | |||
349 | static struct nfc_ops nfcsim_nfc_ops = { | ||
350 | .dev_up = nfcsim_dev_up, | ||
351 | .dev_down = nfcsim_dev_down, | ||
352 | .dep_link_up = nfcsim_dep_link_up, | ||
353 | .dep_link_down = nfcsim_dep_link_down, | ||
354 | .start_poll = nfcsim_start_poll, | ||
355 | .stop_poll = nfcsim_stop_poll, | ||
356 | .activate_target = nfcsim_activate_target, | ||
357 | .deactivate_target = nfcsim_deactivate_target, | ||
358 | .im_transceive = nfcsim_im_transceive, | ||
359 | .tm_send = nfcsim_tm_send, | ||
360 | }; | ||
361 | |||
362 | static void nfcsim_set_polling_mode(struct nfcsim *dev) | ||
363 | { | ||
364 | if (dev->polling_mode == NFCSIM_POLL_NONE) { | ||
365 | dev->curr_polling_mode = NFCSIM_POLL_NONE; | ||
366 | return; | ||
367 | } | ||
368 | |||
369 | if (dev->curr_polling_mode == NFCSIM_POLL_NONE) { | ||
370 | if (dev->polling_mode & NFCSIM_POLL_INITIATOR) | ||
371 | dev->curr_polling_mode = NFCSIM_POLL_INITIATOR; | ||
372 | else | ||
373 | dev->curr_polling_mode = NFCSIM_POLL_TARGET; | ||
374 | |||
375 | return; | ||
376 | } | ||
377 | |||
378 | if (dev->polling_mode == NFCSIM_POLL_DUAL) { | ||
379 | if (dev->curr_polling_mode == NFCSIM_POLL_TARGET) | ||
380 | dev->curr_polling_mode = NFCSIM_POLL_INITIATOR; | ||
381 | else | ||
382 | dev->curr_polling_mode = NFCSIM_POLL_TARGET; | ||
383 | } | ||
384 | } | ||
385 | |||
386 | static void nfcsim_wq_poll(struct work_struct *work) | ||
387 | { | ||
388 | struct nfcsim *dev = container_of(work, struct nfcsim, poll_work.work); | ||
389 | struct nfcsim *peer = dev->peer_dev; | ||
390 | |||
391 | /* These work items run on an ordered workqueue and are therefore | ||
392 | * serialized. So we can take both mutexes without deadlocking. | ||
393 | */ | ||
394 | mutex_lock(&dev->lock); | ||
395 | mutex_lock(&peer->lock); | ||
396 | |||
397 | nfcsim_set_polling_mode(dev); | ||
398 | |||
399 | if (dev->curr_polling_mode == NFCSIM_POLL_NONE) { | ||
400 | DEV_DBG(dev, "Not polling"); | ||
401 | goto unlock; | ||
402 | } | ||
403 | |||
404 | DEV_DBG(dev, "Polling as %s", | ||
405 | dev->curr_polling_mode == NFCSIM_POLL_INITIATOR ? | ||
406 | "initiator" : "target"); | ||
407 | |||
408 | if (dev->curr_polling_mode == NFCSIM_POLL_TARGET) | ||
409 | goto sched_work; | ||
410 | |||
411 | if (peer->curr_polling_mode == NFCSIM_POLL_TARGET) { | ||
412 | peer->polling_mode = NFCSIM_POLL_NONE; | ||
413 | dev->polling_mode = NFCSIM_POLL_NONE; | ||
414 | |||
415 | dev->initiator = 1; | ||
416 | |||
417 | nfcsim_target_found(dev); | ||
418 | |||
419 | goto unlock; | ||
420 | } | ||
421 | |||
422 | sched_work: | ||
423 | /* This defines the delay for an initiator to check if the other device | ||
424 | * is polling in target mode. | ||
425 | * If the device starts in dual mode polling, it switches between | ||
426 | * initiator and target at every round. | ||
427 | * Because the wq is ordered and only 1 work item is executed at a time, | ||
428 | * we'll always have one device polling as initiator and the other as | ||
429 | * target at some point, even if both are started in dual mode. | ||
430 | */ | ||
431 | queue_delayed_work(wq, &dev->poll_work, msecs_to_jiffies(200)); | ||
432 | |||
433 | unlock: | ||
434 | mutex_unlock(&peer->lock); | ||
435 | mutex_unlock(&dev->lock); | ||
436 | } | ||
437 | |||
438 | static struct nfcsim *nfcsim_init_dev(void) | ||
439 | { | ||
440 | struct nfcsim *dev; | ||
441 | int rc = -ENOMEM; | ||
442 | |||
443 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | ||
444 | if (dev == NULL) | ||
445 | return ERR_PTR(-ENOMEM); | ||
446 | |||
447 | mutex_init(&dev->lock); | ||
448 | |||
449 | INIT_DELAYED_WORK(&dev->recv_work, nfcsim_wq_recv); | ||
450 | INIT_DELAYED_WORK(&dev->poll_work, nfcsim_wq_poll); | ||
451 | |||
452 | dev->nfc_dev = nfc_allocate_device(&nfcsim_nfc_ops, | ||
453 | NFC_PROTO_NFC_DEP_MASK, | ||
454 | 0, 0); | ||
455 | if (!dev->nfc_dev) | ||
456 | goto error; | ||
457 | |||
458 | nfc_set_drvdata(dev->nfc_dev, dev); | ||
459 | |||
460 | rc = nfc_register_device(dev->nfc_dev); | ||
461 | if (rc) | ||
462 | goto free_nfc_dev; | ||
463 | |||
464 | return dev; | ||
465 | |||
466 | free_nfc_dev: | ||
467 | nfc_free_device(dev->nfc_dev); | ||
468 | |||
469 | error: | ||
470 | kfree(dev); | ||
471 | |||
472 | return ERR_PTR(rc); | ||
473 | } | ||
474 | |||
475 | static void nfcsim_free_device(struct nfcsim *dev) | ||
476 | { | ||
477 | nfc_unregister_device(dev->nfc_dev); | ||
478 | |||
479 | nfc_free_device(dev->nfc_dev); | ||
480 | |||
481 | kfree(dev); | ||
482 | } | ||
483 | |||
484 | int __init nfcsim_init(void) | ||
485 | { | ||
486 | int rc; | ||
487 | |||
488 | /* We need an ordered wq to ensure that poll_work items are executed | ||
489 | * one at a time. | ||
490 | */ | ||
491 | wq = alloc_ordered_workqueue("nfcsim", 0); | ||
492 | if (!wq) { | ||
493 | rc = -ENOMEM; | ||
494 | goto exit; | ||
495 | } | ||
496 | |||
497 | dev0 = nfcsim_init_dev(); | ||
498 | if (IS_ERR(dev0)) { | ||
499 | rc = PTR_ERR(dev0); | ||
500 | goto exit; | ||
501 | } | ||
502 | |||
503 | dev1 = nfcsim_init_dev(); | ||
504 | if (IS_ERR(dev1)) { | ||
505 | kfree(dev0); | ||
506 | |||
507 | rc = PTR_ERR(dev1); | ||
508 | goto exit; | ||
509 | } | ||
510 | |||
511 | dev0->peer_dev = dev1; | ||
512 | dev1->peer_dev = dev0; | ||
513 | |||
514 | pr_debug("NFCsim " NFCSIM_VERSION " initialized\n"); | ||
515 | |||
516 | rc = 0; | ||
517 | exit: | ||
518 | if (rc) | ||
519 | pr_err("Failed to initialize nfcsim driver (%d)\n", | ||
520 | rc); | ||
521 | |||
522 | return rc; | ||
523 | } | ||
524 | |||
525 | void __exit nfcsim_exit(void) | ||
526 | { | ||
527 | nfcsim_cleanup_dev(dev0, 1); | ||
528 | nfcsim_cleanup_dev(dev1, 1); | ||
529 | |||
530 | nfcsim_free_device(dev0); | ||
531 | nfcsim_free_device(dev1); | ||
532 | |||
533 | destroy_workqueue(wq); | ||
534 | } | ||
535 | |||
536 | module_init(nfcsim_init); | ||
537 | module_exit(nfcsim_exit); | ||
538 | |||
539 | MODULE_DESCRIPTION("NFCSim driver ver " NFCSIM_VERSION); | ||
540 | MODULE_VERSION(NFCSIM_VERSION); | ||
541 | MODULE_LICENSE("GPL"); | ||
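The TODO in nfcsim_tx() above notes that the 5 ms simulated transmission delay should eventually become tunable through sysfs. As a hedged sketch only (not part of this patch), the same knob could be exposed as a module parameter; nfcsim_tx_delay_ms and the parameter name are hypothetical:

	/* Hypothetical variation: expose the simulated transmission delay as a
	 * module parameter instead of the sysfs entry mentioned in the TODO.
	 */
	static unsigned int nfcsim_tx_delay_ms = 5;
	module_param_named(tx_delay_ms, nfcsim_tx_delay_ms, uint, 0644);
	MODULE_PARM_DESC(tx_delay_ms, "Simulated nfcsim transmission delay (ms)");

	/* nfcsim_tx() would then schedule the peer's receive work as: */
	queue_delayed_work(wq, &peer->recv_work,
			   msecs_to_jiffies(nfcsim_tx_delay_ms));
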
diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c index 3b731acbc408..59f95d8fc98c 100644 --- a/drivers/nfc/nfcwilink.c +++ b/drivers/nfc/nfcwilink.c | |||
@@ -109,7 +109,7 @@ enum { | |||
109 | NFCWILINK_FW_DOWNLOAD, | 109 | NFCWILINK_FW_DOWNLOAD, |
110 | }; | 110 | }; |
111 | 111 | ||
112 | static int nfcwilink_send(struct sk_buff *skb); | 112 | static int nfcwilink_send(struct nci_dev *ndev, struct sk_buff *skb); |
113 | 113 | ||
114 | static inline struct sk_buff *nfcwilink_skb_alloc(unsigned int len, gfp_t how) | 114 | static inline struct sk_buff *nfcwilink_skb_alloc(unsigned int len, gfp_t how) |
115 | { | 115 | { |
@@ -156,8 +156,6 @@ static int nfcwilink_get_bts_file_name(struct nfcwilink *drv, char *file_name) | |||
156 | return -ENOMEM; | 156 | return -ENOMEM; |
157 | } | 157 | } |
158 | 158 | ||
159 | skb->dev = (void *)drv->ndev; | ||
160 | |||
161 | cmd = (struct nci_vs_nfcc_info_cmd *) | 159 | cmd = (struct nci_vs_nfcc_info_cmd *) |
162 | skb_put(skb, sizeof(struct nci_vs_nfcc_info_cmd)); | 160 | skb_put(skb, sizeof(struct nci_vs_nfcc_info_cmd)); |
163 | cmd->gid = NCI_VS_NFCC_INFO_CMD_GID; | 161 | cmd->gid = NCI_VS_NFCC_INFO_CMD_GID; |
@@ -166,7 +164,7 @@ static int nfcwilink_get_bts_file_name(struct nfcwilink *drv, char *file_name) | |||
166 | 164 | ||
167 | drv->nfcc_info.plen = 0; | 165 | drv->nfcc_info.plen = 0; |
168 | 166 | ||
169 | rc = nfcwilink_send(skb); | 167 | rc = nfcwilink_send(drv->ndev, skb); |
170 | if (rc) | 168 | if (rc) |
171 | return rc; | 169 | return rc; |
172 | 170 | ||
@@ -232,11 +230,9 @@ static int nfcwilink_send_bts_cmd(struct nfcwilink *drv, __u8 *data, int len) | |||
232 | return -ENOMEM; | 230 | return -ENOMEM; |
233 | } | 231 | } |
234 | 232 | ||
235 | skb->dev = (void *)drv->ndev; | ||
236 | |||
237 | memcpy(skb_put(skb, len), data, len); | 233 | memcpy(skb_put(skb, len), data, len); |
238 | 234 | ||
239 | rc = nfcwilink_send(skb); | 235 | rc = nfcwilink_send(drv->ndev, skb); |
240 | if (rc) | 236 | if (rc) |
241 | return rc; | 237 | return rc; |
242 | 238 | ||
@@ -371,10 +367,8 @@ static long nfcwilink_receive(void *priv_data, struct sk_buff *skb) | |||
371 | return 0; | 367 | return 0; |
372 | } | 368 | } |
373 | 369 | ||
374 | skb->dev = (void *) drv->ndev; | ||
375 | |||
376 | /* Forward skb to NCI core layer */ | 370 | /* Forward skb to NCI core layer */ |
377 | rc = nci_recv_frame(skb); | 371 | rc = nci_recv_frame(drv->ndev, skb); |
378 | if (rc < 0) { | 372 | if (rc < 0) { |
379 | nfc_dev_err(&drv->pdev->dev, "nci_recv_frame failed %d", rc); | 373 | nfc_dev_err(&drv->pdev->dev, "nci_recv_frame failed %d", rc); |
380 | return rc; | 374 | return rc; |
@@ -480,9 +474,8 @@ static int nfcwilink_close(struct nci_dev *ndev) | |||
480 | return rc; | 474 | return rc; |
481 | } | 475 | } |
482 | 476 | ||
483 | static int nfcwilink_send(struct sk_buff *skb) | 477 | static int nfcwilink_send(struct nci_dev *ndev, struct sk_buff *skb) |
484 | { | 478 | { |
485 | struct nci_dev *ndev = (struct nci_dev *)skb->dev; | ||
486 | struct nfcwilink *drv = nci_get_drvdata(ndev); | 479 | struct nfcwilink *drv = nci_get_drvdata(ndev); |
487 | struct nfcwilink_hdr hdr = {NFCWILINK_CHNL, NFCWILINK_OPCODE, 0x0000}; | 480 | struct nfcwilink_hdr hdr = {NFCWILINK_CHNL, NFCWILINK_OPCODE, 0x0000}; |
488 | long len; | 481 | long len; |
@@ -542,7 +535,6 @@ static int nfcwilink_probe(struct platform_device *pdev) | |||
542 | 535 | ||
543 | drv->ndev = nci_allocate_device(&nfcwilink_ops, | 536 | drv->ndev = nci_allocate_device(&nfcwilink_ops, |
544 | protocols, | 537 | protocols, |
545 | NFC_SE_NONE, | ||
546 | NFCWILINK_HDR_LEN, | 538 | NFCWILINK_HDR_LEN, |
547 | 0); | 539 | 0); |
548 | if (!drv->ndev) { | 540 | if (!drv->ndev) { |
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index 8f6f2baa930d..bfb4a4e7c604 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c | |||
@@ -258,7 +258,7 @@ static const struct pn533_poll_modulations poll_mod[] = { | |||
258 | .opcode = PN533_FELICA_OPC_SENSF_REQ, | 258 | .opcode = PN533_FELICA_OPC_SENSF_REQ, |
259 | .sc = PN533_FELICA_SENSF_SC_ALL, | 259 | .sc = PN533_FELICA_SENSF_SC_ALL, |
260 | .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE, | 260 | .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE, |
261 | .tsn = 0, | 261 | .tsn = 0x03, |
262 | }, | 262 | }, |
263 | }, | 263 | }, |
264 | .len = 7, | 264 | .len = 7, |
@@ -271,7 +271,7 @@ static const struct pn533_poll_modulations poll_mod[] = { | |||
271 | .opcode = PN533_FELICA_OPC_SENSF_REQ, | 271 | .opcode = PN533_FELICA_OPC_SENSF_REQ, |
272 | .sc = PN533_FELICA_SENSF_SC_ALL, | 272 | .sc = PN533_FELICA_SENSF_SC_ALL, |
273 | .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE, | 273 | .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE, |
274 | .tsn = 0, | 274 | .tsn = 0x03, |
275 | }, | 275 | }, |
276 | }, | 276 | }, |
277 | .len = 7, | 277 | .len = 7, |
@@ -1235,7 +1235,7 @@ static int pn533_target_found_type_a(struct nfc_target *nfc_tgt, u8 *tgt_data, | |||
1235 | struct pn533_target_felica { | 1235 | struct pn533_target_felica { |
1236 | u8 pol_res; | 1236 | u8 pol_res; |
1237 | u8 opcode; | 1237 | u8 opcode; |
1238 | u8 nfcid2[8]; | 1238 | u8 nfcid2[NFC_NFCID2_MAXSIZE]; |
1239 | u8 pad[8]; | 1239 | u8 pad[8]; |
1240 | /* optional */ | 1240 | /* optional */ |
1241 | u8 syst_code[]; | 1241 | u8 syst_code[]; |
@@ -1275,6 +1275,9 @@ static int pn533_target_found_felica(struct nfc_target *nfc_tgt, u8 *tgt_data, | |||
1275 | memcpy(nfc_tgt->sensf_res, &tgt_felica->opcode, 9); | 1275 | memcpy(nfc_tgt->sensf_res, &tgt_felica->opcode, 9); |
1276 | nfc_tgt->sensf_res_len = 9; | 1276 | nfc_tgt->sensf_res_len = 9; |
1277 | 1277 | ||
1278 | memcpy(nfc_tgt->nfcid2, tgt_felica->nfcid2, NFC_NFCID2_MAXSIZE); | ||
1279 | nfc_tgt->nfcid2_len = NFC_NFCID2_MAXSIZE; | ||
1280 | |||
1278 | return 0; | 1281 | return 0; |
1279 | } | 1282 | } |
1280 | 1283 | ||
@@ -2084,6 +2087,9 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, | |||
2084 | if (comm_mode == NFC_COMM_PASSIVE) | 2087 | if (comm_mode == NFC_COMM_PASSIVE) |
2085 | skb_len += PASSIVE_DATA_LEN; | 2088 | skb_len += PASSIVE_DATA_LEN; |
2086 | 2089 | ||
2090 | if (target && target->nfcid2_len) | ||
2091 | skb_len += NFC_NFCID3_MAXSIZE; | ||
2092 | |||
2087 | skb = pn533_alloc_skb(dev, skb_len); | 2093 | skb = pn533_alloc_skb(dev, skb_len); |
2088 | if (!skb) | 2094 | if (!skb) |
2089 | return -ENOMEM; | 2095 | return -ENOMEM; |
@@ -2100,6 +2106,12 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, | |||
2100 | *next |= 1; | 2106 | *next |= 1; |
2101 | } | 2107 | } |
2102 | 2108 | ||
2109 | if (target && target->nfcid2_len) { | ||
2110 | memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), target->nfcid2, | ||
2111 | target->nfcid2_len); | ||
2112 | *next |= 2; | ||
2113 | } | ||
2114 | |||
2103 | if (gb != NULL && gb_len > 0) { | 2115 | if (gb != NULL && gb_len > 0) { |
2104 | memcpy(skb_put(skb, gb_len), gb, gb_len); | 2116 | memcpy(skb_put(skb, gb_len), gb, gb_len); |
2105 | *next |= 4; /* We have some Gi */ | 2117 | *next |= 4; /* We have some Gi */ |
@@ -2489,7 +2501,7 @@ static void pn533_acr122_poweron_rdr_resp(struct urb *urb) | |||
2489 | 2501 | ||
2490 | nfc_dev_dbg(&urb->dev->dev, "%s", __func__); | 2502 | nfc_dev_dbg(&urb->dev->dev, "%s", __func__); |
2491 | 2503 | ||
2492 | print_hex_dump(KERN_ERR, "ACR122 RX: ", DUMP_PREFIX_NONE, 16, 1, | 2504 | print_hex_dump_debug("ACR122 RX: ", DUMP_PREFIX_NONE, 16, 1, |
2493 | urb->transfer_buffer, urb->transfer_buffer_length, | 2505 | urb->transfer_buffer, urb->transfer_buffer_length, |
2494 | false); | 2506 | false); |
2495 | 2507 | ||
@@ -2520,7 +2532,7 @@ static int pn533_acr122_poweron_rdr(struct pn533 *dev) | |||
2520 | dev->out_urb->transfer_buffer = cmd; | 2532 | dev->out_urb->transfer_buffer = cmd; |
2521 | dev->out_urb->transfer_buffer_length = sizeof(cmd); | 2533 | dev->out_urb->transfer_buffer_length = sizeof(cmd); |
2522 | 2534 | ||
2523 | print_hex_dump(KERN_ERR, "ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1, | 2535 | print_hex_dump_debug("ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1, |
2524 | cmd, sizeof(cmd), false); | 2536 | cmd, sizeof(cmd), false); |
2525 | 2537 | ||
2526 | rc = usb_submit_urb(dev->out_urb, GFP_KERNEL); | 2538 | rc = usb_submit_urb(dev->out_urb, GFP_KERNEL); |
@@ -2774,17 +2786,18 @@ static int pn533_probe(struct usb_interface *interface, | |||
2774 | goto destroy_wq; | 2786 | goto destroy_wq; |
2775 | 2787 | ||
2776 | nfc_dev_info(&dev->interface->dev, | 2788 | nfc_dev_info(&dev->interface->dev, |
2777 | "NXP PN533 firmware ver %d.%d now attached", | 2789 | "NXP PN5%02X firmware ver %d.%d now attached", |
2778 | fw_ver.ver, fw_ver.rev); | 2790 | fw_ver.ic, fw_ver.ver, fw_ver.rev); |
2779 | 2791 | ||
2780 | 2792 | ||
2781 | dev->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols, | 2793 | dev->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols, |
2782 | NFC_SE_NONE, | ||
2783 | dev->ops->tx_header_len + | 2794 | dev->ops->tx_header_len + |
2784 | PN533_CMD_DATAEXCH_HEAD_LEN, | 2795 | PN533_CMD_DATAEXCH_HEAD_LEN, |
2785 | dev->ops->tx_tail_len); | 2796 | dev->ops->tx_tail_len); |
2786 | if (!dev->nfc_dev) | 2797 | if (!dev->nfc_dev) { |
2798 | rc = -ENOMEM; | ||
2787 | goto destroy_wq; | 2799 | goto destroy_wq; |
2800 | } | ||
2788 | 2801 | ||
2789 | nfc_set_parent_dev(dev->nfc_dev, &interface->dev); | 2802 | nfc_set_parent_dev(dev->nfc_dev, &interface->dev); |
2790 | nfc_set_drvdata(dev->nfc_dev, dev); | 2803 | nfc_set_drvdata(dev->nfc_dev, dev); |
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c index 9c5f16e7baef..0d17da7675b7 100644 --- a/drivers/nfc/pn544/pn544.c +++ b/drivers/nfc/pn544/pn544.c | |||
@@ -551,20 +551,25 @@ static int pn544_hci_complete_target_discovered(struct nfc_hci_dev *hdev, | |||
551 | return -EPROTO; | 551 | return -EPROTO; |
552 | } | 552 | } |
553 | 553 | ||
554 | r = nfc_hci_send_cmd(hdev, PN544_RF_READER_F_GATE, | 554 | /* Type F NFC-DEP IDm has prefix 0x01FE */ |
555 | PN544_RF_READER_CMD_ACTIVATE_NEXT, | 555 | if ((uid_skb->data[0] == 0x01) && (uid_skb->data[1] == 0xfe)) { |
556 | uid_skb->data, uid_skb->len, NULL); | 556 | kfree_skb(uid_skb); |
557 | kfree_skb(uid_skb); | 557 | r = nfc_hci_send_cmd(hdev, |
558 | |||
559 | r = nfc_hci_send_cmd(hdev, | ||
560 | PN544_RF_READER_NFCIP1_INITIATOR_GATE, | 558 | PN544_RF_READER_NFCIP1_INITIATOR_GATE, |
561 | PN544_HCI_CMD_CONTINUE_ACTIVATION, | 559 | PN544_HCI_CMD_CONTINUE_ACTIVATION, |
562 | NULL, 0, NULL); | 560 | NULL, 0, NULL); |
563 | if (r < 0) | 561 | if (r < 0) |
564 | return r; | 562 | return r; |
565 | 563 | ||
566 | target->hci_reader_gate = PN544_RF_READER_NFCIP1_INITIATOR_GATE; | 564 | target->supported_protocols = NFC_PROTO_NFC_DEP_MASK; |
567 | target->supported_protocols = NFC_PROTO_NFC_DEP_MASK; | 565 | target->hci_reader_gate = |
566 | PN544_RF_READER_NFCIP1_INITIATOR_GATE; | ||
567 | } else { | ||
568 | r = nfc_hci_send_cmd(hdev, PN544_RF_READER_F_GATE, | ||
569 | PN544_RF_READER_CMD_ACTIVATE_NEXT, | ||
570 | uid_skb->data, uid_skb->len, NULL); | ||
571 | kfree_skb(uid_skb); | ||
572 | } | ||
568 | } else if (target->supported_protocols & NFC_PROTO_ISO14443_MASK) { | 573 | } else if (target->supported_protocols & NFC_PROTO_ISO14443_MASK) { |
569 | /* | 574 | /* |
570 | * TODO: maybe other ISO 14443 require some kind of continue | 575 | * TODO: maybe other ISO 14443 require some kind of continue |
@@ -706,12 +711,9 @@ static int pn544_hci_check_presence(struct nfc_hci_dev *hdev, | |||
706 | return nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE, | 711 | return nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE, |
707 | PN544_RF_READER_CMD_ACTIVATE_NEXT, | 712 | PN544_RF_READER_CMD_ACTIVATE_NEXT, |
708 | target->nfcid1, target->nfcid1_len, NULL); | 713 | target->nfcid1, target->nfcid1_len, NULL); |
709 | } else if (target->supported_protocols & NFC_PROTO_JEWEL_MASK) { | 714 | } else if (target->supported_protocols & (NFC_PROTO_JEWEL_MASK | |
710 | return nfc_hci_send_cmd(hdev, target->hci_reader_gate, | 715 | NFC_PROTO_FELICA_MASK)) { |
711 | PN544_JEWEL_RAW_CMD, NULL, 0, NULL); | 716 | return -EOPNOTSUPP; |
712 | } else if (target->supported_protocols & NFC_PROTO_FELICA_MASK) { | ||
713 | return nfc_hci_send_cmd(hdev, PN544_RF_READER_F_GATE, | ||
714 | PN544_FELICA_RAW, NULL, 0, NULL); | ||
715 | } else if (target->supported_protocols & NFC_PROTO_NFC_DEP_MASK) { | 717 | } else if (target->supported_protocols & NFC_PROTO_NFC_DEP_MASK) { |
716 | return nfc_hci_send_cmd(hdev, target->hci_reader_gate, | 718 | return nfc_hci_send_cmd(hdev, target->hci_reader_gate, |
717 | PN544_HCI_CMD_ATTREQUEST, | 719 | PN544_HCI_CMD_ATTREQUEST, |
@@ -801,7 +803,7 @@ int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name, | |||
801 | struct nfc_hci_dev **hdev) | 803 | struct nfc_hci_dev **hdev) |
802 | { | 804 | { |
803 | struct pn544_hci_info *info; | 805 | struct pn544_hci_info *info; |
804 | u32 protocols, se; | 806 | u32 protocols; |
805 | struct nfc_hci_init_data init_data; | 807 | struct nfc_hci_init_data init_data; |
806 | int r; | 808 | int r; |
807 | 809 | ||
@@ -834,10 +836,8 @@ int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name, | |||
834 | NFC_PROTO_ISO14443_B_MASK | | 836 | NFC_PROTO_ISO14443_B_MASK | |
835 | NFC_PROTO_NFC_DEP_MASK; | 837 | NFC_PROTO_NFC_DEP_MASK; |
836 | 838 | ||
837 | se = NFC_SE_UICC | NFC_SE_EMBEDDED; | ||
838 | |||
839 | info->hdev = nfc_hci_allocate_device(&pn544_hci_ops, &init_data, 0, | 839 | info->hdev = nfc_hci_allocate_device(&pn544_hci_ops, &init_data, 0, |
840 | protocols, se, llc_name, | 840 | protocols, llc_name, |
841 | phy_headroom + PN544_CMDS_HEADROOM, | 841 | phy_headroom + PN544_CMDS_HEADROOM, |
842 | phy_tailroom, phy_payload); | 842 | phy_tailroom, phy_payload); |
843 | if (!info->hdev) { | 843 | if (!info->hdev) { |
diff --git a/drivers/ssb/driver_chipcommon_sflash.c b/drivers/ssb/driver_chipcommon_sflash.c index 720665ca2bb1..e84cf04f4416 100644 --- a/drivers/ssb/driver_chipcommon_sflash.c +++ b/drivers/ssb/driver_chipcommon_sflash.c | |||
@@ -9,6 +9,19 @@ | |||
9 | 9 | ||
10 | #include "ssb_private.h" | 10 | #include "ssb_private.h" |
11 | 11 | ||
12 | static struct resource ssb_sflash_resource = { | ||
13 | .name = "ssb_sflash", | ||
14 | .start = SSB_FLASH2, | ||
15 | .end = 0, | ||
16 | .flags = IORESOURCE_MEM | IORESOURCE_READONLY, | ||
17 | }; | ||
18 | |||
19 | struct platform_device ssb_sflash_dev = { | ||
20 | .name = "ssb_sflash", | ||
21 | .resource = &ssb_sflash_resource, | ||
22 | .num_resources = 1, | ||
23 | }; | ||
24 | |||
12 | struct ssb_sflash_tbl_e { | 25 | struct ssb_sflash_tbl_e { |
13 | char *name; | 26 | char *name; |
14 | u32 id; | 27 | u32 id; |
@@ -16,7 +29,7 @@ struct ssb_sflash_tbl_e { | |||
16 | u16 numblocks; | 29 | u16 numblocks; |
17 | }; | 30 | }; |
18 | 31 | ||
19 | static struct ssb_sflash_tbl_e ssb_sflash_st_tbl[] = { | 32 | static const struct ssb_sflash_tbl_e ssb_sflash_st_tbl[] = { |
20 | { "M25P20", 0x11, 0x10000, 4, }, | 33 | { "M25P20", 0x11, 0x10000, 4, }, |
21 | { "M25P40", 0x12, 0x10000, 8, }, | 34 | { "M25P40", 0x12, 0x10000, 8, }, |
22 | 35 | ||
@@ -27,7 +40,7 @@ static struct ssb_sflash_tbl_e ssb_sflash_st_tbl[] = { | |||
27 | { 0 }, | 40 | { 0 }, |
28 | }; | 41 | }; |
29 | 42 | ||
30 | static struct ssb_sflash_tbl_e ssb_sflash_sst_tbl[] = { | 43 | static const struct ssb_sflash_tbl_e ssb_sflash_sst_tbl[] = { |
31 | { "SST25WF512", 1, 0x1000, 16, }, | 44 | { "SST25WF512", 1, 0x1000, 16, }, |
32 | { "SST25VF512", 0x48, 0x1000, 16, }, | 45 | { "SST25VF512", 0x48, 0x1000, 16, }, |
33 | { "SST25WF010", 2, 0x1000, 32, }, | 46 | { "SST25WF010", 2, 0x1000, 32, }, |
@@ -45,7 +58,7 @@ static struct ssb_sflash_tbl_e ssb_sflash_sst_tbl[] = { | |||
45 | { 0 }, | 58 | { 0 }, |
46 | }; | 59 | }; |
47 | 60 | ||
48 | static struct ssb_sflash_tbl_e ssb_sflash_at_tbl[] = { | 61 | static const struct ssb_sflash_tbl_e ssb_sflash_at_tbl[] = { |
49 | { "AT45DB011", 0xc, 256, 512, }, | 62 | { "AT45DB011", 0xc, 256, 512, }, |
50 | { "AT45DB021", 0x14, 256, 1024, }, | 63 | { "AT45DB021", 0x14, 256, 1024, }, |
51 | { "AT45DB041", 0x1c, 256, 2048, }, | 64 | { "AT45DB041", 0x1c, 256, 2048, }, |
@@ -73,7 +86,8 @@ static void ssb_sflash_cmd(struct ssb_chipcommon *cc, u32 opcode) | |||
73 | /* Initialize serial flash access */ | 86 | /* Initialize serial flash access */ |
74 | int ssb_sflash_init(struct ssb_chipcommon *cc) | 87 | int ssb_sflash_init(struct ssb_chipcommon *cc) |
75 | { | 88 | { |
76 | struct ssb_sflash_tbl_e *e; | 89 | struct ssb_sflash *sflash = &cc->dev->bus->mipscore.sflash; |
90 | const struct ssb_sflash_tbl_e *e; | ||
77 | u32 id, id2; | 91 | u32 id, id2; |
78 | 92 | ||
79 | switch (cc->capabilities & SSB_CHIPCO_CAP_FLASHT) { | 93 | switch (cc->capabilities & SSB_CHIPCO_CAP_FLASHT) { |
@@ -131,9 +145,21 @@ int ssb_sflash_init(struct ssb_chipcommon *cc) | |||
131 | return -ENOTSUPP; | 145 | return -ENOTSUPP; |
132 | } | 146 | } |
133 | 147 | ||
148 | sflash->window = SSB_FLASH2; | ||
149 | sflash->blocksize = e->blocksize; | ||
150 | sflash->numblocks = e->numblocks; | ||
151 | sflash->size = sflash->blocksize * sflash->numblocks; | ||
152 | sflash->present = true; | ||
153 | |||
134 | pr_info("Found %s serial flash (blocksize: 0x%X, blocks: %d)\n", | 154 | pr_info("Found %s serial flash (blocksize: 0x%X, blocks: %d)\n", |
135 | e->name, e->blocksize, e->numblocks); | 155 | e->name, e->blocksize, e->numblocks); |
136 | 156 | ||
157 | /* Prepare platform device, but don't register it yet. It's too early, | ||
158 | * malloc (required by device_private_init) is not available yet. */ | ||
159 | ssb_sflash_dev.resource[0].end = ssb_sflash_dev.resource[0].start + | ||
160 | sflash->size; | ||
161 | ssb_sflash_dev.dev.platform_data = sflash; | ||
162 | |||
137 | pr_err("Serial flash support is not implemented yet!\n"); | 163 | pr_err("Serial flash support is not implemented yet!\n"); |
138 | 164 | ||
139 | return -ENOTSUPP; | 165 | return -ENOTSUPP; |
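The hunk above only prepares the ssb_sflash platform device and stores the detected flash geometry as its platform data; registration happens later in ssb_devices_register() (see the main.c hunk below). A minimal sketch, assuming a hypothetical consumer driver, of how that data could be picked up; only the "ssb_sflash" name and the struct ssb_sflash fields come from this patch:

	/* Hypothetical consumer (e.g. a future MTD driver) binding to the
	 * "ssb_sflash" platform device prepared by ssb_sflash_init().
	 */
	static int mydrv_sflash_probe(struct platform_device *pdev)
	{
		struct ssb_sflash *sflash = pdev->dev.platform_data;

		if (!sflash || !sflash->present)
			return -ENODEV;

		pr_info("serial flash window 0x%08X, %u bytes\n",
			sflash->window, sflash->size);
		return 0;
	}

	static struct platform_driver mydrv_sflash_driver = {
		.probe	= mydrv_sflash_probe,
		.driver	= { .name = "ssb_sflash" },
	};
	module_platform_driver(mydrv_sflash_driver);
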
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c index 812775a4bfb6..e55ddf7cd7c2 100644 --- a/drivers/ssb/main.c +++ b/drivers/ssb/main.c | |||
@@ -553,6 +553,14 @@ static int ssb_devices_register(struct ssb_bus *bus) | |||
553 | } | 553 | } |
554 | #endif | 554 | #endif |
555 | 555 | ||
556 | #ifdef CONFIG_SSB_SFLASH | ||
557 | if (bus->mipscore.sflash.present) { | ||
558 | err = platform_device_register(&ssb_sflash_dev); | ||
559 | if (err) | ||
560 | pr_err("Error registering serial flash\n"); | ||
561 | } | ||
562 | #endif | ||
563 | |||
556 | return 0; | 564 | return 0; |
557 | error: | 565 | error: |
558 | /* Unwind the already registered devices. */ | 566 | /* Unwind the already registered devices. */ |
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h index 4671f17f09af..eb507a50a564 100644 --- a/drivers/ssb/ssb_private.h +++ b/drivers/ssb/ssb_private.h | |||
@@ -243,6 +243,10 @@ static inline int ssb_sflash_init(struct ssb_chipcommon *cc) | |||
243 | extern struct platform_device ssb_pflash_dev; | 243 | extern struct platform_device ssb_pflash_dev; |
244 | #endif | 244 | #endif |
245 | 245 | ||
246 | #ifdef CONFIG_SSB_SFLASH | ||
247 | extern struct platform_device ssb_sflash_dev; | ||
248 | #endif | ||
249 | |||
246 | #ifdef CONFIG_SSB_DRIVER_EXTIF | 250 | #ifdef CONFIG_SSB_DRIVER_EXTIF |
247 | extern u32 ssb_extif_watchdog_timer_set_wdt(struct bcm47xx_wdt *wdt, u32 ticks); | 251 | extern u32 ssb_extif_watchdog_timer_set_wdt(struct bcm47xx_wdt *wdt, u32 ticks); |
248 | extern u32 ssb_extif_watchdog_timer_set_ms(struct bcm47xx_wdt *wdt, u32 ms); | 252 | extern u32 ssb_extif_watchdog_timer_set_ms(struct bcm47xx_wdt *wdt, u32 ms); |
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index d826e5a84af0..b0dc87a2a376 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h | |||
@@ -146,6 +146,7 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) | |||
146 | #define IEEE80211_MAX_RTS_THRESHOLD 2353 | 146 | #define IEEE80211_MAX_RTS_THRESHOLD 2353 |
147 | #define IEEE80211_MAX_AID 2007 | 147 | #define IEEE80211_MAX_AID 2007 |
148 | #define IEEE80211_MAX_TIM_LEN 251 | 148 | #define IEEE80211_MAX_TIM_LEN 251 |
149 | #define IEEE80211_MAX_MESH_PEERINGS 63 | ||
149 | /* Maximum size for the MA-UNITDATA primitive, 802.11 standard section | 150 | /* Maximum size for the MA-UNITDATA primitive, 802.11 standard section |
150 | 6.2.1.1.2. | 151 | 6.2.1.1.2. |
151 | 152 | ||
diff --git a/include/linux/ssb/ssb_driver_mips.h b/include/linux/ssb/ssb_driver_mips.h index afe79d40a99e..6535e4718fde 100644 --- a/include/linux/ssb/ssb_driver_mips.h +++ b/include/linux/ssb/ssb_driver_mips.h | |||
@@ -20,6 +20,18 @@ struct ssb_pflash { | |||
20 | u32 window_size; | 20 | u32 window_size; |
21 | }; | 21 | }; |
22 | 22 | ||
23 | #ifdef CONFIG_SSB_SFLASH | ||
24 | struct ssb_sflash { | ||
25 | bool present; | ||
26 | u32 window; | ||
27 | u32 blocksize; | ||
28 | u16 numblocks; | ||
29 | u32 size; | ||
30 | |||
31 | void *priv; | ||
32 | }; | ||
33 | #endif | ||
34 | |||
23 | struct ssb_mipscore { | 35 | struct ssb_mipscore { |
24 | struct ssb_device *dev; | 36 | struct ssb_device *dev; |
25 | 37 | ||
@@ -27,6 +39,9 @@ struct ssb_mipscore { | |||
27 | struct ssb_serial_port serial_ports[4]; | 39 | struct ssb_serial_port serial_ports[4]; |
28 | 40 | ||
29 | struct ssb_pflash pflash; | 41 | struct ssb_pflash pflash; |
42 | #ifdef CONFIG_SSB_SFLASH | ||
43 | struct ssb_sflash sflash; | ||
44 | #endif | ||
30 | }; | 45 | }; |
31 | 46 | ||
32 | extern void ssb_mipscore_init(struct ssb_mipscore *mcore); | 47 | extern void ssb_mipscore_init(struct ssb_mipscore *mcore); |
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 6dd19593e333..6a43c34ce96f 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h | |||
@@ -1124,6 +1124,9 @@ struct bss_parameters { | |||
1124 | * setting for new peer links. | 1124 | * setting for new peer links. |
1125 | * @dot11MeshAwakeWindowDuration: The duration in TUs the STA will remain awake | 1125 | * @dot11MeshAwakeWindowDuration: The duration in TUs the STA will remain awake |
1126 | * after transmitting its beacon. | 1126 | * after transmitting its beacon. |
1127 | * @plink_timeout: If no tx activity is seen from a STA we've established | ||
1128 | * peering with for longer than this time (in seconds), then remove it | ||
1129 | * from the STA's list of peers. Default is 30 minutes. | ||
1127 | */ | 1130 | */ |
1128 | struct mesh_config { | 1131 | struct mesh_config { |
1129 | u16 dot11MeshRetryTimeout; | 1132 | u16 dot11MeshRetryTimeout; |
@@ -1153,6 +1156,7 @@ struct mesh_config { | |||
1153 | u16 dot11MeshHWMPconfirmationInterval; | 1156 | u16 dot11MeshHWMPconfirmationInterval; |
1154 | enum nl80211_mesh_power_mode power_mode; | 1157 | enum nl80211_mesh_power_mode power_mode; |
1155 | u16 dot11MeshAwakeWindowDuration; | 1158 | u16 dot11MeshAwakeWindowDuration; |
1159 | u32 plink_timeout; | ||
1156 | }; | 1160 | }; |
1157 | 1161 | ||
1158 | /** | 1162 | /** |
@@ -1172,6 +1176,7 @@ struct mesh_config { | |||
1172 | * @dtim_period: DTIM period to use | 1176 | * @dtim_period: DTIM period to use |
1173 | * @beacon_interval: beacon interval to use | 1177 | * @beacon_interval: beacon interval to use |
1174 | * @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a] | 1178 | * @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a] |
1179 | * @basic_rates: basic rates to use when creating the mesh | ||
1175 | * | 1180 | * |
1176 | * These parameters are fixed when the mesh is created. | 1181 | * These parameters are fixed when the mesh is created. |
1177 | */ | 1182 | */ |
@@ -1191,6 +1196,7 @@ struct mesh_setup { | |||
1191 | u8 dtim_period; | 1196 | u8 dtim_period; |
1192 | u16 beacon_interval; | 1197 | u16 beacon_interval; |
1193 | int mcast_rate[IEEE80211_NUM_BANDS]; | 1198 | int mcast_rate[IEEE80211_NUM_BANDS]; |
1199 | u32 basic_rates; | ||
1194 | }; | 1200 | }; |
1195 | 1201 | ||
1196 | /** | 1202 | /** |
@@ -2654,7 +2660,7 @@ struct wiphy { | |||
2654 | u32 hw_version; | 2660 | u32 hw_version; |
2655 | 2661 | ||
2656 | #ifdef CONFIG_PM | 2662 | #ifdef CONFIG_PM |
2657 | struct wiphy_wowlan_support wowlan; | 2663 | const struct wiphy_wowlan_support *wowlan; |
2658 | struct cfg80211_wowlan *wowlan_config; | 2664 | struct cfg80211_wowlan *wowlan_config; |
2659 | #endif | 2665 | #endif |
2660 | 2666 | ||
@@ -2853,7 +2859,7 @@ struct cfg80211_cached_keys; | |||
2853 | * @current_bss: (private) Used by the internal configuration code | 2859 | * @current_bss: (private) Used by the internal configuration code |
2854 | * @channel: (private) Used by the internal configuration code to track | 2860 | * @channel: (private) Used by the internal configuration code to track |
2855 | * the user-set AP, monitor and WDS channel | 2861 | * the user-set AP, monitor and WDS channel |
2856 | * @preset_chan: (private) Used by the internal configuration code to | 2862 | * @preset_chandef: (private) Used by the internal configuration code to |
2857 | * track the channel to be used for AP later | 2863 | * track the channel to be used for AP later |
2858 | * @bssid: (private) Used by the internal configuration code | 2864 | * @bssid: (private) Used by the internal configuration code |
2859 | * @ssid: (private) Used by the internal configuration code | 2865 | * @ssid: (private) Used by the internal configuration code |
@@ -2875,6 +2881,15 @@ struct cfg80211_cached_keys; | |||
2875 | * @p2p_started: true if this is a P2P Device that has been started | 2881 | * @p2p_started: true if this is a P2P Device that has been started |
2876 | * @cac_started: true if DFS channel availability check has been started | 2882 | * @cac_started: true if DFS channel availability check has been started |
2877 | * @cac_start_time: timestamp (jiffies) when the dfs state was entered. | 2883 | * @cac_start_time: timestamp (jiffies) when the dfs state was entered. |
2884 | * @ps: powersave mode is enabled | ||
2885 | * @ps_timeout: dynamic powersave timeout | ||
2886 | * @ap_unexpected_nlportid: (private) netlink port ID of application | ||
2887 | * registered for unexpected class 3 frames (AP mode) | ||
2888 | * @conn: (private) cfg80211 software SME connection state machine data | ||
2889 | * @connect_keys: (private) keys to set after connection is established | ||
2890 | * @ibss_fixed: (private) IBSS is using fixed BSSID | ||
2891 | * @event_list: (private) list for internal event processing | ||
2892 | * @event_lock: (private) lock for event list | ||
2878 | */ | 2893 | */ |
2879 | struct wireless_dev { | 2894 | struct wireless_dev { |
2880 | struct wiphy *wiphy; | 2895 | struct wiphy *wiphy; |
@@ -2898,11 +2913,6 @@ struct wireless_dev { | |||
2898 | /* currently used for IBSS and SME - might be rearranged later */ | 2913 | /* currently used for IBSS and SME - might be rearranged later */ |
2899 | u8 ssid[IEEE80211_MAX_SSID_LEN]; | 2914 | u8 ssid[IEEE80211_MAX_SSID_LEN]; |
2900 | u8 ssid_len, mesh_id_len, mesh_id_up_len; | 2915 | u8 ssid_len, mesh_id_len, mesh_id_up_len; |
2901 | enum { | ||
2902 | CFG80211_SME_IDLE, | ||
2903 | CFG80211_SME_CONNECTING, | ||
2904 | CFG80211_SME_CONNECTED, | ||
2905 | } sme_state; | ||
2906 | struct cfg80211_conn *conn; | 2916 | struct cfg80211_conn *conn; |
2907 | struct cfg80211_cached_keys *connect_keys; | 2917 | struct cfg80211_cached_keys *connect_keys; |
2908 | 2918 | ||
@@ -3432,59 +3442,66 @@ void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); | |||
3432 | void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); | 3442 | void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); |
3433 | 3443 | ||
3434 | /** | 3444 | /** |
3435 | * cfg80211_send_rx_auth - notification of processed authentication | 3445 | * cfg80211_rx_mlme_mgmt - notification of processed MLME management frame |
3436 | * @dev: network device | 3446 | * @dev: network device |
3437 | * @buf: authentication frame (header + body) | 3447 | * @buf: authentication frame (header + body) |
3438 | * @len: length of the frame data | 3448 | * @len: length of the frame data |
3439 | * | 3449 | * |
3440 | * This function is called whenever an authentication has been processed in | 3450 | * This function is called whenever an authentication, disassociation or |
3441 | * station mode. The driver is required to call either this function or | 3451 | * deauthentication frame has been received and processed in station mode. |
3442 | * cfg80211_send_auth_timeout() to indicate the result of cfg80211_ops::auth() | 3452 | * After being asked to authenticate via cfg80211_ops::auth() the driver must |
3443 | * call. This function may sleep. The caller must hold the corresponding wdev's | 3453 | * call either this function or cfg80211_auth_timeout(). |
3444 | * mutex. | 3454 | * After being asked to associate via cfg80211_ops::assoc() the driver must |
3455 | * call either this function or cfg80211_auth_timeout(). | ||
3456 | * While connected, the driver must call this for received and processed | ||
3457 | * disassociation and deauthentication frames. If the frame couldn't be used | ||
3458 | * because it was unprotected, the driver must call the function | ||
3459 | * cfg80211_rx_unprot_mlme_mgmt() instead. | ||
3460 | * | ||
3461 | * This function may sleep. The caller must hold the corresponding wdev's mutex. | ||
3445 | */ | 3462 | */ |
3446 | void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len); | 3463 | void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len); |
3447 | 3464 | ||
3448 | /** | 3465 | /** |
3449 | * cfg80211_send_auth_timeout - notification of timed out authentication | 3466 | * cfg80211_auth_timeout - notification of timed out authentication |
3450 | * @dev: network device | 3467 | * @dev: network device |
3451 | * @addr: The MAC address of the device with which the authentication timed out | 3468 | * @addr: The MAC address of the device with which the authentication timed out |
3452 | * | 3469 | * |
3453 | * This function may sleep. The caller must hold the corresponding wdev's | 3470 | * This function may sleep. The caller must hold the corresponding wdev's |
3454 | * mutex. | 3471 | * mutex. |
3455 | */ | 3472 | */ |
3456 | void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr); | 3473 | void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr); |
3457 | 3474 | ||
3458 | /** | 3475 | /** |
3459 | * cfg80211_send_rx_assoc - notification of processed association | 3476 | * cfg80211_rx_assoc_resp - notification of processed association response |
3460 | * @dev: network device | 3477 | * @dev: network device |
3461 | * @bss: the BSS struct association was requested for, the struct reference | 3478 | * @bss: the BSS that association was requested with, ownership of the pointer |
3462 | * is owned by cfg80211 after this call | 3479 | * moves to cfg80211 in this call |
3463 | * @buf: (re)association response frame (header + body) | 3480 | * @buf: authentication frame (header + body) |
3464 | * @len: length of the frame data | 3481 | * @len: length of the frame data |
3465 | * | 3482 | * |
3466 | * This function is called whenever a (re)association response has been | 3483 | * After being asked to associate via cfg80211_ops::assoc() the driver must |
3467 | * processed in station mode. The driver is required to call either this | 3484 | * call either this function or cfg80211_auth_timeout(). |
3468 | * function or cfg80211_send_assoc_timeout() to indicate the result of | 3485 | * |
3469 | * cfg80211_ops::assoc() call. This function may sleep. The caller must hold | 3486 | * This function may sleep. The caller must hold the corresponding wdev's mutex. |
3470 | * the corresponding wdev's mutex. | ||
3471 | */ | 3487 | */ |
3472 | void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss, | 3488 | void cfg80211_rx_assoc_resp(struct net_device *dev, |
3489 | struct cfg80211_bss *bss, | ||
3473 | const u8 *buf, size_t len); | 3490 | const u8 *buf, size_t len); |
3474 | 3491 | ||
3475 | /** | 3492 | /** |
3476 | * cfg80211_send_assoc_timeout - notification of timed out association | 3493 | * cfg80211_assoc_timeout - notification of timed out association |
3477 | * @dev: network device | 3494 | * @dev: network device |
3478 | * @addr: The MAC address of the device with which the association timed out | 3495 | * @addr: The MAC address of the device with which the association timed out |
3479 | * | 3496 | * |
3480 | * This function may sleep. The caller must hold the corresponding wdev's mutex. | 3497 | * This function may sleep. The caller must hold the corresponding wdev's mutex. |
3481 | */ | 3498 | */ |
3482 | void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr); | 3499 | void cfg80211_assoc_timeout(struct net_device *dev, const u8 *addr); |
3483 | 3500 | ||
3484 | /** | 3501 | /** |
3485 | * cfg80211_send_deauth - notification of processed deauthentication | 3502 | * cfg80211_tx_mlme_mgmt - notification of transmitted deauth/disassoc frame |
3486 | * @dev: network device | 3503 | * @dev: network device |
3487 | * @buf: deauthentication frame (header + body) | 3504 | * @buf: 802.11 frame (header + body) |
3488 | * @len: length of the frame data | 3505 | * @len: length of the frame data |
3489 | * | 3506 | * |
3490 | * This function is called whenever deauthentication has been processed in | 3507 | * This function is called whenever deauthentication has been processed in |
@@ -3492,46 +3509,20 @@ void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr); | |||
3492 | * locally generated ones. This function may sleep. The caller must hold the | 3509 | * locally generated ones. This function may sleep. The caller must hold the |
3493 | * corresponding wdev's mutex. | 3510 | * corresponding wdev's mutex. |
3494 | */ | 3511 | */ |
3495 | void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len); | 3512 | void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len); |
3496 | |||
3497 | /** | ||
3498 | * cfg80211_send_disassoc - notification of processed disassociation | ||
3499 | * @dev: network device | ||
3500 | * @buf: disassociation response frame (header + body) | ||
3501 | * @len: length of the frame data | ||
3502 | * | ||
3503 | * This function is called whenever disassociation has been processed in | ||
3504 | * station mode. This includes both received disassociation frames and locally | ||
3505 | * generated ones. This function may sleep. The caller must hold the | ||
3506 | * corresponding wdev's mutex. | ||
3507 | */ | ||
3508 | void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len); | ||
3509 | 3513 | ||
3510 | /** | 3514 | /** |
3511 | * cfg80211_send_unprot_deauth - notification of unprotected deauthentication | 3515 | * cfg80211_rx_unprot_mlme_mgmt - notification of unprotected mlme mgmt frame |
3512 | * @dev: network device | 3516 | * @dev: network device |
3513 | * @buf: deauthentication frame (header + body) | 3517 | * @buf: deauthentication frame (header + body) |
3514 | * @len: length of the frame data | 3518 | * @len: length of the frame data |
3515 | * | 3519 | * |
3516 | * This function is called whenever a received Deauthentication frame has been | 3520 | * This function is called whenever a received deauthentication or disassoc |
3517 | * dropped in station mode because of MFP being used but the Deauthentication | 3521 | * frame has been dropped in station mode because of MFP being used but the |
3518 | * frame was not protected. This function may sleep. | ||
3519 | */ | ||
3520 | void cfg80211_send_unprot_deauth(struct net_device *dev, const u8 *buf, | ||
3521 | size_t len); | ||
3522 | |||
3523 | /** | ||
3524 | * cfg80211_send_unprot_disassoc - notification of unprotected disassociation | ||
3525 | * @dev: network device | ||
3526 | * @buf: disassociation frame (header + body) | ||
3527 | * @len: length of the frame data | ||
3528 | * | ||
3529 | * This function is called whenever a received Disassociation frame has been | ||
3530 | * dropped in station mode because of MFP being used but the Disassociation | ||
3531 | * frame was not protected. This function may sleep. | 3522 | * frame was not protected. This function may sleep. |
3532 | */ | 3523 | */ |
3533 | void cfg80211_send_unprot_disassoc(struct net_device *dev, const u8 *buf, | 3524 | void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, |
3534 | size_t len); | 3525 | const u8 *buf, size_t len); |
3535 | 3526 | ||
3536 | /** | 3527 | /** |
3537 | * cfg80211_michael_mic_failure - notification of Michael MIC failure (TKIP) | 3528 | * cfg80211_michael_mic_failure - notification of Michael MIC failure (TKIP) |
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 1f0014bd4d87..a405a7a9775c 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h | |||
@@ -217,8 +217,8 @@ struct ieee80211_chanctx_conf { | |||
217 | * @BSS_CHANGED_TXPOWER: TX power setting changed for this interface | 217 | * @BSS_CHANGED_TXPOWER: TX power setting changed for this interface |
218 | * @BSS_CHANGED_P2P_PS: P2P powersave settings (CTWindow, opportunistic PS) | 218 | * @BSS_CHANGED_P2P_PS: P2P powersave settings (CTWindow, opportunistic PS) |
219 | * changed (currently only in P2P client mode, GO mode will be later) | 219 | * changed (currently only in P2P client mode, GO mode will be later) |
220 | * @BSS_CHANGED_DTIM_PERIOD: the DTIM period value was changed (set when | 220 | * @BSS_CHANGED_BEACON_INFO: Data from the AP's beacon became available: |
221 | * it becomes valid, managed mode only) | 221 | * currently dtim_period only is under consideration. |
222 | * @BSS_CHANGED_BANDWIDTH: The bandwidth used by this interface changed, | 222 | * @BSS_CHANGED_BANDWIDTH: The bandwidth used by this interface changed, |
223 | * note that this is only called when it changes after the channel | 223 | * note that this is only called when it changes after the channel |
224 | * context had been assigned. | 224 | * context had been assigned. |
@@ -244,7 +244,7 @@ enum ieee80211_bss_change { | |||
244 | BSS_CHANGED_PS = 1<<17, | 244 | BSS_CHANGED_PS = 1<<17, |
245 | BSS_CHANGED_TXPOWER = 1<<18, | 245 | BSS_CHANGED_TXPOWER = 1<<18, |
246 | BSS_CHANGED_P2P_PS = 1<<19, | 246 | BSS_CHANGED_P2P_PS = 1<<19, |
247 | BSS_CHANGED_DTIM_PERIOD = 1<<20, | 247 | BSS_CHANGED_BEACON_INFO = 1<<20, |
248 | BSS_CHANGED_BANDWIDTH = 1<<21, | 248 | BSS_CHANGED_BANDWIDTH = 1<<21, |
249 | 249 | ||
250 | /* when adding here, make sure to change ieee80211_reconfig */ | 250 | /* when adding here, make sure to change ieee80211_reconfig */ |
@@ -288,7 +288,7 @@ enum ieee80211_rssi_event { | |||
288 | * IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE hardware flag | 288 | * IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE hardware flag |
289 | * @dtim_period: num of beacons before the next DTIM, for beaconing, | 289 | * @dtim_period: num of beacons before the next DTIM, for beaconing, |
290 | * valid in station mode only if after the driver was notified | 290 | * valid in station mode only if after the driver was notified |
291 | * with the %BSS_CHANGED_DTIM_PERIOD flag, will be non-zero then. | 291 | * with the %BSS_CHANGED_BEACON_INFO flag, will be non-zero then. |
292 | * @sync_tsf: last beacon's/probe response's TSF timestamp (could be old | 292 | * @sync_tsf: last beacon's/probe response's TSF timestamp (could be old |
293 | * as it may have been received during scanning long ago). If the | 293 | * as it may have been received during scanning long ago). If the |
294 | * HW flag %IEEE80211_HW_TIMING_BEACON_ONLY is set, then this can | 294 | * HW flag %IEEE80211_HW_TIMING_BEACON_ONLY is set, then this can |
@@ -460,6 +460,8 @@ struct ieee80211_bss_conf { | |||
460 | * @IEEE80211_TX_CTL_DONTFRAG: Don't fragment this packet even if it | 460 | * @IEEE80211_TX_CTL_DONTFRAG: Don't fragment this packet even if it |
461 | * would be fragmented by size (this is optional, only used for | 461 | * would be fragmented by size (this is optional, only used for |
462 | * monitor injection). | 462 | * monitor injection). |
463 | * @IEEE80211_TX_CTL_PS_RESPONSE: This frame is a response to a poll | ||
464 | * frame (PS-Poll or uAPSD). | ||
463 | * | 465 | * |
464 | * Note: If you have to add new flags to the enumeration, then don't | 466 | * Note: If you have to add new flags to the enumeration, then don't |
465 | * forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary. | 467 | * forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary. |
@@ -495,6 +497,7 @@ enum mac80211_tx_control_flags { | |||
495 | IEEE80211_TX_STATUS_EOSP = BIT(28), | 497 | IEEE80211_TX_STATUS_EOSP = BIT(28), |
496 | IEEE80211_TX_CTL_USE_MINRATE = BIT(29), | 498 | IEEE80211_TX_CTL_USE_MINRATE = BIT(29), |
497 | IEEE80211_TX_CTL_DONTFRAG = BIT(30), | 499 | IEEE80211_TX_CTL_DONTFRAG = BIT(30), |
500 | IEEE80211_TX_CTL_PS_RESPONSE = BIT(31), | ||
498 | }; | 501 | }; |
499 | 502 | ||
500 | #define IEEE80211_TX_CTL_STBC_SHIFT 23 | 503 | #define IEEE80211_TX_CTL_STBC_SHIFT 23 |
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h index b87a1692b086..0af851c3b038 100644 --- a/include/net/nfc/hci.h +++ b/include/net/nfc/hci.h | |||
@@ -59,8 +59,10 @@ struct nfc_hci_ops { | |||
59 | struct nfc_target *target); | 59 | struct nfc_target *target); |
60 | int (*event_received)(struct nfc_hci_dev *hdev, u8 gate, u8 event, | 60 | int (*event_received)(struct nfc_hci_dev *hdev, u8 gate, u8 event, |
61 | struct sk_buff *skb); | 61 | struct sk_buff *skb); |
62 | int (*enable_se)(struct nfc_dev *dev, u32 secure_element); | 62 | int (*fw_upload)(struct nfc_hci_dev *hdev, const char *firmware_name); |
63 | int (*disable_se)(struct nfc_dev *dev, u32 secure_element); | 63 | int (*discover_se)(struct nfc_hci_dev *dev); |
64 | int (*enable_se)(struct nfc_hci_dev *dev, u32 se_idx); | ||
65 | int (*disable_se)(struct nfc_hci_dev *dev, u32 se_idx); | ||
64 | }; | 66 | }; |
65 | 67 | ||
66 | /* Pipes */ | 68 | /* Pipes */ |
@@ -152,7 +154,6 @@ struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops, | |||
152 | struct nfc_hci_init_data *init_data, | 154 | struct nfc_hci_init_data *init_data, |
153 | unsigned long quirks, | 155 | unsigned long quirks, |
154 | u32 protocols, | 156 | u32 protocols, |
155 | u32 supported_se, | ||
156 | const char *llc_name, | 157 | const char *llc_name, |
157 | int tx_headroom, | 158 | int tx_headroom, |
158 | int tx_tailroom, | 159 | int tx_tailroom, |
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index 5bc0c460edc0..99fc1f3a392a 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h | |||
@@ -3,6 +3,7 @@ | |||
3 | * NFC Controller (NFCC) and a Device Host (DH). | 3 | * NFC Controller (NFCC) and a Device Host (DH). |
4 | * | 4 | * |
5 | * Copyright (C) 2011 Texas Instruments, Inc. | 5 | * Copyright (C) 2011 Texas Instruments, Inc. |
6 | * Copyright (C) 2013 Intel Corporation. All rights reserved. | ||
6 | * | 7 | * |
7 | * Written by Ilan Elias <ilane@ti.com> | 8 | * Written by Ilan Elias <ilane@ti.com> |
8 | * | 9 | * |
@@ -66,7 +67,7 @@ struct nci_dev; | |||
66 | struct nci_ops { | 67 | struct nci_ops { |
67 | int (*open)(struct nci_dev *ndev); | 68 | int (*open)(struct nci_dev *ndev); |
68 | int (*close)(struct nci_dev *ndev); | 69 | int (*close)(struct nci_dev *ndev); |
69 | int (*send)(struct sk_buff *skb); | 70 | int (*send)(struct nci_dev *ndev, struct sk_buff *skb); |
70 | }; | 71 | }; |
71 | 72 | ||
72 | #define NCI_MAX_SUPPORTED_RF_INTERFACES 4 | 73 | #define NCI_MAX_SUPPORTED_RF_INTERFACES 4 |
@@ -147,13 +148,12 @@ struct nci_dev { | |||
147 | /* ----- NCI Devices ----- */ | 148 | /* ----- NCI Devices ----- */ |
148 | struct nci_dev *nci_allocate_device(struct nci_ops *ops, | 149 | struct nci_dev *nci_allocate_device(struct nci_ops *ops, |
149 | __u32 supported_protocols, | 150 | __u32 supported_protocols, |
150 | __u32 supported_se, | ||
151 | int tx_headroom, | 151 | int tx_headroom, |
152 | int tx_tailroom); | 152 | int tx_tailroom); |
153 | void nci_free_device(struct nci_dev *ndev); | 153 | void nci_free_device(struct nci_dev *ndev); |
154 | int nci_register_device(struct nci_dev *ndev); | 154 | int nci_register_device(struct nci_dev *ndev); |
155 | void nci_unregister_device(struct nci_dev *ndev); | 155 | void nci_unregister_device(struct nci_dev *ndev); |
156 | int nci_recv_frame(struct sk_buff *skb); | 156 | int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb); |
157 | 157 | ||
158 | static inline struct sk_buff *nci_skb_alloc(struct nci_dev *ndev, | 158 | static inline struct sk_buff *nci_skb_alloc(struct nci_dev *ndev, |
159 | unsigned int len, | 159 | unsigned int len, |
@@ -202,4 +202,56 @@ void nci_req_complete(struct nci_dev *ndev, int result); | |||
202 | /* ----- NCI status code ----- */ | 202 | /* ----- NCI status code ----- */ |
203 | int nci_to_errno(__u8 code); | 203 | int nci_to_errno(__u8 code); |
204 | 204 | ||
205 | /* ----- NCI over SPI acknowledge modes ----- */ | ||
206 | #define NCI_SPI_CRC_DISABLED 0x00 | ||
207 | #define NCI_SPI_CRC_ENABLED 0x01 | ||
208 | |||
209 | /* ----- NCI SPI structures ----- */ | ||
210 | struct nci_spi_dev; | ||
211 | |||
212 | struct nci_spi_ops { | ||
213 | int (*open)(struct nci_spi_dev *ndev); | ||
214 | int (*close)(struct nci_spi_dev *ndev); | ||
215 | void (*assert_int)(struct nci_spi_dev *ndev); | ||
216 | void (*deassert_int)(struct nci_spi_dev *ndev); | ||
217 | }; | ||
218 | |||
219 | struct nci_spi_dev { | ||
220 | struct nci_dev *nci_dev; | ||
221 | struct spi_device *spi; | ||
222 | struct nci_spi_ops *ops; | ||
223 | |||
224 | unsigned int xfer_udelay; /* microseconds delay between | ||
225 | transactions */ | ||
226 | u8 acknowledge_mode; | ||
227 | |||
228 | struct completion req_completion; | ||
229 | u8 req_result; | ||
230 | |||
231 | void *driver_data; | ||
232 | }; | ||
233 | |||
234 | /* ----- NCI SPI Devices ----- */ | ||
235 | struct nci_spi_dev *nci_spi_allocate_device(struct spi_device *spi, | ||
236 | struct nci_spi_ops *ops, | ||
237 | u32 supported_protocols, | ||
238 | u32 supported_se, | ||
239 | u8 acknowledge_mode, | ||
240 | unsigned int delay); | ||
241 | void nci_spi_free_device(struct nci_spi_dev *ndev); | ||
242 | int nci_spi_register_device(struct nci_spi_dev *ndev); | ||
243 | void nci_spi_unregister_device(struct nci_spi_dev *ndev); | ||
244 | int nci_spi_recv_frame(struct nci_spi_dev *ndev); | ||
245 | |||
246 | static inline void nci_spi_set_drvdata(struct nci_spi_dev *ndev, | ||
247 | void *data) | ||
248 | { | ||
249 | ndev->driver_data = data; | ||
250 | } | ||
251 | |||
252 | static inline void *nci_spi_get_drvdata(struct nci_spi_dev *ndev) | ||
253 | { | ||
254 | return ndev->driver_data; | ||
255 | } | ||
256 | |||
205 | #endif /* __NCI_CORE_H */ | 257 | #endif /* __NCI_CORE_H */ |
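The nci_ops::send() prototype and nci_recv_frame() now take the nci_dev explicitly, matching the nfcwilink conversion earlier in this patch. A short hypothetical sketch of a transport driver using the new signatures (mydrv, mydrv_hw_write and the open/close hooks are assumptions, not part of the patch):

	/* Hypothetical NCI transport driver after this API change: the nci_dev
	 * is passed explicitly instead of being carried in skb->dev.
	 */
	static int mydrv_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
	{
		struct mydrv *drv = nci_get_drvdata(ndev);

		return mydrv_hw_write(drv, skb);	/* assumed transport helper */
	}

	static struct nci_ops mydrv_nci_ops = {
		.open	= mydrv_nci_open,	/* assumed */
		.close	= mydrv_nci_close,	/* assumed */
		.send	= mydrv_nci_send,
	};

	/* RX path: frames go to the NCI core with the explicit nci_dev as well:
	 *	nci_recv_frame(drv->ndev, skb);
	 */
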
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 5eb80bb3cbb2..0e353f1658bb 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h | |||
@@ -68,8 +68,12 @@ struct nfc_ops { | |||
68 | void *cb_context); | 68 | void *cb_context); |
69 | int (*tm_send)(struct nfc_dev *dev, struct sk_buff *skb); | 69 | int (*tm_send)(struct nfc_dev *dev, struct sk_buff *skb); |
70 | int (*check_presence)(struct nfc_dev *dev, struct nfc_target *target); | 70 | int (*check_presence)(struct nfc_dev *dev, struct nfc_target *target); |
71 | int (*enable_se)(struct nfc_dev *dev, u32 secure_element); | 71 | int (*fw_upload)(struct nfc_dev *dev, const char *firmware_name); |
72 | int (*disable_se)(struct nfc_dev *dev, u32 secure_element); | 72 | |
73 | /* Secure Element API */ | ||
74 | int (*discover_se)(struct nfc_dev *dev); | ||
75 | int (*enable_se)(struct nfc_dev *dev, u32 se_idx); | ||
76 | int (*disable_se)(struct nfc_dev *dev, u32 se_idx); | ||
73 | }; | 77 | }; |
74 | 78 | ||
75 | #define NFC_TARGET_IDX_ANY -1 | 79 | #define NFC_TARGET_IDX_ANY -1 |
@@ -83,6 +87,8 @@ struct nfc_target { | |||
83 | u8 sel_res; | 87 | u8 sel_res; |
84 | u8 nfcid1_len; | 88 | u8 nfcid1_len; |
85 | u8 nfcid1[NFC_NFCID1_MAXSIZE]; | 89 | u8 nfcid1[NFC_NFCID1_MAXSIZE]; |
90 | u8 nfcid2_len; | ||
91 | u8 nfcid2[NFC_NFCID2_MAXSIZE]; | ||
86 | u8 sensb_res_len; | 92 | u8 sensb_res_len; |
87 | u8 sensb_res[NFC_SENSB_RES_MAXSIZE]; | 93 | u8 sensb_res[NFC_SENSB_RES_MAXSIZE]; |
88 | u8 sensf_res_len; | 94 | u8 sensf_res_len; |
@@ -91,6 +97,23 @@ struct nfc_target { | |||
91 | u8 logical_idx; | 97 | u8 logical_idx; |
92 | }; | 98 | }; |
93 | 99 | ||
100 | /** | ||
101 | * nfc_se - A structure for NFC accessible secure elements. | ||
102 | * | ||
103 | * @idx: The secure element index. User space will enable or | ||
104 | * disable a secure element by its index. | ||
105 | * @type: The secure element type. It can be SE_UICC or | ||
106 | * SE_EMBEDDED. | ||
107 | * @state: The secure element state, either enabled or disabled. | ||
108 | * | ||
109 | */ | ||
110 | struct nfc_se { | ||
111 | struct list_head list; | ||
112 | u32 idx; | ||
113 | u16 type; | ||
114 | u16 state; | ||
115 | }; | ||
116 | |||
94 | struct nfc_genl_data { | 117 | struct nfc_genl_data { |
95 | u32 poll_req_portid; | 118 | u32 poll_req_portid; |
96 | struct mutex genl_data_mutex; | 119 | struct mutex genl_data_mutex; |
@@ -104,6 +127,7 @@ struct nfc_dev { | |||
104 | int targets_generation; | 127 | int targets_generation; |
105 | struct device dev; | 128 | struct device dev; |
106 | bool dev_up; | 129 | bool dev_up; |
130 | bool fw_upload_in_progress; | ||
107 | u8 rf_mode; | 131 | u8 rf_mode; |
108 | bool polling; | 132 | bool polling; |
109 | struct nfc_target *active_target; | 133 | struct nfc_target *active_target; |
@@ -111,8 +135,7 @@ struct nfc_dev { | |||
111 | struct nfc_genl_data genl_data; | 135 | struct nfc_genl_data genl_data; |
112 | u32 supported_protocols; | 136 | u32 supported_protocols; |
113 | 137 | ||
114 | u32 supported_se; | 138 | struct list_head secure_elements; |
115 | u32 active_se; | ||
116 | 139 | ||
117 | int tx_headroom; | 140 | int tx_headroom; |
118 | int tx_tailroom; | 141 | int tx_tailroom; |
@@ -132,7 +155,6 @@ extern struct class nfc_class; | |||
132 | 155 | ||
133 | struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, | 156 | struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, |
134 | u32 supported_protocols, | 157 | u32 supported_protocols, |
135 | u32 supported_se, | ||
136 | int tx_headroom, | 158 | int tx_headroom, |
137 | int tx_tailroom); | 159 | int tx_tailroom); |
138 | 160 | ||
@@ -216,4 +238,7 @@ int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb); | |||
216 | 238 | ||
217 | void nfc_driver_failure(struct nfc_dev *dev, int err); | 239 | void nfc_driver_failure(struct nfc_dev *dev, int err); |
218 | 240 | ||
241 | int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type); | ||
242 | int nfc_remove_se(struct nfc_dev *dev, u32 se_idx); | ||
243 | |||
219 | #endif /* __NET_NFC_H */ | 244 | #endif /* __NET_NFC_H */ |
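A minimal driver-side sketch of the reworked secure element API above (not part of the patch; the foo_* names are hypothetical). The core calls ops->discover_se() from nfc_dev_up(), the driver answers by registering each element with nfc_add_se(), and user space later enables or disables one by index, which lands in ops->enable_se()/ops->disable_se(). The mandatory polling/transceive callbacks are elided; a real driver must still provide them or nfc_allocate_device() refuses the registration. Note that nfc_allocate_device() lost its supported_se argument, so existing callers only need to drop that parameter.

        #include <linux/module.h>
        #include <linux/nfc.h>
        #include <net/nfc/nfc.h>

        #define FOO_UICC_SE_IDX 1

        /* called by nfc_dev_up() once the device is powered */
        static int foo_discover_se(struct nfc_dev *dev)
        {
                /* report every SE found on the chip to the core */
                return nfc_add_se(dev, FOO_UICC_SE_IDX, NFC_SE_UICC);
        }

        static int foo_enable_se(struct nfc_dev *dev, u32 se_idx)
        {
                /* switch the SE on in hardware; 0 on success */
                return 0;
        }

        static int foo_disable_se(struct nfc_dev *dev, u32 se_idx)
        {
                /* switch the SE off in hardware; 0 on success */
                return 0;
        }

        static struct nfc_ops foo_nfc_ops = {
                /* .dev_up, .start_poll, .im_transceive, ... not shown */
                .discover_se    = foo_discover_se,
                .enable_se      = foo_enable_se,
                .disable_se     = foo_disable_se,
        };

nfc_remove_se() is the counterpart for hot-removable elements; anything still on the list is released (and reported removed) from nfc_release().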
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h index 7c6f627a717d..caed0f324d5f 100644 --- a/include/uapi/linux/nfc.h +++ b/include/uapi/linux/nfc.h | |||
@@ -69,6 +69,8 @@ | |||
69 | * starting a poll from a device which has a secure element enabled means | 69 | * starting a poll from a device which has a secure element enabled means |
70 | * we want to do SE based card emulation. | 70 | * we want to do SE based card emulation. |
71 | * @NFC_CMD_DISABLE_SE: Disable the physical link to a specific secure element. | 71 | * @NFC_CMD_DISABLE_SE: Disable the physical link to a specific secure element. |
72 | * @NFC_CMD_FW_UPLOAD: Request to load/flash firmware, or event to inform that | ||
73 | * some firmware was loaded | ||
72 | */ | 74 | */ |
73 | enum nfc_commands { | 75 | enum nfc_commands { |
74 | NFC_CMD_UNSPEC, | 76 | NFC_CMD_UNSPEC, |
@@ -92,6 +94,9 @@ enum nfc_commands { | |||
92 | NFC_CMD_DISABLE_SE, | 94 | NFC_CMD_DISABLE_SE, |
93 | NFC_CMD_LLC_SDREQ, | 95 | NFC_CMD_LLC_SDREQ, |
94 | NFC_EVENT_LLC_SDRES, | 96 | NFC_EVENT_LLC_SDRES, |
97 | NFC_CMD_FW_UPLOAD, | ||
98 | NFC_EVENT_SE_ADDED, | ||
99 | NFC_EVENT_SE_REMOVED, | ||
95 | /* private: internal use only */ | 100 | /* private: internal use only */ |
96 | __NFC_CMD_AFTER_LAST | 101 | __NFC_CMD_AFTER_LAST |
97 | }; | 102 | }; |
@@ -121,6 +126,9 @@ enum nfc_commands { | |||
121 | * @NFC_ATTR_LLC_PARAM_RW: Receive Window size parameter | 126 | * @NFC_ATTR_LLC_PARAM_RW: Receive Window size parameter |
122 | * @NFC_ATTR_LLC_PARAM_MIUX: MIU eXtension parameter | 127 | * @NFC_ATTR_LLC_PARAM_MIUX: MIU eXtension parameter |
123 | * @NFC_ATTR_SE: Available Secure Elements | 128 | * @NFC_ATTR_SE: Available Secure Elements |
129 | * @NFC_ATTR_FIRMWARE_NAME: Free format firmware name | ||
130 | * @NFC_ATTR_SE_INDEX: Secure element index | ||
131 | * @NFC_ATTR_SE_TYPE: Secure element type (UICC or EMBEDDED) | ||
124 | */ | 132 | */ |
125 | enum nfc_attrs { | 133 | enum nfc_attrs { |
126 | NFC_ATTR_UNSPEC, | 134 | NFC_ATTR_UNSPEC, |
@@ -143,6 +151,9 @@ enum nfc_attrs { | |||
143 | NFC_ATTR_LLC_PARAM_MIUX, | 151 | NFC_ATTR_LLC_PARAM_MIUX, |
144 | NFC_ATTR_SE, | 152 | NFC_ATTR_SE, |
145 | NFC_ATTR_LLC_SDP, | 153 | NFC_ATTR_LLC_SDP, |
154 | NFC_ATTR_FIRMWARE_NAME, | ||
155 | NFC_ATTR_SE_INDEX, | ||
156 | NFC_ATTR_SE_TYPE, | ||
146 | /* private: internal use only */ | 157 | /* private: internal use only */ |
147 | __NFC_ATTR_AFTER_LAST | 158 | __NFC_ATTR_AFTER_LAST |
148 | }; | 159 | }; |
@@ -159,9 +170,12 @@ enum nfc_sdp_attr { | |||
159 | 170 | ||
160 | #define NFC_DEVICE_NAME_MAXSIZE 8 | 171 | #define NFC_DEVICE_NAME_MAXSIZE 8 |
161 | #define NFC_NFCID1_MAXSIZE 10 | 172 | #define NFC_NFCID1_MAXSIZE 10 |
173 | #define NFC_NFCID2_MAXSIZE 8 | ||
174 | #define NFC_NFCID3_MAXSIZE 10 | ||
162 | #define NFC_SENSB_RES_MAXSIZE 12 | 175 | #define NFC_SENSB_RES_MAXSIZE 12 |
163 | #define NFC_SENSF_RES_MAXSIZE 18 | 176 | #define NFC_SENSF_RES_MAXSIZE 18 |
164 | #define NFC_GB_MAXSIZE 48 | 177 | #define NFC_GB_MAXSIZE 48 |
178 | #define NFC_FIRMWARE_NAME_MAXSIZE 32 | ||
165 | 179 | ||
166 | /* NFC protocols */ | 180 | /* NFC protocols */ |
167 | #define NFC_PROTO_JEWEL 1 | 181 | #define NFC_PROTO_JEWEL 1 |
@@ -191,10 +205,12 @@ enum nfc_sdp_attr { | |||
191 | #define NFC_PROTO_ISO14443_B_MASK (1 << NFC_PROTO_ISO14443_B) | 205 | #define NFC_PROTO_ISO14443_B_MASK (1 << NFC_PROTO_ISO14443_B) |
192 | 206 | ||
193 | /* NFC Secure Elements */ | 207 | /* NFC Secure Elements */ |
194 | #define NFC_SE_NONE 0x0 | ||
195 | #define NFC_SE_UICC 0x1 | 208 | #define NFC_SE_UICC 0x1 |
196 | #define NFC_SE_EMBEDDED 0x2 | 209 | #define NFC_SE_EMBEDDED 0x2 |
197 | 210 | ||
211 | #define NFC_SE_DISABLED 0x0 | ||
212 | #define NFC_SE_ENABLED 0x1 | ||
213 | |||
198 | struct sockaddr_nfc { | 214 | struct sockaddr_nfc { |
199 | sa_family_t sa_family; | 215 | sa_family_t sa_family; |
200 | __u32 dev_idx; | 216 | __u32 dev_idx; |
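To exercise the new command from user space, one would send NFC_CMD_FW_UPLOAD over the "nfc" generic netlink family with the device index and the firmware name attribute. A rough libnl-3 sketch (error handling is elided, and the kernel-side netlink handler lives outside this excerpt):

        #include <netlink/netlink.h>
        #include <netlink/genl/genl.h>
        #include <netlink/genl/ctrl.h>
        #include <linux/nfc.h>

        static int fw_upload(uint32_t dev_idx, const char *fw_name)
        {
                struct nl_sock *sk = nl_socket_alloc();
                struct nl_msg *msg;
                int family, err;

                genl_connect(sk);
                family = genl_ctrl_resolve(sk, NFC_GENL_NAME);

                msg = nlmsg_alloc();
                genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                            NFC_CMD_FW_UPLOAD, NFC_GENL_VERSION);
                nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev_idx);
                nla_put_string(msg, NFC_ATTR_FIRMWARE_NAME, fw_name);

                err = nl_send_auto(sk, msg);
                nlmsg_free(msg);
                nl_socket_free(sk);

                return err < 0 ? err : 0;
        }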
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 5920715278c2..ca6facf4df0c 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h | |||
@@ -2577,6 +2577,10 @@ enum nl80211_mesh_power_mode { | |||
2577 | * | 2577 | * |
2578 | * @NL80211_MESHCONF_AWAKE_WINDOW: awake window duration (in TUs) | 2578 | * @NL80211_MESHCONF_AWAKE_WINDOW: awake window duration (in TUs) |
2579 | * | 2579 | * |
2580 | * @NL80211_MESHCONF_PLINK_TIMEOUT: If no tx activity is seen from a STA we've | ||
2581 | * established peering with for longer than this time (in seconds), then | ||
2582 | * remove it from the STA's list of peers. Default is 30 minutes. | ||
2583 | * | ||
2580 | * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use | 2584 | * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use |
2581 | */ | 2585 | */ |
2582 | enum nl80211_meshconf_params { | 2586 | enum nl80211_meshconf_params { |
@@ -2608,6 +2612,7 @@ enum nl80211_meshconf_params { | |||
2608 | NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, | 2612 | NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, |
2609 | NL80211_MESHCONF_POWER_MODE, | 2613 | NL80211_MESHCONF_POWER_MODE, |
2610 | NL80211_MESHCONF_AWAKE_WINDOW, | 2614 | NL80211_MESHCONF_AWAKE_WINDOW, |
2615 | NL80211_MESHCONF_PLINK_TIMEOUT, | ||
2611 | 2616 | ||
2612 | /* keep last */ | 2617 | /* keep last */ |
2613 | __NL80211_MESHCONF_ATTR_AFTER_LAST, | 2618 | __NL80211_MESHCONF_ATTR_AFTER_LAST, |
@@ -3579,6 +3584,10 @@ enum nl80211_ap_sme_features { | |||
3579 | * Peering Management entity which may be implemented by registering for | 3584 | * Peering Management entity which may be implemented by registering for |
3580 | * beacons or NL80211_CMD_NEW_PEER_CANDIDATE events. The mesh beacon is | 3585 | * beacons or NL80211_CMD_NEW_PEER_CANDIDATE events. The mesh beacon is |
3581 | * still generated by the driver. | 3586 | * still generated by the driver. |
3587 | * @NL80211_FEATURE_ACTIVE_MONITOR: This driver supports an active monitor | ||
3588 | * interface. An active monitor interface behaves like a normal monitor | ||
3589 | * interface, but gets added to the driver. It ensures that incoming | ||
3590 | * unicast packets directed at the configured interface address get ACKed. | ||
3582 | */ | 3591 | */ |
3583 | enum nl80211_feature_flags { | 3592 | enum nl80211_feature_flags { |
3584 | NL80211_FEATURE_SK_TX_STATUS = 1 << 0, | 3593 | NL80211_FEATURE_SK_TX_STATUS = 1 << 0, |
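Two brief usage notes for the nl80211 additions above (illustrative only, not part of the patch): the active-monitor capability is advertised by the driver through the wiphy feature flags, and the new mesh peer timeout travels as a u32 number of seconds inside the nested mesh-config attribute.

        /* driver side, before ieee80211_register_hw() */
        hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;

        /* user space side, inside the NL80211_ATTR_MESH_CONFIG nest of an
         * NL80211_CMD_SET_MESH_CONFIG request (libnl); 30 minute timeout
         */
        nla_put_u32(msg, NL80211_MESHCONF_PLINK_TIMEOUT, 1800);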
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index a1c6e1ceede8..082f270b5912 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -1765,6 +1765,7 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh, | |||
1765 | /* mcast rate setting in Mesh Node */ | 1765 | /* mcast rate setting in Mesh Node */ |
1766 | memcpy(sdata->vif.bss_conf.mcast_rate, setup->mcast_rate, | 1766 | memcpy(sdata->vif.bss_conf.mcast_rate, setup->mcast_rate, |
1767 | sizeof(setup->mcast_rate)); | 1767 | sizeof(setup->mcast_rate)); |
1768 | sdata->vif.bss_conf.basic_rates = setup->basic_rates; | ||
1768 | 1769 | ||
1769 | sdata->vif.bss_conf.beacon_int = setup->beacon_interval; | 1770 | sdata->vif.bss_conf.beacon_int = setup->beacon_interval; |
1770 | sdata->vif.bss_conf.dtim_period = setup->dtim_period; | 1771 | sdata->vif.bss_conf.dtim_period = setup->dtim_period; |
@@ -1877,6 +1878,8 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy, | |||
1877 | if (_chg_mesh_attr(NL80211_MESHCONF_AWAKE_WINDOW, mask)) | 1878 | if (_chg_mesh_attr(NL80211_MESHCONF_AWAKE_WINDOW, mask)) |
1878 | conf->dot11MeshAwakeWindowDuration = | 1879 | conf->dot11MeshAwakeWindowDuration = |
1879 | nconf->dot11MeshAwakeWindowDuration; | 1880 | nconf->dot11MeshAwakeWindowDuration; |
1881 | if (_chg_mesh_attr(NL80211_MESHCONF_PLINK_TIMEOUT, mask)) | ||
1882 | conf->plink_timeout = nconf->plink_timeout; | ||
1880 | ieee80211_mbss_info_change_notify(sdata, BSS_CHANGED_BEACON); | 1883 | ieee80211_mbss_info_change_notify(sdata, BSS_CHANGED_BEACON); |
1881 | return 0; | 1884 | return 0; |
1882 | } | 1885 | } |
@@ -2844,6 +2847,12 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, | |||
2844 | return -EOPNOTSUPP; | 2847 | return -EOPNOTSUPP; |
2845 | } | 2848 | } |
2846 | 2849 | ||
2850 | /* configurations requiring offchan cannot work if no channel has been | ||
2851 | * specified | ||
2852 | */ | ||
2853 | if (need_offchan && !chan) | ||
2854 | return -EINVAL; | ||
2855 | |||
2847 | mutex_lock(&local->mtx); | 2856 | mutex_lock(&local->mtx); |
2848 | 2857 | ||
2849 | /* Check if the operating channel is the requested channel */ | 2858 | /* Check if the operating channel is the requested channel */ |
@@ -2853,10 +2862,15 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, | |||
2853 | rcu_read_lock(); | 2862 | rcu_read_lock(); |
2854 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | 2863 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); |
2855 | 2864 | ||
2856 | if (chanctx_conf) | 2865 | if (chanctx_conf) { |
2857 | need_offchan = chan != chanctx_conf->def.chan; | 2866 | need_offchan = chan && (chan != chanctx_conf->def.chan); |
2858 | else | 2867 | } else if (!chan) { |
2868 | ret = -EINVAL; | ||
2869 | rcu_read_unlock(); | ||
2870 | goto out_unlock; | ||
2871 | } else { | ||
2859 | need_offchan = true; | 2872 | need_offchan = true; |
2873 | } | ||
2860 | rcu_read_unlock(); | 2874 | rcu_read_unlock(); |
2861 | } | 2875 | } |
2862 | 2876 | ||
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 923e1772e8f3..f97cd9d9105f 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -366,7 +366,7 @@ struct ieee80211_mgd_assoc_data { | |||
366 | u8 ssid_len; | 366 | u8 ssid_len; |
367 | u8 supp_rates_len; | 367 | u8 supp_rates_len; |
368 | bool wmm, uapsd; | 368 | bool wmm, uapsd; |
369 | bool have_beacon, need_beacon; | 369 | bool need_beacon; |
370 | bool synced; | 370 | bool synced; |
371 | bool timeout_started; | 371 | bool timeout_started; |
372 | 372 | ||
@@ -404,6 +404,7 @@ struct ieee80211_if_managed { | |||
404 | 404 | ||
405 | bool powersave; /* powersave requested for this iface */ | 405 | bool powersave; /* powersave requested for this iface */ |
406 | bool broken_ap; /* AP is broken -- turn off powersave */ | 406 | bool broken_ap; /* AP is broken -- turn off powersave */ |
407 | bool have_beacon; | ||
407 | u8 dtim_period; | 408 | u8 dtim_period; |
408 | enum ieee80211_smps_mode req_smps, /* requested smps mode */ | 409 | enum ieee80211_smps_mode req_smps, /* requested smps mode */ |
409 | driver_smps_mode; /* smps mode request */ | 410 | driver_smps_mode; /* smps mode request */ |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 1998f1475267..626c83c042d7 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -686,8 +686,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
686 | return -EINVAL; | 686 | return -EINVAL; |
687 | 687 | ||
688 | #ifdef CONFIG_PM | 688 | #ifdef CONFIG_PM |
689 | if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns) && | 689 | if (hw->wiphy->wowlan && (!local->ops->suspend || !local->ops->resume)) |
690 | (!local->ops->suspend || !local->ops->resume)) | ||
691 | return -EINVAL; | 690 | return -EINVAL; |
692 | #endif | 691 | #endif |
693 | 692 | ||
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index b3d1fdd46368..6c33af482df4 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -274,8 +274,7 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata, | |||
274 | *pos++ = ifmsh->mesh_auth_id; | 274 | *pos++ = ifmsh->mesh_auth_id; |
275 | /* Mesh Formation Info - number of neighbors */ | 275 | /* Mesh Formation Info - number of neighbors */ |
276 | neighbors = atomic_read(&ifmsh->estab_plinks); | 276 | neighbors = atomic_read(&ifmsh->estab_plinks); |
277 | /* Number of neighbor mesh STAs or 15 whichever is smaller */ | 277 | neighbors = min_t(int, neighbors, IEEE80211_MAX_MESH_PEERINGS); |
278 | neighbors = (neighbors > 15) ? 15 : neighbors; | ||
279 | *pos++ = neighbors << 1; | 278 | *pos++ = neighbors << 1; |
280 | /* Mesh capability */ | 279 | /* Mesh capability */ |
281 | *pos = IEEE80211_MESHCONF_CAPAB_FORWARDING; | 280 | *pos = IEEE80211_MESHCONF_CAPAB_FORWARDING; |
@@ -576,13 +575,11 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata) | |||
576 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 575 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
577 | u32 changed; | 576 | u32 changed; |
578 | 577 | ||
579 | ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT); | 578 | ieee80211_sta_expire(sdata, ifmsh->mshcfg.plink_timeout * HZ); |
580 | mesh_path_expire(sdata); | 579 | mesh_path_expire(sdata); |
581 | 580 | ||
582 | changed = mesh_accept_plinks_update(sdata); | 581 | changed = mesh_accept_plinks_update(sdata); |
583 | sdata_lock(sdata); | ||
584 | ieee80211_mbss_info_change_notify(sdata, changed); | 582 | ieee80211_mbss_info_change_notify(sdata, changed); |
585 | sdata_unlock(sdata); | ||
586 | 583 | ||
587 | mod_timer(&ifmsh->housekeeping_timer, | 584 | mod_timer(&ifmsh->housekeeping_timer, |
588 | round_jiffies(jiffies + | 585 | round_jiffies(jiffies + |
@@ -741,9 +738,6 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) | |||
741 | BSS_CHANGED_HT | | 738 | BSS_CHANGED_HT | |
742 | BSS_CHANGED_BASIC_RATES | | 739 | BSS_CHANGED_BASIC_RATES | |
743 | BSS_CHANGED_BEACON_INT; | 740 | BSS_CHANGED_BEACON_INT; |
744 | enum ieee80211_band band = ieee80211_get_sdata_band(sdata); | ||
745 | struct ieee80211_supported_band *sband = | ||
746 | sdata->local->hw.wiphy->bands[band]; | ||
747 | 741 | ||
748 | local->fif_other_bss++; | 742 | local->fif_other_bss++; |
749 | /* mesh ifaces must set allmulti to forward mcast traffic */ | 743 | /* mesh ifaces must set allmulti to forward mcast traffic */ |
@@ -761,7 +755,6 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) | |||
761 | sdata->vif.bss_conf.ht_operation_mode = | 755 | sdata->vif.bss_conf.ht_operation_mode = |
762 | ifmsh->mshcfg.ht_opmode; | 756 | ifmsh->mshcfg.ht_opmode; |
763 | sdata->vif.bss_conf.enable_beacon = true; | 757 | sdata->vif.bss_conf.enable_beacon = true; |
764 | sdata->vif.bss_conf.basic_rates = ieee80211_mandatory_rates(sband); | ||
765 | 758 | ||
766 | changed |= ieee80211_mps_local_status_update(sdata); | 759 | changed |= ieee80211_mps_local_status_update(sdata); |
767 | 760 | ||
@@ -789,12 +782,10 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) | |||
789 | sdata->vif.bss_conf.enable_beacon = false; | 782 | sdata->vif.bss_conf.enable_beacon = false; |
790 | clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); | 783 | clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); |
791 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); | 784 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); |
792 | sdata_lock(sdata); | ||
793 | bcn = rcu_dereference_protected(ifmsh->beacon, | 785 | bcn = rcu_dereference_protected(ifmsh->beacon, |
794 | lockdep_is_held(&sdata->wdev.mtx)); | 786 | lockdep_is_held(&sdata->wdev.mtx)); |
795 | rcu_assign_pointer(ifmsh->beacon, NULL); | 787 | rcu_assign_pointer(ifmsh->beacon, NULL); |
796 | kfree_rcu(bcn, rcu_head); | 788 | kfree_rcu(bcn, rcu_head); |
797 | sdata_unlock(sdata); | ||
798 | 789 | ||
799 | /* flush STAs and mpaths on this iface */ | 790 | /* flush STAs and mpaths on this iface */ |
800 | sta_info_flush(sdata); | 791 | sta_info_flush(sdata); |
@@ -807,14 +798,6 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) | |||
807 | del_timer_sync(&sdata->u.mesh.housekeeping_timer); | 798 | del_timer_sync(&sdata->u.mesh.housekeeping_timer); |
808 | del_timer_sync(&sdata->u.mesh.mesh_path_root_timer); | 799 | del_timer_sync(&sdata->u.mesh.mesh_path_root_timer); |
809 | del_timer_sync(&sdata->u.mesh.mesh_path_timer); | 800 | del_timer_sync(&sdata->u.mesh.mesh_path_timer); |
810 | /* | ||
811 | * If the timer fired while we waited for it, it will have | ||
812 | * requeued the work. Now the work will be running again | ||
813 | * but will not rearm the timer again because it checks | ||
814 | * whether the interface is running, which, at this point, | ||
815 | * it no longer is. | ||
816 | */ | ||
817 | cancel_work_sync(&sdata->work); | ||
818 | 801 | ||
819 | local->fif_other_bss--; | 802 | local->fif_other_bss--; |
820 | atomic_dec(&local->iff_allmultis); | 803 | atomic_dec(&local->iff_allmultis); |
@@ -955,6 +938,12 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
955 | struct ieee80211_mgmt *mgmt; | 938 | struct ieee80211_mgmt *mgmt; |
956 | u16 stype; | 939 | u16 stype; |
957 | 940 | ||
941 | sdata_lock(sdata); | ||
942 | |||
943 | /* mesh already went down */ | ||
944 | if (!sdata->wdev.mesh_id_len) | ||
945 | goto out; | ||
946 | |||
958 | rx_status = IEEE80211_SKB_RXCB(skb); | 947 | rx_status = IEEE80211_SKB_RXCB(skb); |
959 | mgmt = (struct ieee80211_mgmt *) skb->data; | 948 | mgmt = (struct ieee80211_mgmt *) skb->data; |
960 | stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE; | 949 | stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE; |
@@ -972,12 +961,20 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
972 | ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status); | 961 | ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status); |
973 | break; | 962 | break; |
974 | } | 963 | } |
964 | out: | ||
965 | sdata_unlock(sdata); | ||
975 | } | 966 | } |
976 | 967 | ||
977 | void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata) | 968 | void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata) |
978 | { | 969 | { |
979 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 970 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
980 | 971 | ||
972 | sdata_lock(sdata); | ||
973 | |||
974 | /* mesh already went down */ | ||
975 | if (!sdata->wdev.mesh_id_len) | ||
976 | goto out; | ||
977 | |||
981 | if (ifmsh->preq_queue_len && | 978 | if (ifmsh->preq_queue_len && |
982 | time_after(jiffies, | 979 | time_after(jiffies, |
983 | ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval))) | 980 | ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval))) |
@@ -997,6 +994,9 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata) | |||
997 | 994 | ||
998 | if (test_and_clear_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags)) | 995 | if (test_and_clear_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags)) |
999 | mesh_sync_adjust_tbtt(sdata); | 996 | mesh_sync_adjust_tbtt(sdata); |
997 | |||
998 | out: | ||
999 | sdata_unlock(sdata); | ||
1000 | } | 1000 | } |
1001 | 1001 | ||
1002 | void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) | 1002 | void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) |
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index da158774eebb..01a28bca6e9b 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h | |||
@@ -188,7 +188,6 @@ struct mesh_rmc { | |||
188 | u32 idx_mask; | 188 | u32 idx_mask; |
189 | }; | 189 | }; |
190 | 190 | ||
191 | #define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ) | ||
192 | #define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ) | 191 | #define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ) |
193 | 192 | ||
194 | #define MESH_PATH_EXPIRE (600 * HZ) | 193 | #define MESH_PATH_EXPIRE (600 * HZ) |
@@ -324,14 +323,14 @@ static inline | |||
324 | u32 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) | 323 | u32 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) |
325 | { | 324 | { |
326 | atomic_inc(&sdata->u.mesh.estab_plinks); | 325 | atomic_inc(&sdata->u.mesh.estab_plinks); |
327 | return mesh_accept_plinks_update(sdata); | 326 | return mesh_accept_plinks_update(sdata) | BSS_CHANGED_BEACON; |
328 | } | 327 | } |
329 | 328 | ||
330 | static inline | 329 | static inline |
331 | u32 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) | 330 | u32 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) |
332 | { | 331 | { |
333 | atomic_dec(&sdata->u.mesh.estab_plinks); | 332 | atomic_dec(&sdata->u.mesh.estab_plinks); |
334 | return mesh_accept_plinks_update(sdata); | 333 | return mesh_accept_plinks_update(sdata) | BSS_CHANGED_BEACON; |
335 | } | 334 | } |
336 | 335 | ||
337 | static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata) | 336 | static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata) |
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 6c4da99bc4fb..09bebed99416 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c | |||
@@ -517,9 +517,7 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata, | |||
517 | ieee80211_mps_frame_release(sta, elems); | 517 | ieee80211_mps_frame_release(sta, elems); |
518 | out: | 518 | out: |
519 | rcu_read_unlock(); | 519 | rcu_read_unlock(); |
520 | sdata_lock(sdata); | ||
521 | ieee80211_mbss_info_change_notify(sdata, changed); | 520 | ieee80211_mbss_info_change_notify(sdata, changed); |
522 | sdata_unlock(sdata); | ||
523 | } | 521 | } |
524 | 522 | ||
525 | static void mesh_plink_timer(unsigned long data) | 523 | static void mesh_plink_timer(unsigned long data) |
@@ -1070,9 +1068,6 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, | |||
1070 | 1068 | ||
1071 | rcu_read_unlock(); | 1069 | rcu_read_unlock(); |
1072 | 1070 | ||
1073 | if (changed) { | 1071 | if (changed) |
1074 | sdata_lock(sdata); | ||
1075 | ieee80211_mbss_info_change_notify(sdata, changed); | 1072 | ieee80211_mbss_info_change_notify(sdata, changed); |
1076 | sdata_unlock(sdata); | ||
1077 | } | ||
1078 | } | 1073 | } |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 118540b16729..9e49f557fa5c 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -880,6 +880,10 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local, | |||
880 | 880 | ||
881 | IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT | | 881 | IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT | |
882 | IEEE80211_TX_INTFL_OFFCHAN_TX_OK; | 882 | IEEE80211_TX_INTFL_OFFCHAN_TX_OK; |
883 | |||
884 | if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) | ||
885 | IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; | ||
886 | |||
883 | if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL | | 887 | if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL | |
884 | IEEE80211_STA_CONNECTION_POLL)) | 888 | IEEE80211_STA_CONNECTION_POLL)) |
885 | IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE; | 889 | IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE; |
@@ -1356,7 +1360,7 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata) | |||
1356 | IEEE80211_STA_CONNECTION_POLL)) | 1360 | IEEE80211_STA_CONNECTION_POLL)) |
1357 | return false; | 1361 | return false; |
1358 | 1362 | ||
1359 | if (!sdata->vif.bss_conf.dtim_period) | 1363 | if (!mgd->have_beacon) |
1360 | return false; | 1364 | return false; |
1361 | 1365 | ||
1362 | rcu_read_lock(); | 1366 | rcu_read_lock(); |
@@ -1767,7 +1771,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | |||
1767 | 1771 | ||
1768 | ieee80211_led_assoc(local, 1); | 1772 | ieee80211_led_assoc(local, 1); |
1769 | 1773 | ||
1770 | if (sdata->u.mgd.assoc_data->have_beacon) { | 1774 | if (sdata->u.mgd.have_beacon) { |
1771 | /* | 1775 | /* |
1772 | * If the AP is buggy we may get here with no DTIM period | 1776 | * If the AP is buggy we may get here with no DTIM period |
1773 | * known, so assume it's 1 which is the only safe assumption | 1777 | * known, so assume it's 1 which is the only safe assumption |
@@ -1775,7 +1779,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | |||
1775 | * probably just won't work at all. | 1779 | * probably just won't work at all. |
1776 | */ | 1780 | */ |
1777 | bss_conf->dtim_period = sdata->u.mgd.dtim_period ?: 1; | 1781 | bss_conf->dtim_period = sdata->u.mgd.dtim_period ?: 1; |
1778 | bss_info_changed |= BSS_CHANGED_DTIM_PERIOD; | 1782 | bss_info_changed |= BSS_CHANGED_BEACON_INFO; |
1779 | } else { | 1783 | } else { |
1780 | bss_conf->dtim_period = 0; | 1784 | bss_conf->dtim_period = 0; |
1781 | } | 1785 | } |
@@ -1899,6 +1903,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, | |||
1899 | del_timer_sync(&sdata->u.mgd.chswitch_timer); | 1903 | del_timer_sync(&sdata->u.mgd.chswitch_timer); |
1900 | 1904 | ||
1901 | sdata->vif.bss_conf.dtim_period = 0; | 1905 | sdata->vif.bss_conf.dtim_period = 0; |
1906 | ifmgd->have_beacon = false; | ||
1902 | 1907 | ||
1903 | ifmgd->flags = 0; | 1908 | ifmgd->flags = 0; |
1904 | ieee80211_vif_release_channel(sdata); | 1909 | ieee80211_vif_release_channel(sdata); |
@@ -2151,7 +2156,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) | |||
2151 | IEEE80211_MAX_QUEUE_MAP, | 2156 | IEEE80211_MAX_QUEUE_MAP, |
2152 | IEEE80211_QUEUE_STOP_REASON_CSA); | 2157 | IEEE80211_QUEUE_STOP_REASON_CSA); |
2153 | 2158 | ||
2154 | cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN); | 2159 | cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, |
2160 | IEEE80211_DEAUTH_FRAME_LEN); | ||
2155 | sdata_unlock(sdata); | 2161 | sdata_unlock(sdata); |
2156 | } | 2162 | } |
2157 | 2163 | ||
@@ -2298,7 +2304,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, | |||
2298 | sdata_info(sdata, "%pM denied authentication (status %d)\n", | 2304 | sdata_info(sdata, "%pM denied authentication (status %d)\n", |
2299 | mgmt->sa, status_code); | 2305 | mgmt->sa, status_code); |
2300 | ieee80211_destroy_auth_data(sdata, false); | 2306 | ieee80211_destroy_auth_data(sdata, false); |
2301 | cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, len); | 2307 | cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); |
2302 | return; | 2308 | return; |
2303 | } | 2309 | } |
2304 | 2310 | ||
@@ -2333,7 +2339,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, | |||
2333 | * Report auth frame to user space for processing since another | 2339 | * Report auth frame to user space for processing since another |
2334 | * round of Authentication frames is still needed. | 2340 | * round of Authentication frames is still needed. |
2335 | */ | 2341 | */ |
2336 | cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, len); | 2342 | cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); |
2337 | return; | 2343 | return; |
2338 | } | 2344 | } |
2339 | 2345 | ||
@@ -2350,7 +2356,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, | |||
2350 | } | 2356 | } |
2351 | mutex_unlock(&sdata->local->sta_mtx); | 2357 | mutex_unlock(&sdata->local->sta_mtx); |
2352 | 2358 | ||
2353 | cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, len); | 2359 | cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); |
2354 | return; | 2360 | return; |
2355 | out_err: | 2361 | out_err: |
2356 | mutex_unlock(&sdata->local->sta_mtx); | 2362 | mutex_unlock(&sdata->local->sta_mtx); |
@@ -2383,7 +2389,7 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, | |||
2383 | 2389 | ||
2384 | ieee80211_set_disassoc(sdata, 0, 0, false, NULL); | 2390 | ieee80211_set_disassoc(sdata, 0, 0, false, NULL); |
2385 | 2391 | ||
2386 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, len); | 2392 | cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); |
2387 | } | 2393 | } |
2388 | 2394 | ||
2389 | 2395 | ||
@@ -2409,7 +2415,7 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, | |||
2409 | 2415 | ||
2410 | ieee80211_set_disassoc(sdata, 0, 0, false, NULL); | 2416 | ieee80211_set_disassoc(sdata, 0, 0, false, NULL); |
2411 | 2417 | ||
2412 | cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, len); | 2418 | cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); |
2413 | } | 2419 | } |
2414 | 2420 | ||
2415 | static void ieee80211_get_rates(struct ieee80211_supported_band *sband, | 2421 | static void ieee80211_get_rates(struct ieee80211_supported_band *sband, |
@@ -2780,7 +2786,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2780 | /* oops -- internal error -- send timeout for now */ | 2786 | /* oops -- internal error -- send timeout for now */ |
2781 | ieee80211_destroy_assoc_data(sdata, false); | 2787 | ieee80211_destroy_assoc_data(sdata, false); |
2782 | cfg80211_put_bss(sdata->local->hw.wiphy, bss); | 2788 | cfg80211_put_bss(sdata->local->hw.wiphy, bss); |
2783 | cfg80211_send_assoc_timeout(sdata->dev, mgmt->bssid); | 2789 | cfg80211_assoc_timeout(sdata->dev, mgmt->bssid); |
2784 | return; | 2790 | return; |
2785 | } | 2791 | } |
2786 | sdata_info(sdata, "associated\n"); | 2792 | sdata_info(sdata, "associated\n"); |
@@ -2793,7 +2799,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2793 | ieee80211_destroy_assoc_data(sdata, true); | 2799 | ieee80211_destroy_assoc_data(sdata, true); |
2794 | } | 2800 | } |
2795 | 2801 | ||
2796 | cfg80211_send_rx_assoc(sdata->dev, bss, (u8 *)mgmt, len); | 2802 | cfg80211_rx_assoc_resp(sdata->dev, bss, (u8 *)mgmt, len); |
2797 | } | 2803 | } |
2798 | 2804 | ||
2799 | static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, | 2805 | static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, |
@@ -2805,24 +2811,9 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, | |||
2805 | int freq; | 2811 | int freq; |
2806 | struct ieee80211_bss *bss; | 2812 | struct ieee80211_bss *bss; |
2807 | struct ieee80211_channel *channel; | 2813 | struct ieee80211_channel *channel; |
2808 | bool need_ps = false; | ||
2809 | 2814 | ||
2810 | sdata_assert_lock(sdata); | 2815 | sdata_assert_lock(sdata); |
2811 | 2816 | ||
2812 | if ((sdata->u.mgd.associated && | ||
2813 | ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) || | ||
2814 | (sdata->u.mgd.assoc_data && | ||
2815 | ether_addr_equal(mgmt->bssid, | ||
2816 | sdata->u.mgd.assoc_data->bss->bssid))) { | ||
2817 | /* not previously set so we may need to recalc */ | ||
2818 | need_ps = sdata->u.mgd.associated && !sdata->u.mgd.dtim_period; | ||
2819 | |||
2820 | if (elems->tim && !elems->parse_error) { | ||
2821 | const struct ieee80211_tim_ie *tim_ie = elems->tim; | ||
2822 | sdata->u.mgd.dtim_period = tim_ie->dtim_period; | ||
2823 | } | ||
2824 | } | ||
2825 | |||
2826 | if (elems->ds_params) | 2817 | if (elems->ds_params) |
2827 | freq = ieee80211_channel_to_frequency(elems->ds_params[0], | 2818 | freq = ieee80211_channel_to_frequency(elems->ds_params[0], |
2828 | rx_status->band); | 2819 | rx_status->band); |
@@ -2843,12 +2834,6 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, | |||
2843 | !ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) | 2834 | !ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) |
2844 | return; | 2835 | return; |
2845 | 2836 | ||
2846 | if (need_ps) { | ||
2847 | mutex_lock(&local->iflist_mtx); | ||
2848 | ieee80211_recalc_ps(local, -1); | ||
2849 | mutex_unlock(&local->iflist_mtx); | ||
2850 | } | ||
2851 | |||
2852 | ieee80211_sta_process_chanswitch(sdata, rx_status->mactime, | 2837 | ieee80211_sta_process_chanswitch(sdata, rx_status->mactime, |
2853 | elems, true); | 2838 | elems, true); |
2854 | 2839 | ||
@@ -2962,7 +2947,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
2962 | len - baselen, false, &elems); | 2947 | len - baselen, false, &elems); |
2963 | 2948 | ||
2964 | ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems); | 2949 | ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems); |
2965 | ifmgd->assoc_data->have_beacon = true; | 2950 | if (elems.tim && !elems.parse_error) { |
2951 | const struct ieee80211_tim_ie *tim_ie = elems.tim; | ||
2952 | ifmgd->dtim_period = tim_ie->dtim_period; | ||
2953 | } | ||
2954 | ifmgd->have_beacon = true; | ||
2966 | ifmgd->assoc_data->need_beacon = false; | 2955 | ifmgd->assoc_data->need_beacon = false; |
2967 | if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) { | 2956 | if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) { |
2968 | sdata->vif.bss_conf.sync_tsf = | 2957 | sdata->vif.bss_conf.sync_tsf = |
@@ -3144,7 +3133,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
3144 | * If we haven't had a beacon before, tell the driver about the | 3133 | * If we haven't had a beacon before, tell the driver about the |
3145 | * DTIM period (and beacon timing if desired) now. | 3134 | * DTIM period (and beacon timing if desired) now. |
3146 | */ | 3135 | */ |
3147 | if (!bss_conf->dtim_period) { | 3136 | if (!ifmgd->have_beacon) { |
3148 | /* a few bogus AP send dtim_period = 0 or no TIM IE */ | 3137 | /* a few bogus AP send dtim_period = 0 or no TIM IE */ |
3149 | if (elems.tim) | 3138 | if (elems.tim) |
3150 | bss_conf->dtim_period = elems.tim->dtim_period ?: 1; | 3139 | bss_conf->dtim_period = elems.tim->dtim_period ?: 1; |
@@ -3163,7 +3152,13 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
3163 | sdata->vif.bss_conf.sync_dtim_count = 0; | 3152 | sdata->vif.bss_conf.sync_dtim_count = 0; |
3164 | } | 3153 | } |
3165 | 3154 | ||
3166 | changed |= BSS_CHANGED_DTIM_PERIOD; | 3155 | changed |= BSS_CHANGED_BEACON_INFO; |
3156 | ifmgd->have_beacon = true; | ||
3157 | |||
3158 | mutex_lock(&local->iflist_mtx); | ||
3159 | ieee80211_recalc_ps(local, -1); | ||
3160 | mutex_unlock(&local->iflist_mtx); | ||
3161 | |||
3167 | ieee80211_recalc_ps_vif(sdata); | 3162 | ieee80211_recalc_ps_vif(sdata); |
3168 | } | 3163 | } |
3169 | 3164 | ||
@@ -3186,8 +3181,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
3186 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, | 3181 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, |
3187 | WLAN_REASON_DEAUTH_LEAVING, | 3182 | WLAN_REASON_DEAUTH_LEAVING, |
3188 | true, deauth_buf); | 3183 | true, deauth_buf); |
3189 | cfg80211_send_deauth(sdata->dev, deauth_buf, | 3184 | cfg80211_tx_mlme_mgmt(sdata->dev, deauth_buf, |
3190 | sizeof(deauth_buf)); | 3185 | sizeof(deauth_buf)); |
3191 | return; | 3186 | return; |
3192 | } | 3187 | } |
3193 | 3188 | ||
@@ -3305,7 +3300,8 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, | |||
3305 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, | 3300 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, |
3306 | tx, frame_buf); | 3301 | tx, frame_buf); |
3307 | 3302 | ||
3308 | cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN); | 3303 | cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, |
3304 | IEEE80211_DEAUTH_FRAME_LEN); | ||
3309 | } | 3305 | } |
3310 | 3306 | ||
3311 | static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) | 3307 | static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) |
@@ -3496,15 +3492,14 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) | |||
3496 | 3492 | ||
3497 | ieee80211_destroy_auth_data(sdata, false); | 3493 | ieee80211_destroy_auth_data(sdata, false); |
3498 | 3494 | ||
3499 | cfg80211_send_auth_timeout(sdata->dev, bssid); | 3495 | cfg80211_auth_timeout(sdata->dev, bssid); |
3500 | } | 3496 | } |
3501 | } else if (ifmgd->auth_data && ifmgd->auth_data->timeout_started) | 3497 | } else if (ifmgd->auth_data && ifmgd->auth_data->timeout_started) |
3502 | run_again(sdata, ifmgd->auth_data->timeout); | 3498 | run_again(sdata, ifmgd->auth_data->timeout); |
3503 | 3499 | ||
3504 | if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started && | 3500 | if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started && |
3505 | time_after(jiffies, ifmgd->assoc_data->timeout)) { | 3501 | time_after(jiffies, ifmgd->assoc_data->timeout)) { |
3506 | if ((ifmgd->assoc_data->need_beacon && | 3502 | if ((ifmgd->assoc_data->need_beacon && !ifmgd->have_beacon) || |
3507 | !ifmgd->assoc_data->have_beacon) || | ||
3508 | ieee80211_do_assoc(sdata)) { | 3503 | ieee80211_do_assoc(sdata)) { |
3509 | u8 bssid[ETH_ALEN]; | 3504 | u8 bssid[ETH_ALEN]; |
3510 | 3505 | ||
@@ -3512,7 +3507,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) | |||
3512 | 3507 | ||
3513 | ieee80211_destroy_assoc_data(sdata, false); | 3508 | ieee80211_destroy_assoc_data(sdata, false); |
3514 | 3509 | ||
3515 | cfg80211_send_assoc_timeout(sdata->dev, bssid); | 3510 | cfg80211_assoc_timeout(sdata->dev, bssid); |
3516 | } | 3511 | } |
3517 | } else if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started) | 3512 | } else if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started) |
3518 | run_again(sdata, ifmgd->assoc_data->timeout); | 3513 | run_again(sdata, ifmgd->assoc_data->timeout); |
@@ -4061,8 +4056,8 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, | |||
4061 | WLAN_REASON_UNSPECIFIED, | 4056 | WLAN_REASON_UNSPECIFIED, |
4062 | false, frame_buf); | 4057 | false, frame_buf); |
4063 | 4058 | ||
4064 | cfg80211_send_deauth(sdata->dev, frame_buf, | 4059 | cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, |
4065 | sizeof(frame_buf)); | 4060 | sizeof(frame_buf)); |
4066 | } | 4061 | } |
4067 | 4062 | ||
4068 | sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid); | 4063 | sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid); |
@@ -4124,8 +4119,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, | |||
4124 | WLAN_REASON_UNSPECIFIED, | 4119 | WLAN_REASON_UNSPECIFIED, |
4125 | false, frame_buf); | 4120 | false, frame_buf); |
4126 | 4121 | ||
4127 | cfg80211_send_deauth(sdata->dev, frame_buf, | 4122 | cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, |
4128 | sizeof(frame_buf)); | 4123 | sizeof(frame_buf)); |
4129 | } | 4124 | } |
4130 | 4125 | ||
4131 | if (ifmgd->auth_data && !ifmgd->auth_data->done) { | 4126 | if (ifmgd->auth_data && !ifmgd->auth_data->done) { |
@@ -4272,6 +4267,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, | |||
4272 | 4267 | ||
4273 | ifmgd->assoc_data = assoc_data; | 4268 | ifmgd->assoc_data = assoc_data; |
4274 | ifmgd->dtim_period = 0; | 4269 | ifmgd->dtim_period = 0; |
4270 | ifmgd->have_beacon = false; | ||
4275 | 4271 | ||
4276 | err = ieee80211_prep_connection(sdata, req->bss, true); | 4272 | err = ieee80211_prep_connection(sdata, req->bss, true); |
4277 | if (err) | 4273 | if (err) |
@@ -4303,7 +4299,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, | |||
4303 | ifmgd->dtim_period = tim->dtim_period; | 4299 | ifmgd->dtim_period = tim->dtim_period; |
4304 | dtim_count = tim->dtim_count; | 4300 | dtim_count = tim->dtim_count; |
4305 | } | 4301 | } |
4306 | assoc_data->have_beacon = true; | 4302 | ifmgd->have_beacon = true; |
4307 | assoc_data->timeout = jiffies; | 4303 | assoc_data->timeout = jiffies; |
4308 | assoc_data->timeout_started = true; | 4304 | assoc_data->timeout_started = true; |
4309 | 4305 | ||
@@ -4378,8 +4374,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, | |||
4378 | 4374 | ||
4379 | out: | 4375 | out: |
4380 | if (report_frame) | 4376 | if (report_frame) |
4381 | cfg80211_send_deauth(sdata->dev, frame_buf, | 4377 | cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, |
4382 | IEEE80211_DEAUTH_FRAME_LEN); | 4378 | IEEE80211_DEAUTH_FRAME_LEN); |
4383 | 4379 | ||
4384 | return 0; | 4380 | return 0; |
4385 | } | 4381 | } |
@@ -4409,8 +4405,8 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, | |||
4409 | req->reason_code, !req->local_state_change, | 4405 | req->reason_code, !req->local_state_change, |
4410 | frame_buf); | 4406 | frame_buf); |
4411 | 4407 | ||
4412 | cfg80211_send_disassoc(sdata->dev, frame_buf, | 4408 | cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, |
4413 | IEEE80211_DEAUTH_FRAME_LEN); | 4409 | IEEE80211_DEAUTH_FRAME_LEN); |
4414 | 4410 | ||
4415 | return 0; | 4411 | return 0; |
4416 | } | 4412 | } |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index bdd7b4a719e9..23dbcfc69b3b 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -1747,27 +1747,21 @@ static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) | |||
1747 | if (unlikely(!ieee80211_has_protected(fc) && | 1747 | if (unlikely(!ieee80211_has_protected(fc) && |
1748 | ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && | 1748 | ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && |
1749 | rx->key)) { | 1749 | rx->key)) { |
1750 | if (ieee80211_is_deauth(fc)) | 1750 | if (ieee80211_is_deauth(fc) || |
1751 | cfg80211_send_unprot_deauth(rx->sdata->dev, | 1751 | ieee80211_is_disassoc(fc)) |
1752 | rx->skb->data, | 1752 | cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, |
1753 | rx->skb->len); | 1753 | rx->skb->data, |
1754 | else if (ieee80211_is_disassoc(fc)) | 1754 | rx->skb->len); |
1755 | cfg80211_send_unprot_disassoc(rx->sdata->dev, | ||
1756 | rx->skb->data, | ||
1757 | rx->skb->len); | ||
1758 | return -EACCES; | 1755 | return -EACCES; |
1759 | } | 1756 | } |
1760 | /* BIP does not use Protected field, so need to check MMIE */ | 1757 | /* BIP does not use Protected field, so need to check MMIE */ |
1761 | if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && | 1758 | if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && |
1762 | ieee80211_get_mmie_keyidx(rx->skb) < 0)) { | 1759 | ieee80211_get_mmie_keyidx(rx->skb) < 0)) { |
1763 | if (ieee80211_is_deauth(fc)) | 1760 | if (ieee80211_is_deauth(fc) || |
1764 | cfg80211_send_unprot_deauth(rx->sdata->dev, | 1761 | ieee80211_is_disassoc(fc)) |
1765 | rx->skb->data, | 1762 | cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, |
1766 | rx->skb->len); | 1763 | rx->skb->data, |
1767 | else if (ieee80211_is_disassoc(fc)) | 1764 | rx->skb->len); |
1768 | cfg80211_send_unprot_disassoc(rx->sdata->dev, | ||
1769 | rx->skb->data, | ||
1770 | rx->skb->len); | ||
1771 | return -EACCES; | 1765 | return -EACCES; |
1772 | } | 1766 | } |
1773 | /* | 1767 | /* |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index a04c5671d7fd..b4297982d34a 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -1132,6 +1132,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata, | |||
1132 | * ends the poll/service period. | 1132 | * ends the poll/service period. |
1133 | */ | 1133 | */ |
1134 | info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER | | 1134 | info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER | |
1135 | IEEE80211_TX_CTL_PS_RESPONSE | | ||
1135 | IEEE80211_TX_STATUS_EOSP | | 1136 | IEEE80211_TX_STATUS_EOSP | |
1136 | IEEE80211_TX_CTL_REQ_TX_STATUS; | 1137 | IEEE80211_TX_CTL_REQ_TX_STATUS; |
1137 | 1138 | ||
@@ -1269,7 +1270,8 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta, | |||
1269 | * STA may still remain is PS mode after this frame | 1270 | * STA may still remain is PS mode after this frame |
1270 | * exchange. | 1271 | * exchange. |
1271 | */ | 1272 | */ |
1272 | info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; | 1273 | info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER | |
1274 | IEEE80211_TX_CTL_PS_RESPONSE; | ||
1273 | 1275 | ||
1274 | /* | 1276 | /* |
1275 | * Use MoreData flag to indicate whether there are | 1277 | * Use MoreData flag to indicate whether there are |
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 41c28b977f7c..bd12fc54266c 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -297,6 +297,9 @@ struct sta_ampdu_mlme { | |||
297 | * @rcu_head: RCU head used for freeing this station struct | 297 | * @rcu_head: RCU head used for freeing this station struct |
298 | * @cur_max_bandwidth: maximum bandwidth to use for TX to the station, | 298 | * @cur_max_bandwidth: maximum bandwidth to use for TX to the station, |
299 | * taken from HT/VHT capabilities or VHT operating mode notification | 299 | * taken from HT/VHT capabilities or VHT operating mode notification |
300 | * @chains: chains ever used for RX from this station | ||
301 | * @chain_signal_last: last signal (per chain) | ||
302 | * @chain_signal_avg: signal average (per chain) | ||
300 | */ | 303 | */ |
301 | struct sta_info { | 304 | struct sta_info { |
302 | /* General information, mostly static */ | 305 | /* General information, mostly static */ |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 34be9336b5d1..4105d0ca963e 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -1790,12 +1790,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1790 | break; | 1790 | break; |
1791 | #ifdef CONFIG_MAC80211_MESH | 1791 | #ifdef CONFIG_MAC80211_MESH |
1792 | case NL80211_IFTYPE_MESH_POINT: | 1792 | case NL80211_IFTYPE_MESH_POINT: |
1793 | if (!sdata->u.mesh.mshcfg.dot11MeshTTL) { | ||
1794 | /* Do not send frames with mesh_ttl == 0 */ | ||
1795 | sdata->u.mesh.mshstats.dropped_frames_ttl++; | ||
1796 | goto fail_rcu; | ||
1797 | } | ||
1798 | |||
1799 | if (!is_multicast_ether_addr(skb->data)) { | 1793 | if (!is_multicast_ether_addr(skb->data)) { |
1800 | struct sta_info *next_hop; | 1794 | struct sta_info *next_hop; |
1801 | bool mpp_lookup = true; | 1795 | bool mpp_lookup = true; |
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index c75d3db2a31c..22654452a561 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -1584,8 +1584,9 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1584 | BSS_CHANGED_ARP_FILTER | | 1584 | BSS_CHANGED_ARP_FILTER | |
1585 | BSS_CHANGED_PS; | 1585 | BSS_CHANGED_PS; |
1586 | 1586 | ||
1587 | if (sdata->u.mgd.dtim_period) | 1587 | /* Re-send beacon info report to the driver */ |
1588 | changed |= BSS_CHANGED_DTIM_PERIOD; | 1588 | if (sdata->u.mgd.have_beacon) |
1589 | changed |= BSS_CHANGED_BEACON_INFO; | ||
1589 | 1590 | ||
1590 | sdata_lock(sdata); | 1591 | sdata_lock(sdata); |
1591 | ieee80211_bss_info_change_notify(sdata, changed); | 1592 | ieee80211_bss_info_change_notify(sdata, changed); |
diff --git a/net/nfc/core.c b/net/nfc/core.c index 40d2527693da..dc96a83aa6ab 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c | |||
@@ -44,6 +44,47 @@ DEFINE_MUTEX(nfc_devlist_mutex); | |||
44 | /* NFC device ID bitmap */ | 44 | /* NFC device ID bitmap */ |
45 | static DEFINE_IDA(nfc_index_ida); | 45 | static DEFINE_IDA(nfc_index_ida); |
46 | 46 | ||
47 | int nfc_fw_upload(struct nfc_dev *dev, const char *firmware_name) | ||
48 | { | ||
49 | int rc = 0; | ||
50 | |||
51 | pr_debug("%s do firmware %s\n", dev_name(&dev->dev), firmware_name); | ||
52 | |||
53 | device_lock(&dev->dev); | ||
54 | |||
55 | if (!device_is_registered(&dev->dev)) { | ||
56 | rc = -ENODEV; | ||
57 | goto error; | ||
58 | } | ||
59 | |||
60 | if (dev->dev_up) { | ||
61 | rc = -EBUSY; | ||
62 | goto error; | ||
63 | } | ||
64 | |||
65 | if (!dev->ops->fw_upload) { | ||
66 | rc = -EOPNOTSUPP; | ||
67 | goto error; | ||
68 | } | ||
69 | |||
70 | dev->fw_upload_in_progress = true; | ||
71 | rc = dev->ops->fw_upload(dev, firmware_name); | ||
72 | if (rc) | ||
73 | dev->fw_upload_in_progress = false; | ||
74 | |||
75 | error: | ||
76 | device_unlock(&dev->dev); | ||
77 | return rc; | ||
78 | } | ||
79 | |||
80 | int nfc_fw_upload_done(struct nfc_dev *dev, const char *firmware_name) | ||
81 | { | ||
82 | dev->fw_upload_in_progress = false; | ||
83 | |||
84 | return nfc_genl_fw_upload_done(dev, firmware_name); | ||
85 | } | ||
86 | EXPORT_SYMBOL(nfc_fw_upload_done); | ||
87 | |||
47 | /** | 88 | /** |
48 | * nfc_dev_up - turn on the NFC device | 89 | * nfc_dev_up - turn on the NFC device |
49 | * | 90 | * |
@@ -69,6 +110,11 @@ int nfc_dev_up(struct nfc_dev *dev) | |||
69 | goto error; | 110 | goto error; |
70 | } | 111 | } |
71 | 112 | ||
113 | if (dev->fw_upload_in_progress) { | ||
114 | rc = -EBUSY; | ||
115 | goto error; | ||
116 | } | ||
117 | |||
72 | if (dev->dev_up) { | 118 | if (dev->dev_up) { |
73 | rc = -EALREADY; | 119 | rc = -EALREADY; |
74 | goto error; | 120 | goto error; |
@@ -80,6 +126,13 @@ int nfc_dev_up(struct nfc_dev *dev) | |||
80 | if (!rc) | 126 | if (!rc) |
81 | dev->dev_up = true; | 127 | dev->dev_up = true; |
82 | 128 | ||
129 | /* We have to enable the device before discovering SEs */ | ||
130 | if (dev->ops->discover_se) { | ||
131 | rc = dev->ops->discover_se(dev); | ||
132 | if (rc) | ||
133 | pr_warn("SE discovery failed\n"); | ||
134 | } | ||
135 | |||
83 | error: | 136 | error: |
84 | device_unlock(&dev->dev); | 137 | device_unlock(&dev->dev); |
85 | return rc; | 138 | return rc; |
@@ -475,6 +528,108 @@ error: | |||
475 | return rc; | 528 | return rc; |
476 | } | 529 | } |
477 | 530 | ||
531 | static struct nfc_se *find_se(struct nfc_dev *dev, u32 se_idx) | ||
532 | { | ||
533 | struct nfc_se *se, *n; | ||
534 | |||
535 | list_for_each_entry_safe(se, n, &dev->secure_elements, list) | ||
536 | if (se->idx == se_idx) | ||
537 | return se; | ||
538 | |||
539 | return NULL; | ||
540 | } | ||
541 | |||
542 | int nfc_enable_se(struct nfc_dev *dev, u32 se_idx) | ||
543 | { | ||
544 | |||
545 | struct nfc_se *se; | ||
546 | int rc; | ||
547 | |||
548 | pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx); | ||
549 | |||
550 | device_lock(&dev->dev); | ||
551 | |||
552 | if (!device_is_registered(&dev->dev)) { | ||
553 | rc = -ENODEV; | ||
554 | goto error; | ||
555 | } | ||
556 | |||
557 | if (!dev->dev_up) { | ||
558 | rc = -ENODEV; | ||
559 | goto error; | ||
560 | } | ||
561 | |||
562 | if (dev->polling) { | ||
563 | rc = -EBUSY; | ||
564 | goto error; | ||
565 | } | ||
566 | |||
567 | if (!dev->ops->enable_se || !dev->ops->disable_se) { | ||
568 | rc = -EOPNOTSUPP; | ||
569 | goto error; | ||
570 | } | ||
571 | |||
572 | se = find_se(dev, se_idx); | ||
573 | if (!se) { | ||
574 | rc = -EINVAL; | ||
575 | goto error; | ||
576 | } | ||
577 | |||
578 | if (se->state == NFC_SE_ENABLED) { | ||
579 | rc = -EALREADY; | ||
580 | goto error; | ||
581 | } | ||
582 | |||
583 | rc = dev->ops->enable_se(dev, se_idx); | ||
584 | |||
585 | error: | ||
586 | device_unlock(&dev->dev); | ||
587 | return rc; | ||
588 | } | ||
589 | |||
590 | int nfc_disable_se(struct nfc_dev *dev, u32 se_idx) | ||
591 | { | ||
592 | |||
593 | struct nfc_se *se; | ||
594 | int rc; | ||
595 | |||
596 | pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx); | ||
597 | |||
598 | device_lock(&dev->dev); | ||
599 | |||
600 | if (!device_is_registered(&dev->dev)) { | ||
601 | rc = -ENODEV; | ||
602 | goto error; | ||
603 | } | ||
604 | |||
605 | if (!dev->dev_up) { | ||
606 | rc = -ENODEV; | ||
607 | goto error; | ||
608 | } | ||
609 | |||
610 | if (!dev->ops->enable_se || !dev->ops->disable_se) { | ||
611 | rc = -EOPNOTSUPP; | ||
612 | goto error; | ||
613 | } | ||
614 | |||
615 | se = find_se(dev, se_idx); | ||
616 | if (!se) { | ||
617 | rc = -EINVAL; | ||
618 | goto error; | ||
619 | } | ||
620 | |||
621 | if (se->state == NFC_SE_DISABLED) { | ||
622 | rc = -EALREADY; | ||
623 | goto error; | ||
624 | } | ||
625 | |||
626 | rc = dev->ops->disable_se(dev, se_idx); | ||
627 | |||
628 | error: | ||
629 | device_unlock(&dev->dev); | ||
630 | return rc; | ||
631 | } | ||
632 | |||
478 | int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len) | 633 | int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len) |
479 | { | 634 | { |
480 | pr_debug("dev_name=%s gb_len=%d\n", dev_name(&dev->dev), gb_len); | 635 | pr_debug("dev_name=%s gb_len=%d\n", dev_name(&dev->dev), gb_len); |
@@ -707,14 +862,79 @@ inline void nfc_driver_failure(struct nfc_dev *dev, int err) | |||
707 | } | 862 | } |
708 | EXPORT_SYMBOL(nfc_driver_failure); | 863 | EXPORT_SYMBOL(nfc_driver_failure); |
709 | 864 | ||
865 | int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type) | ||
866 | { | ||
867 | struct nfc_se *se; | ||
868 | int rc; | ||
869 | |||
870 | pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx); | ||
871 | |||
872 | se = find_se(dev, se_idx); | ||
873 | if (se) | ||
874 | return -EALREADY; | ||
875 | |||
876 | se = kzalloc(sizeof(struct nfc_se), GFP_KERNEL); | ||
877 | if (!se) | ||
878 | return -ENOMEM; | ||
879 | |||
880 | se->idx = se_idx; | ||
881 | se->type = type; | ||
882 | se->state = NFC_SE_DISABLED; | ||
883 | INIT_LIST_HEAD(&se->list); | ||
884 | |||
885 | list_add(&se->list, &dev->secure_elements); | ||
886 | |||
887 | rc = nfc_genl_se_added(dev, se_idx, type); | ||
888 | if (rc < 0) { | ||
889 | list_del(&se->list); | ||
890 | kfree(se); | ||
891 | |||
892 | return rc; | ||
893 | } | ||
894 | |||
895 | return 0; | ||
896 | } | ||
897 | EXPORT_SYMBOL(nfc_add_se); | ||
898 | |||
899 | int nfc_remove_se(struct nfc_dev *dev, u32 se_idx) | ||
900 | { | ||
901 | struct nfc_se *se, *n; | ||
902 | int rc; | ||
903 | |||
904 | pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx); | ||
905 | |||
906 | list_for_each_entry_safe(se, n, &dev->secure_elements, list) | ||
907 | if (se->idx == se_idx) { | ||
908 | rc = nfc_genl_se_removed(dev, se_idx); | ||
909 | if (rc < 0) | ||
910 | return rc; | ||
911 | |||
912 | list_del(&se->list); | ||
913 | kfree(se); | ||
914 | |||
915 | return 0; | ||
916 | } | ||
917 | |||
918 | return -EINVAL; | ||
919 | } | ||
920 | EXPORT_SYMBOL(nfc_remove_se); | ||
921 | |||
710 | static void nfc_release(struct device *d) | 922 | static void nfc_release(struct device *d) |
711 | { | 923 | { |
712 | struct nfc_dev *dev = to_nfc_dev(d); | 924 | struct nfc_dev *dev = to_nfc_dev(d); |
925 | struct nfc_se *se, *n; | ||
713 | 926 | ||
714 | pr_debug("dev_name=%s\n", dev_name(&dev->dev)); | 927 | pr_debug("dev_name=%s\n", dev_name(&dev->dev)); |
715 | 928 | ||
716 | nfc_genl_data_exit(&dev->genl_data); | 929 | nfc_genl_data_exit(&dev->genl_data); |
717 | kfree(dev->targets); | 930 | kfree(dev->targets); |
931 | |||
932 | list_for_each_entry_safe(se, n, &dev->secure_elements, list) { | ||
933 | nfc_genl_se_removed(dev, se->idx); | ||
934 | list_del(&se->list); | ||
935 | kfree(se); | ||
936 | } | ||
937 | |||
718 | kfree(dev); | 938 | kfree(dev); |
719 | } | 939 | } |
720 | 940 | ||
@@ -786,7 +1006,6 @@ struct nfc_dev *nfc_get_device(unsigned int idx) | |||
786 | */ | 1006 | */ |
787 | struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, | 1007 | struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, |
788 | u32 supported_protocols, | 1008 | u32 supported_protocols, |
789 | u32 supported_se, | ||
790 | int tx_headroom, int tx_tailroom) | 1009 | int tx_headroom, int tx_tailroom) |
791 | { | 1010 | { |
792 | struct nfc_dev *dev; | 1011 | struct nfc_dev *dev; |
@@ -804,10 +1023,9 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, | |||
804 | 1023 | ||
805 | dev->ops = ops; | 1024 | dev->ops = ops; |
806 | dev->supported_protocols = supported_protocols; | 1025 | dev->supported_protocols = supported_protocols; |
807 | dev->supported_se = supported_se; | ||
808 | dev->active_se = NFC_SE_NONE; | ||
809 | dev->tx_headroom = tx_headroom; | 1026 | dev->tx_headroom = tx_headroom; |
810 | dev->tx_tailroom = tx_tailroom; | 1027 | dev->tx_tailroom = tx_tailroom; |
1028 | INIT_LIST_HEAD(&dev->secure_elements); | ||
811 | 1029 | ||
812 | nfc_genl_data_init(&dev->genl_data); | 1030 | nfc_genl_data_init(&dev->genl_data); |
813 | 1031 | ||
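Since ops->fw_upload() typically only kicks off an asynchronous download (nfc_fw_upload() above has already set fw_upload_in_progress), completion is reported later through nfc_fw_upload_done(). A hedged sketch using request_firmware_nowait(); the foo_* names and driver-private structure are made up:

        #include <linux/module.h>
        #include <linux/firmware.h>
        #include <linux/nfc.h>
        #include <net/nfc/nfc.h>

        /* hypothetical driver-private state */
        struct foo_dev {
                struct nfc_dev *nfc_dev;
                struct device *dev;
                char fw_name[NFC_FIRMWARE_NAME_MAXSIZE];
        };

        static void foo_fw_cb(const struct firmware *fw, void *context)
        {
                struct foo_dev *fdev = context;

                if (fw) {
                        /* ... push fw->data / fw->size to the chip ... */
                        release_firmware(fw);
                }

                /* clears fw_upload_in_progress and sends the netlink event */
                nfc_fw_upload_done(fdev->nfc_dev, fdev->fw_name);
        }

        static int foo_fw_upload(struct nfc_dev *dev, const char *firmware_name)
        {
                struct foo_dev *fdev = nfc_get_drvdata(dev);

                /* keep a copy: the caller's buffer is not valid after return */
                strlcpy(fdev->fw_name, firmware_name, sizeof(fdev->fw_name));

                return request_firmware_nowait(THIS_MODULE, true, fdev->fw_name,
                                               fdev->dev, GFP_KERNEL, fdev,
                                               foo_fw_cb);
        }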
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c index 91020b210d87..7b1c186736eb 100644 --- a/net/nfc/hci/core.c +++ b/net/nfc/hci/core.c | |||
@@ -570,21 +570,21 @@ static int hci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, | |||
570 | { | 570 | { |
571 | struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); | 571 | struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); |
572 | 572 | ||
573 | if (hdev->ops->dep_link_up) | 573 | if (!hdev->ops->dep_link_up) |
574 | return hdev->ops->dep_link_up(hdev, target, comm_mode, | 574 | return 0; |
575 | gb, gb_len); | ||
576 | 575 | ||
577 | return 0; | 576 | return hdev->ops->dep_link_up(hdev, target, comm_mode, |
577 | gb, gb_len); | ||
578 | } | 578 | } |
579 | 579 | ||
580 | static int hci_dep_link_down(struct nfc_dev *nfc_dev) | 580 | static int hci_dep_link_down(struct nfc_dev *nfc_dev) |
581 | { | 581 | { |
582 | struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); | 582 | struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); |
583 | 583 | ||
584 | if (hdev->ops->dep_link_down) | 584 | if (!hdev->ops->dep_link_down) |
585 | return hdev->ops->dep_link_down(hdev); | 585 | return 0; |
586 | 586 | ||
587 | return 0; | 587 | return hdev->ops->dep_link_down(hdev); |
588 | } | 588 | } |
589 | 589 | ||
590 | static int hci_activate_target(struct nfc_dev *nfc_dev, | 590 | static int hci_activate_target(struct nfc_dev *nfc_dev, |
@@ -673,12 +673,12 @@ static int hci_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb) | |||
673 | { | 673 | { |
674 | struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); | 674 | struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); |
675 | 675 | ||
676 | if (hdev->ops->tm_send) | 676 | if (!hdev->ops->tm_send) { |
677 | return hdev->ops->tm_send(hdev, skb); | 677 | kfree_skb(skb); |
678 | 678 | return -ENOTSUPP; | |
679 | kfree_skb(skb); | 679 | } |
680 | 680 | ||
681 | return -ENOTSUPP; | 681 | return hdev->ops->tm_send(hdev, skb); |
682 | } | 682 | } |
683 | 683 | ||
684 | static int hci_check_presence(struct nfc_dev *nfc_dev, | 684 | static int hci_check_presence(struct nfc_dev *nfc_dev, |
@@ -686,8 +686,38 @@ static int hci_check_presence(struct nfc_dev *nfc_dev, | |||
686 | { | 686 | { |
687 | struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); | 687 | struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); |
688 | 688 | ||
689 | if (hdev->ops->check_presence) | 689 | if (!hdev->ops->check_presence) |
690 | return hdev->ops->check_presence(hdev, target); | 690 | return 0; |
691 | |||
692 | return hdev->ops->check_presence(hdev, target); | ||
693 | } | ||
694 | |||
695 | static int hci_discover_se(struct nfc_dev *nfc_dev) | ||
696 | { | ||
697 | struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); | ||
698 | |||
699 | if (hdev->ops->discover_se) | ||
700 | return hdev->ops->discover_se(hdev); | ||
701 | |||
702 | return 0; | ||
703 | } | ||
704 | |||
705 | static int hci_enable_se(struct nfc_dev *nfc_dev, u32 se_idx) | ||
706 | { | ||
707 | struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); | ||
708 | |||
709 | if (hdev->ops->enable_se) | ||
710 | return hdev->ops->enable_se(hdev, se_idx); | ||
711 | |||
712 | return 0; | ||
713 | } | ||
714 | |||
715 | static int hci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx) | ||
716 | { | ||
717 | struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); | ||
718 | |||
719 | if (hdev->ops->disable_se) | ||
720 | return hdev->ops->disable_se(hdev, se_idx); | ||
691 | 721 | ||
692 | return 0; | 722 | return 0; |
693 | } | 723 | } |
@@ -779,6 +809,16 @@ static void nfc_hci_recv_from_llc(struct nfc_hci_dev *hdev, struct sk_buff *skb) | |||
779 | } | 809 | } |
780 | } | 810 | } |
781 | 811 | ||
812 | static int hci_fw_upload(struct nfc_dev *nfc_dev, const char *firmware_name) | ||
813 | { | ||
814 | struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); | ||
815 | |||
816 | if (!hdev->ops->fw_upload) | ||
817 | return -ENOTSUPP; | ||
818 | |||
819 | return hdev->ops->fw_upload(hdev, firmware_name); | ||
820 | } | ||
821 | |||
782 | static struct nfc_ops hci_nfc_ops = { | 822 | static struct nfc_ops hci_nfc_ops = { |
783 | .dev_up = hci_dev_up, | 823 | .dev_up = hci_dev_up, |
784 | .dev_down = hci_dev_down, | 824 | .dev_down = hci_dev_down, |
@@ -791,13 +831,16 @@ static struct nfc_ops hci_nfc_ops = { | |||
791 | .im_transceive = hci_transceive, | 831 | .im_transceive = hci_transceive, |
792 | .tm_send = hci_tm_send, | 832 | .tm_send = hci_tm_send, |
793 | .check_presence = hci_check_presence, | 833 | .check_presence = hci_check_presence, |
834 | .fw_upload = hci_fw_upload, | ||
835 | .discover_se = hci_discover_se, | ||
836 | .enable_se = hci_enable_se, | ||
837 | .disable_se = hci_disable_se, | ||
794 | }; | 838 | }; |
795 | 839 | ||
796 | struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops, | 840 | struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops, |
797 | struct nfc_hci_init_data *init_data, | 841 | struct nfc_hci_init_data *init_data, |
798 | unsigned long quirks, | 842 | unsigned long quirks, |
799 | u32 protocols, | 843 | u32 protocols, |
800 | u32 supported_se, | ||
801 | const char *llc_name, | 844 | const char *llc_name, |
802 | int tx_headroom, | 845 | int tx_headroom, |
803 | int tx_tailroom, | 846 | int tx_tailroom, |
@@ -823,7 +866,7 @@ struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops, | |||
823 | return NULL; | 866 | return NULL; |
824 | } | 867 | } |
825 | 868 | ||
826 | hdev->ndev = nfc_allocate_device(&hci_nfc_ops, protocols, supported_se, | 869 | hdev->ndev = nfc_allocate_device(&hci_nfc_ops, protocols, |
827 | tx_headroom + HCI_CMDS_HEADROOM, | 870 | tx_headroom + HCI_CMDS_HEADROOM, |
828 | tx_tailroom); | 871 | tx_tailroom); |
829 | if (!hdev->ndev) { | 872 | if (!hdev->ndev) { |
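For HCI based drivers this series adds optional fw_upload, discover_se, enable_se and disable_se hooks to struct nfc_hci_ops and drops the supported_se argument from nfc_hci_allocate_device(). A sketch of the driver glue afterwards; every my_* / MY_* name is a placeholder, not part of this patch, and "shdlc" stands in for whatever LLC the driver already uses.

#include <net/nfc/hci.h>

static struct nfc_hci_ops my_hci_ops = {
	/* .open, .close, .xmit and the other existing hooks elided */
	.discover_se = my_discover_se,	/* optional: core returns 0 if absent */
	.enable_se   = my_enable_se,	/* optional: core returns 0 if absent */
	.disable_se  = my_disable_se,	/* optional: core returns 0 if absent */
	.fw_upload   = my_fw_upload,	/* optional: core returns -ENOTSUPP if absent */
};

static int my_probe(struct my_chip *chip)
{
	/* supported_se is gone; SEs are reported later via nfc_add_se(). */
	chip->hdev = nfc_hci_allocate_device(&my_hci_ops, &chip->init_data,
					     0 /* quirks */, chip->protocols,
					     "shdlc" /* llc_name */,
					     MY_TX_HEADROOM, MY_TX_TAILROOM,
					     MY_MAX_LINK_PAYLOAD);
	return chip->hdev ? 0 : -ENOMEM;
}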
diff --git a/net/nfc/llcp.h b/net/nfc/llcp.h index ff8c434f7df8..f4d48b57ea11 100644 --- a/net/nfc/llcp.h +++ b/net/nfc/llcp.h | |||
@@ -19,6 +19,8 @@ | |||
19 | 19 | ||
20 | enum llcp_state { | 20 | enum llcp_state { |
21 | LLCP_CONNECTED = 1, /* wait_for_packet() wants that */ | 21 | LLCP_CONNECTED = 1, /* wait_for_packet() wants that */ |
22 | LLCP_CONNECTING, | ||
23 | LLCP_DISCONNECTING, | ||
22 | LLCP_CLOSED, | 24 | LLCP_CLOSED, |
23 | LLCP_BOUND, | 25 | LLCP_BOUND, |
24 | LLCP_LISTEN, | 26 | LLCP_LISTEN, |
@@ -246,7 +248,6 @@ struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri, | |||
246 | void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp); | 248 | void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp); |
247 | void nfc_llcp_free_sdp_tlv_list(struct hlist_head *sdp_head); | 249 | void nfc_llcp_free_sdp_tlv_list(struct hlist_head *sdp_head); |
248 | void nfc_llcp_recv(void *data, struct sk_buff *skb, int err); | 250 | void nfc_llcp_recv(void *data, struct sk_buff *skb, int err); |
249 | int nfc_llcp_disconnect(struct nfc_llcp_sock *sock); | ||
250 | int nfc_llcp_send_symm(struct nfc_dev *dev); | 251 | int nfc_llcp_send_symm(struct nfc_dev *dev); |
251 | int nfc_llcp_send_connect(struct nfc_llcp_sock *sock); | 252 | int nfc_llcp_send_connect(struct nfc_llcp_sock *sock); |
252 | int nfc_llcp_send_cc(struct nfc_llcp_sock *sock); | 253 | int nfc_llcp_send_cc(struct nfc_llcp_sock *sock); |
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c index c1b23eef83ca..1017894807c0 100644 --- a/net/nfc/llcp_commands.c +++ b/net/nfc/llcp_commands.c | |||
@@ -339,7 +339,7 @@ static struct sk_buff *llcp_allocate_pdu(struct nfc_llcp_sock *sock, | |||
339 | return skb; | 339 | return skb; |
340 | } | 340 | } |
341 | 341 | ||
342 | int nfc_llcp_disconnect(struct nfc_llcp_sock *sock) | 342 | int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock) |
343 | { | 343 | { |
344 | struct sk_buff *skb; | 344 | struct sk_buff *skb; |
345 | struct nfc_dev *dev; | 345 | struct nfc_dev *dev; |
@@ -630,26 +630,6 @@ int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason) | |||
630 | return 0; | 630 | return 0; |
631 | } | 631 | } |
632 | 632 | ||
633 | int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock) | ||
634 | { | ||
635 | struct sk_buff *skb; | ||
636 | struct nfc_llcp_local *local; | ||
637 | |||
638 | pr_debug("Send DISC\n"); | ||
639 | |||
640 | local = sock->local; | ||
641 | if (local == NULL) | ||
642 | return -ENODEV; | ||
643 | |||
644 | skb = llcp_allocate_pdu(sock, LLCP_PDU_DISC, 0); | ||
645 | if (skb == NULL) | ||
646 | return -ENOMEM; | ||
647 | |||
648 | skb_queue_head(&local->tx_queue, skb); | ||
649 | |||
650 | return 0; | ||
651 | } | ||
652 | |||
653 | int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock, | 633 | int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock, |
654 | struct msghdr *msg, size_t len) | 634 | struct msghdr *msg, size_t len) |
655 | { | 635 | { |
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c index 158bdbf668cc..81cd3416c7d4 100644 --- a/net/nfc/llcp_core.c +++ b/net/nfc/llcp_core.c | |||
@@ -537,6 +537,7 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local) | |||
537 | u8 *lto_tlv, lto_length; | 537 | u8 *lto_tlv, lto_length; |
538 | u8 *wks_tlv, wks_length; | 538 | u8 *wks_tlv, wks_length; |
539 | u8 *miux_tlv, miux_length; | 539 | u8 *miux_tlv, miux_length; |
540 | __be16 wks = cpu_to_be16(local->local_wks); | ||
540 | u8 gb_len = 0; | 541 | u8 gb_len = 0; |
541 | int ret = 0; | 542 | int ret = 0; |
542 | 543 | ||
@@ -549,8 +550,7 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local) | |||
549 | gb_len += lto_length; | 550 | gb_len += lto_length; |
550 | 551 | ||
551 | pr_debug("Local wks 0x%lx\n", local->local_wks); | 552 | pr_debug("Local wks 0x%lx\n", local->local_wks); |
552 | wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&local->local_wks, 2, | 553 | wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&wks, 2, &wks_length); |
553 | &wks_length); | ||
554 | gb_len += wks_length; | 554 | gb_len += wks_length; |
555 | 555 | ||
556 | miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0, | 556 | miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0, |
@@ -719,6 +719,10 @@ static void nfc_llcp_tx_work(struct work_struct *work) | |||
719 | llcp_sock = nfc_llcp_sock(sk); | 719 | llcp_sock = nfc_llcp_sock(sk); |
720 | 720 | ||
721 | if (llcp_sock == NULL && nfc_llcp_ptype(skb) == LLCP_PDU_I) { | 721 | if (llcp_sock == NULL && nfc_llcp_ptype(skb) == LLCP_PDU_I) { |
722 | kfree_skb(skb); | ||
723 | nfc_llcp_send_symm(local->dev); | ||
724 | } else if (llcp_sock && !llcp_sock->remote_ready) { | ||
725 | skb_queue_head(&local->tx_queue, skb); | ||
722 | nfc_llcp_send_symm(local->dev); | 726 | nfc_llcp_send_symm(local->dev); |
723 | } else { | 727 | } else { |
724 | struct sk_buff *copy_skb = NULL; | 728 | struct sk_buff *copy_skb = NULL; |
@@ -730,6 +734,13 @@ static void nfc_llcp_tx_work(struct work_struct *work) | |||
730 | DUMP_PREFIX_OFFSET, 16, 1, | 734 | DUMP_PREFIX_OFFSET, 16, 1, |
731 | skb->data, skb->len, true); | 735 | skb->data, skb->len, true); |
732 | 736 | ||
737 | if (ptype == LLCP_PDU_DISC && sk != NULL && | ||
738 | sk->sk_state == LLCP_DISCONNECTING) { | ||
739 | nfc_llcp_sock_unlink(&local->sockets, sk); | ||
740 | sock_orphan(sk); | ||
741 | sock_put(sk); | ||
742 | } | ||
743 | |||
733 | if (ptype == LLCP_PDU_I) | 744 | if (ptype == LLCP_PDU_I) |
734 | copy_skb = skb_copy(skb, GFP_ATOMIC); | 745 | copy_skb = skb_copy(skb, GFP_ATOMIC); |
735 | 746 | ||
@@ -1579,6 +1590,7 @@ int nfc_llcp_register_device(struct nfc_dev *ndev) | |||
1579 | local->lto = 150; /* 1500 ms */ | 1590 | local->lto = 150; /* 1500 ms */ |
1580 | local->rw = LLCP_MAX_RW; | 1591 | local->rw = LLCP_MAX_RW; |
1581 | local->miux = cpu_to_be16(LLCP_MAX_MIUX); | 1592 | local->miux = cpu_to_be16(LLCP_MAX_MIUX); |
1593 | local->local_wks = 0x1; /* LLC Link Management */ | ||
1582 | 1594 | ||
1583 | nfc_llcp_build_gb(local); | 1595 | nfc_llcp_build_gb(local); |
1584 | 1596 | ||
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index 380253eccb74..d308402b67d8 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c | |||
@@ -571,7 +571,7 @@ static unsigned int llcp_sock_poll(struct file *file, struct socket *sock, | |||
571 | if (sk->sk_shutdown == SHUTDOWN_MASK) | 571 | if (sk->sk_shutdown == SHUTDOWN_MASK) |
572 | mask |= POLLHUP; | 572 | mask |= POLLHUP; |
573 | 573 | ||
574 | if (sock_writeable(sk)) | 574 | if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED) |
575 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; | 575 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; |
576 | else | 576 | else |
577 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 577 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); |
@@ -603,7 +603,7 @@ static int llcp_sock_release(struct socket *sock) | |||
603 | 603 | ||
604 | /* Send a DISC */ | 604 | /* Send a DISC */ |
605 | if (sk->sk_state == LLCP_CONNECTED) | 605 | if (sk->sk_state == LLCP_CONNECTED) |
606 | nfc_llcp_disconnect(llcp_sock); | 606 | nfc_llcp_send_disconnect(llcp_sock); |
607 | 607 | ||
608 | if (sk->sk_state == LLCP_LISTEN) { | 608 | if (sk->sk_state == LLCP_LISTEN) { |
609 | struct nfc_llcp_sock *lsk, *n; | 609 | struct nfc_llcp_sock *lsk, *n; |
@@ -614,7 +614,7 @@ static int llcp_sock_release(struct socket *sock) | |||
614 | accept_sk = &lsk->sk; | 614 | accept_sk = &lsk->sk; |
615 | lock_sock(accept_sk); | 615 | lock_sock(accept_sk); |
616 | 616 | ||
617 | nfc_llcp_disconnect(lsk); | 617 | nfc_llcp_send_disconnect(lsk); |
618 | nfc_llcp_accept_unlink(accept_sk); | 618 | nfc_llcp_accept_unlink(accept_sk); |
619 | 619 | ||
620 | release_sock(accept_sk); | 620 | release_sock(accept_sk); |
@@ -626,6 +626,13 @@ static int llcp_sock_release(struct socket *sock) | |||
626 | 626 | ||
627 | release_sock(sk); | 627 | release_sock(sk); |
628 | 628 | ||
629 | /* Keep this sock alive and therefore do not remove it from the sockets | ||
630 | * list until the DISC PDU has actually been sent. Otherwise we would | ||
631 | * reply with DM PDUs before sending the DISC one. | ||
632 | */ | ||
633 | if (sk->sk_state == LLCP_DISCONNECTING) | ||
634 | return err; | ||
635 | |||
629 | if (sock->type == SOCK_RAW) | 636 | if (sock->type == SOCK_RAW) |
630 | nfc_llcp_sock_unlink(&local->raw_sockets, sk); | 637 | nfc_llcp_sock_unlink(&local->raw_sockets, sk); |
631 | else | 638 | else |
@@ -722,14 +729,16 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, | |||
722 | if (ret) | 729 | if (ret) |
723 | goto sock_unlink; | 730 | goto sock_unlink; |
724 | 731 | ||
732 | sk->sk_state = LLCP_CONNECTING; | ||
733 | |||
725 | ret = sock_wait_state(sk, LLCP_CONNECTED, | 734 | ret = sock_wait_state(sk, LLCP_CONNECTED, |
726 | sock_sndtimeo(sk, flags & O_NONBLOCK)); | 735 | sock_sndtimeo(sk, flags & O_NONBLOCK)); |
727 | if (ret) | 736 | if (ret && ret != -EINPROGRESS) |
728 | goto sock_unlink; | 737 | goto sock_unlink; |
729 | 738 | ||
730 | release_sock(sk); | 739 | release_sock(sk); |
731 | 740 | ||
732 | return 0; | 741 | return ret; |
733 | 742 | ||
734 | sock_unlink: | 743 | sock_unlink: |
735 | nfc_llcp_put_ssap(local, llcp_sock->ssap); | 744 | nfc_llcp_put_ssap(local, llcp_sock->ssap); |
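Together with the new LLCP_CONNECTING/LLCP_DISCONNECTING states, these hunks change the userspace-visible connect semantics: a non-blocking connect() now leaves the socket in place and reports EINPROGRESS instead of tearing it down, and poll() only signals writability once the link is actually LLCP_CONNECTED. A sketch of the resulting userspace pattern, assuming the existing AF_NFC / NFC_SOCKPROTO_LLCP UAPI; the caller is assumed to have filled a struct sockaddr_nfc_llcp, and error handling is trimmed.

#define _GNU_SOURCE
#include <errno.h>
#include <poll.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/nfc.h>

/* Sketch: non-blocking LLCP connect after this change. */
static int llcp_connect_nonblock(struct sockaddr_nfc_llcp *addr)
{
	struct pollfd pfd;
	int fd = socket(AF_NFC, SOCK_STREAM | SOCK_NONBLOCK, NFC_SOCKPROTO_LLCP);

	if (fd < 0)
		return -errno;

	if (connect(fd, (struct sockaddr *)addr, sizeof(*addr)) < 0 &&
	    errno != EINPROGRESS) {	/* EINPROGRESS is now the expected answer */
		close(fd);
		return -errno;
	}

	/* POLLOUT is only reported once the socket reaches LLCP_CONNECTED. */
	pfd.fd = fd;
	pfd.events = POLLOUT;
	if (poll(&pfd, 1, 5000 /* ms */) <= 0) {
		close(fd);
		return -ETIMEDOUT;
	}

	return fd;
}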
diff --git a/net/nfc/nci/Kconfig b/net/nfc/nci/Kconfig index 6d69b5f0f19b..2a2416080b4f 100644 --- a/net/nfc/nci/Kconfig +++ b/net/nfc/nci/Kconfig | |||
@@ -8,3 +8,13 @@ config NFC_NCI | |||
8 | 8 | ||
9 | Say Y here to compile NCI support into the kernel or say M to | 9 | Say Y here to compile NCI support into the kernel or say M to |
10 | compile it as module (nci). | 10 | compile it as module (nci). |
11 | |||
12 | config NFC_NCI_SPI | ||
13 | depends on NFC_NCI && SPI | ||
14 | bool "NCI over SPI protocol support" | ||
15 | default n | ||
16 | help | ||
17 | NCI (NFC Controller Interface) is a communication protocol between | ||
18 | an NFC Controller (NFCC) and a Device Host (DH). | ||
19 | |||
20 | Say yes if you use an NCI driver that requires the SPI link layer. | ||
diff --git a/net/nfc/nci/Makefile b/net/nfc/nci/Makefile index cdb3a2e44471..7aeedc43187d 100644 --- a/net/nfc/nci/Makefile +++ b/net/nfc/nci/Makefile | |||
@@ -4,4 +4,6 @@ | |||
4 | 4 | ||
5 | obj-$(CONFIG_NFC_NCI) += nci.o | 5 | obj-$(CONFIG_NFC_NCI) += nci.o |
6 | 6 | ||
7 | nci-objs := core.o data.o lib.o ntf.o rsp.o \ No newline at end of file | 7 | nci-objs := core.o data.o lib.o ntf.o rsp.o |
8 | |||
9 | nci-$(CONFIG_NFC_NCI_SPI) += spi.o | ||
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 48ada0ec749e..b943d46a1644 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c | |||
@@ -636,6 +636,21 @@ static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target, | |||
636 | return rc; | 636 | return rc; |
637 | } | 637 | } |
638 | 638 | ||
639 | static int nci_enable_se(struct nfc_dev *nfc_dev, u32 se_idx) | ||
640 | { | ||
641 | return 0; | ||
642 | } | ||
643 | |||
644 | static int nci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx) | ||
645 | { | ||
646 | return 0; | ||
647 | } | ||
648 | |||
649 | static int nci_discover_se(struct nfc_dev *nfc_dev) | ||
650 | { | ||
651 | return 0; | ||
652 | } | ||
653 | |||
639 | static struct nfc_ops nci_nfc_ops = { | 654 | static struct nfc_ops nci_nfc_ops = { |
640 | .dev_up = nci_dev_up, | 655 | .dev_up = nci_dev_up, |
641 | .dev_down = nci_dev_down, | 656 | .dev_down = nci_dev_down, |
@@ -646,6 +661,9 @@ static struct nfc_ops nci_nfc_ops = { | |||
646 | .activate_target = nci_activate_target, | 661 | .activate_target = nci_activate_target, |
647 | .deactivate_target = nci_deactivate_target, | 662 | .deactivate_target = nci_deactivate_target, |
648 | .im_transceive = nci_transceive, | 663 | .im_transceive = nci_transceive, |
664 | .enable_se = nci_enable_se, | ||
665 | .disable_se = nci_disable_se, | ||
666 | .discover_se = nci_discover_se, | ||
649 | }; | 667 | }; |
650 | 668 | ||
651 | /* ---- Interface to NCI drivers ---- */ | 669 | /* ---- Interface to NCI drivers ---- */ |
@@ -658,7 +676,6 @@ static struct nfc_ops nci_nfc_ops = { | |||
658 | */ | 676 | */ |
659 | struct nci_dev *nci_allocate_device(struct nci_ops *ops, | 677 | struct nci_dev *nci_allocate_device(struct nci_ops *ops, |
660 | __u32 supported_protocols, | 678 | __u32 supported_protocols, |
661 | __u32 supported_se, | ||
662 | int tx_headroom, int tx_tailroom) | 679 | int tx_headroom, int tx_tailroom) |
663 | { | 680 | { |
664 | struct nci_dev *ndev; | 681 | struct nci_dev *ndev; |
@@ -681,7 +698,6 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops, | |||
681 | 698 | ||
682 | ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops, | 699 | ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops, |
683 | supported_protocols, | 700 | supported_protocols, |
684 | supported_se, | ||
685 | tx_headroom + NCI_DATA_HDR_SIZE, | 701 | tx_headroom + NCI_DATA_HDR_SIZE, |
686 | tx_tailroom); | 702 | tx_tailroom); |
687 | if (!ndev->nfc_dev) | 703 | if (!ndev->nfc_dev) |
@@ -797,12 +813,11 @@ EXPORT_SYMBOL(nci_unregister_device); | |||
797 | /** | 813 | /** |
798 | * nci_recv_frame - receive frame from NCI drivers | 814 | * nci_recv_frame - receive frame from NCI drivers |
799 | * | 815 | * |
816 | * @ndev: The nci device | ||
800 | * @skb: The sk_buff to receive | 817 | * @skb: The sk_buff to receive |
801 | */ | 818 | */ |
802 | int nci_recv_frame(struct sk_buff *skb) | 819 | int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb) |
803 | { | 820 | { |
804 | struct nci_dev *ndev = (struct nci_dev *) skb->dev; | ||
805 | |||
806 | pr_debug("len %d\n", skb->len); | 821 | pr_debug("len %d\n", skb->len); |
807 | 822 | ||
808 | if (!ndev || (!test_bit(NCI_UP, &ndev->flags) && | 823 | if (!ndev || (!test_bit(NCI_UP, &ndev->flags) && |
@@ -819,10 +834,8 @@ int nci_recv_frame(struct sk_buff *skb) | |||
819 | } | 834 | } |
820 | EXPORT_SYMBOL(nci_recv_frame); | 835 | EXPORT_SYMBOL(nci_recv_frame); |
821 | 836 | ||
822 | static int nci_send_frame(struct sk_buff *skb) | 837 | static int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb) |
823 | { | 838 | { |
824 | struct nci_dev *ndev = (struct nci_dev *) skb->dev; | ||
825 | |||
826 | pr_debug("len %d\n", skb->len); | 839 | pr_debug("len %d\n", skb->len); |
827 | 840 | ||
828 | if (!ndev) { | 841 | if (!ndev) { |
@@ -833,7 +846,7 @@ static int nci_send_frame(struct sk_buff *skb) | |||
833 | /* Get rid of skb owner, prior to sending to the driver. */ | 846 | /* Get rid of skb owner, prior to sending to the driver. */ |
834 | skb_orphan(skb); | 847 | skb_orphan(skb); |
835 | 848 | ||
836 | return ndev->ops->send(skb); | 849 | return ndev->ops->send(ndev, skb); |
837 | } | 850 | } |
838 | 851 | ||
839 | /* Send NCI command */ | 852 | /* Send NCI command */ |
@@ -861,8 +874,6 @@ int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload) | |||
861 | if (plen) | 874 | if (plen) |
862 | memcpy(skb_put(skb, plen), payload, plen); | 875 | memcpy(skb_put(skb, plen), payload, plen); |
863 | 876 | ||
864 | skb->dev = (void *) ndev; | ||
865 | |||
866 | skb_queue_tail(&ndev->cmd_q, skb); | 877 | skb_queue_tail(&ndev->cmd_q, skb); |
867 | queue_work(ndev->cmd_wq, &ndev->cmd_work); | 878 | queue_work(ndev->cmd_wq, &ndev->cmd_work); |
868 | 879 | ||
@@ -894,7 +905,7 @@ static void nci_tx_work(struct work_struct *work) | |||
894 | nci_conn_id(skb->data), | 905 | nci_conn_id(skb->data), |
895 | nci_plen(skb->data)); | 906 | nci_plen(skb->data)); |
896 | 907 | ||
897 | nci_send_frame(skb); | 908 | nci_send_frame(ndev, skb); |
898 | 909 | ||
899 | mod_timer(&ndev->data_timer, | 910 | mod_timer(&ndev->data_timer, |
900 | jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT)); | 911 | jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT)); |
@@ -963,7 +974,7 @@ static void nci_cmd_work(struct work_struct *work) | |||
963 | nci_opcode_oid(nci_opcode(skb->data)), | 974 | nci_opcode_oid(nci_opcode(skb->data)), |
964 | nci_plen(skb->data)); | 975 | nci_plen(skb->data)); |
965 | 976 | ||
966 | nci_send_frame(skb); | 977 | nci_send_frame(ndev, skb); |
967 | 978 | ||
968 | mod_timer(&ndev->cmd_timer, | 979 | mod_timer(&ndev->cmd_timer, |
969 | jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT)); | 980 | jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT)); |
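The skb->dev abuse is gone from the NCI core: nci_send_frame() and nci_recv_frame() now take the struct nci_dev explicitly, and the driver's send callback gains the same parameter. A sketch of the driver-side consequence; struct my_dev and my_hw_xmit() are hypothetical placeholders.

#include <net/nfc/nci_core.h>

/* Illustrative driver glue only. */
struct my_dev {
	struct nci_dev *ndev;
	/* ... bus handle, queues, ... */
};

static int my_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
{
	struct my_dev *mdev = nci_get_drvdata(ndev);

	/* The core now hands us ndev directly; no skb->dev cast needed. */
	return my_hw_xmit(mdev, skb);
}

/* RX completion: the frame is forwarded together with the nci_dev. */
static void my_rx_complete(struct my_dev *mdev, struct sk_buff *skb)
{
	nci_recv_frame(mdev->ndev, skb);
}

The corresponding struct nci_ops .send member now uses the (ndev, skb) signature shown above.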
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c index 76c48c5324f8..2a9399dd6c68 100644 --- a/net/nfc/nci/data.c +++ b/net/nfc/nci/data.c | |||
@@ -80,8 +80,6 @@ static inline void nci_push_data_hdr(struct nci_dev *ndev, | |||
80 | 80 | ||
81 | nci_mt_set((__u8 *)hdr, NCI_MT_DATA_PKT); | 81 | nci_mt_set((__u8 *)hdr, NCI_MT_DATA_PKT); |
82 | nci_pbf_set((__u8 *)hdr, pbf); | 82 | nci_pbf_set((__u8 *)hdr, pbf); |
83 | |||
84 | skb->dev = (void *) ndev; | ||
85 | } | 83 | } |
86 | 84 | ||
87 | static int nci_queue_tx_data_frags(struct nci_dev *ndev, | 85 | static int nci_queue_tx_data_frags(struct nci_dev *ndev, |
diff --git a/net/nfc/nci/spi.c b/net/nfc/nci/spi.c new file mode 100644 index 000000000000..c7cf37ba7298 --- /dev/null +++ b/net/nfc/nci/spi.c | |||
@@ -0,0 +1,378 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Intel Corporation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms and conditions of the GNU General Public License, | ||
6 | * version 2, as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | * more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along with | ||
14 | * this program; if not, write to the Free Software Foundation, Inc., | ||
15 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #define pr_fmt(fmt) "nci_spi: %s: " fmt, __func__ | ||
20 | |||
21 | #include <linux/export.h> | ||
22 | #include <linux/spi/spi.h> | ||
23 | #include <linux/crc-ccitt.h> | ||
24 | #include <linux/nfc.h> | ||
25 | #include <net/nfc/nci_core.h> | ||
26 | |||
27 | #define NCI_SPI_HDR_LEN 4 | ||
28 | #define NCI_SPI_CRC_LEN 2 | ||
29 | #define NCI_SPI_ACK_SHIFT 6 | ||
30 | #define NCI_SPI_MSB_PAYLOAD_MASK 0x3F | ||
31 | |||
32 | #define NCI_SPI_SEND_TIMEOUT (NCI_CMD_TIMEOUT > NCI_DATA_TIMEOUT ? \ | ||
33 | NCI_CMD_TIMEOUT : NCI_DATA_TIMEOUT) | ||
34 | |||
35 | #define NCI_SPI_DIRECT_WRITE 0x01 | ||
36 | #define NCI_SPI_DIRECT_READ 0x02 | ||
37 | |||
38 | #define ACKNOWLEDGE_NONE 0 | ||
39 | #define ACKNOWLEDGE_ACK 1 | ||
40 | #define ACKNOWLEDGE_NACK 2 | ||
41 | |||
42 | #define CRC_INIT 0xFFFF | ||
43 | |||
44 | static int nci_spi_open(struct nci_dev *nci_dev) | ||
45 | { | ||
46 | struct nci_spi_dev *ndev = nci_get_drvdata(nci_dev); | ||
47 | |||
48 | return ndev->ops->open(ndev); | ||
49 | } | ||
50 | |||
51 | static int nci_spi_close(struct nci_dev *nci_dev) | ||
52 | { | ||
53 | struct nci_spi_dev *ndev = nci_get_drvdata(nci_dev); | ||
54 | |||
55 | return ndev->ops->close(ndev); | ||
56 | } | ||
57 | |||
58 | static int __nci_spi_send(struct nci_spi_dev *ndev, struct sk_buff *skb) | ||
59 | { | ||
60 | struct spi_message m; | ||
61 | struct spi_transfer t = { 0 }; | ||
62 | |||
63 | t.tx_buf = skb->data; | ||
64 | t.len = skb->len; | ||
65 | t.cs_change = 0; | ||
66 | t.delay_usecs = ndev->xfer_udelay; | ||
67 | |||
68 | spi_message_init(&m); | ||
69 | spi_message_add_tail(&t, &m); | ||
70 | |||
71 | return spi_sync(ndev->spi, &m); | ||
72 | } | ||
73 | |||
74 | static int nci_spi_send(struct nci_dev *nci_dev, struct sk_buff *skb) | ||
75 | { | ||
76 | struct nci_spi_dev *ndev = nci_get_drvdata(nci_dev); | ||
77 | unsigned int payload_len = skb->len; | ||
78 | unsigned char *hdr; | ||
79 | int ret; | ||
80 | long completion_rc; | ||
81 | |||
82 | ndev->ops->deassert_int(ndev); | ||
83 | |||
84 | /* add the NCI SPI header to the start of the buffer */ | ||
85 | hdr = skb_push(skb, NCI_SPI_HDR_LEN); | ||
86 | hdr[0] = NCI_SPI_DIRECT_WRITE; | ||
87 | hdr[1] = ndev->acknowledge_mode; | ||
88 | hdr[2] = payload_len >> 8; | ||
89 | hdr[3] = payload_len & 0xFF; | ||
90 | |||
91 | if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) { | ||
92 | u16 crc; | ||
93 | |||
94 | crc = crc_ccitt(CRC_INIT, skb->data, skb->len); | ||
95 | *skb_put(skb, 1) = crc >> 8; | ||
96 | *skb_put(skb, 1) = crc & 0xFF; | ||
97 | } | ||
98 | |||
99 | ret = __nci_spi_send(ndev, skb); | ||
100 | |||
101 | kfree_skb(skb); | ||
102 | ndev->ops->assert_int(ndev); | ||
103 | |||
104 | if (ret != 0 || ndev->acknowledge_mode == NCI_SPI_CRC_DISABLED) | ||
105 | goto done; | ||
106 | |||
107 | init_completion(&ndev->req_completion); | ||
108 | completion_rc = | ||
109 | wait_for_completion_interruptible_timeout(&ndev->req_completion, | ||
110 | NCI_SPI_SEND_TIMEOUT); | ||
111 | |||
112 | if (completion_rc <= 0 || ndev->req_result == ACKNOWLEDGE_NACK) | ||
113 | ret = -EIO; | ||
114 | |||
115 | done: | ||
116 | return ret; | ||
117 | } | ||
118 | |||
119 | static struct nci_ops nci_spi_ops = { | ||
120 | .open = nci_spi_open, | ||
121 | .close = nci_spi_close, | ||
122 | .send = nci_spi_send, | ||
123 | }; | ||
124 | |||
125 | /* ---- Interface to NCI SPI drivers ---- */ | ||
126 | |||
127 | /** | ||
128 | * nci_spi_allocate_device - allocate a new nci spi device | ||
129 | * | ||
130 | * @spi: SPI device | ||
131 | * @ops: device operations | ||
132 | * @supported_protocols: NFC protocols supported by the device | ||
133 | * @supported_se: NFC Secure Elements supported by the device | ||
134 | * @acknowledge_mode: Acknowledge mode used by the device | ||
135 | * @delay: delay between transactions in us | ||
136 | */ | ||
137 | struct nci_spi_dev *nci_spi_allocate_device(struct spi_device *spi, | ||
138 | struct nci_spi_ops *ops, | ||
139 | u32 supported_protocols, | ||
140 | u32 supported_se, | ||
141 | u8 acknowledge_mode, | ||
142 | unsigned int delay) | ||
143 | { | ||
144 | struct nci_spi_dev *ndev; | ||
145 | int tailroom = 0; | ||
146 | |||
147 | if (!ops->open || !ops->close || !ops->assert_int || !ops->deassert_int) | ||
148 | return NULL; | ||
149 | |||
150 | if (!supported_protocols) | ||
151 | return NULL; | ||
152 | |||
153 | ndev = devm_kzalloc(&spi->dev, sizeof(struct nci_spi_dev), GFP_KERNEL); | ||
154 | if (!ndev) | ||
155 | return NULL; | ||
156 | |||
157 | ndev->ops = ops; | ||
158 | ndev->acknowledge_mode = acknowledge_mode; | ||
159 | ndev->xfer_udelay = delay; | ||
160 | |||
161 | if (acknowledge_mode == NCI_SPI_CRC_ENABLED) | ||
162 | tailroom += NCI_SPI_CRC_LEN; | ||
163 | |||
164 | ndev->nci_dev = nci_allocate_device(&nci_spi_ops, supported_protocols, | ||
165 | NCI_SPI_HDR_LEN, tailroom); | ||
166 | if (!ndev->nci_dev) | ||
167 | return NULL; | ||
168 | |||
169 | nci_set_drvdata(ndev->nci_dev, ndev); | ||
170 | |||
171 | return ndev; | ||
172 | } | ||
173 | EXPORT_SYMBOL_GPL(nci_spi_allocate_device); | ||
174 | |||
175 | /** | ||
176 | * nci_spi_free_device - deallocate nci spi device | ||
177 | * | ||
178 | * @ndev: The nci spi device to deallocate | ||
179 | */ | ||
180 | void nci_spi_free_device(struct nci_spi_dev *ndev) | ||
181 | { | ||
182 | nci_free_device(ndev->nci_dev); | ||
183 | } | ||
184 | EXPORT_SYMBOL_GPL(nci_spi_free_device); | ||
185 | |||
186 | /** | ||
187 | * nci_spi_register_device - register a nci spi device in the nfc subsystem | ||
188 | * | ||
189 | * @ndev: The nci spi device to register | ||
190 | */ | ||
191 | int nci_spi_register_device(struct nci_spi_dev *ndev) | ||
192 | { | ||
193 | return nci_register_device(ndev->nci_dev); | ||
194 | } | ||
195 | EXPORT_SYMBOL_GPL(nci_spi_register_device); | ||
196 | |||
197 | /** | ||
198 | * nci_spi_unregister_device - unregister a nci spi device in the nfc subsystem | ||
199 | * | ||
200 | * @ndev: The nci spi device to unregister | ||
201 | */ | ||
202 | void nci_spi_unregister_device(struct nci_spi_dev *ndev) | ||
203 | { | ||
204 | nci_unregister_device(ndev->nci_dev); | ||
205 | } | ||
206 | EXPORT_SYMBOL_GPL(nci_spi_unregister_device); | ||
207 | |||
208 | static int send_acknowledge(struct nci_spi_dev *ndev, u8 acknowledge) | ||
209 | { | ||
210 | struct sk_buff *skb; | ||
211 | unsigned char *hdr; | ||
212 | u16 crc; | ||
213 | int ret; | ||
214 | |||
215 | skb = nci_skb_alloc(ndev->nci_dev, 0, GFP_KERNEL); | ||
216 | |||
217 | /* add the NCI SPI header to the start of the buffer */ | ||
218 | hdr = skb_push(skb, NCI_SPI_HDR_LEN); | ||
219 | hdr[0] = NCI_SPI_DIRECT_WRITE; | ||
220 | hdr[1] = NCI_SPI_CRC_ENABLED; | ||
221 | hdr[2] = acknowledge << NCI_SPI_ACK_SHIFT; | ||
222 | hdr[3] = 0; | ||
223 | |||
224 | crc = crc_ccitt(CRC_INIT, skb->data, skb->len); | ||
225 | *skb_put(skb, 1) = crc >> 8; | ||
226 | *skb_put(skb, 1) = crc & 0xFF; | ||
227 | |||
228 | ret = __nci_spi_send(ndev, skb); | ||
229 | |||
230 | kfree_skb(skb); | ||
231 | |||
232 | return ret; | ||
233 | } | ||
234 | |||
235 | static struct sk_buff *__nci_spi_recv_frame(struct nci_spi_dev *ndev) | ||
236 | { | ||
237 | struct sk_buff *skb; | ||
238 | struct spi_message m; | ||
239 | unsigned char req[2], resp_hdr[2]; | ||
240 | struct spi_transfer tx = { 0 }, rx = { 0 }; | ||
241 | unsigned short rx_len = 0; | ||
242 | int ret; | ||
243 | |||
244 | spi_message_init(&m); | ||
245 | req[0] = NCI_SPI_DIRECT_READ; | ||
246 | req[1] = ndev->acknowledge_mode; | ||
247 | tx.tx_buf = req; | ||
248 | tx.len = 2; | ||
249 | tx.cs_change = 0; | ||
250 | spi_message_add_tail(&tx, &m); | ||
251 | rx.rx_buf = resp_hdr; | ||
252 | rx.len = 2; | ||
253 | rx.cs_change = 1; | ||
254 | spi_message_add_tail(&rx, &m); | ||
255 | ret = spi_sync(ndev->spi, &m); | ||
256 | |||
257 | if (ret) | ||
258 | return NULL; | ||
259 | |||
260 | if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) | ||
261 | rx_len = ((resp_hdr[0] & NCI_SPI_MSB_PAYLOAD_MASK) << 8) + | ||
262 | resp_hdr[1] + NCI_SPI_CRC_LEN; | ||
263 | else | ||
264 | rx_len = (resp_hdr[0] << 8) | resp_hdr[1]; | ||
265 | |||
266 | skb = nci_skb_alloc(ndev->nci_dev, rx_len, GFP_KERNEL); | ||
267 | if (!skb) | ||
268 | return NULL; | ||
269 | |||
270 | spi_message_init(&m); | ||
271 | rx.rx_buf = skb_put(skb, rx_len); | ||
272 | rx.len = rx_len; | ||
273 | rx.cs_change = 0; | ||
274 | rx.delay_usecs = ndev->xfer_udelay; | ||
275 | spi_message_add_tail(&rx, &m); | ||
276 | ret = spi_sync(ndev->spi, &m); | ||
277 | |||
278 | if (ret) | ||
279 | goto receive_error; | ||
280 | |||
281 | if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) { | ||
282 | *skb_push(skb, 1) = resp_hdr[1]; | ||
283 | *skb_push(skb, 1) = resp_hdr[0]; | ||
284 | } | ||
285 | |||
286 | return skb; | ||
287 | |||
288 | receive_error: | ||
289 | kfree_skb(skb); | ||
290 | |||
291 | return NULL; | ||
292 | } | ||
293 | |||
294 | static int nci_spi_check_crc(struct sk_buff *skb) | ||
295 | { | ||
296 | u16 crc_data = (skb->data[skb->len - 2] << 8) | | ||
297 | skb->data[skb->len - 1]; | ||
298 | int ret; | ||
299 | |||
300 | ret = (crc_ccitt(CRC_INIT, skb->data, skb->len - NCI_SPI_CRC_LEN) | ||
301 | == crc_data); | ||
302 | |||
303 | skb_trim(skb, skb->len - NCI_SPI_CRC_LEN); | ||
304 | |||
305 | return ret; | ||
306 | } | ||
307 | |||
308 | static u8 nci_spi_get_ack(struct sk_buff *skb) | ||
309 | { | ||
310 | u8 ret; | ||
311 | |||
312 | ret = skb->data[0] >> NCI_SPI_ACK_SHIFT; | ||
313 | |||
314 | /* Remove NFCC part of the header: ACK, NACK and MSB payload len */ | ||
315 | skb_pull(skb, 2); | ||
316 | |||
317 | return ret; | ||
318 | } | ||
319 | |||
320 | /** | ||
321 | * nci_spi_recv_frame - receive frame from NCI SPI drivers | ||
322 | * | ||
323 | * @ndev: The nci spi device | ||
324 | * Context: can sleep | ||
325 | * | ||
326 | * This call may only be used from a context that may sleep. The sleep | ||
327 | * is non-interruptible, and has no timeout. | ||
328 | * | ||
329 | * It returns zero on success, else a negative error code. | ||
330 | */ | ||
331 | int nci_spi_recv_frame(struct nci_spi_dev *ndev) | ||
332 | { | ||
333 | struct sk_buff *skb; | ||
334 | int ret = 0; | ||
335 | |||
336 | ndev->ops->deassert_int(ndev); | ||
337 | |||
338 | /* Retrieve frame from SPI */ | ||
339 | skb = __nci_spi_recv_frame(ndev); | ||
340 | if (!skb) { | ||
341 | ret = -EIO; | ||
342 | goto done; | ||
343 | } | ||
344 | |||
345 | if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) { | ||
346 | if (!nci_spi_check_crc(skb)) { | ||
347 | send_acknowledge(ndev, ACKNOWLEDGE_NACK); | ||
348 | goto done; | ||
349 | } | ||
350 | |||
351 | /* In case of acknowledged mode: if ACK or NACK received, | ||
352 | * unblock completion of latest frame sent. | ||
353 | */ | ||
354 | ndev->req_result = nci_spi_get_ack(skb); | ||
355 | if (ndev->req_result) | ||
356 | complete(&ndev->req_completion); | ||
357 | } | ||
358 | |||
359 | /* If there is no payload (ACK/NACK only frame), | ||
360 | * free the socket buffer | ||
361 | */ | ||
362 | if (skb->len == 0) { | ||
363 | kfree_skb(skb); | ||
364 | goto done; | ||
365 | } | ||
366 | |||
367 | if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) | ||
368 | send_acknowledge(ndev, ACKNOWLEDGE_ACK); | ||
369 | |||
370 | /* Forward skb to NCI core layer */ | ||
371 | ret = nci_recv_frame(ndev->nci_dev, skb); | ||
372 | |||
373 | done: | ||
374 | ndev->ops->assert_int(ndev); | ||
375 | |||
376 | return ret; | ||
377 | } | ||
378 | EXPORT_SYMBOL_GPL(nci_spi_recv_frame); | ||
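The new SPI glue expects a chip driver to supply open/close/assert_int/deassert_int callbacks and to call nci_spi_recv_frame() from a context that may sleep whenever the NFCC signals a pending frame, typically a threaded IRQ handler. A sketch of such a driver follows; all my_* names are hypothetical placeholders and the IRQ registration itself is elided.

#include <linux/interrupt.h>
#include <linux/nfc.h>
#include <linux/spi/spi.h>
#include <net/nfc/nci_core.h>

/* Sketch only: struct my_nfc and the my_* callbacks stand in for a real
 * chip driver; they are not part of this patch. */
struct my_nfc {
	struct nci_spi_dev *nsdev;
};

static struct nci_spi_ops my_spi_ops = {
	.open         = my_nfcc_open,
	.close        = my_nfcc_close,
	.assert_int   = my_assert_int,		/* driver-specific INT handshake */
	.deassert_int = my_deassert_int,
};

static irqreturn_t my_irq_thread(int irq, void *data)
{
	struct my_nfc *phy = data;

	/* Threaded handler: nci_spi_recv_frame() may sleep (spi_sync). */
	nci_spi_recv_frame(phy->nsdev);

	return IRQ_HANDLED;
}

static int my_spi_probe(struct spi_device *spi)
{
	struct my_nfc *phy = devm_kzalloc(&spi->dev, sizeof(*phy), GFP_KERNEL);

	if (!phy)
		return -ENOMEM;

	phy->nsdev = nci_spi_allocate_device(spi, &my_spi_ops,
					     NFC_PROTO_ISO14443_MASK |
					     NFC_PROTO_NFC_DEP_MASK,
					     0 /* supported_se */,
					     NCI_SPI_CRC_ENABLED,
					     10 /* xfer delay, us */);
	if (!phy->nsdev)
		return -ENOMEM;

	return nci_spi_register_device(phy->nsdev);
}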
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index f0c4d61f37c0..b05ad909778f 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c | |||
@@ -56,6 +56,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = { | |||
56 | [NFC_ATTR_LLC_PARAM_RW] = { .type = NLA_U8 }, | 56 | [NFC_ATTR_LLC_PARAM_RW] = { .type = NLA_U8 }, |
57 | [NFC_ATTR_LLC_PARAM_MIUX] = { .type = NLA_U16 }, | 57 | [NFC_ATTR_LLC_PARAM_MIUX] = { .type = NLA_U16 }, |
58 | [NFC_ATTR_LLC_SDP] = { .type = NLA_NESTED }, | 58 | [NFC_ATTR_LLC_SDP] = { .type = NLA_NESTED }, |
59 | [NFC_ATTR_FIRMWARE_NAME] = { .type = NLA_STRING, | ||
60 | .len = NFC_FIRMWARE_NAME_MAXSIZE }, | ||
59 | }; | 61 | }; |
60 | 62 | ||
61 | static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = { | 63 | static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = { |
@@ -424,6 +426,69 @@ free_msg: | |||
424 | return rc; | 426 | return rc; |
425 | } | 427 | } |
426 | 428 | ||
429 | int nfc_genl_se_added(struct nfc_dev *dev, u32 se_idx, u16 type) | ||
430 | { | ||
431 | struct sk_buff *msg; | ||
432 | void *hdr; | ||
433 | |||
434 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | ||
435 | if (!msg) | ||
436 | return -ENOMEM; | ||
437 | |||
438 | hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, | ||
439 | NFC_EVENT_SE_ADDED); | ||
440 | if (!hdr) | ||
441 | goto free_msg; | ||
442 | |||
443 | if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || | ||
444 | nla_put_u32(msg, NFC_ATTR_SE_INDEX, se_idx) || | ||
445 | nla_put_u8(msg, NFC_ATTR_SE_TYPE, type)) | ||
446 | goto nla_put_failure; | ||
447 | |||
448 | genlmsg_end(msg, hdr); | ||
449 | |||
450 | genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); | ||
451 | |||
452 | return 0; | ||
453 | |||
454 | nla_put_failure: | ||
455 | genlmsg_cancel(msg, hdr); | ||
456 | free_msg: | ||
457 | nlmsg_free(msg); | ||
458 | return -EMSGSIZE; | ||
459 | } | ||
460 | |||
461 | int nfc_genl_se_removed(struct nfc_dev *dev, u32 se_idx) | ||
462 | { | ||
463 | struct sk_buff *msg; | ||
464 | void *hdr; | ||
465 | |||
466 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | ||
467 | if (!msg) | ||
468 | return -ENOMEM; | ||
469 | |||
470 | hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, | ||
471 | NFC_EVENT_SE_REMOVED); | ||
472 | if (!hdr) | ||
473 | goto free_msg; | ||
474 | |||
475 | if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || | ||
476 | nla_put_u32(msg, NFC_ATTR_SE_INDEX, se_idx)) | ||
477 | goto nla_put_failure; | ||
478 | |||
479 | genlmsg_end(msg, hdr); | ||
480 | |||
481 | genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); | ||
482 | |||
483 | return 0; | ||
484 | |||
485 | nla_put_failure: | ||
486 | genlmsg_cancel(msg, hdr); | ||
487 | free_msg: | ||
488 | nlmsg_free(msg); | ||
489 | return -EMSGSIZE; | ||
490 | } | ||
491 | |||
427 | static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev, | 492 | static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev, |
428 | u32 portid, u32 seq, | 493 | u32 portid, u32 seq, |
429 | struct netlink_callback *cb, | 494 | struct netlink_callback *cb, |
@@ -442,7 +507,6 @@ static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev, | |||
442 | if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || | 507 | if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || |
443 | nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || | 508 | nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || |
444 | nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) || | 509 | nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) || |
445 | nla_put_u32(msg, NFC_ATTR_SE, dev->supported_se) || | ||
446 | nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up) || | 510 | nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up) || |
447 | nla_put_u8(msg, NFC_ATTR_RF_MODE, dev->rf_mode)) | 511 | nla_put_u8(msg, NFC_ATTR_RF_MODE, dev->rf_mode)) |
448 | goto nla_put_failure; | 512 | goto nla_put_failure; |
@@ -1025,6 +1089,108 @@ exit: | |||
1025 | return rc; | 1089 | return rc; |
1026 | } | 1090 | } |
1027 | 1091 | ||
1092 | static int nfc_genl_fw_upload(struct sk_buff *skb, struct genl_info *info) | ||
1093 | { | ||
1094 | struct nfc_dev *dev; | ||
1095 | int rc; | ||
1096 | u32 idx; | ||
1097 | char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1]; | ||
1098 | |||
1099 | if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) | ||
1100 | return -EINVAL; | ||
1101 | |||
1102 | idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); | ||
1103 | |||
1104 | dev = nfc_get_device(idx); | ||
1105 | if (!dev) | ||
1106 | return -ENODEV; | ||
1107 | |||
1108 | nla_strlcpy(firmware_name, info->attrs[NFC_ATTR_FIRMWARE_NAME], | ||
1109 | sizeof(firmware_name)); | ||
1110 | |||
1111 | rc = nfc_fw_upload(dev, firmware_name); | ||
1112 | |||
1113 | nfc_put_device(dev); | ||
1114 | return rc; | ||
1115 | } | ||
1116 | |||
1117 | int nfc_genl_fw_upload_done(struct nfc_dev *dev, const char *firmware_name) | ||
1118 | { | ||
1119 | struct sk_buff *msg; | ||
1120 | void *hdr; | ||
1121 | |||
1122 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | ||
1123 | if (!msg) | ||
1124 | return -ENOMEM; | ||
1125 | |||
1126 | hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, | ||
1127 | NFC_CMD_FW_UPLOAD); | ||
1128 | if (!hdr) | ||
1129 | goto free_msg; | ||
1130 | |||
1131 | if (nla_put_string(msg, NFC_ATTR_FIRMWARE_NAME, firmware_name) || | ||
1132 | nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) | ||
1133 | goto nla_put_failure; | ||
1134 | |||
1135 | genlmsg_end(msg, hdr); | ||
1136 | |||
1137 | genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL); | ||
1138 | |||
1139 | return 0; | ||
1140 | |||
1141 | nla_put_failure: | ||
1142 | genlmsg_cancel(msg, hdr); | ||
1143 | free_msg: | ||
1144 | nlmsg_free(msg); | ||
1145 | return -EMSGSIZE; | ||
1146 | } | ||
1147 | |||
1148 | static int nfc_genl_enable_se(struct sk_buff *skb, struct genl_info *info) | ||
1149 | { | ||
1150 | struct nfc_dev *dev; | ||
1151 | int rc; | ||
1152 | u32 idx, se_idx; | ||
1153 | |||
1154 | if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || | ||
1155 | !info->attrs[NFC_ATTR_SE_INDEX]) | ||
1156 | return -EINVAL; | ||
1157 | |||
1158 | idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); | ||
1159 | se_idx = nla_get_u32(info->attrs[NFC_ATTR_SE_INDEX]); | ||
1160 | |||
1161 | dev = nfc_get_device(idx); | ||
1162 | if (!dev) | ||
1163 | return -ENODEV; | ||
1164 | |||
1165 | rc = nfc_enable_se(dev, se_idx); | ||
1166 | |||
1167 | nfc_put_device(dev); | ||
1168 | return rc; | ||
1169 | } | ||
1170 | |||
1171 | static int nfc_genl_disable_se(struct sk_buff *skb, struct genl_info *info) | ||
1172 | { | ||
1173 | struct nfc_dev *dev; | ||
1174 | int rc; | ||
1175 | u32 idx, se_idx; | ||
1176 | |||
1177 | if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || | ||
1178 | !info->attrs[NFC_ATTR_SE_INDEX]) | ||
1179 | return -EINVAL; | ||
1180 | |||
1181 | idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); | ||
1182 | se_idx = nla_get_u32(info->attrs[NFC_ATTR_SE_INDEX]); | ||
1183 | |||
1184 | dev = nfc_get_device(idx); | ||
1185 | if (!dev) | ||
1186 | return -ENODEV; | ||
1187 | |||
1188 | rc = nfc_disable_se(dev, se_idx); | ||
1189 | |||
1190 | nfc_put_device(dev); | ||
1191 | return rc; | ||
1192 | } | ||
1193 | |||
1028 | static struct genl_ops nfc_genl_ops[] = { | 1194 | static struct genl_ops nfc_genl_ops[] = { |
1029 | { | 1195 | { |
1030 | .cmd = NFC_CMD_GET_DEVICE, | 1196 | .cmd = NFC_CMD_GET_DEVICE, |
@@ -1084,6 +1250,21 @@ static struct genl_ops nfc_genl_ops[] = { | |||
1084 | .doit = nfc_genl_llc_sdreq, | 1250 | .doit = nfc_genl_llc_sdreq, |
1085 | .policy = nfc_genl_policy, | 1251 | .policy = nfc_genl_policy, |
1086 | }, | 1252 | }, |
1253 | { | ||
1254 | .cmd = NFC_CMD_FW_UPLOAD, | ||
1255 | .doit = nfc_genl_fw_upload, | ||
1256 | .policy = nfc_genl_policy, | ||
1257 | }, | ||
1258 | { | ||
1259 | .cmd = NFC_CMD_ENABLE_SE, | ||
1260 | .doit = nfc_genl_enable_se, | ||
1261 | .policy = nfc_genl_policy, | ||
1262 | }, | ||
1263 | { | ||
1264 | .cmd = NFC_CMD_DISABLE_SE, | ||
1265 | .doit = nfc_genl_disable_se, | ||
1266 | .policy = nfc_genl_policy, | ||
1267 | }, | ||
1087 | }; | 1268 | }; |
1088 | 1269 | ||
1089 | 1270 | ||
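On the userspace side the new commands are ordinary generic netlink requests against the existing "nfc" family. A sketch of issuing NFC_CMD_FW_UPLOAD with libnl-3; error handling is trimmed and the attribute names come straight from the policy above.

#include <stdint.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nfc.h>

/* Sketch, assuming libnl-3 is available. */
int nfc_fw_upload_request(uint32_t dev_idx, const char *fw_name)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family, err;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, NFC_GENL_NAME);	/* "nfc" */

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NFC_CMD_FW_UPLOAD, NFC_GENL_VERSION);
	nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev_idx);
	nla_put_string(msg, NFC_ATTR_FIRMWARE_NAME, fw_name);

	err = nl_send_auto(sk, msg);
	if (err >= 0)
		err = nl_wait_for_ack(sk);

	nlmsg_free(msg);
	nl_socket_free(sk);

	return err < 0 ? err : 0;
}

Completion is reported asynchronously: the kernel multicasts an NFC_CMD_FW_UPLOAD notification (see nfc_genl_fw_upload_done() above) once the driver finishes.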
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h index afa1f84ba040..ee85a1fc1b24 100644 --- a/net/nfc/nfc.h +++ b/net/nfc/nfc.h | |||
@@ -94,6 +94,9 @@ int nfc_genl_tm_deactivated(struct nfc_dev *dev); | |||
94 | 94 | ||
95 | int nfc_genl_llc_send_sdres(struct nfc_dev *dev, struct hlist_head *sdres_list); | 95 | int nfc_genl_llc_send_sdres(struct nfc_dev *dev, struct hlist_head *sdres_list); |
96 | 96 | ||
97 | int nfc_genl_se_added(struct nfc_dev *dev, u32 se_idx, u16 type); | ||
98 | int nfc_genl_se_removed(struct nfc_dev *dev, u32 se_idx); | ||
99 | |||
97 | struct nfc_dev *nfc_get_device(unsigned int idx); | 100 | struct nfc_dev *nfc_get_device(unsigned int idx); |
98 | 101 | ||
99 | static inline void nfc_put_device(struct nfc_dev *dev) | 102 | static inline void nfc_put_device(struct nfc_dev *dev) |
@@ -120,6 +123,11 @@ static inline void nfc_device_iter_exit(struct class_dev_iter *iter) | |||
120 | class_dev_iter_exit(iter); | 123 | class_dev_iter_exit(iter); |
121 | } | 124 | } |
122 | 125 | ||
126 | int nfc_fw_upload(struct nfc_dev *dev, const char *firmware_name); | ||
127 | int nfc_genl_fw_upload_done(struct nfc_dev *dev, const char *firmware_name); | ||
128 | |||
129 | int nfc_fw_upload_done(struct nfc_dev *dev, const char *firmware_name); | ||
130 | |||
123 | int nfc_dev_up(struct nfc_dev *dev); | 131 | int nfc_dev_up(struct nfc_dev *dev); |
124 | 132 | ||
125 | int nfc_dev_down(struct nfc_dev *dev); | 133 | int nfc_dev_down(struct nfc_dev *dev); |
@@ -139,4 +147,7 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx); | |||
139 | int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb, | 147 | int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb, |
140 | data_exchange_cb_t cb, void *cb_context); | 148 | data_exchange_cb_t cb, void *cb_context); |
141 | 149 | ||
150 | int nfc_enable_se(struct nfc_dev *dev, u32 se_idx); | ||
151 | int nfc_disable_se(struct nfc_dev *dev, u32 se_idx); | ||
152 | |||
142 | #endif /* __LOCAL_NFC_H */ | 153 | #endif /* __LOCAL_NFC_H */ |
diff --git a/net/wireless/core.c b/net/wireless/core.c index e4df77490229..f277246080b5 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -301,6 +301,9 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) | |||
301 | return NULL; | 301 | return NULL; |
302 | } | 302 | } |
303 | 303 | ||
304 | /* atomic_inc_return makes it start at 1, make it start at 0 */ | ||
305 | rdev->wiphy_idx--; | ||
306 | |||
304 | /* give it a proper name */ | 307 | /* give it a proper name */ |
305 | dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx); | 308 | dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx); |
306 | 309 | ||
@@ -449,8 +452,13 @@ int wiphy_register(struct wiphy *wiphy) | |||
449 | u16 ifmodes = wiphy->interface_modes; | 452 | u16 ifmodes = wiphy->interface_modes; |
450 | 453 | ||
451 | #ifdef CONFIG_PM | 454 | #ifdef CONFIG_PM |
452 | if (WARN_ON((wiphy->wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && | 455 | if (WARN_ON(wiphy->wowlan && |
453 | !(wiphy->wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY))) | 456 | (wiphy->wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && |
457 | !(wiphy->wowlan->flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY))) | ||
458 | return -EINVAL; | ||
459 | if (WARN_ON(wiphy->wowlan && | ||
460 | !wiphy->wowlan->flags && !wiphy->wowlan->n_patterns && | ||
461 | !wiphy->wowlan->tcp)) | ||
454 | return -EINVAL; | 462 | return -EINVAL; |
455 | #endif | 463 | #endif |
456 | 464 | ||
@@ -540,25 +548,28 @@ int wiphy_register(struct wiphy *wiphy) | |||
540 | } | 548 | } |
541 | 549 | ||
542 | #ifdef CONFIG_PM | 550 | #ifdef CONFIG_PM |
543 | if (rdev->wiphy.wowlan.n_patterns) { | 551 | if (WARN_ON(rdev->wiphy.wowlan && rdev->wiphy.wowlan->n_patterns && |
544 | if (WARN_ON(!rdev->wiphy.wowlan.pattern_min_len || | 552 | (!rdev->wiphy.wowlan->pattern_min_len || |
545 | rdev->wiphy.wowlan.pattern_min_len > | 553 | rdev->wiphy.wowlan->pattern_min_len > |
546 | rdev->wiphy.wowlan.pattern_max_len)) | 554 | rdev->wiphy.wowlan->pattern_max_len))) |
547 | return -EINVAL; | 555 | return -EINVAL; |
548 | } | ||
549 | #endif | 556 | #endif |
550 | 557 | ||
551 | /* check and set up bitrates */ | 558 | /* check and set up bitrates */ |
552 | ieee80211_set_bitrate_flags(wiphy); | 559 | ieee80211_set_bitrate_flags(wiphy); |
553 | 560 | ||
554 | rtnl_lock(); | ||
555 | 561 | ||
556 | res = device_add(&rdev->wiphy.dev); | 562 | res = device_add(&rdev->wiphy.dev); |
563 | if (res) | ||
564 | return res; | ||
565 | |||
566 | res = rfkill_register(rdev->rfkill); | ||
557 | if (res) { | 567 | if (res) { |
558 | rtnl_unlock(); | 568 | device_del(&rdev->wiphy.dev); |
559 | return res; | 569 | return res; |
560 | } | 570 | } |
561 | 571 | ||
572 | rtnl_lock(); | ||
562 | /* set up regulatory info */ | 573 | /* set up regulatory info */ |
563 | wiphy_regulatory_register(wiphy); | 574 | wiphy_regulatory_register(wiphy); |
564 | 575 | ||
@@ -585,17 +596,6 @@ int wiphy_register(struct wiphy *wiphy) | |||
585 | 596 | ||
586 | cfg80211_debugfs_rdev_add(rdev); | 597 | cfg80211_debugfs_rdev_add(rdev); |
587 | 598 | ||
588 | res = rfkill_register(rdev->rfkill); | ||
589 | if (res) { | ||
590 | device_del(&rdev->wiphy.dev); | ||
591 | |||
592 | debugfs_remove_recursive(rdev->wiphy.debugfsdir); | ||
593 | list_del_rcu(&rdev->list); | ||
594 | wiphy_regulatory_deregister(wiphy); | ||
595 | rtnl_unlock(); | ||
596 | return res; | ||
597 | } | ||
598 | |||
599 | rdev->wiphy.registered = true; | 599 | rdev->wiphy.registered = true; |
600 | rtnl_unlock(); | 600 | rtnl_unlock(); |
601 | return 0; | 601 | return 0; |
@@ -632,11 +632,11 @@ void wiphy_unregister(struct wiphy *wiphy) | |||
632 | rtnl_unlock(); | 632 | rtnl_unlock(); |
633 | __count == 0; })); | 633 | __count == 0; })); |
634 | 634 | ||
635 | rfkill_unregister(rdev->rfkill); | ||
636 | |||
635 | rtnl_lock(); | 637 | rtnl_lock(); |
636 | rdev->wiphy.registered = false; | 638 | rdev->wiphy.registered = false; |
637 | 639 | ||
638 | rfkill_unregister(rdev->rfkill); | ||
639 | |||
640 | BUG_ON(!list_empty(&rdev->wdev_list)); | 640 | BUG_ON(!list_empty(&rdev->wdev_list)); |
641 | 641 | ||
642 | /* | 642 | /* |
@@ -816,7 +816,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, | |||
816 | pr_err("failed to add phy80211 symlink to netdev!\n"); | 816 | pr_err("failed to add phy80211 symlink to netdev!\n"); |
817 | } | 817 | } |
818 | wdev->netdev = dev; | 818 | wdev->netdev = dev; |
819 | wdev->sme_state = CFG80211_SME_IDLE; | ||
820 | #ifdef CONFIG_CFG80211_WEXT | 819 | #ifdef CONFIG_CFG80211_WEXT |
821 | wdev->wext.default_key = -1; | 820 | wdev->wext.default_key = -1; |
822 | wdev->wext.default_mgmt_key = -1; | 821 | wdev->wext.default_mgmt_key = -1; |
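These hunks rely on the companion change in this series that turns wiphy->wowlan into a pointer to a driver-provided struct wiphy_wowlan_support (NULL when WoWLAN is not supported), which is why wiphy_register() now checks the pointer before dereferencing it and rejects an all-empty descriptor. A sketch of the driver side under that assumption; the flag and pattern values below are only examples.

#include <net/cfg80211.h>

/* Sketch: how a driver advertises WoWLAN once wiphy->wowlan is a pointer. */
static struct wiphy_wowlan_support my_wowlan = {
	.flags		= WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
	.n_patterns	= 16,
	.pattern_min_len = 1,
	.pattern_max_len = 128,
};

static void my_setup_wiphy(struct wiphy *wiphy)
{
	/* Leave wiphy->wowlan NULL to advertise no WoWLAN support at all;
	 * wiphy_register() now rejects a descriptor with no flags, no
	 * patterns and no TCP support. */
	wiphy->wowlan = &my_wowlan;
}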
diff --git a/net/wireless/core.h b/net/wireless/core.h index a65eaf8a84c1..a6b45bf00f33 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -308,11 +308,6 @@ int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, | |||
308 | bool local_state_change); | 308 | bool local_state_change); |
309 | void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, | 309 | void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, |
310 | struct net_device *dev); | 310 | struct net_device *dev); |
311 | void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | ||
312 | const u8 *req_ie, size_t req_ie_len, | ||
313 | const u8 *resp_ie, size_t resp_ie_len, | ||
314 | u16 status, bool wextev, | ||
315 | struct cfg80211_bss *bss); | ||
316 | int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid, | 311 | int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid, |
317 | u16 frame_type, const u8 *match_data, | 312 | u16 frame_type, const u8 *match_data, |
318 | int match_len); | 313 | int match_len); |
@@ -328,12 +323,19 @@ void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa, | |||
328 | void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa, | 323 | void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa, |
329 | const struct ieee80211_vht_cap *vht_capa_mask); | 324 | const struct ieee80211_vht_cap *vht_capa_mask); |
330 | 325 | ||
331 | /* SME */ | 326 | /* SME events */ |
332 | int cfg80211_connect(struct cfg80211_registered_device *rdev, | 327 | int cfg80211_connect(struct cfg80211_registered_device *rdev, |
333 | struct net_device *dev, | 328 | struct net_device *dev, |
334 | struct cfg80211_connect_params *connect, | 329 | struct cfg80211_connect_params *connect, |
335 | struct cfg80211_cached_keys *connkeys, | 330 | struct cfg80211_cached_keys *connkeys, |
336 | const u8 *prev_bssid); | 331 | const u8 *prev_bssid); |
332 | void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | ||
333 | const u8 *req_ie, size_t req_ie_len, | ||
334 | const u8 *resp_ie, size_t resp_ie_len, | ||
335 | u16 status, bool wextev, | ||
336 | struct cfg80211_bss *bss); | ||
337 | void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, | ||
338 | size_t ie_len, u16 reason, bool from_ap); | ||
337 | int cfg80211_disconnect(struct cfg80211_registered_device *rdev, | 339 | int cfg80211_disconnect(struct cfg80211_registered_device *rdev, |
338 | struct net_device *dev, u16 reason, | 340 | struct net_device *dev, u16 reason, |
339 | bool wextev); | 341 | bool wextev); |
@@ -344,21 +346,21 @@ void __cfg80211_roamed(struct wireless_dev *wdev, | |||
344 | int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, | 346 | int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, |
345 | struct wireless_dev *wdev); | 347 | struct wireless_dev *wdev); |
346 | 348 | ||
349 | /* SME implementation */ | ||
347 | void cfg80211_conn_work(struct work_struct *work); | 350 | void cfg80211_conn_work(struct work_struct *work); |
348 | void cfg80211_sme_failed_assoc(struct wireless_dev *wdev); | 351 | void cfg80211_sme_scan_done(struct net_device *dev); |
349 | bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev); | 352 | bool cfg80211_sme_rx_assoc_resp(struct wireless_dev *wdev, u16 status); |
353 | void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len); | ||
354 | void cfg80211_sme_disassoc(struct wireless_dev *wdev); | ||
355 | void cfg80211_sme_deauth(struct wireless_dev *wdev); | ||
356 | void cfg80211_sme_auth_timeout(struct wireless_dev *wdev); | ||
357 | void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev); | ||
350 | 358 | ||
351 | /* internal helpers */ | 359 | /* internal helpers */ |
352 | bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher); | 360 | bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher); |
353 | int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, | 361 | int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, |
354 | struct key_params *params, int key_idx, | 362 | struct key_params *params, int key_idx, |
355 | bool pairwise, const u8 *mac_addr); | 363 | bool pairwise, const u8 *mac_addr); |
356 | void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, | ||
357 | size_t ie_len, u16 reason, bool from_ap); | ||
358 | void cfg80211_sme_scan_done(struct net_device *dev); | ||
359 | void cfg80211_sme_rx_auth(struct net_device *dev, const u8 *buf, size_t len); | ||
360 | void cfg80211_sme_disassoc(struct net_device *dev, | ||
361 | struct cfg80211_internal_bss *bss); | ||
362 | void __cfg80211_scan_done(struct work_struct *wk); | 364 | void __cfg80211_scan_done(struct work_struct *wk); |
363 | void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak); | 365 | void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak); |
364 | void __cfg80211_sched_scan_results(struct work_struct *wk); | 366 | void __cfg80211_sched_scan_results(struct work_struct *wk); |
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index 5449c5a6de84..39bff7d36768 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c | |||
@@ -43,7 +43,6 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid) | |||
43 | cfg80211_hold_bss(bss_from_pub(bss)); | 43 | cfg80211_hold_bss(bss_from_pub(bss)); |
44 | wdev->current_bss = bss_from_pub(bss); | 44 | wdev->current_bss = bss_from_pub(bss); |
45 | 45 | ||
46 | wdev->sme_state = CFG80211_SME_CONNECTED; | ||
47 | cfg80211_upload_connect_keys(wdev); | 46 | cfg80211_upload_connect_keys(wdev); |
48 | 47 | ||
49 | nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, | 48 | nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, |
@@ -64,8 +63,6 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp) | |||
64 | 63 | ||
65 | trace_cfg80211_ibss_joined(dev, bssid); | 64 | trace_cfg80211_ibss_joined(dev, bssid); |
66 | 65 | ||
67 | CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING); | ||
68 | |||
69 | ev = kzalloc(sizeof(*ev), gfp); | 66 | ev = kzalloc(sizeof(*ev), gfp); |
70 | if (!ev) | 67 | if (!ev) |
71 | return; | 68 | return; |
@@ -120,7 +117,6 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, | |||
120 | #ifdef CONFIG_CFG80211_WEXT | 117 | #ifdef CONFIG_CFG80211_WEXT |
121 | wdev->wext.ibss.chandef = params->chandef; | 118 | wdev->wext.ibss.chandef = params->chandef; |
122 | #endif | 119 | #endif |
123 | wdev->sme_state = CFG80211_SME_CONNECTING; | ||
124 | 120 | ||
125 | err = cfg80211_can_use_chan(rdev, wdev, params->chandef.chan, | 121 | err = cfg80211_can_use_chan(rdev, wdev, params->chandef.chan, |
126 | params->channel_fixed | 122 | params->channel_fixed |
@@ -134,7 +130,6 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, | |||
134 | err = rdev_join_ibss(rdev, dev, params); | 130 | err = rdev_join_ibss(rdev, dev, params); |
135 | if (err) { | 131 | if (err) { |
136 | wdev->connect_keys = NULL; | 132 | wdev->connect_keys = NULL; |
137 | wdev->sme_state = CFG80211_SME_IDLE; | ||
138 | return err; | 133 | return err; |
139 | } | 134 | } |
140 | 135 | ||
@@ -186,7 +181,6 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext) | |||
186 | } | 181 | } |
187 | 182 | ||
188 | wdev->current_bss = NULL; | 183 | wdev->current_bss = NULL; |
189 | wdev->sme_state = CFG80211_SME_IDLE; | ||
190 | wdev->ssid_len = 0; | 184 | wdev->ssid_len = 0; |
191 | #ifdef CONFIG_CFG80211_WEXT | 185 | #ifdef CONFIG_CFG80211_WEXT |
192 | if (!nowext) | 186 | if (!nowext) |
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c index 5dfb289ab761..30c49202ee4d 100644 --- a/net/wireless/mesh.c +++ b/net/wireless/mesh.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #define MESH_PATH_TO_ROOT_TIMEOUT 6000 | 18 | #define MESH_PATH_TO_ROOT_TIMEOUT 6000 |
19 | #define MESH_ROOT_INTERVAL 5000 | 19 | #define MESH_ROOT_INTERVAL 5000 |
20 | #define MESH_ROOT_CONFIRMATION_INTERVAL 2000 | 20 | #define MESH_ROOT_CONFIRMATION_INTERVAL 2000 |
21 | #define MESH_DEFAULT_PLINK_TIMEOUT 1800 /* timeout in seconds */ | ||
21 | 22 | ||
22 | /* | 23 | /* |
23 | * Minimum interval between two consecutive PREQs originated by the same | 24 | * Minimum interval between two consecutive PREQs originated by the same |
@@ -75,6 +76,7 @@ const struct mesh_config default_mesh_config = { | |||
75 | .dot11MeshHWMPconfirmationInterval = MESH_ROOT_CONFIRMATION_INTERVAL, | 76 | .dot11MeshHWMPconfirmationInterval = MESH_ROOT_CONFIRMATION_INTERVAL, |
76 | .power_mode = NL80211_MESH_POWER_ACTIVE, | 77 | .power_mode = NL80211_MESH_POWER_ACTIVE, |
77 | .dot11MeshAwakeWindowDuration = MESH_DEFAULT_AWAKE_WINDOW, | 78 | .dot11MeshAwakeWindowDuration = MESH_DEFAULT_AWAKE_WINDOW, |
79 | .plink_timeout = MESH_DEFAULT_PLINK_TIMEOUT, | ||
78 | }; | 80 | }; |
79 | 81 | ||
80 | const struct mesh_setup default_mesh_setup = { | 82 | const struct mesh_setup default_mesh_setup = { |
@@ -160,6 +162,16 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev, | |||
160 | setup->chandef.center_freq1 = setup->chandef.chan->center_freq; | 162 | setup->chandef.center_freq1 = setup->chandef.chan->center_freq; |
161 | } | 163 | } |
162 | 164 | ||
165 | /* | ||
166 | * check if basic rates are available, otherwise use mandatory rates as | ||
167 | * basic rates | ||
168 | */ | ||
169 | if (!setup->basic_rates) { | ||
170 | struct ieee80211_supported_band *sband = | ||
171 | rdev->wiphy.bands[setup->chandef.chan->band]; | ||
172 | setup->basic_rates = ieee80211_mandatory_rates(sband); | ||
173 | } | ||
174 | |||
163 | if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef)) | 175 | if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef)) |
164 | return -EINVAL; | 176 | return -EINVAL; |
165 | 177 | ||
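The fallback added above fills setup->basic_rates with the band's mandatory rates when userspace supplies none; the value is a bitmap indexing sband->bitrates[]. A minimal sketch of expanding such a bitmap, assuming a hypothetical debugging helper (print_basic_rates is not part of this patch):

#include <linux/bitops.h>
#include <linux/printk.h>
#include <net/cfg80211.h>

/* Hypothetical helper: expand a basic_rates bitmap into readable rates. */
static void print_basic_rates(struct ieee80211_supported_band *sband,
			      u32 basic_rates)
{
	int i;

	for (i = 0; i < sband->n_bitrates; i++) {
		if (!(basic_rates & BIT(i)))
			continue;
		/* ieee80211_rate.bitrate is in units of 100 kbps */
		pr_debug("basic rate: %d.%d Mbps\n",
			 sband->bitrates[i].bitrate / 10,
			 sband->bitrates[i].bitrate % 10);
	}
}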
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 7bde5d9c0003..a61a44bc6cf0 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c | |||
@@ -18,150 +18,107 @@ | |||
18 | #include "rdev-ops.h" | 18 | #include "rdev-ops.h" |
19 | 19 | ||
20 | 20 | ||
21 | void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len) | 21 | void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss, |
22 | { | ||
23 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
24 | struct wiphy *wiphy = wdev->wiphy; | ||
25 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | ||
26 | |||
27 | trace_cfg80211_send_rx_auth(dev); | ||
28 | |||
29 | nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL); | ||
30 | cfg80211_sme_rx_auth(dev, buf, len); | ||
31 | } | ||
32 | EXPORT_SYMBOL(cfg80211_send_rx_auth); | ||
33 | |||
34 | void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss, | ||
35 | const u8 *buf, size_t len) | 22 | const u8 *buf, size_t len) |
36 | { | 23 | { |
37 | u16 status_code; | ||
38 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 24 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
39 | struct wiphy *wiphy = wdev->wiphy; | 25 | struct wiphy *wiphy = wdev->wiphy; |
40 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 26 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
41 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; | 27 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; |
42 | u8 *ie = mgmt->u.assoc_resp.variable; | 28 | u8 *ie = mgmt->u.assoc_resp.variable; |
43 | int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); | 29 | int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); |
30 | u16 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); | ||
44 | 31 | ||
45 | trace_cfg80211_send_rx_assoc(dev, bss); | 32 | trace_cfg80211_send_rx_assoc(dev, bss); |
46 | 33 | ||
47 | status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); | ||
48 | |||
49 | /* | 34 | /* |
50 | * This is a bit of a hack, we don't notify userspace of | 35 | * This is a bit of a hack, we don't notify userspace of |
51 | * a (re-)association reply if we tried to send a reassoc | 36 | * a (re-)association reply if we tried to send a reassoc |
52 | * and got a reject -- we only try again with an assoc | 37 | * and got a reject -- we only try again with an assoc |
53 | * frame instead of reassoc. | 38 | * frame instead of reassoc. |
54 | */ | 39 | */ |
55 | if (status_code != WLAN_STATUS_SUCCESS && wdev->conn && | 40 | if (cfg80211_sme_rx_assoc_resp(wdev, status_code)) { |
56 | cfg80211_sme_failed_reassoc(wdev)) { | ||
57 | cfg80211_put_bss(wiphy, bss); | 41 | cfg80211_put_bss(wiphy, bss); |
58 | return; | 42 | return; |
59 | } | 43 | } |
60 | 44 | ||
61 | nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL); | 45 | nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL); |
62 | 46 | /* update current_bss etc., consumes the bss reference */ | |
63 | if (status_code != WLAN_STATUS_SUCCESS && wdev->conn) { | ||
64 | cfg80211_sme_failed_assoc(wdev); | ||
65 | /* | ||
66 | * do not call connect_result() now because the | ||
67 | * sme will schedule work that does it later. | ||
68 | */ | ||
69 | cfg80211_put_bss(wiphy, bss); | ||
70 | return; | ||
71 | } | ||
72 | |||
73 | if (!wdev->conn && wdev->sme_state == CFG80211_SME_IDLE) { | ||
74 | /* | ||
75 | * This is for the userspace SME, the CONNECTING | ||
76 | * state will be changed to CONNECTED by | ||
77 | * __cfg80211_connect_result() below. | ||
78 | */ | ||
79 | wdev->sme_state = CFG80211_SME_CONNECTING; | ||
80 | } | ||
81 | |||
82 | /* this consumes the bss reference */ | ||
83 | __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs, | 47 | __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs, |
84 | status_code, | 48 | status_code, |
85 | status_code == WLAN_STATUS_SUCCESS, bss); | 49 | status_code == WLAN_STATUS_SUCCESS, bss); |
86 | } | 50 | } |
87 | EXPORT_SYMBOL(cfg80211_send_rx_assoc); | 51 | EXPORT_SYMBOL(cfg80211_rx_assoc_resp); |
88 | 52 | ||
89 | void cfg80211_send_deauth(struct net_device *dev, | 53 | static void cfg80211_process_auth(struct wireless_dev *wdev, |
90 | const u8 *buf, size_t len) | 54 | const u8 *buf, size_t len) |
91 | { | 55 | { |
92 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 56 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); |
93 | struct wiphy *wiphy = wdev->wiphy; | 57 | |
94 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 58 | nl80211_send_rx_auth(rdev, wdev->netdev, buf, len, GFP_KERNEL); |
59 | cfg80211_sme_rx_auth(wdev, buf, len); | ||
60 | } | ||
61 | |||
62 | static void cfg80211_process_deauth(struct wireless_dev *wdev, | ||
63 | const u8 *buf, size_t len) | ||
64 | { | ||
65 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
95 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; | 66 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; |
96 | const u8 *bssid = mgmt->bssid; | 67 | const u8 *bssid = mgmt->bssid; |
97 | bool was_current = false; | 68 | u16 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); |
69 | bool from_ap = !ether_addr_equal(mgmt->sa, wdev->netdev->dev_addr); | ||
98 | 70 | ||
99 | trace_cfg80211_send_deauth(dev); | 71 | nl80211_send_deauth(rdev, wdev->netdev, buf, len, GFP_KERNEL); |
100 | ASSERT_WDEV_LOCK(wdev); | ||
101 | 72 | ||
102 | if (wdev->current_bss && | 73 | if (!wdev->current_bss || |
103 | ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) { | 74 | !ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) |
104 | cfg80211_unhold_bss(wdev->current_bss); | 75 | return; |
105 | cfg80211_put_bss(wiphy, &wdev->current_bss->pub); | 76 | |
106 | wdev->current_bss = NULL; | 77 | __cfg80211_disconnected(wdev->netdev, NULL, 0, reason_code, from_ap); |
107 | was_current = true; | 78 | cfg80211_sme_deauth(wdev); |
108 | } | 79 | } |
109 | 80 | ||
110 | nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL); | 81 | static void cfg80211_process_disassoc(struct wireless_dev *wdev, |
82 | const u8 *buf, size_t len) | ||
83 | { | ||
84 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
85 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; | ||
86 | const u8 *bssid = mgmt->bssid; | ||
87 | u16 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); | ||
88 | bool from_ap = !ether_addr_equal(mgmt->sa, wdev->netdev->dev_addr); | ||
111 | 89 | ||
112 | if (wdev->sme_state == CFG80211_SME_CONNECTED && was_current) { | 90 | nl80211_send_disassoc(rdev, wdev->netdev, buf, len, GFP_KERNEL); |
113 | u16 reason_code; | ||
114 | bool from_ap; | ||
115 | 91 | ||
116 | reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); | 92 | if (WARN_ON(!wdev->current_bss || |
93 | !ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) | ||
94 | return; | ||
117 | 95 | ||
118 | from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr); | 96 | __cfg80211_disconnected(wdev->netdev, NULL, 0, reason_code, from_ap); |
119 | __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); | 97 | cfg80211_sme_disassoc(wdev); |
120 | } else if (wdev->sme_state == CFG80211_SME_CONNECTING) { | ||
121 | __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0, | ||
122 | WLAN_STATUS_UNSPECIFIED_FAILURE, | ||
123 | false, NULL); | ||
124 | } | ||
125 | } | 98 | } |
126 | EXPORT_SYMBOL(cfg80211_send_deauth); | ||
127 | 99 | ||
128 | void cfg80211_send_disassoc(struct net_device *dev, | 100 | void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len) |
129 | const u8 *buf, size_t len) | ||
130 | { | 101 | { |
131 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 102 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
132 | struct wiphy *wiphy = wdev->wiphy; | 103 | struct ieee80211_mgmt *mgmt = (void *)buf; |
133 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | ||
134 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; | ||
135 | const u8 *bssid = mgmt->bssid; | ||
136 | u16 reason_code; | ||
137 | bool from_ap; | ||
138 | 104 | ||
139 | trace_cfg80211_send_disassoc(dev); | ||
140 | ASSERT_WDEV_LOCK(wdev); | 105 | ASSERT_WDEV_LOCK(wdev); |
141 | 106 | ||
142 | nl80211_send_disassoc(rdev, dev, buf, len, GFP_KERNEL); | 107 | trace_cfg80211_rx_mlme_mgmt(dev, buf, len); |
143 | 108 | ||
144 | if (wdev->sme_state != CFG80211_SME_CONNECTED) | 109 | if (WARN_ON(len < 2)) |
145 | return; | 110 | return; |
146 | 111 | ||
147 | if (wdev->current_bss && | 112 | if (ieee80211_is_auth(mgmt->frame_control)) |
148 | ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) { | 113 | cfg80211_process_auth(wdev, buf, len); |
149 | cfg80211_sme_disassoc(dev, wdev->current_bss); | 114 | else if (ieee80211_is_deauth(mgmt->frame_control)) |
150 | cfg80211_unhold_bss(wdev->current_bss); | 115 | cfg80211_process_deauth(wdev, buf, len); |
151 | cfg80211_put_bss(wiphy, &wdev->current_bss->pub); | 116 | else if (ieee80211_is_disassoc(mgmt->frame_control)) |
152 | wdev->current_bss = NULL; | 117 | cfg80211_process_disassoc(wdev, buf, len); |
153 | } else | ||
154 | WARN_ON(1); | ||
155 | |||
156 | |||
157 | reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); | ||
158 | |||
159 | from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr); | ||
160 | __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); | ||
161 | } | 118 | } |
162 | EXPORT_SYMBOL(cfg80211_send_disassoc); | 119 | EXPORT_SYMBOL(cfg80211_rx_mlme_mgmt); |
163 | 120 | ||
164 | void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr) | 121 | void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr) |
165 | { | 122 | { |
166 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 123 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
167 | struct wiphy *wiphy = wdev->wiphy; | 124 | struct wiphy *wiphy = wdev->wiphy; |
@@ -170,14 +127,11 @@ void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr) | |||
170 | trace_cfg80211_send_auth_timeout(dev, addr); | 127 | trace_cfg80211_send_auth_timeout(dev, addr); |
171 | 128 | ||
172 | nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL); | 129 | nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL); |
173 | if (wdev->sme_state == CFG80211_SME_CONNECTING) | 130 | cfg80211_sme_auth_timeout(wdev); |
174 | __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0, | ||
175 | WLAN_STATUS_UNSPECIFIED_FAILURE, | ||
176 | false, NULL); | ||
177 | } | 131 | } |
178 | EXPORT_SYMBOL(cfg80211_send_auth_timeout); | 132 | EXPORT_SYMBOL(cfg80211_auth_timeout); |
179 | 133 | ||
180 | void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr) | 134 | void cfg80211_assoc_timeout(struct net_device *dev, const u8 *addr) |
181 | { | 135 | { |
182 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 136 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
183 | struct wiphy *wiphy = wdev->wiphy; | 137 | struct wiphy *wiphy = wdev->wiphy; |
@@ -186,12 +140,28 @@ void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr) | |||
186 | trace_cfg80211_send_assoc_timeout(dev, addr); | 140 | trace_cfg80211_send_assoc_timeout(dev, addr); |
187 | 141 | ||
188 | nl80211_send_assoc_timeout(rdev, dev, addr, GFP_KERNEL); | 142 | nl80211_send_assoc_timeout(rdev, dev, addr, GFP_KERNEL); |
189 | if (wdev->sme_state == CFG80211_SME_CONNECTING) | 143 | cfg80211_sme_assoc_timeout(wdev); |
190 | __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0, | ||
191 | WLAN_STATUS_UNSPECIFIED_FAILURE, | ||
192 | false, NULL); | ||
193 | } | 144 | } |
194 | EXPORT_SYMBOL(cfg80211_send_assoc_timeout); | 145 | EXPORT_SYMBOL(cfg80211_assoc_timeout); |
146 | |||
147 | void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len) | ||
148 | { | ||
149 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
150 | struct ieee80211_mgmt *mgmt = (void *)buf; | ||
151 | |||
152 | ASSERT_WDEV_LOCK(wdev); | ||
153 | |||
154 | trace_cfg80211_tx_mlme_mgmt(dev, buf, len); | ||
155 | |||
156 | if (WARN_ON(len < 2)) | ||
157 | return; | ||
158 | |||
159 | if (ieee80211_is_deauth(mgmt->frame_control)) | ||
160 | cfg80211_process_deauth(wdev, buf, len); | ||
161 | else | ||
162 | cfg80211_process_disassoc(wdev, buf, len); | ||
163 | } | ||
164 | EXPORT_SYMBOL(cfg80211_tx_mlme_mgmt); | ||
195 | 165 | ||
196 | void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr, | 166 | void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr, |
197 | enum nl80211_key_type key_type, int key_id, | 167 | enum nl80211_key_type key_type, int key_id, |
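The hunk above folds the old cfg80211_send_rx_auth/_deauth/_disassoc exports into two entry points, cfg80211_rx_mlme_mgmt() and cfg80211_rx_assoc_resp(). A rough sketch of how a driver's RX path might feed them; drv_rx_mlme_frame and its calling context are assumptions rather than part of this patch, and the caller must hold the wdev mutex as the ASSERT_WDEV_LOCK() above requires:

#include <linux/ieee80211.h>
#include <net/cfg80211.h>

/* Hypothetical driver RX hook: route MLME frames to the new entry points. */
static void drv_rx_mlme_frame(struct net_device *dev,
			      struct cfg80211_bss *bss,
			      const u8 *frame, size_t len)
{
	const struct ieee80211_mgmt *mgmt = (const void *)frame;

	if (ieee80211_is_assoc_resp(mgmt->frame_control) ||
	    ieee80211_is_reassoc_resp(mgmt->frame_control))
		/* association responses keep their dedicated handler */
		cfg80211_rx_assoc_resp(dev, bss, frame, len);
	else
		/* auth, deauth and disassoc now share one dispatcher */
		cfg80211_rx_mlme_mgmt(dev, frame, len);
}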
@@ -314,21 +284,12 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, | |||
314 | { | 284 | { |
315 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 285 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
316 | int err; | 286 | int err; |
317 | bool was_connected = false; | ||
318 | 287 | ||
319 | ASSERT_WDEV_LOCK(wdev); | 288 | ASSERT_WDEV_LOCK(wdev); |
320 | 289 | ||
321 | if (wdev->current_bss && req->prev_bssid && | 290 | if (wdev->current_bss && |
322 | ether_addr_equal(wdev->current_bss->pub.bssid, req->prev_bssid)) { | 291 | (!req->prev_bssid || !ether_addr_equal(wdev->current_bss->pub.bssid, |
323 | /* | 292 | req->prev_bssid))) |
324 | * Trying to reassociate: Allow this to proceed and let the old | ||
325 | * association to be dropped when the new one is completed. | ||
326 | */ | ||
327 | if (wdev->sme_state == CFG80211_SME_CONNECTED) { | ||
328 | was_connected = true; | ||
329 | wdev->sme_state = CFG80211_SME_CONNECTING; | ||
330 | } | ||
331 | } else if (wdev->current_bss) | ||
332 | return -EALREADY; | 293 | return -EALREADY; |
333 | 294 | ||
334 | cfg80211_oper_and_ht_capa(&req->ht_capa_mask, | 295 | cfg80211_oper_and_ht_capa(&req->ht_capa_mask, |
@@ -338,11 +299,8 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, | |||
338 | 299 | ||
339 | req->bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, | 300 | req->bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, |
340 | WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); | 301 | WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); |
341 | if (!req->bss) { | 302 | if (!req->bss) |
342 | if (was_connected) | ||
343 | wdev->sme_state = CFG80211_SME_CONNECTED; | ||
344 | return -ENOENT; | 303 | return -ENOENT; |
345 | } | ||
346 | 304 | ||
347 | err = cfg80211_can_use_chan(rdev, wdev, chan, CHAN_MODE_SHARED); | 305 | err = cfg80211_can_use_chan(rdev, wdev, chan, CHAN_MODE_SHARED); |
348 | if (err) | 306 | if (err) |
@@ -351,11 +309,8 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, | |||
351 | err = rdev_assoc(rdev, dev, req); | 309 | err = rdev_assoc(rdev, dev, req); |
352 | 310 | ||
353 | out: | 311 | out: |
354 | if (err) { | 312 | if (err) |
355 | if (was_connected) | ||
356 | wdev->sme_state = CFG80211_SME_CONNECTED; | ||
357 | cfg80211_put_bss(&rdev->wiphy, req->bss); | 313 | cfg80211_put_bss(&rdev->wiphy, req->bss); |
358 | } | ||
359 | 314 | ||
360 | return err; | 315 | return err; |
361 | } | 316 | } |
@@ -376,8 +331,9 @@ int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, | |||
376 | 331 | ||
377 | ASSERT_WDEV_LOCK(wdev); | 332 | ASSERT_WDEV_LOCK(wdev); |
378 | 333 | ||
379 | if (local_state_change && (!wdev->current_bss || | 334 | if (local_state_change && |
380 | !ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) | 335 | (!wdev->current_bss || |
336 | !ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) | ||
381 | return 0; | 337 | return 0; |
382 | 338 | ||
383 | return rdev_deauth(rdev, dev, &req); | 339 | return rdev_deauth(rdev, dev, &req); |
@@ -395,13 +351,11 @@ int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, | |||
395 | .ie = ie, | 351 | .ie = ie, |
396 | .ie_len = ie_len, | 352 | .ie_len = ie_len, |
397 | }; | 353 | }; |
354 | int err; | ||
398 | 355 | ||
399 | ASSERT_WDEV_LOCK(wdev); | 356 | ASSERT_WDEV_LOCK(wdev); |
400 | 357 | ||
401 | if (wdev->sme_state != CFG80211_SME_CONNECTED) | 358 | if (!wdev->current_bss) |
402 | return -ENOTCONN; | ||
403 | |||
404 | if (WARN(!wdev->current_bss, "sme_state=%d\n", wdev->sme_state)) | ||
405 | return -ENOTCONN; | 359 | return -ENOTCONN; |
406 | 360 | ||
407 | if (ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) | 361 | if (ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) |
@@ -409,7 +363,13 @@ int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, | |||
409 | else | 363 | else |
410 | return -ENOTCONN; | 364 | return -ENOTCONN; |
411 | 365 | ||
412 | return rdev_disassoc(rdev, dev, &req); | 366 | err = rdev_disassoc(rdev, dev, &req); |
367 | if (err) | ||
368 | return err; | ||
369 | |||
370 | /* driver should have reported the disassoc */ | ||
371 | WARN_ON(wdev->current_bss); | ||
372 | return 0; | ||
413 | } | 373 | } |
414 | 374 | ||
415 | void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, | 375 | void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, |
@@ -417,10 +377,6 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, | |||
417 | { | 377 | { |
418 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 378 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
419 | u8 bssid[ETH_ALEN]; | 379 | u8 bssid[ETH_ALEN]; |
420 | struct cfg80211_deauth_request req = { | ||
421 | .reason_code = WLAN_REASON_DEAUTH_LEAVING, | ||
422 | .bssid = bssid, | ||
423 | }; | ||
424 | 380 | ||
425 | ASSERT_WDEV_LOCK(wdev); | 381 | ASSERT_WDEV_LOCK(wdev); |
426 | 382 | ||
@@ -431,13 +387,8 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, | |||
431 | return; | 387 | return; |
432 | 388 | ||
433 | memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN); | 389 | memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN); |
434 | rdev_deauth(rdev, dev, &req); | 390 | cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0, |
435 | 391 | WLAN_REASON_DEAUTH_LEAVING, false); | |
436 | if (wdev->current_bss) { | ||
437 | cfg80211_unhold_bss(wdev->current_bss); | ||
438 | cfg80211_put_bss(&rdev->wiphy, &wdev->current_bss->pub); | ||
439 | wdev->current_bss = NULL; | ||
440 | } | ||
441 | } | 392 | } |
442 | 393 | ||
443 | struct cfg80211_mgmt_registration { | 394 | struct cfg80211_mgmt_registration { |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index ea74b9dd9d82..e545023e2871 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -800,12 +800,9 @@ static int nl80211_key_allowed(struct wireless_dev *wdev) | |||
800 | case NL80211_IFTYPE_MESH_POINT: | 800 | case NL80211_IFTYPE_MESH_POINT: |
801 | break; | 801 | break; |
802 | case NL80211_IFTYPE_ADHOC: | 802 | case NL80211_IFTYPE_ADHOC: |
803 | if (!wdev->current_bss) | ||
804 | return -ENOLINK; | ||
805 | break; | ||
806 | case NL80211_IFTYPE_STATION: | 803 | case NL80211_IFTYPE_STATION: |
807 | case NL80211_IFTYPE_P2P_CLIENT: | 804 | case NL80211_IFTYPE_P2P_CLIENT: |
808 | if (wdev->sme_state != CFG80211_SME_CONNECTED) | 805 | if (!wdev->current_bss) |
809 | return -ENOLINK; | 806 | return -ENOLINK; |
810 | break; | 807 | break; |
811 | default: | 808 | default: |
@@ -908,7 +905,7 @@ nla_put_failure: | |||
908 | static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev, | 905 | static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev, |
909 | struct sk_buff *msg) | 906 | struct sk_buff *msg) |
910 | { | 907 | { |
911 | const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan.tcp; | 908 | const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan->tcp; |
912 | struct nlattr *nl_tcp; | 909 | struct nlattr *nl_tcp; |
913 | 910 | ||
914 | if (!tcp) | 911 | if (!tcp) |
@@ -951,37 +948,37 @@ static int nl80211_send_wowlan(struct sk_buff *msg, | |||
951 | { | 948 | { |
952 | struct nlattr *nl_wowlan; | 949 | struct nlattr *nl_wowlan; |
953 | 950 | ||
954 | if (!dev->wiphy.wowlan.flags && !dev->wiphy.wowlan.n_patterns) | 951 | if (!dev->wiphy.wowlan) |
955 | return 0; | 952 | return 0; |
956 | 953 | ||
957 | nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED); | 954 | nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED); |
958 | if (!nl_wowlan) | 955 | if (!nl_wowlan) |
959 | return -ENOBUFS; | 956 | return -ENOBUFS; |
960 | 957 | ||
961 | if (((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY) && | 958 | if (((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_ANY) && |
962 | nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) || | 959 | nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) || |
963 | ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT) && | 960 | ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_DISCONNECT) && |
964 | nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) || | 961 | nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) || |
965 | ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT) && | 962 | ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_MAGIC_PKT) && |
966 | nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) || | 963 | nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) || |
967 | ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) && | 964 | ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) && |
968 | nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) || | 965 | nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) || |
969 | ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && | 966 | ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && |
970 | nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) || | 967 | nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) || |
971 | ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) && | 968 | ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) && |
972 | nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) || | 969 | nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) || |
973 | ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) && | 970 | ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) && |
974 | nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) || | 971 | nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) || |
975 | ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE) && | 972 | ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_RFKILL_RELEASE) && |
976 | nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))) | 973 | nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))) |
977 | return -ENOBUFS; | 974 | return -ENOBUFS; |
978 | 975 | ||
979 | if (dev->wiphy.wowlan.n_patterns) { | 976 | if (dev->wiphy.wowlan->n_patterns) { |
980 | struct nl80211_wowlan_pattern_support pat = { | 977 | struct nl80211_wowlan_pattern_support pat = { |
981 | .max_patterns = dev->wiphy.wowlan.n_patterns, | 978 | .max_patterns = dev->wiphy.wowlan->n_patterns, |
982 | .min_pattern_len = dev->wiphy.wowlan.pattern_min_len, | 979 | .min_pattern_len = dev->wiphy.wowlan->pattern_min_len, |
983 | .max_pattern_len = dev->wiphy.wowlan.pattern_max_len, | 980 | .max_pattern_len = dev->wiphy.wowlan->pattern_max_len, |
984 | .max_pkt_offset = dev->wiphy.wowlan.max_pkt_offset, | 981 | .max_pkt_offset = dev->wiphy.wowlan->max_pkt_offset, |
985 | }; | 982 | }; |
986 | 983 | ||
987 | if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, | 984 | if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, |
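These hunks track the conversion of wiphy.wowlan from an embedded struct to a const pointer, so a single NULL check replaces the old flags/n_patterns/tcp tests. On the driver side the pattern would look roughly like the following sketch; my_wowlan_support and my_wiphy_setup are illustrative names, not taken from this patch:

#include <net/cfg80211.h>

/* Illustrative driver-side setup for the const-pointer WoWLAN API. */
static const struct wiphy_wowlan_support my_wowlan_support = {
	.flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT |
		 WIPHY_WOWLAN_MAGIC_PKT,
	.n_patterns = 16,
	.pattern_min_len = 1,
	.pattern_max_len = 128,
};

static void my_wiphy_setup(struct wiphy *wiphy)
{
	/* leaving wiphy->wowlan NULL means "no WoWLAN support" */
	wiphy->wowlan = &my_wowlan_support;
}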
@@ -3986,10 +3983,10 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) | |||
3986 | params.listen_interval = | 3983 | params.listen_interval = |
3987 | nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); | 3984 | nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); |
3988 | 3985 | ||
3989 | if (info->attrs[NL80211_ATTR_STA_AID]) | 3986 | if (info->attrs[NL80211_ATTR_PEER_AID]) |
3990 | params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); | ||
3991 | else | ||
3992 | params.aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]); | 3987 | params.aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]); |
3988 | else | ||
3989 | params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); | ||
3993 | if (!params.aid || params.aid > IEEE80211_MAX_AID) | 3990 | if (!params.aid || params.aid > IEEE80211_MAX_AID) |
3994 | return -EINVAL; | 3991 | return -EINVAL; |
3995 | 3992 | ||
@@ -4041,7 +4038,8 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) | |||
4041 | params.sta_modify_mask &= ~STATION_PARAM_APPLY_UAPSD; | 4038 | params.sta_modify_mask &= ~STATION_PARAM_APPLY_UAPSD; |
4042 | 4039 | ||
4043 | /* TDLS peers cannot be added */ | 4040 | /* TDLS peers cannot be added */ |
4044 | if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) | 4041 | if ((params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) || |
4042 | info->attrs[NL80211_ATTR_PEER_AID]) | ||
4045 | return -EINVAL; | 4043 | return -EINVAL; |
4046 | /* but don't bother the driver with it */ | 4044 | /* but don't bother the driver with it */ |
4047 | params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER); | 4045 | params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER); |
@@ -4067,7 +4065,8 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) | |||
4067 | if (params.sta_flags_mask & BIT(NL80211_STA_FLAG_ASSOCIATED)) | 4065 | if (params.sta_flags_mask & BIT(NL80211_STA_FLAG_ASSOCIATED)) |
4068 | return -EINVAL; | 4066 | return -EINVAL; |
4069 | /* TDLS peers cannot be added */ | 4067 | /* TDLS peers cannot be added */ |
4070 | if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) | 4068 | if ((params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) || |
4069 | info->attrs[NL80211_ATTR_PEER_AID]) | ||
4071 | return -EINVAL; | 4070 | return -EINVAL; |
4072 | break; | 4071 | break; |
4073 | case NL80211_IFTYPE_STATION: | 4072 | case NL80211_IFTYPE_STATION: |
@@ -4589,7 +4588,9 @@ static int nl80211_get_mesh_config(struct sk_buff *skb, | |||
4589 | nla_put_u32(msg, NL80211_MESHCONF_POWER_MODE, | 4588 | nla_put_u32(msg, NL80211_MESHCONF_POWER_MODE, |
4590 | cur_params.power_mode) || | 4589 | cur_params.power_mode) || |
4591 | nla_put_u16(msg, NL80211_MESHCONF_AWAKE_WINDOW, | 4590 | nla_put_u16(msg, NL80211_MESHCONF_AWAKE_WINDOW, |
4592 | cur_params.dot11MeshAwakeWindowDuration)) | 4591 | cur_params.dot11MeshAwakeWindowDuration) || |
4592 | nla_put_u32(msg, NL80211_MESHCONF_PLINK_TIMEOUT, | ||
4593 | cur_params.plink_timeout)) | ||
4593 | goto nla_put_failure; | 4594 | goto nla_put_failure; |
4594 | nla_nest_end(msg, pinfoattr); | 4595 | nla_nest_end(msg, pinfoattr); |
4595 | genlmsg_end(msg, hdr); | 4596 | genlmsg_end(msg, hdr); |
@@ -4630,6 +4631,7 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A | |||
4630 | [NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL] = { .type = NLA_U16 }, | 4631 | [NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL] = { .type = NLA_U16 }, |
4631 | [NL80211_MESHCONF_POWER_MODE] = { .type = NLA_U32 }, | 4632 | [NL80211_MESHCONF_POWER_MODE] = { .type = NLA_U32 }, |
4632 | [NL80211_MESHCONF_AWAKE_WINDOW] = { .type = NLA_U16 }, | 4633 | [NL80211_MESHCONF_AWAKE_WINDOW] = { .type = NLA_U16 }, |
4634 | [NL80211_MESHCONF_PLINK_TIMEOUT] = { .type = NLA_U32 }, | ||
4633 | }; | 4635 | }; |
4634 | 4636 | ||
4635 | static const struct nla_policy | 4637 | static const struct nla_policy |
@@ -4767,6 +4769,9 @@ do { \ | |||
4767 | FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration, | 4769 | FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration, |
4768 | 0, 65535, mask, | 4770 | 0, 65535, mask, |
4769 | NL80211_MESHCONF_AWAKE_WINDOW, nla_get_u16); | 4771 | NL80211_MESHCONF_AWAKE_WINDOW, nla_get_u16); |
4772 | FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, 1, 0xffffffff, | ||
4773 | mask, NL80211_MESHCONF_PLINK_TIMEOUT, | ||
4774 | nla_get_u32); | ||
4770 | if (mask_out) | 4775 | if (mask_out) |
4771 | *mask_out = mask; | 4776 | *mask_out = mask; |
4772 | 4777 | ||
@@ -7153,6 +7158,9 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) | |||
7153 | return -EOPNOTSUPP; | 7158 | return -EOPNOTSUPP; |
7154 | 7159 | ||
7155 | switch (wdev->iftype) { | 7160 | switch (wdev->iftype) { |
7161 | case NL80211_IFTYPE_P2P_DEVICE: | ||
7162 | if (!info->attrs[NL80211_ATTR_WIPHY_FREQ]) | ||
7163 | return -EINVAL; | ||
7156 | case NL80211_IFTYPE_STATION: | 7164 | case NL80211_IFTYPE_STATION: |
7157 | case NL80211_IFTYPE_ADHOC: | 7165 | case NL80211_IFTYPE_ADHOC: |
7158 | case NL80211_IFTYPE_P2P_CLIENT: | 7166 | case NL80211_IFTYPE_P2P_CLIENT: |
@@ -7160,7 +7168,6 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) | |||
7160 | case NL80211_IFTYPE_AP_VLAN: | 7168 | case NL80211_IFTYPE_AP_VLAN: |
7161 | case NL80211_IFTYPE_MESH_POINT: | 7169 | case NL80211_IFTYPE_MESH_POINT: |
7162 | case NL80211_IFTYPE_P2P_GO: | 7170 | case NL80211_IFTYPE_P2P_GO: |
7163 | case NL80211_IFTYPE_P2P_DEVICE: | ||
7164 | break; | 7171 | break; |
7165 | default: | 7172 | default: |
7166 | return -EOPNOTSUPP; | 7173 | return -EOPNOTSUPP; |
@@ -7188,9 +7195,18 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) | |||
7188 | 7195 | ||
7189 | no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]); | 7196 | no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]); |
7190 | 7197 | ||
7191 | err = nl80211_parse_chandef(rdev, info, &chandef); | 7198 | /* get the channel if any has been specified, otherwise pass NULL to |
7192 | if (err) | 7199 | * the driver. The latter will use the current one |
7193 | return err; | 7200 | */ |
7201 | chandef.chan = NULL; | ||
7202 | if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { | ||
7203 | err = nl80211_parse_chandef(rdev, info, &chandef); | ||
7204 | if (err) | ||
7205 | return err; | ||
7206 | } | ||
7207 | |||
7208 | if (!chandef.chan && offchan) | ||
7209 | return -EINVAL; | ||
7194 | 7210 | ||
7195 | if (!dont_wait_for_ack) { | 7211 | if (!dont_wait_for_ack) { |
7196 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | 7212 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); |
@@ -7495,6 +7511,23 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info) | |||
7495 | setup.chandef.chan = NULL; | 7511 | setup.chandef.chan = NULL; |
7496 | } | 7512 | } |
7497 | 7513 | ||
7514 | if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) { | ||
7515 | u8 *rates = nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); | ||
7516 | int n_rates = | ||
7517 | nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); | ||
7518 | struct ieee80211_supported_band *sband; | ||
7519 | |||
7520 | if (!setup.chandef.chan) | ||
7521 | return -EINVAL; | ||
7522 | |||
7523 | sband = rdev->wiphy.bands[setup.chandef.chan->band]; | ||
7524 | |||
7525 | err = ieee80211_get_ratemask(sband, rates, n_rates, | ||
7526 | &setup.basic_rates); | ||
7527 | if (err) | ||
7528 | return err; | ||
7529 | } | ||
7530 | |||
7498 | return cfg80211_join_mesh(rdev, dev, &setup, &cfg); | 7531 | return cfg80211_join_mesh(rdev, dev, &setup, &cfg); |
7499 | } | 7532 | } |
7500 | 7533 | ||
@@ -7591,8 +7624,7 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info) | |||
7591 | void *hdr; | 7624 | void *hdr; |
7592 | u32 size = NLMSG_DEFAULT_SIZE; | 7625 | u32 size = NLMSG_DEFAULT_SIZE; |
7593 | 7626 | ||
7594 | if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns && | 7627 | if (!rdev->wiphy.wowlan) |
7595 | !rdev->wiphy.wowlan.tcp) | ||
7596 | return -EOPNOTSUPP; | 7628 | return -EOPNOTSUPP; |
7597 | 7629 | ||
7598 | if (rdev->wiphy.wowlan_config && rdev->wiphy.wowlan_config->tcp) { | 7630 | if (rdev->wiphy.wowlan_config && rdev->wiphy.wowlan_config->tcp) { |
@@ -7665,7 +7697,7 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev, | |||
7665 | u32 data_size, wake_size, tokens_size = 0, wake_mask_size; | 7697 | u32 data_size, wake_size, tokens_size = 0, wake_mask_size; |
7666 | int err, port; | 7698 | int err, port; |
7667 | 7699 | ||
7668 | if (!rdev->wiphy.wowlan.tcp) | 7700 | if (!rdev->wiphy.wowlan->tcp) |
7669 | return -EINVAL; | 7701 | return -EINVAL; |
7670 | 7702 | ||
7671 | err = nla_parse(tb, MAX_NL80211_WOWLAN_TCP, | 7703 | err = nla_parse(tb, MAX_NL80211_WOWLAN_TCP, |
@@ -7685,16 +7717,16 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev, | |||
7685 | return -EINVAL; | 7717 | return -EINVAL; |
7686 | 7718 | ||
7687 | data_size = nla_len(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD]); | 7719 | data_size = nla_len(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD]); |
7688 | if (data_size > rdev->wiphy.wowlan.tcp->data_payload_max) | 7720 | if (data_size > rdev->wiphy.wowlan->tcp->data_payload_max) |
7689 | return -EINVAL; | 7721 | return -EINVAL; |
7690 | 7722 | ||
7691 | if (nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) > | 7723 | if (nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) > |
7692 | rdev->wiphy.wowlan.tcp->data_interval_max || | 7724 | rdev->wiphy.wowlan->tcp->data_interval_max || |
7693 | nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) == 0) | 7725 | nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) == 0) |
7694 | return -EINVAL; | 7726 | return -EINVAL; |
7695 | 7727 | ||
7696 | wake_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]); | 7728 | wake_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]); |
7697 | if (wake_size > rdev->wiphy.wowlan.tcp->wake_payload_max) | 7729 | if (wake_size > rdev->wiphy.wowlan->tcp->wake_payload_max) |
7698 | return -EINVAL; | 7730 | return -EINVAL; |
7699 | 7731 | ||
7700 | wake_mask_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_MASK]); | 7732 | wake_mask_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_MASK]); |
@@ -7709,13 +7741,13 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev, | |||
7709 | 7741 | ||
7710 | if (!tok->len || tokens_size % tok->len) | 7742 | if (!tok->len || tokens_size % tok->len) |
7711 | return -EINVAL; | 7743 | return -EINVAL; |
7712 | if (!rdev->wiphy.wowlan.tcp->tok) | 7744 | if (!rdev->wiphy.wowlan->tcp->tok) |
7713 | return -EINVAL; | 7745 | return -EINVAL; |
7714 | if (tok->len > rdev->wiphy.wowlan.tcp->tok->max_len) | 7746 | if (tok->len > rdev->wiphy.wowlan->tcp->tok->max_len) |
7715 | return -EINVAL; | 7747 | return -EINVAL; |
7716 | if (tok->len < rdev->wiphy.wowlan.tcp->tok->min_len) | 7748 | if (tok->len < rdev->wiphy.wowlan->tcp->tok->min_len) |
7717 | return -EINVAL; | 7749 | return -EINVAL; |
7718 | if (tokens_size > rdev->wiphy.wowlan.tcp->tok->bufsize) | 7750 | if (tokens_size > rdev->wiphy.wowlan->tcp->tok->bufsize) |
7719 | return -EINVAL; | 7751 | return -EINVAL; |
7720 | if (tok->offset + tok->len > data_size) | 7752 | if (tok->offset + tok->len > data_size) |
7721 | return -EINVAL; | 7753 | return -EINVAL; |
@@ -7723,7 +7755,7 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev, | |||
7723 | 7755 | ||
7724 | if (tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]) { | 7756 | if (tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]) { |
7725 | seq = nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]); | 7757 | seq = nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]); |
7726 | if (!rdev->wiphy.wowlan.tcp->seq) | 7758 | if (!rdev->wiphy.wowlan->tcp->seq) |
7727 | return -EINVAL; | 7759 | return -EINVAL; |
7728 | if (seq->len == 0 || seq->len > 4) | 7760 | if (seq->len == 0 || seq->len > 4) |
7729 | return -EINVAL; | 7761 | return -EINVAL; |
@@ -7804,12 +7836,11 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) | |||
7804 | struct nlattr *tb[NUM_NL80211_WOWLAN_TRIG]; | 7836 | struct nlattr *tb[NUM_NL80211_WOWLAN_TRIG]; |
7805 | struct cfg80211_wowlan new_triggers = {}; | 7837 | struct cfg80211_wowlan new_triggers = {}; |
7806 | struct cfg80211_wowlan *ntrig; | 7838 | struct cfg80211_wowlan *ntrig; |
7807 | struct wiphy_wowlan_support *wowlan = &rdev->wiphy.wowlan; | 7839 | const struct wiphy_wowlan_support *wowlan = rdev->wiphy.wowlan; |
7808 | int err, i; | 7840 | int err, i; |
7809 | bool prev_enabled = rdev->wiphy.wowlan_config; | 7841 | bool prev_enabled = rdev->wiphy.wowlan_config; |
7810 | 7842 | ||
7811 | if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns && | 7843 | if (!wowlan) |
7812 | !rdev->wiphy.wowlan.tcp) | ||
7813 | return -EOPNOTSUPP; | 7844 | return -EOPNOTSUPP; |
7814 | 7845 | ||
7815 | if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]) { | 7846 | if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]) { |
@@ -9326,31 +9357,27 @@ void nl80211_send_disassoc(struct cfg80211_registered_device *rdev, | |||
9326 | NL80211_CMD_DISASSOCIATE, gfp); | 9357 | NL80211_CMD_DISASSOCIATE, gfp); |
9327 | } | 9358 | } |
9328 | 9359 | ||
9329 | void cfg80211_send_unprot_deauth(struct net_device *dev, const u8 *buf, | 9360 | void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf, |
9330 | size_t len) | 9361 | size_t len) |
9331 | { | 9362 | { |
9332 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 9363 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
9333 | struct wiphy *wiphy = wdev->wiphy; | 9364 | struct wiphy *wiphy = wdev->wiphy; |
9334 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 9365 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
9366 | const struct ieee80211_mgmt *mgmt = (void *)buf; | ||
9367 | u32 cmd; | ||
9335 | 9368 | ||
9336 | trace_cfg80211_send_unprot_deauth(dev); | 9369 | if (WARN_ON(len < 2)) |
9337 | nl80211_send_mlme_event(rdev, dev, buf, len, | 9370 | return; |
9338 | NL80211_CMD_UNPROT_DEAUTHENTICATE, GFP_ATOMIC); | ||
9339 | } | ||
9340 | EXPORT_SYMBOL(cfg80211_send_unprot_deauth); | ||
9341 | 9371 | ||
9342 | void cfg80211_send_unprot_disassoc(struct net_device *dev, const u8 *buf, | 9372 | if (ieee80211_is_deauth(mgmt->frame_control)) |
9343 | size_t len) | 9373 | cmd = NL80211_CMD_UNPROT_DEAUTHENTICATE; |
9344 | { | 9374 | else |
9345 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 9375 | cmd = NL80211_CMD_UNPROT_DISASSOCIATE; |
9346 | struct wiphy *wiphy = wdev->wiphy; | ||
9347 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | ||
9348 | 9376 | ||
9349 | trace_cfg80211_send_unprot_disassoc(dev); | 9377 | trace_cfg80211_rx_unprot_mlme_mgmt(dev, buf, len); |
9350 | nl80211_send_mlme_event(rdev, dev, buf, len, | 9378 | nl80211_send_mlme_event(rdev, dev, buf, len, cmd, GFP_ATOMIC); |
9351 | NL80211_CMD_UNPROT_DISASSOCIATE, GFP_ATOMIC); | ||
9352 | } | 9379 | } |
9353 | EXPORT_SYMBOL(cfg80211_send_unprot_disassoc); | 9380 | EXPORT_SYMBOL(cfg80211_rx_unprot_mlme_mgmt); |
9354 | 9381 | ||
9355 | static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev, | 9382 | static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev, |
9356 | struct net_device *netdev, int cmd, | 9383 | struct net_device *netdev, int cmd, |
@@ -9861,7 +9888,6 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd, | |||
9861 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | 9888 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); |
9862 | struct sk_buff *msg; | 9889 | struct sk_buff *msg; |
9863 | void *hdr; | 9890 | void *hdr; |
9864 | int err; | ||
9865 | u32 nlportid = ACCESS_ONCE(wdev->ap_unexpected_nlportid); | 9891 | u32 nlportid = ACCESS_ONCE(wdev->ap_unexpected_nlportid); |
9866 | 9892 | ||
9867 | if (!nlportid) | 9893 | if (!nlportid) |
@@ -9882,12 +9908,7 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd, | |||
9882 | nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) | 9908 | nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) |
9883 | goto nla_put_failure; | 9909 | goto nla_put_failure; |
9884 | 9910 | ||
9885 | err = genlmsg_end(msg, hdr); | 9911 | genlmsg_end(msg, hdr); |
9886 | if (err < 0) { | ||
9887 | nlmsg_free(msg); | ||
9888 | return true; | ||
9889 | } | ||
9890 | |||
9891 | genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); | 9912 | genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); |
9892 | return true; | 9913 | return true; |
9893 | 9914 | ||
@@ -10330,10 +10351,7 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev, | |||
10330 | if (nl80211_send_chandef(msg, chandef)) | 10351 | if (nl80211_send_chandef(msg, chandef)) |
10331 | goto nla_put_failure; | 10352 | goto nla_put_failure; |
10332 | 10353 | ||
10333 | if (genlmsg_end(msg, hdr) < 0) { | 10354 | genlmsg_end(msg, hdr); |
10334 | nlmsg_free(msg); | ||
10335 | return; | ||
10336 | } | ||
10337 | 10355 | ||
10338 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, | 10356 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, |
10339 | nl80211_mlme_mcgrp.id, gfp); | 10357 | nl80211_mlme_mcgrp.id, gfp); |
@@ -10399,7 +10417,6 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr, | |||
10399 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | 10417 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); |
10400 | struct sk_buff *msg; | 10418 | struct sk_buff *msg; |
10401 | void *hdr; | 10419 | void *hdr; |
10402 | int err; | ||
10403 | 10420 | ||
10404 | trace_cfg80211_probe_status(dev, addr, cookie, acked); | 10421 | trace_cfg80211_probe_status(dev, addr, cookie, acked); |
10405 | 10422 | ||
@@ -10421,11 +10438,7 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr, | |||
10421 | (acked && nla_put_flag(msg, NL80211_ATTR_ACK))) | 10438 | (acked && nla_put_flag(msg, NL80211_ATTR_ACK))) |
10422 | goto nla_put_failure; | 10439 | goto nla_put_failure; |
10423 | 10440 | ||
10424 | err = genlmsg_end(msg, hdr); | 10441 | genlmsg_end(msg, hdr); |
10425 | if (err < 0) { | ||
10426 | nlmsg_free(msg); | ||
10427 | return; | ||
10428 | } | ||
10429 | 10442 | ||
10430 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, | 10443 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, |
10431 | nl80211_mlme_mcgrp.id, gfp); | 10444 | nl80211_mlme_mcgrp.id, gfp); |
@@ -10491,7 +10504,7 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev, | |||
10491 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | 10504 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); |
10492 | struct sk_buff *msg; | 10505 | struct sk_buff *msg; |
10493 | void *hdr; | 10506 | void *hdr; |
10494 | int err, size = 200; | 10507 | int size = 200; |
10495 | 10508 | ||
10496 | trace_cfg80211_report_wowlan_wakeup(wdev->wiphy, wdev, wakeup); | 10509 | trace_cfg80211_report_wowlan_wakeup(wdev->wiphy, wdev, wakeup); |
10497 | 10510 | ||
@@ -10577,9 +10590,7 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev, | |||
10577 | nla_nest_end(msg, reasons); | 10590 | nla_nest_end(msg, reasons); |
10578 | } | 10591 | } |
10579 | 10592 | ||
10580 | err = genlmsg_end(msg, hdr); | 10593 | genlmsg_end(msg, hdr); |
10581 | if (err < 0) | ||
10582 | goto free_msg; | ||
10583 | 10594 | ||
10584 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, | 10595 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, |
10585 | nl80211_mlme_mcgrp.id, gfp); | 10596 | nl80211_mlme_mcgrp.id, gfp); |
@@ -10599,7 +10610,6 @@ void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer, | |||
10599 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | 10610 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); |
10600 | struct sk_buff *msg; | 10611 | struct sk_buff *msg; |
10601 | void *hdr; | 10612 | void *hdr; |
10602 | int err; | ||
10603 | 10613 | ||
10604 | trace_cfg80211_tdls_oper_request(wdev->wiphy, dev, peer, oper, | 10614 | trace_cfg80211_tdls_oper_request(wdev->wiphy, dev, peer, oper, |
10605 | reason_code); | 10615 | reason_code); |
@@ -10622,11 +10632,7 @@ void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer, | |||
10622 | nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason_code))) | 10632 | nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason_code))) |
10623 | goto nla_put_failure; | 10633 | goto nla_put_failure; |
10624 | 10634 | ||
10625 | err = genlmsg_end(msg, hdr); | 10635 | genlmsg_end(msg, hdr); |
10626 | if (err < 0) { | ||
10627 | nlmsg_free(msg); | ||
10628 | return; | ||
10629 | } | ||
10630 | 10636 | ||
10631 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, | 10637 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, |
10632 | nl80211_mlme_mcgrp.id, gfp); | 10638 | nl80211_mlme_mcgrp.id, gfp); |
@@ -10684,7 +10690,6 @@ void cfg80211_ft_event(struct net_device *netdev, | |||
10684 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 10690 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
10685 | struct sk_buff *msg; | 10691 | struct sk_buff *msg; |
10686 | void *hdr; | 10692 | void *hdr; |
10687 | int err; | ||
10688 | 10693 | ||
10689 | trace_cfg80211_ft_event(wiphy, netdev, ft_event); | 10694 | trace_cfg80211_ft_event(wiphy, netdev, ft_event); |
10690 | 10695 | ||
@@ -10710,11 +10715,7 @@ void cfg80211_ft_event(struct net_device *netdev, | |||
10710 | nla_put(msg, NL80211_ATTR_IE_RIC, ft_event->ric_ies_len, | 10715 | nla_put(msg, NL80211_ATTR_IE_RIC, ft_event->ric_ies_len, |
10711 | ft_event->ric_ies); | 10716 | ft_event->ric_ies); |
10712 | 10717 | ||
10713 | err = genlmsg_end(msg, hdr); | 10718 | genlmsg_end(msg, hdr); |
10714 | if (err < 0) { | ||
10715 | nlmsg_free(msg); | ||
10716 | return; | ||
10717 | } | ||
10718 | 10719 | ||
10719 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, | 10720 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, |
10720 | nl80211_mlme_mcgrp.id, GFP_KERNEL); | 10721 | nl80211_mlme_mcgrp.id, GFP_KERNEL); |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index e1d6749234c6..5a24c986f34b 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -1345,7 +1345,7 @@ get_reg_request_treatment(struct wiphy *wiphy, | |||
1345 | return REG_REQ_OK; | 1345 | return REG_REQ_OK; |
1346 | return REG_REQ_ALREADY_SET; | 1346 | return REG_REQ_ALREADY_SET; |
1347 | } | 1347 | } |
1348 | return 0; | 1348 | return REG_REQ_OK; |
1349 | case NL80211_REGDOM_SET_BY_DRIVER: | 1349 | case NL80211_REGDOM_SET_BY_DRIVER: |
1350 | if (lr->initiator == NL80211_REGDOM_SET_BY_CORE) { | 1350 | if (lr->initiator == NL80211_REGDOM_SET_BY_CORE) { |
1351 | if (regdom_changes(pending_request->alpha2)) | 1351 | if (regdom_changes(pending_request->alpha2)) |
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 81be95f3be74..ae7e2cbf45cb 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -1,5 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * SME code for cfg80211's connect emulation. | 2 | * SME code for cfg80211 |
3 | * both driver SME event handling and the SME implementation | ||
4 | * (for nl80211's connect() and wext) | ||
3 | * | 5 | * |
4 | * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> | 6 | * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> |
5 | * Copyright (C) 2009 Intel Corporation. All rights reserved. | 7 | * Copyright (C) 2009 Intel Corporation. All rights reserved. |
@@ -18,18 +20,24 @@ | |||
18 | #include "reg.h" | 20 | #include "reg.h" |
19 | #include "rdev-ops.h" | 21 | #include "rdev-ops.h" |
20 | 22 | ||
23 | /* | ||
24 | * Software SME in cfg80211, using auth/assoc/deauth calls to the | ||
25 | * driver. This is for implementing nl80211's connect/disconnect | ||
26 | * and wireless extensions (if configured.) | ||
27 | */ | ||
28 | |||
21 | struct cfg80211_conn { | 29 | struct cfg80211_conn { |
22 | struct cfg80211_connect_params params; | 30 | struct cfg80211_connect_params params; |
23 | /* these are sub-states of the _CONNECTING sme_state */ | 31 | /* these are sub-states of the _CONNECTING sme_state */ |
24 | enum { | 32 | enum { |
25 | CFG80211_CONN_IDLE, | ||
26 | CFG80211_CONN_SCANNING, | 33 | CFG80211_CONN_SCANNING, |
27 | CFG80211_CONN_SCAN_AGAIN, | 34 | CFG80211_CONN_SCAN_AGAIN, |
28 | CFG80211_CONN_AUTHENTICATE_NEXT, | 35 | CFG80211_CONN_AUTHENTICATE_NEXT, |
29 | CFG80211_CONN_AUTHENTICATING, | 36 | CFG80211_CONN_AUTHENTICATING, |
30 | CFG80211_CONN_ASSOCIATE_NEXT, | 37 | CFG80211_CONN_ASSOCIATE_NEXT, |
31 | CFG80211_CONN_ASSOCIATING, | 38 | CFG80211_CONN_ASSOCIATING, |
32 | CFG80211_CONN_DEAUTH_ASSOC_FAIL, | 39 | CFG80211_CONN_DEAUTH, |
40 | CFG80211_CONN_CONNECTED, | ||
33 | } state; | 41 | } state; |
34 | u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; | 42 | u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; |
35 | u8 *ie; | 43 | u8 *ie; |
@@ -37,39 +45,16 @@ struct cfg80211_conn { | |||
37 | bool auto_auth, prev_bssid_valid; | 45 | bool auto_auth, prev_bssid_valid; |
38 | }; | 46 | }; |
39 | 47 | ||
40 | static bool cfg80211_is_all_idle(void) | 48 | static void cfg80211_sme_free(struct wireless_dev *wdev) |
41 | { | 49 | { |
42 | struct cfg80211_registered_device *rdev; | 50 | if (!wdev->conn) |
43 | struct wireless_dev *wdev; | 51 | return; |
44 | bool is_all_idle = true; | ||
45 | |||
46 | /* | ||
47 | * All devices must be idle as otherwise if you are actively | ||
48 | * scanning some new beacon hints could be learned and would | ||
49 | * count as new regulatory hints. | ||
50 | */ | ||
51 | list_for_each_entry(rdev, &cfg80211_rdev_list, list) { | ||
52 | list_for_each_entry(wdev, &rdev->wdev_list, list) { | ||
53 | wdev_lock(wdev); | ||
54 | if (wdev->sme_state != CFG80211_SME_IDLE) | ||
55 | is_all_idle = false; | ||
56 | wdev_unlock(wdev); | ||
57 | } | ||
58 | } | ||
59 | |||
60 | return is_all_idle; | ||
61 | } | ||
62 | 52 | ||
63 | static void disconnect_work(struct work_struct *work) | 53 | kfree(wdev->conn->ie); |
64 | { | 54 | kfree(wdev->conn); |
65 | rtnl_lock(); | 55 | wdev->conn = NULL; |
66 | if (cfg80211_is_all_idle()) | ||
67 | regulatory_hint_disconnect(); | ||
68 | rtnl_unlock(); | ||
69 | } | 56 | } |
70 | 57 | ||
71 | static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); | ||
72 | |||
73 | static int cfg80211_conn_scan(struct wireless_dev *wdev) | 58 | static int cfg80211_conn_scan(struct wireless_dev *wdev) |
74 | { | 59 | { |
75 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | 60 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); |
@@ -164,6 +149,9 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev) | |||
164 | params = &wdev->conn->params; | 149 | params = &wdev->conn->params; |
165 | 150 | ||
166 | switch (wdev->conn->state) { | 151 | switch (wdev->conn->state) { |
152 | case CFG80211_CONN_SCANNING: | ||
153 | /* didn't find it during scan ... */ | ||
154 | return -ENOENT; | ||
167 | case CFG80211_CONN_SCAN_AGAIN: | 155 | case CFG80211_CONN_SCAN_AGAIN: |
168 | return cfg80211_conn_scan(wdev); | 156 | return cfg80211_conn_scan(wdev); |
169 | case CFG80211_CONN_AUTHENTICATE_NEXT: | 157 | case CFG80211_CONN_AUTHENTICATE_NEXT: |
@@ -200,12 +188,11 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev) | |||
200 | WLAN_REASON_DEAUTH_LEAVING, | 188 | WLAN_REASON_DEAUTH_LEAVING, |
201 | false); | 189 | false); |
202 | return err; | 190 | return err; |
203 | case CFG80211_CONN_DEAUTH_ASSOC_FAIL: | 191 | case CFG80211_CONN_DEAUTH: |
204 | cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, | 192 | cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, |
205 | NULL, 0, | 193 | NULL, 0, |
206 | WLAN_REASON_DEAUTH_LEAVING, false); | 194 | WLAN_REASON_DEAUTH_LEAVING, false); |
207 | /* return an error so that we call __cfg80211_connect_result() */ | 195 | return 0; |
208 | return -EINVAL; | ||
209 | default: | 196 | default: |
210 | return 0; | 197 | return 0; |
211 | } | 198 | } |
@@ -229,7 +216,8 @@ void cfg80211_conn_work(struct work_struct *work) | |||
229 | wdev_unlock(wdev); | 216 | wdev_unlock(wdev); |
230 | continue; | 217 | continue; |
231 | } | 218 | } |
232 | if (wdev->sme_state != CFG80211_SME_CONNECTING || !wdev->conn) { | 219 | if (!wdev->conn || |
220 | wdev->conn->state == CFG80211_CONN_CONNECTED) { | ||
233 | wdev_unlock(wdev); | 221 | wdev_unlock(wdev); |
234 | continue; | 222 | continue; |
235 | } | 223 | } |
@@ -237,12 +225,14 @@ void cfg80211_conn_work(struct work_struct *work) | |||
237 | memcpy(bssid_buf, wdev->conn->params.bssid, ETH_ALEN); | 225 | memcpy(bssid_buf, wdev->conn->params.bssid, ETH_ALEN); |
238 | bssid = bssid_buf; | 226 | bssid = bssid_buf; |
239 | } | 227 | } |
240 | if (cfg80211_conn_do_work(wdev)) | 228 | if (cfg80211_conn_do_work(wdev)) { |
241 | __cfg80211_connect_result( | 229 | __cfg80211_connect_result( |
242 | wdev->netdev, bssid, | 230 | wdev->netdev, bssid, |
243 | NULL, 0, NULL, 0, | 231 | NULL, 0, NULL, 0, |
244 | WLAN_STATUS_UNSPECIFIED_FAILURE, | 232 | WLAN_STATUS_UNSPECIFIED_FAILURE, |
245 | false, NULL); | 233 | false, NULL); |
234 | cfg80211_sme_free(wdev); | ||
235 | } | ||
246 | wdev_unlock(wdev); | 236 | wdev_unlock(wdev); |
247 | } | 237 | } |
248 | 238 | ||
@@ -286,9 +276,6 @@ static void __cfg80211_sme_scan_done(struct net_device *dev) | |||
286 | 276 | ||
287 | ASSERT_WDEV_LOCK(wdev); | 277 | ASSERT_WDEV_LOCK(wdev); |
288 | 278 | ||
289 | if (wdev->sme_state != CFG80211_SME_CONNECTING) | ||
290 | return; | ||
291 | |||
292 | if (!wdev->conn) | 279 | if (!wdev->conn) |
293 | return; | 280 | return; |
294 | 281 | ||
@@ -297,20 +284,10 @@ static void __cfg80211_sme_scan_done(struct net_device *dev) | |||
297 | return; | 284 | return; |
298 | 285 | ||
299 | bss = cfg80211_get_conn_bss(wdev); | 286 | bss = cfg80211_get_conn_bss(wdev); |
300 | if (bss) { | 287 | if (bss) |
301 | cfg80211_put_bss(&rdev->wiphy, bss); | 288 | cfg80211_put_bss(&rdev->wiphy, bss); |
302 | } else { | 289 | else |
303 | /* not found */ | 290 | schedule_work(&rdev->conn_work); |
304 | if (wdev->conn->state == CFG80211_CONN_SCAN_AGAIN) | ||
305 | schedule_work(&rdev->conn_work); | ||
306 | else | ||
307 | __cfg80211_connect_result( | ||
308 | wdev->netdev, | ||
309 | wdev->conn->params.bssid, | ||
310 | NULL, 0, NULL, 0, | ||
311 | WLAN_STATUS_UNSPECIFIED_FAILURE, | ||
312 | false, NULL); | ||
313 | } | ||
314 | } | 291 | } |
315 | 292 | ||
316 | void cfg80211_sme_scan_done(struct net_device *dev) | 293 | void cfg80211_sme_scan_done(struct net_device *dev) |
@@ -322,10 +299,8 @@ void cfg80211_sme_scan_done(struct net_device *dev) | |||
322 | wdev_unlock(wdev); | 299 | wdev_unlock(wdev); |
323 | } | 300 | } |
324 | 301 | ||
325 | void cfg80211_sme_rx_auth(struct net_device *dev, | 302 | void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len) |
326 | const u8 *buf, size_t len) | ||
327 | { | 303 | { |
328 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
329 | struct wiphy *wiphy = wdev->wiphy; | 304 | struct wiphy *wiphy = wdev->wiphy; |
330 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 305 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
331 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; | 306 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; |
@@ -333,11 +308,7 @@ void cfg80211_sme_rx_auth(struct net_device *dev, | |||
333 | 308 | ||
334 | ASSERT_WDEV_LOCK(wdev); | 309 | ASSERT_WDEV_LOCK(wdev); |
335 | 310 | ||
336 | /* should only RX auth frames when connecting */ | 311 | if (!wdev->conn || wdev->conn->state == CFG80211_CONN_CONNECTED) |
337 | if (wdev->sme_state != CFG80211_SME_CONNECTING) | ||
338 | return; | ||
339 | |||
340 | if (WARN_ON(!wdev->conn)) | ||
341 | return; | 312 | return; |
342 | 313 | ||
343 | if (status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG && | 314 | if (status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG && |
@@ -366,46 +337,226 @@ void cfg80211_sme_rx_auth(struct net_device *dev, | |||
366 | wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; | 337 | wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; |
367 | schedule_work(&rdev->conn_work); | 338 | schedule_work(&rdev->conn_work); |
368 | } else if (status_code != WLAN_STATUS_SUCCESS) { | 339 | } else if (status_code != WLAN_STATUS_SUCCESS) { |
369 | __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0, | 340 | __cfg80211_connect_result(wdev->netdev, mgmt->bssid, |
341 | NULL, 0, NULL, 0, | ||
370 | status_code, false, NULL); | 342 | status_code, false, NULL); |
371 | } else if (wdev->sme_state == CFG80211_SME_CONNECTING && | 343 | } else if (wdev->conn->state == CFG80211_CONN_AUTHENTICATING) { |
372 | wdev->conn->state == CFG80211_CONN_AUTHENTICATING) { | ||
373 | wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; | 344 | wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; |
374 | schedule_work(&rdev->conn_work); | 345 | schedule_work(&rdev->conn_work); |
375 | } | 346 | } |
376 | } | 347 | } |
377 | 348 | ||
378 | bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev) | 349 | bool cfg80211_sme_rx_assoc_resp(struct wireless_dev *wdev, u16 status) |
379 | { | 350 | { |
380 | struct wiphy *wiphy = wdev->wiphy; | 351 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); |
381 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | ||
382 | 352 | ||
383 | if (WARN_ON(!wdev->conn)) | 353 | if (!wdev->conn) |
384 | return false; | 354 | return false; |
385 | 355 | ||
386 | if (!wdev->conn->prev_bssid_valid) | 356 | if (status == WLAN_STATUS_SUCCESS) { |
357 | wdev->conn->state = CFG80211_CONN_CONNECTED; | ||
387 | return false; | 358 | return false; |
359 | } | ||
388 | 360 | ||
389 | /* | 361 | if (wdev->conn->prev_bssid_valid) { |
390 | * Some stupid APs don't accept reassoc, so we | 362 | /* |
391 | * need to fall back to trying regular assoc. | 363 | * Some stupid APs don't accept reassoc, so we |
392 | */ | 364 | * need to fall back to trying regular assoc; |
393 | wdev->conn->prev_bssid_valid = false; | 365 | * return true so no event is sent to userspace. |
394 | wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; | 366 | */ |
367 | wdev->conn->prev_bssid_valid = false; | ||
368 | wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; | ||
369 | schedule_work(&rdev->conn_work); | ||
370 | return true; | ||
371 | } | ||
372 | |||
373 | wdev->conn->state = CFG80211_CONN_DEAUTH; | ||
395 | schedule_work(&rdev->conn_work); | 374 | schedule_work(&rdev->conn_work); |
375 | return false; | ||
376 | } | ||
396 | 377 | ||
397 | return true; | 378 | void cfg80211_sme_deauth(struct wireless_dev *wdev) |
379 | { | ||
380 | cfg80211_sme_free(wdev); | ||
398 | } | 381 | } |
399 | 382 | ||
400 | void cfg80211_sme_failed_assoc(struct wireless_dev *wdev) | 383 | void cfg80211_sme_auth_timeout(struct wireless_dev *wdev) |
401 | { | 384 | { |
402 | struct wiphy *wiphy = wdev->wiphy; | 385 | cfg80211_sme_free(wdev); |
403 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 386 | } |
404 | 387 | ||
405 | wdev->conn->state = CFG80211_CONN_DEAUTH_ASSOC_FAIL; | 388 | void cfg80211_sme_disassoc(struct wireless_dev *wdev) |
389 | { | ||
390 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
391 | |||
392 | if (!wdev->conn) | ||
393 | return; | ||
394 | |||
395 | wdev->conn->state = CFG80211_CONN_DEAUTH; | ||
406 | schedule_work(&rdev->conn_work); | 396 | schedule_work(&rdev->conn_work); |
407 | } | 397 | } |
408 | 398 | ||
399 | void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev) | ||
400 | { | ||
401 | cfg80211_sme_disassoc(wdev); | ||
402 | } | ||
403 | |||
404 | static int cfg80211_sme_connect(struct wireless_dev *wdev, | ||
405 | struct cfg80211_connect_params *connect, | ||
406 | const u8 *prev_bssid) | ||
407 | { | ||
408 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
409 | struct cfg80211_bss *bss; | ||
410 | int err; | ||
411 | |||
412 | if (!rdev->ops->auth || !rdev->ops->assoc) | ||
413 | return -EOPNOTSUPP; | ||
414 | |||
415 | if (wdev->current_bss) | ||
416 | return -EALREADY; | ||
417 | |||
418 | if (WARN_ON(wdev->conn)) | ||
419 | return -EINPROGRESS; | ||
420 | |||
421 | wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL); | ||
422 | if (!wdev->conn) | ||
423 | return -ENOMEM; | ||
424 | |||
425 | /* | ||
426 | * Copy all parameters, and treat explicitly IEs, BSSID, SSID. | ||
427 | */ | ||
428 | memcpy(&wdev->conn->params, connect, sizeof(*connect)); | ||
429 | if (connect->bssid) { | ||
430 | wdev->conn->params.bssid = wdev->conn->bssid; | ||
431 | memcpy(wdev->conn->bssid, connect->bssid, ETH_ALEN); | ||
432 | } | ||
433 | |||
434 | if (connect->ie) { | ||
435 | wdev->conn->ie = kmemdup(connect->ie, connect->ie_len, | ||
436 | GFP_KERNEL); | ||
437 | wdev->conn->params.ie = wdev->conn->ie; | ||
438 | if (!wdev->conn->ie) { | ||
439 | kfree(wdev->conn); | ||
440 | wdev->conn = NULL; | ||
441 | return -ENOMEM; | ||
442 | } | ||
443 | } | ||
444 | |||
445 | if (connect->auth_type == NL80211_AUTHTYPE_AUTOMATIC) { | ||
446 | wdev->conn->auto_auth = true; | ||
447 | /* start with open system ... should mostly work */ | ||
448 | wdev->conn->params.auth_type = | ||
449 | NL80211_AUTHTYPE_OPEN_SYSTEM; | ||
450 | } else { | ||
451 | wdev->conn->auto_auth = false; | ||
452 | } | ||
453 | |||
454 | wdev->conn->params.ssid = wdev->ssid; | ||
455 | wdev->conn->params.ssid_len = connect->ssid_len; | ||
456 | |||
457 | /* see if we have the bss already */ | ||
458 | bss = cfg80211_get_conn_bss(wdev); | ||
459 | |||
460 | if (prev_bssid) { | ||
461 | memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN); | ||
462 | wdev->conn->prev_bssid_valid = true; | ||
463 | } | ||
464 | |||
465 | /* we're good if we have a matching bss struct */ | ||
466 | if (bss) { | ||
467 | wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; | ||
468 | err = cfg80211_conn_do_work(wdev); | ||
469 | cfg80211_put_bss(wdev->wiphy, bss); | ||
470 | } else { | ||
471 | /* otherwise we'll need to scan for the AP first */ | ||
472 | err = cfg80211_conn_scan(wdev); | ||
473 | |||
474 | /* | ||
475 | * If we can't scan right now, then we need to scan again | ||
476 | * after the current scan finished, since the parameters | ||
477 | * changed (unless we find a good AP anyway). | ||
478 | */ | ||
479 | if (err == -EBUSY) { | ||
480 | err = 0; | ||
481 | wdev->conn->state = CFG80211_CONN_SCAN_AGAIN; | ||
482 | } | ||
483 | } | ||
484 | |||
485 | if (err) | ||
486 | cfg80211_sme_free(wdev); | ||
487 | |||
488 | return err; | ||
489 | } | ||
490 | |||
491 | static int cfg80211_sme_disconnect(struct wireless_dev *wdev, u16 reason) | ||
492 | { | ||
493 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
494 | int err; | ||
495 | |||
496 | if (!wdev->conn) | ||
497 | return 0; | ||
498 | |||
499 | if (!rdev->ops->deauth) | ||
500 | return -EOPNOTSUPP; | ||
501 | |||
502 | if (wdev->conn->state == CFG80211_CONN_SCANNING || | ||
503 | wdev->conn->state == CFG80211_CONN_SCAN_AGAIN) { | ||
504 | err = 0; | ||
505 | goto out; | ||
506 | } | ||
507 | |||
508 | /* wdev->conn->params.bssid must be set if > SCANNING */ | ||
509 | err = cfg80211_mlme_deauth(rdev, wdev->netdev, | ||
510 | wdev->conn->params.bssid, | ||
511 | NULL, 0, reason, false); | ||
512 | out: | ||
513 | cfg80211_sme_free(wdev); | ||
514 | return err; | ||
515 | } | ||
516 | |||
517 | /* | ||
518 | * code shared for in-device and software SME | ||
519 | */ | ||
520 | |||
521 | static bool cfg80211_is_all_idle(void) | ||
522 | { | ||
523 | struct cfg80211_registered_device *rdev; | ||
524 | struct wireless_dev *wdev; | ||
525 | bool is_all_idle = true; | ||
526 | |||
527 | /* | ||
528 | * All devices must be idle as otherwise if you are actively | ||
529 | * scanning some new beacon hints could be learned and would | ||
530 | * count as new regulatory hints. | ||
531 | */ | ||
532 | list_for_each_entry(rdev, &cfg80211_rdev_list, list) { | ||
533 | list_for_each_entry(wdev, &rdev->wdev_list, list) { | ||
534 | wdev_lock(wdev); | ||
535 | if (wdev->conn || wdev->current_bss) | ||
536 | is_all_idle = false; | ||
537 | wdev_unlock(wdev); | ||
538 | } | ||
539 | } | ||
540 | |||
541 | return is_all_idle; | ||
542 | } | ||
543 | |||
544 | static void disconnect_work(struct work_struct *work) | ||
545 | { | ||
546 | rtnl_lock(); | ||
547 | if (cfg80211_is_all_idle()) | ||
548 | regulatory_hint_disconnect(); | ||
549 | rtnl_unlock(); | ||
550 | } | ||
551 | |||
552 | static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); | ||
553 | |||
554 | |||
555 | /* | ||
556 | * API calls for drivers implementing connect/disconnect and | ||
557 | * SME event handling | ||
558 | */ | ||
559 | |||
409 | void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | 560 | void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, |
410 | const u8 *req_ie, size_t req_ie_len, | 561 | const u8 *req_ie, size_t req_ie_len, |
411 | const u8 *resp_ie, size_t resp_ie_len, | 562 | const u8 *resp_ie, size_t resp_ie_len, |
@@ -424,9 +575,6 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | |||
424 | wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) | 575 | wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) |
425 | return; | 576 | return; |
426 | 577 | ||
427 | if (wdev->sme_state != CFG80211_SME_CONNECTING) | ||
428 | return; | ||
429 | |||
430 | nl80211_send_connect_result(wiphy_to_dev(wdev->wiphy), dev, | 578 | nl80211_send_connect_result(wiphy_to_dev(wdev->wiphy), dev, |
431 | bssid, req_ie, req_ie_len, | 579 | bssid, req_ie, req_ie_len, |
432 | resp_ie, resp_ie_len, | 580 | resp_ie, resp_ie_len, |
@@ -463,15 +611,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | |||
463 | wdev->current_bss = NULL; | 611 | wdev->current_bss = NULL; |
464 | } | 612 | } |
465 | 613 | ||
466 | if (wdev->conn) | ||
467 | wdev->conn->state = CFG80211_CONN_IDLE; | ||
468 | |||
469 | if (status != WLAN_STATUS_SUCCESS) { | 614 | if (status != WLAN_STATUS_SUCCESS) { |
470 | wdev->sme_state = CFG80211_SME_IDLE; | ||
471 | if (wdev->conn) | ||
472 | kfree(wdev->conn->ie); | ||
473 | kfree(wdev->conn); | ||
474 | wdev->conn = NULL; | ||
475 | kfree(wdev->connect_keys); | 615 | kfree(wdev->connect_keys); |
476 | wdev->connect_keys = NULL; | 616 | wdev->connect_keys = NULL; |
477 | wdev->ssid_len = 0; | 617 | wdev->ssid_len = 0; |
@@ -480,21 +620,16 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | |||
480 | } | 620 | } |
481 | 621 | ||
482 | if (!bss) | 622 | if (!bss) |
483 | bss = cfg80211_get_bss(wdev->wiphy, | 623 | bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, |
484 | wdev->conn ? wdev->conn->params.channel : | ||
485 | NULL, | ||
486 | bssid, | ||
487 | wdev->ssid, wdev->ssid_len, | 624 | wdev->ssid, wdev->ssid_len, |
488 | WLAN_CAPABILITY_ESS, | 625 | WLAN_CAPABILITY_ESS, |
489 | WLAN_CAPABILITY_ESS); | 626 | WLAN_CAPABILITY_ESS); |
490 | |||
491 | if (WARN_ON(!bss)) | 627 | if (WARN_ON(!bss)) |
492 | return; | 628 | return; |
493 | 629 | ||
494 | cfg80211_hold_bss(bss_from_pub(bss)); | 630 | cfg80211_hold_bss(bss_from_pub(bss)); |
495 | wdev->current_bss = bss_from_pub(bss); | 631 | wdev->current_bss = bss_from_pub(bss); |
496 | 632 | ||
497 | wdev->sme_state = CFG80211_SME_CONNECTED; | ||
498 | cfg80211_upload_connect_keys(wdev); | 633 | cfg80211_upload_connect_keys(wdev); |
499 | 634 | ||
500 | rcu_read_lock(); | 635 | rcu_read_lock(); |
@@ -530,8 +665,6 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | |||
530 | struct cfg80211_event *ev; | 665 | struct cfg80211_event *ev; |
531 | unsigned long flags; | 666 | unsigned long flags; |
532 | 667 | ||
533 | CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING); | ||
534 | |||
535 | ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp); | 668 | ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp); |
536 | if (!ev) | 669 | if (!ev) |
537 | return; | 670 | return; |
@@ -572,13 +705,8 @@ void __cfg80211_roamed(struct wireless_dev *wdev, | |||
572 | wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) | 705 | wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) |
573 | goto out; | 706 | goto out; |
574 | 707 | ||
575 | if (wdev->sme_state != CFG80211_SME_CONNECTED) | 708 | if (WARN_ON(!wdev->current_bss)) |
576 | goto out; | ||
577 | |||
578 | /* internal error -- how did we get to CONNECTED w/o BSS? */ | ||
579 | if (WARN_ON(!wdev->current_bss)) { | ||
580 | goto out; | 709 | goto out; |
581 | } | ||
582 | 710 | ||
583 | cfg80211_unhold_bss(wdev->current_bss); | 711 | cfg80211_unhold_bss(wdev->current_bss); |
584 | cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); | 712 | cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); |
@@ -628,8 +756,6 @@ void cfg80211_roamed(struct net_device *dev, | |||
628 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 756 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
629 | struct cfg80211_bss *bss; | 757 | struct cfg80211_bss *bss; |
630 | 758 | ||
631 | CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED); | ||
632 | |||
633 | bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid, | 759 | bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid, |
634 | wdev->ssid_len, WLAN_CAPABILITY_ESS, | 760 | wdev->ssid_len, WLAN_CAPABILITY_ESS, |
635 | WLAN_CAPABILITY_ESS); | 761 | WLAN_CAPABILITY_ESS); |
@@ -651,8 +777,6 @@ void cfg80211_roamed_bss(struct net_device *dev, | |||
651 | struct cfg80211_event *ev; | 777 | struct cfg80211_event *ev; |
652 | unsigned long flags; | 778 | unsigned long flags; |
653 | 779 | ||
654 | CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED); | ||
655 | |||
656 | if (WARN_ON(!bss)) | 780 | if (WARN_ON(!bss)) |
657 | return; | 781 | return; |
658 | 782 | ||
@@ -694,25 +818,14 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, | |||
694 | wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) | 818 | wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) |
695 | return; | 819 | return; |
696 | 820 | ||
697 | if (wdev->sme_state != CFG80211_SME_CONNECTED) | ||
698 | return; | ||
699 | |||
700 | if (wdev->current_bss) { | 821 | if (wdev->current_bss) { |
701 | cfg80211_unhold_bss(wdev->current_bss); | 822 | cfg80211_unhold_bss(wdev->current_bss); |
702 | cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); | 823 | cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); |
703 | } | 824 | } |
704 | 825 | ||
705 | wdev->current_bss = NULL; | 826 | wdev->current_bss = NULL; |
706 | wdev->sme_state = CFG80211_SME_IDLE; | ||
707 | wdev->ssid_len = 0; | 827 | wdev->ssid_len = 0; |
708 | 828 | ||
709 | if (wdev->conn) { | ||
710 | kfree(wdev->conn->ie); | ||
711 | wdev->conn->ie = NULL; | ||
712 | kfree(wdev->conn); | ||
713 | wdev->conn = NULL; | ||
714 | } | ||
715 | |||
716 | nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); | 829 | nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); |
717 | 830 | ||
718 | /* | 831 | /* |
@@ -741,8 +854,6 @@ void cfg80211_disconnected(struct net_device *dev, u16 reason, | |||
741 | struct cfg80211_event *ev; | 854 | struct cfg80211_event *ev; |
742 | unsigned long flags; | 855 | unsigned long flags; |
743 | 856 | ||
744 | CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED); | ||
745 | |||
746 | ev = kzalloc(sizeof(*ev) + ie_len, gfp); | 857 | ev = kzalloc(sizeof(*ev) + ie_len, gfp); |
747 | if (!ev) | 858 | if (!ev) |
748 | return; | 859 | return; |
@@ -760,6 +871,9 @@ void cfg80211_disconnected(struct net_device *dev, u16 reason, | |||
760 | } | 871 | } |
761 | EXPORT_SYMBOL(cfg80211_disconnected); | 872 | EXPORT_SYMBOL(cfg80211_disconnected); |
762 | 873 | ||
874 | /* | ||
875 | * API calls for nl80211/wext compatibility code | ||
876 | */ | ||
763 | int cfg80211_connect(struct cfg80211_registered_device *rdev, | 877 | int cfg80211_connect(struct cfg80211_registered_device *rdev, |
764 | struct net_device *dev, | 878 | struct net_device *dev, |
765 | struct cfg80211_connect_params *connect, | 879 | struct cfg80211_connect_params *connect, |
@@ -767,14 +881,10 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev, | |||
767 | const u8 *prev_bssid) | 881 | const u8 *prev_bssid) |
768 | { | 882 | { |
769 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 883 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
770 | struct cfg80211_bss *bss = NULL; | ||
771 | int err; | 884 | int err; |
772 | 885 | ||
773 | ASSERT_WDEV_LOCK(wdev); | 886 | ASSERT_WDEV_LOCK(wdev); |
774 | 887 | ||
775 | if (wdev->sme_state != CFG80211_SME_IDLE) | ||
776 | return -EALREADY; | ||
777 | |||
778 | if (WARN_ON(wdev->connect_keys)) { | 888 | if (WARN_ON(wdev->connect_keys)) { |
779 | kfree(wdev->connect_keys); | 889 | kfree(wdev->connect_keys); |
780 | wdev->connect_keys = NULL; | 890 | wdev->connect_keys = NULL; |
@@ -810,105 +920,22 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev, | |||
810 | } | 920 | } |
811 | } | 921 | } |
812 | 922 | ||
813 | if (!rdev->ops->connect) { | 923 | wdev->connect_keys = connkeys; |
814 | if (!rdev->ops->auth || !rdev->ops->assoc) | 924 | memcpy(wdev->ssid, connect->ssid, connect->ssid_len); |
815 | return -EOPNOTSUPP; | 925 | wdev->ssid_len = connect->ssid_len; |
816 | |||
817 | if (WARN_ON(wdev->conn)) | ||
818 | return -EINPROGRESS; | ||
819 | |||
820 | wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL); | ||
821 | if (!wdev->conn) | ||
822 | return -ENOMEM; | ||
823 | |||
824 | /* | ||
825 | * Copy all parameters, and treat explicitly IEs, BSSID, SSID. | ||
826 | */ | ||
827 | memcpy(&wdev->conn->params, connect, sizeof(*connect)); | ||
828 | if (connect->bssid) { | ||
829 | wdev->conn->params.bssid = wdev->conn->bssid; | ||
830 | memcpy(wdev->conn->bssid, connect->bssid, ETH_ALEN); | ||
831 | } | ||
832 | 926 | ||
833 | if (connect->ie) { | 927 | if (!rdev->ops->connect) |
834 | wdev->conn->ie = kmemdup(connect->ie, connect->ie_len, | 928 | err = cfg80211_sme_connect(wdev, connect, prev_bssid); |
835 | GFP_KERNEL); | 929 | else |
836 | wdev->conn->params.ie = wdev->conn->ie; | ||
837 | if (!wdev->conn->ie) { | ||
838 | kfree(wdev->conn); | ||
839 | wdev->conn = NULL; | ||
840 | return -ENOMEM; | ||
841 | } | ||
842 | } | ||
843 | |||
844 | if (connect->auth_type == NL80211_AUTHTYPE_AUTOMATIC) { | ||
845 | wdev->conn->auto_auth = true; | ||
846 | /* start with open system ... should mostly work */ | ||
847 | wdev->conn->params.auth_type = | ||
848 | NL80211_AUTHTYPE_OPEN_SYSTEM; | ||
849 | } else { | ||
850 | wdev->conn->auto_auth = false; | ||
851 | } | ||
852 | |||
853 | memcpy(wdev->ssid, connect->ssid, connect->ssid_len); | ||
854 | wdev->ssid_len = connect->ssid_len; | ||
855 | wdev->conn->params.ssid = wdev->ssid; | ||
856 | wdev->conn->params.ssid_len = connect->ssid_len; | ||
857 | |||
858 | /* see if we have the bss already */ | ||
859 | bss = cfg80211_get_conn_bss(wdev); | ||
860 | |||
861 | wdev->sme_state = CFG80211_SME_CONNECTING; | ||
862 | wdev->connect_keys = connkeys; | ||
863 | |||
864 | if (prev_bssid) { | ||
865 | memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN); | ||
866 | wdev->conn->prev_bssid_valid = true; | ||
867 | } | ||
868 | |||
869 | /* we're good if we have a matching bss struct */ | ||
870 | if (bss) { | ||
871 | wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; | ||
872 | err = cfg80211_conn_do_work(wdev); | ||
873 | cfg80211_put_bss(wdev->wiphy, bss); | ||
874 | } else { | ||
875 | /* otherwise we'll need to scan for the AP first */ | ||
876 | err = cfg80211_conn_scan(wdev); | ||
877 | /* | ||
878 | * If we can't scan right now, then we need to scan again | ||
879 | * after the current scan finished, since the parameters | ||
880 | * changed (unless we find a good AP anyway). | ||
881 | */ | ||
882 | if (err == -EBUSY) { | ||
883 | err = 0; | ||
884 | wdev->conn->state = CFG80211_CONN_SCAN_AGAIN; | ||
885 | } | ||
886 | } | ||
887 | if (err) { | ||
888 | kfree(wdev->conn->ie); | ||
889 | kfree(wdev->conn); | ||
890 | wdev->conn = NULL; | ||
891 | wdev->sme_state = CFG80211_SME_IDLE; | ||
892 | wdev->connect_keys = NULL; | ||
893 | wdev->ssid_len = 0; | ||
894 | } | ||
895 | |||
896 | return err; | ||
897 | } else { | ||
898 | wdev->sme_state = CFG80211_SME_CONNECTING; | ||
899 | wdev->connect_keys = connkeys; | ||
900 | err = rdev_connect(rdev, dev, connect); | 930 | err = rdev_connect(rdev, dev, connect); |
901 | if (err) { | ||
902 | wdev->connect_keys = NULL; | ||
903 | wdev->sme_state = CFG80211_SME_IDLE; | ||
904 | return err; | ||
905 | } | ||
906 | 931 | ||
907 | memcpy(wdev->ssid, connect->ssid, connect->ssid_len); | 932 | if (err) { |
908 | wdev->ssid_len = connect->ssid_len; | 933 | wdev->connect_keys = NULL; |
909 | 934 | wdev->ssid_len = 0; | |
910 | return 0; | 935 | return err; |
911 | } | 936 | } |
937 | |||
938 | return 0; | ||
912 | } | 939 | } |
913 | 940 | ||
914 | int cfg80211_disconnect(struct cfg80211_registered_device *rdev, | 941 | int cfg80211_disconnect(struct cfg80211_registered_device *rdev, |
@@ -919,78 +946,17 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev, | |||
919 | 946 | ||
920 | ASSERT_WDEV_LOCK(wdev); | 947 | ASSERT_WDEV_LOCK(wdev); |
921 | 948 | ||
922 | if (wdev->sme_state == CFG80211_SME_IDLE) | ||
923 | return -EINVAL; | ||
924 | |||
925 | kfree(wdev->connect_keys); | 949 | kfree(wdev->connect_keys); |
926 | wdev->connect_keys = NULL; | 950 | wdev->connect_keys = NULL; |
927 | 951 | ||
928 | if (!rdev->ops->disconnect) { | 952 | if (wdev->conn) { |
929 | if (!rdev->ops->deauth) | 953 | err = cfg80211_sme_disconnect(wdev, reason); |
930 | return -EOPNOTSUPP; | 954 | } else if (!rdev->ops->disconnect) { |
931 | 955 | cfg80211_mlme_down(rdev, dev); | |
932 | /* was it connected by userspace SME? */ | 956 | err = 0; |
933 | if (!wdev->conn) { | ||
934 | cfg80211_mlme_down(rdev, dev); | ||
935 | goto disconnect; | ||
936 | } | ||
937 | |||
938 | if (wdev->sme_state == CFG80211_SME_CONNECTING && | ||
939 | (wdev->conn->state == CFG80211_CONN_SCANNING || | ||
940 | wdev->conn->state == CFG80211_CONN_SCAN_AGAIN)) { | ||
941 | wdev->sme_state = CFG80211_SME_IDLE; | ||
942 | kfree(wdev->conn->ie); | ||
943 | kfree(wdev->conn); | ||
944 | wdev->conn = NULL; | ||
945 | wdev->ssid_len = 0; | ||
946 | return 0; | ||
947 | } | ||
948 | |||
949 | /* wdev->conn->params.bssid must be set if > SCANNING */ | ||
950 | err = cfg80211_mlme_deauth(rdev, dev, | ||
951 | wdev->conn->params.bssid, | ||
952 | NULL, 0, reason, false); | ||
953 | if (err) | ||
954 | return err; | ||
955 | } else { | 957 | } else { |
956 | err = rdev_disconnect(rdev, dev, reason); | 958 | err = rdev_disconnect(rdev, dev, reason); |
957 | if (err) | ||
958 | return err; | ||
959 | } | 959 | } |
960 | 960 | ||
961 | disconnect: | 961 | return err; |
962 | if (wdev->sme_state == CFG80211_SME_CONNECTED) | ||
963 | __cfg80211_disconnected(dev, NULL, 0, 0, false); | ||
964 | else if (wdev->sme_state == CFG80211_SME_CONNECTING) | ||
965 | __cfg80211_connect_result(dev, NULL, NULL, 0, NULL, 0, | ||
966 | WLAN_STATUS_UNSPECIFIED_FAILURE, | ||
967 | wextev, NULL); | ||
968 | |||
969 | return 0; | ||
970 | } | ||
971 | |||
972 | void cfg80211_sme_disassoc(struct net_device *dev, | ||
973 | struct cfg80211_internal_bss *bss) | ||
974 | { | ||
975 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
976 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
977 | u8 bssid[ETH_ALEN]; | ||
978 | |||
979 | ASSERT_WDEV_LOCK(wdev); | ||
980 | |||
981 | if (!wdev->conn) | ||
982 | return; | ||
983 | |||
984 | if (wdev->conn->state == CFG80211_CONN_IDLE) | ||
985 | return; | ||
986 | |||
987 | /* | ||
988 | * Ok, so the association was made by this SME -- we don't | ||
989 | * want it any more so deauthenticate too. | ||
990 | */ | ||
991 | |||
992 | memcpy(bssid, bss->pub.bssid, ETH_ALEN); | ||
993 | |||
994 | cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0, | ||
995 | WLAN_REASON_DEAUTH_LEAVING, false); | ||
996 | } | 962 | } |
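With the sme_state checks removed from cfg80211_connect_result(), cfg80211_roamed() and cfg80211_disconnected() above, the driver-facing API is unchanged: a fullmac driver that implements .connect still reports the outcome the same way. A minimal, hypothetical completion handler (the mydrv_* naming is made up for illustration; cfg80211_connect_result() is the real exported call):

#include <net/cfg80211.h>

/* Hypothetical fullmac driver glue: forward the firmware's connect
 * completion to cfg80211, which generates the nl80211/wext events.
 */
static void mydrv_connect_done(struct net_device *ndev, const u8 *bssid,
                               const u8 *req_ie, size_t req_ie_len,
                               const u8 *resp_ie, size_t resp_ie_len,
                               u16 status)
{
        cfg80211_connect_result(ndev, bssid, req_ie, req_ie_len,
                                resp_ie, resp_ie_len, status, GFP_KERNEL);
}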
diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 23fafeae8a10..e1534baf2ebb 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h | |||
@@ -1911,24 +1911,46 @@ TRACE_EVENT(cfg80211_send_rx_assoc, | |||
1911 | NETDEV_PR_ARG, MAC_PR_ARG(bssid), CHAN_PR_ARG) | 1911 | NETDEV_PR_ARG, MAC_PR_ARG(bssid), CHAN_PR_ARG) |
1912 | ); | 1912 | ); |
1913 | 1913 | ||
1914 | DEFINE_EVENT(netdev_evt_only, cfg80211_send_deauth, | 1914 | DECLARE_EVENT_CLASS(netdev_frame_event, |
1915 | TP_PROTO(struct net_device *netdev), | 1915 | TP_PROTO(struct net_device *netdev, const u8 *buf, int len), |
1916 | TP_ARGS(netdev) | 1916 | TP_ARGS(netdev, buf, len), |
1917 | TP_STRUCT__entry( | ||
1918 | NETDEV_ENTRY | ||
1919 | __dynamic_array(u8, frame, len) | ||
1920 | ), | ||
1921 | TP_fast_assign( | ||
1922 | NETDEV_ASSIGN; | ||
1923 | memcpy(__get_dynamic_array(frame), buf, len); | ||
1924 | ), | ||
1925 | TP_printk(NETDEV_PR_FMT ", ftype:0x%.2x", | ||
1926 | NETDEV_PR_ARG, | ||
1927 | le16_to_cpup((__le16 *)__get_dynamic_array(frame))) | ||
1917 | ); | 1928 | ); |
1918 | 1929 | ||
1919 | DEFINE_EVENT(netdev_evt_only, cfg80211_send_disassoc, | 1930 | DEFINE_EVENT(netdev_frame_event, cfg80211_rx_unprot_mlme_mgmt, |
1920 | TP_PROTO(struct net_device *netdev), | 1931 | TP_PROTO(struct net_device *netdev, const u8 *buf, int len), |
1921 | TP_ARGS(netdev) | 1932 | TP_ARGS(netdev, buf, len) |
1922 | ); | 1933 | ); |
1923 | 1934 | ||
1924 | DEFINE_EVENT(netdev_evt_only, cfg80211_send_unprot_deauth, | 1935 | DEFINE_EVENT(netdev_frame_event, cfg80211_rx_mlme_mgmt, |
1925 | TP_PROTO(struct net_device *netdev), | 1936 | TP_PROTO(struct net_device *netdev, const u8 *buf, int len), |
1926 | TP_ARGS(netdev) | 1937 | TP_ARGS(netdev, buf, len) |
1927 | ); | 1938 | ); |
1928 | 1939 | ||
1929 | DEFINE_EVENT(netdev_evt_only, cfg80211_send_unprot_disassoc, | 1940 | TRACE_EVENT(cfg80211_tx_mlme_mgmt, |
1930 | TP_PROTO(struct net_device *netdev), | 1941 | TP_PROTO(struct net_device *netdev, const u8 *buf, int len), |
1931 | TP_ARGS(netdev) | 1942 | TP_ARGS(netdev, buf, len), |
1943 | TP_STRUCT__entry( | ||
1944 | NETDEV_ENTRY | ||
1945 | __dynamic_array(u8, frame, len) | ||
1946 | ), | ||
1947 | TP_fast_assign( | ||
1948 | NETDEV_ASSIGN; | ||
1949 | memcpy(__get_dynamic_array(frame), buf, len); | ||
1950 | ), | ||
1951 | TP_printk(NETDEV_PR_FMT ", ftype:0x%.2x", | ||
1952 | NETDEV_PR_ARG, | ||
1953 | le16_to_cpup((__le16 *)__get_dynamic_array(frame))) | ||
1932 | ); | 1954 | ); |
1933 | 1955 | ||
1934 | DECLARE_EVENT_CLASS(netdev_mac_evt, | 1956 | DECLARE_EVENT_CLASS(netdev_mac_evt, |
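The new netdev_frame_event class above records the complete management frame; the trace points fire from inside the cfg80211 entry points of the same name (cfg80211_rx_mlme_mgmt(), cfg80211_tx_mlme_mgmt()), which drivers now call with the raw frame instead of pre-parsed values. An illustrative caller, with the frame pointers and lengths standing in for whatever the driver received or transmitted:

#include <linux/ieee80211.h>
#include <net/cfg80211.h>

/* Illustrative only: hand the full auth/deauth/disassoc frame to cfg80211;
 * the frame trace events above log the leading frame_control word as ftype.
 */
static void example_report_mlme_frames(struct net_device *dev,
                                       const struct ieee80211_mgmt *rx_mgmt,
                                       size_t rx_len,
                                       const struct ieee80211_mgmt *tx_mgmt,
                                       size_t tx_len)
{
        /* received authentication/deauthentication/disassociation frame */
        cfg80211_rx_mlme_mgmt(dev, (const u8 *)rx_mgmt, rx_len);
        /* locally generated deauthentication/disassociation frame */
        cfg80211_tx_mlme_mgmt(dev, (const u8 *)tx_mgmt, tx_len);
}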
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index a53f8404f451..14c9a2583ba0 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c | |||
@@ -89,7 +89,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev, | |||
89 | 89 | ||
90 | wdev_lock(wdev); | 90 | wdev_lock(wdev); |
91 | 91 | ||
92 | if (wdev->sme_state != CFG80211_SME_IDLE) { | 92 | if (wdev->conn) { |
93 | bool event = true; | 93 | bool event = true; |
94 | 94 | ||
95 | if (wdev->wext.connect.channel == chan) { | 95 | if (wdev->wext.connect.channel == chan) { |
@@ -188,7 +188,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev, | |||
188 | 188 | ||
189 | err = 0; | 189 | err = 0; |
190 | 190 | ||
191 | if (wdev->sme_state != CFG80211_SME_IDLE) { | 191 | if (wdev->conn) { |
192 | bool event = true; | 192 | bool event = true; |
193 | 193 | ||
194 | if (wdev->wext.connect.ssid && len && | 194 | if (wdev->wext.connect.ssid && len && |
@@ -277,7 +277,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev, | |||
277 | 277 | ||
278 | wdev_lock(wdev); | 278 | wdev_lock(wdev); |
279 | 279 | ||
280 | if (wdev->sme_state != CFG80211_SME_IDLE) { | 280 | if (wdev->conn) { |
281 | err = 0; | 281 | err = 0; |
282 | /* both automatic */ | 282 | /* both automatic */ |
283 | if (!bssid && !wdev->wext.connect.bssid) | 283 | if (!bssid && !wdev->wext.connect.bssid) |
@@ -364,7 +364,7 @@ int cfg80211_wext_siwgenie(struct net_device *dev, | |||
364 | wdev->wext.ie = ie; | 364 | wdev->wext.ie = ie; |
365 | wdev->wext.ie_len = ie_len; | 365 | wdev->wext.ie_len = ie_len; |
366 | 366 | ||
367 | if (wdev->sme_state != CFG80211_SME_IDLE) { | 367 | if (wdev->conn) { |
368 | err = cfg80211_disconnect(rdev, dev, | 368 | err = cfg80211_disconnect(rdev, dev, |
369 | WLAN_REASON_DEAUTH_LEAVING, false); | 369 | WLAN_REASON_DEAUTH_LEAVING, false); |
370 | if (err) | 370 | if (err) |